From 2b7de7a50345efeb04c8c8445824aa6d559b7fff Mon Sep 17 00:00:00 2001 From: Kevin Hill Date: Mon, 29 Dec 2025 13:37:19 -0300 Subject: [PATCH 001/290] Add spec-driven-dev CLI with API sync and traceability - Add spec_cli.py for parsing/syncing specs to backend API - Add api_client.py for direct API communication (bypasses MCP) - Add models.py and parse_specs.py for spec parsing - Support linked_design field in requirements API - Update templates with YAML frontmatter and field references - Fix SQLAlchemy DetachedInstanceError with eager loading - Convert webhook-notifications.md to YAML frontmatter format - Add new webhook integration tickets and tasks - Reorganize marketing docs into docs/marketing/ --- .claude/skills/spec-driven-dev/SKILL.md | 276 ++++ .../references/design_template.md | 54 +- .../references/requirements_template.md | 40 +- .../references/task_template.md | 89 +- .../references/ticket_template.md | 123 +- .../spec-driven-dev/scripts/api_client.py | 1278 +++++++++++++++++ .../skills/spec-driven-dev/scripts/models.py | 540 +++++++ .../spec-driven-dev/scripts/parse_specs.py | 577 ++++++++ .../spec-driven-dev/scripts/spec_cli.py | 1075 ++++++++++++++ .../spec-driven-dev/scripts/validate_specs.py | 228 ++- .../requirements/webhook-notifications.md | 17 +- .../tasks/TSK-001-add-webhook-url-fields.md | 67 +- .omoi_os/tasks/TSK-002-create-migration.md | 71 +- .../tasks/TSK-003-webhook-delivery-service.md | 108 +- .../TSK-004-webhook-notification-service.md | 98 +- .omoi_os/tasks/TSK-005-update-api-routes.md | 104 +- .omoi_os/tasks/TSK-006-tests.md | 96 +- .omoi_os/tasks/TSK-007-slack-formatter.md | 32 + .omoi_os/tasks/TSK-008-discord-formatter.md | 32 + .../tickets/TKT-001-webhook-notifications.md | 110 +- .../tickets/TKT-002-webhook-integrations.md | 47 + backend/omoi_os/api/routes/specs.py | 43 +- backend/omoi_os/api/routes/tasks.py | 56 + docs/marketing/README.md | 73 + docs/marketing/cross_market_niche_ideas.md | 329 +++++ docs/{ => 
marketing}/go_to_market_strategy.md | 17 +- docs/{ => marketing}/marketing_overview.md | 30 +- .../reality_contact_outreach_playbook.md | 15 +- docs/marketing/sub_niche_targeting.md | 199 +++ .../spec_driven_dev_dependencies_plan.md | 530 +++++++ 30 files changed, 6095 insertions(+), 259 deletions(-) create mode 100644 .claude/skills/spec-driven-dev/scripts/api_client.py create mode 100644 .claude/skills/spec-driven-dev/scripts/models.py create mode 100644 .claude/skills/spec-driven-dev/scripts/parse_specs.py create mode 100644 .claude/skills/spec-driven-dev/scripts/spec_cli.py create mode 100644 .omoi_os/tasks/TSK-007-slack-formatter.md create mode 100644 .omoi_os/tasks/TSK-008-discord-formatter.md create mode 100644 .omoi_os/tickets/TKT-002-webhook-integrations.md create mode 100644 docs/marketing/README.md create mode 100644 docs/marketing/cross_market_niche_ideas.md rename docs/{ => marketing}/go_to_market_strategy.md (94%) rename docs/{ => marketing}/marketing_overview.md (85%) rename docs/{ => marketing}/reality_contact_outreach_playbook.md (89%) create mode 100644 docs/marketing/sub_niche_targeting.md create mode 100644 docs/plans/spec_driven_dev_dependencies_plan.md diff --git a/.claude/skills/spec-driven-dev/SKILL.md b/.claude/skills/spec-driven-dev/SKILL.md index 4b96d252..2e8c134d 100644 --- a/.claude/skills/spec-driven-dev/SKILL.md +++ b/.claude/skills/spec-driven-dev/SKILL.md @@ -759,6 +759,282 @@ This skill includes utility scripts in `scripts/`: - `init_feature.py` - Initialize directory structure for new feature - `generate_ids.py` - Generate next ticket/task IDs - `validate_specs.py` - Validate spec documents for completeness +- `spec_cli.py` - **Main CLI** for viewing, validating, and syncing specs +- `api_client.py` - Direct HTTP client for OmoiOS API (bypasses MCP) +- `parse_specs.py` - Parser for .omoi_os/ markdown files +- `models.py` - Data models with cross-ticket dependency logic + +--- + +## Spec CLI (spec_cli.py) + +The main CLI tool for 
working with specs locally and syncing to the API. + +### View Local Specs + +```bash +# From the scripts directory: +cd .claude/skills/spec-driven-dev/scripts + +# Show all specs (requirements, designs, tickets, tasks, traceability) +python spec_cli.py show all + +# Show only requirements (EARS format) +python spec_cli.py show requirements + +# Show only designs +python spec_cli.py show designs + +# Show only tickets +python spec_cli.py show tickets + +# Show only tasks (with blocking reasons) +python spec_cli.py show tasks + +# Show task dependency graph (within tickets) +python spec_cli.py show graph + +# Show cross-ticket dependency graph +python spec_cli.py show ticket-graph + +# Show full traceability matrix (Requirements → Designs → Tickets → Tasks) +python spec_cli.py show traceability + +# Show only ready tasks (not blocked) +python spec_cli.py show ready + +# Validate specs (circular deps, missing refs) +python spec_cli.py validate + +# Export to JSON (includes all specs + traceability stats) +python spec_cli.py export json +``` + +### API Integration + +```bash +# List all projects +python spec_cli.py projects --api-url http://0.0.0.0:18000 + +# Show project with all tickets and tasks +python spec_cli.py project --api-url http://0.0.0.0:18000 + +# Dry-run sync (see what would change) +python spec_cli.py sync diff --api-url http://0.0.0.0:18000 --project-id + +# Push local specs to API +python spec_cli.py sync push --api-url http://0.0.0.0:18000 --project-id +``` + +### Sync Specs to API (Requirements & Designs) + +```bash +# Sync local requirements and designs to API specs +# This creates or updates specs with EARS-format requirements + +# Dry-run: See what would be synced +python spec_cli.py sync-specs diff --project-id --api-url http://0.0.0.0:18000 + +# Push: Actually create/update specs +python spec_cli.py sync-specs push --project-id --api-url http://0.0.0.0:18000 + +# Optional: Specify a custom spec title +python spec_cli.py sync-specs push 
--project-id --spec-title "My Feature Spec" +``` + +### View API Traceability + +```bash +# View full traceability from API: Specs → Requirements → Tickets → Tasks +python spec_cli.py api-trace --api-url http://0.0.0.0:18000 +``` + +### Authentication Options + +```bash +# Via API key (recommended) +python spec_cli.py sync push --api-key ... +# Or set OMOIOS_API_KEY environment variable + +# Via JWT token +python spec_cli.py sync push --token ... +# Or set OMOIOS_TOKEN environment variable + +# Via email/password login +python spec_cli.py sync push --email user@example.com --password secret ... +# Or set OMOIOS_EMAIL and OMOIOS_PASSWORD environment variables +``` + +### Sync Behavior + +The sync command uses **create-or-skip** logic: +- **CREATE**: If ticket/task doesn't exist (matched by title) +- **UPDATE**: If exists but description differs +- **SKIP**: If exists with same description + +--- + +## Cross-Ticket Dependencies + +Tasks can be blocked by dependencies at two levels: + +### 1. Task-Level Dependencies (within a ticket) + +```yaml +# In .omoi_os/tasks/TSK-002.md +dependencies: + depends_on: [TSK-001] # Must complete TSK-001 first + blocks: [TSK-003] # TSK-003 waits for this +``` + +### 2. Ticket-Level Dependencies (cross-ticket) + +```yaml +# In .omoi_os/tickets/TKT-002.md +dependencies: + blocked_by: [TKT-001] # ALL tasks in TKT-002 wait for ALL tasks in TKT-001 + blocks: [TKT-003] # TKT-003 waits for this ticket + related: [] # Informational only +``` + +### How Cross-Ticket Blocking Works + +When a ticket has `blocked_by: [TKT-001]`: +1. ALL tasks in that ticket are blocked +2. They remain blocked until ALL tasks in TKT-001 have `status: done` +3. 
The CLI shows: `[BLOCKED: blocked by ticket(s): TKT-001]` + +### Viewing Dependency Graphs + +```bash +# Task dependencies (within tickets) +python spec_cli.py show graph +# Output: +# └─> TSK-001 (Add models) +# └─> TSK-002 (Create migration) +# └─> TSK-003 (Implement service) + +# Cross-ticket dependencies +python spec_cli.py show ticket-graph +# Output: +# └─> [○] TKT-001 (Webhook Infrastructure) [6 tasks] +# └─> [○] TKT-002 (Slack/Discord Integration) [2 tasks] +# Legend: ✓ = all tasks complete, ○ = incomplete +``` + +--- + +## Direct API Client (api_client.py) + +For programmatic access to the OmoiOS API without MCP: + +```python +from api_client import OmoiOSClient + +# Initialize client +client = OmoiOSClient( + base_url="http://0.0.0.0:18000", + api_key="your-api-key" # or token="jwt-token" +) + +# List projects +projects = await client.list_projects() + +# Get project with tickets and tasks +data = await client.get_project_with_tickets(project_id) + +# List tickets for a project +tickets = await client.list_tickets(project_id) + +# Create a ticket +from models import ParsedTicket +success, msg = await client.create_ticket(parsed_ticket, project_id) + +# Create a task +from models import ParsedTask +success, msg = await client.create_task(parsed_task, ticket_api_id) + +# Full sync from local specs (tickets/tasks) +from parse_specs import SpecParser +parser = SpecParser() +result = parser.parse_all() +summary = await client.sync(result, project_id) +``` + +### Spec/Requirement/Design API Operations + +```python +# === SPEC OPERATIONS === + +# Create a new spec +success, msg, spec_id = await client.create_spec( + title="My Feature Spec", + project_id="project-uuid", + description="Optional description" +) + +# Get spec by ID +spec = await client.get_spec(spec_id) + +# List all specs for a project +specs = await client.list_specs(project_id) + +# === REQUIREMENT OPERATIONS (EARS Format) === + +# Add a requirement using EARS format +success, msg, req_id = await 
client.add_requirement( + spec_id=spec_id, + title="User authentication", + condition="a user submits valid credentials", # WHEN clause + action="authenticate the user and create a session" # SHALL clause +) + +# Add acceptance criterion to a requirement +success, msg = await client.add_acceptance_criterion( + spec_id=spec_id, + requirement_id=req_id, + text="Session token expires after 24 hours" +) + +# === DESIGN OPERATIONS === + +# Update spec's design artifact +success, msg = await client.update_design( + spec_id=spec_id, + architecture="## Architecture\n\nJWT-based auth with refresh tokens...", + data_model="## Data Model\n\n```sql\nCREATE TABLE sessions...", + api_spec=[ + {"method": "POST", "path": "/api/auth/login", "description": "User login"}, + {"method": "POST", "path": "/api/auth/logout", "description": "User logout"} + ] +) + +# === SYNC LOCAL SPECS TO API === + +# Sync local requirements and designs to API specs +from parse_specs import SpecParser +from models import ParseResult + +parser = SpecParser() +result: ParseResult = parser.parse_all() + +# Dry-run: See what would change +summary = await client.sync_specs(result, project_id, dry_run=True) + +# Actual sync: Create/update specs +summary = await client.sync_specs(result, project_id) + +# Diff only (shorthand for dry_run=True) +summary = await client.diff_specs(result, project_id) + +# === FULL TRACEABILITY === + +# Get complete traceability from API +# Returns: Specs → Requirements → Tickets → Tasks +trace = await client.get_full_traceability(project_id) +``` + +--- ## References diff --git a/.claude/skills/spec-driven-dev/references/design_template.md b/.claude/skills/spec-driven-dev/references/design_template.md index c319db07..c9318b07 100644 --- a/.claude/skills/spec-driven-dev/references/design_template.md +++ b/.claude/skills/spec-driven-dev/references/design_template.md @@ -5,15 +5,20 @@ Use this template for `.omoi_os/designs/{feature-name}.md` files. 
--- ```markdown -# {Feature Name} - Product Design Document - -**Created**: {YYYY-MM-DD} -**Status**: Draft | Review | Approved -**Purpose**: Design specification for {feature description}. -**Related**: {Requirements doc}, {Other design docs} - +--- +id: DESIGN-{FEATURE}-001 +title: {Feature Name} Design +feature: {feature-name} +created: {YYYY-MM-DD} +updated: {YYYY-MM-DD} +status: draft +requirements: + - REQ-{DOMAIN}-001 + - REQ-{DOMAIN}-002 --- +# {Feature Name} - Product Design Document + ## Document Overview {Description of the feature/system being designed} @@ -336,6 +341,40 @@ sequenceDiagram --- +## Frontmatter Field Reference + +| Field | Type | Required | Description | +|-------|------|----------|-------------| +| `id` | string | Yes | Unique design ID (DESIGN-FEATURE-001) | +| `title` | string | Yes | Human-readable title | +| `feature` | string | Yes | Feature name (kebab-case) | +| `created` | date | Yes | Creation date (YYYY-MM-DD) | +| `updated` | date | Yes | Last update date (YYYY-MM-DD) | +| `status` | string | Yes | draft, review, approved | +| `requirements` | list | No | List of requirement IDs this design implements | +| `tickets` | list | No | Linked ticket IDs for this design | + +--- + +## Design ID Conventions + +### Format +``` +DESIGN-{FEATURE}-{NUM} +``` + +### Examples +- `DESIGN-AUTH-001` - Authentication system design +- `DESIGN-WEBHOOK-001` - Webhook notifications design +- `DESIGN-SYNC-001` - Data synchronization design + +### Numbering +- Start at 001 +- Increment sequentially within feature +- Don't reuse deleted numbers + +--- + ## Best Practices 1. **Architecture First** - Start with high-level before diving into details @@ -343,3 +382,4 @@ sequenceDiagram 3. **Concrete Examples** - Include pseudocode and example payloads 4. **Integration Focus** - Clearly define boundaries and contracts 5. **Traceability** - Link back to requirements throughout +6. 
**Bidirectional Links** - Reference requirements in frontmatter, link design from requirements diff --git a/.claude/skills/spec-driven-dev/references/requirements_template.md b/.claude/skills/spec-driven-dev/references/requirements_template.md index 56343735..00eea7c1 100644 --- a/.claude/skills/spec-driven-dev/references/requirements_template.md +++ b/.claude/skills/spec-driven-dev/references/requirements_template.md @@ -5,15 +5,22 @@ Use this template for `.omoi_os/requirements/{feature-name}.md` files. --- ```markdown -# {Feature Name} Requirements - -**Created**: {YYYY-MM-DD} -**Status**: Draft | Review | Approved -**Purpose**: {One-line purpose statement} -**Related**: {Links to related docs} - +--- +id: REQ-{DOMAIN}-001 +title: {Feature Name} Requirements +feature: {feature-name} +created: {YYYY-MM-DD} +updated: {YYYY-MM-DD} +status: draft +category: functional +priority: HIGH +design_ref: designs/{feature-name}.md +condition: "{EARS WHEN clause - triggering condition}" +action: "{EARS SHALL clause - expected behavior}" --- +# {Feature Name} Requirements + ## Document Overview {2-3 sentence overview of what this requirements document covers} @@ -202,6 +209,25 @@ class {Entity}Response(BaseModel): --- +## Frontmatter Field Reference + +| Field | Type | Required | Description | +|-------|------|----------|-------------| +| `id` | string | Yes | Unique requirement ID (REQ-DOMAIN-001) | +| `title` | string | Yes | Human-readable title | +| `feature` | string | Yes | Feature name (kebab-case) | +| `created` | date | Yes | Creation date (YYYY-MM-DD) | +| `updated` | date | Yes | Last update date (YYYY-MM-DD) | +| `status` | string | Yes | draft, review, approved | +| `category` | string | Yes | functional, non-functional, constraint | +| `priority` | string | Yes | CRITICAL, HIGH, MEDIUM, LOW | +| `design_ref` | string | No | Path to linked design doc (e.g., `designs/feature.md`) | +| `condition` | string | No | EARS "WHEN" clause | +| `action` | string | No | EARS 
"THE SYSTEM SHALL" clause | +| `tickets` | list | No | Linked ticket IDs implementing this requirement | + +--- + ## Requirement ID Conventions ### Format diff --git a/.claude/skills/spec-driven-dev/references/task_template.md b/.claude/skills/spec-driven-dev/references/task_template.md index cf9a31c8..a73581e0 100644 --- a/.claude/skills/spec-driven-dev/references/task_template.md +++ b/.claude/skills/spec-driven-dev/references/task_template.md @@ -2,19 +2,28 @@ Use this template for `.omoi_os/tasks/TSK-{NUM}.md` files. ---- +**IMPORTANT**: All task files MUST include YAML frontmatter for programmatic parsing. -```markdown -# TSK-{NUM}: {Task Title} +--- -**Status**: pending | in_progress | review | done | blocked -**Parent Ticket**: TKT-{NUM} -**Estimate**: S | M | L -**Created**: {YYYY-MM-DD} -**Assignee**: {agent-id | unassigned} +## Template +```markdown +--- +id: TSK-{NUM} +title: {Task Title} +status: pending # pending | in_progress | review | done | blocked +parent_ticket: TKT-{NUM} +estimate: M # S | M | L +created: {YYYY-MM-DD} +assignee: null # agent-id or null +dependencies: + depends_on: [] # Task IDs that must complete first + blocks: [] # Task IDs that cannot start until this completes --- +# TSK-{NUM}: {Task Title} + ## Objective {1-2 sentences describing what this task accomplishes} @@ -64,17 +73,6 @@ def example_pattern(): --- -## Dependencies - -**Requires**: -- {TSK-XXX complete} -- {File/module exists} - -**Provides**: -- {What other tasks need from this} - ---- - ## Testing Requirements ### Unit Tests @@ -94,15 +92,23 @@ def test_example(): ## Notes {Additional context, decisions, or warnings} +``` --- -## History +## Frontmatter Field Reference -| Date | Action | By | -|------|--------|-----| -| {YYYY-MM-DD} | Created | {Author} | -``` +| Field | Type | Required | Description | +|-------|------|----------|-------------| +| `id` | string | Yes | Unique task ID (TSK-001, TSK-FEAT-001) | +| `title` | string | Yes | Human-readable task title | +| 
`status` | string | Yes | Current status (see Status Definitions) | +| `parent_ticket` | string | Yes | Parent ticket ID | +| `estimate` | string | Yes | T-shirt size (S/M/L) | +| `created` | date | Yes | Creation date (YYYY-MM-DD) | +| `assignee` | string | No | Agent ID or null if unassigned | +| `dependencies.depends_on` | list | No | Task IDs that must complete first | +| `dependencies.blocks` | list | No | Task IDs waiting on this | --- @@ -111,6 +117,7 @@ def test_example(): ### Format ``` TSK-{NUM} +TSK-{PREFIX}-{NUM} ``` ### Numbering @@ -142,10 +149,42 @@ TSK-{NUM} --- +## Dependency Rules + +1. **No Circular Dependencies**: A task cannot depend on itself or create a cycle +2. **Same Ticket Preferred**: Dependencies should ideally be within the same parent ticket +3. **Cross-Ticket Dependencies**: Use ticket-level `blocked_by` instead when possible +4. **Keep Chains Short**: Prefer max 3-4 levels of task dependency depth + +### Example Dependency Graph + +``` +TSK-001 (Add model fields) + └─ blocks: TSK-002 + +TSK-002 (Create migration) + ├─ depends_on: TSK-001 + └─ blocks: TSK-003, TSK-004 + +TSK-003 (Implement service) + ├─ depends_on: TSK-002 + └─ blocks: TSK-005 + +TSK-004 (Update API routes) + ├─ depends_on: TSK-002 + └─ blocks: TSK-005 + +TSK-005 (Add tests) + └─ depends_on: TSK-003, TSK-004 +``` + +--- + ## Best Practices 1. **Atomic** - One clear deliverable per task 2. **Self-Contained** - All context needed is in the task 3. **Testable** - Clear acceptance criteria 4. **Time-Boxed** - Should complete in one session (< 8 hours) -5. **Linked** - Always reference parent ticket +5. **Linked** - Always reference parent ticket in frontmatter +6. 
**Explicit Dependencies** - List all dependencies in frontmatter, not just prose diff --git a/.claude/skills/spec-driven-dev/references/ticket_template.md b/.claude/skills/spec-driven-dev/references/ticket_template.md index 80c30509..dd37cf4b 100644 --- a/.claude/skills/spec-driven-dev/references/ticket_template.md +++ b/.claude/skills/spec-driven-dev/references/ticket_template.md @@ -2,25 +2,36 @@ Use this template for `.omoi_os/tickets/TKT-{NUM}.md` files. ---- - -```markdown -# TKT-{NUM}: {Ticket Title} - -**Status**: backlog | analyzing | building | testing | done | blocked -**Priority**: CRITICAL | HIGH | MEDIUM | LOW -**Estimate**: S | M | L | XL -**Created**: {YYYY-MM-DD} -**Updated**: {YYYY-MM-DD} +**IMPORTANT**: All ticket files MUST include YAML frontmatter for programmatic parsing. -## Traceability +--- -**Requirements**: {REQ-XXX-YYY, REQ-XXX-YYY} -**Design Reference**: {designs/feature-name.md#section} -**Feature**: {feature-name} +## Template +```markdown +--- +id: TKT-{NUM} +title: {Ticket Title} +status: backlog # backlog | analyzing | building | testing | done | blocked +priority: MEDIUM # CRITICAL | HIGH | MEDIUM | LOW +estimate: M # S | M | L | XL +created: {YYYY-MM-DD} +updated: {YYYY-MM-DD} +feature: {feature-name} +requirements: + - REQ-XXX-YYY +design_ref: designs/{feature-name}.md +tasks: + - TSK-{NUM} + - TSK-{NUM} +dependencies: + blocked_by: [] # Tickets that must complete before this can start + blocks: [] # Tickets that cannot start until this completes + related: [] # Tickets that are related but not blocking --- +# TKT-{NUM}: {Ticket Title} + ## Description {2-3 paragraph description of what this ticket accomplishes} @@ -48,32 +59,6 @@ Use this template for `.omoi_os/tickets/TKT-{NUM}.md` files. 
--- -## Dependencies - -### Blocks -{Tickets that cannot start until this completes} -- {TKT-XXX: Reason} - -### Blocked By -{Tickets that must complete before this can start} -- {TKT-XXX: Reason} - -### Related -{Tickets that are related but not blocking} -- {TKT-XXX: Relationship} - ---- - -## Tasks - -| Task ID | Description | Status | Assignee | -|---------|-------------|--------|----------| -| TSK-{NUM} | {Task description} | pending | - | -| TSK-{NUM} | {Task description} | pending | - | -| TSK-{NUM} | {Task description} | pending | - | - ---- - ## Technical Notes ### Implementation Approach @@ -113,16 +98,28 @@ Use this template for `.omoi_os/tickets/TKT-{NUM}.md` files. ## Notes {Additional notes, decisions, or context} +``` --- -## History - -| Date | Action | By | -|------|--------|-----| -| {YYYY-MM-DD} | Created | {Author} | -| {YYYY-MM-DD} | {Action} | {Author} | -``` +## Frontmatter Field Reference + +| Field | Type | Required | Description | +|-------|------|----------|-------------| +| `id` | string | Yes | Unique ticket ID (TKT-001, TKT-FEAT-001) | +| `title` | string | Yes | Human-readable ticket title | +| `status` | string | Yes | Current status (see Status Definitions) | +| `priority` | string | Yes | Priority level (CRITICAL/HIGH/MEDIUM/LOW) | +| `estimate` | string | Yes | T-shirt size (S/M/L/XL) | +| `created` | date | Yes | Creation date (YYYY-MM-DD) | +| `updated` | date | Yes | Last update date (YYYY-MM-DD) | +| `feature` | string | No | Feature name for grouping | +| `requirements` | list | No | Linked requirement IDs | +| `design_ref` | string | No | Path to design document | +| `tasks` | list | No | Child task IDs | +| `dependencies.blocked_by` | list | No | Ticket IDs that must complete first | +| `dependencies.blocks` | list | No | Ticket IDs waiting on this | +| `dependencies.related` | list | No | Related ticket IDs (non-blocking) | --- @@ -131,6 +128,7 @@ Use this template for `.omoi_os/tickets/TKT-{NUM}.md` files. 
### Format ``` TKT-{NUM} +TKT-{PREFIX}-{NUM} ``` ### Numbering @@ -176,10 +174,37 @@ TKT-{NUM} --- +## Dependency Rules + +1. **No Circular Dependencies**: A ticket cannot be blocked by itself or create a cycle +2. **Explicit Over Implicit**: Always list dependencies in frontmatter, not just in prose +3. **Use `related` for Informational Links**: Non-blocking relationships go in `related` +4. **Keep Chains Short**: Prefer max 3-4 levels of dependency depth + +### Example Dependency Graph + +``` +TKT-001 (Infrastructure) + └─ blocks: TKT-002, TKT-003 + +TKT-002 (User Model) + ├─ blocked_by: TKT-001 + └─ blocks: TKT-004 + +TKT-003 (API Framework) + ├─ blocked_by: TKT-001 + └─ blocks: TKT-004 + +TKT-004 (User API) + └─ blocked_by: TKT-002, TKT-003 +``` + +--- + ## Best Practices 1. **One Component Per Ticket** - Scope to a single major component or feature slice 2. **Clear Acceptance Criteria** - Every criterion should be testable -3. **Explicit Dependencies** - Document all blockers and blocked tickets -4. **Task Breakdown** - Every ticket should have associated tasks +3. **Explicit Dependencies** - Document all blockers in frontmatter +4. **Task Breakdown** - Every ticket should have associated tasks in `tasks` field 5. **Traceability** - Always link to requirements and design docs diff --git a/.claude/skills/spec-driven-dev/scripts/api_client.py b/.claude/skills/spec-driven-dev/scripts/api_client.py new file mode 100644 index 00000000..a6543b4b --- /dev/null +++ b/.claude/skills/spec-driven-dev/scripts/api_client.py @@ -0,0 +1,1278 @@ +#!/usr/bin/env python3 +""" +Direct API client for OmoiOS backend (bypasses MCP server). + +This module provides the OmoiOSClient class for syncing local specs +to the OmoiOS backend API. 
+ +Sync Behavior: +- CREATE: New tickets/tasks are created +- UPDATE DESCRIPTION: If item exists but description differs, update it +- SKIP: If item exists with same description, skip + +Usage: + from api_client import OmoiOSClient + from parse_specs import SpecParser + + client = OmoiOSClient() + parser = SpecParser() + result = parser.parse_all() + + await client.sync(result) +""" + +import asyncio +import re +from dataclasses import dataclass +from enum import Enum +from typing import Optional + +import httpx + +from models import ( + ParseResult, + ParsedDesign, + ParsedRequirement, + ParsedTask, + ParsedTicket, +) + + +class SyncAction(Enum): + """Action taken during sync.""" + + CREATED = "created" + UPDATED = "updated" + SKIPPED = "skipped" + FAILED = "failed" + + +@dataclass +class SyncResult: + """Result of syncing a single item.""" + + item_id: str + item_type: str # "ticket" or "task" + action: SyncAction + message: str = "" + + +@dataclass +class SyncSummary: + """Summary of sync operation.""" + + results: list[SyncResult] + created: int = 0 + updated: int = 0 + skipped: int = 0 + failed: int = 0 + + def add(self, result: SyncResult): + self.results.append(result) + if result.action == SyncAction.CREATED: + self.created += 1 + elif result.action == SyncAction.UPDATED: + self.updated += 1 + elif result.action == SyncAction.SKIPPED: + self.skipped += 1 + elif result.action == SyncAction.FAILED: + self.failed += 1 + + +class OmoiOSClient: + """Direct HTTP client for OmoiOS API.""" + + def __init__( + self, + base_url: str = "http://localhost:18000", + timeout: float = 30.0, + token: Optional[str] = None, + api_key: Optional[str] = None, + ): + """Initialize client. 
+ + Args: + base_url: Base URL of OmoiOS API + timeout: Request timeout in seconds + token: JWT access token for authentication + api_key: API key for authentication (alternative to JWT) + """ + self.base_url = base_url.rstrip("/") + self.timeout = timeout + self.token = token + self.api_key = api_key + + async def _request( + self, + method: str, + endpoint: str, + json: Optional[dict] = None, + ) -> tuple[int, Optional[dict]]: + """Make HTTP request to API. + + Returns: + Tuple of (status_code, response_json or None) + """ + url = f"{self.base_url}{endpoint}" + headers = {} + if self.api_key: + # API key authentication takes precedence + headers["X-API-Key"] = self.api_key + elif self.token: + headers["Authorization"] = f"Bearer {self.token}" + + async with httpx.AsyncClient(timeout=self.timeout) as client: + try: + response = await client.request(method, url, json=json, headers=headers) + try: + data = response.json() + except Exception: + data = None + return response.status_code, data + except httpx.RequestError as e: + return 0, {"error": str(e)} + + # ======================================================================== + # Ticket Operations + # ======================================================================== + + async def get_ticket(self, ticket_id: str) -> Optional[dict]: + """Get ticket by ID.""" + status, data = await self._request("GET", f"/api/v1/tickets/{ticket_id}") + if status == 200: + return data + return None + + async def create_ticket(self, ticket: ParsedTicket, project_id: Optional[str] = None) -> tuple[bool, str]: + """Create a new ticket. 
+ + Returns: + Tuple of (success, message/error) + """ + payload = { + "title": ticket.title, + "description": ticket.description, + "priority": ticket.priority, + "phase_id": "PHASE_IMPLEMENTATION", # Default phase + } + if project_id: + payload["project_id"] = project_id + + status, data = await self._request("POST", "/api/v1/tickets", json=payload) + + if status in (200, 201): + return True, f"Created with API ID: {data.get('id', 'unknown')}" + else: + error = data.get("detail", str(data)) if data else f"HTTP {status}" + return False, f"Failed to create: {error}" + + async def update_ticket_description(self, api_id: str, description: str) -> tuple[bool, str]: + """Update ticket description. + + Returns: + Tuple of (success, message/error) + """ + payload = {"description": description} + status, data = await self._request("PATCH", f"/api/v1/tickets/{api_id}", json=payload) + + if status == 200: + return True, "Description updated" + else: + error = data.get("detail", str(data)) if data else f"HTTP {status}" + return False, f"Failed to update: {error}" + + async def list_tickets(self, project_id: Optional[str] = None) -> list[dict]: + """List all tickets, optionally filtered by project.""" + endpoint = "/api/v1/tickets" + if project_id: + endpoint += f"?project_id={project_id}" + + status, data = await self._request("GET", endpoint) + if status == 200 and isinstance(data, dict): + return data.get("tickets", []) + return [] + + # ======================================================================== + # Task Operations + # ======================================================================== + + async def get_task(self, task_id: str) -> Optional[dict]: + """Get task by ID.""" + status, data = await self._request("GET", f"/api/v1/tasks/{task_id}") + if status == 200: + return data + return None + + async def create_task(self, task: ParsedTask, ticket_api_id: str) -> tuple[bool, str]: + """Create a new task. 
+ + Returns: + Tuple of (success, message/error) + """ + # Convert dependencies to backend format + dependencies = None + if task.dependencies.depends_on: + dependencies = {"depends_on": task.dependencies.depends_on} + + payload = { + "ticket_id": ticket_api_id, + "title": task.title, + "description": task.objective, + "task_type": "implementation", # Default type + "priority": "MEDIUM", # Default priority + "phase_id": "PHASE_IMPLEMENTATION", + } + if dependencies: + payload["dependencies"] = dependencies + + status, data = await self._request("POST", "/api/v1/tasks", json=payload) + + if status in (200, 201): + return True, f"Created with API ID: {data.get('id', 'unknown')}" + else: + error = data.get("detail", str(data)) if data else f"HTTP {status}" + return False, f"Failed to create: {error}" + + async def update_task_description(self, api_id: str, description: str) -> tuple[bool, str]: + """Update task description. + + Returns: + Tuple of (success, message/error) + """ + payload = {"description": description} + status, data = await self._request("PATCH", f"/api/v1/tasks/{api_id}", json=payload) + + if status == 200: + return True, "Description updated" + else: + error = data.get("detail", str(data)) if data else f"HTTP {status}" + return False, f"Failed to update: {error}" + + async def list_tasks(self, ticket_id: Optional[str] = None) -> list[dict]: + """List all tasks, optionally filtered by ticket.""" + endpoint = "/api/v1/tasks" + # Note: The tasks endpoint doesn't have a ticket_id filter yet + # We filter client-side for now + status, data = await self._request("GET", endpoint) + if status == 200 and isinstance(data, list): + if ticket_id: + return [t for t in data if t.get("ticket_id") == ticket_id] + return data + return [] + + async def get_project_with_tickets(self, project_id: str) -> dict: + """Get project details with all tickets and their tasks.""" + # Get project info + status, project_data = await self._request("GET", 
f"/api/v1/projects/{project_id}") + if status != 200: + return {"error": f"Project not found: {project_id}"} + + # Get tickets for this project + tickets = await self.list_tickets(project_id) + + # Get all tasks + all_tasks = await self.list_tasks() + + # Group tasks by ticket + tasks_by_ticket = {} + for task in all_tasks: + tid = task.get("ticket_id") + if tid not in tasks_by_ticket: + tasks_by_ticket[tid] = [] + tasks_by_ticket[tid].append(task) + + # Attach tasks to tickets + for ticket in tickets: + ticket["tasks"] = tasks_by_ticket.get(ticket["id"], []) + + return { + "project": project_data, + "tickets": tickets, + "total_tickets": len(tickets), + "total_tasks": len(all_tasks), + } + + # ======================================================================== + # Project Operations + # ======================================================================== + + async def list_projects(self) -> list[dict]: + """List all projects.""" + status, data = await self._request("GET", "/api/v1/projects") + if status == 200 and isinstance(data, dict): + return data.get("projects", []) + return [] + + # ======================================================================== + # Spec Operations + # ======================================================================== + + async def get_spec(self, spec_id: str) -> Optional[dict]: + """Get spec by ID.""" + status, data = await self._request("GET", f"/api/v1/specs/{spec_id}") + if status == 200: + return data + return None + + async def list_specs(self, project_id: str) -> list[dict]: + """List all specs for a project.""" + status, data = await self._request("GET", f"/api/v1/specs/project/{project_id}") + if status == 200 and isinstance(data, dict): + return data.get("specs", []) + return [] + + async def create_spec( + self, + title: str, + project_id: str, + description: Optional[str] = None, + ) -> tuple[bool, str, Optional[str]]: + """Create a new spec. 
+ + Returns: + Tuple of (success, message/error, spec_id if created) + """ + payload = { + "title": title, + "project_id": project_id, + } + if description: + payload["description"] = description + + status, data = await self._request("POST", "/api/v1/specs", json=payload) + + if status in (200, 201) and data: + spec_id = data.get("id") + return True, f"Created spec with ID: {spec_id}", spec_id + else: + error = data.get("detail", str(data)) if data else f"HTTP {status}" + return False, f"Failed to create spec: {error}", None + + async def update_spec( + self, + spec_id: str, + title: Optional[str] = None, + description: Optional[str] = None, + status: Optional[str] = None, + phase: Optional[str] = None, + ) -> tuple[bool, str]: + """Update a spec. + + Returns: + Tuple of (success, message/error) + """ + payload = {} + if title is not None: + payload["title"] = title + if description is not None: + payload["description"] = description + if status is not None: + payload["status"] = status + if phase is not None: + payload["phase"] = phase + + resp_status, data = await self._request( + "PATCH", f"/api/v1/specs/{spec_id}", json=payload + ) + + if resp_status == 200: + return True, "Spec updated" + else: + error = data.get("detail", str(data)) if data else f"HTTP {resp_status}" + return False, f"Failed to update spec: {error}" + + # ======================================================================== + # Requirement Operations (EARS format) + # ======================================================================== + + async def add_requirement( + self, + spec_id: str, + title: str, + condition: str, + action: str, + linked_design: Optional[str] = None, + ) -> tuple[bool, str, Optional[str]]: + """Add a requirement to a spec using EARS format. 
+ + EARS format: + - condition: The "WHEN" clause (triggering condition) + - action: The "THE SYSTEM SHALL" clause (expected behavior) + + Args: + linked_design: Optional reference to a design section/ID + + Returns: + Tuple of (success, message/error, requirement_id if created) + """ + payload = { + "title": title, + "condition": condition, + "action": action, + } + if linked_design: + payload["linked_design"] = linked_design + + status, data = await self._request( + "POST", f"/api/v1/specs/{spec_id}/requirements", json=payload + ) + + if status in (200, 201) and data: + req_id = data.get("id") + return True, f"Added requirement with ID: {req_id}", req_id + else: + error = data.get("detail", str(data)) if data else f"HTTP {status}" + return False, f"Failed to add requirement: {error}", None + + async def update_requirement( + self, + spec_id: str, + req_id: str, + title: Optional[str] = None, + condition: Optional[str] = None, + action: Optional[str] = None, + status: Optional[str] = None, + linked_design: Optional[str] = None, + ) -> tuple[bool, str]: + """Update a requirement. 
+ + Args: + linked_design: Optional reference to a design section/ID + + Returns: + Tuple of (success, message/error) + """ + payload = {} + if title is not None: + payload["title"] = title + if condition is not None: + payload["condition"] = condition + if action is not None: + payload["action"] = action + if status is not None: + payload["status"] = status + if linked_design is not None: + payload["linked_design"] = linked_design + + resp_status, data = await self._request( + "PATCH", f"/api/v1/specs/{spec_id}/requirements/{req_id}", json=payload + ) + + if resp_status == 200: + return True, "Requirement updated" + else: + error = data.get("detail", str(data)) if data else f"HTTP {resp_status}" + return False, f"Failed to update requirement: {error}" + + async def add_acceptance_criterion( + self, + spec_id: str, + req_id: str, + text: str, + ) -> tuple[bool, str]: + """Add an acceptance criterion to a requirement. + + Returns: + Tuple of (success, message/error) + """ + payload = {"text": text} + + status, data = await self._request( + "POST", + f"/api/v1/specs/{spec_id}/requirements/{req_id}/criteria", + json=payload, + ) + + if status in (200, 201): + return True, "Criterion added" + else: + error = data.get("detail", str(data)) if data else f"HTTP {status}" + return False, f"Failed to add criterion: {error}" + + # ======================================================================== + # Design Operations + # ======================================================================== + + async def update_design( + self, + spec_id: str, + architecture: Optional[str] = None, + data_model: Optional[str] = None, + api_spec: Optional[list[dict]] = None, + ) -> tuple[bool, str]: + """Update a spec's design artifact. 
+ + Args: + spec_id: The spec ID + architecture: Architecture description (markdown/mermaid) + data_model: Data model description + api_spec: List of API endpoints [{method, endpoint, description}] + + Returns: + Tuple of (success, message/error) + """ + payload = {} + if architecture is not None: + payload["architecture"] = architecture + if data_model is not None: + payload["data_model"] = data_model + if api_spec is not None: + payload["api_spec"] = api_spec + + status, data = await self._request( + "PUT", f"/api/v1/specs/{spec_id}/design", json=payload + ) + + if status == 200: + return True, "Design updated" + else: + error = data.get("detail", str(data)) if data else f"HTTP {status}" + return False, f"Failed to update design: {error}" + + # ======================================================================== + # Sync Specs from Local Files + # ======================================================================== + + async def sync_requirement_to_spec( + self, + spec_id: str, + requirement: ParsedRequirement, + existing_reqs: Optional[list[dict]] = None, + ) -> SyncResult: + """Sync a parsed requirement to an API spec. 
+ + Args: + spec_id: The spec ID to add/update requirement in + requirement: Parsed requirement from local file + existing_reqs: Optional list of existing requirements for comparison + + Returns: + SyncResult indicating action taken + """ + # Check if requirement already exists by title match + existing = None + if existing_reqs: + for req in existing_reqs: + if req.get("title") == requirement.title: + existing = req + break + + if existing: + # Check if needs update (compare condition/action/linked_design) + needs_update = ( + existing.get("condition", "").strip() != requirement.condition.strip() + or existing.get("action", "").strip() != requirement.action.strip() + or existing.get("linked_design") != requirement.linked_design + ) + if needs_update: + success, msg = await self.update_requirement( + spec_id, + existing["id"], + condition=requirement.condition, + action=requirement.action, + linked_design=requirement.linked_design, + ) + return SyncResult( + item_id=requirement.id, + item_type="requirement", + action=SyncAction.UPDATED if success else SyncAction.FAILED, + message=msg, + ) + else: + return SyncResult( + item_id=requirement.id, + item_type="requirement", + action=SyncAction.SKIPPED, + message="Already exists with same content", + ) + else: + # Create new requirement + success, msg, req_id = await self.add_requirement( + spec_id, + requirement.title, + requirement.condition, + requirement.action, + requirement.linked_design, + ) + + result = SyncResult( + item_id=requirement.id, + item_type="requirement", + action=SyncAction.CREATED if success else SyncAction.FAILED, + message=msg, + ) + + # Add acceptance criteria if requirement was created + if success and req_id and requirement.acceptance_criteria: + for criterion in requirement.acceptance_criteria: + await self.add_acceptance_criterion(spec_id, req_id, criterion.text) + + return result + + async def sync_design_to_spec( + self, + spec_id: str, + design: ParsedDesign, + existing_design: 
Optional[dict] = None, + ) -> SyncResult: + """Sync a parsed design to an API spec. + + Args: + spec_id: The spec ID to update design in + design: Parsed design from local file + existing_design: Optional existing design for comparison + + Returns: + SyncResult indicating action taken + """ + # Convert parsed API endpoints to API format + api_spec = [] + for ep in design.api_endpoints: + api_spec.append({ + "method": ep.method, + "endpoint": ep.path, + "description": ep.description, + }) + + # Build data model description from parsed data models + data_model_parts = [] + for dm in design.data_models: + model_desc = f"### {dm.name}\n{dm.description}\n\n" + if dm.fields: + model_desc += "**Fields:**\n" + for field_name, field_type in dm.fields.items(): + model_desc += f"- `{field_name}`: {field_type}\n" + if dm.relationships: + model_desc += "\n**Relationships:**\n" + for rel in dm.relationships: + model_desc += f"- {rel}\n" + data_model_parts.append(model_desc) + + data_model = "\n".join(data_model_parts) if data_model_parts else None + + # Check if needs update + if existing_design: + existing_arch = existing_design.get("architecture", "") or "" + existing_dm = existing_design.get("data_model", "") or "" + new_arch = design.architecture or "" + new_dm = data_model or "" + + if existing_arch.strip() == new_arch.strip() and existing_dm.strip() == new_dm.strip(): + return SyncResult( + item_id=design.id, + item_type="design", + action=SyncAction.SKIPPED, + message="Already exists with same content", + ) + + # Update design + success, msg = await self.update_design( + spec_id, + architecture=design.architecture, + data_model=data_model, + api_spec=api_spec if api_spec else None, + ) + + return SyncResult( + item_id=design.id, + item_type="design", + action=SyncAction.UPDATED if success else SyncAction.FAILED, + message=msg, + ) + + # ======================================================================== + # Auth Operations + # 
    # ========================================================================

    async def login(self, email: str, password: str) -> tuple[bool, str]:
        """Login and store access token.

        On success the bearer token is cached on ``self.token`` — presumably
        picked up by ``self._request`` for subsequent calls (TODO confirm).

        Returns:
            Tuple of (success, message/error)
        """
        payload = {"email": email, "password": password}
        status, data = await self._request("POST", "/api/v1/auth/login", json=payload)

        if status == 200 and data:
            self.token = data.get("access_token")
            return True, "Login successful"
        else:
            error = data.get("detail", str(data)) if data else f"HTTP {status}"
            return False, f"Login failed: {error}"

    # ========================================================================
    # Sync Operations
    # ========================================================================

    async def check_connection(self) -> tuple[bool, str]:
        """Check if API is reachable.

        Deliberately best-effort: any transport exception is converted into a
        (False, reason) result rather than propagating.

        Returns:
            Tuple of (success, message)
        """
        try:
            status, _ = await self._request("GET", "/health")
            if status == 200:
                return True, "Connected"
            else:
                return False, f"API returned status {status}"
        except Exception as e:
            return False, str(e)

    async def sync(
        self,
        result: ParseResult,
        project_id: Optional[str] = None,
        dry_run: bool = False,
    ) -> SyncSummary:
        """Sync local specs to API. 
+ + Behavior: + - CREATE: If ticket/task doesn't exist (by title match) + - UPDATE: If exists but description differs + - SKIP: If exists with same description + + Args: + result: Parsed specs from SpecParser + project_id: Optional project ID to associate tickets with + dry_run: If True, don't actually make changes + + Returns: + SyncSummary with results for each item + """ + summary = SyncSummary(results=[]) + + # Get existing items from API for comparison + existing_tickets = await self.list_tickets(project_id) + existing_tasks = await self.list_tasks() + + # Build lookup by title + ticket_by_title = {t["title"]: t for t in existing_tickets} + task_by_title = {t.get("title", ""): t for t in existing_tasks} + + # Track created ticket API IDs for task creation + ticket_api_ids: dict[str, str] = {} + + # Sync tickets + for ticket in result.tickets: + existing = ticket_by_title.get(ticket.title) + + if existing: + ticket_api_ids[ticket.id] = existing["id"] + + # Check if description needs update + existing_desc = existing.get("description", "") or "" + if existing_desc.strip() != ticket.description.strip(): + if dry_run: + summary.add( + SyncResult( + item_id=ticket.id, + item_type="ticket", + action=SyncAction.UPDATED, + message="Would update description (dry run)", + ) + ) + else: + success, msg = await self.update_ticket_description( + existing["id"], ticket.description + ) + summary.add( + SyncResult( + item_id=ticket.id, + item_type="ticket", + action=SyncAction.UPDATED if success else SyncAction.FAILED, + message=msg, + ) + ) + else: + summary.add( + SyncResult( + item_id=ticket.id, + item_type="ticket", + action=SyncAction.SKIPPED, + message="Already exists with same description", + ) + ) + else: + # Create new ticket + if dry_run: + # Use placeholder ID for dry run so tasks can reference it + ticket_api_ids[ticket.id] = f"dry-run-{ticket.id}" + summary.add( + SyncResult( + item_id=ticket.id, + item_type="ticket", + action=SyncAction.CREATED, + message="Would 
create (dry run)", + ) + ) + else: + success, msg = await self.create_ticket(ticket, project_id) + if success: + # Extract API ID from message + # Format: "Created with API ID: xxx" + match = re.search(r"API ID: (\S+)", msg) + if match: + ticket_api_ids[ticket.id] = match.group(1) + summary.add( + SyncResult( + item_id=ticket.id, + item_type="ticket", + action=SyncAction.CREATED if success else SyncAction.FAILED, + message=msg, + ) + ) + + # Sync tasks + for task in result.tasks: + existing = task_by_title.get(task.title) + + if existing: + # Check if description needs update + existing_desc = existing.get("description", "") or "" + if existing_desc.strip() != task.objective.strip(): + if dry_run: + summary.add( + SyncResult( + item_id=task.id, + item_type="task", + action=SyncAction.UPDATED, + message="Would update description (dry run)", + ) + ) + else: + success, msg = await self.update_task_description( + existing["id"], task.objective + ) + summary.add( + SyncResult( + item_id=task.id, + item_type="task", + action=SyncAction.UPDATED if success else SyncAction.FAILED, + message=msg, + ) + ) + else: + summary.add( + SyncResult( + item_id=task.id, + item_type="task", + action=SyncAction.SKIPPED, + message="Already exists with same description", + ) + ) + else: + # Create new task - need parent ticket API ID + ticket_api_id = ticket_api_ids.get(task.parent_ticket) + if not ticket_api_id: + summary.add( + SyncResult( + item_id=task.id, + item_type="task", + action=SyncAction.FAILED, + message=f"Parent ticket {task.parent_ticket} not found in API", + ) + ) + continue + + if dry_run: + summary.add( + SyncResult( + item_id=task.id, + item_type="task", + action=SyncAction.CREATED, + message="Would create (dry run)", + ) + ) + else: + success, msg = await self.create_task(task, ticket_api_id) + summary.add( + SyncResult( + item_id=task.id, + item_type="task", + action=SyncAction.CREATED if success else SyncAction.FAILED, + message=msg, + ) + ) + + return summary + + 
async def diff(self, result: ParseResult, project_id: Optional[str] = None) -> SyncSummary: + """Show what would change without making changes. + + This is equivalent to sync with dry_run=True. + """ + return await self.sync(result, project_id, dry_run=True) + + async def sync_specs( + self, + result: ParseResult, + project_id: str, + spec_title: Optional[str] = None, + dry_run: bool = False, + ) -> SyncSummary: + """Sync local requirements and designs to API specs. + + This creates/updates a spec document in the API with requirements + and design artifacts parsed from local .omoi_os/ files. + + Workflow: + 1. Find or create spec by title (defaults to first design's title) + 2. Sync all requirements to the spec + 3. Sync all designs to the spec + + Args: + result: Parsed specs from SpecParser + project_id: Project ID to associate spec with + spec_title: Optional spec title (defaults to design feature name) + dry_run: If True, don't actually make changes + + Returns: + SyncSummary with results for each item + """ + summary = SyncSummary(results=[]) + + # Determine spec title + if not spec_title: + if result.designs: + spec_title = result.designs[0].title or result.designs[0].feature + elif result.requirements: + spec_title = f"Spec for {result.requirements[0].title}" + else: + summary.add(SyncResult( + item_id="unknown", + item_type="spec", + action=SyncAction.FAILED, + message="No requirements or designs found to sync", + )) + return summary + + # Get existing specs for this project + existing_specs = await self.list_specs(project_id) + spec_by_title = {s["title"]: s for s in existing_specs} + + # Find or create spec + spec_id = None + if spec_title in spec_by_title: + spec_id = spec_by_title[spec_title]["id"] + existing_spec = spec_by_title[spec_title] + summary.add(SyncResult( + item_id=spec_id, + item_type="spec", + action=SyncAction.SKIPPED, + message=f"Using existing spec: {spec_title}", + )) + else: + if dry_run: + spec_id = f"dry-run-spec-{spec_title}" + 
summary.add(SyncResult( + item_id=spec_id, + item_type="spec", + action=SyncAction.CREATED, + message=f"Would create spec: {spec_title} (dry run)", + )) + existing_spec = {"requirements": [], "design": None} + else: + # Build description from requirements + description = "" + if result.requirements: + description = f"Requirements ({len(result.requirements)}): " + description += ", ".join(r.title for r in result.requirements[:3]) + if len(result.requirements) > 3: + description += f" and {len(result.requirements) - 3} more" + + success, msg, created_id = await self.create_spec( + title=spec_title, + project_id=project_id, + description=description, + ) + if success and created_id: + spec_id = created_id + summary.add(SyncResult( + item_id=spec_id, + item_type="spec", + action=SyncAction.CREATED, + message=msg, + )) + existing_spec = {"requirements": [], "design": None} + else: + summary.add(SyncResult( + item_id="unknown", + item_type="spec", + action=SyncAction.FAILED, + message=msg, + )) + return summary + + # Sync requirements + if spec_id and not dry_run: + existing_reqs = existing_spec.get("requirements", []) + + for requirement in result.requirements: + req_result = await self.sync_requirement_to_spec( + spec_id, + requirement, + existing_reqs, + ) + summary.add(req_result) + elif dry_run: + for requirement in result.requirements: + summary.add(SyncResult( + item_id=requirement.id, + item_type="requirement", + action=SyncAction.CREATED, + message=f"Would create requirement: {requirement.title} (dry run)", + )) + + # Sync designs + if spec_id and not dry_run: + existing_design = existing_spec.get("design") + + for design in result.designs: + design_result = await self.sync_design_to_spec( + spec_id, + design, + existing_design, + ) + summary.add(design_result) + elif dry_run: + for design in result.designs: + summary.add(SyncResult( + item_id=design.id, + item_type="design", + action=SyncAction.UPDATED, + message=f"Would update design: {design.title} (dry 
run)", + )) + + return summary + + async def diff_specs( + self, + result: ParseResult, + project_id: str, + spec_title: Optional[str] = None, + ) -> SyncSummary: + """Show what spec changes would happen without making changes. + + This is equivalent to sync_specs with dry_run=True. + """ + return await self.sync_specs(result, project_id, spec_title, dry_run=True) + + async def get_full_traceability( + self, + project_id: str, + ) -> dict: + """Get full traceability from API: Specs → Requirements → Tickets → Tasks. + + Returns: + Dict with: + - specs: List of specs with requirements + - tickets: List of tickets with tasks + - traceability: Mapping of spec requirements to tickets + """ + # Get all specs for the project + specs = await self.list_specs(project_id) + + # Get all tickets for the project + tickets = await self.list_tickets(project_id) + + # Get all tasks + all_tasks = await self.list_tasks() + + # Group tasks by ticket + tasks_by_ticket = {} + for task in all_tasks: + tid = task.get("ticket_id") + if tid not in tasks_by_ticket: + tasks_by_ticket[tid] = [] + tasks_by_ticket[tid].append(task) + + # Build traceability matrix + traceability = { + "specs": [], + "tickets": [], + "orphan_tickets": [], # Tickets not linked to any spec requirement + } + + # Process specs + for spec in specs: + spec_entry = { + "id": spec["id"], + "title": spec["title"], + "status": spec["status"], + "requirements": [], + "linked_tickets": [], + } + + for req in spec.get("requirements", []): + req_entry = { + "id": req["id"], + "title": req["title"], + "condition": req.get("condition", ""), + "action": req.get("action", ""), + "status": req.get("status", "pending"), + "linked_tickets": [], + } + + # Find tickets that might implement this requirement + # (This would require ticket.requirements field - checking by title match for now) + for ticket in tickets: + ticket_title_lower = ticket.get("title", "").lower() + req_title_lower = req["title"].lower() + + # Simple heuristic: 
ticket title contains requirement keywords
                    # NOTE(review): splits on ALL words, so stop-words like
                    # "the"/"a" can create false links — consider filtering.
                    if any(word in ticket_title_lower for word in req_title_lower.split()):
                        req_entry["linked_tickets"].append(ticket["id"])
                        spec_entry["linked_tickets"].append(ticket["id"])

                spec_entry["requirements"].append(req_entry)

            traceability["specs"].append(spec_entry)

        # Process tickets: a ticket is "orphan" if no spec requirement linked it.
        linked_ticket_ids = set()
        for spec in traceability["specs"]:
            linked_ticket_ids.update(spec["linked_tickets"])

        for ticket in tickets:
            ticket_entry = {
                "id": ticket["id"],
                "title": ticket["title"],
                "status": ticket.get("status", "unknown"),
                "priority": ticket.get("priority", "MEDIUM"),
                "tasks": tasks_by_ticket.get(ticket["id"], []),
            }

            if ticket["id"] in linked_ticket_ids:
                traceability["tickets"].append(ticket_entry)
            else:
                traceability["orphan_tickets"].append(ticket_entry)

        return traceability


# ============================================================================
# CLI Integration
# ============================================================================


def print_sync_summary(summary: SyncSummary):
    """Print sync summary to console.

    One line per result, tagged by action, followed by aggregate counts.
    """
    print("\nSync Results:")
    print("-" * 60)

    for result in summary.results:
        # Fixed-width tags so the item columns line up.
        action_str = {
            SyncAction.CREATED: "[CREATE]",
            SyncAction.UPDATED: "[UPDATE]",
            SyncAction.SKIPPED: "[SKIP]  ",
            SyncAction.FAILED: "[FAILED]",
        }[result.action]

        print(f"{action_str} {result.item_type} {result.item_id}")
        if result.message:
            print(f"  {result.message}")

    print("-" * 60)
    print(
        f"Summary: {summary.created} created, {summary.updated} updated, "
        f"{summary.skipped} skipped, {summary.failed} failed"
    )


async def run_sync(
    api_url: str,
    action: str,
    project_id: Optional[str] = None,
    email: Optional[str] = None,
    password: Optional[str] = None,
    token: Optional[str] = None,
    api_key: Optional[str] = None,
):
    """Run sync from CLI.

    ``action`` is "diff" (dry run) or anything else for a real push.
    Returns True on success, False on any connection/auth/validation failure.
    """
    import os
    from parse_specs import SpecParser

    # Auth can come from: 
argument > env var + auth_token = token or os.environ.get("OMOIOS_TOKEN") + auth_api_key = api_key or os.environ.get("OMOIOS_API_KEY") + + client = OmoiOSClient(base_url=api_url, token=auth_token, api_key=auth_api_key) + parser = SpecParser() + + # Check connection + print(f"Connecting to {api_url}...") + connected, msg = await client.check_connection() + if not connected: + print(f"Error: Cannot connect to API: {msg}") + return False + + print("Connected!") + + # Handle authentication + if client.api_key: + print("Using API key authentication.\n") + elif client.token: + print("Using provided token.\n") + else: + # Try to login if credentials provided + if email and password: + print(f"Logging in as {email}...") + success, msg = await client.login(email, password) + if not success: + print(f"Error: {msg}") + return False + print("Authenticated!\n") + else: + # Try env vars for credentials + env_email = os.environ.get("OMOIOS_EMAIL") + env_password = os.environ.get("OMOIOS_PASSWORD") + if env_email and env_password: + print(f"Logging in as {env_email}...") + success, msg = await client.login(env_email, env_password) + if not success: + print(f"Error: {msg}") + return False + print("Authenticated!\n") + else: + print("Warning: No authentication provided. API calls may fail.") + print("Set OMOIOS_API_KEY, OMOIOS_TOKEN, or OMOIOS_EMAIL/OMOIOS_PASSWORD env vars.\n") + + # Parse specs + result = parser.parse_all() + print(f"Parsed {len(result.tickets)} tickets and {len(result.tasks)} tasks\n") + + # Run validation first + from spec_cli import validate_specs + + errors = validate_specs(result) + if errors: + print("Validation failed! 
Fix these errors before syncing:") + for error in errors: + print(f" - {error}") + return False + + print("Validation passed!\n") + + # Run sync + if action == "diff": + print("Checking what would change (dry run)...") + summary = await client.diff(result, project_id) + else: # push + print("Syncing to API...") + summary = await client.sync(result, project_id) + + print_sync_summary(summary) + return summary.failed == 0 + + +if __name__ == "__main__": + import sys + + # Quick test + async def test(): + client = OmoiOSClient() + connected, msg = await client.check_connection() + print(f"Connection test: {msg}") + + asyncio.run(test()) diff --git a/.claude/skills/spec-driven-dev/scripts/models.py b/.claude/skills/spec-driven-dev/scripts/models.py new file mode 100644 index 00000000..b0297493 --- /dev/null +++ b/.claude/skills/spec-driven-dev/scripts/models.py @@ -0,0 +1,540 @@ +""" +Data models for parsed spec files. + +These dataclasses represent the structured data extracted from +.omoi_os/ markdown files with YAML frontmatter. 

Supports:
- Requirements (.omoi_os/requirements/*.md)
- Designs (.omoi_os/designs/*.md)
- Tickets (.omoi_os/tickets/*.md)
- Tasks (.omoi_os/tasks/*.md)
"""

from dataclasses import dataclass, field
from datetime import date
from typing import Optional


# ============================================================================
# Requirement Models
# ============================================================================


@dataclass
class AcceptanceCriterion:
    """Single acceptance criterion for a requirement."""

    text: str
    # True once the criterion's checkbox is ticked in the markdown source.
    completed: bool = False


@dataclass
class ParsedRequirement:
    """Parsed requirement from .omoi_os/requirements/*.md

    Uses EARS format (Easy Approach to Requirements Syntax):
    - condition: The "WHEN" clause (triggering condition)
    - action: The "THE SYSTEM SHALL" clause (expected behavior)
    """

    id: str  # REQ-XXX-YYY-NNN format
    title: str
    status: str  # draft, review, approved
    created: date
    updated: date
    category: str = ""  # functional, non-functional, constraint
    priority: str = "MEDIUM"
    condition: str = ""  # EARS "WHEN" clause
    action: str = ""  # EARS "THE SYSTEM SHALL" clause
    rationale: str = ""  # Why this requirement exists
    acceptance_criteria: list[AcceptanceCriterion] = field(default_factory=list)
    linked_tickets: list[str] = field(default_factory=list)  # TKT-XXX references
    linked_design: Optional[str] = None  # Design section reference
    file_path: str = ""  # Source markdown path, for error reporting

    def __str__(self) -> str:
        return f"{self.id}: {self.title}"


# ============================================================================
# Design Models
# ============================================================================


@dataclass
class ApiEndpoint:
    """API endpoint specification."""

    method: str  # GET, POST, PUT, DELETE, PATCH
    path: str  # /api/v1/resource
    description: str = ""
    request_body: Optional[str] = None  # JSON schema or description
    response: Optional[str] = None  # 
JSON schema or description + + +@dataclass +class DataModel: + """Data model/entity specification.""" + + name: str + description: str = "" + fields: dict[str, str] = field(default_factory=dict) # field_name -> type/description + relationships: list[str] = field(default_factory=list) + + +@dataclass +class ParsedDesign: + """Parsed design from .omoi_os/designs/*.md""" + + id: str # Design identifier + title: str + status: str # draft, review, approved + created: date + updated: date + feature: str = "" # Feature this design covers + requirements: list[str] = field(default_factory=list) # REQ-XXX references + architecture: str = "" # Architecture description/diagram + data_models: list[DataModel] = field(default_factory=list) + api_endpoints: list[ApiEndpoint] = field(default_factory=list) + components: list[str] = field(default_factory=list) # Key components + error_handling: str = "" + security_considerations: str = "" + implementation_notes: str = "" + file_path: str = "" + + def __str__(self) -> str: + return f"{self.id}: {self.title}" + + +# ============================================================================ +# Ticket/Task Dependency Models +# ============================================================================ + + +@dataclass +class TicketDependencies: + """Dependencies for a ticket.""" + + blocked_by: list[str] = field(default_factory=list) + blocks: list[str] = field(default_factory=list) + related: list[str] = field(default_factory=list) + + +@dataclass +class TaskDependencies: + """Dependencies for a task.""" + + depends_on: list[str] = field(default_factory=list) + blocks: list[str] = field(default_factory=list) + + +@dataclass +class ParsedTicket: + """Parsed ticket from .omoi_os/tickets/*.md""" + + id: str + title: str + status: str + priority: str + estimate: str + created: date + updated: date + feature: Optional[str] = None + requirements: list[str] = field(default_factory=list) + design_ref: Optional[str] = None + tasks: list[str] = 
field(default_factory=list) + dependencies: TicketDependencies = field(default_factory=TicketDependencies) + description: str = "" + file_path: str = "" + + def is_blocked(self) -> bool: + """Check if this ticket is blocked by other tickets.""" + return len(self.dependencies.blocked_by) > 0 + + def __str__(self) -> str: + return f"{self.id}: {self.title}" + + +@dataclass +class ParsedTask: + """Parsed task from .omoi_os/tasks/*.md""" + + id: str + title: str + status: str + parent_ticket: str + estimate: str + created: date + assignee: Optional[str] = None + dependencies: TaskDependencies = field(default_factory=TaskDependencies) + objective: str = "" + file_path: str = "" + + def is_blocked(self, completed_tasks: set[str]) -> bool: + """Check if this task is blocked by incomplete tasks.""" + for dep in self.dependencies.depends_on: + if dep not in completed_tasks: + return True + return False + + def is_ready(self, completed_tasks: set[str]) -> bool: + """Check if this task is ready to work on.""" + return self.status == "pending" and not self.is_blocked(completed_tasks) + + def __str__(self) -> str: + return f"{self.id}: {self.title}" + + +@dataclass +class ValidationError: + """Validation error found in specs.""" + + error_type: str # circular_dependency, missing_reference, etc. 
+ message: str + source_id: str + target_id: Optional[str] = None + + def __str__(self) -> str: + if self.target_id: + return f"[{self.error_type}] {self.source_id} -> {self.target_id}: {self.message}" + return f"[{self.error_type}] {self.source_id}: {self.message}" + + +@dataclass +class ParseResult: + """Result of parsing all spec files.""" + + requirements: list[ParsedRequirement] = field(default_factory=list) + designs: list[ParsedDesign] = field(default_factory=list) + tickets: list[ParsedTicket] = field(default_factory=list) + tasks: list[ParsedTask] = field(default_factory=list) + errors: list[ValidationError] = field(default_factory=list) + + # ======================================================================== + # Requirement Methods + # ======================================================================== + + def get_requirement(self, req_id: str) -> Optional[ParsedRequirement]: + """Get requirement by ID.""" + for req in self.requirements: + if req.id == req_id: + return req + return None + + def get_requirements_by_category(self, category: str) -> list[ParsedRequirement]: + """Get all requirements in a category.""" + return [r for r in self.requirements if r.category == category] + + def get_requirements_by_status(self, status: str) -> list[ParsedRequirement]: + """Get all requirements with a given status.""" + return [r for r in self.requirements if r.status == status] + + # ======================================================================== + # Design Methods + # ======================================================================== + + def get_design(self, design_id: str) -> Optional[ParsedDesign]: + """Get design by ID.""" + for design in self.designs: + if design.id == design_id: + return design + return None + + def get_design_for_feature(self, feature: str) -> Optional[ParsedDesign]: + """Get design for a feature.""" + for design in self.designs: + if design.feature == feature: + return design + return None + + # 
======================================================================== + # Ticket Methods + # ======================================================================== + + def get_ticket(self, ticket_id: str) -> Optional[ParsedTicket]: + """Get ticket by ID.""" + for ticket in self.tickets: + if ticket.id == ticket_id: + return ticket + return None + + def get_task(self, task_id: str) -> Optional[ParsedTask]: + """Get task by ID.""" + for task in self.tasks: + if task.id == task_id: + return task + return None + + def get_tasks_for_ticket(self, ticket_id: str) -> list[ParsedTask]: + """Get all tasks belonging to a ticket.""" + return [t for t in self.tasks if t.parent_ticket == ticket_id] + + def get_completed_tasks(self) -> set[str]: + """Get set of completed task IDs.""" + return {t.id for t in self.tasks if t.status == "done"} + + def get_completed_tickets(self) -> set[str]: + """Get set of ticket IDs where all tasks are complete.""" + completed_tickets = set() + for ticket in self.tickets: + tasks = self.get_tasks_for_ticket(ticket.id) + if tasks and all(t.status == "done" for t in tasks): + completed_tickets.add(ticket.id) + elif not tasks and ticket.status == "done": + # Ticket with no tasks is complete if status is done + completed_tickets.add(ticket.id) + return completed_tickets + + def get_blocking_tickets(self, ticket_id: str) -> list[str]: + """Get list of ticket IDs that block a given ticket (transitively). + + Uses BFS to find all tickets that must complete before this ticket. 
+ """ + ticket = self.get_ticket(ticket_id) + if not ticket: + return [] + + blocking = [] + visited = set() + queue = list(ticket.dependencies.blocked_by) + + while queue: + blocker_id = queue.pop(0) + if blocker_id in visited: + continue + visited.add(blocker_id) + blocking.append(blocker_id) + + # Check transitive dependencies + blocker = self.get_ticket(blocker_id) + if blocker: + for transitive in blocker.dependencies.blocked_by: + if transitive not in visited: + queue.append(transitive) + + return blocking + + def is_task_blocked_by_tickets(self, task: ParsedTask) -> tuple[bool, list[str]]: + """Check if a task is blocked by incomplete tickets its parent depends on. + + Returns: + Tuple of (is_blocked, list of blocking ticket IDs) + """ + parent_ticket = self.get_ticket(task.parent_ticket) + if not parent_ticket: + return False, [] + + blocking_tickets = self.get_blocking_tickets(task.parent_ticket) + if not blocking_tickets: + return False, [] + + completed_tickets = self.get_completed_tickets() + incomplete_blockers = [t for t in blocking_tickets if t not in completed_tickets] + + return len(incomplete_blockers) > 0, incomplete_blockers + + def is_task_blocked(self, task: ParsedTask, completed_tasks: Optional[set[str]] = None) -> tuple[bool, str]: + """Check if a task is blocked, considering both task and ticket dependencies. + + Returns: + Tuple of (is_blocked, reason) + """ + if completed_tasks is None: + completed_tasks = self.get_completed_tasks() + + # Check direct task dependencies first + for dep in task.dependencies.depends_on: + if dep not in completed_tasks: + return True, f"blocked by task {dep}" + + # Check cross-ticket dependencies + is_blocked, blocking_tickets = self.is_task_blocked_by_tickets(task) + if is_blocked: + return True, f"blocked by ticket(s): {', '.join(blocking_tickets)}" + + return False, "" + + def get_ready_tasks(self) -> list[ParsedTask]: + """Get tasks that are ready to work on (no blocking dependencies). 
+ + Considers both: + - Direct task dependencies (depends_on) + - Cross-ticket dependencies (parent ticket blocked_by) + """ + completed = self.get_completed_tasks() + ready = [] + + for task in self.tasks: + if task.status != "pending": + continue + + is_blocked, _ = self.is_task_blocked(task, completed) + if not is_blocked: + ready.append(task) + + return ready + + def get_cross_ticket_dependency_graph(self) -> dict[str, list[str]]: + """Build a graph of ticket dependencies. + + Returns: + Dict mapping ticket_id -> list of ticket_ids it blocks + """ + graph = {t.id: [] for t in self.tickets} + + for ticket in self.tickets: + for blocker_id in ticket.dependencies.blocked_by: + if blocker_id in graph: + graph[blocker_id].append(ticket.id) + + return graph + + def is_valid(self) -> bool: + """Check if there are no validation errors.""" + return len(self.errors) == 0 + + # ======================================================================== + # Traceability Methods + # ======================================================================== + + def get_tickets_for_requirement(self, req_id: str) -> list[ParsedTicket]: + """Get all tickets implementing a requirement.""" + return [t for t in self.tickets if req_id in t.requirements] + + def get_design_for_ticket(self, ticket_id: str) -> Optional[ParsedDesign]: + """Get the design document for a ticket.""" + ticket = self.get_ticket(ticket_id) + if not ticket or not ticket.design_ref: + return None + # design_ref is like "designs/feature-name.md" + # Find design by matching feature + for design in self.designs: + if ticket.design_ref.endswith(f"{design.feature}.md"): + return design + if design.id == ticket.design_ref: + return design + return None + + def get_requirements_for_ticket(self, ticket_id: str) -> list[ParsedRequirement]: + """Get all requirements a ticket implements.""" + ticket = self.get_ticket(ticket_id) + if not ticket: + return [] + return [r for r in self.requirements if r.id in ticket.requirements] 
+ + def get_full_traceability(self) -> dict: + """Build complete traceability matrix. + + Returns dict with: + - requirements: REQ-ID -> {requirement, designs, tickets, tasks} + - designs: DESIGN-ID -> {design, requirements, tickets} + - tickets: TKT-ID -> {ticket, requirements, design, tasks} + - orphans: items without proper links + """ + trace = { + "requirements": {}, + "designs": {}, + "tickets": {}, + "orphans": { + "requirements": [], # Requirements not linked to any ticket + "designs": [], # Designs not linked to any ticket + "tickets": [], # Tickets not linked to requirements + }, + } + + # Build requirement traceability + for req in self.requirements: + tickets = self.get_tickets_for_requirement(req.id) + tasks = [] + for ticket in tickets: + tasks.extend(self.get_tasks_for_ticket(ticket.id)) + + trace["requirements"][req.id] = { + "requirement": req, + "linked_design": req.linked_design, + "tickets": [t.id for t in tickets], + "tasks": [t.id for t in tasks], + } + + if not tickets: + trace["orphans"]["requirements"].append(req.id) + + # Build design traceability + for design in self.designs: + linked_tickets = [ + t for t in self.tickets + if t.design_ref and ( + t.design_ref.endswith(f"{design.feature}.md") + or t.design_ref == design.id + ) + ] + + trace["designs"][design.id] = { + "design": design, + "requirements": design.requirements, + "tickets": [t.id for t in linked_tickets], + } + + if not linked_tickets: + trace["orphans"]["designs"].append(design.id) + + # Build ticket traceability + for ticket in self.tickets: + reqs = self.get_requirements_for_ticket(ticket.id) + design = self.get_design_for_ticket(ticket.id) + tasks = self.get_tasks_for_ticket(ticket.id) + + trace["tickets"][ticket.id] = { + "ticket": ticket, + "requirements": [r.id for r in reqs], + "design": design.id if design else None, + "tasks": [t.id for t in tasks], + "blocking_tickets": self.get_blocking_tickets(ticket.id), + } + + if not reqs: + 
trace["orphans"]["tickets"].append(ticket.id) + + return trace + + def get_traceability_stats(self) -> dict: + """Get summary statistics for traceability. + + Returns coverage metrics showing how well-linked the specs are. + """ + trace = self.get_full_traceability() + + total_reqs = len(self.requirements) + linked_reqs = total_reqs - len(trace["orphans"]["requirements"]) + + total_designs = len(self.designs) + linked_designs = total_designs - len(trace["orphans"]["designs"]) + + total_tickets = len(self.tickets) + linked_tickets = total_tickets - len(trace["orphans"]["tickets"]) + + return { + "requirements": { + "total": total_reqs, + "linked": linked_reqs, + "coverage": (linked_reqs / total_reqs * 100) if total_reqs > 0 else 100, + }, + "designs": { + "total": total_designs, + "linked": linked_designs, + "coverage": (linked_designs / total_designs * 100) if total_designs > 0 else 100, + }, + "tickets": { + "total": total_tickets, + "linked": linked_tickets, + "coverage": (linked_tickets / total_tickets * 100) if total_tickets > 0 else 100, + }, + "tasks": { + "total": len(self.tasks), + "done": len([t for t in self.tasks if t.status == "done"]), + "in_progress": len([t for t in self.tasks if t.status == "in_progress"]), + "pending": len([t for t in self.tasks if t.status == "pending"]), + }, + "orphans": trace["orphans"], + } diff --git a/.claude/skills/spec-driven-dev/scripts/parse_specs.py b/.claude/skills/spec-driven-dev/scripts/parse_specs.py new file mode 100644 index 00000000..50da5952 --- /dev/null +++ b/.claude/skills/spec-driven-dev/scripts/parse_specs.py @@ -0,0 +1,577 @@ +#!/usr/bin/env python3 +""" +Parse .omoi_os/ spec files into structured data. + +This module provides the SpecParser class that reads markdown files +with YAML frontmatter and converts them into structured dataclasses. 
+ +Supports: +- Requirements (.omoi_os/requirements/*.md) +- Designs (.omoi_os/designs/*.md) +- Tickets (.omoi_os/tickets/*.md) +- Tasks (.omoi_os/tasks/*.md) + +Usage: + from parse_specs import SpecParser + + parser = SpecParser() + result = parser.parse_all() + + for req in result.requirements: + print(f"{req.id}: {req.title}") + + for ticket in result.tickets: + print(f"{ticket.id}: {ticket.title}") + + for task in result.get_ready_tasks(): + print(f"Ready: {task.id}") +""" + +import re +from datetime import date +from pathlib import Path +from typing import Optional + +import yaml + +from models import ( + AcceptanceCriterion, + ApiEndpoint, + DataModel, + ParsedDesign, + ParsedRequirement, + ParsedTask, + ParsedTicket, + ParseResult, + TaskDependencies, + TicketDependencies, + ValidationError, +) + + +class SpecParser: + """Parse spec files from .omoi_os/ directory.""" + + def __init__(self, root_dir: Optional[Path] = None): + """Initialize parser with project root directory. + + Args: + root_dir: Project root directory. If None, will search upward + from current directory for .omoi_os/ folder. + """ + self.root = root_dir or self._find_project_root() + self.omoi_dir = self.root / ".omoi_os" + + def _find_project_root(self) -> Path: + """Find project root by looking for .omoi_os or common markers.""" + current = Path.cwd() + + for parent in [current] + list(current.parents): + if (parent / ".omoi_os").exists(): + return parent + if (parent / ".git").exists(): + return parent + + return current + + def _parse_frontmatter(self, content: str) -> tuple[dict, str]: + """Extract YAML frontmatter and markdown body from content. + + Supports two formats: + 1. YAML frontmatter (preferred): ---\nkey: value\n--- + 2. 
Markdown metadata (legacy): **Key**: Value + + Args: + content: Full file content + + Returns: + Tuple of (frontmatter dict, remaining markdown body) + + Raises: + ValueError: If frontmatter is missing or invalid + """ + # Check for YAML frontmatter delimiters + if content.startswith("---"): + # Find end of frontmatter + end_match = re.search(r"\n---\s*\n", content[3:]) + if end_match: + frontmatter_text = content[3 : end_match.start() + 3] + body = content[end_match.end() + 3 :] + + try: + frontmatter = yaml.safe_load(frontmatter_text) + except yaml.YAMLError as e: + raise ValueError(f"Invalid YAML frontmatter: {e}") + + if not isinstance(frontmatter, dict): + raise ValueError("Frontmatter must be a YAML mapping") + + return frontmatter, body + + # Fallback: Parse markdown-style metadata + # Format: **Key**: Value or **Key:** Value + frontmatter = {} + body_lines = [] + in_header = True + + for line in content.split("\n"): + if in_header: + # Check for markdown metadata pattern + md_match = re.match(r"\*\*([^*]+)\*\*:?\s*(.+)", line) + if md_match: + key = md_match.group(1).strip().lower().replace(" ", "_") + value = md_match.group(2).strip() + frontmatter[key] = value + elif line.startswith("# "): + # Title line - extract id from it if possible + title = line[2:].strip() + frontmatter["title"] = title + # Try to extract ID from title like "REQ-XXX: Title" or "# Title" + id_match = re.match(r"(REQ-[A-Z0-9-]+|[A-Z]+-\d+)", title) + if id_match: + frontmatter["id"] = id_match.group(1) + elif line.strip() == "" or line.startswith("##"): + # End of header section + in_header = False + body_lines.append(line) + else: + body_lines.append(line) + + body = "\n".join(body_lines) + + # Generate missing required fields for requirements/designs + if "id" not in frontmatter: + # Use feature as ID fallback + if "feature" in frontmatter: + frontmatter["id"] = frontmatter["feature"] + # Or generate from title + elif "title" in frontmatter: + # Convert title to ID-like format + 
title_id = frontmatter["title"].lower().replace(" ", "-") + title_id = re.sub(r"[^a-z0-9-]", "", title_id) + frontmatter["id"] = title_id + + if "status" not in frontmatter: + # Normalize status values + status = frontmatter.get("status", "draft") + if isinstance(status, str): + frontmatter["status"] = status.lower() + else: + frontmatter["status"] = "draft" + + if "created" not in frontmatter: + # Parse from markdown if present + created_str = frontmatter.pop("created_date", None) or frontmatter.get("created") + if created_str: + try: + frontmatter["created"] = date.fromisoformat(created_str) if isinstance(created_str, str) else created_str + except (ValueError, TypeError): + frontmatter["created"] = date.today() + else: + frontmatter["created"] = date.today() + + if "updated" not in frontmatter: + frontmatter["updated"] = frontmatter.get("created", date.today()) + + return frontmatter, body + + def _parse_date(self, value) -> date: + """Parse date from various formats.""" + if isinstance(value, date): + return value + if isinstance(value, str): + return date.fromisoformat(value) + raise ValueError(f"Cannot parse date: {value}") + + def _extract_description(self, body: str) -> str: + """Extract description section from markdown body.""" + # Look for ## Description or ## Objective section + match = re.search( + r"##\s+(?:Description|Objective)\s*\n(.*?)(?=\n##|\n---|\Z)", + body, + re.DOTALL | re.IGNORECASE, + ) + if match: + return match.group(1).strip() + return "" + + def _extract_section(self, body: str, section_name: str) -> str: + """Extract a named section from markdown body.""" + pattern = rf"##\s+{re.escape(section_name)}\s*\n(.*?)(?=\n##|\n---|\Z)" + match = re.search(pattern, body, re.DOTALL | re.IGNORECASE) + if match: + return match.group(1).strip() + return "" + + def _extract_acceptance_criteria(self, body: str) -> list[AcceptanceCriterion]: + """Extract acceptance criteria checkboxes from markdown body.""" + criteria = [] + # Look for checkbox 
patterns: - [ ] text or - [x] text + pattern = r"- \[([ xX])\] (.+?)(?=\n|$)" + matches = re.findall(pattern, body) + for check, text in matches: + criteria.append(AcceptanceCriterion( + text=text.strip(), + completed=check.lower() == "x" + )) + return criteria + + # ======================================================================== + # Requirement Parsing + # ======================================================================== + + def parse_requirement(self, file_path: Path) -> ParsedRequirement: + """Parse a requirement markdown file. + + Args: + file_path: Path to requirement .md file + + Returns: + ParsedRequirement instance + + Raises: + ValueError: If file is missing required fields or has invalid format + """ + content = file_path.read_text() + frontmatter, body = self._parse_frontmatter(content) + + # Required fields + required = ["id", "title", "status", "created", "updated"] + for field_name in required: + if field_name not in frontmatter: + raise ValueError(f"Missing required field: {field_name}") + + # Extract EARS-style requirement from body + condition = self._extract_section(body, "Condition") or self._extract_section(body, "When") + action = self._extract_section(body, "Action") or self._extract_section(body, "The System Shall") + + # If not in sections, try to parse from structured format + if not condition and not action: + # Look for WHEN...THE SYSTEM SHALL pattern + ears_match = re.search( + r"WHEN\s+(.+?)\s+THE SYSTEM SHALL\s+(.+?)(?=\n\n|\Z)", + body, + re.DOTALL | re.IGNORECASE, + ) + if ears_match: + condition = ears_match.group(1).strip() + action = ears_match.group(2).strip() + + return ParsedRequirement( + id=frontmatter["id"], + title=frontmatter["title"], + status=frontmatter["status"], + created=self._parse_date(frontmatter["created"]), + updated=self._parse_date(frontmatter["updated"]), + category=frontmatter.get("category", "functional"), + priority=frontmatter.get("priority", "MEDIUM"), + condition=condition, + 
action=action, + rationale=self._extract_section(body, "Rationale"), + acceptance_criteria=self._extract_acceptance_criteria(body), + linked_tickets=frontmatter.get("tickets", []) or [], + linked_design=frontmatter.get("design_ref"), + file_path=str(file_path), + ) + + # ======================================================================== + # Design Parsing + # ======================================================================== + + def parse_design(self, file_path: Path) -> ParsedDesign: + """Parse a design markdown file. + + Args: + file_path: Path to design .md file + + Returns: + ParsedDesign instance + + Raises: + ValueError: If file is missing required fields or has invalid format + """ + content = file_path.read_text() + frontmatter, body = self._parse_frontmatter(content) + + # Required fields + required = ["id", "title", "status", "created", "updated"] + for field_name in required: + if field_name not in frontmatter: + raise ValueError(f"Missing required field: {field_name}") + + # Parse API endpoints from frontmatter or body + api_endpoints = [] + api_data = frontmatter.get("api_endpoints", []) or [] + for ep in api_data: + if isinstance(ep, dict): + api_endpoints.append(ApiEndpoint( + method=ep.get("method", "GET"), + path=ep.get("path", ""), + description=ep.get("description", ""), + request_body=ep.get("request_body"), + response=ep.get("response"), + )) + + # Parse data models from frontmatter + data_models = [] + models_data = frontmatter.get("data_models", []) or [] + for model in models_data: + if isinstance(model, dict): + data_models.append(DataModel( + name=model.get("name", ""), + description=model.get("description", ""), + fields=model.get("fields", {}), + relationships=model.get("relationships", []), + )) + + return ParsedDesign( + id=frontmatter["id"], + title=frontmatter["title"], + status=frontmatter["status"], + created=self._parse_date(frontmatter["created"]), + updated=self._parse_date(frontmatter["updated"]), + 
feature=frontmatter.get("feature", ""), + requirements=frontmatter.get("requirements", []) or [], + architecture=self._extract_section(body, "Architecture Overview") + or self._extract_section(body, "Architecture"), + data_models=data_models, + api_endpoints=api_endpoints, + components=frontmatter.get("components", []) or [], + error_handling=self._extract_section(body, "Error Handling"), + security_considerations=self._extract_section(body, "Security"), + implementation_notes=self._extract_section(body, "Implementation Notes") + or self._extract_section(body, "Notes"), + file_path=str(file_path), + ) + + # ======================================================================== + # Ticket Parsing + # ======================================================================== + + def parse_ticket(self, file_path: Path) -> ParsedTicket: + """Parse a ticket markdown file. + + Args: + file_path: Path to ticket .md file + + Returns: + ParsedTicket instance + + Raises: + ValueError: If file is missing required fields or has invalid format + """ + content = file_path.read_text() + frontmatter, body = self._parse_frontmatter(content) + + # Required fields + required = ["id", "title", "status", "priority", "estimate", "created", "updated"] + for field in required: + if field not in frontmatter: + raise ValueError(f"Missing required field: {field}") + + # Parse dependencies + deps_data = frontmatter.get("dependencies", {}) + dependencies = TicketDependencies( + blocked_by=deps_data.get("blocked_by", []) or [], + blocks=deps_data.get("blocks", []) or [], + related=deps_data.get("related", []) or [], + ) + + return ParsedTicket( + id=frontmatter["id"], + title=frontmatter["title"], + status=frontmatter["status"], + priority=frontmatter["priority"], + estimate=frontmatter["estimate"], + created=self._parse_date(frontmatter["created"]), + updated=self._parse_date(frontmatter["updated"]), + feature=frontmatter.get("feature"), + requirements=frontmatter.get("requirements", []) or 
[], + design_ref=frontmatter.get("design_ref"), + tasks=frontmatter.get("tasks", []) or [], + dependencies=dependencies, + description=self._extract_description(body), + file_path=str(file_path), + ) + + def parse_task(self, file_path: Path) -> ParsedTask: + """Parse a task markdown file. + + Args: + file_path: Path to task .md file + + Returns: + ParsedTask instance + + Raises: + ValueError: If file is missing required fields or has invalid format + """ + content = file_path.read_text() + frontmatter, body = self._parse_frontmatter(content) + + # Required fields + required = ["id", "title", "status", "parent_ticket", "estimate", "created"] + for field in required: + if field not in frontmatter: + raise ValueError(f"Missing required field: {field}") + + # Parse dependencies + deps_data = frontmatter.get("dependencies", {}) + dependencies = TaskDependencies( + depends_on=deps_data.get("depends_on", []) or [], + blocks=deps_data.get("blocks", []) or [], + ) + + return ParsedTask( + id=frontmatter["id"], + title=frontmatter["title"], + status=frontmatter["status"], + parent_ticket=frontmatter["parent_ticket"], + estimate=frontmatter["estimate"], + created=self._parse_date(frontmatter["created"]), + assignee=frontmatter.get("assignee"), + dependencies=dependencies, + objective=self._extract_description(body), + file_path=str(file_path), + ) + + def parse_all(self) -> ParseResult: + """Parse all spec files (requirements, designs, tickets, tasks). 
+ + Returns: + ParseResult with all specs and any parse errors + """ + result = ParseResult() + + # Parse requirements + requirements_dir = self.omoi_dir / "requirements" + if requirements_dir.exists(): + for md_file in sorted(requirements_dir.glob("*.md")): + # Skip template files + if "template" in md_file.name.lower(): + continue + try: + req = self.parse_requirement(md_file) + result.requirements.append(req) + except ValueError as e: + result.errors.append( + ValidationError( + error_type="parse_error", + message=str(e), + source_id=md_file.name, + ) + ) + + # Parse designs + designs_dir = self.omoi_dir / "designs" + if designs_dir.exists(): + for md_file in sorted(designs_dir.glob("*.md")): + # Skip template files + if "template" in md_file.name.lower(): + continue + try: + design = self.parse_design(md_file) + result.designs.append(design) + except ValueError as e: + result.errors.append( + ValidationError( + error_type="parse_error", + message=str(e), + source_id=md_file.name, + ) + ) + + # Parse tickets + tickets_dir = self.omoi_dir / "tickets" + if tickets_dir.exists(): + for md_file in sorted(tickets_dir.glob("*.md")): + try: + ticket = self.parse_ticket(md_file) + result.tickets.append(ticket) + except ValueError as e: + result.errors.append( + ValidationError( + error_type="parse_error", + message=str(e), + source_id=md_file.name, + ) + ) + + # Parse tasks + tasks_dir = self.omoi_dir / "tasks" + if tasks_dir.exists(): + for md_file in sorted(tasks_dir.glob("*.md")): + try: + task = self.parse_task(md_file) + result.tasks.append(task) + except ValueError as e: + result.errors.append( + ValidationError( + error_type="parse_error", + message=str(e), + source_id=md_file.name, + ) + ) + + return result + + def list_requirements(self) -> list[ParsedRequirement]: + """List all parsed requirements.""" + return self.parse_all().requirements + + def list_designs(self) -> list[ParsedDesign]: + """List all parsed designs.""" + return self.parse_all().designs + + 
def list_tickets(self) -> list[ParsedTicket]: + """List all parsed tickets.""" + return self.parse_all().tickets + + def list_tasks(self) -> list[ParsedTask]: + """List all parsed tasks.""" + return self.parse_all().tasks + + def get_requirement(self, req_id: str) -> Optional[ParsedRequirement]: + """Get a specific requirement by ID.""" + return self.parse_all().get_requirement(req_id) + + def get_design(self, design_id: str) -> Optional[ParsedDesign]: + """Get a specific design by ID.""" + return self.parse_all().get_design(design_id) + + def get_ticket(self, ticket_id: str) -> Optional[ParsedTicket]: + """Get a specific ticket by ID.""" + return self.parse_all().get_ticket(ticket_id) + + def get_task(self, task_id: str) -> Optional[ParsedTask]: + """Get a specific task by ID.""" + return self.parse_all().get_task(task_id) + + +if __name__ == "__main__": + # Quick test + parser = SpecParser() + result = parser.parse_all() + + print(f"Found:") + print(f" {len(result.requirements)} requirements") + print(f" {len(result.designs)} designs") + print(f" {len(result.tickets)} tickets") + print(f" {len(result.tasks)} tasks") + + if result.errors: + print(f"\nParse Errors ({len(result.errors)}):") + for error in result.errors: + print(f" {error}") + + # Show traceability stats + if result.requirements or result.designs or result.tickets: + stats = result.get_traceability_stats() + print(f"\nTraceability Coverage:") + if result.requirements: + print(f" Requirements: {stats['requirements']['linked']}/{stats['requirements']['total']} linked ({stats['requirements']['coverage']:.1f}%)") + if result.designs: + print(f" Designs: {stats['designs']['linked']}/{stats['designs']['total']} linked ({stats['designs']['coverage']:.1f}%)") + if result.tickets: + print(f" Tickets: {stats['tickets']['linked']}/{stats['tickets']['total']} linked ({stats['tickets']['coverage']:.1f}%)") + print(f" Tasks: {stats['tasks']['done']}/{stats['tasks']['total']} done") diff --git 
a/.claude/skills/spec-driven-dev/scripts/spec_cli.py b/.claude/skills/spec-driven-dev/scripts/spec_cli.py new file mode 100644 index 00000000..fac1f007 --- /dev/null +++ b/.claude/skills/spec-driven-dev/scripts/spec_cli.py @@ -0,0 +1,1075 @@ +#!/usr/bin/env python3 +""" +Unified CLI for spec-driven development. + +Parse, validate, and visualize tickets and tasks from .omoi_os/ directory. + +Usage: + # Show all tickets and tasks + uv run python spec_cli.py show all + + # Show only tickets + uv run python spec_cli.py show tickets + + # Show only tasks + uv run python spec_cli.py show tasks + + # Show dependency graph + uv run python spec_cli.py show graph + + # Show ready tasks (no blocking dependencies) + uv run python spec_cli.py show ready + + # Validate specs (check for circular dependencies, missing refs) + uv run python spec_cli.py validate + + # Export to JSON + uv run python spec_cli.py export json + + # Sync to API (Phase 4) + uv run python spec_cli.py sync push +""" + +import argparse +import json +import sys +from collections import defaultdict +from pathlib import Path +from typing import Optional + +from models import ParseResult, ParsedTask, ParsedTicket, ValidationError +from parse_specs import SpecParser + + +# ============================================================================ +# Validation (Phase 3) +# ============================================================================ + + +def detect_circular_dependencies(result: ParseResult) -> list[ValidationError]: + """Detect circular dependencies in task graph. + + Uses DFS to find cycles in the dependency graph. 
+ """ + errors = [] + + # Build adjacency list for tasks + task_deps = {t.id: t.dependencies.depends_on for t in result.tasks} + + # Track visited and recursion stack + visited = set() + rec_stack = set() + path = [] + + def dfs(task_id: str) -> Optional[list[str]]: + """DFS to detect cycle, returns cycle path if found.""" + if task_id in rec_stack: + # Found cycle - extract the cycle from path + cycle_start = path.index(task_id) + return path[cycle_start:] + [task_id] + + if task_id in visited: + return None + + visited.add(task_id) + rec_stack.add(task_id) + path.append(task_id) + + for dep in task_deps.get(task_id, []): + cycle = dfs(dep) + if cycle: + return cycle + + path.pop() + rec_stack.remove(task_id) + return None + + # Check each task as starting point + for task in result.tasks: + if task.id not in visited: + cycle = dfs(task.id) + if cycle: + cycle_str = " -> ".join(cycle) + errors.append( + ValidationError( + error_type="circular_dependency", + message=f"Circular dependency detected: {cycle_str}", + source_id=cycle[0], + target_id=cycle[-1], + ) + ) + # Reset for next search + visited.clear() + rec_stack.clear() + path.clear() + + return errors + + +def validate_references(result: ParseResult) -> list[ValidationError]: + """Validate that all referenced IDs exist.""" + errors = [] + + # Get all known IDs + ticket_ids = {t.id for t in result.tickets} + task_ids = {t.id for t in result.tasks} + + # Check ticket dependencies + for ticket in result.tickets: + for dep_id in ticket.dependencies.blocked_by: + if dep_id not in ticket_ids: + errors.append( + ValidationError( + error_type="missing_reference", + message=f"blocked_by references unknown ticket: {dep_id}", + source_id=ticket.id, + target_id=dep_id, + ) + ) + for dep_id in ticket.dependencies.blocks: + if dep_id not in ticket_ids: + errors.append( + ValidationError( + error_type="missing_reference", + message=f"blocks references unknown ticket: {dep_id}", + source_id=ticket.id, + target_id=dep_id, + 
) + ) + + # Check task dependencies + for task in result.tasks: + # Check parent ticket exists + if task.parent_ticket not in ticket_ids: + errors.append( + ValidationError( + error_type="missing_reference", + message=f"parent_ticket references unknown ticket: {task.parent_ticket}", + source_id=task.id, + target_id=task.parent_ticket, + ) + ) + + for dep_id in task.dependencies.depends_on: + if dep_id not in task_ids: + errors.append( + ValidationError( + error_type="missing_reference", + message=f"depends_on references unknown task: {dep_id}", + source_id=task.id, + target_id=dep_id, + ) + ) + for dep_id in task.dependencies.blocks: + if dep_id not in task_ids: + errors.append( + ValidationError( + error_type="missing_reference", + message=f"blocks references unknown task: {dep_id}", + source_id=task.id, + target_id=dep_id, + ) + ) + + return errors + + +def validate_specs(result: ParseResult) -> list[ValidationError]: + """Run all validation checks.""" + errors = list(result.errors) # Start with parse errors + errors.extend(detect_circular_dependencies(result)) + errors.extend(validate_references(result)) + return errors + + +# ============================================================================ +# Display Functions +# ============================================================================ + + +def print_header(title: str, char: str = "=", width: int = 70): + """Print a section header.""" + print(char * width) + print(f" {title}") + print(char * width) + + +def print_tickets(tickets: list[ParsedTicket]): + """Print all tickets.""" + print_header(f"TICKETS ({len(tickets)} total)") + print() + + for ticket in tickets: + print(f"{ticket.id}: {ticket.title}") + print(f" Status: {ticket.status} | Priority: {ticket.priority} | Estimate: {ticket.estimate}") + + # Truncate description + desc = ticket.description[:100] + "..." 
if len(ticket.description) > 100 else ticket.description + desc = desc.replace("\n", " ") + if desc: + print(f" Description: {desc}") + + if ticket.tasks: + print(f" Tasks: {', '.join(ticket.tasks)}") + + if ticket.dependencies.blocked_by: + print(f" Blocked By: {', '.join(ticket.dependencies.blocked_by)}") + if ticket.dependencies.blocks: + print(f" Blocks: {', '.join(ticket.dependencies.blocks)}") + + print() + + +def print_tasks(tasks: list[ParsedTask], result: Optional[ParseResult] = None): + """Print all tasks with cross-ticket dependency awareness.""" + print_header(f"TASKS ({len(tasks)} total)") + print() + + for task in tasks: + # Use ParseResult for cross-ticket blocking if available + if result: + is_blocked, reason = result.is_task_blocked(task) + else: + # Fallback to simple task-only blocking + completed_tasks = {t.id for t in tasks if t.status == "done"} + is_blocked = task.is_blocked(completed_tasks) + reason = "blocked by task dependency" if is_blocked else "" + + status_indicator = f"[BLOCKED: {reason}] " if is_blocked else "" + + print(f"{status_indicator}{task.id}: {task.title}") + print(f" Parent: {task.parent_ticket} | Status: {task.status} | Estimate: {task.estimate}") + + # Truncate objective + obj = task.objective[:100] + "..." 
if len(task.objective) > 100 else task.objective + obj = obj.replace("\n", " ") + if obj: + print(f" Objective: {obj}") + + if task.dependencies.depends_on: + print(f" Depends On: {', '.join(task.dependencies.depends_on)}") + if task.dependencies.blocks: + print(f" Blocks: {', '.join(task.dependencies.blocks)}") + + print() + + +def print_dependency_graph(result: ParseResult): + """Print ASCII dependency graph for tasks.""" + print_header("TASK DEPENDENCY GRAPH") + print() + + # Build reverse dependency map (what blocks what) + blocked_by: dict[str, list[str]] = defaultdict(list) + for task in result.tasks: + for dep in task.dependencies.depends_on: + blocked_by[dep].append(task.id) + + # Get task title by ID + task_titles = {t.id: t.title for t in result.tasks} + + # Find root tasks (no dependencies) + root_tasks = [t for t in result.tasks if not t.dependencies.depends_on] + + def print_tree(task_id: str, prefix: str = "", is_last: bool = True): + """Recursively print task tree.""" + connector = "└─> " if is_last else "├─> " + title = task_titles.get(task_id, "Unknown") + title_short = title[:40] + "..." 
if len(title) > 40 else title + + print(f"{prefix}{connector}{task_id} ({title_short})") + + children = blocked_by.get(task_id, []) + for i, child in enumerate(children): + new_prefix = prefix + (" " if is_last else "│ ") + print_tree(child, new_prefix, i == len(children) - 1) + + for i, task in enumerate(root_tasks): + if i > 0: + print() + print_tree(task.id, "", i == len(root_tasks) - 1) + + if not root_tasks: + print("No root tasks found (all tasks have dependencies)") + + print() + + +def print_cross_ticket_graph(result: ParseResult): + """Print ASCII dependency graph for tickets (cross-ticket dependencies).""" + print_header("CROSS-TICKET DEPENDENCY GRAPH") + print() + + # Build graph: ticket -> tickets it blocks + graph = result.get_cross_ticket_dependency_graph() + completed_tickets = result.get_completed_tickets() + + # Get ticket info + ticket_info = {t.id: t for t in result.tickets} + + if not any(t.dependencies.blocked_by or t.dependencies.blocks for t in result.tickets): + print("No cross-ticket dependencies defined.") + print() + print("To add cross-ticket dependencies, use the dependencies field in ticket YAML:") + print() + print(" dependencies:") + print(" blocked_by: [TKT-001] # This ticket waits for TKT-001") + print(" blocks: [TKT-003] # This ticket blocks TKT-003") + print() + return + + # Find root tickets (not blocked by any OTHER ticket) + all_blocked_by = set() + for ticket in result.tickets: + all_blocked_by.update(ticket.dependencies.blocked_by) + + # Root tickets are those that block others but aren't blocked themselves + root_tickets = [t for t in result.tickets if not t.dependencies.blocked_by] + + def print_ticket_tree(ticket_id: str, prefix: str = "", is_last: bool = True): + """Recursively print ticket tree.""" + connector = "└─> " if is_last else "├─> " + ticket = ticket_info.get(ticket_id) + if not ticket: + print(f"{prefix}{connector}{ticket_id} (unknown)") + return + + status_mark = "✓" if ticket_id in completed_tickets else "○" 
+ title_short = ticket.title[:35] + "..." if len(ticket.title) > 35 else ticket.title + task_count = len(result.get_tasks_for_ticket(ticket_id)) + + print(f"{prefix}{connector}[{status_mark}] {ticket_id} ({title_short}) [{task_count} tasks]") + + children = graph.get(ticket_id, []) + for i, child in enumerate(children): + new_prefix = prefix + (" " if is_last else "│ ") + print_ticket_tree(child, new_prefix, i == len(children) - 1) + + for i, ticket in enumerate(root_tickets): + if i > 0: + print() + print_ticket_tree(ticket.id, "", i == len(root_tickets) - 1) + + print() + print("Legend: ✓ = all tasks complete, ○ = incomplete") + print() + + +def print_ready_tasks(result: ParseResult): + """Print tasks that are ready to work on.""" + ready = result.get_ready_tasks() + + print_header(f"READY TASKS ({len(ready)} available)") + print() + + if not ready: + print("No tasks are ready. Either:") + print(" - All tasks have pending dependencies") + print(" - All tasks are already completed or in progress") + print() + return + + for task in ready: + print(f"- {task.id}: {task.title}") + print(f" Parent: {task.parent_ticket} | Estimate: {task.estimate}") + if task.objective: + obj = task.objective[:80] + "..." if len(task.objective) > 80 else task.objective + obj = obj.replace("\n", " ") + print(f" {obj}") + print() + + +def print_requirements(result: ParseResult): + """Print all requirements.""" + print_header(f"REQUIREMENTS ({len(result.requirements)} total)") + print() + + if not result.requirements: + print("No requirements found in .omoi_os/requirements/") + print() + return + + for req in result.requirements: + print(f"{req.id}: {req.title}") + print(f" Status: {req.status} | Priority: {req.priority} | Category: {req.category}") + + if req.condition: + cond = req.condition[:60] + "..." if len(req.condition) > 60 else req.condition + print(f" WHEN: {cond}") + + if req.action: + act = req.action[:60] + "..." 
if len(req.action) > 60 else req.action + print(f" THE SYSTEM SHALL: {act}") + + if req.acceptance_criteria: + print(f" Acceptance Criteria: {len(req.acceptance_criteria)} items") + + if req.linked_tickets: + print(f" Linked Tickets: {', '.join(req.linked_tickets)}") + + print() + + +def print_designs(result: ParseResult): + """Print all designs.""" + print_header(f"DESIGNS ({len(result.designs)} total)") + print() + + if not result.designs: + print("No designs found in .omoi_os/designs/") + print() + return + + for design in result.designs: + print(f"{design.id}: {design.title}") + print(f" Feature: {design.feature} | Status: {design.status}") + + if design.requirements: + print(f" Implements Requirements: {', '.join(design.requirements)}") + + if design.data_models: + print(f" Data Models: {', '.join(dm.name for dm in design.data_models)}") + + if design.api_endpoints: + print(f" API Endpoints: {len(design.api_endpoints)} defined") + + if design.components: + print(f" Components: {', '.join(design.components[:3])}", end="") + if len(design.components) > 3: + print(f" +{len(design.components) - 3} more") + else: + print() + + print() + + +def print_traceability(result: ParseResult): + """Print full traceability matrix: Requirements → Designs → Tickets → Tasks.""" + stats = result.get_traceability_stats() + trace = result.get_full_traceability() + + print_header("TRACEABILITY MATRIX") + print() + + # Summary stats + print("COVERAGE SUMMARY:") + print(f" Requirements: {stats['requirements']['linked']}/{stats['requirements']['total']} linked ({stats['requirements']['coverage']:.1f}%)") + print(f" Designs: {stats['designs']['linked']}/{stats['designs']['total']} linked ({stats['designs']['coverage']:.1f}%)") + print(f" Tickets: {stats['tickets']['linked']}/{stats['tickets']['total']} linked ({stats['tickets']['coverage']:.1f}%)") + print() + print(f"TASK STATUS:") + print(f" Done: {stats['tasks']['done']} | In Progress: {stats['tasks']['in_progress']} | Pending: 
{stats['tasks']['pending']}") + print() + + # Orphans + if any(trace["orphans"].values()): + print("ORPHANED ITEMS (not linked):") + if trace["orphans"]["requirements"]: + print(f" Requirements without tickets: {', '.join(trace['orphans']['requirements'])}") + if trace["orphans"]["designs"]: + print(f" Designs without tickets: {', '.join(trace['orphans']['designs'])}") + if trace["orphans"]["tickets"]: + print(f" Tickets without requirements: {', '.join(trace['orphans']['tickets'])}") + print() + + # Detailed traceability + print_header("REQUIREMENT → IMPLEMENTATION TRACE", char="-") + print() + + for req_id, req_data in trace["requirements"].items(): + req = req_data["requirement"] + print(f"┌─ REQ: {req_id}: {req.title}") + + # Show linked design + if req_data["linked_design"]: + print(f"│ └─> Design: {req_data['linked_design']}") + + # Show linked tickets + if req_data["tickets"]: + print(f"│ └─> Tickets: {', '.join(req_data['tickets'])}") + + # Show tasks for each ticket + for ticket_id in req_data["tickets"]: + tasks = result.get_tasks_for_ticket(ticket_id) + if tasks: + done_count = sum(1 for t in tasks if t.status == "done") + print(f"│ └─> Tasks for {ticket_id}: {done_count}/{len(tasks)} complete") + else: + print("│ └─> (no implementing tickets)") + + print("└" + "─" * 50) + print() + + # Ticket → Task breakdown + print_header("TICKET → TASK BREAKDOWN", char="-") + print() + + for ticket in result.tickets: + tasks = result.get_tasks_for_ticket(ticket.id) + done = sum(1 for t in tasks if t.status == "done") + total = len(tasks) + progress = f"{done}/{total}" if total > 0 else "no tasks" + + # Check if blocked + is_blocked = ticket.is_blocked() + blocked_marker = " [BLOCKED]" if is_blocked else "" + + print(f"┌─ {ticket.id}: {ticket.title}{blocked_marker}") + print(f"│ Status: {ticket.status} | Progress: {progress}") + + if ticket.requirements: + print(f"│ Implements: {', '.join(ticket.requirements)}") + + if tasks: + for task in tasks: + status_char = "✓" 
if task.status == "done" else "○" + print(f"│ [{status_char}] {task.id}: {task.title[:40]}") + + print("└" + "─" * 50) + print() + + +def print_validation(errors: list[ValidationError]): + """Print validation results.""" + print_header("VALIDATION") + print() + + if not errors: + print("✓ No circular dependencies detected") + print("✓ All task references valid") + print("✓ All ticket references valid") + print() + return + + print(f"✗ Found {len(errors)} validation error(s):") + print() + + for error in errors: + print(f" [{error.error_type}] {error.source_id}") + print(f" {error.message}") + print() + + +def show_all(result: ParseResult): + """Show everything: requirements, designs, tickets, tasks, graphs, traceability, validation.""" + print_requirements(result) + print_designs(result) + print_tickets(result.tickets) + print_tasks(result.tasks, result) + print_dependency_graph(result) + print_cross_ticket_graph(result) + print_ready_tasks(result) + print_traceability(result) + + errors = validate_specs(result) + print_validation(errors) + + +# ============================================================================ +# Export Functions +# ============================================================================ + + +def export_json(result: ParseResult) -> str: + """Export all specs as JSON.""" + data = { + "requirements": [ + { + "id": r.id, + "title": r.title, + "status": r.status, + "created": r.created.isoformat(), + "updated": r.updated.isoformat(), + "category": r.category, + "priority": r.priority, + "condition": r.condition, + "action": r.action, + "rationale": r.rationale, + "acceptance_criteria": [ + {"text": ac.text, "completed": ac.completed} + for ac in r.acceptance_criteria + ], + "linked_tickets": r.linked_tickets, + "linked_design": r.linked_design, + } + for r in result.requirements + ], + "designs": [ + { + "id": d.id, + "title": d.title, + "status": d.status, + "created": d.created.isoformat(), + "updated": d.updated.isoformat(), + 
"feature": d.feature, + "requirements": d.requirements, + "architecture": d.architecture, + "data_models": [ + { + "name": dm.name, + "description": dm.description, + "fields": dm.fields, + "relationships": dm.relationships, + } + for dm in d.data_models + ], + "api_endpoints": [ + { + "method": ep.method, + "path": ep.path, + "description": ep.description, + } + for ep in d.api_endpoints + ], + "components": d.components, + "error_handling": d.error_handling, + "security_considerations": d.security_considerations, + "implementation_notes": d.implementation_notes, + } + for d in result.designs + ], + "tickets": [ + { + "id": t.id, + "title": t.title, + "status": t.status, + "priority": t.priority, + "estimate": t.estimate, + "created": t.created.isoformat(), + "updated": t.updated.isoformat(), + "feature": t.feature, + "requirements": t.requirements, + "design_ref": t.design_ref, + "tasks": t.tasks, + "dependencies": { + "blocked_by": t.dependencies.blocked_by, + "blocks": t.dependencies.blocks, + "related": t.dependencies.related, + }, + "description": t.description, + } + for t in result.tickets + ], + "tasks": [ + { + "id": t.id, + "title": t.title, + "status": t.status, + "parent_ticket": t.parent_ticket, + "estimate": t.estimate, + "created": t.created.isoformat(), + "assignee": t.assignee, + "dependencies": { + "depends_on": t.dependencies.depends_on, + "blocks": t.dependencies.blocks, + }, + "objective": t.objective, + } + for t in result.tasks + ], + "traceability": result.get_traceability_stats(), + } + return json.dumps(data, indent=2) + + +# ============================================================================ +# Main CLI +# ============================================================================ + + +def main(): + parser = argparse.ArgumentParser( + description="Parse, validate, and visualize specs from .omoi_os/", + formatter_class=argparse.RawDescriptionHelpFormatter, + epilog=""" +Examples: + %(prog)s show all Show all requirements, 
designs, tickets, tasks + %(prog)s show requirements Show only requirements + %(prog)s show designs Show only designs + %(prog)s show tickets Show only tickets + %(prog)s show tasks Show only tasks + %(prog)s show graph Show task dependency graph + %(prog)s show traceability Show full traceability matrix + %(prog)s show ready Show tasks ready to work on + %(prog)s validate Run validation checks + %(prog)s export json Export to JSON format + %(prog)s sync-specs push Sync requirements/designs to API + """, + ) + + subparsers = parser.add_subparsers(dest="command", help="Command to run") + + # show command + show_parser = subparsers.add_parser("show", help="Show specs") + show_parser.add_argument( + "what", + choices=["all", "requirements", "designs", "tickets", "tasks", "graph", "ticket-graph", "traceability", "ready"], + help="What to show (graph=task deps, ticket-graph=cross-ticket deps, traceability=full matrix)", + ) + + # validate command + subparsers.add_parser("validate", help="Validate specs") + + # export command + export_parser = subparsers.add_parser("export", help="Export specs") + export_parser.add_argument( + "format", + choices=["json"], + help="Export format", + ) + + # projects command + projects_parser = subparsers.add_parser("projects", help="List API projects") + projects_parser.add_argument( + "--api-url", + default="http://localhost:18000", + help="API base URL", + ) + projects_parser.add_argument( + "--api-key", + help="API key for authentication (or set OMOIOS_API_KEY env var)", + ) + + # project command (show single project with tickets/tasks) + project_parser = subparsers.add_parser("project", help="Show project details with tickets and tasks") + project_parser.add_argument( + "project_id", + help="Project ID to display", + ) + project_parser.add_argument( + "--api-url", + default="http://localhost:18000", + help="API base URL", + ) + project_parser.add_argument( + "--api-key", + help="API key for authentication (or set OMOIOS_API_KEY env 
var)", + ) + + # sync command + sync_parser = subparsers.add_parser("sync", help="Sync with API") + sync_parser.add_argument( + "action", + choices=["push", "diff"], + help="Sync action (push=create/update, diff=dry run)", + ) + sync_parser.add_argument( + "--api-url", + default="http://localhost:18000", + help="API base URL", + ) + sync_parser.add_argument( + "--project-id", + help="Project ID to associate tickets with", + ) + sync_parser.add_argument( + "--email", + help="Email for login (or set OMOIOS_EMAIL env var)", + ) + sync_parser.add_argument( + "--password", + help="Password for login (or set OMOIOS_PASSWORD env var)", + ) + sync_parser.add_argument( + "--token", + help="JWT access token (or set OMOIOS_TOKEN env var)", + ) + sync_parser.add_argument( + "--api-key", + help="API key for authentication (or set OMOIOS_API_KEY env var)", + ) + + # sync-specs command (sync requirements/designs to API) + sync_specs_parser = subparsers.add_parser("sync-specs", help="Sync requirements/designs to API specs") + sync_specs_parser.add_argument( + "action", + choices=["push", "diff"], + help="Sync action (push=create/update, diff=dry run)", + ) + sync_specs_parser.add_argument( + "--api-url", + default="http://localhost:18000", + help="API base URL", + ) + sync_specs_parser.add_argument( + "--project-id", + required=True, + help="Project ID to associate spec with (required)", + ) + sync_specs_parser.add_argument( + "--spec-title", + help="Spec title (defaults to design feature name)", + ) + sync_specs_parser.add_argument( + "--api-key", + help="API key for authentication (or set OMOIOS_API_KEY env var)", + ) + sync_specs_parser.add_argument( + "--token", + help="JWT access token (or set OMOIOS_TOKEN env var)", + ) + + # traceability command (show API traceability) + trace_parser = subparsers.add_parser("api-trace", help="Show traceability from API") + trace_parser.add_argument( + "project_id", + help="Project ID to show traceability for", + ) + 
trace_parser.add_argument( + "--api-url", + default="http://localhost:18000", + help="API base URL", + ) + trace_parser.add_argument( + "--api-key", + help="API key for authentication (or set OMOIOS_API_KEY env var)", + ) + + args = parser.parse_args() + + if not args.command: + parser.print_help() + sys.exit(1) + + # Parse all specs + spec_parser = SpecParser() + result = spec_parser.parse_all() + + if args.command == "show": + if args.what == "all": + show_all(result) + elif args.what == "requirements": + print_requirements(result) + elif args.what == "designs": + print_designs(result) + elif args.what == "tickets": + print_tickets(result.tickets) + elif args.what == "tasks": + print_tasks(result.tasks, result) + elif args.what == "graph": + print_dependency_graph(result) + elif args.what == "ticket-graph": + print_cross_ticket_graph(result) + elif args.what == "traceability": + print_traceability(result) + elif args.what == "ready": + print_ready_tasks(result) + + elif args.command == "validate": + errors = validate_specs(result) + print_validation(errors) + if errors: + sys.exit(1) + + elif args.command == "export": + if args.format == "json": + print(export_json(result)) + + elif args.command == "projects": + import asyncio + import os + from api_client import OmoiOSClient + + async def list_projects(): + api_key = args.api_key or os.environ.get("OMOIOS_API_KEY") + client = OmoiOSClient(base_url=args.api_url, api_key=api_key) + projects = await client.list_projects() + if projects: + print_header(f"PROJECTS ({len(projects)} total)") + print() + for p in projects: + print(f" {p.get('id', 'N/A')}: {p.get('name', 'Unnamed')}") + if p.get('description'): + desc = p['description'][:60] + "..." 
if len(p['description']) > 60 else p['description'] + print(f" {desc}") + print() + else: + print("No projects found.") + + asyncio.run(list_projects()) + + elif args.command == "project": + import asyncio + import os + from api_client import OmoiOSClient + + async def show_project(): + api_key = args.api_key or os.environ.get("OMOIOS_API_KEY") + client = OmoiOSClient(base_url=args.api_url, api_key=api_key) + data = await client.get_project_with_tickets(args.project_id) + + if "error" in data: + print(f"Error: {data['error']}") + sys.exit(1) + + project = data.get("project", {}) + tickets = data.get("tickets", []) + + print_header(f"PROJECT: {project.get('name', 'Unknown')}") + print() + print(f" ID: {project.get('id', 'N/A')}") + if project.get('description'): + print(f" Description: {project['description'][:80]}") + print() + print(f" Total Tickets: {data.get('total_tickets', 0)}") + print(f" Total Tasks: {data.get('total_tasks', 0)}") + print() + + if not tickets: + print(" No tickets found for this project.") + return + + # Group tickets by status + by_status = {} + for t in tickets: + status = t.get("status", "unknown") + if status not in by_status: + by_status[status] = [] + by_status[status].append(t) + + # Print tickets grouped by status + print_header("TICKETS BY STATUS", char="-") + print() + + for status, status_tickets in sorted(by_status.items()): + print(f" [{status.upper()}] ({len(status_tickets)} tickets)") + print() + + for ticket in status_tickets: + print(f" {ticket.get('id', 'N/A')[:20]}...") + print(f" Title: {ticket.get('title', 'No title')}") + print(f" Priority: {ticket.get('priority', 'N/A')}") + + tasks = ticket.get("tasks", []) + if tasks: + print(f" Tasks: ({len(tasks)} total)") + for task in tasks[:5]: # Show max 5 tasks per ticket + task_status = task.get("status", "unknown") + print(f" - [{task_status}] {task.get('title', task.get('description', 'No title')[:40])}") + if len(tasks) > 5: + print(f" ... 
and {len(tasks) - 5} more tasks") + else: + print(" Tasks: None") + print() + + asyncio.run(show_project()) + + elif args.command == "sync": + import asyncio + import os + from api_client import run_sync + + api_key = getattr(args, 'api_key', None) or os.environ.get("OMOIOS_API_KEY") + success = asyncio.run( + run_sync( + args.api_url, + args.action, + args.project_id, + args.email, + args.password, + args.token, + api_key, + ) + ) + if not success: + sys.exit(1) + + elif args.command == "sync-specs": + import asyncio + import os + from api_client import OmoiOSClient, print_sync_summary + + async def run_sync_specs(): + api_key = args.api_key or os.environ.get("OMOIOS_API_KEY") + token = args.token or os.environ.get("OMOIOS_TOKEN") + + client = OmoiOSClient(base_url=args.api_url, api_key=api_key, token=token) + + # Check connection + print(f"Connecting to {args.api_url}...") + connected, msg = await client.check_connection() + if not connected: + print(f"Error: Cannot connect to API: {msg}") + return False + + print("Connected!") + print(f"Project ID: {args.project_id}") + print(f"Requirements: {len(result.requirements)}") + print(f"Designs: {len(result.designs)}") + print() + + # Run sync + if args.action == "diff": + print("Checking what would change (dry run)...") + summary = await client.diff_specs( + result, + args.project_id, + args.spec_title, + ) + else: # push + print("Syncing specs to API...") + summary = await client.sync_specs( + result, + args.project_id, + args.spec_title, + ) + + print_sync_summary(summary) + return summary.failed == 0 + + success = asyncio.run(run_sync_specs()) + if not success: + sys.exit(1) + + elif args.command == "api-trace": + import asyncio + import os + from api_client import OmoiOSClient + + async def show_api_traceability(): + api_key = args.api_key or os.environ.get("OMOIOS_API_KEY") + client = OmoiOSClient(base_url=args.api_url, api_key=api_key) + + print(f"Fetching traceability from {args.api_url}...") + trace = await 
client.get_full_traceability(args.project_id) + + print_header("API TRACEABILITY MATRIX") + print() + + # Specs summary + specs = trace.get("specs", []) + print(f"SPECS: {len(specs)} total") + for spec in specs: + req_count = len(spec.get("requirements", [])) + ticket_count = len(spec.get("linked_tickets", [])) + print(f" [{spec['status']}] {spec['id'][:20]}...") + print(f" Title: {spec['title']}") + print(f" Requirements: {req_count} | Linked Tickets: {ticket_count}") + print() + + # Tickets summary + tickets = trace.get("tickets", []) + orphans = trace.get("orphan_tickets", []) + print(f"LINKED TICKETS: {len(tickets)} total") + for ticket in tickets[:10]: # Show max 10 + task_count = len(ticket.get("tasks", [])) + print(f" [{ticket['status']}] {ticket['id'][:20]}...") + print(f" Title: {ticket['title'][:50]}") + print(f" Tasks: {task_count}") + if len(tickets) > 10: + print(f" ... and {len(tickets) - 10} more tickets") + print() + + if orphans: + print(f"ORPHAN TICKETS (not linked to specs): {len(orphans)}") + for ticket in orphans[:5]: + print(f" - {ticket['id'][:20]}... {ticket['title'][:40]}") + if len(orphans) > 5: + print(f" ... and {len(orphans) - 5} more") + print() + + asyncio.run(show_api_traceability()) + + +if __name__ == "__main__": + main() diff --git a/.claude/skills/spec-driven-dev/scripts/validate_specs.py b/.claude/skills/spec-driven-dev/scripts/validate_specs.py index aa57afd5..eb420a8f 100755 --- a/.claude/skills/spec-driven-dev/scripts/validate_specs.py +++ b/.claude/skills/spec-driven-dev/scripts/validate_specs.py @@ -2,6 +2,8 @@ """ Validate spec documents for completeness and consistency. +Uses YAML frontmatter validation (consistent with parse_specs.py). 
+ Usage: python validate_specs.py [--path PATH] python validate_specs.py --requirements @@ -21,6 +23,8 @@ from dataclasses import dataclass, field from pathlib import Path +import yaml + @dataclass class ValidationResult: @@ -48,10 +52,39 @@ def get_project_root() -> Path: return current +def parse_frontmatter(content: str) -> tuple[dict | None, str]: + """Extract YAML frontmatter and markdown body from content. + + Returns: + Tuple of (frontmatter dict or None if missing, remaining markdown body) + """ + if not content.startswith("---"): + return None, content + + # Find end of frontmatter + end_match = re.search(r"\n---\s*\n", content[3:]) + if not end_match: + return None, content + + frontmatter_text = content[3 : end_match.start() + 3] + body = content[end_match.end() + 3 :] + + try: + frontmatter = yaml.safe_load(frontmatter_text) + except yaml.YAMLError: + return None, content + + if not isinstance(frontmatter, dict): + return None, content + + return frontmatter, body + + def validate_requirements(file_path: Path) -> ValidationResult: """Validate a requirements document.""" result = ValidationResult(file=file_path, doc_type="requirements") content = file_path.read_text() + frontmatter, body = parse_frontmatter(content) # Check for required sections required_sections = [ @@ -77,13 +110,18 @@ def validate_requirements(file_path: Path) -> ValidationResult: if not has_normative: result.warnings.append("No normative language found (SHALL/MUST/SHOULD/MAY)") - # Check for status - if "**Status**:" not in content: - result.warnings.append("Missing Status field") - - # Check for created date - if "**Created**:" not in content: - result.warnings.append("Missing Created date") + # Check for status - support both YAML frontmatter and markdown + if frontmatter: + if "status" not in frontmatter: + result.warnings.append("Missing status field in frontmatter") + if "created" not in frontmatter: + result.warnings.append("Missing created field in frontmatter") + else: + # 
Fallback to old markdown style check + if "**Status**:" not in content: + result.warnings.append("Missing Status field (no frontmatter found)") + if "**Created**:" not in content: + result.warnings.append("Missing Created date (no frontmatter found)") return result @@ -92,6 +130,7 @@ def validate_design(file_path: Path) -> ValidationResult: """Validate a design document.""" result = ValidationResult(file=file_path, doc_type="design") content = file_path.read_text() + frontmatter, body = parse_frontmatter(content) # Check for required sections required_sections = [ @@ -111,9 +150,13 @@ def validate_design(file_path: Path) -> ValidationResult: if "Responsibilities" not in content: result.warnings.append("No component responsibilities documented") - # Check for status - if "**Status**:" not in content: - result.warnings.append("Missing Status field") + # Check for status - support both YAML frontmatter and markdown + if frontmatter: + if "status" not in frontmatter: + result.warnings.append("Missing status field in frontmatter") + else: + if "**Status**:" not in content: + result.warnings.append("Missing Status field (no frontmatter found)") # Check for related requirements link if "requirements" not in content.lower() and "Requirements" not in content: @@ -123,70 +166,129 @@ def validate_design(file_path: Path) -> ValidationResult: def validate_ticket(file_path: Path) -> ValidationResult: - """Validate a ticket document.""" + """Validate a ticket document (YAML frontmatter format).""" result = ValidationResult(file=file_path, doc_type="ticket") content = file_path.read_text() + frontmatter, body = parse_frontmatter(content) + + if not frontmatter: + result.errors.append("Missing YAML frontmatter") + return result + + # Check for ticket ID in frontmatter + if "id" not in frontmatter: + result.errors.append("Missing 'id' field in frontmatter") + elif not frontmatter["id"].startswith("TKT-"): + result.errors.append(f"Invalid ticket ID format: {frontmatter['id']} 
(expected TKT-XXX)") + + # Check for required fields in frontmatter + required_fields = { + "id": "Ticket ID (e.g., TKT-001)", + "title": "Ticket title", + "status": "Status (backlog, in_progress, done, etc.)", + "priority": "Priority (LOW, MEDIUM, HIGH, CRITICAL)", + "estimate": "Estimate (XS, S, M, L, XL)", + "created": "Created date", + "updated": "Updated date", + } - # Check for TKT-XXX format - tkt_pattern = r"TKT-(?:[A-Z]+-)?(\d{3})" - tkts = re.findall(tkt_pattern, content) - - if not tkts: - result.errors.append("No ticket ID found (expected TKT-XXX format)") - - # Check for required fields - required_fields = [ - "**Status**:", - "**Priority**:", - ] - - for field_name in required_fields: - if field_name not in content: - result.errors.append(f"Missing required field: {field_name}") - - # Check for acceptance criteria - if "## Acceptance Criteria" not in content: - result.warnings.append("Missing Acceptance Criteria section") - - # Check for traceability - if "## Traceability" not in content and "**Requirements**:" not in content: - result.warnings.append("Missing traceability to requirements") + for field_key, description in required_fields.items(): + if field_key not in frontmatter: + result.errors.append(f"Missing required field: {field_key} ({description})") + + # Validate status value + valid_statuses = ["backlog", "ready", "in_progress", "review", "done", "blocked"] + if frontmatter.get("status") and frontmatter["status"] not in valid_statuses: + result.warnings.append(f"Non-standard status: {frontmatter['status']} (expected one of: {', '.join(valid_statuses)})") + + # Validate priority value + valid_priorities = ["LOW", "MEDIUM", "HIGH", "CRITICAL"] + if frontmatter.get("priority") and frontmatter["priority"] not in valid_priorities: + result.warnings.append(f"Non-standard priority: {frontmatter['priority']} (expected one of: {', '.join(valid_priorities)})") + + # Validate estimate value + valid_estimates = ["XS", "S", "M", "L", "XL"] + if 
frontmatter.get("estimate") and frontmatter["estimate"] not in valid_estimates: + result.warnings.append(f"Non-standard estimate: {frontmatter['estimate']} (expected one of: {', '.join(valid_estimates)})") + + # Check for acceptance criteria in body + if "## Acceptance Criteria" not in body: + result.warnings.append("Missing Acceptance Criteria section in body") + + # Check for requirements traceability + if not frontmatter.get("requirements"): + result.warnings.append("No requirements linked (consider adding requirements field)") + + # Check for dependency structure + deps = frontmatter.get("dependencies", {}) + if deps: + expected_dep_fields = ["blocked_by", "blocks", "related"] + for dep_field in expected_dep_fields: + if dep_field not in deps: + result.warnings.append(f"Missing dependencies.{dep_field} field") return result def validate_task(file_path: Path) -> ValidationResult: - """Validate a task document.""" + """Validate a task document (YAML frontmatter format).""" result = ValidationResult(file=file_path, doc_type="task") content = file_path.read_text() + frontmatter, body = parse_frontmatter(content) + + if not frontmatter: + result.errors.append("Missing YAML frontmatter") + return result + + # Check for task ID in frontmatter + if "id" not in frontmatter: + result.errors.append("Missing 'id' field in frontmatter") + elif not frontmatter["id"].startswith("TSK-"): + result.errors.append(f"Invalid task ID format: {frontmatter['id']} (expected TSK-XXX)") + + # Check for required fields in frontmatter + required_fields = { + "id": "Task ID (e.g., TSK-001)", + "title": "Task title", + "status": "Status (pending, in_progress, done, etc.)", + "parent_ticket": "Parent ticket ID (e.g., TKT-001)", + "estimate": "Estimate (XS, S, M, L, XL)", + "created": "Created date", + } - # Check for TSK-XXX format - tsk_pattern = r"TSK-(?:[A-Z]+-)?(\d{3})" - tsks = re.findall(tsk_pattern, content) - - if not tsks: - result.errors.append("No task ID found (expected TSK-XXX 
format)") - - # Check for parent ticket reference - if "**Parent Ticket**:" not in content and "TKT-" not in content: - result.errors.append("Missing parent ticket reference") - - # Check for required fields - required_fields = [ - "**Status**:", - ] - - for field_name in required_fields: - if field_name not in content: - result.errors.append(f"Missing required field: {field_name}") - - # Check for deliverables - if "## Deliverables" not in content: - result.warnings.append("Missing Deliverables section") - - # Check for acceptance criteria - if "## Acceptance Criteria" not in content: - result.warnings.append("Missing Acceptance Criteria section") + for field_key, description in required_fields.items(): + if field_key not in frontmatter: + result.errors.append(f"Missing required field: {field_key} ({description})") + + # Check parent ticket reference format + if frontmatter.get("parent_ticket") and not frontmatter["parent_ticket"].startswith("TKT-"): + result.errors.append(f"Invalid parent_ticket format: {frontmatter['parent_ticket']} (expected TKT-XXX)") + + # Validate status value + valid_statuses = ["pending", "in_progress", "review", "done", "blocked"] + if frontmatter.get("status") and frontmatter["status"] not in valid_statuses: + result.warnings.append(f"Non-standard status: {frontmatter['status']} (expected one of: {', '.join(valid_statuses)})") + + # Validate estimate value + valid_estimates = ["XS", "S", "M", "L", "XL"] + if frontmatter.get("estimate") and frontmatter["estimate"] not in valid_estimates: + result.warnings.append(f"Non-standard estimate: {frontmatter['estimate']} (expected one of: {', '.join(valid_estimates)})") + + # Check for objective/description in body + if "## Objective" not in body and "## Description" not in body: + result.warnings.append("Missing Objective/Description section in body") + + # Check for acceptance criteria in body + if "## Acceptance Criteria" not in body: + result.warnings.append("Missing Acceptance Criteria 
section in body") + + # Check for dependency structure + deps = frontmatter.get("dependencies", {}) + if deps: + expected_dep_fields = ["depends_on", "blocks"] + for dep_field in expected_dep_fields: + if dep_field not in deps: + result.warnings.append(f"Missing dependencies.{dep_field} field") return result diff --git a/.omoi_os/requirements/webhook-notifications.md b/.omoi_os/requirements/webhook-notifications.md index c5a8c288..74bc25be 100644 --- a/.omoi_os/requirements/webhook-notifications.md +++ b/.omoi_os/requirements/webhook-notifications.md @@ -1,9 +1,16 @@ -# Webhook Notifications Requirements +--- +id: REQ-WEBHOOK-001 +title: Webhook Notifications Requirements +feature: webhook-notifications +created: 2025-12-27 +updated: 2025-12-29 +status: draft +category: functional +priority: HIGH +design_ref: designs/webhook-notifications.md +--- -**Feature**: webhook-notifications -**Created**: 2025-12-27 -**Status**: Draft -**Author**: Claude (spec-driven-dev skill) +# Webhook Notifications Requirements ## Overview diff --git a/.omoi_os/tasks/TSK-001-add-webhook-url-fields.md b/.omoi_os/tasks/TSK-001-add-webhook-url-fields.md index 6070867f..0d0e731b 100644 --- a/.omoi_os/tasks/TSK-001-add-webhook-url-fields.md +++ b/.omoi_os/tasks/TSK-001-add-webhook-url-fields.md @@ -1,21 +1,42 @@ -# TSK-001: Add webhook_url to Project and Ticket Models +--- +id: TSK-001 +title: Add webhook_url to Project and Ticket Models +status: pending +parent_ticket: TKT-001 +estimate: S +created: 2025-12-29 +assignee: null +dependencies: + depends_on: [] + blocks: + - TSK-002 +--- -**Status**: pending -**Parent Ticket**: TKT-001 -**Estimate**: S -**Assignee**: (unassigned) +# TSK-001: Add webhook_url to Project and Ticket Models ## Objective -Add `webhook_url` field to both Project and Ticket models. +Add `webhook_url` field to both Project and Ticket models to enable webhook configuration. 
+ +--- ## Deliverables - [ ] `backend/omoi_os/models/project.py` - Add webhook_url field - [ ] `backend/omoi_os/models/ticket.py` - Add webhook_url field +--- + ## Implementation Notes +### Approach + +1. Add webhook_url field to Project model +2. Add webhook_url field to Ticket model +3. Verify models import correctly + +### Code Patterns + Add to Project model (after line 68 in project.py): ```python webhook_url: Mapped[Optional[str]] = mapped_column( @@ -34,8 +55,36 @@ webhook_url: Mapped[Optional[str]] = mapped_column( ) ``` +### References +- designs/webhook-notifications.md#data-model + +--- + ## Acceptance Criteria -- Fields are nullable strings (max 2048 chars for URLs) -- Include SQLAlchemy comments for documentation -- No breaking changes to existing functionality +- [ ] Fields are nullable strings (max 2048 chars for URLs) +- [ ] Include SQLAlchemy comments for documentation +- [ ] No breaking changes to existing functionality +- [ ] Models import without errors + +--- + +## Testing Requirements + +### Unit Tests +```python +def test_project_webhook_url_field(): + project = Project(name="test") + project.webhook_url = "https://example.com/webhook" + assert project.webhook_url == "https://example.com/webhook" +``` + +### Edge Cases +- None/null values should be allowed +- Long URLs (up to 2048 chars) should work + +--- + +## Notes + +This is a model-only change. Database migration is handled in TSK-002. 
diff --git a/.omoi_os/tasks/TSK-002-create-migration.md b/.omoi_os/tasks/TSK-002-create-migration.md index 48c66dbc..ba498e9e 100644 --- a/.omoi_os/tasks/TSK-002-create-migration.md +++ b/.omoi_os/tasks/TSK-002-create-migration.md @@ -1,21 +1,45 @@ -# TSK-002: Create Database Migration for Webhook Fields +--- +id: TSK-002 +title: Create Database Migration for Webhook Fields +status: pending +parent_ticket: TKT-001 +estimate: S +created: 2025-12-29 +assignee: null +dependencies: + depends_on: + - TSK-001 + blocks: + - TSK-003 + - TSK-004 + - TSK-005 +--- -**Status**: pending -**Parent Ticket**: TKT-001 -**Estimate**: S -**Assignee**: (unassigned) -**Depends On**: TSK-001 +# TSK-002: Create Database Migration for Webhook Fields ## Objective Create Alembic migration to add webhook_url columns to projects and tickets tables. +--- + ## Deliverables - [ ] `backend/migrations/versions/xxx_add_webhook_url_fields.py` +--- + ## Implementation Notes +### Approach + +1. Generate migration file with Alembic +2. Add upgrade logic for both columns +3. Add downgrade logic to remove columns +4. 
Test migration forwards and backwards + +### Code Patterns + ```bash cd backend uv run alembic revision -m "add_webhook_url_fields" @@ -32,8 +56,37 @@ def downgrade(): op.drop_column('projects', 'webhook_url') ``` +### References +- Alembic documentation +- Existing migrations in backend/migrations/versions/ + +--- + ## Acceptance Criteria -- Migration runs without errors: `uv run alembic upgrade head` -- Migration is reversible: `uv run alembic downgrade -1` -- Existing data is preserved +- [ ] Migration runs without errors: `uv run alembic upgrade head` +- [ ] Migration is reversible: `uv run alembic downgrade -1` +- [ ] Existing data is preserved +- [ ] Columns have correct type (VARCHAR 2048, nullable) + +--- + +## Testing Requirements + +### Manual Testing +```bash +cd backend +uv run alembic upgrade head +uv run alembic downgrade -1 +uv run alembic upgrade head +``` + +### Edge Cases +- Running on empty database +- Running on database with existing projects/tickets + +--- + +## Notes + +Depends on TSK-001 completing first so model fields match migration. diff --git a/.omoi_os/tasks/TSK-003-webhook-delivery-service.md b/.omoi_os/tasks/TSK-003-webhook-delivery-service.md index d8172af6..c8725efe 100644 --- a/.omoi_os/tasks/TSK-003-webhook-delivery-service.md +++ b/.omoi_os/tasks/TSK-003-webhook-delivery-service.md @@ -1,21 +1,46 @@ -# TSK-003: Implement WebhookDeliveryService +--- +id: TSK-003 +title: Implement WebhookDeliveryService +status: pending +parent_ticket: TKT-001 +estimate: M +created: 2025-12-29 +assignee: null +dependencies: + depends_on: + - TSK-002 + blocks: + - TSK-004 + - TSK-006 +--- -**Status**: pending -**Parent Ticket**: TKT-001 -**Estimate**: M -**Assignee**: (unassigned) +# TSK-003: Implement WebhookDeliveryService ## Objective -Create a reusable HTTP webhook delivery service with retry logic. +Create a reusable HTTP webhook delivery service with retry logic and structured logging. 
+ +--- ## Deliverables - [ ] `backend/omoi_os/services/webhook_delivery.py` - [ ] `backend/tests/unit/services/test_webhook_delivery.py` +--- + ## Implementation Notes +### Approach + +1. Create WebhookDeliveryService class +2. Implement async deliver method with httpx +3. Add retry logic with exponential backoff +4. Add structured logging for all attempts +5. Write unit tests with mocked HTTP + +### Code Patterns + Key features: - HTTP POST with `application/json` content type - 10 second timeout @@ -32,13 +57,72 @@ class WebhookDeliveryService: RETRY_DELAYS = [1.0, 2.0, 4.0] async def deliver(self, url: str, payload: dict) -> bool: - # Implementation per design doc + """Deliver webhook with retry logic. + + Returns True on success, False on failure. + """ + for attempt in range(self.MAX_RETRIES): + try: + async with httpx.AsyncClient(timeout=self.TIMEOUT_SECONDS) as client: + response = await client.post(url, json=payload) + if response.status_code < 400: + return True + if 400 <= response.status_code < 500: + # Client error - don't retry + return False + # Server error - retry + except httpx.TimeoutException: + pass + + if attempt < self.MAX_RETRIES - 1: + await asyncio.sleep(self.RETRY_DELAYS[attempt]) + + return False ``` +### References +- httpx documentation +- designs/webhook-notifications.md#webhook-delivery + +--- + ## Acceptance Criteria -- Successful delivery returns True -- Failed delivery (after retries) returns False -- All attempts are logged with structured data -- Timeout is enforced at 10 seconds -- 5xx errors trigger retry, 4xx do not +- [ ] Successful delivery returns True +- [ ] Failed delivery (after retries) returns False +- [ ] All attempts are logged with structured data +- [ ] Timeout is enforced at 10 seconds +- [ ] 5xx errors trigger retry, 4xx do not +- [ ] Exponential backoff: 1s, 2s, 4s between retries + +--- + +## Testing Requirements + +### Unit Tests +```python +@respx.mock +async def test_successful_delivery(): + 
respx.post("https://example.com/hook").respond(200) + service = WebhookDeliveryService() + result = await service.deliver("https://example.com/hook", {"test": True}) + assert result is True + +@respx.mock +async def test_retry_on_5xx(): + route = respx.post("https://example.com/hook") + route.side_effect = [httpx.Response(500), httpx.Response(500), httpx.Response(200)] + # ... verify 3 attempts made +``` + +### Edge Cases +- Connection refused +- DNS resolution failure +- Very slow responses (timeout) +- Empty payloads + +--- + +## Notes + +Use `respx` or `pytest-httpx` for mocking HTTP in tests. diff --git a/.omoi_os/tasks/TSK-004-webhook-notification-service.md b/.omoi_os/tasks/TSK-004-webhook-notification-service.md index 3d405350..4d648495 100644 --- a/.omoi_os/tasks/TSK-004-webhook-notification-service.md +++ b/.omoi_os/tasks/TSK-004-webhook-notification-service.md @@ -1,26 +1,46 @@ -# TSK-004: Implement WebhookNotificationService +--- +id: TSK-004 +title: Implement WebhookNotificationService +status: pending +parent_ticket: TKT-001 +estimate: M +created: 2025-12-29 +assignee: null +dependencies: + depends_on: + - TSK-002 + - TSK-003 + blocks: + - TSK-006 +--- -**Status**: pending -**Parent Ticket**: TKT-001 -**Estimate**: M -**Assignee**: (unassigned) -**Depends On**: TSK-001, TSK-003 +# TSK-004: Implement WebhookNotificationService ## Objective -Create service that subscribes to EventBus events and delivers webhooks. +Create service that subscribes to EventBus events and delivers webhooks using WebhookDeliveryService. + +--- ## Deliverables - [ ] `backend/omoi_os/services/webhook_notification.py` - [ ] `backend/tests/integration/services/test_webhook_notification.py` +--- + ## Implementation Notes -1. Subscribe to events: `task.completed`, `task.failed`, `agent.stuck` -2. Resolve webhook URL (ticket override → project fallback) -3. Build standardized payload -4. Deliver via WebhookDeliveryService +### Approach + +1. 
Create WebhookNotificationService class +2. Subscribe to events: `task.completed`, `task.failed`, `agent.stuck` +3. Implement URL resolution (ticket override → project fallback) +4. Build standardized payload +5. Deliver via WebhookDeliveryService +6. Write integration tests + +### Code Patterns URL resolution logic: ```python @@ -48,9 +68,57 @@ Payload format: } ``` +Event subscription: +```python +class WebhookNotificationService: + def __init__(self, event_bus: EventBusService, delivery: WebhookDeliveryService): + self.event_bus = event_bus + self.delivery = delivery + + async def start(self): + await self.event_bus.subscribe("task.completed", self._handle_event) + await self.event_bus.subscribe("task.failed", self._handle_event) + await self.event_bus.subscribe("agent.stuck", self._handle_event) +``` + +### References +- designs/webhook-notifications.md#notification-flow +- backend/omoi_os/services/event_bus.py + +--- + ## Acceptance Criteria -- Subscribes to correct events on startup -- Resolves URL with correct precedence (ticket > project) -- Payload matches documented schema -- Delivery failures don't block event processing +- [ ] Subscribes to correct events on startup +- [ ] Resolves URL with correct precedence (ticket > project) +- [ ] Payload matches documented schema +- [ ] Delivery failures don't block event processing +- [ ] No webhook when URL is None (no error) + +--- + +## Testing Requirements + +### Integration Tests +```python +async def test_task_completed_triggers_webhook(): + # Setup: project with webhook_url + # Action: emit task.completed event + # Assert: webhook delivered with correct payload + +async def test_ticket_url_overrides_project(): + # Setup: project with URL A, ticket with URL B + # Action: emit event for that ticket's task + # Assert: webhook delivered to URL B +``` + +### Edge Cases +- No webhook configured (should not error) +- Ticket without project +- Delivery failure (should log but not raise) + +--- + +## Notes + 
+Depends on TSK-003 (WebhookDeliveryService) for actual HTTP delivery. diff --git a/.omoi_os/tasks/TSK-005-update-api-routes.md b/.omoi_os/tasks/TSK-005-update-api-routes.md index ca751d86..5ba42bf1 100644 --- a/.omoi_os/tasks/TSK-005-update-api-routes.md +++ b/.omoi_os/tasks/TSK-005-update-api-routes.md @@ -1,14 +1,25 @@ -# TSK-005: Update API Routes for Webhook Configuration +--- +id: TSK-005 +title: Update API Routes for Webhook Configuration +status: pending +parent_ticket: TKT-001 +estimate: S +created: 2025-12-29 +assignee: null +dependencies: + depends_on: + - TSK-002 + blocks: + - TSK-006 +--- -**Status**: pending -**Parent Ticket**: TKT-001 -**Estimate**: S -**Assignee**: (unassigned) -**Depends On**: TSK-001, TSK-002 +# TSK-005: Update API Routes for Webhook Configuration ## Objective -Update project and ticket API routes to support webhook_url field. +Update project and ticket API routes to support reading and updating webhook_url field. + +--- ## Deliverables @@ -17,29 +28,96 @@ Update project and ticket API routes to support webhook_url field. - [ ] `backend/omoi_os/api/schemas/project.py` - Add webhook_url to schema - [ ] `backend/omoi_os/api/schemas/ticket.py` - Add webhook_url to schema +--- + ## Implementation Notes +### Approach + +1. Update Pydantic schemas with webhook_url field +2. Update route handlers to accept webhook_url in PATCH +3. Ensure GET responses include webhook_url +4. Add URL validation + +### Code Patterns + Add to Pydantic schemas: ```python class ProjectUpdate(BaseModel): # ... existing fields ... webhook_url: Optional[str] = Field(None, max_length=2048) +class ProjectResponse(BaseModel): + # ... existing fields ... + webhook_url: Optional[str] = None + class TicketUpdate(BaseModel): # ... existing fields ... webhook_url: Optional[str] = Field(None, max_length=2048) + +class TicketResponse(BaseModel): + # ... existing fields ... 
+ webhook_url: Optional[str] = None ``` Optional: Add URL validation ```python -from pydantic import HttpUrl +from pydantic import HttpUrl, field_validator + +class ProjectUpdate(BaseModel): + webhook_url: Optional[str] = Field(None, max_length=2048) -webhook_url: Optional[HttpUrl] = None + @field_validator('webhook_url') + @classmethod + def validate_url(cls, v): + if v is not None and not v.startswith(('http://', 'https://')): + raise ValueError('webhook_url must be a valid HTTP(S) URL') + return v ``` +### References +- backend/omoi_os/api/routes/projects.py +- backend/omoi_os/api/routes/tickets.py +- Pydantic v2 documentation + +--- + ## Acceptance Criteria -- PATCH /api/v1/projects/{id} accepts webhook_url -- PATCH /api/v1/tickets/{id} accepts webhook_url -- GET responses include webhook_url field -- Invalid URLs are rejected with 422 +- [ ] PATCH /api/v1/projects/{id} accepts webhook_url +- [ ] PATCH /api/v1/tickets/{id} accepts webhook_url +- [ ] GET responses include webhook_url field +- [ ] Invalid URLs are rejected with 422 status + +--- + +## Testing Requirements + +### API Tests +```python +async def test_update_project_webhook_url(client): + response = await client.patch( + f"/api/v1/projects/{project_id}", + json={"webhook_url": "https://example.com/webhook"} + ) + assert response.status_code == 200 + assert response.json()["webhook_url"] == "https://example.com/webhook" + +async def test_invalid_url_rejected(client): + response = await client.patch( + f"/api/v1/projects/{project_id}", + json={"webhook_url": "not-a-valid-url"} + ) + assert response.status_code == 422 +``` + +### Edge Cases +- Setting webhook_url to null (should clear it) +- Very long URLs (near 2048 limit) +- URLs with query parameters and fragments + +--- + +## Notes + +Schema changes should be backward compatible - webhook_url is optional. 
diff --git a/.omoi_os/tasks/TSK-006-tests.md b/.omoi_os/tasks/TSK-006-tests.md index a8971b78..c51fb494 100644 --- a/.omoi_os/tasks/TSK-006-tests.md +++ b/.omoi_os/tasks/TSK-006-tests.md @@ -1,14 +1,26 @@ -# TSK-006: Add Unit and Integration Tests +--- +id: TSK-006 +title: Add Unit and Integration Tests +status: pending +parent_ticket: TKT-001 +estimate: M +created: 2025-12-29 +assignee: null +dependencies: + depends_on: + - TSK-003 + - TSK-004 + - TSK-005 + blocks: [] +--- -**Status**: pending -**Parent Ticket**: TKT-001 -**Estimate**: M -**Assignee**: (unassigned) -**Depends On**: TSK-003, TSK-004, TSK-005 +# TSK-006: Add Unit and Integration Tests ## Objective -Add comprehensive tests for webhook notification functionality. +Add comprehensive tests for webhook notification functionality to ensure reliability. + +--- ## Deliverables @@ -16,9 +28,20 @@ Add comprehensive tests for webhook notification functionality. - [ ] `backend/tests/integration/services/test_webhook_notification.py` - [ ] `backend/tests/integration/api/test_webhook_config.py` -## Test Cases +--- + +## Implementation Notes + +### Approach -### Unit: WebhookDeliveryService +1. Write unit tests for WebhookDeliveryService (mock HTTP) +2. Write integration tests for WebhookNotificationService (mock delivery) +3. Write API tests for webhook configuration endpoints +4. Ensure >80% coverage for new code + +### Test Cases + +#### Unit: WebhookDeliveryService 1. `test_successful_delivery` - 200 response returns True 2. `test_retry_on_5xx` - 500 response triggers retry, eventual success @@ -26,7 +49,7 @@ Add comprehensive tests for webhook notification functionality. 4. `test_timeout_handling` - Timeout triggers retry 5. `test_max_retries_exhausted` - Returns False after 3 failures -### Integration: WebhookNotificationService +#### Integration: WebhookNotificationService 1. `test_task_completed_triggers_webhook` - Event → webhook delivery 2. 
`test_task_failed_triggers_webhook` - Event → webhook delivery @@ -34,13 +57,13 @@ Add comprehensive tests for webhook notification functionality. 4. `test_ticket_url_overrides_project` - Correct URL resolution 5. `test_no_webhook_configured` - No error when URL is None -### Integration: API +#### Integration: API 1. `test_update_project_webhook_url` - PATCH project works 2. `test_update_ticket_webhook_url` - PATCH ticket works 3. `test_invalid_url_rejected` - Returns 422 for bad URL -## Implementation Notes +### Code Patterns Use `respx` or `pytest-httpx` for mocking HTTP requests. @@ -54,10 +77,53 @@ async def test_successful_delivery(): service = WebhookDeliveryService() result = await service.deliver("https://example.com/hook", {"test": True}) assert result is True + +@respx.mock +async def test_retry_on_5xx(): + route = respx.post("https://example.com/hook") + route.side_effect = [ + httpx.Response(500), + httpx.Response(500), + httpx.Response(200) + ] + service = WebhookDeliveryService() + result = await service.deliver("https://example.com/hook", {"test": True}) + assert result is True + assert route.call_count == 3 ``` +### References +- pytest-asyncio documentation +- respx documentation +- Existing test patterns in backend/tests/ + +--- + ## Acceptance Criteria -- All tests pass: `uv run pytest tests/ -k webhook` -- Coverage > 80% for new code -- Tests are fast (mock external HTTP) +- [ ] All tests pass: `uv run pytest tests/ -k webhook` +- [ ] Coverage > 80% for new code +- [ ] Tests are fast (mock external HTTP) +- [ ] No flaky tests + +--- + +## Testing Requirements + +### Run Tests +```bash +cd backend +uv run pytest tests/ -k webhook -v +uv run pytest tests/ -k webhook --cov=omoi_os/services/webhook --cov-report=term-missing +``` + +### Edge Cases +- Concurrent webhook deliveries +- Large payloads +- Unicode in payloads + +--- + +## Notes + +This is the final task - all other webhook tasks must complete first. 
diff --git a/.omoi_os/tasks/TSK-007-slack-formatter.md b/.omoi_os/tasks/TSK-007-slack-formatter.md new file mode 100644 index 00000000..bba5cc5e --- /dev/null +++ b/.omoi_os/tasks/TSK-007-slack-formatter.md @@ -0,0 +1,32 @@ +--- +id: TSK-007 +title: Implement Slack Message Formatter +status: pending +parent_ticket: TKT-002 +estimate: M +created: 2025-12-29 +assignee: null +dependencies: + depends_on: [] + blocks: [TSK-008] +--- + +# TSK-007: Implement Slack Message Formatter + +## Objective + +Create a Slack-specific message formatter that converts webhook payloads into Slack Block Kit format. + +## Implementation Details + +- Create `SlackFormatter` class in `services/webhook_formatters.py` +- Convert task events to Slack blocks with proper formatting +- Include action buttons for task links +- Support rich text and code blocks for error messages + +## Acceptance Criteria + +- [ ] SlackFormatter produces valid Block Kit JSON +- [ ] Messages include task status, title, description +- [ ] Error messages display in code blocks +- [ ] Unit tests cover all event types diff --git a/.omoi_os/tasks/TSK-008-discord-formatter.md b/.omoi_os/tasks/TSK-008-discord-formatter.md new file mode 100644 index 00000000..5ca397d2 --- /dev/null +++ b/.omoi_os/tasks/TSK-008-discord-formatter.md @@ -0,0 +1,32 @@ +--- +id: TSK-008 +title: Implement Discord Message Formatter +status: pending +parent_ticket: TKT-002 +estimate: M +created: 2025-12-29 +assignee: null +dependencies: + depends_on: [TSK-007] + blocks: [] +--- + +# TSK-008: Implement Discord Message Formatter + +## Objective + +Create a Discord-specific message formatter that converts webhook payloads into Discord embed format. 
+ +## Implementation Details + +- Create `DiscordFormatter` class in `services/webhook_formatters.py` +- Convert task events to Discord embeds +- Use color coding for status (green=success, red=failure) +- Include fields for task metadata + +## Acceptance Criteria + +- [ ] DiscordFormatter produces valid embed JSON +- [ ] Messages include colored status indicators +- [ ] Error messages display with proper formatting +- [ ] Unit tests cover all event types diff --git a/.omoi_os/tickets/TKT-001-webhook-notifications.md b/.omoi_os/tickets/TKT-001-webhook-notifications.md index 69b31e78..5503e7c5 100644 --- a/.omoi_os/tickets/TKT-001-webhook-notifications.md +++ b/.omoi_os/tickets/TKT-001-webhook-notifications.md @@ -1,16 +1,52 @@ -# TKT-001: Implement Webhook Notifications +--- +id: TKT-001 +title: Implement Webhook Notifications +status: backlog +priority: HIGH +estimate: L +created: 2025-12-29 +updated: 2025-12-29 +feature: webhook-notifications +requirements: + - REQ-WEBHOOK-CONFIG-001 + - REQ-WEBHOOK-CONFIG-002 + - REQ-WEBHOOK-EVENT-001 + - REQ-WEBHOOK-EVENT-002 + - REQ-WEBHOOK-EVENT-003 +design_ref: designs/webhook-notifications.md +tasks: + - TSK-001 + - TSK-002 + - TSK-003 + - TSK-004 + - TSK-005 + - TSK-006 +dependencies: + blocked_by: [] + blocks: [] + related: [] +--- -**Status**: backlog -**Priority**: HIGH -**Estimate**: L -**Phase**: PHASE_IMPLEMENTATION -**Requirements**: REQ-WEBHOOK-CONFIG-001, REQ-WEBHOOK-CONFIG-002, REQ-WEBHOOK-EVENT-001, REQ-WEBHOOK-EVENT-002, REQ-WEBHOOK-EVENT-003 -**Design Reference**: designs/webhook-notifications.md +# TKT-001: Implement Webhook Notifications ## Description Implement webhook notifications for task lifecycle events (completed, failed, agent stuck). This enables internal monitoring systems to receive real-time updates. Webhooks can be configured at project-level with optional per-ticket overrides. 
+### Context +Internal monitoring systems need to receive real-time notifications when tasks complete, fail, or when agents get stuck. This allows dashboards and alerting systems to respond quickly to workflow events. + +### Goals +- Enable webhook configuration at project and ticket levels +- Deliver notifications for key lifecycle events +- Provide reliable delivery with retry logic + +### Non-Goals +- External third-party integrations (Slack, Discord, etc.) - future enhancement +- Webhook authentication/signing - future enhancement + +--- + ## Acceptance Criteria - [ ] Project model has `webhook_url` field @@ -22,15 +58,57 @@ Implement webhook notifications for task lifecycle events (completed, failed, ag - [ ] Events trigger webhooks within 5-10 seconds - [ ] Failed deliveries are logged but don't block task completion -## Dependencies +--- + +## Technical Notes + +### Implementation Approach +1. Add model fields (TSK-001) +2. Create migration (TSK-002) +3. Implement delivery service (TSK-003) +4. Implement notification service (TSK-004) +5. Update API routes (TSK-005) +6. 
Add tests (TSK-006) + +### Key Files +- `backend/omoi_os/models/project.py` - Add webhook_url field +- `backend/omoi_os/models/ticket.py` - Add webhook_url field +- `backend/omoi_os/services/webhook_delivery.py` - HTTP delivery with retries +- `backend/omoi_os/services/webhook_notification.py` - Event subscription and dispatch + +### API Changes +- PATCH /api/v1/projects/{id} - accepts webhook_url +- PATCH /api/v1/tickets/{id} - accepts webhook_url + +### Database Changes +- projects.webhook_url (VARCHAR 2048, nullable) +- tickets.webhook_url (VARCHAR 2048, nullable) + +--- + +## Testing Strategy + +### Unit Tests +- WebhookDeliveryService retry logic +- URL resolution precedence + +### Integration Tests +- Event triggers webhook delivery +- API endpoints accept webhook URLs + +### Manual Testing +- Configure webhook URL and trigger a task completion +- Verify webhook payload matches schema + +--- + +## Rollback Plan + +1. Run `alembic downgrade -1` to remove columns +2. Revert code changes via git -- None (extends existing infrastructure) +--- -## Tasks +## Notes -- TSK-001: Add webhook_url to Project and Ticket models -- TSK-002: Create database migration -- TSK-003: Implement WebhookDeliveryService -- TSK-004: Implement WebhookNotificationService -- TSK-005: Update API routes for webhook configuration -- TSK-006: Add unit and integration tests +This extends existing EventBusService infrastructure. No external dependencies required. 
diff --git a/.omoi_os/tickets/TKT-002-webhook-integrations.md b/.omoi_os/tickets/TKT-002-webhook-integrations.md new file mode 100644 index 00000000..282d5647 --- /dev/null +++ b/.omoi_os/tickets/TKT-002-webhook-integrations.md @@ -0,0 +1,47 @@ +--- +id: TKT-002 +title: External Webhook Integrations (Slack, Discord) +status: backlog +priority: MEDIUM +estimate: L +created: 2025-12-29 +updated: 2025-12-29 +feature: webhook-integrations +requirements: + - REQ-WEBHOOK-SLACK-001 + - REQ-WEBHOOK-DISCORD-001 +design_ref: designs/webhook-integrations.md +tasks: + - TSK-007 + - TSK-008 +dependencies: + blocked_by: [TKT-001] + blocks: [] + related: [] +--- + +# TKT-002: External Webhook Integrations (Slack, Discord) + +## Description + +Add support for external integrations like Slack and Discord notifications. This builds on the webhook infrastructure from TKT-001 to add formatted messages for popular platforms. + +### Context +After implementing basic webhook notifications (TKT-001), users want native integrations with common platforms. This requires message formatting specific to each platform's API. 
+ +### Goals +- Slack-formatted webhook payloads +- Discord-formatted webhook payloads +- Platform detection from webhook URL + +### Non-Goals +- OAuth-based integrations (use incoming webhooks only) + +--- + +## Acceptance Criteria + +- [ ] Slack message formatting with blocks +- [ ] Discord embed formatting +- [ ] Auto-detect platform from URL pattern +- [ ] Fallback to generic JSON for unknown platforms diff --git a/backend/omoi_os/api/routes/specs.py b/backend/omoi_os/api/routes/specs.py index 56014733..dccaa3d9 100644 --- a/backend/omoi_os/api/routes/specs.py +++ b/backend/omoi_os/api/routes/specs.py @@ -78,7 +78,20 @@ async def _create_spec_async( session.add(new_spec) await session.commit() await session.refresh(new_spec) - return new_spec + + # Re-query with eager loading to avoid DetachedInstanceError + # when accessing relationships after session closes + result = await session.execute( + select(SpecModel) + .filter(SpecModel.id == new_spec.id) + .options( + selectinload(SpecModel.requirements).selectinload( + SpecRequirementModel.criteria + ), + selectinload(SpecModel.tasks), + ) + ) + return result.scalar_one() async def _get_spec_async( @@ -134,8 +147,20 @@ async def _update_spec_async( spec.phase = phase await session.commit() - await session.refresh(spec) - return spec + + # Re-query with eager loading to avoid DetachedInstanceError + # when accessing relationships after session closes + result = await session.execute( + select(SpecModel) + .filter(SpecModel.id == spec_id) + .options( + selectinload(SpecModel.requirements).selectinload( + SpecRequirementModel.criteria + ), + selectinload(SpecModel.tasks), + ) + ) + return result.scalar_one_or_none() async def _delete_spec_async(db: DatabaseService, spec_id: str) -> bool: @@ -159,6 +184,7 @@ async def _add_requirement_async( title: str, condition: str, action: str, + linked_design: Optional[str] = None, ) -> Optional[SpecRequirementModel]: """Add a requirement to a spec (ASYNC - non-blocking).""" 
async with db.get_async_session() as session: @@ -176,6 +202,7 @@ async def _add_requirement_async( condition=condition, action=action, status="pending", + linked_design=linked_design, ) session.add(new_req) await session.commit() @@ -191,6 +218,7 @@ async def _update_requirement_async( condition: Optional[str] = None, action: Optional[str] = None, status: Optional[str] = None, + linked_design: Optional[str] = None, ) -> Optional[SpecRequirementModel]: """Update a requirement (ASYNC - non-blocking).""" async with db.get_async_session() as session: @@ -214,6 +242,8 @@ async def _update_requirement_async( req.action = action if status is not None: req.status = status + if linked_design is not None: + req.linked_design = linked_design await session.commit() await session.refresh(req) @@ -506,6 +536,7 @@ class RequirementCreate(BaseModel): title: str condition: str action: str + linked_design: Optional[str] = None # Link to design section/ID class RequirementUpdate(BaseModel): @@ -513,6 +544,7 @@ class RequirementUpdate(BaseModel): condition: Optional[str] = None action: Optional[str] = None status: Optional[str] = None + linked_design: Optional[str] = None # Link to design section/ID class CriterionCreate(BaseModel): @@ -697,7 +729,7 @@ async def add_requirement( """Add a requirement to a spec.""" # Use async database operations (non-blocking) new_req = await _add_requirement_async( - db, spec_id, req.title, req.condition, req.action + db, spec_id, req.title, req.condition, req.action, req.linked_design ) if not new_req: raise HTTPException(status_code=404, detail="Spec not found") @@ -723,7 +755,8 @@ async def update_requirement( """Update a requirement.""" # Use async database operations (non-blocking) req = await _update_requirement_async( - db, spec_id, req_id, updates.title, updates.condition, updates.action, updates.status + db, spec_id, req_id, updates.title, updates.condition, updates.action, + updates.status, updates.linked_design ) if not req: raise 
HTTPException(status_code=404, detail="Requirement not found") diff --git a/backend/omoi_os/api/routes/tasks.py b/backend/omoi_os/api/routes/tasks.py index da13c3db..484bf1e8 100644 --- a/backend/omoi_os/api/routes/tasks.py +++ b/backend/omoi_os/api/routes/tasks.py @@ -410,6 +410,62 @@ async def list_tasks( ] +class TaskCreate(BaseModel): + """Request model for creating a task.""" + + ticket_id: str + title: str + description: str + task_type: str = "implementation" + priority: str = "MEDIUM" + phase_id: str = "PHASE_IMPLEMENTATION" + dependencies: Optional[Dict[str, Any]] = None # {"depends_on": ["task_id_1"]} + + +@router.post("", response_model=dict, status_code=201) +async def create_task( + task_data: TaskCreate, + queue: TaskQueueService = Depends(get_task_queue), +): + """ + Create a new task for a ticket. + + This endpoint allows direct task creation for testing and spec-driven development. + + Args: + task_data: Task creation data including ticket_id, title, description + queue: Task queue service for task creation + + Returns: + Created task with ID and details + """ + try: + task = queue.enqueue_task( + ticket_id=task_data.ticket_id, + phase_id=task_data.phase_id, + task_type=task_data.task_type, + description=task_data.description, + priority=task_data.priority, + dependencies=task_data.dependencies, + title=task_data.title, + ) + + return { + "id": task.id, + "ticket_id": task.ticket_id, + "phase_id": task.phase_id, + "task_type": task.task_type, + "title": task.title, + "description": task.description, + "priority": task.priority, + "status": task.status, + "dependencies": task.dependencies, + "created_at": task.created_at.isoformat(), + } + except Exception as e: + raise HTTPException(status_code=400, detail=str(e)) + + @router.get("/{task_id}/dependencies", response_model=dict) async def get_task_dependencies( task_id: str, diff --git a/docs/marketing/README.md b/docs/marketing/README.md new file mode 100644 index 00000000..e41e1284 --- /dev/null 
+++ b/docs/marketing/README.md @@ -0,0 +1,73 @@ +# Marketing Documentation + +> **Purpose**: All marketing-related strategy, positioning, niche targeting, and outreach documentation. + +--- + +## Quick Start: What to Read First + +1. **[Marketing Overview](./marketing_overview.md)** - Core positioning, messaging, one-liners, and buyer personas +2. **[Sub-Niche Targeting](./sub_niche_targeting.md)** - Software-focused sub-niches broken into 2 layers with specific targeting +3. **[Cross-Market Niche Ideas](./cross_market_niche_ideas.md)** - 15 non-software markets where OmoiOS applies + +--- + +## Document Index + +| Document | Purpose | When to Use | +|----------|---------|-------------| +| [Marketing Overview](./marketing_overview.md) | Core positioning, messaging, objection handling | Starting point for all marketing; reference for messaging | +| [Go-to-Market Strategy](./go_to_market_strategy.md) | Full GTM plan: launch sequence, channels, metrics, expansion | Planning launch phases; tracking progress | +| [Sub-Niche Targeting](./sub_niche_targeting.md) | Software sub-niches 2 layers deep with pain points + messaging | Choosing which niche to focus on first | +| [Cross-Market Niche Ideas](./cross_market_niche_ideas.md) | 15 non-software markets with evaluation framework | Exploring opportunities beyond software engineering | +| [Reality Outreach Playbook](./reality_contact_outreach_playbook.md) | 2-week outreach sprint with daily actions | Actually doing outreach; tracking reality contact | + +--- + +## The Strategy in One Page + +### 1. Pick ONE Niche +Don't be generic. Choose one specific sub-niche from either: +- **Software-focused**: See [Sub-Niche Targeting](./sub_niche_targeting.md) + - Recommended: Communication Integrations (Slack/Teams/Discord) or Audit Logging & Compliance +- **Cross-market**: See [Cross-Market Niche Ideas](./cross_market_niche_ideas.md) + - 15 industries from real estate to insurance to agriculture + +### 2. 
Validate with Reality +Use the [Reality Outreach Playbook](./reality_contact_outreach_playbook.md): +- 2-week sprint +- 60-120 touches +- Goal: 1-3 merged PRs + 1 repeat customer + +### 3. Prove Before Expanding +- 3-5 merged PRs in target niche +- 1 paying pilot or repeat customer +- Clear "this works" pattern +- THEN consider second niche + +### 4. Expand Methodically +Per [Go-to-Market Strategy](./go_to_market_strategy.md): +- Phase 0: Message + proof (2-4 weeks) +- Phase 1: Private beta (4-8 weeks) +- Phase 2: Public beta (8-12 weeks) +- Phase 3: Sales assist + +--- + +## Key Decisions Still Pending + +- [ ] **Which niche to start with** (software vs cross-market) +- [ ] **Pricing model** (per agent, per workspace, open-core) +- [ ] **Open source timing** (closed during validation vs OSS for trust/distribution) + +--- + +## Recent Updates + +| Date | Change | +|------|--------| +| 2025-12-29 | Added sub-niche targeting (2 layers deep) | +| 2025-12-29 | Added cross-market niche ideas (15 industries) | +| 2025-12-29 | Moved marketing docs to dedicated subdirectory | +| 2025-12-29 | Cross-referenced all documents | +| 2025-12-15 | Initial marketing documentation created | diff --git a/docs/marketing/cross_market_niche_ideas.md b/docs/marketing/cross_market_niche_ideas.md new file mode 100644 index 00000000..492010d2 --- /dev/null +++ b/docs/marketing/cross_market_niche_ideas.md @@ -0,0 +1,329 @@ +# Cross-Market Niche Ideas: Beyond Pure Software + +> **Status**: Starter ideas for exploration +> **Goal**: Identify low-risk niches at the intersection of software + other markets + +--- + +## The Insight + +OmoiOS automates spec-driven software execution. But "software" touches every industry. The lowest-risk opportunities may be where: + +1. **Software is a means, not the end** (they need software but aren't software companies) +2. **Specs are already well-defined** (industry standards, regulations, templates) +3. 
**Current solutions are expensive** (agencies, consultants, custom dev shops) +4. **Failure is recoverable** (not life-critical, not financial-critical) + +--- + +## Cross-Market Niches to Explore + +> **Note**: These complement the software-focused niches in [Sub-Niche Targeting](./sub_niche_targeting.md). Start with one from either list, not both simultaneously. + +### 1. Real Estate Tech (PropTech) + +**The Intersection**: Property managers, brokerages, and landlords need custom software but can't afford dev teams. + +| Sub-Niche | Pain Point | Opportunity | +|-----------|------------|-------------| +| **Property Management Portals** | "We use 5 different tools that don't talk to each other" | Custom tenant/owner portals that integrate their existing stack | +| **Listing Syndication Tools** | "Updating listings across Zillow/Redfin/MLS is manual hell" | Automated listing sync with custom branding | +| **Maintenance Request Systems** | "We're still using paper forms and phone calls" | Simple ticket → contractor assignment → completion tracking | + +**Why Low Risk**: Non-critical (a bug doesn't lose money immediately), clear workflows, template-heavy. + +**Who to Target**: Property management companies (50-500 units), boutique brokerages, real estate investors with 10+ properties. + +--- + +### 2. Legal Tech (Non-Litigation) + +**The Intersection**: Law firms need client portals, document automation, and workflow tools but pay $500/hr consultants. 
+ +| Sub-Niche | Pain Point | Opportunity | +|-----------|------------|-------------| +| **Client Intake Portals** | "New client onboarding takes 2 weeks of back-and-forth emails" | Automated intake forms → document generation → scheduling | +| **Contract Template Systems** | "We copy-paste from Word docs and hope we didn't miss a field" | Template libraries with variable substitution and version control | +| **Matter Management Dashboards** | "Partners can't see case status without asking associates" | Real-time matter tracking for partners/clients | + +**Why Low Risk**: Document-centric (easy to validate), template-driven (clear specs), non-litigation (mistakes don't lose cases). + +**Who to Target**: Small/mid-size firms (5-50 attorneys), especially corporate, real estate, immigration, estate planning. + +--- + +### 3. Healthcare Admin (Non-Clinical) + +**The Intersection**: Clinics and practices need admin tools but HIPAA makes custom dev expensive. + +| Sub-Niche | Pain Point | Opportunity | +|-----------|------------|-------------| +| **Patient Communication Portals** | "Appointment reminders, follow-ups, and forms are all manual" | Automated patient comms (non-PHI or HIPAA-compliant) | +| **Staff Scheduling Systems** | "We use a shared Google Sheet for 30 employees" | Custom scheduling with shift swaps, PTO tracking, coverage alerts | +| **Inventory & Supply Tracking** | "We run out of supplies because no one tracks usage" | Simple inventory management with reorder alerts | + +**Why Low Risk**: Administrative (not clinical), can start with non-PHI workflows, clear ROI (staff time savings). + +**Who to Target**: Multi-location practices (dental, optometry, physical therapy, veterinary), urgent care chains. + +--- + +### 4. Education & Training Operations + +**The Intersection**: Schools, training companies, and edtech need custom tools but can't justify dev budgets. 
+ +| Sub-Niche | Pain Point | Opportunity | +|-----------|------------|-------------| +| **Course Administration Portals** | "Managing enrollments, payments, and certificates is 3 different systems" | Unified course ops dashboard | +| **Instructor/Trainer Scheduling** | "Matching instructors to sessions is a spreadsheet nightmare" | Availability → assignment → notification automation | +| **Credentialing & Certification Tracking** | "We manually track 500 people's certifications in Excel" | Automated expiration alerts, renewal workflows, compliance reports | + +**Why Low Risk**: Administrative, template-heavy, clear success criteria (cert issued = done). + +**Who to Target**: Trade schools, corporate training departments, professional certification bodies, tutoring companies. + +--- + +### 5. Events & Hospitality Operations + +**The Intersection**: Event planners, venues, and hospitality need custom tools but rely on generic SaaS that doesn't fit. + +| Sub-Niche | Pain Point | Opportunity | +|-----------|------------|-------------| +| **Vendor Coordination Portals** | "I email 15 vendors for every event; half miss deadlines" | Vendor task lists, deadline tracking, automated reminders | +| **Venue Booking & Availability** | "Our booking calendar is a shared Google Cal with no integration" | Custom booking with deposits, contracts, and availability sync | +| **Event Day-Of Runsheets** | "Printed timelines are outdated by hour 2" | Real-time runsheet with mobile updates and role assignments | + +**Why Low Risk**: Event failure = bad reviews, not lawsuits. Clear timelines. Visual/tangible outputs. + +**Who to Target**: Event planning agencies, wedding venues, conference organizers, catering companies. + +--- + +### 6. Professional Services Operations (Non-Tech) + +**The Intersection**: Accountants, consultants, architects need client/project tools but aren't tech-native. 
+ +| Sub-Niche | Pain Point | Opportunity | +|-----------|------------|-------------| +| **Client Onboarding Workflows** | "Every new client is 20 emails and a shared folder" | Automated onboarding with doc collection, task assignment, status tracking | +| **Project/Engagement Tracking** | "We bill hourly but don't know where time goes until month-end" | Real-time project dashboards with time capture integration | +| **Deliverable Management** | "Final reports live in 5 different folders per client" | Client portals with deliverable history, approvals, feedback | + +**Why Low Risk**: Process-heavy, document-centric, clear deliverables. + +**Who to Target**: CPA firms, management consultants, architecture firms, engineering consultancies. + +--- + +### 7. E-commerce Operations (Non-Storefront) + +**The Intersection**: E-commerce brands need ops tools but Shopify apps don't cover everything. + +| Sub-Niche | Pain Point | Opportunity | +|-----------|------------|-------------| +| **Returns & Warranty Processing** | "Returns are manual emails and spreadsheets" | Automated return requests, RMA generation, refund tracking | +| **Wholesale/B2B Order Portals** | "Our wholesale customers email orders; we re-key into Shopify" | Custom B2B portal with net terms, bulk pricing, reorder | +| **Influencer/Affiliate Management** | "Tracking 50 influencers and their codes is chaos" | Influencer dashboard with code tracking, payout calculation, content calendar | + +**Why Low Risk**: Operational (not storefront), clear workflows, measurable outcomes. + +**Who to Target**: DTC brands ($1M-$20M revenue), wholesale distributors, influencer-driven brands. + +--- + +### 8. Nonprofit & Membership Organizations + +**The Intersection**: Nonprofits need donor/member tools but can't afford custom dev. 
+ +| Sub-Niche | Pain Point | Opportunity | +|-----------|------------|-------------| +| **Donor Management Portals** | "We use Salesforce but it's configured wrong and nobody can fix it" | Custom donor dashboards, giving history, communication preferences | +| **Volunteer Coordination** | "Volunteer scheduling is 100 emails per event" | Shift signup, skills matching, hours tracking, recognition | +| **Member Directory & Networking** | "Our member directory is a PDF from 2019" | Searchable member profiles with opt-in networking | + +**Why Low Risk**: Mission-driven (forgiving users), clear workflows, high goodwill. + +**Who to Target**: Mid-size nonprofits ($1M-$20M budget), professional associations, alumni organizations. + +--- + +## Evaluation Framework + +When exploring these, score each on: + +| Criterion | Question | Score 1-5 | +|-----------|----------|-----------| +| **Spec Clarity** | Are requirements well-defined or template-able? | | +| **Failure Cost** | What happens if something breaks? | | +| **Current Cost** | What do they pay now (agencies, manual labor, bad tools)? | | +| **Decision Maker** | Can we reach someone who can say yes? | | +| **Repeatability** | Can we do 10 of these with similar specs? | | +| **Demo-ability** | Can we show value in 30 minutes? | | + +**Ideal Score**: 24+ out of 30 + +--- + +## Research Questions for Tomorrow + +1. **Which of these markets do I have existing connections in?** +2. **Which has the most active online communities I can observe?** (Reddit, Facebook Groups, industry forums) +3. **Which has the clearest "template" workflows?** (industry standards, regulations, common practices) +4. **Which has the most expensive current solutions?** (look for $10K+ agency projects or $500+/mo SaaS) +5. **Which has buyers who are already tech-forward?** (using Zapier, Airtable, no-code tools = good sign) + +--- + +## Quick Validation Tactics + +For any niche that looks promising: + +1. 
**Search "[industry] + software frustrations" on Reddit/Twitter** +2. **Look at Upwork/Fiverr for common project types in that space** +3. **Check Product Hunt for failed/struggling tools in the category** +4. **Find 3-5 agencies serving that market; look at their project types** +5. **Join 1-2 industry Slack/Discord communities; observe pain points** + +--- + +--- + +### 9. Construction & Trades Operations + +**The Intersection**: Contractors, builders, and trades need job management but use paper or clunky software. + +| Sub-Niche | Pain Point | Opportunity | +|-----------|------------|-------------| +| **Job Costing & Estimate Tracking** | "We bid jobs then have no idea if we made money until it's over" | Real-time job cost tracking with material/labor breakdowns | +| **Subcontractor Coordination** | "Scheduling 8 subs across 3 job sites is phone tag hell" | Sub scheduling, availability, conflict detection, automated reminders | +| **Permit & Inspection Tracking** | "We missed an inspection and it cost us 2 weeks" | Permit status dashboard, inspection scheduling, document management | + +**Why Low Risk**: Process-heavy, template-driven (permits are standardized), high pain tolerance for software. + +**Who to Target**: General contractors (10-50 employees), specialty trades (HVAC, electrical, plumbing), construction project managers. + +--- + +### 10. Fitness & Wellness Studios + +**The Intersection**: Studios need member management and scheduling but gym software doesn't fit boutique models. 
+ +| Sub-Niche | Pain Point | Opportunity | +|-----------|------------|-------------| +| **Class Booking & Waitlist Management** | "Our booking software doesn't handle waitlists or late cancellations well" | Custom booking rules, waitlist automation, penalty tracking | +| **Membership & Package Tracking** | "We sell class packs but tracking who has what left is a nightmare" | Package balance dashboards, expiration alerts, usage analytics | +| **Instructor Pay Calculation** | "Calculating instructor pay with per-class + bonus is 3 hours every pay period" | Automated pay calculation with custom rules per instructor | + +**Why Low Risk**: Non-critical, clear workflows, high volume of similar businesses. + +**Who to Target**: Yoga/pilates studios, CrossFit boxes, personal training studios, dance studios. + +--- + +### 11. Agriculture & Farm Operations + +**The Intersection**: Farms need operational software but ag-tech is either enterprise or too simple. + +| Sub-Niche | Pain Point | Opportunity | +|-----------|------------|-------------| +| **Crop Planning & Field Tracking** | "We use spreadsheets to track what's planted where and when to harvest" | Field maps, planting schedules, harvest forecasting | +| **Equipment Maintenance Logs** | "The tractor broke because we forgot the 500-hour service" | Maintenance schedules, usage tracking, service reminders | +| **Direct-to-Consumer Order Management** | "CSA box orders come from 3 different places and we pack by memory" | Unified order management, packing lists, delivery routing | + +**Why Low Risk**: Seasonal (clear deadlines), physical outputs (easy to verify), underserved market. + +**Who to Target**: Mid-size farms (50-500 acres), specialty crop producers, CSA/farm-to-table operations. + +--- + +### 12. Logistics & Fleet Operations (Small-Medium) + +**The Intersection**: Small fleets need dispatch/tracking but enterprise TMS is overkill. 
+ +| Sub-Niche | Pain Point | Opportunity | +|-----------|------------|-------------| +| **Driver Dispatch & Route Assignment** | "Dispatch is our owner on WhatsApp with 15 drivers" | Simple dispatch board, route optimization, driver status | +| **Delivery Proof & Customer Communication** | "Customers call asking 'where's my stuff?' and we don't know" | Real-time tracking, photo proof of delivery, automated ETAs | +| **Fleet Maintenance Scheduling** | "DOT inspection came up and we weren't ready" | Maintenance calendars, compliance tracking, service alerts | + +**Why Low Risk**: Operational (not safety-critical software), clear workflows, measurable outcomes. + +**Who to Target**: Regional delivery companies (5-50 trucks), courier services, specialty haulers. + +--- + +### 13. Creative Agencies (Non-Dev) + +**The Intersection**: Design/marketing agencies need project management but creative workflows are different. + +| Sub-Niche | Pain Point | Opportunity | +|-----------|------------|-------------| +| **Creative Brief & Asset Management** | "Client assets are in 4 Dropbox folders, 2 email threads, and a Slack DM" | Unified brief + asset repository per client | +| **Revision Tracking & Approval Workflows** | "Which version did the client approve? V3 or V3_final_FINAL?" | Version control for creative assets with approval timestamps | +| **Retainer Hour Tracking** | "We sold 20 hours/month retainers but don't know who's over/under" | Real-time retainer consumption dashboards | + +**Why Low Risk**: Process overhead is the pain (not mission-critical), clear deliverables, recurring clients. + +**Who to Target**: Branding agencies, marketing agencies, video production studios. + +--- + +### 14. Religious & Community Organizations + +**The Intersection**: Churches, temples, community centers need member/event management but can't afford custom dev. 
+ +| Sub-Niche | Pain Point | Opportunity | +|-----------|------------|-------------| +| **Member & Giving Management** | "Our member database is an Excel file from 2018" | Member directory, giving history, communication preferences | +| **Event & Facility Booking** | "Who booked the fellowship hall for Saturday?" | Room booking, event calendar, conflict detection | +| **Volunteer Coordination** | "We need 40 volunteers for the festival and sign-ups are chaos" | Volunteer shifts, skills matching, reminder automation | + +**Why Low Risk**: High goodwill (forgiving users), clear templates, strong word-of-mouth potential. + +**Who to Target**: Mid-size congregations (200-2000 members), community centers, fraternal organizations. + +--- + +### 15. Insurance Agencies (Independent) + +**The Intersection**: Independent agencies need client/policy management but carrier systems don't integrate. + +| Sub-Niche | Pain Point | Opportunity | +|-----------|------------|-------------| +| **Client Policy Overview Dashboards** | "To see all of a client's policies I open 4 different carrier portals" | Unified client view across carriers | +| **Renewal Tracking & Outreach** | "We miss renewals because the reminder system is my memory" | Renewal calendar, automated outreach sequences, follow-up tracking | +| **Quote Comparison Tools** | "Comparing 5 carrier quotes is copy-paste into a spreadsheet" | Quote aggregation and comparison presentations | + +**Why Low Risk**: Document-centric, deadline-driven (renewal dates), high margin per client. + +**Who to Target**: Independent insurance agencies (3-20 agents), insurance brokers. + +--- + +## Next Steps (When You Wake Up) + +1. Pick 2-3 of these that resonate or where you have connections +2. Spend 30 min per niche on validation tactics above +3. Identify one specific sub-niche with highest score on evaluation framework +4. Draft a single landing page headline + 3 bullet points for that niche +5. 
Find 5 potential targets to reach out to + +--- + +## Related Documents + +- [Sub-Niche Targeting](./sub_niche_targeting.md) - Software-focused sub-niches (2 layers deep) +- [Go-to-Market Strategy](./go_to_market_strategy.md) - Full GTM plan and launch sequence +- [Reality Outreach Playbook](./reality_contact_outreach_playbook.md) - Outreach tactics and templates +- [Marketing Overview](./marketing_overview.md) - Core positioning and messaging + +--- + +## Notes Space + +*Add thoughts, connections, or new ideas here as you explore:* + +- +- +- diff --git a/docs/go_to_market_strategy.md b/docs/marketing/go_to_market_strategy.md similarity index 94% rename from docs/go_to_market_strategy.md rename to docs/marketing/go_to_market_strategy.md index 81a773a0..d59bdf9f 100644 --- a/docs/go_to_market_strategy.md +++ b/docs/marketing/go_to_market_strategy.md @@ -1,9 +1,16 @@ # OmoiOS — Go-to-Market Strategy -**Created**: 2025-12-15 -**Status**: Draft +**Created**: 2025-12-15 +**Updated**: 2025-12-29 +**Status**: Draft **Purpose**: Define target users, positioning, launch strategy, acquisition channels, metrics, expansion, and an open source strategy for OmoiOS. +> **Related Documents**: +> - [Marketing Overview](./marketing_overview.md) - Core positioning and messaging +> - [Sub-Niche Targeting](./sub_niche_targeting.md) - Specific software sub-niches (2 layers deep) +> - [Cross-Market Niche Ideas](./cross_market_niche_ideas.md) - Non-software market opportunities +> - [Reality Outreach Playbook](./reality_contact_outreach_playbook.md) - Outreach tactics + --- ## Table of contents @@ -157,7 +164,11 @@ Instead claim: Avoid early: - Large greenfield rewrites -- “Magic product work” with unclear requirements +- "Magic product work" with unclear requirements + +> **Deep Dive**: See [Sub-Niche Targeting](./sub_niche_targeting.md) for these broken into 2 layers with specific targeting, pain points, and messaging hooks. 
+> +> **Beyond Software**: See [Cross-Market Niche Ideas](./cross_market_niche_ideas.md) for 15 industries outside pure software where spec-driven execution applies (real estate, legal, healthcare admin, etc.). ### 5.4 Pricing direction (choose one early) - **Per active agent / runtime** (transparent to buyer, maps to cost) diff --git a/docs/marketing_overview.md b/docs/marketing/marketing_overview.md similarity index 85% rename from docs/marketing_overview.md rename to docs/marketing/marketing_overview.md index d057bc48..8878bcbf 100644 --- a/docs/marketing_overview.md +++ b/docs/marketing/marketing_overview.md @@ -1,9 +1,16 @@ # OmoiOS Marketing Overview -**Created**: 2025-12-15 -**Status**: Draft +**Created**: 2025-12-15 +**Updated**: 2025-12-29 +**Status**: Draft **Purpose**: Clear buyer-facing positioning, messaging, and focus for OmoiOS. +> **Related Documents**: +> - [Sub-Niche Targeting](./sub_niche_targeting.md) - Specific software sub-niches (2 layers deep) +> - [Cross-Market Niche Ideas](./cross_market_niche_ideas.md) - Non-software market opportunities +> - [Go-to-Market Strategy](./go_to_market_strategy.md) - Full GTM plan and launch sequence +> - [Reality Outreach Playbook](./reality_contact_outreach_playbook.md) - Outreach tactics + --- ## Product in one sentence @@ -144,6 +151,23 @@ Turn feature requests into reviewed PRs with spec-driven, multi-agent workflows. --- -## The focus statement (so you don’t drift) +## The focus statement (so you don't drift) **OmoiOS is for teams who want autonomous execution with oversight: fewer coordination cycles, adaptive workflows via discovery, and reviewable PR-based delivery.** + +--- + +## Niche Focus (pick one, prove it, expand) + +The messaging above is foundational, but winning requires **niche focus**. 
See: + +- **[Sub-Niche Targeting](./sub_niche_targeting.md)** - Software-focused niches broken into 2 layers: + - Primary recommendation: Communication Platform Integrations (Slack/Teams/Discord) + - Secondary recommendation: Audit Logging & Compliance Features + +- **[Cross-Market Niche Ideas](./cross_market_niche_ideas.md)** - Non-software markets where spec-driven execution applies: + - 15 industries from real estate to insurance to agriculture + - Evaluation framework for scoring opportunities + - Quick validation tactics + +**Rule**: Pick one niche. Prove it works (3-5 merged PRs, 1 repeat customer). Then expand. diff --git a/docs/reality_contact_outreach_playbook.md b/docs/marketing/reality_contact_outreach_playbook.md similarity index 89% rename from docs/reality_contact_outreach_playbook.md rename to docs/marketing/reality_contact_outreach_playbook.md index 1636f59f..2acbedd1 100644 --- a/docs/reality_contact_outreach_playbook.md +++ b/docs/marketing/reality_contact_outreach_playbook.md @@ -1,9 +1,16 @@ # Reality-Contact Outreach Playbook (2 Weeks) -**Created**: 2025-12-15 -**Owner**: Founder +**Created**: 2025-12-15 +**Updated**: 2025-12-29 +**Owner**: Founder **Purpose**: Increase contact with reality by forcing external, falsifiable outcomes: repo access → PR opened → PR merged → repeat ticket → paid pilot. +> **Related Documents**: +> - [Marketing Overview](./marketing_overview.md) - Core positioning and messaging +> - [Go-to-Market Strategy](./go_to_market_strategy.md) - Full GTM plan +> - [Sub-Niche Targeting](./sub_niche_targeting.md) - Specific software sub-niches with targeting +> - [Cross-Market Niche Ideas](./cross_market_niche_ideas.md) - Non-software market opportunities + --- ## The goal (what “progress” means) @@ -66,8 +73,10 @@ Copy this block into Notes and update once/day: ### Day 1 (set up + list building) - Choose your **wedge** (one sentence). 
+ - See [Sub-Niche Targeting](./sub_niche_targeting.md) for software-focused wedges (recommended: Communication Integrations or Audit Logging) + - See [Cross-Market Niche Ideas](./cross_market_niche_ideas.md) for non-software wedges (real estate, legal, healthcare admin, etc.) - Create a list of **30 targets** for ONE approach (2A/2B/2C) + 10 backups. -- Prepare: short Loom or 2–4 screenshots showing the “ticket → PR” path (optional but helpful). +- Prepare: short Loom or 2–4 screenshots showing the "ticket → PR" path (optional but helpful). ### Days 2–4 (outreach burst) - Send **10 touches/day**. diff --git a/docs/marketing/sub_niche_targeting.md b/docs/marketing/sub_niche_targeting.md new file mode 100644 index 00000000..02df3726 --- /dev/null +++ b/docs/marketing/sub_niche_targeting.md @@ -0,0 +1,199 @@ +# Marketing Sub-Niches: Two Layers Deep + +> **Purpose**: Specific, actionable sub-niches for focused marketing efforts. +> **Related**: [Marketing Overview](./marketing_overview.md) | [Go-to-Market Strategy](./go_to_market_strategy.md) | [Product Vision](../product_vision.md) + +--- + +## Executive Summary + +Generic marketing doesn't work. This document breaks down OmoiOS's target market into **specific sub-niches at two layers of depth**, with clear targeting criteria, pain points, and messaging hooks. + +**Recommended Focus**: +1. **Primary**: Communication Platform Integrations (Slack/Teams/Discord) +2. **Secondary**: Audit Logging & Compliance Features + +--- + +## Layer 1: Internal Tools & Admin Panels + +### Layer 2 Sub-Niches + +| Sub-Niche | Specific Target | Pain Point | Message Hook | +|-----------|-----------------|------------|--------------| +| **1a. Multi-tenant SaaS Admin Dashboards** | SaaS CTOs with 50-500 customers | "Every customer wants custom reports/dashboards but we can't justify the headcount" | "Ship customer-facing admin panels 10x faster" | +| **1b. 
Developer Portal & Documentation Sites** | Platform team leads at API companies | "Our API docs are always out of date; updating them is low-priority work" | "Keep developer portals in sync with your API automatically" | +| **1c. Internal Ops Tooling (CS/Support)** | Engineering managers at B2B SaaS | "Customer success needs 12 internal tools; we built 2 last year" | "Turn your CS team's feature requests into shipped tools in days, not quarters" | + +### Best Sub-Sub-Niche: **1c. Internal Ops Tooling for CS/Support** + +**Why**: These requests have clear specs (customer success knows exactly what they need), low risk (internal-only), and high volume (every team needs 5-10 of these). + +--- + +## Layer 1: Integrations (APIs, Webhooks, Third-Party) + +### Layer 2 Sub-Niches + +| Sub-Niche | Specific Target | Pain Point | Message Hook | +|-----------|-----------------|------------|--------------| +| **2a. Payment Processor Integrations** | CTOs at marketplace/fintech-adjacent SaaS | "Stripe Connect + PayPal + ACH took 6 months and 2 engineers" | "Payment integrations with compliance-ready code in weeks" | +| **2b. Communication Platform Integrations** | Engineering managers at B2B SaaS | "Every customer wants Slack/Teams/Discord notifications; we support only Slack" | "Ship notification integrations across all platforms without dedicated headcount" | +| **2c. CRM & Sales Tool Integrations** | CTOs at sales-driven SaaS | "Salesforce sync is always broken; HubSpot integration is on the roadmap forever" | "CRM integrations that actually stay in sync" | + +### Best Sub-Sub-Niche: **2b. 
Communication Platform Integrations** (RECOMMENDED PRIMARY) + +**Why**: +- High volume (every B2B SaaS needs these) +- Well-documented APIs (Slack/Discord/Teams have great docs) +- Clear success criteria (message delivered = done) +- Low risk to customer (if integration fails, nothing breaks) +- Easy to demonstrate value (before: 3 weeks; after: 3 days) + +**Marketing Message**: *"Your customers want Slack notifications. And Teams. And Discord. And email. Ship all four in a week instead of a quarter."* + +--- + +## Layer 1: Bounded Backend Features + +### Layer 2 Sub-Niches + +| Sub-Niche | Specific Target | Pain Point | Message Hook | +|-----------|-----------------|------------|--------------| +| **3a. User Settings & Preferences Systems** | Mid-market SaaS engineering managers | "User settings are scattered across 5 services; every feature adds more technical debt" | "Unified user preferences with proper architecture, shipped in days" | +| **3b. Audit Logging & Compliance Features** | CTOs at B2B SaaS pursuing enterprise customers | "SOC2 requires audit logging; it's been on the backlog for 18 months" | "Compliance-ready audit logging without diverting your feature team" | +| **3c. Notification Systems (In-app, Email, Push)** | Engineering managers at growing SaaS | "We built email notifications; push and in-app are still missing after 2 years" | "Multi-channel notification system with preferences, templates, and delivery tracking" | + +### Best Sub-Sub-Niche: **3b. Audit Logging & Compliance** (RECOMMENDED SECONDARY) + +**Why**: +- High urgency (sales blocked without it) +- Clear specs (SOC2 requirements are documented) +- Visible ROI (enables enterprise deals worth $100K+) +- Bounded scope (audit logging is a known pattern) +- Decision-maker visibility (CTO knows this is blocking sales) + +**Marketing Message**: *"Stop losing enterprise deals to compliance gaps. 
Get audit logging that passes SOC2 review—shipped in days, not months."* + +--- + +## Layer 1: Technical Debt & Refactors + +### Layer 2 Sub-Niches + +| Sub-Niche | Specific Target | Pain Point | Message Hook | +|-----------|-----------------|------------|--------------| +| **4a. Framework/Dependency Upgrades** | CTOs at 5-10 year old SaaS products | "We're 3 major versions behind on React/Django/Rails; upgrading is a 6-month project" | "Framework upgrades with automated test validation, not 6-month rewrites" | +| **4b. Monolith → Service Extraction** | Staff engineers at scaling SaaS | "We need to extract auth/billing into services but can't justify stopping feature work" | "Extract services from your monolith without freezing your roadmap" | +| **4c. Test Coverage Improvement** | Engineering managers at fast-growing startups | "We have 12% test coverage; every deploy is scary" | "Add comprehensive test coverage to existing code without rewriting it" | + +### Best Sub-Sub-Niche: **4a. Framework/Dependency Upgrades** + +**Why**: Quantifiable scope (version X → version Y), clear success criteria (tests pass), high business value (security/performance), but often deprioritized (good for "finally get this done" messaging). + +--- + +## Layer 1: Agencies & Fractional CTOs (Multiplier Segment) + +### Layer 2 Sub-Niches + +| Sub-Niche | Specific Target | Pain Point | Message Hook | +|-----------|-----------------|------------|--------------| +| **5a. WordPress/Webflow Agency → SaaS Expansion** | Digital agency owners wanting to offer SaaS features | "Clients want custom features but we only do marketing sites" | "Add custom SaaS features to client projects without hiring backend devs" | +| **5b. MVP Development Agencies** | Dev shops that build MVPs for startups | "We bid fixed price but every project goes over; margins are razor thin" | "Ship MVPs in half the time; double your margins" | +| **5c. 
Fractional CTOs Managing Multiple Clients** | Solo fractional CTOs with 3-8 clients | "I'm managing 5 backlogs with no engineers to assign work to" | "Your autonomous engineering team across all your client repos" | + +### Best Sub-Sub-Niche: **5c. Fractional CTOs** + +**Why**: Each one manages multiple repos (high leverage), they're technical enough to trust automation, and they're the decision maker (short sales cycle). + +--- + +## Recommended Focus: Start With These Two + +### PRIMARY: Communication Platform Integrations (2b) + +**Why this specific sub-niche:** +- Spec-driven approach is perfect for integrations (clear APIs, documented requirements) +- High volume of similar requests (every SaaS needs Slack + Teams + Discord + Email) +- Success is measurable (message delivered = done) +- Low risk to customer (if integration fails, nothing breaks) +- Easy to demonstrate value (before: 3 weeks; after: 3 days) + +**Target Persona**: Engineering Manager at B2B SaaS (Series A-C), managing 5-20 engineers, with incomplete notification system. + +**Outreach Angle**: Look for companies with Slack integration but missing Teams/Discord, or vice versa. + +### SECONDARY: Audit Logging & Compliance (3b) + +**Why this specific sub-niche:** +- SOC2/HIPAA requirements are well-documented specs (fits spec-driven approach) +- High urgency (enterprise deals blocked without it) +- Clear ROI story (enables deals worth $100K+) +- Bounded scope (audit logging is a known pattern) +- Visible to decision makers (CTO knows this is blocking sales) + +**Target Persona**: CTO at B2B SaaS (Series A-B), pursuing first enterprise customers, blocked by compliance requirements. + +**Outreach Angle**: Look for companies announcing SOC2 certification efforts or enterprise pricing tiers. 
+ +--- + +## Why These Two, Not the Others + +| Rejected Sub-Niche | Reason | +|-------------------|--------| +| Internal Ops Tooling (1c) | Harder to prove ROI externally; internal users are forgiving | +| Payment Integrations (2a) | Higher risk; compliance concerns; longer sales cycle | +| Framework Upgrades (4a) | Requires deep trust; scary to let AI touch production monolith | +| MVP Agencies (5b) | Price-sensitive; harder to show differentiation | + +--- + +## Execution Playbook + +### Week 1-2: Validate Primary Niche (Communication Integrations) + +1. **Create landing page variant** targeting notification/integration pain +2. **Build demo workflow**: Slack integration for sample B2B SaaS +3. **Identify 20 target companies** with incomplete notification systems +4. **Outreach to 10 engineering managers** + +### Week 3-4: Get Real Results + +5. **Get 2-3 repos with access granted** +6. **Open 3-5 PRs** for integration features +7. **Get 1-2 PRs merged** +8. **Document as case studies** + +### Week 5-6: Expand or Pivot + +If primary niche converts: +- Double down on communication integrations +- Build playbook for common patterns +- Create pre-built specs for Slack/Teams/Discord + +If primary niche doesn't convert: +- Pivot to secondary niche (audit logging) +- Apply same 2-week validation sprint + +--- + +## Measuring Success + +| Metric | Target | Timeframe | +|--------|--------|-----------| +| Outreach response rate | >15% | Week 2 | +| Repos with access granted | 2-3 | Week 3 | +| PRs opened | 3-5 | Week 4 | +| PRs merged | 1-2 | Week 5 | +| Paid pilot interest | 1 | Week 6 | + +--- + +## Related Documents + +- [Marketing Overview](./marketing_overview.md) - High-level positioning +- [Go-to-Market Strategy](./go_to_market_strategy.md) - Full GTM plan +- [Reality Contact Outreach Playbook](./reality_contact_outreach_playbook.md) - Outreach tactics +- [Product Vision](../product_vision.md) - Product capabilities diff --git 
a/docs/plans/spec_driven_dev_dependencies_plan.md b/docs/plans/spec_driven_dev_dependencies_plan.md new file mode 100644 index 00000000..f3dbbd1d --- /dev/null +++ b/docs/plans/spec_driven_dev_dependencies_plan.md @@ -0,0 +1,530 @@ +# Plan: Add Dependencies to Spec-Driven-Dev Skill + +**Created**: 2025-12-29 +**Status**: Draft +**Purpose**: Add ticket/task dependency support to the spec-driven-dev skill and create programmatic scanning utilities for direct API access + +--- + +## Executive Summary + +This plan addresses two requirements: +1. **Add dependency support** to the spec-driven-dev skill's ticket and task templates +2. **Create programmatic scanning utilities** to scan tickets/tasks and call the API directly (bypassing MCP server) + +--- + +## Part 1: Add Ticket and Task Dependencies + +### Current State Analysis + +The existing templates already have dependency sections, but they are: +- **Ticket template** (`references/ticket_template.md`): Has "Blocks", "Blocked By", and "Related" sections - but these are freeform text +- **Task template** (`references/task_template.md`): Has "Requires" and "Provides" sections - also freeform text + +The **backend database models** already support dependencies: +- `Task.dependencies`: JSONB field with structure `{"depends_on": ["task_id_1", "task_id_2"]}` +- `Task.parent_task_id`: FK to parent task for hierarchy +- `Ticket` model: Currently has no explicit dependency fields (uses blocking overlay mechanism instead) + +### Proposed Changes + +#### 1.1 Update Ticket Template with Structured Dependencies + +**File**: `.claude/skills/spec-driven-dev/references/ticket_template.md` + +Add structured dependency format: + +```markdown +## Dependencies + +### Blocks (other tickets waiting on this) + +- TKT-003: Cannot start auth until user model exists + +### Blocked By (tickets that must complete first) + +- TKT-001: Requires database schema from infrastructure ticket + +### Related (informational, not blocking) + +- TKT-005: 
Same feature area, may share components +``` + +Add machine-readable frontmatter: + +```yaml +--- +id: TKT-002 +title: Implement User Authentication +status: backlog +priority: HIGH +estimate: L +created: 2025-12-29 +dependencies: + blocks: [TKT-003, TKT-004] + blocked_by: [TKT-001] + related: [TKT-005] +--- +``` + +#### 1.2 Update Task Template with Structured Dependencies + +**File**: `.claude/skills/spec-driven-dev/references/task_template.md` + +Add structured dependency format that matches the backend JSONB schema: + +```yaml +--- +id: TSK-001 +title: Implement CRDT Data Types +status: pending +parent_ticket: TKT-001 +estimate: M +assignee: null +dependencies: + depends_on: [TSK-002, TSK-003] # Must complete first + blocks: [TSK-005, TSK-006] # Cannot start until this completes +--- +``` + +#### 1.3 Update SKILL.md Documentation + +Add section explaining dependency management: + +```markdown +## Dependency Management + +### Ticket Dependencies +Tickets can have three types of relationships: +- **blocked_by**: Tickets that must complete before this can start +- **blocks**: Tickets waiting on this one +- **related**: Informational links (non-blocking) + +### Task Dependencies +Tasks use the same schema as the backend: +- **depends_on**: Array of task IDs that must complete first +- **blocks**: Array of task IDs waiting on this one + +### Best Practices +1. Keep dependency chains short (max 3-4 levels) +2. Avoid circular dependencies +3. Mark infrastructure tasks as blockers for feature tasks +4. Use `related` for informational links, not blocking +``` + +--- + +## Part 2: Programmatic Ticket/Task Scanning + +### Goal + +Create Python utilities that: +1. Scan `.omoi_os/` directory for tickets and tasks +2. Parse frontmatter and content +3. Provide structured data for API calls +4. 
Optionally sync to backend API directly + +### Proposed Implementation + +#### 2.1 Create Parser Module + +**File**: `.claude/skills/spec-driven-dev/scripts/parse_specs.py` + +```python +""" +Parse .omoi_os/ ticket and task files into structured data. + +Usage: + python parse_specs.py --list-tickets + python parse_specs.py --list-tasks + python parse_specs.py --get-ticket TKT-001 + python parse_specs.py --get-dependencies TKT-001 + python parse_specs.py --export-json +""" + +from dataclasses import dataclass +from pathlib import Path +from typing import Optional +import yaml +import re + +@dataclass +class TicketDependencies: + blocks: list[str] + blocked_by: list[str] + related: list[str] + +@dataclass +class ParsedTicket: + id: str + title: str + status: str + priority: str + estimate: str + description: str + acceptance_criteria: list[str] + tasks: list[str] + dependencies: TicketDependencies + raw_content: str + +@dataclass +class TaskDependencies: + depends_on: list[str] + blocks: list[str] + +@dataclass +class ParsedTask: + id: str + title: str + status: str + parent_ticket: str + estimate: str + assignee: Optional[str] + objective: str + deliverables: list[str] + acceptance_criteria: list[str] + dependencies: TaskDependencies + raw_content: str + +class SpecParser: + """Parse spec files from .omoi_os/ directory.""" + + def __init__(self, root_dir: Path = None): + self.root = root_dir or self._find_project_root() + self.omoi_dir = self.root / ".omoi_os" + + def list_tickets(self) -> list[ParsedTicket]: ... + def list_tasks(self) -> list[ParsedTask]: ... + def get_ticket(self, ticket_id: str) -> ParsedTicket: ... + def get_task(self, task_id: str) -> ParsedTask: ... + def get_ticket_dependencies(self, ticket_id: str) -> dict: ... + def get_task_dependencies(self, task_id: str) -> dict: ... 
+ def get_ready_tasks(self) -> list[ParsedTask]: + """Return tasks with all dependencies satisfied.""" + def get_dependency_graph(self) -> dict: + """Return full dependency graph for visualization.""" + def export_json(self) -> dict: + """Export all specs as JSON for API calls.""" +``` + +#### 2.2 Create API Client Module + +**File**: `.claude/skills/spec-driven-dev/scripts/api_client.py` + +```python +""" +Direct API client for OmoiOS backend (bypasses MCP server). + +Usage: + python api_client.py --sync-tickets # Push local tickets to API + python api_client.py --sync-tasks # Push local tasks to API + python api_client.py --list-remote # List tickets/tasks from API + python api_client.py --diff # Show diff between local and remote +""" + +import httpx +from parse_specs import SpecParser, ParsedTicket, ParsedTask + +class OmoiOSClient: + """Direct HTTP client for OmoiOS API.""" + + def __init__(self, base_url: str = "http://localhost:18000"): + self.base_url = base_url + self.client = httpx.AsyncClient(timeout=30.0) + + # Ticket operations + async def create_ticket(self, ticket: ParsedTicket) -> dict: ... + async def update_ticket(self, ticket_id: str, data: dict) -> dict: ... + async def get_ticket(self, ticket_id: str) -> dict: ... + async def list_tickets(self, project_id: str = None) -> list[dict]: ... + async def delete_ticket(self, ticket_id: str) -> bool: ... + + # Task operations + async def create_task(self, task: ParsedTask) -> dict: ... + async def update_task(self, task_id: str, data: dict) -> dict: ... + async def get_task(self, task_id: str) -> dict: ... + async def list_tasks(self, ticket_id: str = None) -> list[dict]: ... + async def delete_task(self, task_id: str) -> bool: ... 
+ + # Sync operations + async def sync_from_local(self, parser: SpecParser) -> dict: + """Push local .omoi_os/ files to API.""" + + async def sync_to_local(self, output_dir: Path) -> dict: + """Pull from API and write to local files.""" + + async def diff(self, parser: SpecParser) -> dict: + """Compare local files with remote API state.""" +``` + +#### 2.3 Create CLI Tool + +**File**: `.claude/skills/spec-driven-dev/scripts/spec_cli.py` + +Unified CLI that combines parsing and API operations: + +```bash +# List all tickets/tasks +uv run python spec_cli.py list tickets +uv run python spec_cli.py list tasks + +# Get specific item +uv run python spec_cli.py get ticket TKT-001 +uv run python spec_cli.py get task TSK-001 + +# Dependency operations +uv run python spec_cli.py deps TKT-001 # Show dependency tree +uv run python spec_cli.py ready # Show tasks ready to work on +uv run python spec_cli.py graph # Export dependency graph as DOT + +# API operations (when backend is running) +uv run python spec_cli.py sync push # Push local to API +uv run python spec_cli.py sync pull # Pull from API to local +uv run python spec_cli.py sync diff # Show differences + +# Export operations +uv run python spec_cli.py export json > specs.json +uv run python spec_cli.py export yaml > specs.yaml +``` + +--- + +## Part 3: File Changes Summary + +### Files to Modify + +| File | Change | +|------|--------| +| `references/ticket_template.md` | Add YAML frontmatter with structured dependencies | +| `references/task_template.md` | Add YAML frontmatter with structured dependencies | +| `SKILL.md` | Add dependency management documentation section | + +### Files to Create + +| File | Purpose | +|------|---------| +| `scripts/parse_specs.py` | Parse .omoi_os/ files into structured data | +| `scripts/api_client.py` | Direct HTTP client for OmoiOS API | +| `scripts/spec_cli.py` | Unified CLI combining parsing + API operations | +| `scripts/models.py` | Shared dataclasses/Pydantic models | + +### 
Existing Files to Update + +| File | Change | +|------|--------| +| `scripts/generate_ids.py` | Add dependency-aware ID generation | +| `scripts/validate_specs.py` | Add dependency validation (circular detection) | + +--- + +## Part 4: Implementation Steps + +### Phase 1: Template Updates (30 min) +1. Update `ticket_template.md` with YAML frontmatter +2. Update `task_template.md` with YAML frontmatter +3. Update `SKILL.md` with dependency documentation +4. Update existing `.omoi_os/` files to use new format + +### Phase 2: Parser Module (1-2 hours) +1. Create `scripts/models.py` with dataclasses +2. Create `scripts/parse_specs.py` with parser logic +3. Add YAML frontmatter parsing +4. Add markdown content extraction +5. Add dependency graph building +6. Add "ready tasks" logic + +### Phase 3: API Client (1-2 hours) +1. Create `scripts/api_client.py` with HTTP client +2. Add ticket CRUD operations +3. Add task CRUD operations +4. Add sync operations (push/pull/diff) +5. Add error handling and retries + +### Phase 4: CLI Tool (30 min) +1. Create `scripts/spec_cli.py` with argparse +2. Wire up all commands +3. Add output formatting (table, json, yaml) +4. Add dependency graph DOT export + +### Phase 5: Validation Updates (30 min) +1. 
Update `validate_specs.py` with: + - Circular dependency detection + - Missing dependency validation + - Orphaned task detection + +--- + +## Part 5: Usage Examples + +### After Implementation + +```bash +# Parse local specs +uv run python .claude/skills/spec-driven-dev/scripts/spec_cli.py list tickets + +# Output: +# ID | Title | Status | Blocked By | Blocks +# TKT-001 | Implement Auth | backlog | - | TKT-002, TKT-003 +# TKT-002 | User Profile Page | backlog | TKT-001 | - +# TKT-003 | Dashboard | backlog | TKT-001 | - + +# Get ready tasks (all dependencies satisfied) +uv run python .claude/skills/spec-driven-dev/scripts/spec_cli.py ready + +# Output: +# Ready tasks (no pending dependencies): +# - TSK-001: Add webhook_url to Project and Ticket Models +# - TSK-002: Create database migration + +# Sync to backend API +uv run python .claude/skills/spec-driven-dev/scripts/spec_cli.py sync push + +# Output: +# Syncing 1 tickets and 6 tasks to http://localhost:18000... +# Created TKT-001-webhook-notifications +# Created TSK-001, TSK-002, TSK-003, TSK-004, TSK-005, TSK-006 +# Sync complete! +``` + +--- + +## Design Decisions (Confirmed) + +1. **Frontmatter format**: YAML frontmatter with `---` delimiters +2. **No legacy support**: Files MUST have frontmatter (no inference from content) +3. **Sync behavior**: Create-only, but update descriptions if they differ from existing +4. **Validation strictness**: Strict - error on circular dependencies (blocks sync) +5. **Workflow**: **Markdown first** - write specs in markdown, use Python to parse/validate/visualize, then optionally sync to API + +--- + +## Core Workflow: Markdown First + +The key insight is that **markdown files are the source of truth**. The Python tools exist to: + +1. **Parse** - Read all tickets/tasks from `.omoi_os/` +2. **Validate** - Check for circular dependencies, missing references +3. **Visualize** - Print dependency graph, show ready tasks +4. 
**Sync** - Push validated specs to backend API (create-only, update descriptions) + +``` +┌─────────────────┐ ┌──────────────────┐ ┌─────────────────┐ +│ 1. Write │────>│ 2. Parse & │────>│ 3. Sync to │ +│ Markdown │ │ Validate │ │ Backend API │ +│ (.omoi_os/) │ │ (Python CLI) │ │ (optional) │ +└─────────────────┘ └──────────────────┘ └─────────────────┘ + │ │ │ + │ ▼ │ + │ ┌──────────────────┐ │ + │ │ Print Graph: │ │ + │ │ - All tickets │ │ + │ │ - All tasks │ │ + │ │ - Dependencies │ │ + │ │ - Ready tasks │ │ + │ └──────────────────┘ │ + │ │ + └────────────────────────────────────────────────┘ + Source of Truth +``` + +--- + +## Revised Implementation: Parse & Print First + +### Primary Goal: Parse Markdown, Print Everything + +Before any API integration, the core utility is: + +```bash +# Parse all specs and print structured output +uv run python .claude/skills/spec-driven-dev/scripts/spec_cli.py show all + +# Output: +# ══════════════════════════════════════════════════════════════ +# TICKETS (3 total) +# ══════════════════════════════════════════════════════════════ +# +# TKT-001: Implement Webhook Notifications +# Status: backlog | Priority: HIGH | Estimate: L +# Description: Implement webhook notifications for task lifecycle... +# Tasks: TSK-001, TSK-002, TSK-003, TSK-004, TSK-005, TSK-006 +# Blocked By: (none) +# Blocks: (none) +# +# ══════════════════════════════════════════════════════════════ +# TASKS (6 total) +# ══════════════════════════════════════════════════════════════ +# +# TSK-001: Add webhook_url to Project and Ticket Models +# Parent: TKT-001 | Status: pending | Estimate: S +# Description: Add webhook_url field to both Project and Ticket models. +# Depends On: (none) +# Blocks: TSK-002 +# +# TSK-002: Create database migration +# Parent: TKT-001 | Status: pending | Estimate: S +# Description: Create Alembic migration for webhook_url fields. +# Depends On: TSK-001 +# Blocks: TSK-003, TSK-004 +# +# ... 
etc +# +# ══════════════════════════════════════════════════════════════ +# DEPENDENCY GRAPH +# ══════════════════════════════════════════════════════════════ +# +# TSK-001 (Add webhook_url...) +# └─> TSK-002 (Create migration) +# ├─> TSK-003 (WebhookDeliveryService) +# │ └─> TSK-004 (WebhookNotificationService) +# │ └─> TSK-006 (Tests) +# └─> TSK-005 (Update API routes) +# └─> TSK-006 (Tests) +# +# ══════════════════════════════════════════════════════════════ +# READY TASKS (no pending dependencies) +# ══════════════════════════════════════════════════════════════ +# +# - TSK-001: Add webhook_url to Project and Ticket Models +# +# ══════════════════════════════════════════════════════════════ +# VALIDATION +# ══════════════════════════════════════════════════════════════ +# +# ✓ No circular dependencies detected +# ✓ All task references valid +# ✓ All ticket references valid +``` + +### Secondary Goal: API Sync (After Validation Passes) + +```bash +# Only after parsing/validation succeeds, optionally sync +uv run python .claude/skills/spec-driven-dev/scripts/spec_cli.py sync push + +# Output: +# Validating specs... +# ✓ 3 tickets, 6 tasks parsed +# ✓ No circular dependencies +# +# Syncing to http://localhost:18000... +# [CREATE] TKT-001: Implement Webhook Notifications +# [CREATE] TSK-001: Add webhook_url to Project and Ticket Models +# [CREATE] TSK-002: Create database migration +# [UPDATE DESC] TSK-003: WebhookDeliveryService (description changed) +# [SKIP] TSK-004: Already exists, description unchanged +# ... +# +# Sync complete: 5 created, 1 updated, 1 skipped +``` + +--- + +## Next Steps + +1. ✅ Plan approved with design decisions +2. Begin Phase 1: Update templates with YAML frontmatter +3. Phase 2: Create parser that prints all tickets/tasks/dependencies +4. Phase 3: Add validation (circular dependency detection) +5. 
Phase 4: Add API sync (create-only, update descriptions) From c414f93cc0bc4f225936d96be6cebf4868daec75 Mon Sep 17 00:00:00 2001 From: Kevin Hill Date: Mon, 29 Dec 2025 13:39:09 -0300 Subject: [PATCH 002/290] Add complete sync workflow documentation to SKILL.md MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Step-by-step sync instructions with prerequisites - YAML frontmatter examples for all file types - Traceability flow diagram (Requirements → Designs → Tickets → Tasks) - Common issues and troubleshooting section - Updated references to note YAML frontmatter support --- .claude/skills/spec-driven-dev/SKILL.md | 199 +++++++++++++++++++++++- 1 file changed, 195 insertions(+), 4 deletions(-) diff --git a/.claude/skills/spec-driven-dev/SKILL.md b/.claude/skills/spec-driven-dev/SKILL.md index 2e8c134d..07a4cede 100644 --- a/.claude/skills/spec-driven-dev/SKILL.md +++ b/.claude/skills/spec-driven-dev/SKILL.md @@ -1036,10 +1036,201 @@ trace = await client.get_full_traceability(project_id) --- +## Complete Sync Workflow + +This section provides step-by-step instructions for syncing local specs to the OmoiOS API. + +### Prerequisites + +1. **Backend running** at `http://0.0.0.0:18000` (or your API URL) +2. **Project exists** in the system (get project ID via `spec_cli.py projects`) +3. 
**Local specs** in `.omoi_os/` with proper YAML frontmatter + +### Step 1: Prepare Local Spec Files + +Ensure your files have proper YAML frontmatter: + +**Requirements** (`.omoi_os/requirements/{feature}.md`): +```yaml +--- +id: REQ-FEATURE-001 +title: Feature Name Requirements +feature: feature-name +created: 2025-12-29 +updated: 2025-12-29 +status: draft +category: functional +priority: HIGH +design_ref: designs/feature-name.md # Links to design +--- +``` + +**Designs** (`.omoi_os/designs/{feature}.md`): +```yaml +--- +id: DESIGN-FEATURE-001 +title: Feature Name Design +feature: feature-name +created: 2025-12-29 +updated: 2025-12-29 +status: draft +requirements: + - REQ-FEATURE-001 # Links back to requirements +--- +``` + +**Tickets** (`.omoi_os/tickets/TKT-{N}.md`): +```yaml +--- +id: TKT-001 +title: Implement Feature +status: backlog +priority: HIGH +design_ref: designs/feature-name.md +dependencies: + blocked_by: [] + blocks: [] +--- +``` + +**Tasks** (`.omoi_os/tasks/TSK-{N}.md`): +```yaml +--- +id: TSK-001 +title: Create data models +status: pending +ticket_id: TKT-001 +estimate: M +dependencies: + depends_on: [] + blocks: [TSK-002] +--- +``` + +### Step 2: Validate Local Specs + +```bash +cd .claude/skills/spec-driven-dev/scripts + +# Validate for errors (circular deps, missing refs) +python spec_cli.py validate + +# Preview what you have locally +python spec_cli.py show all + +# Check traceability (Requirements → Designs → Tickets → Tasks) +python spec_cli.py show traceability +``` + +### Step 3: Find Your Project ID + +```bash +# List all projects +python spec_cli.py projects --api-url http://0.0.0.0:18000 + +# Note the project ID (UUID format) +``` + +### Step 4: Sync Specs (Requirements & Designs) + +```bash +# DRY RUN: See what would be synced (no changes made) +python spec_cli.py sync-specs diff \ + --project-id \ + --api-url http://0.0.0.0:18000 + +# Output shows: +# - CREATE: New specs that will be created +# - UPDATE: Specs that exist but need 
updates +# - SKIP: Specs already in sync + +# PUSH: Actually sync to API +python spec_cli.py sync-specs push \ + --project-id \ + --api-url http://0.0.0.0:18000 + +# Optional: Custom spec title +python spec_cli.py sync-specs push \ + --project-id \ + --spec-title "Webhook Notifications" \ + --api-url http://0.0.0.0:18000 +``` + +### Step 5: Sync Tickets & Tasks + +```bash +# DRY RUN: Preview ticket/task sync +python spec_cli.py sync diff \ + --project-id \ + --api-url http://0.0.0.0:18000 + +# PUSH: Actually sync tickets and tasks +python spec_cli.py sync push \ + --project-id \ + --api-url http://0.0.0.0:18000 +``` + +### Step 6: Verify in API + +```bash +# View full traceability from API +python spec_cli.py api-trace \ + --api-url http://0.0.0.0:18000 + +# View project with all tickets/tasks +python spec_cli.py project \ + --api-url http://0.0.0.0:18000 +``` + +### What Gets Synced + +| Local File | API Entity | Linking | +|------------|------------|---------| +| `.omoi_os/requirements/*.md` | Spec + Requirements | `design_ref` → Design link | +| `.omoi_os/designs/*.md` | Spec design artifact | `requirements` → Requirement links | +| `.omoi_os/tickets/*.md` | Ticket | `design_ref` → links to spec | +| `.omoi_os/tasks/*.md` | Task | `ticket_id` → parent ticket | + +### Traceability Flow + +``` +Requirements (EARS format) + ↓ design_ref +Designs (Architecture, API specs) + ↓ requirements list +Tickets (Work items) + ↓ ticket_id +Tasks (Atomic units) +``` + +### Sync Summary Output + +After push, you'll see a summary: +``` +Summary: 2 created, 1 updated, 3 skipped, 0 failed +``` + +- **created**: New entities added to API +- **updated**: Existing entities modified +- **skipped**: Already in sync (no changes needed) +- **failed**: Errors (check messages) + +### Common Issues + +**"Spec not found"**: Run `sync-specs push` before `sync push` (specs must exist first) + +**"DetachedInstanceError"**: Backend session issue - restart backend + +**"Project not found"**: 
Verify project ID with `spec_cli.py projects` + +**Missing linked_design**: Ensure requirements have `design_ref` in frontmatter + +--- + ## References -- [references/requirements_template.md](references/requirements_template.md) - Full requirements template -- [references/design_template.md](references/design_template.md) - Full design template -- [references/ticket_template.md](references/ticket_template.md) - Ticket template -- [references/task_template.md](references/task_template.md) - Task template +- [references/requirements_template.md](references/requirements_template.md) - Full requirements template with YAML frontmatter +- [references/design_template.md](references/design_template.md) - Full design template with YAML frontmatter +- [references/ticket_template.md](references/ticket_template.md) - Ticket template with dependencies +- [references/task_template.md](references/task_template.md) - Task template with dependencies - [references/claude_sdk_patterns.md](references/claude_sdk_patterns.md) - Claude Agent SDK integration patterns From 500666530e7bcf651f7e17ed1f4d1dc0744790f7 Mon Sep 17 00:00:00 2001 From: Kevin Hill Date: Mon, 29 Dec 2025 13:45:04 -0300 Subject: [PATCH 003/290] Add environment variable support for API URL and auth - OMOIOS_API_URL: Base API URL (default: http://localhost:18000) - OMOIOS_API_KEY: API key authentication - OMOIOS_TOKEN: JWT token authentication Priority: CLI flag > env var > default Updated SKILL.md documentation with env var section and simplified command examples (no need to pass --api-url every time) --- .claude/skills/spec-driven-dev/SKILL.md | 57 ++++++++++++------- .../spec-driven-dev/scripts/api_client.py | 29 +++++++--- 2 files changed, 59 insertions(+), 27 deletions(-) diff --git a/.claude/skills/spec-driven-dev/SKILL.md b/.claude/skills/spec-driven-dev/SKILL.md index 07a4cede..c444c597 100644 --- a/.claude/skills/spec-driven-dev/SKILL.md +++ b/.claude/skills/spec-driven-dev/SKILL.md @@ -1042,10 +1042,35 @@ 
This section provides step-by-step instructions for syncing local specs to the O ### Prerequisites -1. **Backend running** at `http://0.0.0.0:18000` (or your API URL) +1. **Backend running** at your API URL 2. **Project exists** in the system (get project ID via `spec_cli.py projects`) 3. **Local specs** in `.omoi_os/` with proper YAML frontmatter +### Environment Variables + +Set these environment variables to avoid passing flags on every command: + +| Variable | Description | Default | +|----------|-------------|---------| +| `OMOIOS_API_URL` | Base URL of the OmoiOS API | `http://localhost:18000` | +| `OMOIOS_API_KEY` | API key for authentication | (none) | +| `OMOIOS_TOKEN` | JWT token for authentication | (none) | + +**Example setup:** +```bash +# Set in your shell profile (.bashrc, .zshrc, etc.) +export OMOIOS_API_URL="http://0.0.0.0:18000" +export OMOIOS_API_KEY="your-api-key" + +# Or set per-session +OMOIOS_API_URL="http://0.0.0.0:18000" python spec_cli.py projects +``` + +**Priority order:** +1. Command-line flag (e.g., `--api-url`) +2. Environment variable (e.g., `OMOIOS_API_URL`) +3. 
Default value (`http://localhost:18000`) + ### Step 1: Prepare Local Spec Files Ensure your files have proper YAML frontmatter: @@ -1125,7 +1150,10 @@ python spec_cli.py show traceability ### Step 3: Find Your Project ID ```bash -# List all projects +# List all projects (uses OMOIOS_API_URL env var or default) +python spec_cli.py projects + +# Or specify URL explicitly python spec_cli.py projects --api-url http://0.0.0.0:18000 # Note the project ID (UUID format) @@ -1135,9 +1163,7 @@ python spec_cli.py projects --api-url http://0.0.0.0:18000 ```bash # DRY RUN: See what would be synced (no changes made) -python spec_cli.py sync-specs diff \ - --project-id \ - --api-url http://0.0.0.0:18000 +python spec_cli.py sync-specs diff --project-id # Output shows: # - CREATE: New specs that will be created @@ -1145,41 +1171,32 @@ python spec_cli.py sync-specs diff \ # - SKIP: Specs already in sync # PUSH: Actually sync to API -python spec_cli.py sync-specs push \ - --project-id \ - --api-url http://0.0.0.0:18000 +python spec_cli.py sync-specs push --project-id # Optional: Custom spec title python spec_cli.py sync-specs push \ --project-id \ - --spec-title "Webhook Notifications" \ - --api-url http://0.0.0.0:18000 + --spec-title "Webhook Notifications" ``` ### Step 5: Sync Tickets & Tasks ```bash # DRY RUN: Preview ticket/task sync -python spec_cli.py sync diff \ - --project-id \ - --api-url http://0.0.0.0:18000 +python spec_cli.py sync diff --project-id # PUSH: Actually sync tickets and tasks -python spec_cli.py sync push \ - --project-id \ - --api-url http://0.0.0.0:18000 +python spec_cli.py sync push --project-id ``` ### Step 6: Verify in API ```bash # View full traceability from API -python spec_cli.py api-trace \ - --api-url http://0.0.0.0:18000 +python spec_cli.py api-trace # View project with all tickets/tasks -python spec_cli.py project \ - --api-url http://0.0.0.0:18000 +python spec_cli.py project ``` ### What Gets Synced diff --git 
a/.claude/skills/spec-driven-dev/scripts/api_client.py b/.claude/skills/spec-driven-dev/scripts/api_client.py index a6543b4b..ec6ce1cd 100644 --- a/.claude/skills/spec-driven-dev/scripts/api_client.py +++ b/.claude/skills/spec-driven-dev/scripts/api_client.py @@ -22,6 +22,7 @@ """ import asyncio +import os import re from dataclasses import dataclass from enum import Enum @@ -29,6 +30,9 @@ import httpx +# Default API URL - can be overridden via environment variable +DEFAULT_API_URL = "http://localhost:18000" + from models import ( ParseResult, ParsedDesign, @@ -84,7 +88,7 @@ class OmoiOSClient: def __init__( self, - base_url: str = "http://localhost:18000", + base_url: Optional[str] = None, timeout: float = 30.0, token: Optional[str] = None, api_key: Optional[str] = None, @@ -92,15 +96,26 @@ def __init__( """Initialize client. Args: - base_url: Base URL of OmoiOS API + base_url: Base URL of OmoiOS API. If not provided, uses + OMOIOS_API_URL environment variable, or falls back + to DEFAULT_API_URL (http://localhost:18000) timeout: Request timeout in seconds - token: JWT access token for authentication - api_key: API key for authentication (alternative to JWT) + token: JWT access token for authentication. If not provided, + uses OMOIOS_TOKEN environment variable. + api_key: API key for authentication (alternative to JWT). + If not provided, uses OMOIOS_API_KEY environment variable. 
""" - self.base_url = base_url.rstrip("/") + # Resolve base URL: explicit > env var > default + if base_url: + self.base_url = base_url.rstrip("/") + else: + self.base_url = os.environ.get("OMOIOS_API_URL", DEFAULT_API_URL).rstrip("/") + self.timeout = timeout - self.token = token - self.api_key = api_key + + # Resolve auth: explicit > env var + self.token = token or os.environ.get("OMOIOS_TOKEN") + self.api_key = api_key or os.environ.get("OMOIOS_API_KEY") async def _request( self, From fb296b54be1ef19a94be2704c18b85b1cb1ce592 Mon Sep 17 00:00:00 2001 From: Kevin Hill Date: Mon, 29 Dec 2025 14:07:24 -0300 Subject: [PATCH 004/290] Sync spec-driven-dev skill to sandbox_skills and enhance upload - Sync SKILL.md, scripts/, and references/ to sandbox_skills directory - Add get_skill_files() to collect all files in a skill directory - Update get_skills_for_upload() to include all files by default - Add include_all_files parameter for backward compatibility - Skip __pycache__ and .pyc files when uploading --- backend/omoi_os/sandbox_skills/__init__.py | 62 +- .../sandbox_skills/spec-driven-dev/SKILL.md | 1269 +++++++++++++++- .../references/claude_sdk_patterns.md | 393 +++++ .../references/design_template.md | 385 +++++ .../references/requirements_template.md | 279 ++++ .../references/task_template.md | 190 +++ .../references/ticket_template.md | 210 +++ .../spec-driven-dev/scripts/api_client.py | 1293 +++++++++++++++++ .../spec-driven-dev/scripts/generate_ids.py | 169 +++ .../spec-driven-dev/scripts/init_feature.py | 284 ++++ .../spec-driven-dev/scripts/models.py | 540 +++++++ .../spec-driven-dev/scripts/parse_specs.py | 577 ++++++++ .../spec-driven-dev/scripts/spec_cli.py | 1075 ++++++++++++++ .../spec-driven-dev/scripts/validate_specs.py | 420 ++++++ 14 files changed, 7070 insertions(+), 76 deletions(-) create mode 100644 backend/omoi_os/sandbox_skills/spec-driven-dev/references/claude_sdk_patterns.md create mode 100644 
backend/omoi_os/sandbox_skills/spec-driven-dev/references/design_template.md create mode 100644 backend/omoi_os/sandbox_skills/spec-driven-dev/references/requirements_template.md create mode 100644 backend/omoi_os/sandbox_skills/spec-driven-dev/references/task_template.md create mode 100644 backend/omoi_os/sandbox_skills/spec-driven-dev/references/ticket_template.md create mode 100644 backend/omoi_os/sandbox_skills/spec-driven-dev/scripts/api_client.py create mode 100755 backend/omoi_os/sandbox_skills/spec-driven-dev/scripts/generate_ids.py create mode 100755 backend/omoi_os/sandbox_skills/spec-driven-dev/scripts/init_feature.py create mode 100644 backend/omoi_os/sandbox_skills/spec-driven-dev/scripts/models.py create mode 100644 backend/omoi_os/sandbox_skills/spec-driven-dev/scripts/parse_specs.py create mode 100644 backend/omoi_os/sandbox_skills/spec-driven-dev/scripts/spec_cli.py create mode 100755 backend/omoi_os/sandbox_skills/spec-driven-dev/scripts/validate_specs.py diff --git a/backend/omoi_os/sandbox_skills/__init__.py b/backend/omoi_os/sandbox_skills/__init__.py index 8f851d5b..43dcae0d 100644 --- a/backend/omoi_os/sandbox_skills/__init__.py +++ b/backend/omoi_os/sandbox_skills/__init__.py @@ -6,12 +6,15 @@ Usage: from omoi_os.sandbox_skills import get_skills_for_upload - # Get all skills + # Get all skills (includes all files: SKILL.md, scripts, references, etc.) skills = get_skills_for_upload() # Get specific skills skills = get_skills_for_upload(["spec-driven-dev", "code-review"]) + # Get only SKILL.md files (backward compatible) + skills = get_skills_for_upload(include_all_files=False) + # Upload to sandbox for skill_path, content in skills.items(): sandbox.fs.upload_file(content.encode("utf-8"), skill_path) @@ -52,15 +55,55 @@ def get_skill_content(skill_name: str) -> Optional[str]: return None +def get_skill_files(skill_name: str) -> dict[str, str]: + """Get all files for a skill (SKILL.md, scripts, references, etc.). 
+ + Args: + skill_name: Name of the skill directory. + + Returns: + Dict mapping relative paths to file content. + Example: {"SKILL.md": "...", "scripts/api_client.py": "..."} + """ + skill_dir = Path(__file__).parent / skill_name + if not skill_dir.exists(): + return {} + + result = {} + + # Walk through all files in the skill directory + for file_path in skill_dir.rglob("*"): + if file_path.is_file(): + # Skip __pycache__ and .pyc files + if "__pycache__" in str(file_path) or file_path.suffix == ".pyc": + continue + + # Get relative path from skill directory + relative_path = file_path.relative_to(skill_dir) + + # Read file content (text files only) + try: + content = file_path.read_text() + result[str(relative_path)] = content + except UnicodeDecodeError: + # Skip binary files + continue + + return result + + def get_skills_for_upload( skill_names: Optional[list[str]] = None, install_path: str = "/root/.claude/skills", + include_all_files: bool = True, ) -> dict[str, str]: """Get skills ready for upload to a sandbox. Args: skill_names: Specific skills to include. If None, includes all always_include skills. install_path: Path in sandbox where skills will be installed. + include_all_files: If True, include all files (scripts, references, etc.). + If False, only include SKILL.md files. Returns: Dict mapping sandbox file paths to file content. 
@@ -80,11 +123,18 @@ def get_skills_for_upload( result = {} for skill_name in skill_names: - content = get_skill_content(skill_name) - if content: - # Create path like: /root/.claude/skills/code-review/SKILL.md - sandbox_path = f"{install_path}/{skill_name}/SKILL.md" - result[sandbox_path] = content + if include_all_files: + # Get all files for the skill + skill_files = get_skill_files(skill_name) + for relative_path, content in skill_files.items(): + sandbox_path = f"{install_path}/{skill_name}/{relative_path}" + result[sandbox_path] = content + else: + # Only get SKILL.md (backward compatible) + content = get_skill_content(skill_name) + if content: + sandbox_path = f"{install_path}/{skill_name}/SKILL.md" + result[sandbox_path] = content return result diff --git a/backend/omoi_os/sandbox_skills/spec-driven-dev/SKILL.md b/backend/omoi_os/sandbox_skills/spec-driven-dev/SKILL.md index 18052c2d..c444c597 100644 --- a/backend/omoi_os/sandbox_skills/spec-driven-dev/SKILL.md +++ b/backend/omoi_os/sandbox_skills/spec-driven-dev/SKILL.md @@ -1,124 +1,1253 @@ --- -description: Create requirements, designs, tickets, and tasks following spec-driven development methodology -globs: ["**/*.md", ".omoi_os/**"] +name: spec-driven-dev +description: Spec-driven development workflow using Claude Agent SDK Python patterns. Use when planning new features, generating requirements docs, design docs, tickets, and tasks. **IMPORTANT: If MCP spec_workflow tools are available (mcp__spec_workflow__*), use them to create specs, requirements, tickets, and tasks via API. Otherwise, fall back to writing files in .omoi_os/ directory.** Integrates with DeepWiki and Context7 for research. --- # Spec-Driven Development -Follow this workflow when building features: +Systematic workflow for turning feature ideas into structured specifications, designs, and actionable work items using Claude Agent SDK patterns. -## Workflow Phases +## CRITICAL: Research-First, Question-Driven Approach -### 1. 
Requirements Phase -Create requirements document in `.omoi_os/requirements/{feature}.md`: +**BEFORE creating any specs, requirements, or designs:** -```markdown -# {Feature} Requirements +### Step 0: Explore Existing Codebase & Documentation + +**MANDATORY**: Always explore the existing codebase and documentation FIRST to understand context, patterns, and constraints. + +``` +1. Search for existing docs: + - Read docs/ directory for architecture, requirements, and design docs + - Check .omoi_os/ directory for existing specs, tickets, and tasks + - Look for CLAUDE.md, README.md, and other project guidance files + +2. Explore related code: + - Use Grep/Glob to find related components, services, and patterns + - Read existing implementations that the new feature will integrate with + - Identify coding patterns, naming conventions, and architecture styles -**Status**: Draft | Review | Approved -**Created**: {YYYY-MM-DD} +3. Check for prior work: + - Look for existing tickets, tasks, or specs related to this feature + - Review git history for context on similar features + - Find any ADRs (Architecture Decision Records) that apply +``` -## Overview -{2-3 sentence description} +### Step 1: Ask Discovery Questions (MANDATORY) -## Requirements +**NEVER proceed to design without asking thorough questions first.** Ask 5-15 questions depending on feature complexity: -#### REQ-{DOMAIN}-{AREA}-001: {Title} -THE SYSTEM SHALL {normative requirement}. +**Feature Clarity Questions:** +- What specific problem does this solve? What pain point exists today? +- Who are the primary users/actors? Secondary? +- What does success look like? How will we measure it? -## Data Model -{Pydantic models with field descriptions} +**Scope & Boundaries Questions:** +- What is explicitly IN scope? What is explicitly OUT? +- Are there existing features this overlaps with? +- What are the hard constraints (time, budget, technical)? 
-## API Endpoints -| Method | Path | Purpose | Request | Response | -|--------|------|---------|---------|----------| +**Technical Context Questions:** +- What existing systems/services will this integrate with? +- Are there performance requirements (latency, throughput)? +- What data will this feature need? Where does it come from? +- What security/privacy considerations apply? + +**Design Direction Questions:** +- Are there multiple valid approaches? Which should we explore? +- What are the trade-offs between approaches? +- Are there existing patterns in the codebase we should follow? +- What could go wrong? What are the risks? + +**Example Question Flow:** ``` +User: "I want to add user notifications" -### 2. Design Phase -Create design document in `.omoi_os/designs/{feature}.md`: +Claude: Before designing, I need to understand the feature better. Let me ask some questions: -```markdown -# {Feature} Design +**Scope:** +1. What types of notifications? (in-app, email, SMS, push?) +2. What events trigger notifications? (task complete, agent stuck, PR ready?) +3. Should users be able to configure notification preferences? + +**Technical:** +4. I see we have an EventBusService - should notifications hook into that? +5. Do we need real-time delivery or is near-real-time (polling) acceptable? +6. What's the expected volume of notifications per user per day? + +**Constraints:** +7. Are there any third-party services we must/must not use? +8. What's the timeline/priority for this feature? + +Let me know your thoughts on these, and I may have follow-up questions. +``` + +### Step 2: Always Create Tangible Artifacts in `.omoi_os/` + +Even when using MCP tools, **ALSO write local files in `.omoi_os/`** for these benefits: +- **Reference**: AI can re-read files if context is lost +- **Debugging**: Humans can inspect what was generated +- **Version Control**: Files can be committed to git +- **Portability**: Works even if MCP tools fail + +**Dual-Write Strategy:** +``` +1. 
Call MCP tool to create in backend system +2. ALSO write to .omoi_os/ directory with same content +3. If MCP fails, .omoi_os/ files serve as fallback +``` + +### Python Scripts for API Reference + +Use the utility scripts in `scripts/` to interact with APIs and validate state: + +```bash +# Initialize feature directory structure +uv run python .claude/skills/spec-driven-dev/scripts/init_feature.py --feature notifications + +# Generate next ticket/task IDs +uv run python .claude/skills/spec-driven-dev/scripts/generate_ids.py + +# Validate spec completeness +uv run python .claude/skills/spec-driven-dev/scripts/validate_specs.py --feature notifications + +# Call spec workflow APIs (if backend is running) +# These help verify MCP tool operations and debug issues +uv run python scripts/test_spec_workflow.py --list-specs +uv run python scripts/test_spec_workflow.py --get-spec spec-123 +``` + +**When to use scripts:** +- After MCP tool calls, verify the data was created correctly +- When debugging why a spec/ticket isn't showing up +- To list existing specs before creating new ones +- To validate generated specs match requirements + +--- + +## MCP Spec Workflow Tools (USE WHEN AVAILABLE!) + +**IMPORTANT**: If you have access to the `mcp__spec_workflow__*` tools, use them instead of writing files to `.omoi_os/`. These tools create specs, requirements, and tickets directly in the OmoiOS backend system. 
+ +### Available MCP Tools + +Check if these tools are available in your current session: + +| Tool | Purpose | Use Instead Of | +|------|---------|----------------| +| `mcp__spec_workflow__create_spec` | Create a new specification | Writing `.omoi_os/requirements/*.md` | +| `mcp__spec_workflow__add_requirement` | Add EARS-style requirement | Manually formatting requirements | +| `mcp__spec_workflow__add_acceptance_criterion` | Add acceptance criteria | Writing criteria in markdown | +| `mcp__spec_workflow__update_design` | Update architecture/design | Writing `.omoi_os/designs/*.md` | +| `mcp__spec_workflow__add_spec_task` | Add task to spec | Writing `.omoi_os/tasks/*.md` | +| `mcp__spec_workflow__create_ticket` | Create workflow ticket | Writing `.omoi_os/tickets/*.md` | +| `mcp__spec_workflow__approve_requirements` | Move to Design phase | Manual status updates | +| `mcp__spec_workflow__approve_design` | Move to Implementation phase | Manual status updates | +| `mcp__spec_workflow__get_spec` | Get spec details | Reading files | +| `mcp__spec_workflow__list_project_specs` | List all specs | Listing directories | +| `mcp__spec_workflow__get_ticket` | Get ticket details | Reading ticket files | + +### MCP-Based Workflow (with Dual-Write) + +When MCP tools are available, follow this flow. **Always dual-write to `.omoi_os/` as backup!** + +``` +0. EXPLORE existing docs, code, and prior specs (MANDATORY) +1. ASK discovery questions to understand the feature (5-15 questions) +2. RESEARCH using DeepWiki/Context7 for external libraries/patterns +3. Create Spec: mcp__spec_workflow__create_spec + write .omoi_os/requirements/{feature}.md +4. Add Requirements: mcp__spec_workflow__add_requirement (for each) + append to local file +5. Add Criteria: mcp__spec_workflow__add_acceptance_criterion + append to local file +6. Approve Reqs: mcp__spec_workflow__approve_requirements +7. Update Design: mcp__spec_workflow__update_design + write .omoi_os/designs/{feature}.md +8. 
Approve Design: mcp__spec_workflow__approve_design +9. Add Tasks: mcp__spec_workflow__add_spec_task (for each) + write .omoi_os/tasks/TSK-{N}.md +10. Create Ticket: mcp__spec_workflow__create_ticket + write .omoi_os/tickets/TKT-{N}.md +11. VERIFY: Run scripts to confirm data was created correctly +``` + +**Why Dual-Write?** +- If MCP server fails, you have local files as fallback +- AI can re-read .omoi_os/ files if context is lost mid-conversation +- Human reviewers can see exactly what was generated +- Files can be committed to version control for audit trail + +### Example MCP Usage + +```python +# 1. Create the spec +mcp__spec_workflow__create_spec( + project_id="proj-123", + title="Real-Time Collaboration", + description="Enable multiple users to edit documents simultaneously" +) +# Returns: spec_id + +# 2. Add requirements (EARS format: WHEN condition, THE SYSTEM SHALL action) +mcp__spec_workflow__add_requirement( + spec_id="spec-456", + title="Live cursor display", + condition="a user opens a shared document", + action="display all active collaborators' cursors in real-time" +) + +# 3. Add acceptance criteria +mcp__spec_workflow__add_acceptance_criterion( + spec_id="spec-456", + requirement_id="req-789", + text="Cursor positions update within 100ms of remote changes" +) + +# 4. Approve requirements to move to Design phase +mcp__spec_workflow__approve_requirements(spec_id="spec-456") + +# 5. Update design +mcp__spec_workflow__update_design( + spec_id="spec-456", + architecture="## Architecture\n\nWebSocket-based sync using CRDT...", + data_model="## Data Model\n\n```sql\nCREATE TABLE collaborators...", + api_spec=[ + {"method": "WS", "endpoint": "/ws/collab/{doc_id}", "description": "Real-time sync"} + ] +) + +# 6. Approve design to move to Implementation +mcp__spec_workflow__approve_design(spec_id="spec-456") + +# 7. 
Add tasks +mcp__spec_workflow__add_spec_task( + spec_id="spec-456", + title="Implement CRDT data structures", + description="Create RGA-based CRDT for text sequences", + phase="Implementation", + priority="high" +) + +# 8. Create ticket to trigger orchestrator +mcp__spec_workflow__create_ticket( + title="Implement Real-Time Collaboration", + description="Full implementation of collaborative editing feature", + priority="HIGH", + phase_id="PHASE_IMPLEMENTATION", + project_id="proj-123" +) +``` + +--- + +## Fallback: File-Based Workflow + +If MCP tools are NOT available, fall back to writing files in `.omoi_os/` as described below. + +## Workflow Overview + +``` +┌─────────────────┐ ┌──────────────────┐ ┌─────────────────┐ +│ 1. Understand │────>│ 2. Research │────>│ 3. Generate │ +│ Feature │ │ (DeepWiki/C7) │ │ Requirements │ +└─────────────────┘ └──────────────────┘ └─────────────────┘ + │ + ▼ +┌─────────────────┐ ┌──────────────────┐ ┌─────────────────┐ +│ 6. Tasks │<────│ 5. Tickets │<────│ 4. Design Doc │ +│ (Children) │ │ (Work Items) │ │ (Technical) │ +└─────────────────┘ └──────────────────┘ └─────────────────┘ +``` + +## Directory Structure + +All spec artifacts are stored in `.omoi_os/`: + +``` +.omoi_os/ +├── requirements/ # Requirements documents +│ ├── feature-name.md # REQ-XXX-YYY format +│ └── ... +├── designs/ # Technical design documents +│ ├── feature-name.md # Architecture, data models, APIs +│ └── ... +├── tickets/ # Work item tickets +│ ├── TKT-001.md # Parent work items +│ └── ... +└── tasks/ # Child tasks + ├── TSK-001.md # Individual actionable tasks + └── ... +``` + +## Phase 1: Feature Understanding (DEEP EXPLORATION) + +### When to Enter This Phase +- User describes a new feature idea +- User wants to plan implementation +- User says "let's build..." or "I want to add..." 
+ +### Process (Expanded) + +**Step 1a: Explore Existing Context FIRST** + +Before asking questions, gather context: +``` +# Read project documentation +Read docs/CLAUDE.md, docs/architecture/, docs/requirements/ + +# Check for existing related specs +ls .omoi_os/requirements/ +ls .omoi_os/designs/ + +# Search codebase for related components +Grep for related service names, models, patterns + +# Check for prior discussions/ADRs +ls docs/architecture/decisions/ +``` + +**Step 1b: Ask Thorough Discovery Questions** + +Ask 5-15 questions depending on complexity. Categories: + +1. **Problem & Value** (2-3 questions) + - What specific problem does this solve? + - What's the impact of not having this feature? + - How will we measure success? + +2. **Users & Interactions** (2-3 questions) + - Who are the users/actors? + - What are the key user journeys? + - What are the edge cases? + +3. **Technical Context** (3-5 questions) + - What existing systems will this integrate with? + - What data does this need? Where does it come from? + - Are there performance/scale requirements? + - What security/auth considerations apply? + +4. **Constraints & Trade-offs** (2-3 questions) + - What's explicitly out of scope? + - Are there time/budget constraints? + - What are the risks? What could go wrong? + +**Step 1c: Confirm Understanding with Summary** + +After questions are answered, summarize: +- Feature name (kebab-case identifier) +- One-line description +- Key user stories (3-5) +- Scope boundaries (IN/OUT explicitly listed) +- Technical constraints discovered +- Risks identified + +### Example Dialog + +``` +User: "I want to add real-time collaboration to the editor" + +Claude: Let me first explore the codebase to understand context... + +[Reads docs/CLAUDE.md, checks for existing specs, searches for editor-related code] + +Found: We have EventBusService for real-time events, WebSocket support in the frontend. +No existing collaboration specs found. 
-**Status**: Draft | Review | Approved -**Requirements**: {link to requirements} +Now, before designing, I need to understand the feature better: -## Architecture +**Problem & Value:** +1. What specific problem does this solve? (Multiple people editing same doc causes conflicts?) +2. How critical is real-time? (Sub-second updates vs eventual consistency?) +**Users & Scope:** +3. Who will collaborate? (Internal team only? External guests?) +4. What types of content? (Just text? Code? Rich formatting?) +5. What's explicitly OUT of scope? (Comments? Video chat?) + +**Technical:** +6. I see we have EventBusService - should collaboration events flow through there? +7. What's the expected concurrent editor count per document? +8. Do we need conflict resolution or can we use CRDT for auto-merge? + +**Constraints:** +9. Any third-party services we must use or avoid? +10. What's the timeline? MVP vs full feature? + +[After user answers...] + +**Summary:** + +**Feature**: real-time-collaboration +**Description**: Enable multiple users to edit documents simultaneously with live cursors and changes + +**User Stories**: +1. As a user, I can see other editors' cursors in real-time +2. As a user, I can see changes appear as others type +3. As a user, I can see who is currently viewing the document + +**Scope**: +- IN: Live cursors, text sync, presence indicators +- OUT: Voice/video, commenting (separate feature) + +**Technical Constraints**: +- Must integrate with existing EventBusService +- Max 10 concurrent editors per document +- Use CRDT for conflict resolution + +**Risks Identified**: +- Performance with large documents +- Network latency affecting user experience + +Is this correct? Any changes before I proceed to research? 
+``` + +## Phase 2: Research + +### Using DeepWiki and Context7 + +Research libraries and patterns before creating specifications: + +```python +# For GitHub repositories (architecture, implementation patterns) +mcp__deepwiki-mcp__read_wiki_structure(repoName="owner/repo") +mcp__deepwiki-mcp__ask_question( + repoName="owner/repo", + question="How is real-time collaboration implemented?" +) + +# For library documentation (API, best practices) +lib_id = mcp__context7-mcp__resolve-library-id(libraryName="yjs") +mcp__context7-mcp__get-library-docs( + context7CompatibleLibraryID=lib_id, + topic="collaboration", + mode="code" # or "info" for concepts +) +``` + +### Research Questions by Feature Type + +**Data/Storage Features:** +- What data models are needed? +- What are the storage patterns? +- What are the query patterns? + +**API Features:** +- What endpoints are needed? +- What authentication/authorization? +- What error handling? + +**Real-time Features:** +- What transport (WebSocket, SSE)? +- What sync algorithm (CRDT, OT)? +- What consistency model? + +**Agent Features:** +- What tools are needed? +- What hooks/callbacks? +- What permissions model? + +## Phase 3: Requirements Document + +### Location +`.omoi_os/requirements/{feature-name}.md` + +### Template Structure + +See [references/requirements_template.md](references/requirements_template.md) for full template. 
+ +### Key Patterns + +**Requirement IDs:** +``` +REQ-{DOMAIN}-{AREA}-{NUM} + +Examples: +REQ-COLLAB-SYNC-001 # Collaboration > Sync > Requirement 1 +REQ-AUTH-JWT-002 # Auth > JWT > Requirement 2 +REQ-AGENT-TOOL-003 # Agent > Tool > Requirement 3 +``` + +**State Machines (Mermaid):** ```mermaid -flowchart TD - A[Component A] --> B[Component B] +stateDiagram-v2 + [*] --> idle + idle --> syncing : Document Opened + syncing --> idle : Document Closed + syncing --> error : Sync Failed + error --> syncing : Retry +``` + +**Configuration Tables:** +```markdown +| Parameter | Default | Range | Description | +|-----------|---------|-------|-------------| +| sync_interval_ms | 100 | 50-1000 | Sync frequency | +| max_peers | 10 | 1-50 | Maximum collaborators | +``` + +**Pydantic Models:** +```python +from pydantic import BaseModel, Field + +class CollaboratorPresence(BaseModel): + user_id: str + cursor_position: int + last_seen: datetime + color: str = Field(description="Assigned cursor color") +``` + +## Phase 4: Design Document + +### Location +`.omoi_os/designs/{feature-name}.md` + +### Template Structure + +See [references/design_template.md](references/design_template.md) for full template. + +### Key Sections + +1. **Architecture Overview** (Mermaid flowchart) +2. **Component Responsibilities** (table) +3. **Data Models** (SQL + Pydantic) +4. **API Specifications** (endpoints table) +5. **Integration Points** (external systems) +6. 
**Implementation Details** (algorithms, pseudocode) + +### Claude Agent SDK Integration Patterns + +When the feature involves agents, include: + +**Custom Tools:** +```python +@tool("sync_document", "Sync document changes", {"doc_id": str, "changes": dict}) +async def sync_document(args: dict[str, Any]) -> dict[str, Any]: + # Implementation + return {"content": [{"type": "text", "text": f"Synced {len(changes)} changes"}]} ``` -## Component Responsibilities -| Component | Layer | Responsibilities | -|-----------|-------|------------------| +**Hooks:** +```python +async def validate_changes(input_data, tool_use_id, context): + """PreToolUse hook to validate changes before applying.""" + changes = input_data["tool_input"].get("changes", {}) + if not validate_change_format(changes): + return { + "hookSpecificOutput": { + "hookEventName": "PreToolUse", + "permissionDecision": "deny", + "permissionDecisionReason": "Invalid change format" + } + } + return {} +``` -## Implementation Details -{Key algorithms, patterns, integration points} +**Agent Options:** +```python +options = ClaudeAgentOptions( + system_prompt="You are a document collaboration assistant.", + mcp_servers={"collab": collab_server}, + allowed_tools=["mcp__collab__sync_document", "mcp__collab__get_peers"], + hooks={ + "PreToolUse": [HookMatcher(matcher="sync_document", hooks=[validate_changes])] + }, + max_turns=20, + max_budget_usd=1.0 +) ``` -### 3. Tickets Phase -Create tickets in `.omoi_os/tickets/TKT-{NUM}.md`: +## Phase 5: Tickets + +### Location +`.omoi_os/tickets/TKT-{NUM}.md` + +### Template Structure + +See [references/ticket_template.md](references/ticket_template.md) for full template. + +### Ticket Generation Rules + +1. **One ticket per major component** from design +2. **Clear acceptance criteria** mapped from requirements +3. **Dependencies** explicitly listed +4. 
**Estimates** in T-shirt sizes (S/M/L/XL) + +### Example Ticket ```markdown -# TKT-{NUM}: {Title} +# TKT-001: Implement Sync Engine -**Status**: backlog | building | testing | done -**Priority**: HIGH | MEDIUM | LOW -**Requirements**: REQ-XXX-YYY-001 +**Status**: backlog +**Priority**: HIGH +**Estimate**: L +**Requirements**: REQ-COLLAB-SYNC-001, REQ-COLLAB-SYNC-002 +**Design Reference**: designs/real-time-collaboration.md#sync-engine + +## Description +Implement the core synchronization engine using CRDT for conflict-free merging. ## Acceptance Criteria -- [ ] {Criterion 1} -- [ ] {Criterion 2} +- [ ] CRDT data structure implemented +- [ ] Sync protocol handles concurrent edits +- [ ] P95 sync latency < 100ms + +## Dependencies +- TKT-002: WebSocket infrastructure (must complete first) ## Tasks -| Task ID | Description | Status | -|---------|-------------|--------| -| TSK-001 | {Task} | pending | +- TSK-001: Implement CRDT data types +- TSK-002: Build sync protocol +- TSK-003: Add unit tests ``` -### 4. Tasks Phase -Create tasks in `.omoi_os/tasks/TSK-{NUM}.md`: +## Phase 6: Tasks + +### Location +`.omoi_os/tasks/TSK-{NUM}.md` + +### Template Structure + +See [references/task_template.md](references/task_template.md) for full template. + +### Task Generation Rules + +1. **Atomic units of work** (1-4 hours ideal) +2. **Clear deliverable** (file, test, endpoint) +3. **No ambiguity** in implementation approach +4. **Test requirements** included + +### Example Task ```markdown -# TSK-{NUM}: {Title} +# TSK-001: Implement CRDT Data Types -**Status**: pending | in_progress | done -**Parent**: TKT-{NUM} +**Status**: pending +**Parent Ticket**: TKT-001 +**Estimate**: M +**Assignee**: (unassigned) ## Objective -{What this task accomplishes} +Create CRDT data structures for text collaboration. 
## Deliverables -- [ ] `{file1.py}` - {purpose} -- [ ] `{file2.py}` - {purpose} +- [ ] `src/collab/crdt.py` - CRDT implementation +- [ ] `tests/collab/test_crdt.py` - Unit tests + +## Implementation Notes +Use RGA (Replicated Growable Array) for text sequences. +Reference: https://github.com/yjs/yjs ## Acceptance Criteria -- [ ] All tests pass -- [ ] No linting errors +- All unit tests pass +- Handles concurrent inserts at same position +- Maintains consistent ordering across replicas ``` -## Naming Conventions +## Interactive Workflow -- **Requirements**: `REQ-{DOMAIN}-{AREA}-{NUM}` (e.g., `REQ-AUTH-API-001`) -- **Tickets**: `TKT-{NUM}` or `TKT-{PREFIX}-{NUM}` (e.g., `TKT-AUTH-001`) -- **Tasks**: `TSK-{NUM}` (e.g., `TSK-001`) +### Starting a New Feature -## Directory Structure +``` +User: "I want to build a notification system" +Claude: [Phase 1 - Understand] +Let me understand the feature... +[Asks clarifying questions] + +Claude: [Phase 2 - Research] +Let me research notification patterns... +[Uses DeepWiki/Context7] + +Claude: [Phase 3-6 - Generate] +Here's what I'll create: +1. .omoi_os/requirements/notifications.md +2. .omoi_os/designs/notifications.md +3. .omoi_os/tickets/TKT-00X.md (N tickets) +4. .omoi_os/tasks/TSK-00X.md (M tasks) + +Shall I proceed? ``` -.omoi_os/ -├── requirements/ # REQ-XXX documents -├── designs/ # Design documents -├── tickets/ # TKT-XXX tickets -└── tasks/ # TSK-XXX tasks + +### Updating Existing Specs + +When updating: +1. Read existing spec first +2. Identify what changed +3. Update requirements → design → tickets → tasks (cascade) +4. Note revision in document history + +## Recovery: When Context is Lost + +If you lose context mid-conversation or need to resume work on a spec: + +### Re-Orient from Local Files + +``` +1. 
Read the spec files to understand where you were: + - .omoi_os/requirements/{feature}.md - Current requirements + - .omoi_os/designs/{feature}.md - Current design + - .omoi_os/tickets/ - Existing tickets + - .omoi_os/tasks/ - Existing tasks + +2. Check MCP for current state: + mcp__spec_workflow__list_project_specs(project_id="...") + mcp__spec_workflow__get_spec(spec_id="...") + +3. Use scripts to verify state: + uv run python scripts/test_spec_workflow.py --get-spec spec-123 ``` +### Resume Checklist + +When resuming work on an existing spec: + +- [ ] Read .omoi_os/ files to understand current state +- [ ] Query MCP to get live data +- [ ] Identify what phase you're in (Requirements, Design, Implementation) +- [ ] Check for pending questions that were never answered +- [ ] Review any TODOs or blockers noted in files +- [ ] Continue from where you left off + +### Example Recovery + +``` +Claude: I notice we're continuing work on the "notifications" feature. Let me check our progress... + +[Reads .omoi_os/requirements/notifications.md] +[Reads .omoi_os/designs/notifications.md] +[Calls mcp__spec_workflow__get_spec] + +Status: +- Requirements: 5 defined, approved +- Design: In progress (architecture done, API spec pending) +- Tickets: Not yet created +- Tasks: Not yet created + +Next step: Complete the API specification section of the design. + +Would you like me to continue from here? +``` + +--- + ## Best Practices -1. **Start with requirements** - Understand WHAT before HOW -2. **Design before code** - Architecture diagrams and data models first -3. **Small tickets** - Each ticket should be completable in 1-2 days -4. **Atomic tasks** - Each task should have one clear deliverable -5. 
**Traceability** - Always link tasks → tickets → requirements +### Requirements +- Use normative language (SHALL, MUST, SHOULD, MAY) +- Include measurable criteria (latency < Xms) +- Reference related requirements explicitly + +### Designs +- Architecture first, then details +- Include both happy path and error handling +- Provide pseudocode for complex algorithms + +### Tickets +- Clear scope boundaries +- Explicit dependencies +- Realistic estimates + +### Tasks +- Small enough to complete in one session +- Self-contained with all context needed +- Include test expectations + +### Recovery & Reference +- ALWAYS write to .omoi_os/ even when using MCP +- Re-read local files when resuming work +- Use scripts to verify backend state +- Keep local files in sync with MCP state + +## Scripts + +This skill includes utility scripts in `scripts/`: + +- `init_feature.py` - Initialize directory structure for new feature +- `generate_ids.py` - Generate next ticket/task IDs +- `validate_specs.py` - Validate spec documents for completeness +- `spec_cli.py` - **Main CLI** for viewing, validating, and syncing specs +- `api_client.py` - Direct HTTP client for OmoiOS API (bypasses MCP) +- `parse_specs.py` - Parser for .omoi_os/ markdown files +- `models.py` - Data models with cross-ticket dependency logic + +--- + +## Spec CLI (spec_cli.py) + +The main CLI tool for working with specs locally and syncing to the API. 
+ +### View Local Specs + +```bash +# From the scripts directory: +cd .claude/skills/spec-driven-dev/scripts + +# Show all specs (requirements, designs, tickets, tasks, traceability) +python spec_cli.py show all + +# Show only requirements (EARS format) +python spec_cli.py show requirements + +# Show only designs +python spec_cli.py show designs + +# Show only tickets +python spec_cli.py show tickets + +# Show only tasks (with blocking reasons) +python spec_cli.py show tasks + +# Show task dependency graph (within tickets) +python spec_cli.py show graph + +# Show cross-ticket dependency graph +python spec_cli.py show ticket-graph + +# Show full traceability matrix (Requirements → Designs → Tickets → Tasks) +python spec_cli.py show traceability + +# Show only ready tasks (not blocked) +python spec_cli.py show ready + +# Validate specs (circular deps, missing refs) +python spec_cli.py validate + +# Export to JSON (includes all specs + traceability stats) +python spec_cli.py export json +``` + +### API Integration + +```bash +# List all projects +python spec_cli.py projects --api-url http://0.0.0.0:18000 + +# Show project with all tickets and tasks +python spec_cli.py project <project-id> --api-url http://0.0.0.0:18000 + +# Dry-run sync (see what would change) +python spec_cli.py sync diff --api-url http://0.0.0.0:18000 --project-id <project-id> + +# Push local specs to API +python spec_cli.py sync push --api-url http://0.0.0.0:18000 --project-id <project-id> +``` + +### Sync Specs to API (Requirements & Designs) + +```bash +# Sync local requirements and designs to API specs +# This creates or updates specs with EARS-format requirements + +# Dry-run: See what would be synced +python spec_cli.py sync-specs diff --project-id <project-id> --api-url http://0.0.0.0:18000 + +# Push: Actually create/update specs +python spec_cli.py sync-specs push --project-id <project-id> --api-url http://0.0.0.0:18000 + +# Optional: Specify a custom spec title +python spec_cli.py sync-specs push --project-id <project-id> --spec-title "My Feature Spec" +``` + +### View API Traceability + +```bash +# View full traceability from API: Specs → Requirements → Tickets → Tasks +python spec_cli.py api-trace --api-url http://0.0.0.0:18000 +``` + +### Authentication Options + +```bash +# Via API key (recommended) +python spec_cli.py sync push --api-key ... +# Or set OMOIOS_API_KEY environment variable + +# Via JWT token +python spec_cli.py sync push --token ... +# Or set OMOIOS_TOKEN environment variable + +# Via email/password login +python spec_cli.py sync push --email user@example.com --password secret ... +# Or set OMOIOS_EMAIL and OMOIOS_PASSWORD environment variables +``` + +### Sync Behavior + +The sync command uses **create-update-skip** logic: +- **CREATE**: If ticket/task doesn't exist (matched by title) +- **UPDATE**: If exists but description differs +- **SKIP**: If exists with same description + +--- + +## Cross-Ticket Dependencies + +Tasks can be blocked by dependencies at two levels: + +### 1. Task-Level Dependencies (within a ticket) + +```yaml +# In .omoi_os/tasks/TSK-002.md +dependencies: + depends_on: [TSK-001] # Must complete TSK-001 first + blocks: [TSK-003] # TSK-003 waits for this +``` + +### 2. Ticket-Level Dependencies (cross-ticket) + +```yaml +# In .omoi_os/tickets/TKT-002.md +dependencies: + blocked_by: [TKT-001] # ALL tasks in TKT-002 wait for ALL tasks in TKT-001 + blocks: [TKT-003] # TKT-003 waits for this ticket + related: [] # Informational only +``` + +### How Cross-Ticket Blocking Works + +When a ticket has `blocked_by: [TKT-001]`: +1. ALL tasks in that ticket are blocked +2. They remain blocked until ALL tasks in TKT-001 have `status: done` +3. 
The CLI shows: `[BLOCKED: blocked by ticket(s): TKT-001]` + +### Viewing Dependency Graphs + +```bash +# Task dependencies (within tickets) +python spec_cli.py show graph +# Output: +# └─> TSK-001 (Add models) +# └─> TSK-002 (Create migration) +# └─> TSK-003 (Implement service) + +# Cross-ticket dependencies +python spec_cli.py show ticket-graph +# Output: +# └─> [○] TKT-001 (Webhook Infrastructure) [6 tasks] +# └─> [○] TKT-002 (Slack/Discord Integration) [2 tasks] +# Legend: ✓ = all tasks complete, ○ = incomplete +``` + +--- + +## Direct API Client (api_client.py) + +For programmatic access to the OmoiOS API without MCP: + +```python +from api_client import OmoiOSClient + +# Initialize client +client = OmoiOSClient( + base_url="http://0.0.0.0:18000", + api_key="your-api-key" # or token="jwt-token" +) + +# List projects +projects = await client.list_projects() + +# Get project with tickets and tasks +data = await client.get_project_with_tickets(project_id) + +# List tickets for a project +tickets = await client.list_tickets(project_id) + +# Create a ticket +from models import ParsedTicket +success, msg = await client.create_ticket(parsed_ticket, project_id) + +# Create a task +from models import ParsedTask +success, msg = await client.create_task(parsed_task, ticket_api_id) + +# Full sync from local specs (tickets/tasks) +from parse_specs import SpecParser +parser = SpecParser() +result = parser.parse_all() +summary = await client.sync(result, project_id) +``` + +### Spec/Requirement/Design API Operations + +```python +# === SPEC OPERATIONS === + +# Create a new spec +success, msg, spec_id = await client.create_spec( + title="My Feature Spec", + project_id="project-uuid", + description="Optional description" +) + +# Get spec by ID +spec = await client.get_spec(spec_id) + +# List all specs for a project +specs = await client.list_specs(project_id) + +# === REQUIREMENT OPERATIONS (EARS Format) === + +# Add a requirement using EARS format +success, msg, req_id = await 
client.add_requirement( + spec_id=spec_id, + title="User authentication", + condition="a user submits valid credentials", # WHEN clause + action="authenticate the user and create a session" # SHALL clause +) + +# Add acceptance criterion to a requirement +success, msg = await client.add_acceptance_criterion( + spec_id=spec_id, + requirement_id=req_id, + text="Session token expires after 24 hours" +) + +# === DESIGN OPERATIONS === + +# Update spec's design artifact +success, msg = await client.update_design( + spec_id=spec_id, + architecture="## Architecture\n\nJWT-based auth with refresh tokens...", + data_model="## Data Model\n\n```sql\nCREATE TABLE sessions...", + api_spec=[ + {"method": "POST", "path": "/api/auth/login", "description": "User login"}, + {"method": "POST", "path": "/api/auth/logout", "description": "User logout"} + ] +) + +# === SYNC LOCAL SPECS TO API === + +# Sync local requirements and designs to API specs +from parse_specs import SpecParser +from models import ParseResult + +parser = SpecParser() +result: ParseResult = parser.parse_all() + +# Dry-run: See what would change +summary = await client.sync_specs(result, project_id, dry_run=True) + +# Actual sync: Create/update specs +summary = await client.sync_specs(result, project_id) + +# Diff only (shorthand for dry_run=True) +summary = await client.diff_specs(result, project_id) + +# === FULL TRACEABILITY === + +# Get complete traceability from API +# Returns: Specs → Requirements → Tickets → Tasks +trace = await client.get_full_traceability(project_id) +``` + +--- + +## Complete Sync Workflow + +This section provides step-by-step instructions for syncing local specs to the OmoiOS API. + +### Prerequisites + +1. **Backend running** at your API URL +2. **Project exists** in the system (get project ID via `spec_cli.py projects`) +3. 
**Local specs** in `.omoi_os/` with proper YAML frontmatter + +### Environment Variables + +Set these environment variables to avoid passing flags on every command: + +| Variable | Description | Default | +|----------|-------------|---------| +| `OMOIOS_API_URL` | Base URL of the OmoiOS API | `http://localhost:18000` | +| `OMOIOS_API_KEY` | API key for authentication | (none) | +| `OMOIOS_TOKEN` | JWT token for authentication | (none) | + +**Example setup:** +```bash +# Set in your shell profile (.bashrc, .zshrc, etc.) +export OMOIOS_API_URL="http://0.0.0.0:18000" +export OMOIOS_API_KEY="your-api-key" + +# Or set per-session +OMOIOS_API_URL="http://0.0.0.0:18000" python spec_cli.py projects +``` + +**Priority order:** +1. Command-line flag (e.g., `--api-url`) +2. Environment variable (e.g., `OMOIOS_API_URL`) +3. Default value (`http://localhost:18000`) + +### Step 1: Prepare Local Spec Files + +Ensure your files have proper YAML frontmatter: + +**Requirements** (`.omoi_os/requirements/{feature}.md`): +```yaml +--- +id: REQ-FEATURE-001 +title: Feature Name Requirements +feature: feature-name +created: 2025-12-29 +updated: 2025-12-29 +status: draft +category: functional +priority: HIGH +design_ref: designs/feature-name.md # Links to design +--- +``` + +**Designs** (`.omoi_os/designs/{feature}.md`): +```yaml +--- +id: DESIGN-FEATURE-001 +title: Feature Name Design +feature: feature-name +created: 2025-12-29 +updated: 2025-12-29 +status: draft +requirements: + - REQ-FEATURE-001 # Links back to requirements +--- +``` + +**Tickets** (`.omoi_os/tickets/TKT-{N}.md`): +```yaml +--- +id: TKT-001 +title: Implement Feature +status: backlog +priority: HIGH +design_ref: designs/feature-name.md +dependencies: + blocked_by: [] + blocks: [] +--- +``` + +**Tasks** (`.omoi_os/tasks/TSK-{N}.md`): +```yaml +--- +id: TSK-001 +title: Create data models +status: pending +ticket_id: TKT-001 +estimate: M +dependencies: + depends_on: [] + blocks: [TSK-002] +--- +``` + +### Step 2: Validate 
Local Specs + +```bash +cd .claude/skills/spec-driven-dev/scripts + +# Validate for errors (circular deps, missing refs) +python spec_cli.py validate + +# Preview what you have locally +python spec_cli.py show all + +# Check traceability (Requirements → Designs → Tickets → Tasks) +python spec_cli.py show traceability +``` + +### Step 3: Find Your Project ID + +```bash +# List all projects (uses OMOIOS_API_URL env var or default) +python spec_cli.py projects + +# Or specify URL explicitly +python spec_cli.py projects --api-url http://0.0.0.0:18000 + +# Note the project ID (UUID format) +``` + +### Step 4: Sync Specs (Requirements & Designs) + +```bash +# DRY RUN: See what would be synced (no changes made) +python spec_cli.py sync-specs diff --project-id <project-id> + +# Output shows: +# - CREATE: New specs that will be created +# - UPDATE: Specs that exist but need updates +# - SKIP: Specs already in sync + +# PUSH: Actually sync to API +python spec_cli.py sync-specs push --project-id <project-id> + +# Optional: Custom spec title +python spec_cli.py sync-specs push \ + --project-id <project-id> \ + --spec-title "Webhook Notifications" +``` + +### Step 5: Sync Tickets & Tasks + +```bash +# DRY RUN: Preview ticket/task sync +python spec_cli.py sync diff --project-id <project-id> + +# PUSH: Actually sync tickets and tasks +python spec_cli.py sync push --project-id <project-id> +``` + +### Step 6: Verify in API + +```bash +# View full traceability from API +python spec_cli.py api-trace + +# View project with all tickets/tasks +python spec_cli.py project <project-id> +``` + +### What Gets Synced + +| Local File | API Entity | Linking | +|------------|------------|---------| +| `.omoi_os/requirements/*.md` | Spec + Requirements | `design_ref` → Design link | +| `.omoi_os/designs/*.md` | Spec design artifact | `requirements` → Requirement links | +| `.omoi_os/tickets/*.md` | Ticket | `design_ref` → links to spec | +| `.omoi_os/tasks/*.md` | Task | `ticket_id` → parent ticket | + +### Traceability Flow + +``` +Requirements (EARS format) + ↓ 
design_ref +Designs (Architecture, API specs) + ↓ requirements list +Tickets (Work items) + ↓ ticket_id +Tasks (Atomic units) +``` + +### Sync Summary Output + +After push, you'll see a summary: +``` +Summary: 2 created, 1 updated, 3 skipped, 0 failed +``` + +- **created**: New entities added to API +- **updated**: Existing entities modified +- **skipped**: Already in sync (no changes needed) +- **failed**: Errors (check messages) + +### Common Issues + +**"Spec not found"**: Run `sync-specs push` before `sync push` (specs must exist first) + +**"DetachedInstanceError"**: Backend session issue - restart backend + +**"Project not found"**: Verify project ID with `spec_cli.py projects` + +**Missing linked_design**: Ensure requirements have `design_ref` in frontmatter + +--- + +## References + +- [references/requirements_template.md](references/requirements_template.md) - Full requirements template with YAML frontmatter +- [references/design_template.md](references/design_template.md) - Full design template with YAML frontmatter +- [references/ticket_template.md](references/ticket_template.md) - Ticket template with dependencies +- [references/task_template.md](references/task_template.md) - Task template with dependencies +- [references/claude_sdk_patterns.md](references/claude_sdk_patterns.md) - Claude Agent SDK integration patterns diff --git a/backend/omoi_os/sandbox_skills/spec-driven-dev/references/claude_sdk_patterns.md b/backend/omoi_os/sandbox_skills/spec-driven-dev/references/claude_sdk_patterns.md new file mode 100644 index 00000000..8643f413 --- /dev/null +++ b/backend/omoi_os/sandbox_skills/spec-driven-dev/references/claude_sdk_patterns.md @@ -0,0 +1,393 @@ +# Claude Agent SDK Integration Patterns + +Reference patterns for integrating Claude Agent SDK Python into spec-driven development. 
+ +--- + +## Overview + +When designing features that involve AI agents, use these patterns to define: +- Custom tools for agent capabilities +- Hooks for lifecycle control +- Permission callbacks for security +- Session management for stateful interactions + +--- + +## Custom Tools + +### Basic Tool Definition + +```python +from typing import Any +from claude_agent_sdk import tool, create_sdk_mcp_server + +@tool("tool_name", "Description of what the tool does", { + "param1": str, + "param2": int, + "param3": bool +}) +async def tool_name(args: dict[str, Any]) -> dict[str, Any]: + """Implementation of the tool.""" + result = process(args["param1"], args["param2"]) + + return { + "content": [ + {"type": "text", "text": f"Result: {result}"} + ] + } +``` + +### Tool with Error Handling + +```python +@tool("safe_operation", "Operation with error handling", {"input": str}) +async def safe_operation(args: dict[str, Any]) -> dict[str, Any]: + try: + result = perform_operation(args["input"]) + return { + "content": [{"type": "text", "text": f"Success: {result}"}] + } + except ValidationError as e: + return { + "content": [{"type": "text", "text": f"Validation error: {e}"}], + "is_error": True + } + except Exception as e: + return { + "content": [{"type": "text", "text": f"Error: {e}"}], + "is_error": True + } +``` + +### Tool with State Access + +```python +class ApplicationState: + def __init__(self): + self.items: list[str] = [] + self.counter: int = 0 + +state = ApplicationState() + +@tool("add_item", "Add item to state", {"item": str}) +async def add_item(args: dict[str, Any]) -> dict[str, Any]: + state.items.append(args["item"]) + state.counter += 1 + return { + "content": [{ + "type": "text", + "text": f"Added '{args['item']}'. 
Total: {state.counter}" + }] + } + +@tool("list_items", "List all items", {}) +async def list_items(args: dict[str, Any]) -> dict[str, Any]: + if not state.items: + return {"content": [{"type": "text", "text": "No items"}]} + + items_text = "\n".join(f"- {item}" for item in state.items) + return {"content": [{"type": "text", "text": f"Items:\n{items_text}"}]} +``` + +### Creating MCP Server + +```python +from claude_agent_sdk import create_sdk_mcp_server + +# Create server with tools +server = create_sdk_mcp_server( + name="my_tools", + version="1.0.0", + tools=[add_item, list_items, safe_operation] +) +``` + +--- + +## Hooks + +### PreToolUse Hook (Validation/Blocking) + +```python +from claude_agent_sdk import HookMatcher, HookInput, HookContext, HookJSONOutput + +async def validate_before_tool( + input_data: HookInput, + tool_use_id: str | None, + context: HookContext +) -> HookJSONOutput: + """Validate tool input before execution.""" + tool_name = input_data["tool_name"] + tool_input = input_data["tool_input"] + + # Block dangerous operations + if tool_name == "Bash": + command = tool_input.get("command", "") + dangerous_patterns = ["rm -rf", "sudo", "chmod 777"] + + for pattern in dangerous_patterns: + if pattern in command: + return { + "hookSpecificOutput": { + "hookEventName": "PreToolUse", + "permissionDecision": "deny", + "permissionDecisionReason": f"Blocked: {pattern}" + } + } + + # Allow by default + return {} +``` + +### PostToolUse Hook (Review/Feedback) + +```python +async def review_after_tool( + input_data: HookInput, + tool_use_id: str | None, + context: HookContext +) -> HookJSONOutput: + """Review tool output and provide feedback.""" + tool_response = input_data.get("tool_response", "") + + # Check for errors + if "error" in str(tool_response).lower(): + return { + "systemMessage": "The command produced an error", + "reason": "Tool execution failed", + "hookSpecificOutput": { + "hookEventName": "PostToolUse", + "additionalContext": "Consider a 
different approach." + } + } + + return {} +``` + +### Registering Hooks + +```python +from claude_agent_sdk import ClaudeAgentOptions, HookMatcher + +options = ClaudeAgentOptions( + hooks={ + "PreToolUse": [ + HookMatcher(matcher="Bash", hooks=[validate_before_tool]), + HookMatcher(matcher="Write", hooks=[validate_before_tool]), + ], + "PostToolUse": [ + HookMatcher(matcher=None, hooks=[review_after_tool]), # All tools + ], + } +) +``` + +--- + +## Permission Callbacks + +### Custom Permission Logic + +```python +from claude_agent_sdk import ( + ToolPermissionContext, + PermissionResultAllow, + PermissionResultDeny +) + +async def can_use_tool( + tool_name: str, + tool_input: dict, + context: ToolPermissionContext +) -> PermissionResultAllow | PermissionResultDeny: + """Custom tool permission logic.""" + + # Block writes to protected paths + if tool_name == "Write": + file_path = tool_input.get("file_path", "") + protected = ["config", "secrets", ".env"] + + if any(p in file_path.lower() for p in protected): + return PermissionResultDeny( + behavior="deny", + message="Cannot write to protected files", + interrupt=False + ) + + # Modify input for safety + if tool_name == "Bash": + command = tool_input.get("command", "") + if command.startswith("rm"): + modified_input = {**tool_input, "command": f"{command} -i"} + return PermissionResultAllow( + behavior="allow", + updated_input=modified_input + ) + + return PermissionResultAllow(behavior="allow") +``` + +--- + +## Agent Configuration + +### Full Configuration Example + +```python +from claude_agent_sdk import ClaudeAgentOptions, HookMatcher + +options = ClaudeAgentOptions( + # System prompt + system_prompt="You are a helpful assistant.", + + # Tool configuration + allowed_tools=[ + "Read", "Write", "Edit", + "mcp__my_tools__add_item", + "mcp__my_tools__list_items", + ], + disallowed_tools=["Bash"], + + # MCP servers + mcp_servers={ + "my_tools": my_tools_server, + }, + + # Permission callback + 
can_use_tool=can_use_tool, + + # Hooks + hooks={ + "PreToolUse": [ + HookMatcher(matcher="Write", hooks=[validate_before_tool]), + ], + "PostToolUse": [ + HookMatcher(matcher=None, hooks=[review_after_tool]), + ], + }, + + # Execution limits + max_turns=20, + max_budget_usd=2.0, + + # Session management + fork_session=True, + + # Model selection + model="claude-sonnet-4-5", +) +``` + +--- + +## Usage Patterns + +### Simple Query + +```python +from claude_agent_sdk import query + +async def simple_query(): + async for msg in query(prompt="Hello!", options=options): + if isinstance(msg, AssistantMessage): + for block in msg.content: + if isinstance(block, TextBlock): + print(block.text) +``` + +### Stateful Client + +```python +from claude_agent_sdk import ClaudeSDKClient + +async def stateful_session(): + async with ClaudeSDKClient(options=options) as client: + # First query + await client.query("Create a new item called 'apple'") + async for msg in client.receive_response(): + process_message(msg) + + # Follow-up (maintains context) + await client.query("Now add 'banana'") + async for msg in client.receive_response(): + process_message(msg) +``` + +### Message Processing + +```python +from claude_agent_sdk import ( + AssistantMessage, + ResultMessage, + TextBlock, + ToolUseBlock, +) + +def process_message(msg): + if isinstance(msg, AssistantMessage): + for block in msg.content: + if isinstance(block, TextBlock): + print(f"Text: {block.text}") + elif isinstance(block, ToolUseBlock): + print(f"Tool: {block.name} -> {block.input}") + + elif isinstance(msg, ResultMessage): + print(f"Done. 
Turns: {msg.num_turns}, Cost: ${msg.total_cost_usd:.4f}") +``` + +--- + +## Requirements Integration + +When writing requirements for agent features: + +```markdown +#### REQ-AGENT-TOOL-001: Custom Tool Definition +THE SYSTEM SHALL define custom tools using the `@tool` decorator with: +- Unique tool name (snake_case) +- Description for LLM understanding +- Input schema with type hints + +#### REQ-AGENT-HOOK-001: PreToolUse Validation +THE SYSTEM SHALL implement PreToolUse hooks to validate and optionally block: +- Dangerous operations +- Invalid input parameters +- Unauthorized access patterns + +#### REQ-AGENT-PERM-001: Permission Callback +THE SYSTEM SHALL implement `can_use_tool` callback for: +- File path access control +- Command modification +- Rate limiting +``` + +--- + +## Design Integration + +When writing design docs for agent features: + +```markdown +## Agent Architecture + +### Custom Tools + +| Tool | Purpose | Input Schema | +|------|---------|--------------| +| `add_item` | Add item to state | `{item: str}` | +| `list_items` | List all items | `{}` | + +### Hooks Configuration + +| Hook | Matcher | Purpose | +|------|---------|---------| +| `PreToolUse` | `Bash` | Block dangerous commands | +| `PostToolUse` | `*` | Log all tool usage | + +### Permission Model + +- File writes restricted to `/workspace/` directory +- Bash commands auto-modified with safety flags +- Rate limit: 100 tool calls per session +``` diff --git a/backend/omoi_os/sandbox_skills/spec-driven-dev/references/design_template.md b/backend/omoi_os/sandbox_skills/spec-driven-dev/references/design_template.md new file mode 100644 index 00000000..c9318b07 --- /dev/null +++ b/backend/omoi_os/sandbox_skills/spec-driven-dev/references/design_template.md @@ -0,0 +1,385 @@ +# Design Document Template + +Use this template for `.omoi_os/designs/{feature-name}.md` files. 
+ +--- + +```markdown +--- +id: DESIGN-{FEATURE}-001 +title: {Feature Name} Design +feature: {feature-name} +created: {YYYY-MM-DD} +updated: {YYYY-MM-DD} +status: draft +requirements: + - REQ-{DOMAIN}-001 + - REQ-{DOMAIN}-002 +--- + +# {Feature Name} - Product Design Document + +## Document Overview + +{Description of the feature/system being designed} + +- **Purpose & Scope** + - {Goal 1} + - {Goal 2} + - {Non-goals/Out of scope} + +- **Target Audience** + - {Audience 1 - e.g., implementation teams} + - {Audience 2 - e.g., system architects} + +- **Related Documents** + - Requirements: `{path to requirements doc}` + - Design: `{paths to related design docs}` + +--- + +## Architecture Overview + +### High-Level Architecture + +```mermaid +flowchart TD + subgraph Layer1[{Layer Name}] + C1[Component 1] + C2[Component 2] + end + + subgraph Layer2[{Layer Name}] + C3[Component 3] + C4[Component 4] + end + + subgraph External[External Systems] + E1[External System 1] + E2[External System 2] + end + + C1 -->|action| C2 + C2 -->|action| C3 + C3 --> E1 +``` + +### Component Responsibilities + +| Component | Layer | Responsibilities | +|-----------|-------|------------------| +| {Component 1} | {Layer} | {Primary responsibilities} | +| {Component 2} | {Layer} | {Primary responsibilities} | +| {Component 3} | {Layer} | {Primary responsibilities} | + +### System Boundaries + +- **Within scope of {System}**: + - {Responsibility 1} + - {Responsibility 2} + +- **Out of scope (delegated)**: + - {Delegated responsibility 1} + - {Delegated responsibility 2} + +--- + +## Component Details + +### {Component Name} + +#### Responsibilities +- {Responsibility 1} +- {Responsibility 2} + +#### Key Interfaces +- `method_1(param1, param2) -> ReturnType` +- `method_2(param) -> ReturnType` + +#### Implementation Notes +{Important implementation details, algorithms, or patterns} + +--- + +## Data Models + +### Database Schema + +```sql +-- {Entity} table +CREATE TABLE {entities} ( + id UUID 
PRIMARY KEY DEFAULT gen_random_uuid(), + name VARCHAR(255) NOT NULL, + status VARCHAR(50) NOT NULL CHECK (status IN ('active', 'inactive')), + parent_id UUID REFERENCES {parents}(id) ON DELETE CASCADE, + metadata JSONB DEFAULT '{}'::JSONB, + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW() +); + +CREATE INDEX idx_{entities}_status ON {entities}(status); +CREATE INDEX idx_{entities}_parent_id ON {entities}(parent_id); +``` + +### Pydantic Models + +```python +from __future__ import annotations +from datetime import datetime +from enum import Enum +from typing import Dict, List, Optional, Any +from pydantic import BaseModel, Field + + +class StatusEnum(str, Enum): + ACTIVE = "active" + INACTIVE = "inactive" + + +class {Entity}(BaseModel): + id: str + name: str + status: StatusEnum + parent_id: Optional[str] = None + metadata: Dict[str, Any] = Field(default_factory=dict) + created_at: datetime + updated_at: datetime + + +class {Entity}Create(BaseModel): + name: str + parent_id: Optional[str] = None + metadata: Optional[Dict[str, Any]] = None + + +class {Entity}Update(BaseModel): + name: Optional[str] = None + status: Optional[StatusEnum] = None + metadata: Optional[Dict[str, Any]] = None +``` + +--- + +## API Specifications + +### REST Endpoints + +| Method | Path | Purpose | Request | Response | +|--------|------|---------|---------|----------| +| POST | `/api/{resource}` | Create resource | `{Entity}Create` | `{Entity}` | +| GET | `/api/{resource}/{id}` | Get resource | - | `{Entity}` | +| PUT | `/api/{resource}/{id}` | Update resource | `{Entity}Update` | `{Entity}` | +| DELETE | `/api/{resource}/{id}` | Delete resource | - | `{success: true}` | +| GET | `/api/{resource}` | List resources | `?status=&limit=&offset=` | `{items: [], total}` | + +### Request/Response Models + +```python +class CreateRequest(BaseModel): + name: str + parent_id: Optional[str] = None + + +class ListResponse(BaseModel): + items: List[{Entity}] 
+ total: int + limit: int + offset: int +``` + +### Error Handling + +| Status Code | Error Code | Description | +|-------------|------------|-------------| +| 400 | `INVALID_INPUT` | Request validation failed | +| 404 | `NOT_FOUND` | Resource not found | +| 409 | `CONFLICT` | Resource conflict (duplicate, etc.) | +| 500 | `INTERNAL_ERROR` | Internal server error | + +--- + +## Integration Points + +### {External System} Integration + +- **Purpose**: {Why this integration exists} +- **Protocol**: {REST/GraphQL/WebSocket/etc.} +- **Authentication**: {How auth works} + +#### Integration Flow + +```mermaid +sequenceDiagram + participant Our as Our System + participant Ext as External System + + Our->>Ext: Request + Ext-->>Our: Response +``` + +### Event Contracts + +| Event | When Emitted | Payload | +|-------|--------------|---------| +| `{entity}_created` | After creation | `{id, name, created_at}` | +| `{entity}_updated` | After update | `{id, changes, updated_at}` | +| `{entity}_deleted` | After deletion | `{id, deleted_at}` | + +--- + +## Implementation Details + +### Core Algorithm + +```python +def core_operation(input_data: InputType) -> OutputType: + """ + {Description of the algorithm} + + Steps: + 1. {Step 1} + 2. {Step 2} + 3. 
{Step 3} + """ + # Step 1: {Description} + result = process_step_1(input_data) + + # Step 2: {Description} + result = process_step_2(result) + + # Step 3: {Description} + return finalize(result) +``` + +### Operation Flow + +```mermaid +sequenceDiagram + participant Client + participant API + participant Service + participant Store + participant External + + Client->>API: Request + API->>Service: process(request) + Service->>Store: persist(data) + Store-->>Service: result + Service->>External: notify(event) + External-->>Service: ack + Service-->>API: response + API-->>Client: Response +``` + +--- + +## Configuration + +| Parameter | Default | Range | Description | +|-----------|---------|-------|-------------| +| `timeout_seconds` | 30 | 1-300 | Operation timeout | +| `max_retries` | 3 | 0-10 | Maximum retry attempts | +| `batch_size` | 100 | 1-1000 | Batch processing size | + +--- + +## Performance Considerations + +### Database Indexing +- {Index 1 purpose} +- {Index 2 purpose} + +### Caching Strategy +- {What to cache} +- {Cache TTL} +- {Invalidation strategy} + +### Batch Processing +- {When to batch} +- {Batch size considerations} + +--- + +## Security Considerations + +### Authentication +- {Auth mechanism} + +### Authorization +- {Permission model} + +### Data Protection +- {Sensitive data handling} + +--- + +## Related Documents + +- **Requirements**: `{path}` +- **Design**: `{paths}` + +--- + +## Quality Checklist + +- [ ] All requirements addressed +- [ ] Architecture diagram included +- [ ] API specifications complete +- [ ] Database schemas defined +- [ ] Integration points documented +- [ ] Error handling specified +- [ ] Security considerations addressed + +--- + +## Revision History + +| Version | Date | Author | Changes | +|---------|------|--------|---------| +| 1.0 | {YYYY-MM-DD} | {Author} | Initial design | +``` + +--- + +## Frontmatter Field Reference + +| Field | Type | Required | Description | +|-------|------|----------|-------------| +| 
`id` | string | Yes | Unique design ID (DESIGN-FEATURE-001) | +| `title` | string | Yes | Human-readable title | +| `feature` | string | Yes | Feature name (kebab-case) | +| `created` | date | Yes | Creation date (YYYY-MM-DD) | +| `updated` | date | Yes | Last update date (YYYY-MM-DD) | +| `status` | string | Yes | draft, review, approved | +| `requirements` | list | No | List of requirement IDs this design implements | +| `tickets` | list | No | Linked ticket IDs for this design | + +--- + +## Design ID Conventions + +### Format +``` +DESIGN-{FEATURE}-{NUM} +``` + +### Examples +- `DESIGN-AUTH-001` - Authentication system design +- `DESIGN-WEBHOOK-001` - Webhook notifications design +- `DESIGN-SYNC-001` - Data synchronization design + +### Numbering +- Start at 001 +- Increment sequentially within feature +- Don't reuse deleted numbers + +--- + +## Best Practices + +1. **Architecture First** - Start with high-level before diving into details +2. **Visual Diagrams** - Use Mermaid for architecture, sequences, state machines +3. **Concrete Examples** - Include pseudocode and example payloads +4. **Integration Focus** - Clearly define boundaries and contracts +5. **Traceability** - Link back to requirements throughout +6. **Bidirectional Links** - Reference requirements in frontmatter, link design from requirements diff --git a/backend/omoi_os/sandbox_skills/spec-driven-dev/references/requirements_template.md b/backend/omoi_os/sandbox_skills/spec-driven-dev/references/requirements_template.md new file mode 100644 index 00000000..00eea7c1 --- /dev/null +++ b/backend/omoi_os/sandbox_skills/spec-driven-dev/references/requirements_template.md @@ -0,0 +1,279 @@ +# Requirements Document Template + +Use this template for `.omoi_os/requirements/{feature-name}.md` files. 
+ +--- + +```markdown +--- +id: REQ-{DOMAIN}-001 +title: {Feature Name} Requirements +feature: {feature-name} +created: {YYYY-MM-DD} +updated: {YYYY-MM-DD} +status: draft +category: functional +priority: HIGH +design_ref: designs/{feature-name}.md +condition: "{EARS WHEN clause - triggering condition}" +action: "{EARS SHALL clause - expected behavior}" +--- + +# {Feature Name} Requirements + +## Document Overview + +{2-3 sentence overview of what this requirements document covers} + +**Parent Document**: {Link to parent if applicable} + +--- + +## 1. {First Domain Area} + +#### REQ-{DOMAIN}-{AREA}-001: {Requirement Title} +THE SYSTEM SHALL {normative requirement statement}. + +{Additional details, rationale, or constraints} + +#### REQ-{DOMAIN}-{AREA}-002: {Requirement Title} +THE SYSTEM SHALL {normative requirement statement}. + +--- + +## 2. State Machine (If Applicable) + +#### REQ-{DOMAIN}-SM-001: States +{Feature} SHALL support the following states: + +```mermaid +stateDiagram-v2 + [*] --> state1 + state1 --> state2 : Trigger + state2 --> state3 : Trigger + state3 --> [*] +``` + +#### REQ-{DOMAIN}-SM-002: Transitions +Valid transitions: +``` +state1 → state2 +state2 → state3 | error +state3 → done +``` + +#### REQ-{DOMAIN}-SM-003: Guards +- Transition to `state2` requires {condition}. +- Transition to `state3` requires {condition}. + +--- + +## 3. Data Model Requirements + +### 3.1 {Entity} Model +#### REQ-{DOMAIN}-DM-001 +{Entity} SHALL include the following fields: +- `field1: type` (description) +- `field2: type` (description) +- `field3: type | null` (description) + +Update rules: +- On {event}, {action}. +- On {event}, {action}. + +### 3.2 {Related Entity} (DB Table) +#### REQ-{DOMAIN}-DM-002 +The system SHALL persist {entity} with at least the following fields and constraints: +- `id TEXT PK` +- `parent_id TEXT NOT NULL FK -> parent(id)` +- `status VARCHAR(50) NOT NULL` +- `created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP` + +Foreign keys must be enforced. 
+ +--- + +## 4. Configuration (Normative) + +| Parameter | Default | Range | Description | +|-----------|---------|-------|-------------| +| param_1 | value | min-max | Description | +| param_2 | value | bool | Description | +| param_3 | value | min-max | Description | + +--- + +## 5. API (Normative) + +### 5.1 Endpoints Table + +| Endpoint | Method | Purpose | Request Body (min) | Responses | +|----------|--------|---------|--------------------|-----------| +| /api/{resource} | POST | Create resource | `{ field1, field2 }` | 200: `{ id, status }`; 400: `{ error }` | +| /api/{resource}/{id} | GET | Get resource | - | 200: `{ resource }`; 404: `{ error }` | +| /api/{resource}/{id}/action | POST | Perform action | `{ action_data }` | 200: `{ result }`; 400: `{ error }` | + +Notes: +- All responses MUST include a stable `error` field on failure. +- {Additional endpoint constraints} + +### 5.2 WebSocket/Event Contracts + +| Event | When Emitted | Payload (min) | +|-------|--------------|---------------| +| event_started | {trigger condition} | `{ id, timestamp }` | +| event_completed | {trigger condition} | `{ id, result }` | +| event_failed | {trigger condition} | `{ id, error, reason }` | + +--- + +## 6. SLOs & Performance + +#### REQ-{DOMAIN}-SLO-001 +{Operation} should complete within {time} under normal conditions. + +#### REQ-{DOMAIN}-SLO-002 +{Metric} P95 < {threshold}. + +--- + +## 7. Security & Audit + +#### REQ-{DOMAIN}-SEC-001 +Only {authorized actors} MAY call {protected endpoints}; all actions MUST be audited with actor, resource_id, and result. + +--- + +## 8. Integration Requirements + +#### REQ-{DOMAIN}-INT-001: {System} Integration +THE SYSTEM SHALL integrate with {external system} for {purpose}. + +Integration points: +- {Integration point 1} +- {Integration point 2} + +--- + +## 9. 
Pydantic Reference Models + +```python +from __future__ import annotations +from datetime import datetime +from enum import Enum +from typing import Any, Dict, List, Optional +from pydantic import BaseModel, Field + + +class StatusEnum(str, Enum): + PENDING = "pending" + ACTIVE = "active" + COMPLETED = "completed" + FAILED = "failed" + + +class {Entity}(BaseModel): + id: str + status: StatusEnum + field1: str + field2: Optional[str] = None + created_at: datetime + + +class {Entity}Request(BaseModel): + field1: str + field2: Optional[str] = None + + +class {Entity}Response(BaseModel): + status: str + message: str + data: Optional[{Entity}] = None +``` + +--- + +## Related Documents + +- [{Related Feature} Requirements](./{related}.md) +- [{System} Design](../designs/{system}.md) + +--- + +## Revision History + +| Version | Date | Author | Changes | +|---------|------|--------|---------| +| 1.0 | {YYYY-MM-DD} | {Author} | Initial draft | +``` + +--- + +## Frontmatter Field Reference + +| Field | Type | Required | Description | +|-------|------|----------|-------------| +| `id` | string | Yes | Unique requirement ID (REQ-DOMAIN-001) | +| `title` | string | Yes | Human-readable title | +| `feature` | string | Yes | Feature name (kebab-case) | +| `created` | date | Yes | Creation date (YYYY-MM-DD) | +| `updated` | date | Yes | Last update date (YYYY-MM-DD) | +| `status` | string | Yes | draft, review, approved | +| `category` | string | Yes | functional, non-functional, constraint | +| `priority` | string | Yes | CRITICAL, HIGH, MEDIUM, LOW | +| `design_ref` | string | No | Path to linked design doc (e.g., `designs/feature.md`) | +| `condition` | string | No | EARS "WHEN" clause | +| `action` | string | No | EARS "THE SYSTEM SHALL" clause | +| `tickets` | list | No | Linked ticket IDs implementing this requirement | + +--- + +## Requirement ID Conventions + +### Format +``` +REQ-{DOMAIN}-{AREA}-{NUM} +``` + +### Domain Prefixes (Examples) +- `AUTH` - 
Authentication/Authorization +- `AGENT` - Agent system +- `COLLAB` - Collaboration +- `NOTIF` - Notifications +- `SYNC` - Synchronization +- `VAL` - Validation +- `MON` - Monitoring +- `TKT` - Ticketing +- `TSK` - Tasks + +### Area Codes (Examples) +- `SM` - State Machine +- `DM` - Data Model +- `API` - API/Endpoints +- `SEC` - Security +- `SLO` - Service Level Objectives +- `INT` - Integration +- `CFG` - Configuration + +### Numbering +- Start at 001 +- Increment sequentially within area +- Don't reuse deleted numbers + +## Normative Language + +Use RFC 2119 keywords: + +- **SHALL** / **MUST** - Absolute requirement +- **SHALL NOT** / **MUST NOT** - Absolute prohibition +- **SHOULD** - Recommended but not required +- **SHOULD NOT** - Discouraged but not prohibited +- **MAY** - Optional + +## Best Practices + +1. **Be Specific** - "latency < 100ms" not "fast response" +2. **Be Testable** - Every requirement should have clear pass/fail criteria +3. **Cross-Reference** - Link related requirements explicitly +4. **Include Rationale** - Explain why, not just what +5. **Version Control** - Track all changes in revision history diff --git a/backend/omoi_os/sandbox_skills/spec-driven-dev/references/task_template.md b/backend/omoi_os/sandbox_skills/spec-driven-dev/references/task_template.md new file mode 100644 index 00000000..a73581e0 --- /dev/null +++ b/backend/omoi_os/sandbox_skills/spec-driven-dev/references/task_template.md @@ -0,0 +1,190 @@ +# Task Template + +Use this template for `.omoi_os/tasks/TSK-{NUM}.md` files. + +**IMPORTANT**: All task files MUST include YAML frontmatter for programmatic parsing. 
+ +--- + +## Template + +```markdown +--- +id: TSK-{NUM} +title: {Task Title} +status: pending # pending | in_progress | review | done | blocked +parent_ticket: TKT-{NUM} +estimate: M # S | M | L +created: {YYYY-MM-DD} +assignee: null # agent-id or null +dependencies: + depends_on: [] # Task IDs that must complete first + blocks: [] # Task IDs that cannot start until this completes +--- + +# TSK-{NUM}: {Task Title} + +## Objective + +{1-2 sentences describing what this task accomplishes} + +--- + +## Deliverables + +- [ ] `{path/to/file1.py}` - {What this file should contain/do} +- [ ] `{path/to/file2.py}` - {What this file should contain/do} +- [ ] `{path/to/test_file.py}` - {Test coverage} + +--- + +## Implementation Notes + +### Approach +{Step-by-step approach to implementing this task} + +1. {Step 1} +2. {Step 2} +3. {Step 3} + +### Code Patterns +{Specific patterns, libraries, or conventions to use} + +```python +# Example code pattern +def example_pattern(): + pass +``` + +### References +- {Link to documentation} +- {Link to similar implementation} +- {Link to design section} + +--- + +## Acceptance Criteria + +- [ ] {Specific criterion 1} +- [ ] {Specific criterion 2} +- [ ] All tests pass +- [ ] No linting errors +- [ ] Type hints complete + +--- + +## Testing Requirements + +### Unit Tests +```python +# Expected test cases +def test_example(): + # Test {scenario} + pass +``` + +### Edge Cases +- {Edge case 1 to handle} +- {Edge case 2 to handle} + +--- + +## Notes + +{Additional context, decisions, or warnings} +``` + +--- + +## Frontmatter Field Reference + +| Field | Type | Required | Description | +|-------|------|----------|-------------| +| `id` | string | Yes | Unique task ID (TSK-001, TSK-FEAT-001) | +| `title` | string | Yes | Human-readable task title | +| `status` | string | Yes | Current status (see Status Definitions) | +| `parent_ticket` | string | Yes | Parent ticket ID | +| `estimate` | string | Yes | T-shirt size (S/M/L) | +| `created` | 
date | Yes | Creation date (YYYY-MM-DD) | +| `assignee` | string | No | Agent ID or null if unassigned | +| `dependencies.depends_on` | list | No | Task IDs that must complete first | +| `dependencies.blocks` | list | No | Task IDs waiting on this | + +--- + +## Task ID Conventions + +### Format +``` +TSK-{NUM} +TSK-{PREFIX}-{NUM} +``` + +### Numbering +- Start at 001 +- Increment sequentially +- Can have prefix: `TSK-COLLAB-001` + +--- + +## Status Definitions + +| Status | Description | +|--------|-------------| +| `pending` | Not yet started | +| `in_progress` | Currently being worked on | +| `review` | Complete, awaiting review | +| `done` | Completed and verified | +| `blocked` | Cannot proceed | + +--- + +## Estimate Definitions + +| Size | Time | Description | +|------|------|-------------| +| `S` | < 2 hours | Simple, straightforward | +| `M` | 2-4 hours | Moderate complexity | +| `L` | 4-8 hours | Complex, may need splitting | + +--- + +## Dependency Rules + +1. **No Circular Dependencies**: A task cannot depend on itself or create a cycle +2. **Same Ticket Preferred**: Dependencies should ideally be within the same parent ticket +3. **Cross-Ticket Dependencies**: Use ticket-level `blocked_by` instead when possible +4. **Keep Chains Short**: Prefer max 3-4 levels of task dependency depth + +### Example Dependency Graph + +``` +TSK-001 (Add model fields) + └─ blocks: TSK-002 + +TSK-002 (Create migration) + ├─ depends_on: TSK-001 + └─ blocks: TSK-003, TSK-004 + +TSK-003 (Implement service) + ├─ depends_on: TSK-002 + └─ blocks: TSK-005 + +TSK-004 (Update API routes) + ├─ depends_on: TSK-002 + └─ blocks: TSK-005 + +TSK-005 (Add tests) + └─ depends_on: TSK-003, TSK-004 +``` + +--- + +## Best Practices + +1. **Atomic** - One clear deliverable per task +2. **Self-Contained** - All context needed is in the task +3. **Testable** - Clear acceptance criteria +4. **Time-Boxed** - Should complete in one session (< 8 hours) +5. 
**Linked** - Always reference parent ticket in frontmatter +6. **Explicit Dependencies** - List all dependencies in frontmatter, not just prose diff --git a/backend/omoi_os/sandbox_skills/spec-driven-dev/references/ticket_template.md b/backend/omoi_os/sandbox_skills/spec-driven-dev/references/ticket_template.md new file mode 100644 index 00000000..dd37cf4b --- /dev/null +++ b/backend/omoi_os/sandbox_skills/spec-driven-dev/references/ticket_template.md @@ -0,0 +1,210 @@ +# Ticket Template + +Use this template for `.omoi_os/tickets/TKT-{NUM}.md` files. + +**IMPORTANT**: All ticket files MUST include YAML frontmatter for programmatic parsing. + +--- + +## Template + +```markdown +--- +id: TKT-{NUM} +title: {Ticket Title} +status: backlog # backlog | analyzing | building | testing | done | blocked +priority: MEDIUM # CRITICAL | HIGH | MEDIUM | LOW +estimate: M # S | M | L | XL +created: {YYYY-MM-DD} +updated: {YYYY-MM-DD} +feature: {feature-name} +requirements: + - REQ-XXX-YYY +design_ref: designs/{feature-name}.md +tasks: + - TSK-{NUM} + - TSK-{NUM} +dependencies: + blocked_by: [] # Tickets that must complete before this can start + blocks: [] # Tickets that cannot start until this completes + related: [] # Tickets that are related but not blocking +--- + +# TKT-{NUM}: {Ticket Title} + +## Description + +{2-3 paragraph description of what this ticket accomplishes} + +### Context +{Why this work is needed, background information} + +### Goals +- {Goal 1} +- {Goal 2} + +### Non-Goals +- {What this ticket does NOT include} + +--- + +## Acceptance Criteria + +- [ ] {Specific, testable criterion 1} +- [ ] {Specific, testable criterion 2} +- [ ] {Specific, testable criterion 3} +- [ ] All unit tests pass +- [ ] Integration tests pass +- [ ] Documentation updated + +--- + +## Technical Notes + +### Implementation Approach +{High-level approach to implementing this ticket} + +### Key Files +- `{path/to/file1.py}` - {Purpose} +- `{path/to/file2.py}` - {Purpose} + +### API 
Changes +{Summary of API changes if any} + +### Database Changes +{Summary of schema changes if any} + +--- + +## Testing Strategy + +### Unit Tests +- {What to test} + +### Integration Tests +- {What to test} + +### Manual Testing +- {Test scenarios} + +--- + +## Rollback Plan + +{How to revert changes if something goes wrong} + +--- + +## Notes + +{Additional notes, decisions, or context} +``` + +--- + +## Frontmatter Field Reference + +| Field | Type | Required | Description | +|-------|------|----------|-------------| +| `id` | string | Yes | Unique ticket ID (TKT-001, TKT-FEAT-001) | +| `title` | string | Yes | Human-readable ticket title | +| `status` | string | Yes | Current status (see Status Definitions) | +| `priority` | string | Yes | Priority level (CRITICAL/HIGH/MEDIUM/LOW) | +| `estimate` | string | Yes | T-shirt size (S/M/L/XL) | +| `created` | date | Yes | Creation date (YYYY-MM-DD) | +| `updated` | date | Yes | Last update date (YYYY-MM-DD) | +| `feature` | string | No | Feature name for grouping | +| `requirements` | list | No | Linked requirement IDs | +| `design_ref` | string | No | Path to design document | +| `tasks` | list | No | Child task IDs | +| `dependencies.blocked_by` | list | No | Ticket IDs that must complete first | +| `dependencies.blocks` | list | No | Ticket IDs waiting on this | +| `dependencies.related` | list | No | Related ticket IDs (non-blocking) | + +--- + +## Ticket ID Conventions + +### Format +``` +TKT-{NUM} +TKT-{PREFIX}-{NUM} +``` + +### Numbering +- Start at 001 for new projects +- Increment sequentially +- Never reuse deleted numbers +- Can have feature prefix: `TKT-COLLAB-001` + +--- + +## Status Definitions + +| Status | Description | +|--------|-------------| +| `backlog` | Ticket created but not yet started | +| `analyzing` | Requirements/design analysis in progress | +| `building` | Implementation in progress | +| `testing` | Testing and validation in progress | +| `done` | Completed and verified | +| `blocked` 
| Cannot proceed due to dependency or issue | + +--- + +## Priority Definitions + +| Priority | Response Time | Description | +|----------|---------------|-------------| +| `CRITICAL` | Immediate | Production-breaking, security issues | +| `HIGH` | 1-2 days | Important feature, significant bug | +| `MEDIUM` | 1 week | Normal priority work | +| `LOW` | Backlog | Nice-to-have, minor improvements | + +--- + +## Estimate Definitions + +| Size | Time Range | Complexity | +|------|------------|------------| +| `S` | 1-4 hours | Simple, well-understood | +| `M` | 0.5-2 days | Moderate complexity | +| `L` | 3-5 days | Complex, multiple components | +| `XL` | 1-2 weeks | Very complex, should consider splitting | + +--- + +## Dependency Rules + +1. **No Circular Dependencies**: A ticket cannot be blocked by itself or create a cycle +2. **Explicit Over Implicit**: Always list dependencies in frontmatter, not just in prose +3. **Use `related` for Informational Links**: Non-blocking relationships go in `related` +4. **Keep Chains Short**: Prefer max 3-4 levels of dependency depth + +### Example Dependency Graph + +``` +TKT-001 (Infrastructure) + └─ blocks: TKT-002, TKT-003 + +TKT-002 (User Model) + ├─ blocked_by: TKT-001 + └─ blocks: TKT-004 + +TKT-003 (API Framework) + ├─ blocked_by: TKT-001 + └─ blocks: TKT-004 + +TKT-004 (User API) + └─ blocked_by: TKT-002, TKT-003 +``` + +--- + +## Best Practices + +1. **One Component Per Ticket** - Scope to a single major component or feature slice +2. **Clear Acceptance Criteria** - Every criterion should be testable +3. **Explicit Dependencies** - Document all blockers in frontmatter +4. **Task Breakdown** - Every ticket should have associated tasks in `tasks` field +5. 
**Traceability** - Always link to requirements and design docs diff --git a/backend/omoi_os/sandbox_skills/spec-driven-dev/scripts/api_client.py b/backend/omoi_os/sandbox_skills/spec-driven-dev/scripts/api_client.py new file mode 100644 index 00000000..ec6ce1cd --- /dev/null +++ b/backend/omoi_os/sandbox_skills/spec-driven-dev/scripts/api_client.py @@ -0,0 +1,1293 @@ +#!/usr/bin/env python3 +""" +Direct API client for OmoiOS backend (bypasses MCP server). + +This module provides the OmoiOSClient class for syncing local specs +to the OmoiOS backend API. + +Sync Behavior: +- CREATE: New tickets/tasks are created +- UPDATE DESCRIPTION: If item exists but description differs, update it +- SKIP: If item exists with same description, skip + +Usage: + from api_client import OmoiOSClient + from parse_specs import SpecParser + + client = OmoiOSClient() + parser = SpecParser() + result = parser.parse_all() + + await client.sync(result) +""" + +import asyncio +import os +import re +from dataclasses import dataclass +from enum import Enum +from typing import Optional + +import httpx + +# Default API URL - can be overridden via environment variable +DEFAULT_API_URL = "http://localhost:18000" + +from models import ( + ParseResult, + ParsedDesign, + ParsedRequirement, + ParsedTask, + ParsedTicket, +) + + +class SyncAction(Enum): + """Action taken during sync.""" + + CREATED = "created" + UPDATED = "updated" + SKIPPED = "skipped" + FAILED = "failed" + + +@dataclass +class SyncResult: + """Result of syncing a single item.""" + + item_id: str + item_type: str # "ticket" or "task" + action: SyncAction + message: str = "" + + +@dataclass +class SyncSummary: + """Summary of sync operation.""" + + results: list[SyncResult] + created: int = 0 + updated: int = 0 + skipped: int = 0 + failed: int = 0 + + def add(self, result: SyncResult): + self.results.append(result) + if result.action == SyncAction.CREATED: + self.created += 1 + elif result.action == SyncAction.UPDATED: + self.updated += 
1 + elif result.action == SyncAction.SKIPPED: + self.skipped += 1 + elif result.action == SyncAction.FAILED: + self.failed += 1 + + +class OmoiOSClient: + """Direct HTTP client for OmoiOS API.""" + + def __init__( + self, + base_url: Optional[str] = None, + timeout: float = 30.0, + token: Optional[str] = None, + api_key: Optional[str] = None, + ): + """Initialize client. + + Args: + base_url: Base URL of OmoiOS API. If not provided, uses + OMOIOS_API_URL environment variable, or falls back + to DEFAULT_API_URL (http://localhost:18000) + timeout: Request timeout in seconds + token: JWT access token for authentication. If not provided, + uses OMOIOS_TOKEN environment variable. + api_key: API key for authentication (alternative to JWT). + If not provided, uses OMOIOS_API_KEY environment variable. + """ + # Resolve base URL: explicit > env var > default + if base_url: + self.base_url = base_url.rstrip("/") + else: + self.base_url = os.environ.get("OMOIOS_API_URL", DEFAULT_API_URL).rstrip("/") + + self.timeout = timeout + + # Resolve auth: explicit > env var + self.token = token or os.environ.get("OMOIOS_TOKEN") + self.api_key = api_key or os.environ.get("OMOIOS_API_KEY") + + async def _request( + self, + method: str, + endpoint: str, + json: Optional[dict] = None, + ) -> tuple[int, Optional[dict]]: + """Make HTTP request to API. 
+ + Returns: + Tuple of (status_code, response_json or None) + """ + url = f"{self.base_url}{endpoint}" + headers = {} + if self.api_key: + # API key authentication takes precedence + headers["X-API-Key"] = self.api_key + elif self.token: + headers["Authorization"] = f"Bearer {self.token}" + + async with httpx.AsyncClient(timeout=self.timeout) as client: + try: + response = await client.request(method, url, json=json, headers=headers) + try: + data = response.json() + except Exception: + data = None + return response.status_code, data + except httpx.RequestError as e: + return 0, {"error": str(e)} + + # ======================================================================== + # Ticket Operations + # ======================================================================== + + async def get_ticket(self, ticket_id: str) -> Optional[dict]: + """Get ticket by ID.""" + status, data = await self._request("GET", f"/api/v1/tickets/{ticket_id}") + if status == 200: + return data + return None + + async def create_ticket(self, ticket: ParsedTicket, project_id: Optional[str] = None) -> tuple[bool, str]: + """Create a new ticket. + + Returns: + Tuple of (success, message/error) + """ + payload = { + "title": ticket.title, + "description": ticket.description, + "priority": ticket.priority, + "phase_id": "PHASE_IMPLEMENTATION", # Default phase + } + if project_id: + payload["project_id"] = project_id + + status, data = await self._request("POST", "/api/v1/tickets", json=payload) + + if status in (200, 201): + return True, f"Created with API ID: {data.get('id', 'unknown')}" + else: + error = data.get("detail", str(data)) if data else f"HTTP {status}" + return False, f"Failed to create: {error}" + + async def update_ticket_description(self, api_id: str, description: str) -> tuple[bool, str]: + """Update ticket description. 
+ + Returns: + Tuple of (success, message/error) + """ + payload = {"description": description} + status, data = await self._request("PATCH", f"/api/v1/tickets/{api_id}", json=payload) + + if status == 200: + return True, "Description updated" + else: + error = data.get("detail", str(data)) if data else f"HTTP {status}" + return False, f"Failed to update: {error}" + + async def list_tickets(self, project_id: Optional[str] = None) -> list[dict]: + """List all tickets, optionally filtered by project.""" + endpoint = "/api/v1/tickets" + if project_id: + endpoint += f"?project_id={project_id}" + + status, data = await self._request("GET", endpoint) + if status == 200 and isinstance(data, dict): + return data.get("tickets", []) + return [] + + # ======================================================================== + # Task Operations + # ======================================================================== + + async def get_task(self, task_id: str) -> Optional[dict]: + """Get task by ID.""" + status, data = await self._request("GET", f"/api/v1/tasks/{task_id}") + if status == 200: + return data + return None + + async def create_task(self, task: ParsedTask, ticket_api_id: str) -> tuple[bool, str]: + """Create a new task. 
+ + Returns: + Tuple of (success, message/error) + """ + # Convert dependencies to backend format + dependencies = None + if task.dependencies.depends_on: + dependencies = {"depends_on": task.dependencies.depends_on} + + payload = { + "ticket_id": ticket_api_id, + "title": task.title, + "description": task.objective, + "task_type": "implementation", # Default type + "priority": "MEDIUM", # Default priority + "phase_id": "PHASE_IMPLEMENTATION", + } + if dependencies: + payload["dependencies"] = dependencies + + status, data = await self._request("POST", "/api/v1/tasks", json=payload) + + if status in (200, 201): + return True, f"Created with API ID: {data.get('id', 'unknown')}" + else: + error = data.get("detail", str(data)) if data else f"HTTP {status}" + return False, f"Failed to create: {error}" + + async def update_task_description(self, api_id: str, description: str) -> tuple[bool, str]: + """Update task description. + + Returns: + Tuple of (success, message/error) + """ + payload = {"description": description} + status, data = await self._request("PATCH", f"/api/v1/tasks/{api_id}", json=payload) + + if status == 200: + return True, "Description updated" + else: + error = data.get("detail", str(data)) if data else f"HTTP {status}" + return False, f"Failed to update: {error}" + + async def list_tasks(self, ticket_id: Optional[str] = None) -> list[dict]: + """List all tasks, optionally filtered by ticket.""" + endpoint = "/api/v1/tasks" + # Note: The tasks endpoint doesn't have a ticket_id filter yet + # We filter client-side for now + status, data = await self._request("GET", endpoint) + if status == 200 and isinstance(data, list): + if ticket_id: + return [t for t in data if t.get("ticket_id") == ticket_id] + return data + return [] + + async def get_project_with_tickets(self, project_id: str) -> dict: + """Get project details with all tickets and their tasks.""" + # Get project info + status, project_data = await self._request("GET", 
f"/api/v1/projects/{project_id}") + if status != 200: + return {"error": f"Project not found: {project_id}"} + + # Get tickets for this project + tickets = await self.list_tickets(project_id) + + # Get all tasks + all_tasks = await self.list_tasks() + + # Group tasks by ticket + tasks_by_ticket = {} + for task in all_tasks: + tid = task.get("ticket_id") + if tid not in tasks_by_ticket: + tasks_by_ticket[tid] = [] + tasks_by_ticket[tid].append(task) + + # Attach tasks to tickets + for ticket in tickets: + ticket["tasks"] = tasks_by_ticket.get(ticket["id"], []) + + return { + "project": project_data, + "tickets": tickets, + "total_tickets": len(tickets), + "total_tasks": len(all_tasks), + } + + # ======================================================================== + # Project Operations + # ======================================================================== + + async def list_projects(self) -> list[dict]: + """List all projects.""" + status, data = await self._request("GET", "/api/v1/projects") + if status == 200 and isinstance(data, dict): + return data.get("projects", []) + return [] + + # ======================================================================== + # Spec Operations + # ======================================================================== + + async def get_spec(self, spec_id: str) -> Optional[dict]: + """Get spec by ID.""" + status, data = await self._request("GET", f"/api/v1/specs/{spec_id}") + if status == 200: + return data + return None + + async def list_specs(self, project_id: str) -> list[dict]: + """List all specs for a project.""" + status, data = await self._request("GET", f"/api/v1/specs/project/{project_id}") + if status == 200 and isinstance(data, dict): + return data.get("specs", []) + return [] + + async def create_spec( + self, + title: str, + project_id: str, + description: Optional[str] = None, + ) -> tuple[bool, str, Optional[str]]: + """Create a new spec. 
+ + Returns: + Tuple of (success, message/error, spec_id if created) + """ + payload = { + "title": title, + "project_id": project_id, + } + if description: + payload["description"] = description + + status, data = await self._request("POST", "/api/v1/specs", json=payload) + + if status in (200, 201) and data: + spec_id = data.get("id") + return True, f"Created spec with ID: {spec_id}", spec_id + else: + error = data.get("detail", str(data)) if data else f"HTTP {status}" + return False, f"Failed to create spec: {error}", None + + async def update_spec( + self, + spec_id: str, + title: Optional[str] = None, + description: Optional[str] = None, + status: Optional[str] = None, + phase: Optional[str] = None, + ) -> tuple[bool, str]: + """Update a spec. + + Returns: + Tuple of (success, message/error) + """ + payload = {} + if title is not None: + payload["title"] = title + if description is not None: + payload["description"] = description + if status is not None: + payload["status"] = status + if phase is not None: + payload["phase"] = phase + + resp_status, data = await self._request( + "PATCH", f"/api/v1/specs/{spec_id}", json=payload + ) + + if resp_status == 200: + return True, "Spec updated" + else: + error = data.get("detail", str(data)) if data else f"HTTP {resp_status}" + return False, f"Failed to update spec: {error}" + + # ======================================================================== + # Requirement Operations (EARS format) + # ======================================================================== + + async def add_requirement( + self, + spec_id: str, + title: str, + condition: str, + action: str, + linked_design: Optional[str] = None, + ) -> tuple[bool, str, Optional[str]]: + """Add a requirement to a spec using EARS format. 
+ + EARS format: + - condition: The "WHEN" clause (triggering condition) + - action: The "THE SYSTEM SHALL" clause (expected behavior) + + Args: + linked_design: Optional reference to a design section/ID + + Returns: + Tuple of (success, message/error, requirement_id if created) + """ + payload = { + "title": title, + "condition": condition, + "action": action, + } + if linked_design: + payload["linked_design"] = linked_design + + status, data = await self._request( + "POST", f"/api/v1/specs/{spec_id}/requirements", json=payload + ) + + if status in (200, 201) and data: + req_id = data.get("id") + return True, f"Added requirement with ID: {req_id}", req_id + else: + error = data.get("detail", str(data)) if data else f"HTTP {status}" + return False, f"Failed to add requirement: {error}", None + + async def update_requirement( + self, + spec_id: str, + req_id: str, + title: Optional[str] = None, + condition: Optional[str] = None, + action: Optional[str] = None, + status: Optional[str] = None, + linked_design: Optional[str] = None, + ) -> tuple[bool, str]: + """Update a requirement. 
+ + Args: + linked_design: Optional reference to a design section/ID + + Returns: + Tuple of (success, message/error) + """ + payload = {} + if title is not None: + payload["title"] = title + if condition is not None: + payload["condition"] = condition + if action is not None: + payload["action"] = action + if status is not None: + payload["status"] = status + if linked_design is not None: + payload["linked_design"] = linked_design + + resp_status, data = await self._request( + "PATCH", f"/api/v1/specs/{spec_id}/requirements/{req_id}", json=payload + ) + + if resp_status == 200: + return True, "Requirement updated" + else: + error = data.get("detail", str(data)) if data else f"HTTP {resp_status}" + return False, f"Failed to update requirement: {error}" + + async def add_acceptance_criterion( + self, + spec_id: str, + req_id: str, + text: str, + ) -> tuple[bool, str]: + """Add an acceptance criterion to a requirement. + + Returns: + Tuple of (success, message/error) + """ + payload = {"text": text} + + status, data = await self._request( + "POST", + f"/api/v1/specs/{spec_id}/requirements/{req_id}/criteria", + json=payload, + ) + + if status in (200, 201): + return True, "Criterion added" + else: + error = data.get("detail", str(data)) if data else f"HTTP {status}" + return False, f"Failed to add criterion: {error}" + + # ======================================================================== + # Design Operations + # ======================================================================== + + async def update_design( + self, + spec_id: str, + architecture: Optional[str] = None, + data_model: Optional[str] = None, + api_spec: Optional[list[dict]] = None, + ) -> tuple[bool, str]: + """Update a spec's design artifact. 
+ + Args: + spec_id: The spec ID + architecture: Architecture description (markdown/mermaid) + data_model: Data model description + api_spec: List of API endpoints [{method, endpoint, description}] + + Returns: + Tuple of (success, message/error) + """ + payload = {} + if architecture is not None: + payload["architecture"] = architecture + if data_model is not None: + payload["data_model"] = data_model + if api_spec is not None: + payload["api_spec"] = api_spec + + status, data = await self._request( + "PUT", f"/api/v1/specs/{spec_id}/design", json=payload + ) + + if status == 200: + return True, "Design updated" + else: + error = data.get("detail", str(data)) if data else f"HTTP {status}" + return False, f"Failed to update design: {error}" + + # ======================================================================== + # Sync Specs from Local Files + # ======================================================================== + + async def sync_requirement_to_spec( + self, + spec_id: str, + requirement: ParsedRequirement, + existing_reqs: Optional[list[dict]] = None, + ) -> SyncResult: + """Sync a parsed requirement to an API spec. 
+ + Args: + spec_id: The spec ID to add/update requirement in + requirement: Parsed requirement from local file + existing_reqs: Optional list of existing requirements for comparison + + Returns: + SyncResult indicating action taken + """ + # Check if requirement already exists by title match + existing = None + if existing_reqs: + for req in existing_reqs: + if req.get("title") == requirement.title: + existing = req + break + + if existing: + # Check if needs update (compare condition/action/linked_design) + needs_update = ( + existing.get("condition", "").strip() != requirement.condition.strip() + or existing.get("action", "").strip() != requirement.action.strip() + or existing.get("linked_design") != requirement.linked_design + ) + if needs_update: + success, msg = await self.update_requirement( + spec_id, + existing["id"], + condition=requirement.condition, + action=requirement.action, + linked_design=requirement.linked_design, + ) + return SyncResult( + item_id=requirement.id, + item_type="requirement", + action=SyncAction.UPDATED if success else SyncAction.FAILED, + message=msg, + ) + else: + return SyncResult( + item_id=requirement.id, + item_type="requirement", + action=SyncAction.SKIPPED, + message="Already exists with same content", + ) + else: + # Create new requirement + success, msg, req_id = await self.add_requirement( + spec_id, + requirement.title, + requirement.condition, + requirement.action, + requirement.linked_design, + ) + + result = SyncResult( + item_id=requirement.id, + item_type="requirement", + action=SyncAction.CREATED if success else SyncAction.FAILED, + message=msg, + ) + + # Add acceptance criteria if requirement was created + if success and req_id and requirement.acceptance_criteria: + for criterion in requirement.acceptance_criteria: + await self.add_acceptance_criterion(spec_id, req_id, criterion.text) + + return result + + async def sync_design_to_spec( + self, + spec_id: str, + design: ParsedDesign, + existing_design: 
Optional[dict] = None, + ) -> SyncResult: + """Sync a parsed design to an API spec. + + Args: + spec_id: The spec ID to update design in + design: Parsed design from local file + existing_design: Optional existing design for comparison + + Returns: + SyncResult indicating action taken + """ + # Convert parsed API endpoints to API format + api_spec = [] + for ep in design.api_endpoints: + api_spec.append({ + "method": ep.method, + "endpoint": ep.path, + "description": ep.description, + }) + + # Build data model description from parsed data models + data_model_parts = [] + for dm in design.data_models: + model_desc = f"### {dm.name}\n{dm.description}\n\n" + if dm.fields: + model_desc += "**Fields:**\n" + for field_name, field_type in dm.fields.items(): + model_desc += f"- `{field_name}`: {field_type}\n" + if dm.relationships: + model_desc += "\n**Relationships:**\n" + for rel in dm.relationships: + model_desc += f"- {rel}\n" + data_model_parts.append(model_desc) + + data_model = "\n".join(data_model_parts) if data_model_parts else None + + # Check if needs update + if existing_design: + existing_arch = existing_design.get("architecture", "") or "" + existing_dm = existing_design.get("data_model", "") or "" + new_arch = design.architecture or "" + new_dm = data_model or "" + + if existing_arch.strip() == new_arch.strip() and existing_dm.strip() == new_dm.strip(): + return SyncResult( + item_id=design.id, + item_type="design", + action=SyncAction.SKIPPED, + message="Already exists with same content", + ) + + # Update design + success, msg = await self.update_design( + spec_id, + architecture=design.architecture, + data_model=data_model, + api_spec=api_spec if api_spec else None, + ) + + return SyncResult( + item_id=design.id, + item_type="design", + action=SyncAction.UPDATED if success else SyncAction.FAILED, + message=msg, + ) + + # ======================================================================== + # Auth Operations + # 
======================================================================== + + async def login(self, email: str, password: str) -> tuple[bool, str]: + """Login and store access token. + + Returns: + Tuple of (success, message/error) + """ + payload = {"email": email, "password": password} + status, data = await self._request("POST", "/api/v1/auth/login", json=payload) + + if status == 200 and data: + self.token = data.get("access_token") + return True, "Login successful" + else: + error = data.get("detail", str(data)) if data else f"HTTP {status}" + return False, f"Login failed: {error}" + + # ======================================================================== + # Sync Operations + # ======================================================================== + + async def check_connection(self) -> tuple[bool, str]: + """Check if API is reachable. + + Returns: + Tuple of (success, message) + """ + try: + status, _ = await self._request("GET", "/health") + if status == 200: + return True, "Connected" + else: + return False, f"API returned status {status}" + except Exception as e: + return False, str(e) + + async def sync( + self, + result: ParseResult, + project_id: Optional[str] = None, + dry_run: bool = False, + ) -> SyncSummary: + """Sync local specs to API. 
+ + Behavior: + - CREATE: If ticket/task doesn't exist (by title match) + - UPDATE: If exists but description differs + - SKIP: If exists with same description + + Args: + result: Parsed specs from SpecParser + project_id: Optional project ID to associate tickets with + dry_run: If True, don't actually make changes + + Returns: + SyncSummary with results for each item + """ + summary = SyncSummary(results=[]) + + # Get existing items from API for comparison + existing_tickets = await self.list_tickets(project_id) + existing_tasks = await self.list_tasks() + + # Build lookup by title + ticket_by_title = {t["title"]: t for t in existing_tickets} + task_by_title = {t.get("title", ""): t for t in existing_tasks} + + # Track created ticket API IDs for task creation + ticket_api_ids: dict[str, str] = {} + + # Sync tickets + for ticket in result.tickets: + existing = ticket_by_title.get(ticket.title) + + if existing: + ticket_api_ids[ticket.id] = existing["id"] + + # Check if description needs update + existing_desc = existing.get("description", "") or "" + if existing_desc.strip() != ticket.description.strip(): + if dry_run: + summary.add( + SyncResult( + item_id=ticket.id, + item_type="ticket", + action=SyncAction.UPDATED, + message="Would update description (dry run)", + ) + ) + else: + success, msg = await self.update_ticket_description( + existing["id"], ticket.description + ) + summary.add( + SyncResult( + item_id=ticket.id, + item_type="ticket", + action=SyncAction.UPDATED if success else SyncAction.FAILED, + message=msg, + ) + ) + else: + summary.add( + SyncResult( + item_id=ticket.id, + item_type="ticket", + action=SyncAction.SKIPPED, + message="Already exists with same description", + ) + ) + else: + # Create new ticket + if dry_run: + # Use placeholder ID for dry run so tasks can reference it + ticket_api_ids[ticket.id] = f"dry-run-{ticket.id}" + summary.add( + SyncResult( + item_id=ticket.id, + item_type="ticket", + action=SyncAction.CREATED, + message="Would 
create (dry run)", + ) + ) + else: + success, msg = await self.create_ticket(ticket, project_id) + if success: + # Extract API ID from message + # Format: "Created with API ID: xxx" + match = re.search(r"API ID: (\S+)", msg) + if match: + ticket_api_ids[ticket.id] = match.group(1) + summary.add( + SyncResult( + item_id=ticket.id, + item_type="ticket", + action=SyncAction.CREATED if success else SyncAction.FAILED, + message=msg, + ) + ) + + # Sync tasks + for task in result.tasks: + existing = task_by_title.get(task.title) + + if existing: + # Check if description needs update + existing_desc = existing.get("description", "") or "" + if existing_desc.strip() != task.objective.strip(): + if dry_run: + summary.add( + SyncResult( + item_id=task.id, + item_type="task", + action=SyncAction.UPDATED, + message="Would update description (dry run)", + ) + ) + else: + success, msg = await self.update_task_description( + existing["id"], task.objective + ) + summary.add( + SyncResult( + item_id=task.id, + item_type="task", + action=SyncAction.UPDATED if success else SyncAction.FAILED, + message=msg, + ) + ) + else: + summary.add( + SyncResult( + item_id=task.id, + item_type="task", + action=SyncAction.SKIPPED, + message="Already exists with same description", + ) + ) + else: + # Create new task - need parent ticket API ID + ticket_api_id = ticket_api_ids.get(task.parent_ticket) + if not ticket_api_id: + summary.add( + SyncResult( + item_id=task.id, + item_type="task", + action=SyncAction.FAILED, + message=f"Parent ticket {task.parent_ticket} not found in API", + ) + ) + continue + + if dry_run: + summary.add( + SyncResult( + item_id=task.id, + item_type="task", + action=SyncAction.CREATED, + message="Would create (dry run)", + ) + ) + else: + success, msg = await self.create_task(task, ticket_api_id) + summary.add( + SyncResult( + item_id=task.id, + item_type="task", + action=SyncAction.CREATED if success else SyncAction.FAILED, + message=msg, + ) + ) + + return summary + + 
async def diff(self, result: ParseResult, project_id: Optional[str] = None) -> SyncSummary: + """Show what would change without making changes. + + This is equivalent to sync with dry_run=True. + """ + return await self.sync(result, project_id, dry_run=True) + + async def sync_specs( + self, + result: ParseResult, + project_id: str, + spec_title: Optional[str] = None, + dry_run: bool = False, + ) -> SyncSummary: + """Sync local requirements and designs to API specs. + + This creates/updates a spec document in the API with requirements + and design artifacts parsed from local .omoi_os/ files. + + Workflow: + 1. Find or create spec by title (defaults to first design's title) + 2. Sync all requirements to the spec + 3. Sync all designs to the spec + + Args: + result: Parsed specs from SpecParser + project_id: Project ID to associate spec with + spec_title: Optional spec title (defaults to design feature name) + dry_run: If True, don't actually make changes + + Returns: + SyncSummary with results for each item + """ + summary = SyncSummary(results=[]) + + # Determine spec title + if not spec_title: + if result.designs: + spec_title = result.designs[0].title or result.designs[0].feature + elif result.requirements: + spec_title = f"Spec for {result.requirements[0].title}" + else: + summary.add(SyncResult( + item_id="unknown", + item_type="spec", + action=SyncAction.FAILED, + message="No requirements or designs found to sync", + )) + return summary + + # Get existing specs for this project + existing_specs = await self.list_specs(project_id) + spec_by_title = {s["title"]: s for s in existing_specs} + + # Find or create spec + spec_id = None + if spec_title in spec_by_title: + spec_id = spec_by_title[spec_title]["id"] + existing_spec = spec_by_title[spec_title] + summary.add(SyncResult( + item_id=spec_id, + item_type="spec", + action=SyncAction.SKIPPED, + message=f"Using existing spec: {spec_title}", + )) + else: + if dry_run: + spec_id = f"dry-run-spec-{spec_title}" + 
summary.add(SyncResult( + item_id=spec_id, + item_type="spec", + action=SyncAction.CREATED, + message=f"Would create spec: {spec_title} (dry run)", + )) + existing_spec = {"requirements": [], "design": None} + else: + # Build description from requirements + description = "" + if result.requirements: + description = f"Requirements ({len(result.requirements)}): " + description += ", ".join(r.title for r in result.requirements[:3]) + if len(result.requirements) > 3: + description += f" and {len(result.requirements) - 3} more" + + success, msg, created_id = await self.create_spec( + title=spec_title, + project_id=project_id, + description=description, + ) + if success and created_id: + spec_id = created_id + summary.add(SyncResult( + item_id=spec_id, + item_type="spec", + action=SyncAction.CREATED, + message=msg, + )) + existing_spec = {"requirements": [], "design": None} + else: + summary.add(SyncResult( + item_id="unknown", + item_type="spec", + action=SyncAction.FAILED, + message=msg, + )) + return summary + + # Sync requirements + if spec_id and not dry_run: + existing_reqs = existing_spec.get("requirements", []) + + for requirement in result.requirements: + req_result = await self.sync_requirement_to_spec( + spec_id, + requirement, + existing_reqs, + ) + summary.add(req_result) + elif dry_run: + for requirement in result.requirements: + summary.add(SyncResult( + item_id=requirement.id, + item_type="requirement", + action=SyncAction.CREATED, + message=f"Would create requirement: {requirement.title} (dry run)", + )) + + # Sync designs + if spec_id and not dry_run: + existing_design = existing_spec.get("design") + + for design in result.designs: + design_result = await self.sync_design_to_spec( + spec_id, + design, + existing_design, + ) + summary.add(design_result) + elif dry_run: + for design in result.designs: + summary.add(SyncResult( + item_id=design.id, + item_type="design", + action=SyncAction.UPDATED, + message=f"Would update design: {design.title} (dry 
run)", + )) + + return summary + + async def diff_specs( + self, + result: ParseResult, + project_id: str, + spec_title: Optional[str] = None, + ) -> SyncSummary: + """Show what spec changes would happen without making changes. + + This is equivalent to sync_specs with dry_run=True. + """ + return await self.sync_specs(result, project_id, spec_title, dry_run=True) + + async def get_full_traceability( + self, + project_id: str, + ) -> dict: + """Get full traceability from API: Specs → Requirements → Tickets → Tasks. + + Returns: + Dict with: + - specs: List of specs with requirements + - tickets: List of tickets with tasks + - traceability: Mapping of spec requirements to tickets + """ + # Get all specs for the project + specs = await self.list_specs(project_id) + + # Get all tickets for the project + tickets = await self.list_tickets(project_id) + + # Get all tasks + all_tasks = await self.list_tasks() + + # Group tasks by ticket + tasks_by_ticket = {} + for task in all_tasks: + tid = task.get("ticket_id") + if tid not in tasks_by_ticket: + tasks_by_ticket[tid] = [] + tasks_by_ticket[tid].append(task) + + # Build traceability matrix + traceability = { + "specs": [], + "tickets": [], + "orphan_tickets": [], # Tickets not linked to any spec requirement + } + + # Process specs + for spec in specs: + spec_entry = { + "id": spec["id"], + "title": spec["title"], + "status": spec["status"], + "requirements": [], + "linked_tickets": [], + } + + for req in spec.get("requirements", []): + req_entry = { + "id": req["id"], + "title": req["title"], + "condition": req.get("condition", ""), + "action": req.get("action", ""), + "status": req.get("status", "pending"), + "linked_tickets": [], + } + + # Find tickets that might implement this requirement + # (This would require ticket.requirements field - checking by title match for now) + for ticket in tickets: + ticket_title_lower = ticket.get("title", "").lower() + req_title_lower = req["title"].lower() + + # Simple heuristic: 
ticket title contains requirement keywords + if any(word in ticket_title_lower for word in req_title_lower.split()): + req_entry["linked_tickets"].append(ticket["id"]) + spec_entry["linked_tickets"].append(ticket["id"]) + + spec_entry["requirements"].append(req_entry) + + traceability["specs"].append(spec_entry) + + # Process tickets + linked_ticket_ids = set() + for spec in traceability["specs"]: + linked_ticket_ids.update(spec["linked_tickets"]) + + for ticket in tickets: + ticket_entry = { + "id": ticket["id"], + "title": ticket["title"], + "status": ticket.get("status", "unknown"), + "priority": ticket.get("priority", "MEDIUM"), + "tasks": tasks_by_ticket.get(ticket["id"], []), + } + + if ticket["id"] in linked_ticket_ids: + traceability["tickets"].append(ticket_entry) + else: + traceability["orphan_tickets"].append(ticket_entry) + + return traceability + + +# ============================================================================ +# CLI Integration +# ============================================================================ + + +def print_sync_summary(summary: SyncSummary): + """Print sync summary to console.""" + print("\nSync Results:") + print("-" * 60) + + for result in summary.results: + action_str = { + SyncAction.CREATED: "[CREATE]", + SyncAction.UPDATED: "[UPDATE]", + SyncAction.SKIPPED: "[SKIP] ", + SyncAction.FAILED: "[FAILED]", + }[result.action] + + print(f"{action_str} {result.item_type} {result.item_id}") + if result.message: + print(f" {result.message}") + + print("-" * 60) + print( + f"Summary: {summary.created} created, {summary.updated} updated, " + f"{summary.skipped} skipped, {summary.failed} failed" + ) + + +async def run_sync( + api_url: str, + action: str, + project_id: Optional[str] = None, + email: Optional[str] = None, + password: Optional[str] = None, + token: Optional[str] = None, + api_key: Optional[str] = None, +): + """Run sync from CLI.""" + import os + from parse_specs import SpecParser + + # Auth can come from: 
argument > env var + auth_token = token or os.environ.get("OMOIOS_TOKEN") + auth_api_key = api_key or os.environ.get("OMOIOS_API_KEY") + + client = OmoiOSClient(base_url=api_url, token=auth_token, api_key=auth_api_key) + parser = SpecParser() + + # Check connection + print(f"Connecting to {api_url}...") + connected, msg = await client.check_connection() + if not connected: + print(f"Error: Cannot connect to API: {msg}") + return False + + print("Connected!") + + # Handle authentication + if client.api_key: + print("Using API key authentication.\n") + elif client.token: + print("Using provided token.\n") + else: + # Try to login if credentials provided + if email and password: + print(f"Logging in as {email}...") + success, msg = await client.login(email, password) + if not success: + print(f"Error: {msg}") + return False + print("Authenticated!\n") + else: + # Try env vars for credentials + env_email = os.environ.get("OMOIOS_EMAIL") + env_password = os.environ.get("OMOIOS_PASSWORD") + if env_email and env_password: + print(f"Logging in as {env_email}...") + success, msg = await client.login(env_email, env_password) + if not success: + print(f"Error: {msg}") + return False + print("Authenticated!\n") + else: + print("Warning: No authentication provided. API calls may fail.") + print("Set OMOIOS_API_KEY, OMOIOS_TOKEN, or OMOIOS_EMAIL/OMOIOS_PASSWORD env vars.\n") + + # Parse specs + result = parser.parse_all() + print(f"Parsed {len(result.tickets)} tickets and {len(result.tasks)} tasks\n") + + # Run validation first + from spec_cli import validate_specs + + errors = validate_specs(result) + if errors: + print("Validation failed! 
Fix these errors before syncing:") + for error in errors: + print(f" - {error}") + return False + + print("Validation passed!\n") + + # Run sync + if action == "diff": + print("Checking what would change (dry run)...") + summary = await client.diff(result, project_id) + else: # push + print("Syncing to API...") + summary = await client.sync(result, project_id) + + print_sync_summary(summary) + return summary.failed == 0 + + +if __name__ == "__main__": + import sys + + # Quick test + async def test(): + client = OmoiOSClient() + connected, msg = await client.check_connection() + print(f"Connection test: {msg}") + + asyncio.run(test()) diff --git a/backend/omoi_os/sandbox_skills/spec-driven-dev/scripts/generate_ids.py b/backend/omoi_os/sandbox_skills/spec-driven-dev/scripts/generate_ids.py new file mode 100755 index 00000000..2571309e --- /dev/null +++ b/backend/omoi_os/sandbox_skills/spec-driven-dev/scripts/generate_ids.py @@ -0,0 +1,169 @@ +#!/usr/bin/env python3 +""" +Generate next ticket or task IDs. 
+ +Usage: + python generate_ids.py ticket [--prefix PREFIX] + python generate_ids.py task [--prefix PREFIX] + python generate_ids.py --list + +Examples: + python generate_ids.py ticket # TKT-001 + python generate_ids.py ticket --prefix COLLAB # TKT-COLLAB-001 + python generate_ids.py task # TSK-001 + python generate_ids.py --list # Show all existing IDs +""" + +import argparse +import re +import sys +from pathlib import Path +from typing import Optional + + +def get_project_root() -> Path: + """Find project root by looking for .omoi_os or common markers.""" + current = Path.cwd() + + for parent in [current] + list(current.parents): + if (parent / ".omoi_os").exists(): + return parent + if (parent / ".git").exists(): + return parent + + return current + + +def extract_ids(directory: Path, pattern: str) -> list[tuple[str, int]]: + """Extract IDs from markdown files in a directory.""" + ids = [] + + if not directory.exists(): + return ids + + # Pattern like TKT-001 or TKT-PREFIX-001 + id_regex = re.compile(rf"({pattern}(?:-[A-Z]+)?-(\d+))") + + for md_file in directory.glob("*.md"): + content = md_file.read_text() + matches = id_regex.findall(content) + for full_id, num in matches: + ids.append((full_id, int(num))) + + # Also check filenames + for md_file in directory.glob("*.md"): + match = id_regex.match(md_file.stem.upper()) + if match: + ids.append((match.group(1), int(match.group(2)))) + + return ids + + +def get_next_id(id_type: str, prefix: Optional[str] = None) -> str: + """Generate the next ID for tickets or tasks.""" + root = get_project_root() + omoi_dir = root / ".omoi_os" + + if id_type == "ticket": + directory = omoi_dir / "tickets" + base = "TKT" + else: + directory = omoi_dir / "tasks" + base = "TSK" + + # Get existing IDs + existing = extract_ids(directory, base) + + # Filter by prefix if specified + if prefix: + prefix = prefix.upper() + full_base = f"{base}-{prefix}" + relevant = [(id_, num) for id_, num in existing if id_.startswith(full_base)] + 
else: + full_base = base + # Get IDs without custom prefix + relevant = [(id_, num) for id_, num in existing + if re.match(rf"{base}-\d+$", id_)] + + # Find next number + if relevant: + max_num = max(num for _, num in relevant) + next_num = max_num + 1 + else: + next_num = 1 + + # Format ID + if prefix: + new_id = f"{base}-{prefix}-{next_num:03d}" + else: + new_id = f"{base}-{next_num:03d}" + + return new_id + + +def list_all_ids() -> None: + """List all existing ticket and task IDs.""" + root = get_project_root() + omoi_dir = root / ".omoi_os" + + print("Existing IDs:") + print("-" * 40) + + # Tickets + tickets_dir = omoi_dir / "tickets" + ticket_ids = extract_ids(tickets_dir, "TKT") + if ticket_ids: + print("\nTickets:") + for id_, _ in sorted(set(ticket_ids)): + print(f" {id_}") + else: + print("\nTickets: None") + + # Tasks + tasks_dir = omoi_dir / "tasks" + task_ids = extract_ids(tasks_dir, "TSK") + if task_ids: + print("\nTasks:") + for id_, _ in sorted(set(task_ids)): + print(f" {id_}") + else: + print("\nTasks: None") + + +def main(): + parser = argparse.ArgumentParser( + description="Generate next ticket or task IDs" + ) + parser.add_argument( + "type", + nargs="?", + choices=["ticket", "task"], + help="Type of ID to generate" + ) + parser.add_argument( + "--prefix", + help="Optional prefix for the ID (e.g., COLLAB for TKT-COLLAB-001)" + ) + parser.add_argument( + "--list", + action="store_true", + dest="list_ids", + help="List all existing IDs" + ) + + args = parser.parse_args() + + if args.list_ids: + list_all_ids() + return + + if not args.type: + parser.print_help() + sys.exit(1) + + next_id = get_next_id(args.type, args.prefix) + print(next_id) + + +if __name__ == "__main__": + main() diff --git a/backend/omoi_os/sandbox_skills/spec-driven-dev/scripts/init_feature.py b/backend/omoi_os/sandbox_skills/spec-driven-dev/scripts/init_feature.py new file mode 100755 index 00000000..029b0f4c --- /dev/null +++ 
#!/usr/bin/env python3
"""
Initialize directory structure for a new feature in .omoi_os/.

Usage:
    python init_feature.py <feature-name>

Example:
    python init_feature.py collaboration-system

Creates:
    .omoi_os/requirements/<feature-name>.md
    .omoi_os/designs/<feature-name>.md
"""

import argparse
import sys
from datetime import datetime
from pathlib import Path


def get_project_root() -> Path:
    """Locate the project root.

    Walks upward from the current working directory and returns the first
    ancestor that contains a ``.omoi_os/`` directory or, failing that, a
    ``.git/`` marker. Falls back to the current directory when neither is
    found anywhere on the path.
    """
    cwd = Path.cwd()
    for candidate in (cwd, *cwd.parents):
        if (candidate / ".omoi_os").exists():
            return candidate
        if (candidate / ".git").exists():
            return candidate
    return cwd


def create_requirements_stub(feature_name: str, output_path: Path) -> None:
    """Write a requirements-document stub for *feature_name* to *output_path*.

    The requirement-ID domain is derived from the feature name (uppercased,
    hyphens to underscores, truncated to 6 chars), e.g. ``user-auth`` ->
    ``USER_A`` -> ``REQ-USER_A-CORE-001``.
    """
    domain = feature_name.upper().replace("-", "_")[:6]
    today = datetime.now().strftime("%Y-%m-%d")

    content = f"""# {feature_name.replace("-", " ").title()} Requirements

**Created**: {today}
**Status**: Draft
**Purpose**: Requirements specification for {feature_name.replace("-", " ")}.
**Related**:

---

## Document Overview

{{2-3 sentence overview of what this requirements document covers}}

---

## 1. Core Requirements

#### REQ-{domain}-CORE-001: {{Requirement Title}}
THE SYSTEM SHALL {{normative requirement statement}}.

{{Additional details, rationale, or constraints}}

---

## 2. State Machine (If Applicable)

#### REQ-{domain}-SM-001: States
{{Feature}} SHALL support the following states:

```mermaid
stateDiagram-v2
    [*] --> pending
    pending --> active : Start
    active --> completed : Complete
    completed --> [*]
```

---

## 3. Data Model Requirements

### 3.1 Primary Entity
#### REQ-{domain}-DM-001
{{Entity}} SHALL include the following fields:
- `id: str` (unique identifier)
- `status: StatusEnum` (current state)
- `created_at: datetime` (creation timestamp)
- `updated_at: datetime` (last update timestamp)

---

## 4. API Requirements

| Endpoint | Method | Purpose | Request Body | Responses |
|----------|--------|---------|--------------|-----------|
| /api/{{resource}} | POST | Create resource | `{{ field1, field2 }}` | 200: `{{ id, status }}`; 400: `{{ error }}` |

---

## Related Documents

- [Design Document](../designs/{feature_name}.md)

---

## Revision History

| Version | Date | Author | Changes |
|---------|------|--------|---------|
| 0.1 | {today} | Claude | Initial stub |
"""

    output_path.write_text(content)
    print(f"Created: {output_path}")


def create_design_stub(feature_name: str, output_path: Path) -> None:
    """Write a product-design-document stub for *feature_name* to *output_path*."""
    today = datetime.now().strftime("%Y-%m-%d")

    content = f"""# {feature_name.replace("-", " ").title()} - Product Design Document

**Created**: {today}
**Status**: Draft
**Purpose**: Design specification for {feature_name.replace("-", " ")}.
**Related**: [Requirements](../requirements/{feature_name}.md)

---

## Document Overview

{{Description of the feature/system being designed}}

- **Purpose & Scope**
  - {{Goal 1}}
  - {{Goal 2}}

- **Target Audience**
  - Implementation teams
  - System architects

---

## Architecture Overview

### High-Level Architecture

```mermaid
flowchart TD
    subgraph Feature[{feature_name.replace("-", " ").title()}]
        C1[Component 1]
        C2[Component 2]
    end

    C1 -->|action| C2
```

### Component Responsibilities

| Component | Layer | Responsibilities |
|-----------|-------|------------------|
| Component 1 | Service | {{Primary responsibilities}} |
| Component 2 | Data | {{Primary responsibilities}} |

---

## Data Models

### Pydantic Models

```python
from __future__ import annotations
from datetime import datetime
from enum import Enum
from typing import Optional
from pydantic import BaseModel


class StatusEnum(str, Enum):
    PENDING = "pending"
    ACTIVE = "active"
    COMPLETED = "completed"


class Entity(BaseModel):
    id: str
    status: StatusEnum
    created_at: datetime
    updated_at: datetime
```

---

## API Specifications

### REST Endpoints

| Method | Path | Purpose | Request | Response |
|--------|------|---------|---------|----------|
| POST | `/api/{{resource}}` | Create | `EntityCreate` | `Entity` |
| GET | `/api/{{resource}}/{{id}}` | Get | - | `Entity` |

---

## Related Documents

- **Requirements**: `../requirements/{feature_name}.md`

---

## Quality Checklist

- [ ] All requirements addressed
- [ ] Architecture diagram included
- [ ] API specifications complete
- [ ] Database schemas defined

---

## Revision History

| Version | Date | Author | Changes |
|---------|------|--------|---------|
| 0.1 | {today} | Claude | Initial stub |
"""

    output_path.write_text(content)
    print(f"Created: {output_path}")


def init_feature(feature_name: str) -> None:
    """Create the .omoi_os/ skeleton plus requirement/design stubs for a feature.

    Existing stub files are never overwritten; a warning is printed instead.
    """
    root = get_project_root()
    omoi_dir = root / ".omoi_os"

    # Ensure all four standard spec directories exist up front.
    for subdir in ("requirements", "designs", "tickets", "tasks"):
        (omoi_dir / subdir).mkdir(parents=True, exist_ok=True)

    req_path = omoi_dir / "requirements" / f"{feature_name}.md"
    design_path = omoi_dir / "designs" / f"{feature_name}.md"

    if req_path.exists():
        print(f"Warning: {req_path} already exists, skipping")
    else:
        create_requirements_stub(feature_name, req_path)

    if design_path.exists():
        print(f"Warning: {design_path} already exists, skipping")
    else:
        create_design_stub(feature_name, design_path)

    print(f"\nFeature '{feature_name}' initialized in {omoi_dir}")
    print("\nNext steps:")
    print(f"  1. Edit requirements: {req_path}")
    print(f"  2. Edit design: {design_path}")
    print("  3. Create tickets with generate_ids.py")


def main():
    """CLI entry point: validate the feature name, then scaffold it."""
    parser = argparse.ArgumentParser(
        description="Initialize directory structure for a new feature"
    )
    parser.add_argument(
        "feature_name",
        help="Name of the feature (kebab-case, e.g., 'collaboration-system')"
    )

    args = parser.parse_args()

    # Normalize to kebab-case, then allow only alphanumerics and hyphens.
    feature_name = args.feature_name.lower().replace("_", "-")
    if not feature_name.replace("-", "").isalnum():
        print(f"Error: Invalid feature name '{feature_name}'")
        print("Use alphanumeric characters and hyphens only")
        sys.exit(1)

    init_feature(feature_name)


if __name__ == "__main__":
    main()
+ +These dataclasses represent the structured data extracted from +.omoi_os/ markdown files with YAML frontmatter. + +Supports: +- Requirements (.omoi_os/requirements/*.md) +- Designs (.omoi_os/designs/*.md) +- Tickets (.omoi_os/tickets/*.md) +- Tasks (.omoi_os/tasks/*.md) +""" + +from dataclasses import dataclass, field +from datetime import date +from typing import Optional + + +# ============================================================================ +# Requirement Models +# ============================================================================ + + +@dataclass +class AcceptanceCriterion: + """Single acceptance criterion for a requirement.""" + + text: str + completed: bool = False + + +@dataclass +class ParsedRequirement: + """Parsed requirement from .omoi_os/requirements/*.md + + Uses EARS format (Easy Approach to Requirements Syntax): + - condition: The "WHEN" clause (triggering condition) + - action: The "THE SYSTEM SHALL" clause (expected behavior) + """ + + id: str # REQ-XXX-YYY-NNN format + title: str + status: str # draft, review, approved + created: date + updated: date + category: str = "" # functional, non-functional, constraint + priority: str = "MEDIUM" + condition: str = "" # EARS "WHEN" clause + action: str = "" # EARS "THE SYSTEM SHALL" clause + rationale: str = "" # Why this requirement exists + acceptance_criteria: list[AcceptanceCriterion] = field(default_factory=list) + linked_tickets: list[str] = field(default_factory=list) # TKT-XXX references + linked_design: Optional[str] = None # Design section reference + file_path: str = "" + + def __str__(self) -> str: + return f"{self.id}: {self.title}" + + +# ============================================================================ +# Design Models +# ============================================================================ + + +@dataclass +class ApiEndpoint: + """API endpoint specification.""" + + method: str # GET, POST, PUT, DELETE, PATCH + path: str # /api/v1/resource + 
description: str = "" + request_body: Optional[str] = None # JSON schema or description + response: Optional[str] = None # JSON schema or description + + +@dataclass +class DataModel: + """Data model/entity specification.""" + + name: str + description: str = "" + fields: dict[str, str] = field(default_factory=dict) # field_name -> type/description + relationships: list[str] = field(default_factory=list) + + +@dataclass +class ParsedDesign: + """Parsed design from .omoi_os/designs/*.md""" + + id: str # Design identifier + title: str + status: str # draft, review, approved + created: date + updated: date + feature: str = "" # Feature this design covers + requirements: list[str] = field(default_factory=list) # REQ-XXX references + architecture: str = "" # Architecture description/diagram + data_models: list[DataModel] = field(default_factory=list) + api_endpoints: list[ApiEndpoint] = field(default_factory=list) + components: list[str] = field(default_factory=list) # Key components + error_handling: str = "" + security_considerations: str = "" + implementation_notes: str = "" + file_path: str = "" + + def __str__(self) -> str: + return f"{self.id}: {self.title}" + + +# ============================================================================ +# Ticket/Task Dependency Models +# ============================================================================ + + +@dataclass +class TicketDependencies: + """Dependencies for a ticket.""" + + blocked_by: list[str] = field(default_factory=list) + blocks: list[str] = field(default_factory=list) + related: list[str] = field(default_factory=list) + + +@dataclass +class TaskDependencies: + """Dependencies for a task.""" + + depends_on: list[str] = field(default_factory=list) + blocks: list[str] = field(default_factory=list) + + +@dataclass +class ParsedTicket: + """Parsed ticket from .omoi_os/tickets/*.md""" + + id: str + title: str + status: str + priority: str + estimate: str + created: date + updated: date + feature: 
Optional[str] = None + requirements: list[str] = field(default_factory=list) + design_ref: Optional[str] = None + tasks: list[str] = field(default_factory=list) + dependencies: TicketDependencies = field(default_factory=TicketDependencies) + description: str = "" + file_path: str = "" + + def is_blocked(self) -> bool: + """Check if this ticket is blocked by other tickets.""" + return len(self.dependencies.blocked_by) > 0 + + def __str__(self) -> str: + return f"{self.id}: {self.title}" + + +@dataclass +class ParsedTask: + """Parsed task from .omoi_os/tasks/*.md""" + + id: str + title: str + status: str + parent_ticket: str + estimate: str + created: date + assignee: Optional[str] = None + dependencies: TaskDependencies = field(default_factory=TaskDependencies) + objective: str = "" + file_path: str = "" + + def is_blocked(self, completed_tasks: set[str]) -> bool: + """Check if this task is blocked by incomplete tasks.""" + for dep in self.dependencies.depends_on: + if dep not in completed_tasks: + return True + return False + + def is_ready(self, completed_tasks: set[str]) -> bool: + """Check if this task is ready to work on.""" + return self.status == "pending" and not self.is_blocked(completed_tasks) + + def __str__(self) -> str: + return f"{self.id}: {self.title}" + + +@dataclass +class ValidationError: + """Validation error found in specs.""" + + error_type: str # circular_dependency, missing_reference, etc. 
+ message: str + source_id: str + target_id: Optional[str] = None + + def __str__(self) -> str: + if self.target_id: + return f"[{self.error_type}] {self.source_id} -> {self.target_id}: {self.message}" + return f"[{self.error_type}] {self.source_id}: {self.message}" + + +@dataclass +class ParseResult: + """Result of parsing all spec files.""" + + requirements: list[ParsedRequirement] = field(default_factory=list) + designs: list[ParsedDesign] = field(default_factory=list) + tickets: list[ParsedTicket] = field(default_factory=list) + tasks: list[ParsedTask] = field(default_factory=list) + errors: list[ValidationError] = field(default_factory=list) + + # ======================================================================== + # Requirement Methods + # ======================================================================== + + def get_requirement(self, req_id: str) -> Optional[ParsedRequirement]: + """Get requirement by ID.""" + for req in self.requirements: + if req.id == req_id: + return req + return None + + def get_requirements_by_category(self, category: str) -> list[ParsedRequirement]: + """Get all requirements in a category.""" + return [r for r in self.requirements if r.category == category] + + def get_requirements_by_status(self, status: str) -> list[ParsedRequirement]: + """Get all requirements with a given status.""" + return [r for r in self.requirements if r.status == status] + + # ======================================================================== + # Design Methods + # ======================================================================== + + def get_design(self, design_id: str) -> Optional[ParsedDesign]: + """Get design by ID.""" + for design in self.designs: + if design.id == design_id: + return design + return None + + def get_design_for_feature(self, feature: str) -> Optional[ParsedDesign]: + """Get design for a feature.""" + for design in self.designs: + if design.feature == feature: + return design + return None + + # 
======================================================================== + # Ticket Methods + # ======================================================================== + + def get_ticket(self, ticket_id: str) -> Optional[ParsedTicket]: + """Get ticket by ID.""" + for ticket in self.tickets: + if ticket.id == ticket_id: + return ticket + return None + + def get_task(self, task_id: str) -> Optional[ParsedTask]: + """Get task by ID.""" + for task in self.tasks: + if task.id == task_id: + return task + return None + + def get_tasks_for_ticket(self, ticket_id: str) -> list[ParsedTask]: + """Get all tasks belonging to a ticket.""" + return [t for t in self.tasks if t.parent_ticket == ticket_id] + + def get_completed_tasks(self) -> set[str]: + """Get set of completed task IDs.""" + return {t.id for t in self.tasks if t.status == "done"} + + def get_completed_tickets(self) -> set[str]: + """Get set of ticket IDs where all tasks are complete.""" + completed_tickets = set() + for ticket in self.tickets: + tasks = self.get_tasks_for_ticket(ticket.id) + if tasks and all(t.status == "done" for t in tasks): + completed_tickets.add(ticket.id) + elif not tasks and ticket.status == "done": + # Ticket with no tasks is complete if status is done + completed_tickets.add(ticket.id) + return completed_tickets + + def get_blocking_tickets(self, ticket_id: str) -> list[str]: + """Get list of ticket IDs that block a given ticket (transitively). + + Uses BFS to find all tickets that must complete before this ticket. 
+ """ + ticket = self.get_ticket(ticket_id) + if not ticket: + return [] + + blocking = [] + visited = set() + queue = list(ticket.dependencies.blocked_by) + + while queue: + blocker_id = queue.pop(0) + if blocker_id in visited: + continue + visited.add(blocker_id) + blocking.append(blocker_id) + + # Check transitive dependencies + blocker = self.get_ticket(blocker_id) + if blocker: + for transitive in blocker.dependencies.blocked_by: + if transitive not in visited: + queue.append(transitive) + + return blocking + + def is_task_blocked_by_tickets(self, task: ParsedTask) -> tuple[bool, list[str]]: + """Check if a task is blocked by incomplete tickets its parent depends on. + + Returns: + Tuple of (is_blocked, list of blocking ticket IDs) + """ + parent_ticket = self.get_ticket(task.parent_ticket) + if not parent_ticket: + return False, [] + + blocking_tickets = self.get_blocking_tickets(task.parent_ticket) + if not blocking_tickets: + return False, [] + + completed_tickets = self.get_completed_tickets() + incomplete_blockers = [t for t in blocking_tickets if t not in completed_tickets] + + return len(incomplete_blockers) > 0, incomplete_blockers + + def is_task_blocked(self, task: ParsedTask, completed_tasks: Optional[set[str]] = None) -> tuple[bool, str]: + """Check if a task is blocked, considering both task and ticket dependencies. + + Returns: + Tuple of (is_blocked, reason) + """ + if completed_tasks is None: + completed_tasks = self.get_completed_tasks() + + # Check direct task dependencies first + for dep in task.dependencies.depends_on: + if dep not in completed_tasks: + return True, f"blocked by task {dep}" + + # Check cross-ticket dependencies + is_blocked, blocking_tickets = self.is_task_blocked_by_tickets(task) + if is_blocked: + return True, f"blocked by ticket(s): {', '.join(blocking_tickets)}" + + return False, "" + + def get_ready_tasks(self) -> list[ParsedTask]: + """Get tasks that are ready to work on (no blocking dependencies). 
+ + Considers both: + - Direct task dependencies (depends_on) + - Cross-ticket dependencies (parent ticket blocked_by) + """ + completed = self.get_completed_tasks() + ready = [] + + for task in self.tasks: + if task.status != "pending": + continue + + is_blocked, _ = self.is_task_blocked(task, completed) + if not is_blocked: + ready.append(task) + + return ready + + def get_cross_ticket_dependency_graph(self) -> dict[str, list[str]]: + """Build a graph of ticket dependencies. + + Returns: + Dict mapping ticket_id -> list of ticket_ids it blocks + """ + graph = {t.id: [] for t in self.tickets} + + for ticket in self.tickets: + for blocker_id in ticket.dependencies.blocked_by: + if blocker_id in graph: + graph[blocker_id].append(ticket.id) + + return graph + + def is_valid(self) -> bool: + """Check if there are no validation errors.""" + return len(self.errors) == 0 + + # ======================================================================== + # Traceability Methods + # ======================================================================== + + def get_tickets_for_requirement(self, req_id: str) -> list[ParsedTicket]: + """Get all tickets implementing a requirement.""" + return [t for t in self.tickets if req_id in t.requirements] + + def get_design_for_ticket(self, ticket_id: str) -> Optional[ParsedDesign]: + """Get the design document for a ticket.""" + ticket = self.get_ticket(ticket_id) + if not ticket or not ticket.design_ref: + return None + # design_ref is like "designs/feature-name.md" + # Find design by matching feature + for design in self.designs: + if ticket.design_ref.endswith(f"{design.feature}.md"): + return design + if design.id == ticket.design_ref: + return design + return None + + def get_requirements_for_ticket(self, ticket_id: str) -> list[ParsedRequirement]: + """Get all requirements a ticket implements.""" + ticket = self.get_ticket(ticket_id) + if not ticket: + return [] + return [r for r in self.requirements if r.id in ticket.requirements] 
+ + def get_full_traceability(self) -> dict: + """Build complete traceability matrix. + + Returns dict with: + - requirements: REQ-ID -> {requirement, designs, tickets, tasks} + - designs: DESIGN-ID -> {design, requirements, tickets} + - tickets: TKT-ID -> {ticket, requirements, design, tasks} + - orphans: items without proper links + """ + trace = { + "requirements": {}, + "designs": {}, + "tickets": {}, + "orphans": { + "requirements": [], # Requirements not linked to any ticket + "designs": [], # Designs not linked to any ticket + "tickets": [], # Tickets not linked to requirements + }, + } + + # Build requirement traceability + for req in self.requirements: + tickets = self.get_tickets_for_requirement(req.id) + tasks = [] + for ticket in tickets: + tasks.extend(self.get_tasks_for_ticket(ticket.id)) + + trace["requirements"][req.id] = { + "requirement": req, + "linked_design": req.linked_design, + "tickets": [t.id for t in tickets], + "tasks": [t.id for t in tasks], + } + + if not tickets: + trace["orphans"]["requirements"].append(req.id) + + # Build design traceability + for design in self.designs: + linked_tickets = [ + t for t in self.tickets + if t.design_ref and ( + t.design_ref.endswith(f"{design.feature}.md") + or t.design_ref == design.id + ) + ] + + trace["designs"][design.id] = { + "design": design, + "requirements": design.requirements, + "tickets": [t.id for t in linked_tickets], + } + + if not linked_tickets: + trace["orphans"]["designs"].append(design.id) + + # Build ticket traceability + for ticket in self.tickets: + reqs = self.get_requirements_for_ticket(ticket.id) + design = self.get_design_for_ticket(ticket.id) + tasks = self.get_tasks_for_ticket(ticket.id) + + trace["tickets"][ticket.id] = { + "ticket": ticket, + "requirements": [r.id for r in reqs], + "design": design.id if design else None, + "tasks": [t.id for t in tasks], + "blocking_tickets": self.get_blocking_tickets(ticket.id), + } + + if not reqs: + 
trace["orphans"]["tickets"].append(ticket.id) + + return trace + + def get_traceability_stats(self) -> dict: + """Get summary statistics for traceability. + + Returns coverage metrics showing how well-linked the specs are. + """ + trace = self.get_full_traceability() + + total_reqs = len(self.requirements) + linked_reqs = total_reqs - len(trace["orphans"]["requirements"]) + + total_designs = len(self.designs) + linked_designs = total_designs - len(trace["orphans"]["designs"]) + + total_tickets = len(self.tickets) + linked_tickets = total_tickets - len(trace["orphans"]["tickets"]) + + return { + "requirements": { + "total": total_reqs, + "linked": linked_reqs, + "coverage": (linked_reqs / total_reqs * 100) if total_reqs > 0 else 100, + }, + "designs": { + "total": total_designs, + "linked": linked_designs, + "coverage": (linked_designs / total_designs * 100) if total_designs > 0 else 100, + }, + "tickets": { + "total": total_tickets, + "linked": linked_tickets, + "coverage": (linked_tickets / total_tickets * 100) if total_tickets > 0 else 100, + }, + "tasks": { + "total": len(self.tasks), + "done": len([t for t in self.tasks if t.status == "done"]), + "in_progress": len([t for t in self.tasks if t.status == "in_progress"]), + "pending": len([t for t in self.tasks if t.status == "pending"]), + }, + "orphans": trace["orphans"], + } diff --git a/backend/omoi_os/sandbox_skills/spec-driven-dev/scripts/parse_specs.py b/backend/omoi_os/sandbox_skills/spec-driven-dev/scripts/parse_specs.py new file mode 100644 index 00000000..50da5952 --- /dev/null +++ b/backend/omoi_os/sandbox_skills/spec-driven-dev/scripts/parse_specs.py @@ -0,0 +1,577 @@ +#!/usr/bin/env python3 +""" +Parse .omoi_os/ spec files into structured data. + +This module provides the SpecParser class that reads markdown files +with YAML frontmatter and converts them into structured dataclasses. 
#!/usr/bin/env python3
"""
Parse .omoi_os/ spec files into structured data.

This module provides the SpecParser class that reads markdown files
with YAML frontmatter and converts them into structured dataclasses.

Supports:
- Requirements (.omoi_os/requirements/*.md)
- Designs (.omoi_os/designs/*.md)
- Tickets (.omoi_os/tickets/*.md)
- Tasks (.omoi_os/tasks/*.md)

Usage:
    from parse_specs import SpecParser

    parser = SpecParser()
    result = parser.parse_all()

    for req in result.requirements:
        print(f"{req.id}: {req.title}")

    for ticket in result.tickets:
        print(f"{ticket.id}: {ticket.title}")

    for task in result.get_ready_tasks():
        print(f"Ready: {task.id}")
"""

import re
from datetime import date
from pathlib import Path
from typing import Optional

import yaml

from models import (
    AcceptanceCriterion,
    ApiEndpoint,
    DataModel,
    ParsedDesign,
    ParsedRequirement,
    ParsedTask,
    ParsedTicket,
    ParseResult,
    TaskDependencies,
    TicketDependencies,
    ValidationError,
)


class SpecParser:
    """Parse spec files from .omoi_os/ directory."""

    def __init__(self, root_dir: Optional[Path] = None):
        """Initialize parser with project root directory.

        Args:
            root_dir: Project root directory. If None, will search upward
                from current directory for .omoi_os/ folder.
        """
        self.root = root_dir or self._find_project_root()
        self.omoi_dir = self.root / ".omoi_os"

    def _find_project_root(self) -> Path:
        """Find project root by looking for .omoi_os or common markers.

        Falls back to the current working directory when neither a
        .omoi_os/ nor a .git/ marker is found.
        """
        current = Path.cwd()

        for parent in [current] + list(current.parents):
            if (parent / ".omoi_os").exists():
                return parent
            if (parent / ".git").exists():
                return parent

        return current

    def _parse_frontmatter(self, content: str) -> tuple[dict, str]:
        """Extract YAML frontmatter and markdown body from content.

        Supports two formats:
        1. YAML frontmatter (preferred): ---\\nkey: value\\n---
        2. Markdown metadata (legacy): **Key**: Value

        Args:
            content: Full file content

        Returns:
            Tuple of (frontmatter dict, remaining markdown body)

        Raises:
            ValueError: If frontmatter is missing or invalid
        """
        # Check for YAML frontmatter delimiters
        if content.startswith("---"):
            # Find end of frontmatter (the next "---" on its own line)
            end_match = re.search(r"\n---\s*\n", content[3:])
            if end_match:
                # Offsets are relative to content[3:], hence the +3 corrections.
                frontmatter_text = content[3 : end_match.start() + 3]
                body = content[end_match.end() + 3 :]

                try:
                    frontmatter = yaml.safe_load(frontmatter_text)
                except yaml.YAMLError as e:
                    raise ValueError(f"Invalid YAML frontmatter: {e}")

                if not isinstance(frontmatter, dict):
                    raise ValueError("Frontmatter must be a YAML mapping")

                return frontmatter, body

        # Fallback: Parse markdown-style metadata
        # Format: **Key**: Value or **Key:** Value
        frontmatter = {}
        body_lines = []
        in_header = True

        for line in content.split("\n"):
            if in_header:
                # Check for markdown metadata pattern
                md_match = re.match(r"\*\*([^*]+)\*\*:?\s*(.+)", line)
                if md_match:
                    key = md_match.group(1).strip().lower().replace(" ", "_")
                    value = md_match.group(2).strip()
                    frontmatter[key] = value
                elif line.startswith("# "):
                    # Title line - extract id from it if possible
                    title = line[2:].strip()
                    frontmatter["title"] = title
                    # Try to extract ID from title like "REQ-XXX: Title" or "# Title"
                    id_match = re.match(r"(REQ-[A-Z0-9-]+|[A-Z]+-\d+)", title)
                    if id_match:
                        frontmatter["id"] = id_match.group(1)
                elif line.strip() == "" or line.startswith("##"):
                    # End of header section
                    in_header = False
                    body_lines.append(line)
            else:
                body_lines.append(line)

        body = "\n".join(body_lines)

        # Generate missing required fields for requirements/designs
        if "id" not in frontmatter:
            # Use feature as ID fallback
            if "feature" in frontmatter:
                frontmatter["id"] = frontmatter["feature"]
            # Or generate from title
            elif "title" in frontmatter:
                # Convert title to ID-like format
                title_id = frontmatter["title"].lower().replace(" ", "-")
                title_id = re.sub(r"[^a-z0-9-]", "", title_id)
                frontmatter["id"] = title_id

        # Normalize status casing unconditionally. Downstream code compares
        # against lowercase values ("done", "pending", "draft"), and legacy
        # headers typically read "**Status**: Draft". The previous guard
        # (`if "status" not in frontmatter`) meant a present status was never
        # lowercased and a missing one was always just "draft".
        status = frontmatter.get("status", "draft")
        frontmatter["status"] = status.lower() if isinstance(status, str) else "draft"

        if "created" not in frontmatter:
            # Prefer a legacy "created_date" key; frontmatter.get("created")
            # is necessarily None inside this branch and serves only as a
            # defensive fallback.
            created_str = frontmatter.pop("created_date", None) or frontmatter.get("created")
            if created_str:
                try:
                    frontmatter["created"] = date.fromisoformat(created_str) if isinstance(created_str, str) else created_str
                except (ValueError, TypeError):
                    frontmatter["created"] = date.today()
            else:
                frontmatter["created"] = date.today()

        if "updated" not in frontmatter:
            frontmatter["updated"] = frontmatter.get("created", date.today())

        return frontmatter, body

    def _parse_date(self, value) -> date:
        """Parse date from ISO string or pass through a date instance."""
        if isinstance(value, date):
            return value
        if isinstance(value, str):
            return date.fromisoformat(value)
        raise ValueError(f"Cannot parse date: {value}")

    def _extract_description(self, body: str) -> str:
        """Extract the "## Description" or "## Objective" section from the body."""
        match = re.search(
            r"##\s+(?:Description|Objective)\s*\n(.*?)(?=\n##|\n---|\Z)",
            body,
            re.DOTALL | re.IGNORECASE,
        )
        if match:
            return match.group(1).strip()
        return ""

    def _extract_section(self, body: str, section_name: str) -> str:
        """Extract a named "## <section_name>" section from the markdown body."""
        pattern = rf"##\s+{re.escape(section_name)}\s*\n(.*?)(?=\n##|\n---|\Z)"
        match = re.search(pattern, body, re.DOTALL | re.IGNORECASE)
        if match:
            return match.group(1).strip()
        return ""

    def _extract_acceptance_criteria(self, body: str) -> list[AcceptanceCriterion]:
        """Extract acceptance-criteria checkboxes ("- [ ]" / "- [x]") from the body."""
        criteria = []
        pattern = r"- \[([ xX])\] (.+?)(?=\n|$)"
        matches = re.findall(pattern, body)
        for check, text in matches:
            criteria.append(AcceptanceCriterion(
                text=text.strip(),
                completed=check.lower() == "x"
            ))
        return criteria

    # ========================================================================
    # Requirement Parsing
    # ========================================================================

    def parse_requirement(self, file_path: Path) -> ParsedRequirement:
        """Parse a requirement markdown file.

        Args:
            file_path: Path to requirement .md file

        Returns:
            ParsedRequirement instance

        Raises:
            ValueError: If file is missing required fields or has invalid format
        """
        content = file_path.read_text()
        frontmatter, body = self._parse_frontmatter(content)

        # Required fields
        required = ["id", "title", "status", "created", "updated"]
        for field_name in required:
            if field_name not in frontmatter:
                raise ValueError(f"Missing required field: {field_name}")

        # Extract EARS-style requirement from body
        condition = self._extract_section(body, "Condition") or self._extract_section(body, "When")
        action = self._extract_section(body, "Action") or self._extract_section(body, "The System Shall")

        # If not in sections, try to parse from structured format
        if not condition and not action:
            # Look for WHEN...THE SYSTEM SHALL pattern
            ears_match = re.search(
                r"WHEN\s+(.+?)\s+THE SYSTEM SHALL\s+(.+?)(?=\n\n|\Z)",
                body,
                re.DOTALL | re.IGNORECASE,
            )
            if ears_match:
                condition = ears_match.group(1).strip()
                action = ears_match.group(2).strip()

        return ParsedRequirement(
            id=frontmatter["id"],
            title=frontmatter["title"],
            status=frontmatter["status"],
            created=self._parse_date(frontmatter["created"]),
            updated=self._parse_date(frontmatter["updated"]),
            category=frontmatter.get("category", "functional"),
            priority=frontmatter.get("priority", "MEDIUM"),
            condition=condition,
            action=action,
            rationale=self._extract_section(body, "Rationale"),
            acceptance_criteria=self._extract_acceptance_criteria(body),
            linked_tickets=frontmatter.get("tickets", []) or [],
            linked_design=frontmatter.get("design_ref"),
            file_path=str(file_path),
        )

    # ========================================================================
    # Design Parsing
    # ========================================================================

    def parse_design(self, file_path: Path) -> ParsedDesign:
        """Parse a design markdown file.

        Args:
            file_path: Path to design .md file

        Returns:
            ParsedDesign instance

        Raises:
            ValueError: If file is missing required fields or has invalid format
        """
        content = file_path.read_text()
        frontmatter, body = self._parse_frontmatter(content)

        # Required fields
        required = ["id", "title", "status", "created", "updated"]
        for field_name in required:
            if field_name not in frontmatter:
                raise ValueError(f"Missing required field: {field_name}")

        # Parse API endpoints from frontmatter (list of mappings)
        api_endpoints = []
        api_data = frontmatter.get("api_endpoints", []) or []
        for ep in api_data:
            if isinstance(ep, dict):
                api_endpoints.append(ApiEndpoint(
                    method=ep.get("method", "GET"),
                    path=ep.get("path", ""),
                    description=ep.get("description", ""),
                    request_body=ep.get("request_body"),
                    response=ep.get("response"),
                ))

        # Parse data models from frontmatter (list of mappings)
        data_models = []
        models_data = frontmatter.get("data_models", []) or []
        for model in models_data:
            if isinstance(model, dict):
                data_models.append(DataModel(
                    name=model.get("name", ""),
                    description=model.get("description", ""),
                    fields=model.get("fields", {}),
                    relationships=model.get("relationships", []),
                ))

        return ParsedDesign(
            id=frontmatter["id"],
            title=frontmatter["title"],
            status=frontmatter["status"],
            created=self._parse_date(frontmatter["created"]),
            updated=self._parse_date(frontmatter["updated"]),
            feature=frontmatter.get("feature", ""),
            requirements=frontmatter.get("requirements", []) or [],
            architecture=self._extract_section(body, "Architecture Overview")
            or self._extract_section(body, "Architecture"),
            data_models=data_models,
            api_endpoints=api_endpoints,
            components=frontmatter.get("components", []) or [],
            error_handling=self._extract_section(body, "Error Handling"),
            security_considerations=self._extract_section(body, "Security"),
            implementation_notes=self._extract_section(body, "Implementation Notes")
            or self._extract_section(body, "Notes"),
            file_path=str(file_path),
        )

    # ========================================================================
    # Ticket Parsing
    # ========================================================================

    def parse_ticket(self, file_path: Path) -> ParsedTicket:
        """Parse a ticket markdown file.

        Args:
            file_path: Path to ticket .md file

        Returns:
            ParsedTicket instance

        Raises:
            ValueError: If file is missing required fields or has invalid format
        """
        content = file_path.read_text()
        frontmatter, body = self._parse_frontmatter(content)

        # Required fields (loop var named field_name for consistency with
        # parse_requirement/parse_design)
        required = ["id", "title", "status", "priority", "estimate", "created", "updated"]
        for field_name in required:
            if field_name not in frontmatter:
                raise ValueError(f"Missing required field: {field_name}")

        # Parse dependencies
        deps_data = frontmatter.get("dependencies", {})
        dependencies = TicketDependencies(
            blocked_by=deps_data.get("blocked_by", []) or [],
            blocks=deps_data.get("blocks", []) or [],
            related=deps_data.get("related", []) or [],
        )

        return ParsedTicket(
            id=frontmatter["id"],
            title=frontmatter["title"],
            status=frontmatter["status"],
            priority=frontmatter["priority"],
            estimate=frontmatter["estimate"],
            created=self._parse_date(frontmatter["created"]),
            updated=self._parse_date(frontmatter["updated"]),
            feature=frontmatter.get("feature"),
            requirements=frontmatter.get("requirements", []) or [],
            design_ref=frontmatter.get("design_ref"),
            tasks=frontmatter.get("tasks", []) or [],
            dependencies=dependencies,
            description=self._extract_description(body),
            file_path=str(file_path),
        )

    def parse_task(self, file_path: Path) -> ParsedTask:
        """Parse a task markdown file.

        Args:
            file_path: Path to task .md file

        Returns:
            ParsedTask instance

        Raises:
            ValueError: If file is missing required fields or has invalid format
        """
        content = file_path.read_text()
        frontmatter, body = self._parse_frontmatter(content)

        # Required fields
        required = ["id", "title", "status", "parent_ticket", "estimate", "created"]
        for field_name in required:
            if field_name not in frontmatter:
                raise ValueError(f"Missing required field: {field_name}")

        # Parse dependencies
        deps_data = frontmatter.get("dependencies", {})
        dependencies = TaskDependencies(
            depends_on=deps_data.get("depends_on", []) or [],
            blocks=deps_data.get("blocks", []) or [],
        )

        return ParsedTask(
            id=frontmatter["id"],
            title=frontmatter["title"],
            status=frontmatter["status"],
            parent_ticket=frontmatter["parent_ticket"],
            estimate=frontmatter["estimate"],
            created=self._parse_date(frontmatter["created"]),
            assignee=frontmatter.get("assignee"),
            dependencies=dependencies,
            objective=self._extract_description(body),
            file_path=str(file_path),
        )

    def parse_all(self) -> ParseResult:
        """Parse all spec files (requirements, designs, tickets, tasks).

        Individual file failures are recorded as parse_error entries in
        result.errors rather than aborting the whole run.

        Returns:
            ParseResult with all specs and any parse errors
        """
        result = ParseResult()

        # Parse requirements
        requirements_dir = self.omoi_dir / "requirements"
        if requirements_dir.exists():
            for md_file in sorted(requirements_dir.glob("*.md")):
                # Skip template files
                if "template" in md_file.name.lower():
                    continue
                try:
                    req = self.parse_requirement(md_file)
                    result.requirements.append(req)
                except ValueError as e:
                    result.errors.append(
                        ValidationError(
                            error_type="parse_error",
                            message=str(e),
                            source_id=md_file.name,
                        )
                    )

        # Parse designs
        designs_dir = self.omoi_dir / "designs"
        if designs_dir.exists():
            for md_file in sorted(designs_dir.glob("*.md")):
                # Skip template files
                if "template" in md_file.name.lower():
                    continue
                try:
                    design = self.parse_design(md_file)
                    result.designs.append(design)
                except ValueError as e:
                    result.errors.append(
                        ValidationError(
                            error_type="parse_error",
                            message=str(e),
                            source_id=md_file.name,
                        )
                    )

        # Parse tickets
        # NOTE(review): unlike requirements/designs, ticket/task template
        # files are NOT skipped here — confirm whether that is intentional.
        tickets_dir = self.omoi_dir / "tickets"
        if tickets_dir.exists():
            for md_file in sorted(tickets_dir.glob("*.md")):
                try:
                    ticket = self.parse_ticket(md_file)
                    result.tickets.append(ticket)
                except ValueError as e:
                    result.errors.append(
                        ValidationError(
                            error_type="parse_error",
                            message=str(e),
                            source_id=md_file.name,
                        )
                    )

        # Parse tasks
        tasks_dir = self.omoi_dir / "tasks"
        if tasks_dir.exists():
            for md_file in sorted(tasks_dir.glob("*.md")):
                try:
                    task = self.parse_task(md_file)
                    result.tasks.append(task)
                except ValueError as e:
                    result.errors.append(
                        ValidationError(
                            error_type="parse_error",
                            message=str(e),
                            source_id=md_file.name,
                        )
                    )

        return result

    def list_requirements(self) -> list[ParsedRequirement]:
        """List all parsed requirements (re-parses the whole tree)."""
        return self.parse_all().requirements

    def list_designs(self) -> list[ParsedDesign]:
        """List all parsed designs (re-parses the whole tree)."""
        return self.parse_all().designs
def list_tickets(self) -> list[ParsedTicket]: + """List all parsed tickets.""" + return self.parse_all().tickets + + def list_tasks(self) -> list[ParsedTask]: + """List all parsed tasks.""" + return self.parse_all().tasks + + def get_requirement(self, req_id: str) -> Optional[ParsedRequirement]: + """Get a specific requirement by ID.""" + return self.parse_all().get_requirement(req_id) + + def get_design(self, design_id: str) -> Optional[ParsedDesign]: + """Get a specific design by ID.""" + return self.parse_all().get_design(design_id) + + def get_ticket(self, ticket_id: str) -> Optional[ParsedTicket]: + """Get a specific ticket by ID.""" + return self.parse_all().get_ticket(ticket_id) + + def get_task(self, task_id: str) -> Optional[ParsedTask]: + """Get a specific task by ID.""" + return self.parse_all().get_task(task_id) + + +if __name__ == "__main__": + # Quick test + parser = SpecParser() + result = parser.parse_all() + + print(f"Found:") + print(f" {len(result.requirements)} requirements") + print(f" {len(result.designs)} designs") + print(f" {len(result.tickets)} tickets") + print(f" {len(result.tasks)} tasks") + + if result.errors: + print(f"\nParse Errors ({len(result.errors)}):") + for error in result.errors: + print(f" {error}") + + # Show traceability stats + if result.requirements or result.designs or result.tickets: + stats = result.get_traceability_stats() + print(f"\nTraceability Coverage:") + if result.requirements: + print(f" Requirements: {stats['requirements']['linked']}/{stats['requirements']['total']} linked ({stats['requirements']['coverage']:.1f}%)") + if result.designs: + print(f" Designs: {stats['designs']['linked']}/{stats['designs']['total']} linked ({stats['designs']['coverage']:.1f}%)") + if result.tickets: + print(f" Tickets: {stats['tickets']['linked']}/{stats['tickets']['total']} linked ({stats['tickets']['coverage']:.1f}%)") + print(f" Tasks: {stats['tasks']['done']}/{stats['tasks']['total']} done") diff --git 
a/backend/omoi_os/sandbox_skills/spec-driven-dev/scripts/spec_cli.py b/backend/omoi_os/sandbox_skills/spec-driven-dev/scripts/spec_cli.py new file mode 100644 index 00000000..fac1f007 --- /dev/null +++ b/backend/omoi_os/sandbox_skills/spec-driven-dev/scripts/spec_cli.py @@ -0,0 +1,1075 @@ +#!/usr/bin/env python3 +""" +Unified CLI for spec-driven development. + +Parse, validate, and visualize tickets and tasks from .omoi_os/ directory. + +Usage: + # Show all tickets and tasks + uv run python spec_cli.py show all + + # Show only tickets + uv run python spec_cli.py show tickets + + # Show only tasks + uv run python spec_cli.py show tasks + + # Show dependency graph + uv run python spec_cli.py show graph + + # Show ready tasks (no blocking dependencies) + uv run python spec_cli.py show ready + + # Validate specs (check for circular dependencies, missing refs) + uv run python spec_cli.py validate + + # Export to JSON + uv run python spec_cli.py export json + + # Sync to API (Phase 4) + uv run python spec_cli.py sync push +""" + +import argparse +import json +import sys +from collections import defaultdict +from pathlib import Path +from typing import Optional + +from models import ParseResult, ParsedTask, ParsedTicket, ValidationError +from parse_specs import SpecParser + + +# ============================================================================ +# Validation (Phase 3) +# ============================================================================ + + +def detect_circular_dependencies(result: ParseResult) -> list[ValidationError]: + """Detect circular dependencies in task graph. + + Uses DFS to find cycles in the dependency graph. 
+ """ + errors = [] + + # Build adjacency list for tasks + task_deps = {t.id: t.dependencies.depends_on for t in result.tasks} + + # Track visited and recursion stack + visited = set() + rec_stack = set() + path = [] + + def dfs(task_id: str) -> Optional[list[str]]: + """DFS to detect cycle, returns cycle path if found.""" + if task_id in rec_stack: + # Found cycle - extract the cycle from path + cycle_start = path.index(task_id) + return path[cycle_start:] + [task_id] + + if task_id in visited: + return None + + visited.add(task_id) + rec_stack.add(task_id) + path.append(task_id) + + for dep in task_deps.get(task_id, []): + cycle = dfs(dep) + if cycle: + return cycle + + path.pop() + rec_stack.remove(task_id) + return None + + # Check each task as starting point + for task in result.tasks: + if task.id not in visited: + cycle = dfs(task.id) + if cycle: + cycle_str = " -> ".join(cycle) + errors.append( + ValidationError( + error_type="circular_dependency", + message=f"Circular dependency detected: {cycle_str}", + source_id=cycle[0], + target_id=cycle[-1], + ) + ) + # Reset for next search + visited.clear() + rec_stack.clear() + path.clear() + + return errors + + +def validate_references(result: ParseResult) -> list[ValidationError]: + """Validate that all referenced IDs exist.""" + errors = [] + + # Get all known IDs + ticket_ids = {t.id for t in result.tickets} + task_ids = {t.id for t in result.tasks} + + # Check ticket dependencies + for ticket in result.tickets: + for dep_id in ticket.dependencies.blocked_by: + if dep_id not in ticket_ids: + errors.append( + ValidationError( + error_type="missing_reference", + message=f"blocked_by references unknown ticket: {dep_id}", + source_id=ticket.id, + target_id=dep_id, + ) + ) + for dep_id in ticket.dependencies.blocks: + if dep_id not in ticket_ids: + errors.append( + ValidationError( + error_type="missing_reference", + message=f"blocks references unknown ticket: {dep_id}", + source_id=ticket.id, + target_id=dep_id, + 
) + ) + + # Check task dependencies + for task in result.tasks: + # Check parent ticket exists + if task.parent_ticket not in ticket_ids: + errors.append( + ValidationError( + error_type="missing_reference", + message=f"parent_ticket references unknown ticket: {task.parent_ticket}", + source_id=task.id, + target_id=task.parent_ticket, + ) + ) + + for dep_id in task.dependencies.depends_on: + if dep_id not in task_ids: + errors.append( + ValidationError( + error_type="missing_reference", + message=f"depends_on references unknown task: {dep_id}", + source_id=task.id, + target_id=dep_id, + ) + ) + for dep_id in task.dependencies.blocks: + if dep_id not in task_ids: + errors.append( + ValidationError( + error_type="missing_reference", + message=f"blocks references unknown task: {dep_id}", + source_id=task.id, + target_id=dep_id, + ) + ) + + return errors + + +def validate_specs(result: ParseResult) -> list[ValidationError]: + """Run all validation checks.""" + errors = list(result.errors) # Start with parse errors + errors.extend(detect_circular_dependencies(result)) + errors.extend(validate_references(result)) + return errors + + +# ============================================================================ +# Display Functions +# ============================================================================ + + +def print_header(title: str, char: str = "=", width: int = 70): + """Print a section header.""" + print(char * width) + print(f" {title}") + print(char * width) + + +def print_tickets(tickets: list[ParsedTicket]): + """Print all tickets.""" + print_header(f"TICKETS ({len(tickets)} total)") + print() + + for ticket in tickets: + print(f"{ticket.id}: {ticket.title}") + print(f" Status: {ticket.status} | Priority: {ticket.priority} | Estimate: {ticket.estimate}") + + # Truncate description + desc = ticket.description[:100] + "..." 
if len(ticket.description) > 100 else ticket.description + desc = desc.replace("\n", " ") + if desc: + print(f" Description: {desc}") + + if ticket.tasks: + print(f" Tasks: {', '.join(ticket.tasks)}") + + if ticket.dependencies.blocked_by: + print(f" Blocked By: {', '.join(ticket.dependencies.blocked_by)}") + if ticket.dependencies.blocks: + print(f" Blocks: {', '.join(ticket.dependencies.blocks)}") + + print() + + +def print_tasks(tasks: list[ParsedTask], result: Optional[ParseResult] = None): + """Print all tasks with cross-ticket dependency awareness.""" + print_header(f"TASKS ({len(tasks)} total)") + print() + + for task in tasks: + # Use ParseResult for cross-ticket blocking if available + if result: + is_blocked, reason = result.is_task_blocked(task) + else: + # Fallback to simple task-only blocking + completed_tasks = {t.id for t in tasks if t.status == "done"} + is_blocked = task.is_blocked(completed_tasks) + reason = "blocked by task dependency" if is_blocked else "" + + status_indicator = f"[BLOCKED: {reason}] " if is_blocked else "" + + print(f"{status_indicator}{task.id}: {task.title}") + print(f" Parent: {task.parent_ticket} | Status: {task.status} | Estimate: {task.estimate}") + + # Truncate objective + obj = task.objective[:100] + "..." 
if len(task.objective) > 100 else task.objective + obj = obj.replace("\n", " ") + if obj: + print(f" Objective: {obj}") + + if task.dependencies.depends_on: + print(f" Depends On: {', '.join(task.dependencies.depends_on)}") + if task.dependencies.blocks: + print(f" Blocks: {', '.join(task.dependencies.blocks)}") + + print() + + +def print_dependency_graph(result: ParseResult): + """Print ASCII dependency graph for tasks.""" + print_header("TASK DEPENDENCY GRAPH") + print() + + # Build reverse dependency map (what blocks what) + blocked_by: dict[str, list[str]] = defaultdict(list) + for task in result.tasks: + for dep in task.dependencies.depends_on: + blocked_by[dep].append(task.id) + + # Get task title by ID + task_titles = {t.id: t.title for t in result.tasks} + + # Find root tasks (no dependencies) + root_tasks = [t for t in result.tasks if not t.dependencies.depends_on] + + def print_tree(task_id: str, prefix: str = "", is_last: bool = True): + """Recursively print task tree.""" + connector = "└─> " if is_last else "├─> " + title = task_titles.get(task_id, "Unknown") + title_short = title[:40] + "..." 
if len(title) > 40 else title + + print(f"{prefix}{connector}{task_id} ({title_short})") + + children = blocked_by.get(task_id, []) + for i, child in enumerate(children): + new_prefix = prefix + (" " if is_last else "│ ") + print_tree(child, new_prefix, i == len(children) - 1) + + for i, task in enumerate(root_tasks): + if i > 0: + print() + print_tree(task.id, "", i == len(root_tasks) - 1) + + if not root_tasks: + print("No root tasks found (all tasks have dependencies)") + + print() + + +def print_cross_ticket_graph(result: ParseResult): + """Print ASCII dependency graph for tickets (cross-ticket dependencies).""" + print_header("CROSS-TICKET DEPENDENCY GRAPH") + print() + + # Build graph: ticket -> tickets it blocks + graph = result.get_cross_ticket_dependency_graph() + completed_tickets = result.get_completed_tickets() + + # Get ticket info + ticket_info = {t.id: t for t in result.tickets} + + if not any(t.dependencies.blocked_by or t.dependencies.blocks for t in result.tickets): + print("No cross-ticket dependencies defined.") + print() + print("To add cross-ticket dependencies, use the dependencies field in ticket YAML:") + print() + print(" dependencies:") + print(" blocked_by: [TKT-001] # This ticket waits for TKT-001") + print(" blocks: [TKT-003] # This ticket blocks TKT-003") + print() + return + + # Find root tickets (not blocked by any OTHER ticket) + all_blocked_by = set() + for ticket in result.tickets: + all_blocked_by.update(ticket.dependencies.blocked_by) + + # Root tickets are those that block others but aren't blocked themselves + root_tickets = [t for t in result.tickets if not t.dependencies.blocked_by] + + def print_ticket_tree(ticket_id: str, prefix: str = "", is_last: bool = True): + """Recursively print ticket tree.""" + connector = "└─> " if is_last else "├─> " + ticket = ticket_info.get(ticket_id) + if not ticket: + print(f"{prefix}{connector}{ticket_id} (unknown)") + return + + status_mark = "✓" if ticket_id in completed_tickets else "○" 
+ title_short = ticket.title[:35] + "..." if len(ticket.title) > 35 else ticket.title + task_count = len(result.get_tasks_for_ticket(ticket_id)) + + print(f"{prefix}{connector}[{status_mark}] {ticket_id} ({title_short}) [{task_count} tasks]") + + children = graph.get(ticket_id, []) + for i, child in enumerate(children): + new_prefix = prefix + (" " if is_last else "│ ") + print_ticket_tree(child, new_prefix, i == len(children) - 1) + + for i, ticket in enumerate(root_tickets): + if i > 0: + print() + print_ticket_tree(ticket.id, "", i == len(root_tickets) - 1) + + print() + print("Legend: ✓ = all tasks complete, ○ = incomplete") + print() + + +def print_ready_tasks(result: ParseResult): + """Print tasks that are ready to work on.""" + ready = result.get_ready_tasks() + + print_header(f"READY TASKS ({len(ready)} available)") + print() + + if not ready: + print("No tasks are ready. Either:") + print(" - All tasks have pending dependencies") + print(" - All tasks are already completed or in progress") + print() + return + + for task in ready: + print(f"- {task.id}: {task.title}") + print(f" Parent: {task.parent_ticket} | Estimate: {task.estimate}") + if task.objective: + obj = task.objective[:80] + "..." if len(task.objective) > 80 else task.objective + obj = obj.replace("\n", " ") + print(f" {obj}") + print() + + +def print_requirements(result: ParseResult): + """Print all requirements.""" + print_header(f"REQUIREMENTS ({len(result.requirements)} total)") + print() + + if not result.requirements: + print("No requirements found in .omoi_os/requirements/") + print() + return + + for req in result.requirements: + print(f"{req.id}: {req.title}") + print(f" Status: {req.status} | Priority: {req.priority} | Category: {req.category}") + + if req.condition: + cond = req.condition[:60] + "..." if len(req.condition) > 60 else req.condition + print(f" WHEN: {cond}") + + if req.action: + act = req.action[:60] + "..." 
if len(req.action) > 60 else req.action + print(f" THE SYSTEM SHALL: {act}") + + if req.acceptance_criteria: + print(f" Acceptance Criteria: {len(req.acceptance_criteria)} items") + + if req.linked_tickets: + print(f" Linked Tickets: {', '.join(req.linked_tickets)}") + + print() + + +def print_designs(result: ParseResult): + """Print all designs.""" + print_header(f"DESIGNS ({len(result.designs)} total)") + print() + + if not result.designs: + print("No designs found in .omoi_os/designs/") + print() + return + + for design in result.designs: + print(f"{design.id}: {design.title}") + print(f" Feature: {design.feature} | Status: {design.status}") + + if design.requirements: + print(f" Implements Requirements: {', '.join(design.requirements)}") + + if design.data_models: + print(f" Data Models: {', '.join(dm.name for dm in design.data_models)}") + + if design.api_endpoints: + print(f" API Endpoints: {len(design.api_endpoints)} defined") + + if design.components: + print(f" Components: {', '.join(design.components[:3])}", end="") + if len(design.components) > 3: + print(f" +{len(design.components) - 3} more") + else: + print() + + print() + + +def print_traceability(result: ParseResult): + """Print full traceability matrix: Requirements → Designs → Tickets → Tasks.""" + stats = result.get_traceability_stats() + trace = result.get_full_traceability() + + print_header("TRACEABILITY MATRIX") + print() + + # Summary stats + print("COVERAGE SUMMARY:") + print(f" Requirements: {stats['requirements']['linked']}/{stats['requirements']['total']} linked ({stats['requirements']['coverage']:.1f}%)") + print(f" Designs: {stats['designs']['linked']}/{stats['designs']['total']} linked ({stats['designs']['coverage']:.1f}%)") + print(f" Tickets: {stats['tickets']['linked']}/{stats['tickets']['total']} linked ({stats['tickets']['coverage']:.1f}%)") + print() + print(f"TASK STATUS:") + print(f" Done: {stats['tasks']['done']} | In Progress: {stats['tasks']['in_progress']} | Pending: 
{stats['tasks']['pending']}") + print() + + # Orphans + if any(trace["orphans"].values()): + print("ORPHANED ITEMS (not linked):") + if trace["orphans"]["requirements"]: + print(f" Requirements without tickets: {', '.join(trace['orphans']['requirements'])}") + if trace["orphans"]["designs"]: + print(f" Designs without tickets: {', '.join(trace['orphans']['designs'])}") + if trace["orphans"]["tickets"]: + print(f" Tickets without requirements: {', '.join(trace['orphans']['tickets'])}") + print() + + # Detailed traceability + print_header("REQUIREMENT → IMPLEMENTATION TRACE", char="-") + print() + + for req_id, req_data in trace["requirements"].items(): + req = req_data["requirement"] + print(f"┌─ REQ: {req_id}: {req.title}") + + # Show linked design + if req_data["linked_design"]: + print(f"│ └─> Design: {req_data['linked_design']}") + + # Show linked tickets + if req_data["tickets"]: + print(f"│ └─> Tickets: {', '.join(req_data['tickets'])}") + + # Show tasks for each ticket + for ticket_id in req_data["tickets"]: + tasks = result.get_tasks_for_ticket(ticket_id) + if tasks: + done_count = sum(1 for t in tasks if t.status == "done") + print(f"│ └─> Tasks for {ticket_id}: {done_count}/{len(tasks)} complete") + else: + print("│ └─> (no implementing tickets)") + + print("└" + "─" * 50) + print() + + # Ticket → Task breakdown + print_header("TICKET → TASK BREAKDOWN", char="-") + print() + + for ticket in result.tickets: + tasks = result.get_tasks_for_ticket(ticket.id) + done = sum(1 for t in tasks if t.status == "done") + total = len(tasks) + progress = f"{done}/{total}" if total > 0 else "no tasks" + + # Check if blocked + is_blocked = ticket.is_blocked() + blocked_marker = " [BLOCKED]" if is_blocked else "" + + print(f"┌─ {ticket.id}: {ticket.title}{blocked_marker}") + print(f"│ Status: {ticket.status} | Progress: {progress}") + + if ticket.requirements: + print(f"│ Implements: {', '.join(ticket.requirements)}") + + if tasks: + for task in tasks: + status_char = "✓" 
if task.status == "done" else "○" + print(f"│ [{status_char}] {task.id}: {task.title[:40]}") + + print("└" + "─" * 50) + print() + + +def print_validation(errors: list[ValidationError]): + """Print validation results.""" + print_header("VALIDATION") + print() + + if not errors: + print("✓ No circular dependencies detected") + print("✓ All task references valid") + print("✓ All ticket references valid") + print() + return + + print(f"✗ Found {len(errors)} validation error(s):") + print() + + for error in errors: + print(f" [{error.error_type}] {error.source_id}") + print(f" {error.message}") + print() + + +def show_all(result: ParseResult): + """Show everything: requirements, designs, tickets, tasks, graphs, traceability, validation.""" + print_requirements(result) + print_designs(result) + print_tickets(result.tickets) + print_tasks(result.tasks, result) + print_dependency_graph(result) + print_cross_ticket_graph(result) + print_ready_tasks(result) + print_traceability(result) + + errors = validate_specs(result) + print_validation(errors) + + +# ============================================================================ +# Export Functions +# ============================================================================ + + +def export_json(result: ParseResult) -> str: + """Export all specs as JSON.""" + data = { + "requirements": [ + { + "id": r.id, + "title": r.title, + "status": r.status, + "created": r.created.isoformat(), + "updated": r.updated.isoformat(), + "category": r.category, + "priority": r.priority, + "condition": r.condition, + "action": r.action, + "rationale": r.rationale, + "acceptance_criteria": [ + {"text": ac.text, "completed": ac.completed} + for ac in r.acceptance_criteria + ], + "linked_tickets": r.linked_tickets, + "linked_design": r.linked_design, + } + for r in result.requirements + ], + "designs": [ + { + "id": d.id, + "title": d.title, + "status": d.status, + "created": d.created.isoformat(), + "updated": d.updated.isoformat(), + 
"feature": d.feature, + "requirements": d.requirements, + "architecture": d.architecture, + "data_models": [ + { + "name": dm.name, + "description": dm.description, + "fields": dm.fields, + "relationships": dm.relationships, + } + for dm in d.data_models + ], + "api_endpoints": [ + { + "method": ep.method, + "path": ep.path, + "description": ep.description, + } + for ep in d.api_endpoints + ], + "components": d.components, + "error_handling": d.error_handling, + "security_considerations": d.security_considerations, + "implementation_notes": d.implementation_notes, + } + for d in result.designs + ], + "tickets": [ + { + "id": t.id, + "title": t.title, + "status": t.status, + "priority": t.priority, + "estimate": t.estimate, + "created": t.created.isoformat(), + "updated": t.updated.isoformat(), + "feature": t.feature, + "requirements": t.requirements, + "design_ref": t.design_ref, + "tasks": t.tasks, + "dependencies": { + "blocked_by": t.dependencies.blocked_by, + "blocks": t.dependencies.blocks, + "related": t.dependencies.related, + }, + "description": t.description, + } + for t in result.tickets + ], + "tasks": [ + { + "id": t.id, + "title": t.title, + "status": t.status, + "parent_ticket": t.parent_ticket, + "estimate": t.estimate, + "created": t.created.isoformat(), + "assignee": t.assignee, + "dependencies": { + "depends_on": t.dependencies.depends_on, + "blocks": t.dependencies.blocks, + }, + "objective": t.objective, + } + for t in result.tasks + ], + "traceability": result.get_traceability_stats(), + } + return json.dumps(data, indent=2) + + +# ============================================================================ +# Main CLI +# ============================================================================ + + +def main(): + parser = argparse.ArgumentParser( + description="Parse, validate, and visualize specs from .omoi_os/", + formatter_class=argparse.RawDescriptionHelpFormatter, + epilog=""" +Examples: + %(prog)s show all Show all requirements, 
designs, tickets, tasks + %(prog)s show requirements Show only requirements + %(prog)s show designs Show only designs + %(prog)s show tickets Show only tickets + %(prog)s show tasks Show only tasks + %(prog)s show graph Show task dependency graph + %(prog)s show traceability Show full traceability matrix + %(prog)s show ready Show tasks ready to work on + %(prog)s validate Run validation checks + %(prog)s export json Export to JSON format + %(prog)s sync-specs push Sync requirements/designs to API + """, + ) + + subparsers = parser.add_subparsers(dest="command", help="Command to run") + + # show command + show_parser = subparsers.add_parser("show", help="Show specs") + show_parser.add_argument( + "what", + choices=["all", "requirements", "designs", "tickets", "tasks", "graph", "ticket-graph", "traceability", "ready"], + help="What to show (graph=task deps, ticket-graph=cross-ticket deps, traceability=full matrix)", + ) + + # validate command + subparsers.add_parser("validate", help="Validate specs") + + # export command + export_parser = subparsers.add_parser("export", help="Export specs") + export_parser.add_argument( + "format", + choices=["json"], + help="Export format", + ) + + # projects command + projects_parser = subparsers.add_parser("projects", help="List API projects") + projects_parser.add_argument( + "--api-url", + default="http://localhost:18000", + help="API base URL", + ) + projects_parser.add_argument( + "--api-key", + help="API key for authentication (or set OMOIOS_API_KEY env var)", + ) + + # project command (show single project with tickets/tasks) + project_parser = subparsers.add_parser("project", help="Show project details with tickets and tasks") + project_parser.add_argument( + "project_id", + help="Project ID to display", + ) + project_parser.add_argument( + "--api-url", + default="http://localhost:18000", + help="API base URL", + ) + project_parser.add_argument( + "--api-key", + help="API key for authentication (or set OMOIOS_API_KEY env 
var)", + ) + + # sync command + sync_parser = subparsers.add_parser("sync", help="Sync with API") + sync_parser.add_argument( + "action", + choices=["push", "diff"], + help="Sync action (push=create/update, diff=dry run)", + ) + sync_parser.add_argument( + "--api-url", + default="http://localhost:18000", + help="API base URL", + ) + sync_parser.add_argument( + "--project-id", + help="Project ID to associate tickets with", + ) + sync_parser.add_argument( + "--email", + help="Email for login (or set OMOIOS_EMAIL env var)", + ) + sync_parser.add_argument( + "--password", + help="Password for login (or set OMOIOS_PASSWORD env var)", + ) + sync_parser.add_argument( + "--token", + help="JWT access token (or set OMOIOS_TOKEN env var)", + ) + sync_parser.add_argument( + "--api-key", + help="API key for authentication (or set OMOIOS_API_KEY env var)", + ) + + # sync-specs command (sync requirements/designs to API) + sync_specs_parser = subparsers.add_parser("sync-specs", help="Sync requirements/designs to API specs") + sync_specs_parser.add_argument( + "action", + choices=["push", "diff"], + help="Sync action (push=create/update, diff=dry run)", + ) + sync_specs_parser.add_argument( + "--api-url", + default="http://localhost:18000", + help="API base URL", + ) + sync_specs_parser.add_argument( + "--project-id", + required=True, + help="Project ID to associate spec with (required)", + ) + sync_specs_parser.add_argument( + "--spec-title", + help="Spec title (defaults to design feature name)", + ) + sync_specs_parser.add_argument( + "--api-key", + help="API key for authentication (or set OMOIOS_API_KEY env var)", + ) + sync_specs_parser.add_argument( + "--token", + help="JWT access token (or set OMOIOS_TOKEN env var)", + ) + + # traceability command (show API traceability) + trace_parser = subparsers.add_parser("api-trace", help="Show traceability from API") + trace_parser.add_argument( + "project_id", + help="Project ID to show traceability for", + ) + 
trace_parser.add_argument( + "--api-url", + default="http://localhost:18000", + help="API base URL", + ) + trace_parser.add_argument( + "--api-key", + help="API key for authentication (or set OMOIOS_API_KEY env var)", + ) + + args = parser.parse_args() + + if not args.command: + parser.print_help() + sys.exit(1) + + # Parse all specs + spec_parser = SpecParser() + result = spec_parser.parse_all() + + if args.command == "show": + if args.what == "all": + show_all(result) + elif args.what == "requirements": + print_requirements(result) + elif args.what == "designs": + print_designs(result) + elif args.what == "tickets": + print_tickets(result.tickets) + elif args.what == "tasks": + print_tasks(result.tasks, result) + elif args.what == "graph": + print_dependency_graph(result) + elif args.what == "ticket-graph": + print_cross_ticket_graph(result) + elif args.what == "traceability": + print_traceability(result) + elif args.what == "ready": + print_ready_tasks(result) + + elif args.command == "validate": + errors = validate_specs(result) + print_validation(errors) + if errors: + sys.exit(1) + + elif args.command == "export": + if args.format == "json": + print(export_json(result)) + + elif args.command == "projects": + import asyncio + import os + from api_client import OmoiOSClient + + async def list_projects(): + api_key = args.api_key or os.environ.get("OMOIOS_API_KEY") + client = OmoiOSClient(base_url=args.api_url, api_key=api_key) + projects = await client.list_projects() + if projects: + print_header(f"PROJECTS ({len(projects)} total)") + print() + for p in projects: + print(f" {p.get('id', 'N/A')}: {p.get('name', 'Unnamed')}") + if p.get('description'): + desc = p['description'][:60] + "..." 
if len(p['description']) > 60 else p['description'] + print(f" {desc}") + print() + else: + print("No projects found.") + + asyncio.run(list_projects()) + + elif args.command == "project": + import asyncio + import os + from api_client import OmoiOSClient + + async def show_project(): + api_key = args.api_key or os.environ.get("OMOIOS_API_KEY") + client = OmoiOSClient(base_url=args.api_url, api_key=api_key) + data = await client.get_project_with_tickets(args.project_id) + + if "error" in data: + print(f"Error: {data['error']}") + sys.exit(1) + + project = data.get("project", {}) + tickets = data.get("tickets", []) + + print_header(f"PROJECT: {project.get('name', 'Unknown')}") + print() + print(f" ID: {project.get('id', 'N/A')}") + if project.get('description'): + print(f" Description: {project['description'][:80]}") + print() + print(f" Total Tickets: {data.get('total_tickets', 0)}") + print(f" Total Tasks: {data.get('total_tasks', 0)}") + print() + + if not tickets: + print(" No tickets found for this project.") + return + + # Group tickets by status + by_status = {} + for t in tickets: + status = t.get("status", "unknown") + if status not in by_status: + by_status[status] = [] + by_status[status].append(t) + + # Print tickets grouped by status + print_header("TICKETS BY STATUS", char="-") + print() + + for status, status_tickets in sorted(by_status.items()): + print(f" [{status.upper()}] ({len(status_tickets)} tickets)") + print() + + for ticket in status_tickets: + print(f" {ticket.get('id', 'N/A')[:20]}...") + print(f" Title: {ticket.get('title', 'No title')}") + print(f" Priority: {ticket.get('priority', 'N/A')}") + + tasks = ticket.get("tasks", []) + if tasks: + print(f" Tasks: ({len(tasks)} total)") + for task in tasks[:5]: # Show max 5 tasks per ticket + task_status = task.get("status", "unknown") + print(f" - [{task_status}] {task.get('title', task.get('description', 'No title')[:40])}") + if len(tasks) > 5: + print(f" ... 
and {len(tasks) - 5} more tasks") + else: + print(" Tasks: None") + print() + + asyncio.run(show_project()) + + elif args.command == "sync": + import asyncio + import os + from api_client import run_sync + + api_key = getattr(args, 'api_key', None) or os.environ.get("OMOIOS_API_KEY") + success = asyncio.run( + run_sync( + args.api_url, + args.action, + args.project_id, + args.email, + args.password, + args.token, + api_key, + ) + ) + if not success: + sys.exit(1) + + elif args.command == "sync-specs": + import asyncio + import os + from api_client import OmoiOSClient, print_sync_summary + + async def run_sync_specs(): + api_key = args.api_key or os.environ.get("OMOIOS_API_KEY") + token = args.token or os.environ.get("OMOIOS_TOKEN") + + client = OmoiOSClient(base_url=args.api_url, api_key=api_key, token=token) + + # Check connection + print(f"Connecting to {args.api_url}...") + connected, msg = await client.check_connection() + if not connected: + print(f"Error: Cannot connect to API: {msg}") + return False + + print("Connected!") + print(f"Project ID: {args.project_id}") + print(f"Requirements: {len(result.requirements)}") + print(f"Designs: {len(result.designs)}") + print() + + # Run sync + if args.action == "diff": + print("Checking what would change (dry run)...") + summary = await client.diff_specs( + result, + args.project_id, + args.spec_title, + ) + else: # push + print("Syncing specs to API...") + summary = await client.sync_specs( + result, + args.project_id, + args.spec_title, + ) + + print_sync_summary(summary) + return summary.failed == 0 + + success = asyncio.run(run_sync_specs()) + if not success: + sys.exit(1) + + elif args.command == "api-trace": + import asyncio + import os + from api_client import OmoiOSClient + + async def show_api_traceability(): + api_key = args.api_key or os.environ.get("OMOIOS_API_KEY") + client = OmoiOSClient(base_url=args.api_url, api_key=api_key) + + print(f"Fetching traceability from {args.api_url}...") + trace = await 
client.get_full_traceability(args.project_id) + + print_header("API TRACEABILITY MATRIX") + print() + + # Specs summary + specs = trace.get("specs", []) + print(f"SPECS: {len(specs)} total") + for spec in specs: + req_count = len(spec.get("requirements", [])) + ticket_count = len(spec.get("linked_tickets", [])) + print(f" [{spec['status']}] {spec['id'][:20]}...") + print(f" Title: {spec['title']}") + print(f" Requirements: {req_count} | Linked Tickets: {ticket_count}") + print() + + # Tickets summary + tickets = trace.get("tickets", []) + orphans = trace.get("orphan_tickets", []) + print(f"LINKED TICKETS: {len(tickets)} total") + for ticket in tickets[:10]: # Show max 10 + task_count = len(ticket.get("tasks", [])) + print(f" [{ticket['status']}] {ticket['id'][:20]}...") + print(f" Title: {ticket['title'][:50]}") + print(f" Tasks: {task_count}") + if len(tickets) > 10: + print(f" ... and {len(tickets) - 10} more tickets") + print() + + if orphans: + print(f"ORPHAN TICKETS (not linked to specs): {len(orphans)}") + for ticket in orphans[:5]: + print(f" - {ticket['id'][:20]}... {ticket['title'][:40]}") + if len(orphans) > 5: + print(f" ... and {len(orphans) - 5} more") + print() + + asyncio.run(show_api_traceability()) + + +if __name__ == "__main__": + main() diff --git a/backend/omoi_os/sandbox_skills/spec-driven-dev/scripts/validate_specs.py b/backend/omoi_os/sandbox_skills/spec-driven-dev/scripts/validate_specs.py new file mode 100755 index 00000000..eb420a8f --- /dev/null +++ b/backend/omoi_os/sandbox_skills/spec-driven-dev/scripts/validate_specs.py @@ -0,0 +1,420 @@ +#!/usr/bin/env python3 +""" +Validate spec documents for completeness and consistency. + +Uses YAML frontmatter validation (consistent with parse_specs.py). 
+ +Usage: + python validate_specs.py [--path PATH] + python validate_specs.py --requirements + python validate_specs.py --designs + python validate_specs.py --tickets + python validate_specs.py --tasks + +Examples: + python validate_specs.py # Validate all + python validate_specs.py --requirements # Only requirements + python validate_specs.py --path .omoi_os # Custom path +""" + +import argparse +import re +import sys +from dataclasses import dataclass, field +from pathlib import Path + +import yaml + + +@dataclass +class ValidationResult: + """Result of validating a document.""" + file: Path + doc_type: str + errors: list[str] = field(default_factory=list) + warnings: list[str] = field(default_factory=list) + + @property + def is_valid(self) -> bool: + return len(self.errors) == 0 + + +def get_project_root() -> Path: + """Find project root by looking for .omoi_os or common markers.""" + current = Path.cwd() + + for parent in [current] + list(current.parents): + if (parent / ".omoi_os").exists(): + return parent + if (parent / ".git").exists(): + return parent + + return current + + +def parse_frontmatter(content: str) -> tuple[dict | None, str]: + """Extract YAML frontmatter and markdown body from content. 
+ + Returns: + Tuple of (frontmatter dict or None if missing, remaining markdown body) + """ + if not content.startswith("---"): + return None, content + + # Find end of frontmatter + end_match = re.search(r"\n---\s*\n", content[3:]) + if not end_match: + return None, content + + frontmatter_text = content[3 : end_match.start() + 3] + body = content[end_match.end() + 3 :] + + try: + frontmatter = yaml.safe_load(frontmatter_text) + except yaml.YAMLError: + return None, content + + if not isinstance(frontmatter, dict): + return None, content + + return frontmatter, body + + +def validate_requirements(file_path: Path) -> ValidationResult: + """Validate a requirements document.""" + result = ValidationResult(file=file_path, doc_type="requirements") + content = file_path.read_text() + frontmatter, body = parse_frontmatter(content) + + # Check for required sections + required_sections = [ + "## Document Overview", + "## Revision History", + ] + + for section in required_sections: + if section not in content: + result.warnings.append(f"Missing section: {section}") + + # Check for REQ-XXX-YYY-NNN format + req_pattern = r"REQ-[A-Z]+-[A-Z]+-\d{3}" + reqs = re.findall(req_pattern, content) + + if not reqs: + result.errors.append("No requirements found (expected REQ-XXX-YYY-NNN format)") + + # Check for normative language + normative = ["SHALL", "MUST", "SHOULD", "MAY"] + has_normative = any(word in content for word in normative) + + if not has_normative: + result.warnings.append("No normative language found (SHALL/MUST/SHOULD/MAY)") + + # Check for status - support both YAML frontmatter and markdown + if frontmatter: + if "status" not in frontmatter: + result.warnings.append("Missing status field in frontmatter") + if "created" not in frontmatter: + result.warnings.append("Missing created field in frontmatter") + else: + # Fallback to old markdown style check + if "**Status**:" not in content: + result.warnings.append("Missing Status field (no frontmatter found)") + if 
"**Created**:" not in content: + result.warnings.append("Missing Created date (no frontmatter found)") + + return result + + +def validate_design(file_path: Path) -> ValidationResult: + """Validate a design document.""" + result = ValidationResult(file=file_path, doc_type="design") + content = file_path.read_text() + frontmatter, body = parse_frontmatter(content) + + # Check for required sections + required_sections = [ + "## Architecture Overview", + "## Revision History", + ] + + for section in required_sections: + if section not in content: + result.warnings.append(f"Missing section: {section}") + + # Check for architecture diagram + if "```mermaid" not in content: + result.warnings.append("No Mermaid diagram found") + + # Check for component responsibilities + if "Responsibilities" not in content: + result.warnings.append("No component responsibilities documented") + + # Check for status - support both YAML frontmatter and markdown + if frontmatter: + if "status" not in frontmatter: + result.warnings.append("Missing status field in frontmatter") + else: + if "**Status**:" not in content: + result.warnings.append("Missing Status field (no frontmatter found)") + + # Check for related requirements link + if "requirements" not in content.lower() and "Requirements" not in content: + result.warnings.append("No link to requirements document") + + return result + + +def validate_ticket(file_path: Path) -> ValidationResult: + """Validate a ticket document (YAML frontmatter format).""" + result = ValidationResult(file=file_path, doc_type="ticket") + content = file_path.read_text() + frontmatter, body = parse_frontmatter(content) + + if not frontmatter: + result.errors.append("Missing YAML frontmatter") + return result + + # Check for ticket ID in frontmatter + if "id" not in frontmatter: + result.errors.append("Missing 'id' field in frontmatter") + elif not frontmatter["id"].startswith("TKT-"): + result.errors.append(f"Invalid ticket ID format: {frontmatter['id']} 
(expected TKT-XXX)") + + # Check for required fields in frontmatter + required_fields = { + "id": "Ticket ID (e.g., TKT-001)", + "title": "Ticket title", + "status": "Status (backlog, in_progress, done, etc.)", + "priority": "Priority (LOW, MEDIUM, HIGH, CRITICAL)", + "estimate": "Estimate (XS, S, M, L, XL)", + "created": "Created date", + "updated": "Updated date", + } + + for field_key, description in required_fields.items(): + if field_key not in frontmatter: + result.errors.append(f"Missing required field: {field_key} ({description})") + + # Validate status value + valid_statuses = ["backlog", "ready", "in_progress", "review", "done", "blocked"] + if frontmatter.get("status") and frontmatter["status"] not in valid_statuses: + result.warnings.append(f"Non-standard status: {frontmatter['status']} (expected one of: {', '.join(valid_statuses)})") + + # Validate priority value + valid_priorities = ["LOW", "MEDIUM", "HIGH", "CRITICAL"] + if frontmatter.get("priority") and frontmatter["priority"] not in valid_priorities: + result.warnings.append(f"Non-standard priority: {frontmatter['priority']} (expected one of: {', '.join(valid_priorities)})") + + # Validate estimate value + valid_estimates = ["XS", "S", "M", "L", "XL"] + if frontmatter.get("estimate") and frontmatter["estimate"] not in valid_estimates: + result.warnings.append(f"Non-standard estimate: {frontmatter['estimate']} (expected one of: {', '.join(valid_estimates)})") + + # Check for acceptance criteria in body + if "## Acceptance Criteria" not in body: + result.warnings.append("Missing Acceptance Criteria section in body") + + # Check for requirements traceability + if not frontmatter.get("requirements"): + result.warnings.append("No requirements linked (consider adding requirements field)") + + # Check for dependency structure + deps = frontmatter.get("dependencies", {}) + if deps: + expected_dep_fields = ["blocked_by", "blocks", "related"] + for dep_field in expected_dep_fields: + if dep_field not in 
deps: + result.warnings.append(f"Missing dependencies.{dep_field} field") + + return result + + +def validate_task(file_path: Path) -> ValidationResult: + """Validate a task document (YAML frontmatter format).""" + result = ValidationResult(file=file_path, doc_type="task") + content = file_path.read_text() + frontmatter, body = parse_frontmatter(content) + + if not frontmatter: + result.errors.append("Missing YAML frontmatter") + return result + + # Check for task ID in frontmatter + if "id" not in frontmatter: + result.errors.append("Missing 'id' field in frontmatter") + elif not frontmatter["id"].startswith("TSK-"): + result.errors.append(f"Invalid task ID format: {frontmatter['id']} (expected TSK-XXX)") + + # Check for required fields in frontmatter + required_fields = { + "id": "Task ID (e.g., TSK-001)", + "title": "Task title", + "status": "Status (pending, in_progress, done, etc.)", + "parent_ticket": "Parent ticket ID (e.g., TKT-001)", + "estimate": "Estimate (XS, S, M, L, XL)", + "created": "Created date", + } + + for field_key, description in required_fields.items(): + if field_key not in frontmatter: + result.errors.append(f"Missing required field: {field_key} ({description})") + + # Check parent ticket reference format + if frontmatter.get("parent_ticket") and not frontmatter["parent_ticket"].startswith("TKT-"): + result.errors.append(f"Invalid parent_ticket format: {frontmatter['parent_ticket']} (expected TKT-XXX)") + + # Validate status value + valid_statuses = ["pending", "in_progress", "review", "done", "blocked"] + if frontmatter.get("status") and frontmatter["status"] not in valid_statuses: + result.warnings.append(f"Non-standard status: {frontmatter['status']} (expected one of: {', '.join(valid_statuses)})") + + # Validate estimate value + valid_estimates = ["XS", "S", "M", "L", "XL"] + if frontmatter.get("estimate") and frontmatter["estimate"] not in valid_estimates: + result.warnings.append(f"Non-standard estimate: {frontmatter['estimate']} 
(expected one of: {', '.join(valid_estimates)})") + + # Check for objective/description in body + if "## Objective" not in body and "## Description" not in body: + result.warnings.append("Missing Objective/Description section in body") + + # Check for acceptance criteria in body + if "## Acceptance Criteria" not in body: + result.warnings.append("Missing Acceptance Criteria section in body") + + # Check for dependency structure + deps = frontmatter.get("dependencies", {}) + if deps: + expected_dep_fields = ["depends_on", "blocks"] + for dep_field in expected_dep_fields: + if dep_field not in deps: + result.warnings.append(f"Missing dependencies.{dep_field} field") + + return result + + +def validate_all(omoi_path: Path, doc_types: list[str]) -> list[ValidationResult]: + """Validate all documents of specified types.""" + results = [] + + validators = { + "requirements": (omoi_path / "requirements", validate_requirements), + "designs": (omoi_path / "designs", validate_design), + "tickets": (omoi_path / "tickets", validate_ticket), + "tasks": (omoi_path / "tasks", validate_task), + } + + for doc_type in doc_types: + if doc_type not in validators: + continue + + directory, validator = validators[doc_type] + + if not directory.exists(): + continue + + for md_file in directory.glob("*.md"): + result = validator(md_file) + results.append(result) + + return results + + +def print_results(results: list[ValidationResult]) -> int: + """Print validation results and return exit code.""" + if not results: + print("No documents found to validate") + return 0 + + total_errors = 0 + total_warnings = 0 + + for result in results: + if result.errors or result.warnings: + print(f"\n{result.doc_type}: {result.file.name}") + + for error in result.errors: + print(f" ERROR: {error}") + total_errors += 1 + + for warning in result.warnings: + print(f" WARNING: {warning}") + total_warnings += 1 + + print(f"\n{'=' * 40}") + print(f"Total: {len(results)} documents") + print(f"Errors: 
{total_errors}") + print(f"Warnings: {total_warnings}") + + valid_count = sum(1 for r in results if r.is_valid) + print(f"Valid: {valid_count}/{len(results)}") + + return 1 if total_errors > 0 else 0 + + +def main(): + parser = argparse.ArgumentParser( + description="Validate spec documents for completeness" + ) + parser.add_argument( + "--path", + default=None, + help="Path to .omoi_os directory (auto-detected if not specified)" + ) + parser.add_argument( + "--requirements", + action="store_true", + help="Validate only requirements" + ) + parser.add_argument( + "--designs", + action="store_true", + help="Validate only designs" + ) + parser.add_argument( + "--tickets", + action="store_true", + help="Validate only tickets" + ) + parser.add_argument( + "--tasks", + action="store_true", + help="Validate only tasks" + ) + + args = parser.parse_args() + + # Determine path + if args.path: + omoi_path = Path(args.path) + else: + root = get_project_root() + omoi_path = root / ".omoi_os" + + if not omoi_path.exists(): + print(f"Error: {omoi_path} does not exist") + sys.exit(1) + + # Determine which types to validate + doc_types = [] + if args.requirements: + doc_types.append("requirements") + if args.designs: + doc_types.append("designs") + if args.tickets: + doc_types.append("tickets") + if args.tasks: + doc_types.append("tasks") + + # Default to all if none specified + if not doc_types: + doc_types = ["requirements", "designs", "tickets", "tasks"] + + # Run validation + results = validate_all(omoi_path, doc_types) + exit_code = print_results(results) + sys.exit(exit_code) + + +if __name__ == "__main__": + main() From dbe5b81e08722400ef0328159ea13762c71c68bf Mon Sep 17 00:00:00 2001 From: Kevin Hill Date: Mon, 29 Dec 2025 14:15:50 -0300 Subject: [PATCH 005/290] Add media/ directory to gitignore for video assets Co-Authored-By: Warp --- .gitignore | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.gitignore b/.gitignore index 2730e45f..28ad3547 100644 --- a/.gitignore 
+++ b/.gitignore @@ -78,3 +78,6 @@ examples/workspaces/ # MCP .mcp/ +# Media files (videos, large assets) +media/ + From 3212d8f07b8c16559f2f5f8d5b2151e8e0a11fba Mon Sep 17 00:00:00 2001 From: Kevin Hill Date: Mon, 29 Dec 2025 17:22:23 -0300 Subject: [PATCH 006/290] Add code-review to always_include skills --- backend/omoi_os/sandbox_skills/manifest.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/backend/omoi_os/sandbox_skills/manifest.yaml b/backend/omoi_os/sandbox_skills/manifest.yaml index 85a447d8..a8cf04fa 100644 --- a/backend/omoi_os/sandbox_skills/manifest.yaml +++ b/backend/omoi_os/sandbox_skills/manifest.yaml @@ -59,3 +59,4 @@ settings: - spec-driven-dev - git-workflow - error-diagnosis + - code-review From b1af3299932e74c2afca7f33ece46a09a459a4d2 Mon Sep 17 00:00:00 2001 From: Kevin Hill Date: Tue, 30 Dec 2025 11:04:13 -0300 Subject: [PATCH 007/290] Add workflow mode selector and comprehensive sandbox event rendering Frontend: - Add WorkflowModeSelector component with Quick/Spec-Driven modes - Integrate mode selector into command page with mode-specific params - Update PromptInput to support submitLabel prop - Enhance EventRenderer with subagent result display (cost, usage, duration) - Fix duplicate subagent prompt display in sandbox events - Update IconRail and AuthLayout styling Backend: - Add task validator service for validation flow - Enhance claude_sandbox_worker to extract subagent results - Update orchestrator worker with improved task handling - Add sandbox routes for workspace management - Enhance task queue service with user limit tracking - Add spec workflow MCP tool enhancements Documentation: - Add command workflow modes design doc - Update user journey and page flow docs - Add continuous Claude SDK design doc - Add task queue user limits spec documents --- .../designs/task-queue-user-limits.md | 468 +++++++++ .../requirements/task-queue-user-limits.md | 263 +++++ .../tasks/TSK-TQU-001-create-migration.md | 60 ++ 
.../.omoi_os/tasks/TSK-TQU-002-plan-config.md | 71 ++ .../tasks/TSK-TQU-003-update-user-model.md | 61 ++ .../tasks/TSK-TQU-004-plan-service.md | 92 ++ .../tasks/TSK-TQU-005-running-count.md | 47 + .../tasks/TSK-TQU-006-claim-any-user.md | 80 ++ .../tasks/TSK-TQU-007-usage-tracking.md | 70 ++ .../tasks/TSK-TQU-008-orchestrator-timeout.md | 101 ++ .../tasks/TSK-TQU-009-api-create-task.md | 76 ++ .../tasks/TSK-TQU-010-api-queue-status.md | 69 ++ .../tasks/TSK-TQU-011-api-cancel-task.md | 65 ++ .../tasks/TSK-TQU-012-integration-tests.md | 88 ++ .../TKT-TQU-001-database-plan-models.md | 60 ++ .../tickets/TKT-TQU-002-plan-service.md | 73 ++ .../TKT-TQU-003-task-queue-extensions.md | 93 ++ .../TKT-TQU-004-orchestrator-updates.md | 83 ++ .../tickets/TKT-TQU-005-api-endpoints.md | 93 ++ .../TKT-TQU-006-integration-testing.md | 78 ++ .../testing/validation_system_test_plan.md | 339 +++++++ backend/omoi_os/api/main.py | 19 +- backend/omoi_os/api/routes/sandbox.py | 230 ++++- backend/omoi_os/api/routes/tasks.py | 54 + backend/omoi_os/api/routes/tickets.py | 45 + backend/omoi_os/mcp/spec_workflow.py | 50 + backend/omoi_os/sandbox_skills/__init__.py | 44 +- backend/omoi_os/sandbox_skills/manifest.yaml | 26 +- .../spec-driven-dev/scripts/api_client.py | 22 +- .../spec-driven-dev/scripts/models.py | 6 +- .../spec-driven-dev/scripts/parse_specs.py | 18 +- backend/omoi_os/services/daytona_spawner.py | 45 +- backend/omoi_os/services/task_queue.py | 195 ++++ backend/omoi_os/services/task_validator.py | 501 ++++++++++ .../omoi_os/workers/claude_sandbox_worker.py | 164 ++- .../omoi_os/workers/orchestrator_worker.py | 197 +++- .../testing/test_execution_mode_spawn.py | 333 +++++++ .../scripts/testing/test_validation_flow.py | 455 +++++++++ backend/tests/integration/__init__.py | 2 +- .../test_validation_integration.py | 565 +++++++++++ backend/tests/test_task_validator_service.py | 681 +++++++++++++ docs/design/continuous_claude_sdk.md | 849 ++++++++++++++++ 
.../design/frontend/command_workflow_modes.md | 325 ++++++ docs/page_flows/03_agents_workspaces.md | 523 ++++++---- docs/page_flows/10_command_center.md | 148 +-- docs/user_flows_summary.md | 100 +- docs/user_journey/00_overview.md | 141 +-- docs/user_journey/03_execution_monitoring.md | 89 +- docs/user_journey/06_key_interactions.md | 91 ++ frontend/app/(app)/command/page.tsx | 79 +- .../app/(app)/sandbox/[sandboxId]/page.tsx | 37 +- frontend/app/(auth)/layout.tsx | 8 +- frontend/components/command/PromptInput.tsx | 13 +- .../command/WorkflowModeSelector.tsx | 96 ++ frontend/components/command/index.ts | 1 + frontend/components/layout/AuthLayout.tsx | 8 +- frontend/components/layout/IconRail.tsx | 5 +- frontend/components/sandbox/EventRenderer.tsx | 930 +++++++++++++++++- 58 files changed, 9035 insertions(+), 490 deletions(-) create mode 100644 backend/.omoi_os/designs/task-queue-user-limits.md create mode 100644 backend/.omoi_os/requirements/task-queue-user-limits.md create mode 100644 backend/.omoi_os/tasks/TSK-TQU-001-create-migration.md create mode 100644 backend/.omoi_os/tasks/TSK-TQU-002-plan-config.md create mode 100644 backend/.omoi_os/tasks/TSK-TQU-003-update-user-model.md create mode 100644 backend/.omoi_os/tasks/TSK-TQU-004-plan-service.md create mode 100644 backend/.omoi_os/tasks/TSK-TQU-005-running-count.md create mode 100644 backend/.omoi_os/tasks/TSK-TQU-006-claim-any-user.md create mode 100644 backend/.omoi_os/tasks/TSK-TQU-007-usage-tracking.md create mode 100644 backend/.omoi_os/tasks/TSK-TQU-008-orchestrator-timeout.md create mode 100644 backend/.omoi_os/tasks/TSK-TQU-009-api-create-task.md create mode 100644 backend/.omoi_os/tasks/TSK-TQU-010-api-queue-status.md create mode 100644 backend/.omoi_os/tasks/TSK-TQU-011-api-cancel-task.md create mode 100644 backend/.omoi_os/tasks/TSK-TQU-012-integration-tests.md create mode 100644 backend/.omoi_os/tickets/TKT-TQU-001-database-plan-models.md create mode 100644 
backend/.omoi_os/tickets/TKT-TQU-002-plan-service.md create mode 100644 backend/.omoi_os/tickets/TKT-TQU-003-task-queue-extensions.md create mode 100644 backend/.omoi_os/tickets/TKT-TQU-004-orchestrator-updates.md create mode 100644 backend/.omoi_os/tickets/TKT-TQU-005-api-endpoints.md create mode 100644 backend/.omoi_os/tickets/TKT-TQU-006-integration-testing.md create mode 100644 backend/docs/testing/validation_system_test_plan.md create mode 100644 backend/omoi_os/services/task_validator.py create mode 100755 backend/scripts/testing/test_execution_mode_spawn.py create mode 100755 backend/scripts/testing/test_validation_flow.py create mode 100644 backend/tests/integration/test_validation_integration.py create mode 100644 backend/tests/test_task_validator_service.py create mode 100644 docs/design/continuous_claude_sdk.md create mode 100644 docs/design/frontend/command_workflow_modes.md create mode 100644 frontend/components/command/WorkflowModeSelector.tsx diff --git a/backend/.omoi_os/designs/task-queue-user-limits.md b/backend/.omoi_os/designs/task-queue-user-limits.md new file mode 100644 index 00000000..dea25013 --- /dev/null +++ b/backend/.omoi_os/designs/task-queue-user-limits.md @@ -0,0 +1,468 @@ +--- +id: DESIGN-TQU-001 +title: Task Queue User Limits Design +feature: task-queue-user-limits +created: 2024-12-29 +updated: 2024-12-29 +status: draft +requirements: + - REQ-TQU-001 +--- + +# Task Queue User Limits - Product Design Document + +## Document Overview + +A task queue extension that enables overnight batch execution with user-based concurrency limits. Users queue tasks before bed, agents execute them in sandboxes respecting plan limits, and results are available in the morning. 
+ +- **Purpose & Scope** + - Add plan-based concurrency limits to existing TaskQueueService + - Track monthly agent hour usage per user + - Enable overnight batch processing without supervision + - Non-goals: Billing integration, Stripe webhooks (post-MVP) + +- **Target Audience** + - Implementation teams extending TaskQueueService + - DevOps configuring plan limits + +- **Related Documents** + - Requirements: `.omoi_os/requirements/task-queue-user-limits.md` + - Existing: `omoi_os/services/task_queue.py` + +--- + +## Architecture Overview + +### High-Level Architecture + +```mermaid +flowchart TD + subgraph API[API Layer] + TaskAPI[Task Endpoints] + UserAPI[User/Limits Endpoints] + end + + subgraph Services[Service Layer] + TQS[TaskQueueService] + PS[PlanService] + OW[OrchestratorWorker] + end + + subgraph Infrastructure[Infrastructure Layer] + DB[(PostgreSQL)] + DS[DaytonaSpawner] + EB[EventBus] + end + + subgraph Sandbox[Sandbox Environment] + Agent[Claude Agent] + end + + TaskAPI -->|create/cancel| TQS + TaskAPI -->|check limits| PS + UserAPI -->|get limits| PS + + OW -->|claim task| TQS + OW -->|check limits| PS + OW -->|spawn sandbox| DS + OW -->|kill timeout| DS + + TQS -->|read/write| DB + PS -->|read user plan| DB + TQS -->|publish events| EB + + DS -->|start| Agent + Agent -->|complete/fail| TaskAPI +``` + +### Component Responsibilities + +| Component | Layer | Responsibilities | +|-----------|-------|------------------| +| TaskQueueService | Service | Task CRUD, claim logic, status updates, queue ordering | +| PlanService | Service | Plan limits lookup, usage tracking, eligibility checks | +| OrchestratorWorker | Service | Main loop, timeout detection, sandbox spawning | +| DaytonaSpawner | Infrastructure | Sandbox lifecycle management | +| EventBus | Infrastructure | Real-time event broadcasting | + +### System Boundaries + +- **Within scope of this design**: + - User plan limits enforcement + - Concurrency control per user + - Monthly usage tracking + 
- Timeout management per plan + +- **Out of scope (delegated)**: + - Billing/Stripe integration (future) + - Plan upgrade UI (frontend) + - Guardian intervention (separate feature) + +--- + +## Component Details + +### PlanService (New) + +#### Responsibilities +- Load plan configuration from code +- Get user's current plan limits +- Track and update monthly usage +- Check claim eligibility + +#### Key Interfaces +- `get_limits(user_id: str) -> PlanLimits` +- `get_monthly_hours_used(user_id: str) -> float` +- `add_usage_hours(user_id: str, hours: float) -> None` +- `can_claim_task(user_id: str) -> tuple[bool, Optional[str]]` + +#### Implementation Notes +```python +# config/plans.py +from dataclasses import dataclass +from typing import Optional + +@dataclass +class PlanLimits: + max_concurrent_agents: int + max_task_duration_minutes: int + monthly_agent_hours: Optional[int] # None = unlimited + +PLAN_LIMITS = { + "free": PlanLimits( + max_concurrent_agents=1, + max_task_duration_minutes=30, + monthly_agent_hours=10, + ), + "pro": PlanLimits( + max_concurrent_agents=3, + max_task_duration_minutes=120, + monthly_agent_hours=100, + ), + "team": PlanLimits( + max_concurrent_agents=10, + max_task_duration_minutes=240, + monthly_agent_hours=None, + ), + "enterprise": PlanLimits( + max_concurrent_agents=50, + max_task_duration_minutes=480, + monthly_agent_hours=None, + ), +} +``` + +### TaskQueueService Extensions + +#### New Methods +- `get_running_count(user_id: str) -> int` +- `can_claim_task(user_id: str) -> tuple[bool, Optional[str]]` +- `claim_next_task_for_user(user_id: str) -> Optional[Task]` +- `claim_next_task_any_user() -> Optional[Task]` +- `get_pending_count(user_id: str) -> int` +- `get_queue_position(task_id: str) -> Optional[int]` + +#### Modified Methods +- `mark_completed()` - Add usage hour tracking +- `mark_failed()` - Add usage hour tracking + +### OrchestratorWorker Updates + +#### New Behavior +1. Check for timed-out tasks every 60 seconds +2. 
Use `claim_next_task_any_user()` instead of existing claim logic +3. Pass user's timeout limit to sandbox spawner +4. Kill sandboxes for timed-out tasks + +--- + +## Data Models + +### Database Schema + +```sql +-- Add plan columns to existing users table +ALTER TABLE users ADD COLUMN plan_tier VARCHAR(20) DEFAULT 'free'; +ALTER TABLE users ADD COLUMN max_concurrent_agents INT DEFAULT 1; +ALTER TABLE users ADD COLUMN max_task_duration_minutes INT DEFAULT 30; +ALTER TABLE users ADD COLUMN monthly_agent_hours_limit INT DEFAULT 10; +ALTER TABLE users ADD COLUMN monthly_agent_hours_used DECIMAL(10,2) DEFAULT 0; +ALTER TABLE users ADD COLUMN billing_cycle_reset_at TIMESTAMP; + +-- Index for quick plan lookups +CREATE INDEX idx_users_plan ON users(plan_tier); + +-- Add user_id to tasks if not present +ALTER TABLE tasks ADD COLUMN IF NOT EXISTS user_id UUID REFERENCES users(id); +ALTER TABLE tasks ADD COLUMN IF NOT EXISTS project_id UUID; +ALTER TABLE tasks ADD COLUMN IF NOT EXISTS result_summary TEXT; +ALTER TABLE tasks ADD COLUMN IF NOT EXISTS files_changed JSONB; + +-- Index for user's running task count +CREATE INDEX idx_tasks_user_status ON tasks(user_id, status) + WHERE status IN ('claimed', 'running'); +``` + +### Pydantic Models + +```python +from __future__ import annotations +from datetime import datetime +from enum import Enum +from typing import Any, Dict, List, Optional +from pydantic import BaseModel, Field + + +class PlanTier(str, Enum): + FREE = "free" + PRO = "pro" + TEAM = "team" + ENTERPRISE = "enterprise" + + +class TaskStatus(str, Enum): + PENDING = "pending" + CLAIMED = "claimed" + RUNNING = "running" + COMPLETED = "completed" + FAILED = "failed" + + +class TaskCreate(BaseModel): + title: str + description: str + project_id: str + priority: int = Field(default=3, ge=1, le=4) + + +class TaskResponse(BaseModel): + id: str + title: str + status: TaskStatus + priority: int + queue_position: Optional[int] = None + created_at: datetime + started_at: 
Optional[datetime] = None + completed_at: Optional[datetime] = None + result_summary: Optional[str] = None + failure_reason: Optional[str] = None + + +class TaskResult(BaseModel): + status: str # "completed" or "failed" + summary: Optional[str] = None + files_changed: Optional[List[str]] = None + error: Optional[str] = None + + +class QueueStatus(BaseModel): + running: int + pending: int + max_concurrent: int + can_start_more: bool + monthly_hours_used: float + monthly_hours_limit: Optional[int] = None + + +class UserLimits(BaseModel): + plan: PlanTier + max_concurrent_agents: int + max_task_duration_minutes: int + monthly_agent_hours_limit: Optional[int] = None + monthly_agent_hours_used: float + billing_cycle_resets_at: Optional[datetime] = None +``` + +--- + +## API Specifications + +### REST Endpoints + +| Method | Path | Purpose | Request | Response | +|--------|------|---------|---------|----------| +| POST | `/api/v1/tasks` | Create task | `TaskCreate` | `TaskResponse` | +| GET | `/api/v1/tasks` | List user's tasks | `?status=` | `TaskResponse[]` | +| DELETE | `/api/v1/tasks/{id}` | Cancel task | - | `{cancelled: true}` | +| GET | `/api/v1/tasks/queue-status` | Queue status | - | `QueueStatus` | +| POST | `/api/v1/tasks/{id}/complete` | Report completion | `TaskResult` | `{ok: true}` | +| GET | `/api/v1/users/me/limits` | User limits | - | `UserLimits` | + +### Error Handling + +| Status Code | Error Code | Description | +|-------------|------------|-------------| +| 400 | `TOO_MANY_PENDING` | User has 50+ pending tasks | +| 400 | `TASK_ALREADY_COMPLETED` | Cannot cancel completed task | +| 403 | `MONTHLY_LIMIT_REACHED` | Monthly hours exhausted | +| 404 | `TASK_NOT_FOUND` | Task doesn't exist or wrong user | + +--- + +## Implementation Details + +### Core Algorithm: Claim Next Task Any User + +```python +async def claim_next_task_any_user(self) -> Optional[QueuedTask]: + """ + Claim next available task across all users (for orchestrator). 
+ Respects per-user limits, prioritizes users with fewer running agents. + + Steps: + 1. Find users with pending tasks who aren't at their limit + 2. Order by running count ascending (fairness) + 3. Try to claim from each eligible user + """ + # Step 1: Get eligible users + eligible_users = await self.db.fetch(""" + WITH user_running AS ( + SELECT user_id, COUNT(*) as running_count + FROM tasks + WHERE status IN ('claimed', 'running') + GROUP BY user_id + ), + user_pending AS ( + SELECT DISTINCT user_id + FROM tasks + WHERE status = 'pending' + ) + SELECT up.user_id, u.max_concurrent_agents, + COALESCE(ur.running_count, 0) as running + FROM user_pending up + JOIN users u ON up.user_id = u.id + LEFT JOIN user_running ur ON up.user_id = ur.user_id + WHERE COALESCE(ur.running_count, 0) < u.max_concurrent_agents + ORDER BY COALESCE(ur.running_count, 0) ASC + LIMIT 10 + """) + + # Step 2: Try to claim from each eligible user + for user in eligible_users: + # Also check monthly hours + if not await self._check_monthly_limit(user["user_id"]): + continue + + task = await self.claim_next_task(user["user_id"]) + if task: + return task + + return None +``` + +### Operation Flow: Task Lifecycle + +```mermaid +sequenceDiagram + participant User + participant API + participant TQS as TaskQueueService + participant PS as PlanService + participant Orch as Orchestrator + participant DS as DaytonaSpawner + participant Agent + + User->>API: POST /tasks + API->>PS: can_claim_task(user_id) + PS-->>API: (true, null) + API->>TQS: create_task() + TQS-->>API: Task(status=pending) + API-->>User: TaskResponse + + loop Every 5s + Orch->>TQS: claim_next_task_any_user() + TQS->>PS: check limits per user + TQS-->>Orch: Task(status=claimed) + Orch->>PS: get_limits(user_id) + PS-->>Orch: PlanLimits + Orch->>DS: spawn(task, timeout) + DS-->>Orch: Sandbox + Orch->>TQS: mark_running(task_id, sandbox_id) + end + + Agent->>API: POST /tasks/{id}/complete + API->>TQS: mark_completed(task_id, summary) + 
TQS->>PS: add_usage_hours(user_id, hours) + API-->>Agent: {ok: true} + + loop Every 60s + Orch->>TQS: get_timed_out_tasks() + TQS-->>Orch: [timed_out_tasks] + Orch->>DS: terminate(sandbox_id) + Orch->>TQS: mark_failed(task_id, "Timeout") + TQS->>PS: add_usage_hours(user_id, hours) + end +``` + +--- + +## Configuration + +| Parameter | Default | Range | Description | +|-----------|---------|-------|-------------| +| `poll_interval` | 5.0 | 1-30 | Orchestrator poll interval (seconds) | +| `timeout_check_interval` | 60.0 | 30-300 | Timeout check interval (seconds) | +| `max_pending_per_user` | 50 | 10-200 | Maximum pending tasks per user | +| `stale_claiming_threshold` | 60 | 30-300 | Seconds before claiming task is stale | + +--- + +## Performance Considerations + +### Database Indexing +- `idx_users_plan` - Quick plan tier lookups +- `idx_tasks_user_status` - Fast running count queries (partial index) +- Existing task priority/created_at indexes for queue ordering + +### Query Optimization +- Use partial index for running tasks (status IN ('claimed', 'running')) +- Limit eligible users query to 10 at a time +- Use SKIP LOCKED for atomic claiming + +### Batch Processing +- Orchestrator processes one task per loop iteration +- Timeout check runs every 60 seconds to batch kill operations + +--- + +## Security Considerations + +### Authentication +- All task endpoints require valid JWT or API key +- Sandbox callback uses internal service auth + +### Authorization +- Users can only access their own tasks +- Plan tier changes require admin privileges + +### Data Protection +- No sensitive data in task descriptions (user responsibility) +- Sandbox outputs may contain code - treat as user data + +--- + +## Related Documents + +- **Requirements**: `.omoi_os/requirements/task-queue-user-limits.md` +- **Existing Implementation**: `omoi_os/services/task_queue.py` +- **Daytona Spawner**: `omoi_os/services/daytona_spawner.py` + +--- + +## Quality Checklist + +- [x] All 
requirements addressed +- [x] Architecture diagram included +- [x] API specifications complete +- [x] Database schemas defined +- [x] Integration points documented +- [x] Error handling specified +- [x] Security considerations addressed + +--- + +## Revision History + +| Version | Date | Author | Changes | +|---------|------|--------|---------| +| 1.0 | 2024-12-29 | Kevin | Initial design from provided doc | diff --git a/backend/.omoi_os/requirements/task-queue-user-limits.md b/backend/.omoi_os/requirements/task-queue-user-limits.md new file mode 100644 index 00000000..fc726c3c --- /dev/null +++ b/backend/.omoi_os/requirements/task-queue-user-limits.md @@ -0,0 +1,263 @@ +--- +id: REQ-TQU-001 +title: Task Queue User Limits Requirements +feature: task-queue-user-limits +created: 2024-12-29 +updated: 2024-12-29 +status: draft +category: functional +priority: HIGH +design_ref: designs/task-queue-user-limits.md +condition: "WHEN a user queues tasks for overnight agent execution" +action: "THE SYSTEM SHALL enforce plan-based concurrency limits and track usage" +--- + +# Task Queue User Limits Requirements + +## Document Overview + +This document defines requirements for extending the existing task queue system with user-based concurrency limits, plan tiers, and overnight batch execution support. The goal is to enable users to queue multiple tasks before sleeping and wake up to completed results without runaway costs or hung tasks. + +**Parent Document**: Existing TaskQueueService implementation + +--- + +## 1. 
Plan Tier Management + +#### REQ-TQU-PLAN-001: Plan Tier Definition +THE SYSTEM SHALL support the following plan tiers with defined limits: + +| Plan | Max Concurrent Agents | Max Task Duration | Monthly Agent Hours | +|------|----------------------|-------------------|---------------------| +| Free | 1 | 30 min | 10 hrs | +| Pro | 3 | 2 hrs | 100 hrs | +| Team | 10 | 4 hrs | Unlimited | +| Enterprise | Custom | Custom | Unlimited | + +#### REQ-TQU-PLAN-002: Plan Configuration +THE SYSTEM SHALL store plan limits in a configuration module that can be extended without database changes. + +#### REQ-TQU-PLAN-003: User Plan Assignment +THE SYSTEM SHALL associate each user with a plan tier, defaulting to 'free' for new users. + +--- + +## 2. Concurrency Control + +#### REQ-TQU-CONC-001: Running Task Count +THE SYSTEM SHALL track the number of currently running agents per user by counting tasks in 'claimed' or 'running' status. + +#### REQ-TQU-CONC-002: Claim Eligibility Check +WHEN a user attempts to claim a new task, THE SYSTEM SHALL verify: +- Current running count < max_concurrent_agents for user's plan +- Monthly hours used < monthly_agent_hours limit (if applicable) + +#### REQ-TQU-CONC-003: Claim Rejection Reason +WHEN a user cannot claim a task due to limits, THE SYSTEM SHALL return a descriptive reason (e.g., "At limit: 3/3 agents running"). + +#### REQ-TQU-CONC-004: Cross-User Task Claiming +THE SYSTEM SHALL support claiming tasks across all users while respecting per-user limits, prioritizing users with fewer running agents. + +--- + +## 3. Monthly Usage Tracking + +#### REQ-TQU-USAGE-001: Hour Accumulation +THE SYSTEM SHALL track monthly agent hours used per user, incrementing on task completion or failure. + +#### REQ-TQU-USAGE-002: Usage Calculation +THE SYSTEM SHALL calculate usage as: (completed_at - started_at) in hours, rounded to two decimal places. 
+ +#### REQ-TQU-USAGE-003: Billing Cycle Reset +THE SYSTEM SHALL reset monthly_agent_hours_used to 0 at the start of each billing cycle (monthly). + +#### REQ-TQU-USAGE-004: Usage Enforcement +WHEN a user's monthly hours reach the limit, THE SYSTEM SHALL prevent new task claims until the billing cycle resets. + +--- + +## 4. Task Duration Limits + +#### REQ-TQU-DUR-001: Per-Task Timeout +THE SYSTEM SHALL enforce max_task_duration_minutes from the user's plan as the timeout for each task. + +#### REQ-TQU-DUR-002: Timeout Detection +THE SYSTEM SHALL periodically check for tasks exceeding their timeout (every 60 seconds). + +#### REQ-TQU-DUR-003: Timeout Handling +WHEN a task exceeds its timeout, THE SYSTEM SHALL: +1. Terminate the associated sandbox +2. Mark the task as failed with reason "Timeout: exceeded {minutes} minutes" +3. Still count the elapsed hours toward monthly usage + +--- + +## 5. Queue Management + +#### REQ-TQU-QUEUE-001: Priority-Based Ordering +THE SYSTEM SHALL order pending tasks by priority (1=critical → 4=low) then by created_at ascending. + +#### REQ-TQU-QUEUE-002: Queue Position +THE SYSTEM SHALL provide queue position for pending tasks relative to other tasks with same or higher priority. + +#### REQ-TQU-QUEUE-003: Maximum Pending Tasks +THE SYSTEM SHALL limit pending tasks per user to 50 to prevent queue abuse. + +#### REQ-TQU-QUEUE-004: Task Cancellation +THE SYSTEM SHALL allow users to cancel pending or running tasks, terminating sandboxes for running tasks. + +--- + +## 6. 
State Machine + +#### REQ-TQU-SM-001: States +Tasks SHALL support the following states: + +```mermaid +stateDiagram-v2 + [*] --> pending + pending --> claimed : Orchestrator claims + claimed --> running : Sandbox started + running --> completed : Agent finishes + running --> failed : Error or timeout + pending --> failed : Cancelled + claimed --> failed : Spawn failed + failed --> pending : Retry (if eligible) +``` + +#### REQ-TQU-SM-002: Transitions +Valid transitions: +``` +pending → claimed (orchestrator claims) +claimed → running (sandbox spawns, agent starts) +running → completed (agent finishes successfully) +running → failed (error, timeout, or cancellation) +pending → failed (cancelled before claim) +claimed → failed (sandbox spawn failure) +failed → pending (retry if retry_count < max_retries) +``` + +--- + +## 7. Data Model Requirements + +### 7.1 User Plan Fields +#### REQ-TQU-DM-001 +User model SHALL include the following plan-related fields: +- `plan_tier: VARCHAR(20)` (default: 'free') +- `max_concurrent_agents: INT` (default: 1) +- `max_task_duration_minutes: INT` (default: 30) +- `monthly_agent_hours_limit: INT | NULL` (NULL = unlimited) +- `monthly_agent_hours_used: DECIMAL(10,2)` (default: 0) +- `billing_cycle_reset_at: TIMESTAMP` + +### 7.2 Task Fields (Existing + New) +#### REQ-TQU-DM-002 +Task model already includes required fields. Verify presence of: +- `user_id TEXT FK -> users(id)` (may need to add) +- `project_id TEXT FK -> projects(id)` (may need to add) +- `sandbox_id TEXT` (exists) +- `started_at TIMESTAMP` (exists) +- `completed_at TIMESTAMP` (exists) +- `timeout_seconds INT` (exists) +- `result_summary TEXT` (may need to add) +- `files_changed JSONB` (may need to add) + +--- + +## 8. 
API Requirements + +### 8.1 Endpoints Table + +| Endpoint | Method | Purpose | Request Body | Responses | +|----------|--------|---------|--------------|-----------| +| /api/v1/tasks | POST | Create task | `{ title, description, project_id, priority }` | 200: `TaskResponse`; 400: `{ error }` | +| /api/v1/tasks | GET | List user's tasks | `?status=` | 200: `TaskResponse[]` | +| /api/v1/tasks/{id} | DELETE | Cancel task | - | 200: `{ cancelled: true }` | +| /api/v1/tasks/queue-status | GET | Get queue status | - | 200: `QueueStatus` | +| /api/v1/tasks/{id}/complete | POST | Report completion | `{ status, summary, files_changed }` | 200: `{ ok: true }` | +| /api/v1/users/me/limits | GET | Get user limits | - | 200: `UserLimits` | + +### 8.2 Response Models +#### REQ-TQU-API-001 +```python +class TaskResponse(BaseModel): + id: str + title: str + status: str + priority: int + queue_position: Optional[int] + created_at: datetime + started_at: Optional[datetime] + completed_at: Optional[datetime] + +class QueueStatus(BaseModel): + running: int + pending: int + max_concurrent: int + can_start_more: bool + monthly_hours_used: float + monthly_hours_limit: Optional[int] + +class UserLimits(BaseModel): + plan: str + max_concurrent_agents: int + max_task_duration_minutes: int + monthly_agent_hours_limit: Optional[int] + monthly_agent_hours_used: float + billing_cycle_resets_at: Optional[datetime] +``` + +--- + +## 9. SLOs & Performance + +#### REQ-TQU-SLO-001 +Task claiming should complete within 100ms under normal conditions. + +#### REQ-TQU-SLO-002 +Timeout detection loop should complete within 5 seconds for up to 1000 running tasks. + +#### REQ-TQU-SLO-003 +Queue position calculation should complete within 50ms. + +--- + +## 10. Security & Audit + +#### REQ-TQU-SEC-001 +Only authenticated users MAY create, list, or cancel their own tasks. + +#### REQ-TQU-SEC-002 +Task completion callback endpoint SHALL validate task ownership or use internal auth. 
+ +#### REQ-TQU-SEC-003 +Plan tier changes MUST be audited with actor, old_tier, new_tier, and timestamp. + +--- + +## 11. Integration Requirements + +#### REQ-TQU-INT-001: Existing TaskQueueService +THE SYSTEM SHALL extend the existing TaskQueueService rather than replacing it. + +#### REQ-TQU-INT-002: Daytona Spawner +THE SYSTEM SHALL integrate with DaytonaSpawnerService for sandbox lifecycle management. + +#### REQ-TQU-INT-003: Event Bus +THE SYSTEM SHALL publish task events (created, claimed, completed, failed, cancelled) to the existing EventBusService. + +--- + +## Related Documents + +- [Task Queue User Limits Design](../designs/task-queue-user-limits.md) +- [Existing TaskQueueService](../../omoi_os/services/task_queue.py) + +--- + +## Revision History + +| Version | Date | Author | Changes | +|---------|------|--------|---------| +| 1.0 | 2024-12-29 | Kevin | Initial requirements from design doc | diff --git a/backend/.omoi_os/tasks/TSK-TQU-001-create-migration.md b/backend/.omoi_os/tasks/TSK-TQU-001-create-migration.md new file mode 100644 index 00000000..078ade20 --- /dev/null +++ b/backend/.omoi_os/tasks/TSK-TQU-001-create-migration.md @@ -0,0 +1,60 @@ +--- +id: TSK-TQU-001 +title: Create database migration for user plan columns +parent_ticket: TKT-TQU-001 +created: 2024-12-29 +updated: 2024-12-29 +status: pending +priority: HIGH +type: implementation +estimate: 1h +depends_on: [] +--- + +# TSK-TQU-001: Create database migration for user plan columns + +## Description + +Create an Alembic migration that adds plan-related columns to the users table. 
+ +## Acceptance Criteria + +- [ ] Migration adds plan_tier VARCHAR(20) DEFAULT 'free' +- [ ] Migration adds max_concurrent_agents INT DEFAULT 1 +- [ ] Migration adds max_task_duration_minutes INT DEFAULT 30 +- [ ] Migration adds monthly_agent_hours_limit INT DEFAULT 10 +- [ ] Migration adds monthly_agent_hours_used DECIMAL(10,2) DEFAULT 0 +- [ ] Migration adds billing_cycle_reset_at TIMESTAMP +- [ ] Index created on plan_tier column +- [ ] Migration is reversible (downgrade works) +- [ ] Migration tested on fresh and existing databases + +## Implementation Notes + +```bash +uv run alembic revision -m "add_user_plan_columns" +``` + +```python +def upgrade(): + op.add_column('users', sa.Column('plan_tier', sa.String(20), server_default='free')) + op.add_column('users', sa.Column('max_concurrent_agents', sa.Integer(), server_default='1')) + op.add_column('users', sa.Column('max_task_duration_minutes', sa.Integer(), server_default='30')) + op.add_column('users', sa.Column('monthly_agent_hours_limit', sa.Integer(), server_default='10')) + op.add_column('users', sa.Column('monthly_agent_hours_used', sa.Numeric(10, 2), server_default='0')) + op.add_column('users', sa.Column('billing_cycle_reset_at', sa.DateTime(timezone=True))) + op.create_index('idx_users_plan', 'users', ['plan_tier']) + +def downgrade(): + op.drop_index('idx_users_plan') + op.drop_column('users', 'billing_cycle_reset_at') + op.drop_column('users', 'monthly_agent_hours_used') + op.drop_column('users', 'monthly_agent_hours_limit') + op.drop_column('users', 'max_task_duration_minutes') + op.drop_column('users', 'max_concurrent_agents') + op.drop_column('users', 'plan_tier') +``` + +## Dependencies + +None - first task in the chain. 
diff --git a/backend/.omoi_os/tasks/TSK-TQU-002-plan-config.md b/backend/.omoi_os/tasks/TSK-TQU-002-plan-config.md new file mode 100644 index 00000000..66700b5d --- /dev/null +++ b/backend/.omoi_os/tasks/TSK-TQU-002-plan-config.md @@ -0,0 +1,71 @@ +--- +id: TSK-TQU-002 +title: Create plan configuration module +parent_ticket: TKT-TQU-001 +created: 2024-12-29 +updated: 2024-12-29 +status: pending +priority: HIGH +type: implementation +estimate: 1h +depends_on: [] +--- + +# TSK-TQU-002: Create plan configuration module + +## Description + +Create config/plans.py with PlanLimits dataclass and PLAN_LIMITS dictionary. + +## Acceptance Criteria + +- [ ] PlanLimits dataclass with max_concurrent_agents, max_task_duration_minutes, monthly_agent_hours +- [ ] PLAN_LIMITS dict with free, pro, team, enterprise tiers +- [ ] get_plan_limits(plan_tier: str) -> PlanLimits helper function +- [ ] Returns free tier for unknown plan names +- [ ] Unit tests for get_plan_limits() + +## Implementation + +```python +# config/plans.py +from dataclasses import dataclass +from typing import Optional + +@dataclass +class PlanLimits: + max_concurrent_agents: int + max_task_duration_minutes: int + monthly_agent_hours: Optional[int] # None = unlimited + +PLAN_LIMITS = { + "free": PlanLimits( + max_concurrent_agents=1, + max_task_duration_minutes=30, + monthly_agent_hours=10, + ), + "pro": PlanLimits( + max_concurrent_agents=3, + max_task_duration_minutes=120, + monthly_agent_hours=100, + ), + "team": PlanLimits( + max_concurrent_agents=10, + max_task_duration_minutes=240, + monthly_agent_hours=None, + ), + "enterprise": PlanLimits( + max_concurrent_agents=50, + max_task_duration_minutes=480, + monthly_agent_hours=None, + ), +} + +def get_plan_limits(plan_tier: str) -> PlanLimits: + """Get limits for a plan tier. Defaults to free for unknown tiers.""" + return PLAN_LIMITS.get(plan_tier, PLAN_LIMITS["free"]) +``` + +## Dependencies + +None - can be done in parallel with TSK-TQU-001. 
diff --git a/backend/.omoi_os/tasks/TSK-TQU-003-update-user-model.md b/backend/.omoi_os/tasks/TSK-TQU-003-update-user-model.md new file mode 100644 index 00000000..640c7960 --- /dev/null +++ b/backend/.omoi_os/tasks/TSK-TQU-003-update-user-model.md @@ -0,0 +1,61 @@ +--- +id: TSK-TQU-003 +title: Update User model with plan fields +parent_ticket: TKT-TQU-001 +created: 2024-12-29 +updated: 2024-12-29 +status: pending +priority: HIGH +type: implementation +estimate: 1h +depends_on: + - TSK-TQU-001 +--- + +# TSK-TQU-003: Update User model with plan fields + +## Description + +Add plan-related mapped columns to the User SQLAlchemy model. + +## Acceptance Criteria + +- [ ] User model has plan_tier: Mapped[str] with default 'free' +- [ ] User model has max_concurrent_agents: Mapped[int] with default 1 +- [ ] User model has max_task_duration_minutes: Mapped[int] with default 30 +- [ ] User model has monthly_agent_hours_limit: Mapped[Optional[int]] with default 10 +- [ ] User model has monthly_agent_hours_used: Mapped[Decimal] with default 0 +- [ ] User model has billing_cycle_reset_at: Mapped[Optional[datetime]] +- [ ] Model matches migration schema + +## Implementation + +Add to `omoi_os/models/user.py`: + +```python +from decimal import Decimal + +# Plan tier +plan_tier: Mapped[str] = mapped_column( + String(20), default="free", nullable=False +) +max_concurrent_agents: Mapped[int] = mapped_column( + Integer, default=1, nullable=False +) +max_task_duration_minutes: Mapped[int] = mapped_column( + Integer, default=30, nullable=False +) +monthly_agent_hours_limit: Mapped[Optional[int]] = mapped_column( + Integer, default=10, nullable=True +) +monthly_agent_hours_used: Mapped[Decimal] = mapped_column( + Numeric(10, 2), default=Decimal("0"), nullable=False +) +billing_cycle_reset_at: Mapped[Optional[datetime]] = mapped_column( + DateTime(timezone=True), nullable=True +) +``` + +## Dependencies + +- TSK-TQU-001: Migration must be created first to ensure schema matches diff 
--git a/backend/.omoi_os/tasks/TSK-TQU-004-plan-service.md b/backend/.omoi_os/tasks/TSK-TQU-004-plan-service.md new file mode 100644 index 00000000..b76ffae5 --- /dev/null +++ b/backend/.omoi_os/tasks/TSK-TQU-004-plan-service.md @@ -0,0 +1,92 @@ +--- +id: TSK-TQU-004 +title: Implement PlanService class +parent_ticket: TKT-TQU-002 +created: 2024-12-29 +updated: 2024-12-29 +status: pending +priority: HIGH +type: implementation +estimate: 2h +depends_on: + - TSK-TQU-002 + - TSK-TQU-003 +--- + +# TSK-TQU-004: Implement PlanService class + +## Description + +Create PlanService in services/plan_service.py with methods for limits lookup, usage tracking, and claim eligibility. + +## Acceptance Criteria + +- [ ] PlanService class created with DatabaseService dependency +- [ ] get_limits(user_id) returns PlanLimits from user's plan_tier +- [ ] get_monthly_hours_used(user_id) returns current monthly_agent_hours_used +- [ ] add_usage_hours(user_id, hours) increments monthly_agent_hours_used +- [ ] can_claim_task(user_id, running_count) checks concurrency and monthly limits +- [ ] Returns tuple[bool, Optional[str]] with denial reason +- [ ] Async versions of all methods +- [ ] Unit tests for each method + +## Implementation + +```python +# services/plan_service.py +from typing import Optional, Tuple +from config.plans import PlanLimits, get_plan_limits +from omoi_os.services.database import DatabaseService + +class PlanService: + def __init__(self, db: DatabaseService): + self.db = db + + async def get_limits(self, user_id: str) -> PlanLimits: + """Get plan limits for a user.""" + async with self.db.get_async_session() as session: + result = await session.execute( + select(User.plan_tier).where(User.id == user_id) + ) + plan_tier = result.scalar_one_or_none() or "free" + return get_plan_limits(plan_tier) + + async def get_monthly_hours_used(self, user_id: str) -> float: + """Get current monthly usage in hours.""" + async with self.db.get_async_session() as session: + result = 
await session.execute( + select(User.monthly_agent_hours_used).where(User.id == user_id) + ) + return float(result.scalar_one_or_none() or 0) + + async def add_usage_hours(self, user_id: str, hours: float) -> None: + """Add hours to user's monthly usage.""" + async with self.db.get_async_session() as session: + await session.execute( + update(User) + .where(User.id == user_id) + .values(monthly_agent_hours_used=User.monthly_agent_hours_used + hours) + ) + await session.commit() + + async def can_claim_task( + self, user_id: str, running_count: int + ) -> Tuple[bool, Optional[str]]: + """Check if user can claim another task.""" + limits = await self.get_limits(user_id) + + if running_count >= limits.max_concurrent_agents: + return False, f"At limit: {running_count}/{limits.max_concurrent_agents} agents running" + + if limits.monthly_agent_hours is not None: + used = await self.get_monthly_hours_used(user_id) + if used >= limits.monthly_agent_hours: + return False, f"Monthly limit reached: {used}/{limits.monthly_agent_hours} hours" + + return True, None +``` + +## Dependencies + +- TSK-TQU-002: Plan config module (for get_plan_limits) +- TSK-TQU-003: User model updates (for plan fields) diff --git a/backend/.omoi_os/tasks/TSK-TQU-005-running-count.md b/backend/.omoi_os/tasks/TSK-TQU-005-running-count.md new file mode 100644 index 00000000..2a934d57 --- /dev/null +++ b/backend/.omoi_os/tasks/TSK-TQU-005-running-count.md @@ -0,0 +1,47 @@ +--- +id: TSK-TQU-005 +title: Add get_running_count to TaskQueueService +parent_ticket: TKT-TQU-003 +created: 2024-12-29 +updated: 2024-12-29 +status: pending +priority: HIGH +type: implementation +estimate: 1h +depends_on: + - TSK-TQU-004 +--- + +# TSK-TQU-005: Add get_running_count to TaskQueueService + +## Description + +Add method to count tasks in claimed/running status for a specific user. 
+ +## Acceptance Criteria + +- [ ] get_running_count(user_id) returns int count +- [ ] Counts tasks with status IN ('claimed', 'running') +- [ ] Filters by user_id +- [ ] Async version available +- [ ] Unit test verifies count accuracy + +## Implementation + +Add to `omoi_os/services/task_queue.py`: + +```python +async def get_running_count(self, user_id: str) -> int: + """Count currently running agents for a user.""" + async with self.db.get_async_session() as session: + result = await session.execute( + select(func.count(Task.id)) + .where(Task.user_id == user_id) + .where(Task.status.in_(['claimed', 'running'])) + ) + return result.scalar_one() or 0 +``` + +## Dependencies + +- TSK-TQU-004: PlanService (for integration testing) diff --git a/backend/.omoi_os/tasks/TSK-TQU-006-claim-any-user.md b/backend/.omoi_os/tasks/TSK-TQU-006-claim-any-user.md new file mode 100644 index 00000000..986cf929 --- /dev/null +++ b/backend/.omoi_os/tasks/TSK-TQU-006-claim-any-user.md @@ -0,0 +1,80 @@ +--- +id: TSK-TQU-006 +title: Implement claim_next_task_any_user +parent_ticket: TKT-TQU-003 +created: 2024-12-29 +updated: 2024-12-29 +status: pending +priority: HIGH +type: implementation +estimate: 2h +depends_on: + - TSK-TQU-005 +--- + +# TSK-TQU-006: Implement claim_next_task_any_user + +## Description + +Add method to claim next available task across all users while respecting per-user limits. + +## Acceptance Criteria + +- [ ] claim_next_task_any_user() finds eligible users with pending tasks +- [ ] Respects max_concurrent_agents per user +- [ ] Prioritizes users with fewer running agents (fairness) +- [ ] Checks monthly hour limits before claiming +- [ ] Uses atomic claim with SKIP LOCKED +- [ ] Returns None if no eligible tasks +- [ ] Integration test with multiple users + +## Implementation + +```python +async def claim_next_task_any_user(self) -> Optional[Task]: + """ + Claim next available task across all users (for orchestrator). 
 + Respects per-user limits, prioritizes users with fewer running agents. + """ + async with self.db.get_async_session() as session: + # Find eligible users + eligible_users = await session.execute(text(""" + WITH user_running AS ( + SELECT user_id, COUNT(*) as running_count + FROM tasks + WHERE status IN ('claimed', 'running') + GROUP BY user_id + ), + user_pending AS ( + SELECT DISTINCT user_id + FROM tasks + WHERE status = 'pending' + ) + SELECT up.user_id, u.max_concurrent_agents, + COALESCE(ur.running_count, 0) as running + FROM user_pending up + JOIN users u ON up.user_id = u.id + LEFT JOIN user_running ur ON up.user_id = ur.user_id + WHERE COALESCE(ur.running_count, 0) < u.max_concurrent_agents + ORDER BY COALESCE(ur.running_count, 0) ASC + LIMIT 10 + """)) + + for user in eligible_users.mappings(): + # Check monthly limits + can_claim, _ = await self.plan_service.can_claim_task( + user["user_id"], user["running"] + ) + if not can_claim: + continue + + task = await self.claim_next_task_for_user(user["user_id"]) + if task: + return task + + return None +``` + +## Dependencies + +- TSK-TQU-005: get_running_count (for user running count) diff --git a/backend/.omoi_os/tasks/TSK-TQU-007-usage-tracking.md b/backend/.omoi_os/tasks/TSK-TQU-007-usage-tracking.md new file mode 100644 index 00000000..79d3cef6 --- /dev/null +++ b/backend/.omoi_os/tasks/TSK-TQU-007-usage-tracking.md @@ -0,0 +1,70 @@ +--- +id: TSK-TQU-007 +title: Add usage hour tracking to mark_completed/mark_failed +parent_ticket: TKT-TQU-003 +created: 2024-12-29 +updated: 2024-12-29 +status: pending +priority: HIGH +type: implementation +estimate: 1h +depends_on: + - TSK-TQU-004 +--- + +# TSK-TQU-007: Add usage hour tracking to mark_completed/mark_failed + +## Description + +Modify mark_completed and mark_failed methods to calculate and add usage hours to user's monthly total. 
+ +## Acceptance Criteria + +- [ ] mark_completed calculates hours from started_at to completed_at +- [ ] mark_completed calls plan_service.add_usage_hours +- [ ] mark_failed also tracks hours (even failed tasks count) +- [ ] Hours rounded to 2 decimal places +- [ ] Unit test verifies hours accumulate correctly + +## Implementation + +Modify in `omoi_os/services/task_queue.py`: + +```python +async def mark_completed( + self, + task_id: str, + summary: Optional[str] = None, + files_changed: Optional[list] = None, +) -> None: + """Mark task as successfully completed.""" + from omoi_os.utils.datetime import utc_now + + async with self.db.get_async_session() as session: + result = await session.execute( + select(Task).where(Task.id == task_id) + ) + task = result.scalar_one_or_none() + if not task: + return + + now = utc_now() + task.status = "completed" + task.completed_at = now + if summary: + task.result_summary = summary + if files_changed: + task.files_changed = files_changed + + await session.commit() + + # Track usage hours + if task.started_at and task.user_id: + duration = now - task.started_at + hours = round(duration.total_seconds() / 3600, 2) + await self.plan_service.add_usage_hours(task.user_id, hours) +``` + +## Dependencies + +- TSK-TQU-004: PlanService (for add_usage_hours method) diff --git a/backend/.omoi_os/tasks/TSK-TQU-008-orchestrator-timeout.md b/backend/.omoi_os/tasks/TSK-TQU-008-orchestrator-timeout.md new file mode 100644 index 00000000..d948b24e --- /dev/null +++ b/backend/.omoi_os/tasks/TSK-TQU-008-orchestrator-timeout.md @@ -0,0 +1,101 @@ +--- +id: TSK-TQU-008 +title: Add timeout detection loop to orchestrator +parent_ticket: TKT-TQU-004 +created: 2024-12-29 +updated: 2024-12-29 +status: pending +priority: HIGH +type: implementation +estimate: 2h +depends_on: + - TSK-TQU-006 + - TSK-TQU-007 +--- + +# TSK-TQU-008: Add timeout detection loop to orchestrator + +## Description + +Update OrchestratorWorker to periodically check for and kill 
timed-out tasks. + +## Acceptance Criteria + +- [ ] _kill_timed_out_tasks() method added +- [ ] Timeout check runs every 60 seconds +- [ ] Joins tasks with users to get max_task_duration_minutes +- [ ] Terminates sandbox for timed-out tasks +- [ ] Marks task failed with timeout reason +- [ ] Tracks usage hours for timed-out tasks +- [ ] Integration test verifies timeout handling + +## Implementation + +Add to `omoi_os/workers/orchestrator_worker.py`: + +```python +from datetime import datetime, timedelta, timezone + +class OrchestratorWorker: + def __init__(self, ...): + ... + self.timeout_check_interval = 60.0 + self._last_timeout_check = datetime.min.replace(tzinfo=timezone.utc) + + async def run(self): + while not self._shutdown: + try: + # Periodic timeout check + now = utc_now() + if (now - self._last_timeout_check).total_seconds() > self.timeout_check_interval: + await self._kill_timed_out_tasks() + self._last_timeout_check = now + + # Claim and process + task = await self.task_queue.claim_next_task_any_user() + if task: + await self._process_task(task) + else: + await asyncio.sleep(self.poll_interval) + except Exception as e: + logger.error(f"Orchestrator error: {e}") + await asyncio.sleep(self.poll_interval) + + async def _kill_timed_out_tasks(self): + """Find and kill tasks that have exceeded their time limit.""" + running = await self.db.fetch(""" + SELECT t.*, u.max_task_duration_minutes + FROM tasks t + JOIN users u ON t.user_id = u.id + WHERE t.status = 'running' + AND t.started_at IS NOT NULL + """) + + now = utc_now() + killed = 0 + + for task in running: + max_duration = timedelta(minutes=task["max_task_duration_minutes"]) + if now - task["started_at"] > max_duration: + logger.info(f"Killing timed-out task: {task['id']}") + + if task["sandbox_id"]: + try: + await self.spawner.terminate(task["sandbox_id"]) + except Exception as e: + logger.warning(f"Failed to terminate sandbox: {e}") + + await self.task_queue.mark_failed( + task["id"], + f"Timeout: exceeded {task['max_task_duration_minutes']}
minutes" + ) + killed += 1 + + if killed: + logger.info(f"Killed {killed} timed-out tasks") +``` + +## Dependencies + +- TSK-TQU-006: claim_next_task_any_user (main loop uses this) +- TSK-TQU-007: mark_failed with usage tracking diff --git a/backend/.omoi_os/tasks/TSK-TQU-009-api-create-task.md b/backend/.omoi_os/tasks/TSK-TQU-009-api-create-task.md new file mode 100644 index 00000000..8cb3fa2a --- /dev/null +++ b/backend/.omoi_os/tasks/TSK-TQU-009-api-create-task.md @@ -0,0 +1,76 @@ +--- +id: TSK-TQU-009 +title: Implement POST /api/v1/tasks endpoint +parent_ticket: TKT-TQU-005 +created: 2024-12-29 +updated: 2024-12-29 +status: pending +priority: HIGH +type: implementation +estimate: 1h +depends_on: + - TSK-TQU-005 +--- + +# TSK-TQU-009: Implement POST /api/v1/tasks endpoint + +## Description + +Create endpoint for users to add tasks to the queue with limit checks. + +## Acceptance Criteria + +- [ ] POST /api/v1/tasks accepts TaskCreate body +- [ ] Validates pending count < 50 +- [ ] Creates task with user_id from auth +- [ ] Returns TaskResponse with queue_position +- [ ] Returns 400 if too many pending tasks +- [ ] Requires authentication +- [ ] API test via httpx + +## Implementation + +```python +# api/routes/tasks.py + +class TaskCreate(BaseModel): + title: str + description: str + project_id: str + priority: int = Field(default=3, ge=1, le=4) + + +@router.post("", response_model=TaskResponse) +async def create_task( + task: TaskCreate, + user_id: str = Depends(get_current_user_id), + task_queue: TaskQueueService = Depends(get_task_queue), +): + """Create a new task and add to queue.""" + pending = await task_queue.get_pending_count(user_id) + if pending >= 50: + raise HTTPException(400, detail="Too many pending tasks (max 50)") + + new_task = await task_queue.create_task( + user_id=user_id, + project_id=task.project_id, + title=task.title, + description=task.description, + priority=task.priority, + ) + + queue_position = await 
task_queue.get_queue_position(new_task.id) + + return TaskResponse( + id=str(new_task.id), + title=new_task.title, + status=new_task.status, + priority=new_task.priority, + queue_position=queue_position, + created_at=new_task.created_at, + ) +``` + +## Dependencies + +- TSK-TQU-005: get_running_count (queue status check) diff --git a/backend/.omoi_os/tasks/TSK-TQU-010-api-queue-status.md b/backend/.omoi_os/tasks/TSK-TQU-010-api-queue-status.md new file mode 100644 index 00000000..002dd42a --- /dev/null +++ b/backend/.omoi_os/tasks/TSK-TQU-010-api-queue-status.md @@ -0,0 +1,69 @@ +--- +id: TSK-TQU-010 +title: Implement GET /api/v1/tasks/queue-status endpoint +parent_ticket: TKT-TQU-005 +created: 2024-12-29 +updated: 2024-12-29 +status: pending +priority: HIGH +type: implementation +estimate: 1h +depends_on: + - TSK-TQU-004 + - TSK-TQU-005 +--- + +# TSK-TQU-010: Implement GET /api/v1/tasks/queue-status endpoint + +## Description + +Create endpoint that returns user's current queue status including running count, pending count, limits, and monthly usage. 
+ +## Acceptance Criteria + +- [ ] GET /api/v1/tasks/queue-status returns QueueStatus +- [ ] Includes running, pending counts +- [ ] Includes max_concurrent from plan +- [ ] Includes can_start_more boolean +- [ ] Includes monthly_hours_used and monthly_hours_limit +- [ ] Requires authentication +- [ ] API test via httpx + +## Implementation + +```python +class QueueStatus(BaseModel): + running: int + pending: int + max_concurrent: int + can_start_more: bool + monthly_hours_used: float + monthly_hours_limit: Optional[int] = None + + +@router.get("/queue-status", response_model=QueueStatus) +async def get_queue_status( + user_id: str = Depends(get_current_user_id), + task_queue: TaskQueueService = Depends(get_task_queue), + plan_service: PlanService = Depends(get_plan_service), +): + """Get user's queue status and limits.""" + limits = await plan_service.get_limits(user_id) + running = await task_queue.get_running_count(user_id) + pending = await task_queue.get_pending_count(user_id) + hours_used = await plan_service.get_monthly_hours_used(user_id) + + return QueueStatus( + running=running, + pending=pending, + max_concurrent=limits.max_concurrent_agents, + can_start_more=running < limits.max_concurrent_agents, + monthly_hours_used=hours_used, + monthly_hours_limit=limits.monthly_agent_hours, + ) +``` + +## Dependencies + +- TSK-TQU-004: PlanService (for limits and usage) +- TSK-TQU-005: get_running_count diff --git a/backend/.omoi_os/tasks/TSK-TQU-011-api-cancel-task.md b/backend/.omoi_os/tasks/TSK-TQU-011-api-cancel-task.md new file mode 100644 index 00000000..a0ced534 --- /dev/null +++ b/backend/.omoi_os/tasks/TSK-TQU-011-api-cancel-task.md @@ -0,0 +1,65 @@ +--- +id: TSK-TQU-011 +title: Implement DELETE /api/v1/tasks/{id} endpoint +parent_ticket: TKT-TQU-005 +created: 2024-12-29 +updated: 2024-12-29 +status: pending +priority: HIGH +type: implementation +estimate: 1h +depends_on: + - TSK-TQU-007 +--- + +# TSK-TQU-011: Implement DELETE /api/v1/tasks/{id} endpoint + 
+## Description + +Create endpoint to cancel pending or running tasks, terminating sandbox if necessary. + +## Acceptance Criteria + +- [ ] DELETE /api/v1/tasks/{id} cancels task +- [ ] Validates task belongs to user +- [ ] Returns 404 if task not found +- [ ] Returns 400 if task already completed +- [ ] Terminates sandbox if task is running +- [ ] Marks task as failed with "Cancelled by user" +- [ ] Requires authentication +- [ ] API test via httpx + +## Implementation + +```python +@router.delete("/{task_id}") +async def cancel_task( + task_id: str, + user_id: str = Depends(get_current_user_id), + task_queue: TaskQueueService = Depends(get_task_queue), + spawner: DaytonaSpawnerService = Depends(get_spawner), +): + """Cancel a pending or running task.""" + task = await task_queue.get_task(task_id) + + if not task or task.user_id != user_id: + raise HTTPException(404, detail="Task not found") + + if task.status == "completed": + raise HTTPException(400, detail="Task already completed") + + # Kill sandbox if running + if task.status == "running" and task.sandbox_id: + try: + await spawner.terminate(task.sandbox_id) + except Exception as e: + logger.warning(f"Failed to terminate sandbox: {e}") + + await task_queue.mark_failed(task_id, "Cancelled by user") + + return {"cancelled": True} +``` + +## Dependencies + +- TSK-TQU-007: mark_failed with usage tracking diff --git a/backend/.omoi_os/tasks/TSK-TQU-012-integration-tests.md b/backend/.omoi_os/tasks/TSK-TQU-012-integration-tests.md new file mode 100644 index 00000000..b7d3cef7 --- /dev/null +++ b/backend/.omoi_os/tasks/TSK-TQU-012-integration-tests.md @@ -0,0 +1,88 @@ +--- +id: TSK-TQU-012 +title: Write integration tests for task queue flow +parent_ticket: TKT-TQU-006 +created: 2024-12-29 +updated: 2024-12-29 +status: pending +priority: MEDIUM +type: test +estimate: 3h +depends_on: + - TSK-TQU-008 + - TSK-TQU-009 + - TSK-TQU-010 + - TSK-TQU-011 +--- + +# TSK-TQU-012: Write integration tests for task queue flow + +## 
Description + +Create comprehensive integration tests for the complete task queue flow with user limits. + +## Acceptance Criteria + +- [ ] Test basic flow: queue 3 tasks, verify sequential execution for free user +- [ ] Test timeout: task killed after exceeding duration +- [ ] Test monthly limits: claim rejected when hours exhausted +- [ ] Test multi-user fairness: tasks interleave between users +- [ ] Test cancellation: sandbox terminated, task marked failed +- [ ] Performance test: claiming < 100ms +- [ ] All tests pass in CI + +## Test Cases + +```python +# tests/integration/test_task_queue_user_limits.py + +@pytest.mark.asyncio +async def test_free_user_single_concurrent(db, task_queue, plan_service): + """Free users can only run 1 task at a time.""" + user = await create_user(db, plan_tier="free") + + # Queue 3 tasks + task1 = await task_queue.create_task(user.id, "Task 1") + task2 = await task_queue.create_task(user.id, "Task 2") + task3 = await task_queue.create_task(user.id, "Task 3") + + # First claim succeeds + claimed = await task_queue.claim_next_task_for_user(user.id) + assert claimed.id == task1.id + + # Second claim fails (at limit) + can_claim, reason = await plan_service.can_claim_task(user.id, running_count=1) + assert not can_claim + assert "1/1 agents running" in reason + + +@pytest.mark.asyncio +async def test_timeout_kills_sandbox(db, task_queue, spawner, orchestrator): + """Tasks exceeding timeout are killed.""" + user = await create_user(db, max_task_duration_minutes=1) + task = await task_queue.create_task(user.id, "Long task") + + # Start task + await task_queue.claim_next_task_for_user(user.id) + await task_queue.mark_running(task.id, "sandbox-123") + + # Simulate time passing + task.started_at = utc_now() - timedelta(minutes=2) + await db.commit() + + # Run timeout check + await orchestrator._kill_timed_out_tasks() + + # Verify + refreshed = await task_queue.get_task(task.id) + assert refreshed.status == "failed" + assert "Timeout" in 
refreshed.error_message + spawner.terminate.assert_called_with("sandbox-123") +``` + +## Dependencies + +- TSK-TQU-008: Orchestrator timeout (for timeout tests) +- TSK-TQU-009: Create task API (for API tests) +- TSK-TQU-010: Queue status API +- TSK-TQU-011: Cancel task API diff --git a/backend/.omoi_os/tickets/TKT-TQU-001-database-plan-models.md b/backend/.omoi_os/tickets/TKT-TQU-001-database-plan-models.md new file mode 100644 index 00000000..51b2dffa --- /dev/null +++ b/backend/.omoi_os/tickets/TKT-TQU-001-database-plan-models.md @@ -0,0 +1,60 @@ +--- +id: TKT-TQU-001 +title: Database & Plan Models +feature: task-queue-user-limits +created: 2024-12-29 +updated: 2024-12-29 +status: open +priority: HIGH +phase: PHASE_IMPLEMENTATION +type: feature +requirements: + - REQ-TQU-PLAN-001 + - REQ-TQU-PLAN-002 + - REQ-TQU-PLAN-003 + - REQ-TQU-DM-001 + - REQ-TQU-DM-002 +linked_design: designs/task-queue-user-limits.md +estimate: 4h +--- + +# TKT-TQU-001: Database & Plan Models + +## Summary + +Add plan tier columns to users table and create plan configuration module. This is the foundation for user-based concurrency limits. 
+ +## Acceptance Criteria + +- [ ] Users table has plan_tier, max_concurrent_agents, max_task_duration_minutes columns +- [ ] Users table has monthly_agent_hours_limit, monthly_agent_hours_used, billing_cycle_reset_at columns +- [ ] PlanLimits dataclass defined in config/plans.py +- [ ] PLAN_LIMITS dict with free/pro/team/enterprise tiers +- [ ] get_plan_limits() helper function +- [ ] Migration script created and tested +- [ ] Default values set for new users (free tier) + +## Technical Details + +### Migration Script +```sql +ALTER TABLE users ADD COLUMN plan_tier VARCHAR(20) DEFAULT 'free'; +ALTER TABLE users ADD COLUMN max_concurrent_agents INT DEFAULT 1; +ALTER TABLE users ADD COLUMN max_task_duration_minutes INT DEFAULT 30; +ALTER TABLE users ADD COLUMN monthly_agent_hours_limit INT DEFAULT 10; +ALTER TABLE users ADD COLUMN monthly_agent_hours_used DECIMAL(10,2) DEFAULT 0; +ALTER TABLE users ADD COLUMN billing_cycle_reset_at TIMESTAMP; +CREATE INDEX idx_users_plan ON users(plan_tier); +``` + +### Plan Configuration +Create `config/plans.py` with PlanLimits dataclass and PLAN_LIMITS dictionary. + +## Dependencies + +None - this is the first ticket in the implementation. 
+ +## Related + +- Design: DESIGN-TQU-001 +- Requirements: REQ-TQU-PLAN-001, REQ-TQU-DM-001 diff --git a/backend/.omoi_os/tickets/TKT-TQU-002-plan-service.md b/backend/.omoi_os/tickets/TKT-TQU-002-plan-service.md new file mode 100644 index 00000000..afa9f904 --- /dev/null +++ b/backend/.omoi_os/tickets/TKT-TQU-002-plan-service.md @@ -0,0 +1,73 @@ +--- +id: TKT-TQU-002 +title: Plan Service Implementation +feature: task-queue-user-limits +created: 2024-12-29 +updated: 2024-12-29 +status: open +priority: HIGH +phase: PHASE_IMPLEMENTATION +type: feature +requirements: + - REQ-TQU-CONC-001 + - REQ-TQU-CONC-002 + - REQ-TQU-CONC-003 + - REQ-TQU-USAGE-001 + - REQ-TQU-USAGE-002 + - REQ-TQU-USAGE-004 +linked_design: designs/task-queue-user-limits.md +estimate: 4h +depends_on: + - TKT-TQU-001 +--- + +# TKT-TQU-002: Plan Service Implementation + +## Summary + +Create PlanService class that handles plan limits lookup, usage tracking, and claim eligibility checks. + +## Acceptance Criteria + +- [ ] PlanService class created in services/plan_service.py +- [ ] get_limits(user_id) returns PlanLimits for user +- [ ] get_monthly_hours_used(user_id) returns current usage +- [ ] add_usage_hours(user_id, hours) increments usage +- [ ] can_claim_task(user_id) checks both concurrency and monthly limits +- [ ] Returns descriptive reason when claim is denied +- [ ] Unit tests for all methods + +## Technical Details + +### PlanService Interface +```python +class PlanService: + def __init__(self, db: DatabaseService): + self.db = db + + async def get_limits(self, user_id: str) -> PlanLimits: + """Get plan limits for a user.""" + + async def get_monthly_hours_used(self, user_id: str) -> float: + """Get current monthly usage in hours.""" + + async def add_usage_hours(self, user_id: str, hours: float) -> None: + """Add hours to user's monthly usage.""" + + async def can_claim_task(self, user_id: str, running_count: int) -> tuple[bool, Optional[str]]: + """Check if user can claim another task.""" 
+``` + +### Integration Points +- Uses DatabaseService for user queries +- Called by TaskQueueService for claim eligibility +- Called by API for queue status + +## Dependencies + +- TKT-TQU-001: Database & Plan Models (columns must exist) + +## Related + +- Design: DESIGN-TQU-001 +- Requirements: REQ-TQU-CONC-001 through REQ-TQU-CONC-003 diff --git a/backend/.omoi_os/tickets/TKT-TQU-003-task-queue-extensions.md b/backend/.omoi_os/tickets/TKT-TQU-003-task-queue-extensions.md new file mode 100644 index 00000000..0c57fa08 --- /dev/null +++ b/backend/.omoi_os/tickets/TKT-TQU-003-task-queue-extensions.md @@ -0,0 +1,93 @@ +--- +id: TKT-TQU-003 +title: TaskQueueService Extensions +feature: task-queue-user-limits +created: 2024-12-29 +updated: 2024-12-29 +status: open +priority: HIGH +phase: PHASE_IMPLEMENTATION +type: feature +requirements: + - REQ-TQU-CONC-004 + - REQ-TQU-QUEUE-001 + - REQ-TQU-QUEUE-002 + - REQ-TQU-QUEUE-003 + - REQ-TQU-USAGE-001 + - REQ-TQU-USAGE-002 +linked_design: designs/task-queue-user-limits.md +estimate: 6h +depends_on: + - TKT-TQU-002 +--- + +# TKT-TQU-003: TaskQueueService Extensions + +## Summary + +Extend existing TaskQueueService with user-based claiming, running count tracking, queue position, and usage hour tracking on completion. 
+ +## Acceptance Criteria + +- [ ] get_running_count(user_id) returns count of claimed/running tasks +- [ ] claim_next_task_for_user(user_id) claims respecting user limits +- [ ] claim_next_task_any_user() claims across all users fairly +- [ ] get_pending_count(user_id) returns pending task count +- [ ] get_queue_position(task_id) returns position in user's queue +- [ ] mark_completed() updates monthly_agent_hours_used +- [ ] mark_failed() updates monthly_agent_hours_used +- [ ] Atomic claiming prevents race conditions +- [ ] Integration tests with PlanService + +## Technical Details + +### New Methods +```python +async def get_running_count(self, user_id: str) -> int: + """Count tasks in claimed/running status for user.""" + +async def claim_next_task_any_user(self) -> Optional[Task]: + """ + Claim next available task across all users. + Respects per-user limits, prioritizes users with fewer running agents. + """ + +async def get_queue_position(self, task_id: str) -> Optional[int]: + """Get position of task in user's queue (by priority then created_at).""" +``` + +### Modified Methods +- mark_completed(): Calculate hours, call plan_service.add_usage_hours() +- mark_failed(): Calculate hours, call plan_service.add_usage_hours() + +### SQL for Cross-User Claiming +```sql +WITH user_running AS ( + SELECT user_id, COUNT(*) as running_count + FROM tasks + WHERE status IN ('claimed', 'running') + GROUP BY user_id +), +user_pending AS ( + SELECT DISTINCT user_id + FROM tasks + WHERE status = 'pending' +) +SELECT up.user_id, u.max_concurrent_agents, + COALESCE(ur.running_count, 0) as running +FROM user_pending up +JOIN users u ON up.user_id = u.id +LEFT JOIN user_running ur ON up.user_id = ur.user_id +WHERE COALESCE(ur.running_count, 0) < u.max_concurrent_agents +ORDER BY COALESCE(ur.running_count, 0) ASC +LIMIT 10 +``` + +## Dependencies + +- TKT-TQU-002: PlanService (for limits and usage tracking) + +## Related + +- Design: DESIGN-TQU-001 +- Existing: 
omoi_os/services/task_queue.py diff --git a/backend/.omoi_os/tickets/TKT-TQU-004-orchestrator-updates.md b/backend/.omoi_os/tickets/TKT-TQU-004-orchestrator-updates.md new file mode 100644 index 00000000..d208099f --- /dev/null +++ b/backend/.omoi_os/tickets/TKT-TQU-004-orchestrator-updates.md @@ -0,0 +1,83 @@ +--- +id: TKT-TQU-004 +title: Orchestrator Worker Updates +feature: task-queue-user-limits +created: 2024-12-29 +updated: 2024-12-29 +status: open +priority: HIGH +phase: PHASE_IMPLEMENTATION +type: feature +requirements: + - REQ-TQU-DUR-001 + - REQ-TQU-DUR-002 + - REQ-TQU-DUR-003 +linked_design: designs/task-queue-user-limits.md +estimate: 4h +depends_on: + - TKT-TQU-003 +--- + +# TKT-TQU-004: Orchestrator Worker Updates + +## Summary + +Update OrchestratorWorker to use cross-user claiming, enforce per-user timeouts, and kill timed-out sandboxes. + +## Acceptance Criteria + +- [ ] Orchestrator uses claim_next_task_any_user() in main loop +- [ ] Timeout from user's plan passed to sandbox spawner +- [ ] Timeout check runs every 60 seconds +- [ ] Timed-out tasks: sandbox terminated, task marked failed +- [ ] Usage hours tracked even for timed-out tasks +- [ ] Graceful shutdown handles in-flight claims +- [ ] Integration test: queue 3 tasks, verify limits respected + +## Technical Details + +### Main Loop Updates +```python +async def run(self): + while not self._shutdown: + # Periodic timeout check + if now - self._last_timeout_check > timedelta(seconds=60): + await self._kill_timed_out_tasks() + self._last_timeout_check = now + + # Claim across all users + task = await self.task_queue.claim_next_task_any_user() + if task: + await self._process_task(task) + else: + await asyncio.sleep(self.poll_interval) +``` + +### Timeout Handling +```python +async def _kill_timed_out_tasks(self): + running = await self.db.fetch(""" + SELECT t.*, u.max_task_duration_minutes + FROM tasks t + JOIN users u ON t.user_id = u.id + WHERE t.status = 'running' AND t.started_at IS NOT 
NULL + """) + + for task in running: + max_duration = timedelta(minutes=task["max_task_duration_minutes"]) + if now - task["started_at"] > max_duration: + await self.spawner.terminate(task["sandbox_id"]) + await self.task_queue.mark_failed( + task["id"], + f"Timeout: exceeded {task['max_task_duration_minutes']} minutes" + ) +``` + +## Dependencies + +- TKT-TQU-003: TaskQueueService extensions (claim_next_task_any_user) + +## Related + +- Design: DESIGN-TQU-001 +- Existing: omoi_os/workers/orchestrator_worker.py diff --git a/backend/.omoi_os/tickets/TKT-TQU-005-api-endpoints.md b/backend/.omoi_os/tickets/TKT-TQU-005-api-endpoints.md new file mode 100644 index 00000000..c168c36c --- /dev/null +++ b/backend/.omoi_os/tickets/TKT-TQU-005-api-endpoints.md @@ -0,0 +1,93 @@ +--- +id: TKT-TQU-005 +title: API Endpoints +feature: task-queue-user-limits +created: 2024-12-29 +updated: 2024-12-29 +status: open +priority: HIGH +phase: PHASE_IMPLEMENTATION +type: feature +requirements: + - REQ-TQU-API-001 + - REQ-TQU-QUEUE-004 + - REQ-TQU-SEC-001 + - REQ-TQU-SEC-002 +linked_design: designs/task-queue-user-limits.md +estimate: 4h +depends_on: + - TKT-TQU-003 +--- + +# TKT-TQU-005: API Endpoints + +## Summary + +Create REST API endpoints for task management, queue status, and user limits. 
+ +## Acceptance Criteria + +- [ ] POST /api/v1/tasks - Create task with limit checks +- [ ] GET /api/v1/tasks - List user's tasks with optional status filter +- [ ] DELETE /api/v1/tasks/{id} - Cancel pending/running task +- [ ] GET /api/v1/tasks/queue-status - Return QueueStatus +- [ ] POST /api/v1/tasks/{id}/complete - Sandbox callback for completion +- [ ] GET /api/v1/users/me/limits - Return UserLimits +- [ ] All endpoints require authentication +- [ ] Proper error responses (400, 403, 404) +- [ ] API tests via curl/httpx + +## Technical Details + +### New Endpoints + +```python +# api/routes/tasks.py + +@router.post("", response_model=TaskResponse) +async def create_task( + task: TaskCreate, + user_id: str = Depends(get_current_user_id), + task_queue: TaskQueueService = Depends(get_task_queue), +): + """Create a new task and add to queue.""" + pending = await task_queue.get_pending_count(user_id) + if pending >= 50: + raise HTTPException(400, "Too many pending tasks (max 50)") + # ... create task + +@router.get("/queue-status") +async def get_queue_status( + user_id: str = Depends(get_current_user_id), + task_queue: TaskQueueService = Depends(get_task_queue), + plan_service: PlanService = Depends(get_plan_service), +): + """Get user's queue status and limits.""" + limits = await plan_service.get_limits(user_id) + running = await task_queue.get_running_count(user_id) + pending = await task_queue.get_pending_count(user_id) + hours_used = await plan_service.get_monthly_hours_used(user_id) + return QueueStatus(...) 
+ +@router.delete("/{task_id}") +async def cancel_task( + task_id: str, + user_id: str = Depends(get_current_user_id), + task_queue: TaskQueueService = Depends(get_task_queue), + spawner: DaytonaSpawnerService = Depends(get_spawner), +): + """Cancel a pending or running task.""" + # Verify ownership, kill sandbox if running, mark failed +``` + +### Response Models +- TaskResponse, QueueStatus, UserLimits (from design doc) + +## Dependencies + +- TKT-TQU-003: TaskQueueService extensions (for queue operations) + +## Related + +- Design: DESIGN-TQU-001 +- Existing: omoi_os/api/routes/tasks.py diff --git a/backend/.omoi_os/tickets/TKT-TQU-006-integration-testing.md b/backend/.omoi_os/tickets/TKT-TQU-006-integration-testing.md new file mode 100644 index 00000000..5893277f --- /dev/null +++ b/backend/.omoi_os/tickets/TKT-TQU-006-integration-testing.md @@ -0,0 +1,78 @@ +--- +id: TKT-TQU-006 +title: Integration Testing +feature: task-queue-user-limits +created: 2024-12-29 +updated: 2024-12-29 +status: open +priority: MEDIUM +phase: PHASE_INTEGRATION +type: test +requirements: + - REQ-TQU-SLO-001 + - REQ-TQU-SLO-002 + - REQ-TQU-SLO-003 +linked_design: designs/task-queue-user-limits.md +estimate: 4h +depends_on: + - TKT-TQU-004 + - TKT-TQU-005 +--- + +# TKT-TQU-006: Integration Testing + +## Summary + +End-to-end integration tests to verify the complete task queue flow with user limits, timeouts, and overnight execution simulation. 
+ +## Acceptance Criteria + +- [ ] Test: Queue 3 tasks via API, verify orchestrator respects limits +- [ ] Test: Task timeout triggers sandbox termination +- [ ] Test: Monthly hours accumulate correctly +- [ ] Test: Queue position updates as tasks complete +- [ ] Test: Concurrent users don't interfere with each other +- [ ] Test: Cancel running task terminates sandbox +- [ ] Performance: Task claiming < 100ms +- [ ] Performance: Timeout check < 5s for 100 tasks +- [ ] Let orchestrator run for 10 minutes with mixed workload + +## Technical Details + +### Test Scenarios + +1. **Basic Flow** + - Create user with free plan + - Queue 3 tasks + - Verify only 1 runs at a time + - Verify all 3 complete sequentially + +2. **Timeout Handling** + - Create task with 1-minute timeout + - Start long-running agent + - Verify timeout kills sandbox after 60s + - Verify task marked failed with timeout reason + +3. **Monthly Limits** + - Set user to 0.1 hours remaining + - Complete a 10-minute task + - Verify next claim is rejected with limit message + +4. 
**Multi-User Fairness** + - Create 2 users, each queues 5 tasks + - Verify tasks interleave (not all User A then all User B) + +### Test Infrastructure +- Use pytest-asyncio for async tests +- Mock DaytonaSpawner for sandbox simulation +- Use test database with clean state per test + +## Dependencies + +- TKT-TQU-004: Orchestrator updates (full flow) +- TKT-TQU-005: API endpoints (for HTTP tests) + +## Related + +- Design: DESIGN-TQU-001 +- Requirements: REQ-TQU-SLO-001 through SLO-003 diff --git a/backend/docs/testing/validation_system_test_plan.md b/backend/docs/testing/validation_system_test_plan.md new file mode 100644 index 00000000..6a848fb9 --- /dev/null +++ b/backend/docs/testing/validation_system_test_plan.md @@ -0,0 +1,339 @@ +# Validation System Test Plan + +**Created**: 2025-12-29 +**Status**: Draft +**Purpose**: Comprehensive test plan for the task validation system + +## Overview + +The validation system ensures completed work meets quality standards before marking tasks as complete. This document outlines how to test the complete validation flow. + +## Architecture Summary + +``` +┌─────────────────────────────────────────────────────────────────────────────┐ +│ VALIDATION FLOW │ +├─────────────────────────────────────────────────────────────────────────────┤ +│ │ +│ 1. Implementer Completes Work │ +│ └─► agent.completed event with branch_name │ +│ │ +│ 2. Event Handler (sandbox.py) │ +│ └─► Extracts branch_name, stores in task.result │ +│ └─► Calls TaskValidatorService.request_validation() │ +│ │ +│ 3. TaskValidatorService.request_validation() │ +│ └─► Updates task.status to "pending_validation" │ +│ └─► Spawns validator sandbox via Daytona with: │ +│ - GITHUB_REPO, GITHUB_REPO_OWNER, GITHUB_REPO_NAME │ +│ - BRANCH_NAME (from task.result) │ +│ - GITHUB_TOKEN (from project owner) │ +│ - VALIDATION_MODE=true │ +│ │ +│ 4. 
Validator Agent Runs │ +│ └─► Checks: tests, build, git status, PR exists │ +│ └─► Reports result via POST /api/v1/sandbox/{id}/validation-result │ +│ │ +│ 5. TaskValidatorService.handle_validation_result() │ +│ └─► PASSED: task.status = "completed" │ +│ └─► FAILED: task.status = "needs_revision" │ +│ └─► Publishes TASK_VALIDATION_FAILED event │ +│ │ +│ 6. Orchestrator (on TASK_VALIDATION_FAILED) │ +│ └─► Resets task to "pending" with revision_feedback │ +│ └─► Implementer respawns with feedback context │ +│ │ +└─────────────────────────────────────────────────────────────────────────────┘ +``` + +## Test Categories + +### 1. Unit Tests + +Test individual methods of `TaskValidatorService` in isolation. + +**File**: `tests/test_task_validator_service.py` + +| Test Case | Description | Expected Outcome | +|-----------|-------------|------------------| +| `test_request_validation_creates_pending_status` | Call request_validation with valid task | Task status → "pending_validation" | +| `test_request_validation_increments_iteration` | Request validation multiple times | iteration increments 1, 2, 3 | +| `test_request_validation_fails_after_max_iterations` | Exceed MAX_VALIDATION_ITERATIONS | Task status → "failed" | +| `test_request_validation_disabled_auto_approves` | TASK_VALIDATION_ENABLED=false | Task status → "completed" immediately | +| `test_handle_validation_result_passed` | Submit passed validation | Task status → "completed" | +| `test_handle_validation_result_failed` | Submit failed validation | Task status → "needs_revision" | +| `test_handle_validation_result_creates_review_record` | Submit any result | ValidationReview record created | +| `test_spawn_validator_gets_repo_info` | Spawn validator for task with project | extra_env includes GITHUB_REPO, BRANCH_NAME | +| `test_spawn_validator_gets_github_token` | Spawn validator with user token | extra_env includes GITHUB_TOKEN | +| `test_auto_approve_marks_completed` | _auto_approve called | Task status → 
"completed", auto_approved=True | + +### 2. Integration Tests + +Test the complete flow across multiple services. + +**File**: `tests/integration/test_validation_integration.py` + +| Test Case | Description | Expected Outcome | +|-----------|-------------|------------------| +| `test_completion_event_triggers_validation` | Emit agent.completed event | task.status → "pending_validation" | +| `test_validation_passed_completes_task` | Full flow with passing validation | task.status → "completed" | +| `test_validation_failed_triggers_retry` | Full flow with failing validation | task.status → "pending", respawn triggered | +| `test_feedback_propagates_to_respawn` | Fail validation, respawn | New implementer receives revision_feedback | +| `test_branch_name_propagates_to_validator` | Complete task with branch | Validator gets BRANCH_NAME env var | + +### 3. API Endpoint Tests + +Test the validation result endpoint directly. + +**File**: `tests/api/test_validation_endpoint.py` + +| Test Case | Description | Expected Outcome | +|-----------|-------------|------------------| +| `test_post_validation_result_passed` | POST /validation-result with passed=true | 200, task completed | +| `test_post_validation_result_failed` | POST /validation-result with passed=false | 200, task needs_revision | +| `test_post_validation_result_wrong_sandbox` | POST with mismatched sandbox_id | 403 or logged warning | +| `test_post_validation_result_missing_task` | POST for non-existent task | 404 | + +### 4. Event Tests + +Test event publishing and handling. 
+ +| Test Case | Description | Expected Outcome | +|-----------|-------------|------------------| +| `test_validation_requested_event_published` | request_validation called | TASK_VALIDATION_REQUESTED event published | +| `test_validation_passed_event_published` | Validation passes | TASK_VALIDATION_PASSED event published | +| `test_validation_failed_event_published` | Validation fails | TASK_VALIDATION_FAILED event published | +| `test_orchestrator_handles_validation_failed` | TASK_VALIDATION_FAILED event | Task reset to "pending" | + +## Manual Testing Guide + +### Prerequisites + +1. Running services: + ```bash + docker-compose up -d postgres redis + uv run uvicorn omoi_os.api.main:app --host 0.0.0.0 --port 18000 --reload + ``` + +2. Database migrations applied: + ```bash + uv run alembic upgrade head + ``` + +3. Daytona configured (or mocked) + +### Manual Test 1: Trigger Validation Flow + +```bash +# 1. Create a task in the database +curl -X POST http://localhost:18000/api/v1/tasks \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $TOKEN" \ + -d '{ + "ticket_id": "", + "phase_id": "PHASE_IMPLEMENTATION", + "task_type": "implement_feature", + "description": "Test validation flow" + }' + +# 2. 
Simulate completion event (normally from worker) +# Use the test script below +``` + +### Test Script: `scripts/test_validation_flow.py` + +```python +#!/usr/bin/env python +"""Test script for validation flow.""" +import asyncio +import os +from uuid import uuid4 + +# Set test env +os.environ["TASK_VALIDATION_ENABLED"] = "true" + +from omoi_os.services.database import DatabaseService +from omoi_os.services.event_bus import EventBusService +from omoi_os.services.task_validator import TaskValidatorService +from omoi_os.models.task import Task +from omoi_os.models.ticket import Ticket +from omoi_os.models.project import Project + + +async def test_validation_flow(): + # Initialize services + db = DatabaseService() + event_bus = EventBusService() + validator = TaskValidatorService(db=db, event_bus=event_bus) + + # Create test project, ticket, task + with db.get_session() as session: + project = Project( + name="Test Project", + github_owner="test-owner", + github_repo="test-repo", + ) + session.add(project) + session.commit() + + ticket = Ticket( + title="Test Ticket", + project_id=project.id, + status="in_progress", + ) + session.add(ticket) + session.commit() + + task = Task( + ticket_id=ticket.id, + phase_id="PHASE_IMPLEMENTATION", + task_type="implement_feature", + description="Test task", + status="running", + sandbox_id=f"test-sandbox-{uuid4().hex[:8]}", + result={"branch_name": "feature/test-branch"}, + ) + session.add(task) + session.commit() + task_id = str(task.id) + sandbox_id = task.sandbox_id + + print(f"Created task: {task_id}") + print(f"Sandbox ID: {sandbox_id}") + + # Request validation + validation_id = await validator.request_validation( + task_id=task_id, + sandbox_id=sandbox_id, + implementation_result={ + "success": True, + "branch_name": "feature/test-branch", + } + ) + + print(f"Validation requested: {validation_id}") + + # Check task status + with db.get_session() as session: + task = session.get(Task, task_id) + print(f"Task status: 
{task.status}") + print(f"Task result: {task.result}") + + # Simulate validation result (passed) + await validator.handle_validation_result( + task_id=task_id, + validator_agent_id=str(uuid4()), + passed=True, + feedback="All checks passed!", + evidence={"tests": "passed", "build": "success"}, + ) + + # Check final status + with db.get_session() as session: + task = session.get(Task, task_id) + print(f"Final task status: {task.status}") + print(f"Validation passed: {task.result.get('validation_passed')}") + + +if __name__ == "__main__": + asyncio.run(test_validation_flow()) +``` + +### Manual Test 2: API Endpoint + +```bash +# After running a task through validation flow, test the endpoint: + +# Simulate validator reporting success +curl -X POST http://localhost:18000/api/v1/sandbox/test-sandbox-123/validation-result \ + -H "Content-Type: application/json" \ + -d '{ + "task_id": "", + "passed": true, + "feedback": "All tests pass, code is production-ready", + "evidence": { + "test_output": "...", + "build_log": "..." 
+ } + }' + +# Simulate validator reporting failure +curl -X POST http://localhost:18000/api/v1/sandbox/test-sandbox-123/validation-result \ + -H "Content-Type: application/json" \ + -d '{ + "task_id": "", + "passed": false, + "feedback": "Tests failing: 3 unit tests failed", + "evidence": { + "test_output": "FAILED test_foo, test_bar, test_baz" + }, + "recommendations": [ + "Fix test_foo by updating the expected value", + "Fix test_bar by handling null case" + ] + }' +``` + +## Running Tests + +### Quick Unit Tests +```bash +uv run pytest tests/test_task_validator_service.py -v +``` + +### Full Validation Suite +```bash +uv run pytest tests/ -k "validation" -v +``` + +### With Coverage +```bash +uv run pytest tests/test_task_validator_service.py --cov=omoi_os.services.task_validator --cov-report=html +``` + +## Environment Variables + +| Variable | Default | Description | +|----------|---------|-------------| +| `TASK_VALIDATION_ENABLED` | `true` | Enable/disable validation system | +| `MAX_VALIDATION_ITERATIONS` | `3` | Max retries before task fails | +| `TESTING` | `false` | Skip background loops in tests | + +## Database Tables + +### validation_reviews +- `id` (UUID): Primary key +- `task_id` (UUID): Foreign key to tasks +- `validator_agent_id` (String): Agent that performed validation +- `iteration_number` (Integer): Which validation attempt +- `validation_passed` (Boolean): Pass/fail status +- `feedback` (Text): Human-readable feedback +- `evidence` (JSONB): Test output, logs, etc. 
+- `recommendations` (JSONB): List of fix recommendations +- `created_at` (DateTime): When review was created + +## Related Files + +- `omoi_os/services/task_validator.py` - Core validation service +- `omoi_os/api/routes/sandbox.py` - Validation result endpoint +- `omoi_os/workers/orchestrator_worker.py` - Handles TASK_VALIDATION_FAILED +- `omoi_os/workers/claude_sandbox_worker.py` - Includes branch_name in completion +- `omoi_os/models/validation_review.py` - ValidationReview model +- `migrations/versions/010_validation_system.py` - Database migration + +## Troubleshooting + +### Task stuck in "pending_validation" +- Check Daytona logs for validator spawn errors +- Verify GITHUB_TOKEN is set for project owner +- Check if validator sandbox started successfully + +### Validation always fails +- Check validator agent logs +- Verify repo/branch info is correct +- Check if tests actually pass in the sandbox + +### Events not publishing +- Verify Redis is running +- Check event_bus is initialized correctly +- Look for Redis connection errors in logs diff --git a/backend/omoi_os/api/main.py b/backend/omoi_os/api/main.py index e48d3f94..3ca49c50 100644 --- a/backend/omoi_os/api/main.py +++ b/backend/omoi_os/api/main.py @@ -196,12 +196,29 @@ async def orchestrator_loop(): session.add(agent) session.commit() - # Spawn sandbox + # Determine execution mode from task type + # Default to implementation for legacy orchestrator loop + execution_mode = "implementation" + if hasattr(task, "task_type") and task.task_type: + # Use exploration mode for spec-creation tasks + if task.task_type in ( + "explore_codebase", "create_spec", "create_requirements", + "create_design", "create_tickets", "create_tasks", + "analyze_dependencies", "define_feature" + ): + execution_mode = "exploration" + elif task.task_type in ( + "validate", "validate_implementation", "review_code", "run_tests" + ): + execution_mode = "validation" + + # Spawn sandbox with appropriate skills for execution mode 
sandbox_id = await daytona_spawner.spawn_for_task( task_id=task_id, agent_id=agent_id, phase_id=phase_id, agent_type=agent_type, + execution_mode=execution_mode, ) # Update task with sandbox info diff --git a/backend/omoi_os/api/routes/sandbox.py b/backend/omoi_os/api/routes/sandbox.py index ebc599c5..82d0498f 100644 --- a/backend/omoi_os/api/routes/sandbox.py +++ b/backend/omoi_os/api/routes/sandbox.py @@ -536,6 +536,75 @@ async def get_session_transcript_async(db: DatabaseService, session_id: str) -> return None +def get_session_transcript_for_task(db: DatabaseService, task_id: str) -> tuple[str | None, str | None]: + """ + Retrieve Claude session transcript by task_id (SYNC version). + + This is useful for resuming a task that was previously run - we can find + the session transcript stored for that task and pass it to a new sandbox. + + Args: + db: Database service + task_id: Task ID to find transcript for + + Returns: + Tuple of (session_id, transcript_b64), or (None, None) if not found + """ + try: + from omoi_os.models.claude_session_transcript import ClaudeSessionTranscript + + with db.get_session() as session: + # Find the most recent transcript for this task + transcript = ( + session.query(ClaudeSessionTranscript) + .filter_by(task_id=task_id) + .order_by(ClaudeSessionTranscript.updated_at.desc()) + .first() + ) + if transcript: + return transcript.session_id, transcript.transcript_b64 + except Exception as e: + logger.warning(f"Failed to retrieve session transcript for task {task_id}: {e}") + + return None, None + + +async def get_session_transcript_for_task_async( + db: DatabaseService, task_id: str +) -> tuple[str | None, str | None]: + """ + Retrieve Claude session transcript by task_id (ASYNC version - non-blocking). + + This is useful for resuming a task that was previously run - we can find + the session transcript stored for that task and pass it to a new sandbox. 
+ + Args: + db: Database service + task_id: Task ID to find transcript for + + Returns: + Tuple of (session_id, transcript_b64), or (None, None) if not found + """ + try: + from sqlalchemy import select + + from omoi_os.models.claude_session_transcript import ClaudeSessionTranscript + + async with db.get_async_session() as session: + result = await session.execute( + select(ClaudeSessionTranscript) + .filter_by(task_id=task_id) + .order_by(ClaudeSessionTranscript.updated_at.desc()) + ) + transcript = result.scalar_one_or_none() + if transcript: + return transcript.session_id, transcript.transcript_b64 + except Exception as e: + logger.warning(f"Failed to retrieve session transcript for task {task_id}: {e}") + + return None, None + + @router.post("/{sandbox_id}/events", response_model=SandboxEventResponse) async def post_sandbox_event( sandbox_id: str, @@ -648,8 +717,11 @@ async def post_sandbox_event( ) logger.info(f"Successfully updated task {task_id} to running") - # Handle agent.completed -> transition to completed (async) + # Handle agent.completed -> trigger validation or complete directly elif event.event_type == "agent.completed": + # Check if this is a validator agent completion + is_validator = event.event_data.get("agent_type") == "validator" + # Extract result from event_data result = { "success": event.event_data.get("success", True), @@ -658,6 +730,9 @@ async def post_sandbox_event( "session_id": event.event_data.get("session_id"), "stop_reason": event.event_data.get("stop_reason"), } + # Include branch name for validation workflow + if "branch_name" in event.event_data and event.event_data["branch_name"]: + result["branch_name"] = event.event_data["branch_name"] # Include final output if available if "final_output" in event.event_data: result["output"] = event.event_data["final_output"] @@ -667,13 +742,39 @@ async def post_sandbox_event( else: logger.debug(f"Task {task_id} completion missing final_output") - logger.info(f"Updating task {task_id} 
status to completed") - await task_queue.update_task_status_async( - task_id=task_id, - status="completed", - result=result, - ) - logger.info(f"Successfully updated task {task_id} to completed") + # Check if validation is enabled + validation_enabled = os.environ.get( + "TASK_VALIDATION_ENABLED", "true" + ).lower() in ("true", "1", "yes") + + if is_validator: + # This is a validator completion - process validation result + logger.info(f"Validator completed for task {task_id}") + # Validation result is handled separately via validation API + # Just record the cost, don't change task status here + elif validation_enabled: + # Implementation completed - trigger validation + logger.info( + f"Task {task_id} implementation complete, requesting validation" + ) + from omoi_os.services.task_validator import get_task_validator + + validator = get_task_validator(db=db, event_bus=get_event_bus()) + await validator.request_validation( + task_id=task_id, + sandbox_id=sandbox_id, + implementation_result=result, + ) + logger.info(f"Validation requested for task {task_id}") + else: + # Validation disabled - mark as completed directly + logger.info(f"Updating task {task_id} status to completed (validation disabled)") + await task_queue.update_task_status_async( + task_id=task_id, + status="completed", + result=result, + ) + logger.info(f"Successfully updated task {task_id} to completed") # Record cost if cost data is available cost_usd = event.event_data.get("cost_usd") @@ -1468,3 +1569,116 @@ async def get_messages(sandbox_id: str) -> list[MessageItem]: queue = _get_message_queue() messages = queue.get_all(sandbox_id) return [MessageItem(**m) for m in messages] + + +# ============================================================================ +# VALIDATION API ENDPOINTS +# ============================================================================ + + +class ValidationResultRequest(BaseModel): + """Request schema for submitting validation results.""" + + task_id: str = 
Field(..., description="Task ID that was validated") + passed: bool = Field(..., description="Whether validation passed") + feedback: str = Field(..., description="Human-readable feedback") + evidence: Optional[dict] = Field( + default=None, description="Evidence of checks performed (test output, etc.)" + ) + recommendations: Optional[list[str]] = Field( + default=None, description="List of recommendations if validation failed" + ) + + +class ValidationResultResponse(BaseModel): + """Response schema for validation result submission.""" + + status: str + task_id: str + validation_passed: bool + new_task_status: str + + +@router.post("/{sandbox_id}/validation-result", response_model=ValidationResultResponse) +async def submit_validation_result( + sandbox_id: str, + validation: ValidationResultRequest, +) -> ValidationResultResponse: + """ + Submit validation result from a validator agent. + + This endpoint is called by validator agents to report whether validation + passed or failed. Based on the result: + - If passed: Task is marked as "completed" + - If failed: Task is marked as "needs_revision" with feedback + + Args: + sandbox_id: Sandbox identifier of the validator + validation: Validation result data + + Returns: + ValidationResultResponse with new task status + + Example: + POST /api/v1/sandboxes/validator-abc123/validation-result + { + "task_id": "task-xyz", + "passed": false, + "feedback": "Tests are failing. 
3 tests failed in test_auth.py", + "evidence": {"test_output": "..."}, + "recommendations": ["Fix the failing tests", "Run pytest before committing"] + } + """ + db = get_db_service() + + # Get validator agent ID from the original task's result + # The validator_agent_id was stored when validation was requested + validator_agent_id = None + async with db.get_async_session() as session: + from sqlalchemy import select + + # Find the original task being validated + result = await session.execute( + select(Task).filter(Task.id == validation.task_id) + ) + task = result.scalar_one_or_none() + if task and task.result: + # Get validator info stored during validation request + validator_agent_id = task.result.get("validator_agent_id") + stored_sandbox = task.result.get("validator_sandbox_id") + # Verify the sandbox matches (security check) + if stored_sandbox and stored_sandbox != sandbox_id: + logger.warning( + f"Sandbox mismatch: expected {stored_sandbox}, got {sandbox_id}" + ) + + if not validator_agent_id: + # Use a placeholder if we can't find the agent + validator_agent_id = f"validator-{sandbox_id[:8]}" + + from omoi_os.services.task_validator import get_task_validator + + validator_service = get_task_validator(db=db, event_bus=get_event_bus()) + await validator_service.handle_validation_result( + task_id=validation.task_id, + validator_agent_id=validator_agent_id, + passed=validation.passed, + feedback=validation.feedback, + evidence=validation.evidence, + recommendations=validation.recommendations, + ) + + # Determine new task status + new_status = "completed" if validation.passed else "needs_revision" + + logger.info( + f"Validation result submitted for task {validation.task_id}: " + f"passed={validation.passed}, new_status={new_status}" + ) + + return ValidationResultResponse( + status="accepted", + task_id=validation.task_id, + validation_passed=validation.passed, + new_task_status=new_status, + ) diff --git a/backend/omoi_os/api/routes/tasks.py 
b/backend/omoi_os/api/routes/tasks.py index 484bf1e8..e551393e 100644 --- a/backend/omoi_os/api/routes/tasks.py +++ b/backend/omoi_os/api/routes/tasks.py @@ -367,6 +367,60 @@ async def get_task( } +class TaskUpdateRequest(BaseModel): + """Request model for updating a task.""" + + title: Optional[str] = None + description: Optional[str] = None + priority: Optional[str] = None + status: Optional[str] = None + + +@router.patch("/{task_id}", response_model=dict) +async def update_task( + task_id: str, + update: TaskUpdateRequest, + db: DatabaseService = Depends(get_db_service), +): + """ + Update task by ID. + + Args: + task_id: Task UUID + update: Fields to update + db: Database service + + Returns: + Updated task information + """ + async with db.get_async_session() as session: + result = await session.execute( + select(Task).filter(Task.id == task_id) + ) + task = result.scalar_one_or_none() + if not task: + raise HTTPException(status_code=404, detail="Task not found") + + # Update only provided fields + update_data = update.model_dump(exclude_unset=True) + for field, value in update_data.items(): + if value is not None: + setattr(task, field, value) + + await session.commit() + await session.refresh(task) + + return { + "id": task.id, + "ticket_id": task.ticket_id, + "title": task.title, + "description": task.description, + "status": task.status, + "priority": task.priority, + "updated_at": task.updated_at.isoformat() if task.updated_at else None, + } + + @router.get("", response_model=List[dict]) async def list_tasks( status: str | None = None, diff --git a/backend/omoi_os/api/routes/tickets.py b/backend/omoi_os/api/routes/tickets.py index 4a02eaf6..06c9831a 100644 --- a/backend/omoi_os/api/routes/tickets.py +++ b/backend/omoi_os/api/routes/tickets.py @@ -437,6 +437,51 @@ async def get_ticket( return TicketResponse.model_validate(ticket) +class TicketUpdateRequest(BaseModel): + """Request model for updating a ticket.""" + + title: Optional[str] = None + 
description: Optional[str] = None + priority: Optional[str] = None + status: Optional[str] = None + + +@router.patch("/{ticket_id}", response_model=TicketResponse) +async def update_ticket( + ticket_id: UUID, + update: TicketUpdateRequest, + db: DatabaseService = Depends(get_db_service), +): + """ + Update a ticket by ID (async). + + Args: + ticket_id: Ticket ID + update: Fields to update + db: Database service + + Returns: + Updated ticket instance + """ + async with db.get_async_session() as session: + result = await session.execute( + select(Ticket).filter(Ticket.id == str(ticket_id)) + ) + ticket = result.scalar_one_or_none() + if not ticket: + raise HTTPException(status_code=404, detail="Ticket not found") + + # Update only provided fields + update_data = update.model_dump(exclude_unset=True) + for field, value in update_data.items(): + if value is not None: + setattr(ticket, field, value) + + await session.commit() + await session.refresh(ticket) + return TicketResponse.model_validate(ticket) + + @router.get("/{ticket_id}/context") async def get_ticket_context( ticket_id: UUID, diff --git a/backend/omoi_os/mcp/spec_workflow.py b/backend/omoi_os/mcp/spec_workflow.py index 1923d9a5..7489b0d4 100644 --- a/backend/omoi_os/mcp/spec_workflow.py +++ b/backend/omoi_os/mcp/spec_workflow.py @@ -395,6 +395,55 @@ async def get_ticket(args: dict[str, Any]) -> dict[str, Any]: return _format_error(str(e)) +@tool( + "get_task", + "Get full details of a task including description, acceptance criteria, and parent ticket info. 
" + "IMPORTANT: Always use the task UUID, not the task title.", + {"task_id": "Task UUID (required - use the full UUID, not the title)"}, +) +async def get_task(args: dict[str, Any]) -> dict[str, Any]: + """Get task details including full description.""" + try: + async with httpx.AsyncClient(timeout=API_TIMEOUT) as client: + response = await client.get( + f"{API_BASE}/api/v1/tasks/{args['task_id']}" + ) + response.raise_for_status() + task = response.json() + + output = f"# Task: {task['title']}\n\n" + output += f"**ID:** {task['id']}\n" + output += f"**Status:** {task['status']}\n" + output += f"**Priority:** {task.get('priority', 'MEDIUM')}\n" + output += f"**Phase:** {task.get('phase_id', 'N/A')}\n" + output += f"**Ticket ID:** {task.get('ticket_id', 'N/A')}\n\n" + + if task.get("description"): + output += f"## Full Description\n\n{task['description']}\n\n" + + # Also fetch parent ticket for additional context + if task.get("ticket_id"): + try: + ticket_response = await client.get( + f"{API_BASE}/api/v1/tickets/{task['ticket_id']}" + ) + if ticket_response.status_code == 200: + ticket = ticket_response.json() + output += f"## Parent Ticket Context\n\n" + output += f"**Ticket Title:** {ticket['title']}\n" + output += f"**Ticket Status:** {ticket['status']}\n\n" + if ticket.get("description"): + output += f"**Ticket Description:**\n{ticket['description']}\n" + except Exception: + pass # Parent ticket fetch is optional + + return _format_response(output) + except httpx.HTTPStatusError as e: + return _format_error(f"HTTP {e.response.status_code}: {e.response.text}") + except Exception as e: + return _format_error(str(e)) + + # ============================================================================= # Approval Tools # ============================================================================= @@ -461,6 +510,7 @@ async def approve_design(args: dict[str, Any]) -> dict[str, Any]: add_spec_task, create_ticket, get_ticket, + get_task, # Added for task context 
retrieval approve_requirements, approve_design, ] diff --git a/backend/omoi_os/sandbox_skills/__init__.py b/backend/omoi_os/sandbox_skills/__init__.py index 43dcae0d..fc1e1e0b 100644 --- a/backend/omoi_os/sandbox_skills/__init__.py +++ b/backend/omoi_os/sandbox_skills/__init__.py @@ -92,37 +92,65 @@ def get_skill_files(skill_name: str) -> dict[str, str]: return result +def get_skill_set(mode: str) -> list[str]: + """Get skills for a specific execution mode. + + Args: + mode: Execution mode - "exploration", "implementation", "validation". + + Returns: + List of skill names for the specified mode. + """ + manifest = get_skill_manifest() + settings = manifest.get("settings", {}) + skill_sets = settings.get("skill_sets", {}) + return skill_sets.get(mode, []) + + def get_skills_for_upload( skill_names: Optional[list[str]] = None, install_path: str = "/root/.claude/skills", include_all_files: bool = True, + mode: Optional[str] = None, ) -> dict[str, str]: """Get skills ready for upload to a sandbox. Args: - skill_names: Specific skills to include. If None, includes all always_include skills. + skill_names: Specific skills to include. Added to mode-based skills if provided. install_path: Path in sandbox where skills will be installed. include_all_files: If True, include all files (scripts, references, etc.). If False, only include SKILL.md files. + mode: Execution mode - "exploration", "implementation", "validation". + If None, falls back to always_include only. Returns: Dict mapping sandbox file paths to file content. 
Example: {"/root/.claude/skills/code-review/SKILL.md": "...content..."} + + Modes: + - exploration: For feature definition - creates specs, tickets, tasks + - implementation: For task execution - writes code, runs tests + - validation: For verifying implementation meets requirements """ manifest = get_skill_manifest() settings = manifest.get("settings", {}) - # Determine which skills to include - if skill_names is None: - skill_names = settings.get("always_include", []) + # Start with always_include (truly universal skills) + skills_to_load = set(settings.get("always_include", [])) + + # Add mode-specific skills if mode is provided + if mode: + skill_sets = settings.get("skill_sets", {}) + mode_skills = skill_sets.get(mode, []) + skills_to_load |= set(mode_skills) - # Add any always_include skills not already in the list - always_include = set(settings.get("always_include", [])) - skill_names = list(set(skill_names) | always_include) + # Add any explicitly requested skills + if skill_names: + skills_to_load |= set(skill_names) result = {} - for skill_name in skill_names: + for skill_name in skills_to_load: if include_all_files: # Get all files for the skill skill_files = get_skill_files(skill_name) diff --git a/backend/omoi_os/sandbox_skills/manifest.yaml b/backend/omoi_os/sandbox_skills/manifest.yaml index a8cf04fa..934529c5 100644 --- a/backend/omoi_os/sandbox_skills/manifest.yaml +++ b/backend/omoi_os/sandbox_skills/manifest.yaml @@ -54,9 +54,25 @@ settings: # Whether to also copy to project .claude/skills copy_to_project: false - # Skills to always include (even if not explicitly requested) + # Skills loaded for ALL modes (truly universal) always_include: - - spec-driven-dev - - git-workflow - - error-diagnosis - - code-review + - error-diagnosis # Always need error debugging + + # Mode-specific skill sets + # Mode is determined by task type and passed to get_skills_for_upload() + skill_sets: + # Feature definition mode - explore codebase, create 
specs/tickets/tasks + exploration: + - spec-driven-dev + + # Task execution mode - implement features, fix bugs, write code + implementation: + - git-workflow + - code-review + - test-writer + - pr-creator + + # Validation mode - verify implementation meets requirements + validation: + - code-review + - test-writer diff --git a/backend/omoi_os/sandbox_skills/spec-driven-dev/scripts/api_client.py b/backend/omoi_os/sandbox_skills/spec-driven-dev/scripts/api_client.py index ec6ce1cd..21e6d9a5 100644 --- a/backend/omoi_os/sandbox_skills/spec-driven-dev/scripts/api_client.py +++ b/backend/omoi_os/sandbox_skills/spec-driven-dev/scripts/api_client.py @@ -164,9 +164,11 @@ async def create_ticket(self, ticket: ParsedTicket, project_id: Optional[str] = Returns: Tuple of (success, message/error) """ + # Use full_body for rich AI context, fallback to description + description_text = ticket.full_body if ticket.full_body else ticket.description payload = { "title": ticket.title, - "description": ticket.description, + "description": description_text, "priority": ticket.priority, "phase_id": "PHASE_IMPLEMENTATION", # Default phase } @@ -229,10 +231,12 @@ async def create_task(self, task: ParsedTask, ticket_api_id: str) -> tuple[bool, if task.dependencies.depends_on: dependencies = {"depends_on": task.dependencies.depends_on} + # Use full_body for rich AI context, fallback to objective + description_text = task.full_body if task.full_body else task.objective payload = { "ticket_id": ticket_api_id, "title": task.title, - "description": task.objective, + "description": description_text, "task_type": "implementation", # Default type "priority": "MEDIUM", # Default priority "phase_id": "PHASE_IMPLEMENTATION", @@ -775,9 +779,10 @@ async def sync( if existing: ticket_api_ids[ticket.id] = existing["id"] - # Check if description needs update + # Check if description needs update (use full_body for rich context) + description_text = ticket.full_body if ticket.full_body else 
ticket.description existing_desc = existing.get("description", "") or "" - if existing_desc.strip() != ticket.description.strip(): + if existing_desc.strip() != description_text.strip(): if dry_run: summary.add( SyncResult( @@ -789,7 +794,7 @@ async def sync( ) else: success, msg = await self.update_ticket_description( - existing["id"], ticket.description + existing["id"], description_text ) summary.add( SyncResult( @@ -843,9 +848,10 @@ async def sync( existing = task_by_title.get(task.title) if existing: - # Check if description needs update + # Check if description needs update (use full_body for rich context) + description_text = task.full_body if task.full_body else task.objective existing_desc = existing.get("description", "") or "" - if existing_desc.strip() != task.objective.strip(): + if existing_desc.strip() != description_text.strip(): if dry_run: summary.add( SyncResult( @@ -857,7 +863,7 @@ async def sync( ) else: success, msg = await self.update_task_description( - existing["id"], task.objective + existing["id"], description_text ) summary.add( SyncResult( diff --git a/backend/omoi_os/sandbox_skills/spec-driven-dev/scripts/models.py b/backend/omoi_os/sandbox_skills/spec-driven-dev/scripts/models.py index b0297493..fcbc990d 100644 --- a/backend/omoi_os/sandbox_skills/spec-driven-dev/scripts/models.py +++ b/backend/omoi_os/sandbox_skills/spec-driven-dev/scripts/models.py @@ -145,7 +145,8 @@ class ParsedTicket: design_ref: Optional[str] = None tasks: list[str] = field(default_factory=list) dependencies: TicketDependencies = field(default_factory=TicketDependencies) - description: str = "" + description: str = "" # Short summary/description + full_body: str = "" # Full markdown body with all sections (for AI context) file_path: str = "" def is_blocked(self) -> bool: @@ -168,7 +169,8 @@ class ParsedTask: created: date assignee: Optional[str] = None dependencies: TaskDependencies = field(default_factory=TaskDependencies) - objective: str = "" + objective: str 
= "" # Short objective/description + full_body: str = "" # Full markdown body with all sections (for AI context) file_path: str = "" def is_blocked(self, completed_tasks: set[str]) -> bool: diff --git a/backend/omoi_os/sandbox_skills/spec-driven-dev/scripts/parse_specs.py b/backend/omoi_os/sandbox_skills/spec-driven-dev/scripts/parse_specs.py index 50da5952..e66e4950 100644 --- a/backend/omoi_os/sandbox_skills/spec-driven-dev/scripts/parse_specs.py +++ b/backend/omoi_os/sandbox_skills/spec-driven-dev/scripts/parse_specs.py @@ -185,9 +185,9 @@ def _parse_date(self, value) -> date: def _extract_description(self, body: str) -> str: """Extract description section from markdown body.""" - # Look for ## Description or ## Objective section + # Look for ## Description, ## Objective, or ## Summary section match = re.search( - r"##\s+(?:Description|Objective)\s*\n(.*?)(?=\n##|\n---|\Z)", + r"##\s+(?:Description|Objective|Summary)\s*\n(.*?)(?=\n##|\n---|\Z)", body, re.DOTALL | re.IGNORECASE, ) @@ -195,6 +195,18 @@ def _extract_description(self, body: str) -> str: return match.group(1).strip() return "" + def _extract_full_body(self, body: str) -> str: + """Extract the full markdown body (everything after the title). + + This preserves all sections (Summary, Acceptance Criteria, Technical Details, etc.) + for rich context in AI-assisted task execution. 
+ """ + # Remove the main title (# TKT-XXX: Title) if present + lines = body.strip().split('\n') + if lines and lines[0].startswith('# '): + lines = lines[1:] + return '\n'.join(lines).strip() + def _extract_section(self, body: str, section_name: str) -> str: """Extract a named section from markdown body.""" pattern = rf"##\s+{re.escape(section_name)}\s*\n(.*?)(?=\n##|\n---|\Z)" @@ -391,6 +403,7 @@ def parse_ticket(self, file_path: Path) -> ParsedTicket: tasks=frontmatter.get("tasks", []) or [], dependencies=dependencies, description=self._extract_description(body), + full_body=self._extract_full_body(body), file_path=str(file_path), ) @@ -432,6 +445,7 @@ def parse_task(self, file_path: Path) -> ParsedTask: assignee=frontmatter.get("assignee"), dependencies=dependencies, objective=self._extract_description(body), + full_body=self._extract_full_body(body), file_path=str(file_path), ) diff --git a/backend/omoi_os/services/daytona_spawner.py b/backend/omoi_os/services/daytona_spawner.py index 3eaa9757..47bbe659 100644 --- a/backend/omoi_os/services/daytona_spawner.py +++ b/backend/omoi_os/services/daytona_spawner.py @@ -134,6 +134,7 @@ async def spawn_for_task( extra_env: Optional[Dict[str, str]] = None, labels: Optional[Dict[str, str]] = None, runtime: str = "openhands", # "openhands" or "claude" + execution_mode: str = "implementation", # "exploration", "implementation", "validation" ) -> str: """Spawn a Daytona sandbox for executing a task. 
@@ -145,6 +146,10 @@ async def spawn_for_task( extra_env: Additional environment variables labels: Labels for the sandbox runtime: Agent runtime to use - "openhands" (default) or "claude" + execution_mode: Skill loading mode - determines which skills are loaded + - "exploration": For feature definition (creates specs/tickets/tasks) + - "implementation": For task execution (writes code, default) + - "validation": For verifying implementation Returns: Sandbox ID @@ -175,6 +180,7 @@ async def spawn_for_task( env_vars = { "AGENT_ID": agent_id, "TASK_ID": task_id, + "EXECUTION_MODE": execution_mode, # Controls prompt and skill behavior "MCP_SERVER_URL": self.mcp_server_url, "CALLBACK_URL": base_url, # For EventReporter to use correct API URL "PHASE_ID": phase_id, @@ -305,11 +311,14 @@ async def spawn_for_task( # Handle session resumption for Claude runtime if runtime == "claude" and self.db: resume_session_id = None + resume_from_task = extra_env.get("RESUME_FROM_TASK") if extra_env else None + if extra_env and extra_env.get("RESUME_SESSION_ID"): resume_session_id = extra_env["RESUME_SESSION_ID"] elif extra_env and extra_env.get("resume_session_id"): resume_session_id = extra_env["resume_session_id"] + # Option 1: Resume by session_id if resume_session_id: # Retrieve session transcript from database from omoi_os.api.routes.sandbox import get_session_transcript @@ -326,6 +335,24 @@ async def spawn_for_task( f"Session transcript not found for {resume_session_id}, starting fresh session" ) + # Option 2: Resume by task_id (looks up session from the task's previous runs) + elif resume_from_task: + from omoi_os.api.routes.sandbox import get_session_transcript_for_task + + session_id, transcript_b64 = get_session_transcript_for_task( + self.db, resume_from_task + ) + if session_id and transcript_b64: + env_vars["RESUME_SESSION_ID"] = session_id + env_vars["SESSION_TRANSCRIPT_B64"] = transcript_b64 + logger.info( + f"Retrieved session transcript for task 
{resume_from_task[:8]}... (session: {session_id[:8]}...)" + ) + else: + logger.debug( + f"No session transcript found for task {resume_from_task}, starting fresh session" + ) + # Add extra env vars (can override transcript if explicitly provided) if extra_env: env_vars.update(extra_env) @@ -398,6 +425,7 @@ async def spawn_for_task( env_vars=env_vars, labels=sandbox_labels, runtime=runtime, + execution_mode=execution_mode, ) # Update status @@ -446,6 +474,7 @@ async def _create_daytona_sandbox( env_vars: Dict[str, str], labels: Dict[str, str], runtime: str = "openhands", + execution_mode: str = "implementation", ) -> None: """Create a Daytona sandbox via their API. @@ -456,6 +485,7 @@ async def _create_daytona_sandbox( env_vars: Environment variables to set in sandbox labels: Labels for sandbox organization runtime: Agent runtime - "openhands" or "claude" + execution_mode: Skill loading mode - determines which skills are loaded """ try: from daytona import ( @@ -518,7 +548,7 @@ async def _create_daytona_sandbox( info.extra_data["daytona_sandbox_id"] = sandbox.id # Set environment variables and start the worker - await self._start_worker_in_sandbox(sandbox, env_vars, runtime) + await self._start_worker_in_sandbox(sandbox, env_vars, runtime, execution_mode) logger.info(f"Daytona sandbox {sandbox.id} created for {sandbox_id}") @@ -535,6 +565,7 @@ async def _start_worker_in_sandbox( sandbox: Any, env_vars: Dict[str, str], runtime: str = "openhands", + execution_mode: str = "implementation", ) -> None: """Start the sandbox worker inside the Daytona sandbox. 
@@ -542,6 +573,7 @@ async def _start_worker_in_sandbox( sandbox: Daytona sandbox instance env_vars: Environment variables for the worker runtime: Agent runtime - "openhands" or "claude" + execution_mode: Skill loading mode - determines which skills are loaded """ # Extract git clone parameters (don't pass token to env vars for security) github_repo = env_vars.pop("GITHUB_REPO", None) @@ -621,13 +653,16 @@ def escape_env_value(v: str) -> str: # Upload Claude skills to sandbox (Claude runtime only) if runtime == "claude": - logger.info("Uploading Claude skills to sandbox...") + logger.info(f"Uploading Claude skills for '{execution_mode}' mode...") try: # Create skills directory sandbox.process.exec("mkdir -p /root/.claude/skills") - # Get skills and upload each one - skills = get_skills_for_upload() + # Get skills based on execution mode + # - exploration: spec-driven-dev (for creating specs/tickets/tasks) + # - implementation: git-workflow, code-review, etc. (for executing tasks) + # - validation: code-review, test-writer (for validating implementation) + skills = get_skills_for_upload(mode=execution_mode) for skill_path, content in skills.items(): # Create parent directory for skill parent_dir = "/".join(skill_path.rsplit("/", 1)[:-1]) @@ -636,7 +671,7 @@ def escape_env_value(v: str) -> str: sandbox.fs.upload_file(content.encode("utf-8"), skill_path) logger.debug(f"Uploaded skill: {skill_path}") - logger.info(f"Uploaded {len(skills)} Claude skills to sandbox") + logger.info(f"Uploaded {len(skills)} Claude skills for '{execution_mode}' mode") except Exception as e: logger.warning(f"Failed to upload Claude skills: {e}") # Continue without skills - agent can still function diff --git a/backend/omoi_os/services/task_queue.py b/backend/omoi_os/services/task_queue.py index 9a805df2..d3ae2d79 100644 --- a/backend/omoi_os/services/task_queue.py +++ b/backend/omoi_os/services/task_queue.py @@ -1512,3 +1512,198 @@ async def get_assigned_tasks_async( result = await 
session.execute(stmt) return list(result.scalars().all()) + + # ========================================================================= + # CONCURRENCY CONTROL METHODS + # ========================================================================= + # These methods help enforce concurrency limits per project to prevent + # spawning too many sandboxes at once. + + def get_running_count_by_project(self, project_id: str) -> int: + """ + Get the count of currently running tasks for a project. + + Running tasks include those in 'claiming', 'assigned', or 'running' status. + This is used to enforce concurrency limits per project. + + Args: + project_id: UUID of the project + + Returns: + Count of running tasks for the project + """ + from omoi_os.models.ticket import Ticket + + with self.db.get_session() as session: + # Join tasks with tickets to filter by project + count = ( + session.query(Task) + .join(Ticket, Task.ticket_id == Ticket.id) + .filter( + Ticket.project_id == project_id, + Task.status.in_(["claiming", "assigned", "running"]), + ) + .count() + ) + return count + + async def get_running_count_by_project_async(self, project_id: str) -> int: + """ + Async version: Get the count of currently running tasks for a project. + + Args: + project_id: UUID of the project + + Returns: + Count of running tasks for the project + """ + from sqlalchemy import func + from omoi_os.models.ticket import Ticket + + async with self.db.get_async_session() as session: + stmt = ( + select(func.count(Task.id)) + .join(Ticket, Task.ticket_id == Ticket.id) + .filter( + Ticket.project_id == project_id, + Task.status.in_(["claiming", "assigned", "running"]), + ) + ) + result = await session.execute(stmt) + return result.scalar() or 0 + + def get_project_for_task(self, task_id: str) -> Optional[str]: + """ + Get the project ID for a task (via its ticket). 
+ + Args: + task_id: UUID of the task + + Returns: + Project ID if found, None otherwise + """ + from omoi_os.models.ticket import Ticket + + with self.db.get_session() as session: + task = session.query(Task).filter(Task.id == task_id).first() + if not task: + return None + + ticket = session.query(Ticket).filter(Ticket.id == task.ticket_id).first() + if not ticket: + return None + + return ticket.project_id + + def get_next_task_with_concurrency_limit( + self, + max_concurrent_per_project: int = 5, + phase_id: Optional[str] = None, + agent_capabilities: Optional[List[str]] = None, + ) -> Task | None: + """ + Get highest-scored pending task that respects project concurrency limits. + + This method extends get_next_task() by also checking that the task's + project hasn't exceeded its concurrency limit before claiming. + + Args: + max_concurrent_per_project: Maximum concurrent tasks per project (default: 5) + phase_id: Phase identifier to filter by (None = any phase) + agent_capabilities: Optional list of agent capabilities for matching + + Returns: + Task object or None if no eligible tasks available + """ + from omoi_os.models.ticket import Ticket + + with self.db.get_session() as session: + # Get pending tasks without sandbox + query = session.query(Task).filter( + Task.status == "pending", + Task.sandbox_id.is_(None), + ) + if phase_id is not None: + query = query.filter(Task.phase_id == phase_id) + tasks = query.all() + + if not tasks: + return None + + # Filter out tasks with incomplete dependencies, capability mismatches, + # AND tasks whose projects have hit concurrency limits + available_tasks = [] + project_running_counts: dict[str, int] = {} + + for task in tasks: + if not self._check_dependencies_complete(session, task): + continue + + if agent_capabilities is not None and not self._check_capability_match( + task, agent_capabilities + ): + continue + + # Get project ID for this task + ticket = session.query(Ticket).filter(Ticket.id == 
task.ticket_id).first() + if not ticket or not ticket.project_id: + # Tasks without a project are allowed (no limit) + task.score = self.scorer.compute_score(task) + available_tasks.append(task) + continue + + project_id = ticket.project_id + + # Check/cache running count for this project + if project_id not in project_running_counts: + count = ( + session.query(Task) + .join(Ticket, Task.ticket_id == Ticket.id) + .filter( + Ticket.project_id == project_id, + Task.status.in_(["claiming", "assigned", "running"]), + ) + .count() + ) + project_running_counts[project_id] = count + + # Skip if project is at capacity + if project_running_counts[project_id] >= max_concurrent_per_project: + logger.debug( + f"Project {project_id} at capacity ({project_running_counts[project_id]}/{max_concurrent_per_project}), " + f"skipping task {task.id}" + ) + continue + + # Compute score and add to available tasks + task.score = self.scorer.compute_score(task) + available_tasks.append(task) + + if not available_tasks: + return None + + # Sort by score descending + task = max(available_tasks, key=lambda t: t.score) + + # Atomic claim using raw SQL + result = session.execute( + text(""" + UPDATE tasks + SET status = 'claiming', score = :score + WHERE id = :task_id + AND status = 'pending' + AND sandbox_id IS NULL + RETURNING id + """), + {"task_id": str(task.id), "score": task.score} + ) + claimed_row = result.fetchone() + session.commit() + + if not claimed_row: + logger.debug(f"Task {task.id} was claimed by another process, skipping") + return None + + session.refresh(task) + session.expunge(task) + return task diff --git a/backend/omoi_os/services/task_validator.py b/backend/omoi_os/services/task_validator.py new file mode 100644 index 00000000..c0cd24a3 --- /dev/null +++ b/backend/omoi_os/services/task_validator.py @@ -0,0 +1,501 @@ +"""Task Validator Service for verifying completed work. + +This service spawns validator agents to review work before marking tasks as complete. 
+The validation workflow: + +1. Implementer agent completes work and reports agent.completed +2. Task is marked as "pending_validation" (not "completed") +3. TaskValidatorService spawns a validator sandbox +4. Validator reviews code, runs tests, checks PR requirements +5. If validation passes -> task marked "completed" +6. If validation fails -> task marked "needs_revision" with feedback + +The validator agent checks: +- Tests pass (runs pytest/npm test/etc.) +- Code builds successfully +- Changes are committed and pushed +- PR is created (if required) +- Code review checklist passes (security, quality, maintainability) +""" + +import asyncio +import os +from datetime import datetime +from typing import Optional +from uuid import uuid4 + +from omoi_os.logging import get_logger +from omoi_os.models.task import Task +from omoi_os.models.validation_review import ValidationReview +from omoi_os.services.database import DatabaseService +from omoi_os.services.event_bus import EventBusService, SystemEvent +from omoi_os.utils.datetime import utc_now + +logger = get_logger(__name__) + + +# Validation requirements that must be met +VALIDATION_REQUIREMENTS = { + "tests_pass": { + "description": "All tests must pass", + "commands": ["pytest", "npm test", "go test ./...", "cargo test"], + "required": True, + }, + "build_passes": { + "description": "Code must build successfully", + "commands": ["npm run build", "cargo build", "go build ./..."], + "required": True, + }, + "changes_committed": { + "description": "All changes must be committed", + "check": "git status --porcelain returns empty", + "required": True, + }, + "changes_pushed": { + "description": "Changes must be pushed to remote", + "check": "git status shows ahead of remote by 0", + "required": True, + }, + "pr_created": { + "description": "Pull request must be created", + "check": "gh pr view returns PR info", + "required": True, # Based on user preference + }, +} + + +class TaskValidatorService: + """Service for 
validating completed task work. + + This service is responsible for: + 1. Spawning validator agents to review completed work + 2. Tracking validation iterations and feedback + 3. Determining when tasks can be marked as truly complete + """ + + def __init__( + self, + db: DatabaseService, + event_bus: Optional[EventBusService] = None, + ): + self.db = db + self.event_bus = event_bus + self._validation_enabled = os.getenv("TASK_VALIDATION_ENABLED", "true").lower() in ("true", "1", "yes") + self._max_validation_iterations = int(os.getenv("MAX_VALIDATION_ITERATIONS", "3")) + + async def request_validation( + self, + task_id: str, + sandbox_id: str, + implementation_result: dict, + ) -> str: + """Request validation for a completed task. + + This is called when an implementer agent reports completion. + Instead of marking the task as completed, we: + 1. Update task status to "pending_validation" + 2. Store the implementation result + 3. Spawn a validator agent + + Args: + task_id: ID of the task to validate + sandbox_id: Sandbox where implementation was done + implementation_result: Result data from the implementer + + Returns: + Validation request ID + """ + if not self._validation_enabled: + logger.info(f"Validation disabled, auto-approving task {task_id}") + return await self._auto_approve(task_id, implementation_result) + + validation_id = str(uuid4()) + + async with self.db.get_async_session() as session: + from sqlalchemy import select + + # Get the task + result = await session.execute( + select(Task).filter(Task.id == task_id) + ) + task = result.scalar_one_or_none() + + if not task: + logger.error(f"Task {task_id} not found for validation") + return "" + + # Count existing validation iterations + review_count = await session.execute( + select(ValidationReview).filter(ValidationReview.task_id == task_id) + ) + iteration = len(list(review_count.scalars().all())) + 1 + + if iteration > self._max_validation_iterations: + logger.warning( + f"Task {task_id} exceeded 
max validation iterations ({self._max_validation_iterations}), " + "marking as failed" + ) + task.status = "failed" + task.error_message = f"Failed validation after {iteration - 1} iterations" + await session.commit() + return "" + + # Update task status + task.status = "pending_validation" + task.result = { + **(task.result or {}), + "implementation_result": implementation_result, + "validation_requested_at": utc_now().isoformat(), + "validation_iteration": iteration, + } + await session.commit() + + logger.info( + f"Task {task_id} marked as pending_validation (iteration {iteration})" + ) + + # Spawn validator agent + validator_info = await self._spawn_validator(task_id, sandbox_id, iteration) + + # Store validator info in task result for later lookup + if validator_info: + async with self.db.get_async_session() as session: + result = await session.execute( + select(Task).filter(Task.id == task_id) + ) + task = result.scalar_one_or_none() + if task: + task.result = { + **(task.result or {}), + "validator_sandbox_id": validator_info["sandbox_id"], + "validator_agent_id": validator_info["agent_id"], + } + await session.commit() + + # Publish event + if self.event_bus: + self.event_bus.publish( + SystemEvent( + event_type="TASK_VALIDATION_REQUESTED", + entity_type="task", + entity_id=task_id, + payload={ + "validation_id": validation_id, + "iteration": iteration, + "sandbox_id": sandbox_id, + }, + ) + ) + + return validation_id + + async def handle_validation_result( + self, + task_id: str, + validator_agent_id: str, + passed: bool, + feedback: str, + evidence: Optional[dict] = None, + recommendations: Optional[list] = None, + ) -> None: + """Handle the result of a validation review. + + Args: + task_id: Task that was validated + validator_agent_id: Agent that performed validation + passed: Whether validation passed + feedback: Human-readable feedback + evidence: Evidence of checks performed (test output, etc.) 
+ recommendations: List of recommendations if validation failed + """ + async with self.db.get_async_session() as session: + from sqlalchemy import select + + # Get task + result = await session.execute( + select(Task).filter(Task.id == task_id) + ) + task = result.scalar_one_or_none() + + if not task: + logger.error(f"Task {task_id} not found for validation result") + return + + # Count existing reviews + review_result = await session.execute( + select(ValidationReview).filter(ValidationReview.task_id == task_id) + ) + iteration = len(list(review_result.scalars().all())) + 1 + + # Create validation review record + review = ValidationReview( + task_id=task_id, + validator_agent_id=validator_agent_id, + iteration_number=iteration, + validation_passed=passed, + feedback=feedback, + evidence=evidence or {}, + recommendations=recommendations or [], + ) + session.add(review) + + if passed: + # Validation passed - mark task as completed + task.status = "completed" + task.result = { + **(task.result or {}), + "validation_passed": True, + "validation_iteration": iteration, + "validated_at": utc_now().isoformat(), + } + logger.info(f"Task {task_id} validation PASSED on iteration {iteration}") + + if self.event_bus: + self.event_bus.publish( + SystemEvent( + event_type="TASK_VALIDATION_PASSED", + entity_type="task", + entity_id=task_id, + payload={ + "iteration": iteration, + "feedback": feedback, + }, + ) + ) + else: + # Validation failed - mark for revision + task.status = "needs_revision" + task.result = { + **(task.result or {}), + "validation_passed": False, + "validation_iteration": iteration, + "revision_feedback": feedback, + "revision_recommendations": recommendations or [], + } + logger.info( + f"Task {task_id} validation FAILED on iteration {iteration}, " + f"needs revision. Feedback: {feedback[:100]}..." 
+ ) + + if self.event_bus: + self.event_bus.publish( + SystemEvent( + event_type="TASK_VALIDATION_FAILED", + entity_type="task", + entity_id=task_id, + payload={ + "iteration": iteration, + "feedback": feedback, + "recommendations": recommendations, + }, + ) + ) + + await session.commit() + + async def _spawn_validator( + self, + task_id: str, + original_sandbox_id: str, + iteration: int, + ) -> Optional[dict]: + """Spawn a validator agent to review the task. + + The validator runs in the same sandbox as the implementation + to have access to the code and git state. + + Args: + task_id: Task to validate + original_sandbox_id: Sandbox where implementation was done + iteration: Validation iteration number + + Returns: + Dict with sandbox_id and agent_id, or None if spawn failed + """ + try: + from omoi_os.services.daytona_spawner import get_daytona_spawner + from omoi_os.models.ticket import Ticket + from omoi_os.models.user import User + + spawner = get_daytona_spawner(db=self.db, event_bus=self.event_bus) + + # Get repo/branch info from the task's ticket + extra_env = { + "VALIDATION_MODE": "true", + "ORIGINAL_TASK_ID": task_id, + "VALIDATION_ITERATION": str(iteration), + "ORIGINAL_SANDBOX_ID": original_sandbox_id, + } + + with self.db.get_session() as session: + # Get task with its ticket and project + task = session.query(Task).filter(Task.id == task_id).first() + if task and task.ticket_id: + ticket = session.query(Ticket).filter(Ticket.id == task.ticket_id).first() + if ticket and ticket.project: + project = ticket.project + # Set repo info + if project.github_owner and project.github_repo: + extra_env["GITHUB_REPO"] = f"{project.github_owner}/{project.github_repo}" + extra_env["GITHUB_REPO_OWNER"] = project.github_owner + extra_env["GITHUB_REPO_NAME"] = project.github_repo + + # Get branch name from task result (set by implementer) + if task.result and task.result.get("branch_name"): + extra_env["BRANCH_NAME"] = task.result["branch_name"] + elif task.result 
and task.result.get("implementation_result"): + impl_result = task.result["implementation_result"] + if impl_result.get("branch_name"): + extra_env["BRANCH_NAME"] = impl_result["branch_name"] + + # Get GitHub token from project owner + if project.created_by: + extra_env["USER_ID"] = str(project.created_by) + user = session.query(User).filter(User.id == project.created_by).first() + if user and user.attributes: + github_token = user.attributes.get("github_access_token") + if github_token: + extra_env["GITHUB_TOKEN"] = github_token + + # Create validator agent record + from omoi_os.models.agent import Agent + + validator_agent_id = str(uuid4()) + + with self.db.get_session() as session: + validator_agent = Agent( + id=validator_agent_id, + agent_type="validator", + phase_id="PHASE_VALIDATION", + capabilities=["validate", "test", "review"], + status="RUNNING", + tags=["validator", "daytona"], + health_status="healthy", + ) + session.add(validator_agent) + session.commit() + + # Build validator prompt + validator_prompt = self._build_validator_prompt(task_id, iteration) + extra_env["INITIAL_PROMPT"] = validator_prompt + + # Spawn validator sandbox with repo/branch info + # The sandbox will clone the repo and checkout the branch + # Use "validation" mode to load code-review and test-writer skills + validator_sandbox_id = await spawner.spawn_for_task( + task_id=f"{task_id}-validator-{iteration}", + agent_id=validator_agent_id, + phase_id="PHASE_VALIDATION", + agent_type="validator", + extra_env=extra_env, + runtime="claude", + execution_mode="validation", + ) + + logger.info( + f"Spawned validator sandbox {validator_sandbox_id} for task {task_id} " + f"(iteration {iteration})" + ) + + return { + "sandbox_id": validator_sandbox_id, + "agent_id": validator_agent_id, + } + + except Exception as e: + logger.error(f"Failed to spawn validator for task {task_id}: {e}") + return None + + def _build_validator_prompt(self, task_id: str, iteration: int) -> str: + """Build the 
prompt for the validator agent.""" + return f"""You are a code validator agent. Your job is to verify that the implementation work for task {task_id} is complete and meets quality standards. + +## Validation Iteration: {iteration} + +## Your Checklist + +1. **Tests Pass**: Run the test suite and verify all tests pass + - Try: `pytest`, `npm test`, `go test ./...`, `cargo test` + - All tests MUST pass + +2. **Build Passes**: Verify the code builds successfully + - Try: `npm run build`, `cargo build`, `go build ./...` + - No build errors allowed + +3. **Changes Committed**: All changes must be committed to git + - Run: `git status` + - Working directory should be clean (no uncommitted changes) + +4. **Changes Pushed**: All commits must be pushed to remote + - Run: `git status` + - Should not show "Your branch is ahead of..." + +5. **PR Created**: A pull request must exist for this work + - Run: `gh pr view` + - Should show PR details with proper title and description + +6. **Code Quality**: Review the code for: + - Security vulnerabilities + - Obvious bugs or logic errors + - Missing error handling + - Code style consistency + +## Your Actions + +1. Run each check in the checklist above +2. Collect evidence (command outputs, test results) +3. If ALL checks pass, report validation success +4. If ANY check fails, report validation failure with: + - Which checks failed + - What needs to be fixed + - Specific recommendations + +## Reporting Results + +After completing your validation, you MUST report the result using the validation API. + +If validation PASSES: +- Mark the task as validated +- Include a summary of what was verified + +If validation FAILS: +- Provide specific feedback on what failed +- Give actionable recommendations for fixes +- The implementation agent will receive this feedback and retry + +Begin your validation now. 
Start by checking the git status and running tests.""" + + async def _auto_approve(self, task_id: str, result: dict) -> str: + """Auto-approve a task when validation is disabled.""" + async with self.db.get_async_session() as session: + from sqlalchemy import select + + result_query = await session.execute( + select(Task).filter(Task.id == task_id) + ) + task = result_query.scalar_one_or_none() + + if task: + task.status = "completed" + task.result = { + **(task.result or {}), + **result, + "auto_approved": True, + "completed_at": utc_now().isoformat(), + } + await session.commit() + logger.info(f"Task {task_id} auto-approved (validation disabled)") + + return "auto-approved" + + +def get_task_validator( + db: Optional[DatabaseService] = None, + event_bus: Optional[EventBusService] = None, +) -> TaskValidatorService: + """Get or create TaskValidatorService instance.""" + if db is None: + from omoi_os.api.dependencies import get_db_service + db = get_db_service() + + return TaskValidatorService(db=db, event_bus=event_bus) diff --git a/backend/omoi_os/workers/claude_sandbox_worker.py b/backend/omoi_os/workers/claude_sandbox_worker.py index b41ee1f7..71a31a5f 100644 --- a/backend/omoi_os/workers/claude_sandbox_worker.py +++ b/backend/omoi_os/workers/claude_sandbox_worker.py @@ -23,6 +23,9 @@ TICKET_ID - Ticket identifier TICKET_TITLE - Ticket title for context TICKET_DESCRIPTION - Full ticket description + TASK_DATA_BASE64 - Base64-encoded JSON with full task context (from orchestrator) + Contains: task_id, task_description, ticket_id, ticket_title, etc. 
+ The task_description is the FULL spec markdown from .omoi_os/ files Environment Variables (optional - skills & subagents): ENABLE_SKILLS - Set to "true" to enable Claude skills @@ -644,6 +647,32 @@ def __init__(self): self.ticket_title = os.environ.get("TICKET_TITLE", "") self.ticket_description = os.environ.get("TICKET_DESCRIPTION", "") + # Phase 6: Decode TASK_DATA_BASE64 if present (from orchestrator) + # This contains full task context including the complete task description + self.task_data: dict = {} + self.task_description = "" # Full task description from spec files + task_data_b64 = os.environ.get("TASK_DATA_BASE64") + if task_data_b64: + try: + import base64 + import json + task_json = base64.b64decode(task_data_b64).decode() + self.task_data = json.loads(task_json) + # Extract task description (this is the FULL spec markdown) + self.task_description = self.task_data.get("task_description", "") + # Also populate ticket fields from task_data if not already set + if not self.ticket_id and self.task_data.get("ticket_id"): + self.ticket_id = self.task_data["ticket_id"] + if not self.ticket_title and self.task_data.get("ticket_title"): + self.ticket_title = self.task_data["ticket_title"] + if not self.ticket_description and self.task_data.get("ticket_description"): + self.ticket_description = self.task_data["ticket_description"] + if not self.task_id and self.task_data.get("task_id"): + self.task_id = self.task_data["task_id"] + logger.info(f"Loaded task data from TASK_DATA_BASE64: task_description={len(self.task_description)} chars") + except Exception as e: + logger.warning(f"Failed to decode TASK_DATA_BASE64: {e}") + # Server connection self.callback_url = os.environ.get("CALLBACK_URL", "http://localhost:8000") self.api_key = os.environ.get("ANTHROPIC_API_KEY") or os.environ.get( @@ -655,6 +684,7 @@ def __init__(self): self.api_base_url = os.environ.get("ANTHROPIC_BASE_URL") # Task and prompts + # INITIAL_PROMPT can be overridden, but we prefer 
task_description from TASK_DATA_BASE64 self.initial_prompt = os.environ.get("INITIAL_PROMPT", "") self.poll_interval = float(os.environ.get("POLL_INTERVAL", "0.5")) self.heartbeat_interval = int(os.environ.get("HEARTBEAT_INTERVAL", "30")) @@ -708,15 +738,76 @@ def __init__(self): os.environ.get("ENABLE_SUBAGENTS", "true").lower() == "true" ) - # MCP spec workflow tools + # Execution mode - controls prompts and tool availability + # - exploration: For feature definition (create specs, tickets, tasks) + # - implementation: For task execution (write code, run tests) + # - validation: For verifying implementation + self.execution_mode = os.environ.get("EXECUTION_MODE", "implementation") + + # MCP spec workflow tools - only enable for exploration mode + # Implementation agents should NOT be creating new specs/tickets self.enable_spec_tools = ( - os.environ.get("ENABLE_SPEC_TOOLS", "true").lower() == "true" + os.environ.get("ENABLE_SPEC_TOOLS", "").lower() == "true" + or self.execution_mode == "exploration" ) + # Add execution mode-specific instructions to system prompt + if self.execution_mode == "exploration": + append_parts.append(""" +## Execution Mode: EXPLORATION + +You are in **exploration mode**. Your purpose is to: +1. Explore and understand the codebase structure +2. Create specifications (requirements, designs) for new features +3. Break down features into tickets and tasks +4. Analyze dependencies between components +5. Upload specs/tickets/tasks to the server using MCP tools + +**DO NOT write implementation code in this mode.** Focus on planning and documentation. + +Use the spec workflow MCP tools to create and upload: +- Specifications with requirements and designs +- Tickets for trackable work items +- Tasks with clear acceptance criteria +- Dependencies between tasks""") + elif self.execution_mode == "validation": + append_parts.append(""" +## Execution Mode: VALIDATION + +You are in **validation mode**. Your purpose is to: +1. 
Review the implementation for correctness +2. Run tests and verify they pass +3. Check code quality and adherence to requirements +4. Verify the implementation matches the task specification +5. Report validation results (pass/fail with feedback) + +**DO NOT write new features or make major changes.** Focus on verification. + +If you find issues: +- Document specific problems found +- Provide actionable feedback for fixes +- Do NOT fix the issues yourself (implementation agent will handle)""") + else: # implementation mode (default) + append_parts.append(""" +## Execution Mode: IMPLEMENTATION + +You are in **implementation mode**. Your purpose is to: +1. Execute the assigned task +2. Write code to implement features or fix bugs +3. Run tests to verify your implementation +4. Create commits and pull requests when done + +**DO NOT create new specs, tickets, or tasks.** Focus on executing this specific task. + +Before coding: +1. Read the task specification carefully +2. Check for existing patterns in the codebase +3. 
Understand the requirements and acceptance criteria""") + # Note: MCP tools are automatically available when we register MCP servers # No need to explicitly add them to allowed_tools - the SDK handles this if self.enable_spec_tools and MCP_AVAILABLE: - # Add spec tools documentation to system prompt append + # Add spec tools documentation to system prompt append (exploration mode only) append_parts.append(""" ## Spec Workflow MCP Tools (mcp__spec_workflow__*) You have access to spec workflow tools for managing specifications, requirements, and tickets: @@ -728,10 +819,32 @@ def __init__(self): - update_design: Update architecture and design artifacts - add_spec_task: Add tasks to a specification - create_ticket: Create tickets for the workflow system -- get_ticket: Get ticket details +- get_ticket: Get ticket details (use UUID, not title) +- get_task: Get task details including full description and acceptance criteria - approve_requirements: Approve requirements and move to Design phase - approve_design: Approve design and move to Implementation phase""") + # Add mandatory task context instruction for implementation/validation modes + # Exploration mode creates tasks, it doesn't execute them + if self.task_id and self.execution_mode in ("implementation", "validation"): + append_parts.append(f""" +## CRITICAL: Task Context (MUST READ FIRST) + +You are assigned to work on task ID: `{self.task_id}` + +**BEFORE doing ANY other work, you MUST:** +1. Call `mcp__spec_workflow__get_task` with task_id="{self.task_id}" to get the full task details +2. Read the task's description, acceptance criteria, and implementation notes carefully +3. 
If the task references a parent ticket, call `mcp__spec_workflow__get_ticket` with the ticket's UUID to get additional context + +The task description contains the complete specification including: +- Detailed description of what to implement +- Acceptance criteria (checklist of requirements) +- Implementation notes and constraints +- Dependencies on other tasks + +DO NOT start {"coding" if self.execution_mode == "implementation" else "validation"} until you have read and understood the full task specification.""") + # Check for custom SYSTEM_PROMPT env var or additional append content custom_system_prompt = os.environ.get("SYSTEM_PROMPT") system_prompt_append = os.environ.get("SYSTEM_PROMPT_APPEND", "") @@ -1342,7 +1455,38 @@ async def track_tool_use(input_data, tool_use_id, context): if tool_name == "Task": event_data["subagent_type"] = tool_input.get("subagent_type") event_data["description"] = tool_input.get("description") - event_data["prompt"] = tool_input.get("prompt") + event_data["subagent_prompt"] = tool_input.get("prompt") + + # Extract the actual subagent result from tool_response + # Task tool returns: {"result": "...", "usage": {...}, "total_cost_usd": ..., "duration_ms": ...} + subagent_result = None + subagent_usage = None + subagent_cost = None + subagent_duration = None + + if serialized_response: + try: + import json + # Try to parse the response as JSON + if isinstance(serialized_response, str): + result_data = json.loads(serialized_response) + else: + result_data = serialized_response + + if isinstance(result_data, dict): + subagent_result = result_data.get("result") + subagent_usage = result_data.get("usage") + subagent_cost = result_data.get("total_cost_usd") + subagent_duration = result_data.get("duration_ms") + except (json.JSONDecodeError, TypeError): + # If parsing fails, use the raw response as result + subagent_result = serialized_response + + event_data["subagent_result"] = subagent_result + event_data["subagent_usage"] = 
subagent_usage + event_data["subagent_cost_usd"] = subagent_cost + event_data["subagent_duration_ms"] = subagent_duration + await reporter.report("agent.subagent_completed", event_data) # Special tracking for skills elif tool_name == "Skill": @@ -1622,6 +1766,8 @@ async def _process_messages( "final_output": "\n".join(final_output) if final_output else None, # Include final output for task result + # Include branch name for validation workflow + "branch_name": self.config.branch_name if self.config.branch_name else None, } # Try to report completion with retries (critical for task finalization) @@ -1733,8 +1879,10 @@ async def run(self): "timestamp": datetime.now(timezone.utc).isoformat(), "sdk": "claude_agent_sdk", "model": self.config.model or "default", - "task": self.config.initial_prompt + "task": self.config.task_description + or self.config.initial_prompt or self.config.ticket_description, + "task_description_length": len(self.config.task_description), "ticket_title": self.config.ticket_title, "resumed_session_id": self.config.resume_session_id, "is_resuming": bool(self.config.resume_session_id), @@ -1755,8 +1903,10 @@ async def run(self): async with ClaudeSDKClient(options=sdk_options) as client: # Process initial prompt + # Priority: task_description (from TASK_DATA_BASE64) > initial_prompt > ticket_description > ticket_title initial_task = ( - self.config.initial_prompt + self.config.task_description # Full task spec from orchestrator + or self.config.initial_prompt or self.config.ticket_description or f"Analyze ticket: {self.config.ticket_title}" ) diff --git a/backend/omoi_os/workers/orchestrator_worker.py b/backend/omoi_os/workers/orchestrator_worker.py index 379570ef..88c8df44 100644 --- a/backend/omoi_os/workers/orchestrator_worker.py +++ b/backend/omoi_os/workers/orchestrator_worker.py @@ -54,6 +54,50 @@ } +# Task type categories for execution mode determination +EXPLORATION_TASK_TYPES = frozenset([ + "explore_codebase", + "create_spec", + 
"create_requirements", + "create_design", + "create_tickets", + "create_tasks", + "analyze_dependencies", + "define_feature", +]) + +VALIDATION_TASK_TYPES = frozenset([ + "validate", + "validate_implementation", + "review_code", + "run_tests", +]) + + +def get_execution_mode(task_type: str) -> Literal["exploration", "implementation", "validation"]: + """Determine execution mode based on task type. + + This controls which skills are loaded into the sandbox: + - exploration: spec-driven-dev skill for creating specs/tickets/tasks + - implementation: git-workflow, code-review, etc. for executing tasks + - validation: code-review, test-writer for validating implementation + + Args: + task_type: The task type string (e.g., "implement_feature", "create_spec") + + Returns: + Execution mode string: "exploration", "implementation", or "validation" + """ + if task_type in EXPLORATION_TASK_TYPES: + return "exploration" + elif task_type in VALIDATION_TASK_TYPES: + return "validation" + else: + # Default to implementation for all other task types + # This includes: implement_feature, fix_bug, write_tests, refactor, etc. + return "implementation" + + async def heartbeat_task(): """Log heartbeat every 30 seconds to confirm worker is alive.""" heartbeat_num = 0 @@ -73,9 +117,13 @@ async def heartbeat_task(): def handle_task_event(event_data: dict) -> None: - """Handle TASK_CREATED events to wake up orchestrator immediately. + """Handle task-related events to wake up orchestrator immediately. + + This is called by the Redis event bus subscriber when: + - A new task is created (TASK_CREATED) + - A new ticket is created (TICKET_CREATED) + - A task completes (SANDBOX_agent.completed) - frees up a slot - This is called by the Redis event bus subscriber when a new task is created. Sets the task_ready_event to interrupt the polling sleep. 
""" stats["events_received"] += 1 @@ -91,6 +139,87 @@ def handle_task_event(event_data: dict) -> None: task_ready_event.set() +def handle_validation_failed(event_data: dict) -> None: + """Handle TASK_VALIDATION_FAILED event to reset task for re-implementation. + + When a validator agent fails the validation: + 1. Task is already marked as 'needs_revision' (by task_validator service) + 2. This handler resets the task to 'pending' so it can be picked up again + 3. The implementer will receive the validation feedback in the task result + + The implementer gets the revision feedback from task.result which contains: + - revision_feedback: Human-readable description of what failed + - revision_recommendations: List of specific fixes needed + """ + global db + + if not db: + logger.error("database_not_initialized_for_validation_handling") + return + + event_type = event_data.get("event_type", "TASK_VALIDATION_FAILED") + task_id = event_data.get("entity_id") + payload = event_data.get("payload", {}) + iteration = payload.get("iteration", 0) + feedback = payload.get("feedback", "No feedback provided") + + logger.info( + "validation_failed_handling", + task_id=task_id, + iteration=iteration, + feedback_preview=feedback[:100] if feedback else None, + ) + + if not task_id: + logger.warning("validation_failed_no_task_id", event_data=event_data) + return + + try: + from omoi_os.models.task import Task + from sqlalchemy import select + + with db.get_session() as session: + task = session.query(Task).filter(Task.id == task_id).first() + + if not task: + logger.warning("validation_failed_task_not_found", task_id=task_id) + return + + # Only reset if task is in needs_revision status + if task.status != "needs_revision": + logger.info( + "validation_failed_task_not_needs_revision", + task_id=task_id, + current_status=task.status, + ) + return + + # Reset task for re-implementation + # Keep the revision feedback in task.result so implementer can see it + task.status = "pending" + 
task.sandbox_id = None # Clear sandbox so it gets a fresh one + task.assigned_agent_id = None # Clear agent assignment + + session.commit() + + logger.info( + "task_reset_for_revision", + task_id=task_id, + iteration=iteration, + new_status="pending", + ) + + # Wake up the orchestrator to pick up the reset task + task_ready_event.set() + + except Exception as e: + logger.error( + "validation_failed_handling_error", + task_id=task_id, + error=str(e), + ) + + async def orchestrator_loop(): """Background task that polls queue and assigns tasks to workers. @@ -119,11 +248,32 @@ async def orchestrator_loop(): logger.info("orchestrator_loop_started") # Subscribe to task events for instant wakeup (hybrid approach) - # This allows the orchestrator to respond immediately when tasks are created + # This allows the orchestrator to respond immediately when: + # 1. New tasks are created (so we can spawn sandboxes) + # 2. Tasks complete (so we can spawn more now that a slot is free) + # 3. Validation fails (so we can reset task for re-implementation) try: event_bus.subscribe("TASK_CREATED", handle_task_event) event_bus.subscribe("TICKET_CREATED", handle_task_event) # Tickets also trigger tasks - logger.info("event_subscriptions_registered", events=["TASK_CREATED", "TICKET_CREATED"]) + # Subscribe to completion events to spawn more tasks when slots open + event_bus.subscribe("SANDBOX_agent.completed", handle_task_event) + event_bus.subscribe("SANDBOX_agent.failed", handle_task_event) + event_bus.subscribe("SANDBOX_agent.error", handle_task_event) + # Subscribe to validation events for the revision workflow + event_bus.subscribe("TASK_VALIDATION_FAILED", handle_validation_failed) + event_bus.subscribe("TASK_VALIDATION_PASSED", handle_task_event) # Just for wakeup/metrics + logger.info( + "event_subscriptions_registered", + events=[ + "TASK_CREATED", + "TICKET_CREATED", + "SANDBOX_agent.completed", + "SANDBOX_agent.failed", + "SANDBOX_agent.error", + "TASK_VALIDATION_FAILED", + 
"TASK_VALIDATION_PASSED", + ], + ) except Exception as e: logger.warning("event_subscription_failed", error=str(e), fallback="polling_only") @@ -134,6 +284,13 @@ async def orchestrator_loop(): sandbox_execution = settings.daytona.sandbox_execution mode = "sandbox" if sandbox_execution else "legacy" + # Get concurrency limit from environment (default: 5 concurrent tasks per project) + max_concurrent_per_project = int(os.getenv("MAX_CONCURRENT_TASKS_PER_PROJECT", "5")) + logger.info( + "concurrency_config", + max_concurrent_per_project=max_concurrent_per_project, + ) + # Initialize Daytona spawner if sandbox mode enabled daytona_spawner = None if sandbox_execution: @@ -168,8 +325,13 @@ async def orchestrator_loop(): with db.get_session() as session: if sandbox_execution: - # Sandbox mode: just get next pending task - task = queue.get_next_task(phase_id=None) + # Sandbox mode: get next pending task with concurrency limits + # This ensures we don't spawn more than max_concurrent_per_project + # sandboxes for any single project at a time + task = queue.get_next_task_with_concurrency_limit( + max_concurrent_per_project=max_concurrent_per_project, + phase_id=None, + ) else: # Legacy mode: check for available agent first available_agent = ( @@ -274,6 +436,16 @@ async def orchestrator_loop(): "ticket_priority": ticket.priority, "ticket_context": ticket.context or {}, } + + # Include revision feedback if task previously failed validation + # This allows the implementer to see what went wrong and fix it + if task.result: + if task.result.get("revision_feedback"): + task_data["revision_feedback"] = task.result["revision_feedback"] + if task.result.get("revision_recommendations"): + task_data["revision_recommendations"] = task.result["revision_recommendations"] + if task.result.get("validation_iteration"): + task_data["validation_iteration"] = task.result["validation_iteration"] # Base64 encode to avoid shell escaping issues task_json = json.dumps(task_data) 
extra_env["TASK_DATA_BASE64"] = base64.b64encode( @@ -372,6 +544,18 @@ async def orchestrator_loop(): ticket_type=extra_env.get("TICKET_TYPE"), ) + # Determine execution mode based on task type + # This controls which skills are loaded into the sandbox: + # - exploration: spec-driven-dev (for creating specs/tickets/tasks) + # - implementation: git-workflow, code-review, etc. (default) + # - validation: code-review, test-writer + execution_mode = get_execution_mode(task.task_type) + log.info( + "execution_mode_determined", + task_type=task.task_type, + execution_mode=execution_mode, + ) + # Spawn sandbox with user/repo context sandbox_id = await daytona_spawner.spawn_for_task( task_id=task_id, @@ -380,6 +564,7 @@ async def orchestrator_loop(): agent_type=agent_type, extra_env=extra_env if extra_env else None, runtime=sandbox_runtime, + execution_mode=execution_mode, ) log.info( diff --git a/backend/scripts/testing/test_execution_mode_spawn.py b/backend/scripts/testing/test_execution_mode_spawn.py new file mode 100755 index 00000000..8134275d --- /dev/null +++ b/backend/scripts/testing/test_execution_mode_spawn.py @@ -0,0 +1,333 @@ +#!/usr/bin/env python3 +"""Test spawning sandboxes with different execution modes. + +This script allows you to spawn a sandbox for an existing ticket/task +with a specific execution mode to verify the correct skills and prompts +are loaded. 
+ +Usage: + cd backend + + # List available tickets + uv run python scripts/testing/test_execution_mode_spawn.py --list-tickets + + # List tasks for a ticket + uv run python scripts/testing/test_execution_mode_spawn.py --list-tasks + + # Spawn with auto-detected mode (based on task_type) + uv run python scripts/testing/test_execution_mode_spawn.py --task + + # Spawn with explicit mode override + uv run python scripts/testing/test_execution_mode_spawn.py --task --mode exploration + uv run python scripts/testing/test_execution_mode_spawn.py --task --mode implementation + uv run python scripts/testing/test_execution_mode_spawn.py --task --mode validation + + # Dry run (show what would happen without spawning) + uv run python scripts/testing/test_execution_mode_spawn.py --task --dry-run +""" + +import argparse +import asyncio +import sys +import time +from pathlib import Path +from uuid import uuid4 + +# Add backend to path +sys.path.insert(0, str(Path(__file__).parent.parent.parent)) + +from dotenv import load_dotenv +load_dotenv(Path(__file__).parent.parent.parent / ".env.local") + +from omoi_os.config import get_app_settings + + +def get_db(): + """Get database service with proper connection string.""" + from omoi_os.services.database import DatabaseService + app_settings = get_app_settings() + return DatabaseService(connection_string=app_settings.database.url) + + +def list_tickets(limit: int = 20): + """List recent tickets.""" + from omoi_os.models.ticket import Ticket + + db = get_db() + with db.get_session() as session: + tickets = ( + session.query(Ticket) + .order_by(Ticket.created_at.desc()) + .limit(limit) + .all() + ) + + if not tickets: + print("No tickets found.") + return + + print(f"\n{'ID':<40} {'Status':<15} {'Title':<50}") + print("-" * 105) + for t in tickets: + title = (t.title or "Untitled")[:47] + "..." 
if len(t.title or "") > 50 else (t.title or "Untitled") + print(f"{t.id:<40} {t.status:<15} {title:<50}") + + +def list_tasks(ticket_id: str): + """List tasks for a ticket.""" + from omoi_os.models.task import Task + + db = get_db() + with db.get_session() as session: + tasks = ( + session.query(Task) + .filter(Task.ticket_id == ticket_id) + .order_by(Task.created_at.desc()) + .all() + ) + + if not tasks: + print(f"No tasks found for ticket {ticket_id}") + return + + print(f"\nTasks for ticket: {ticket_id}") + print(f"\n{'ID':<40} {'Type':<25} {'Status':<15} {'Phase':<20}") + print("-" * 100) + for t in tasks: + print(f"{t.id:<40} {t.task_type:<25} {t.status:<15} {t.phase_id:<20}") + + +def get_task_details(task_id: str): + """Get full task details including ticket info.""" + from sqlalchemy.orm import joinedload + from omoi_os.models.task import Task + from omoi_os.models.ticket import Ticket + + db = get_db() + with db.get_session() as session: + task = session.query(Task).filter(Task.id == task_id).first() + if not task: + print(f"Task {task_id} not found") + return None, None, None + + # Eagerly load ticket with project relationship + ticket = ( + session.query(Ticket) + .options(joinedload(Ticket.project)) + .filter(Ticket.id == task.ticket_id) + .first() + ) + + # Extract project info before detaching + project_info = None + if ticket and ticket.project: + project_info = { + "name": getattr(ticket.project, "name", None), + "github_owner": getattr(ticket.project, "github_owner", None), + "github_repo": getattr(ticket.project, "github_repo", None), + } + + # Detach from session for return + session.expunge(task) + if ticket: + session.expunge(ticket) + + return task, ticket, project_info + + +async def spawn_sandbox( + task_id: str, + mode_override: str = None, + dry_run: bool = False, +): + """Spawn a sandbox for a task with specified execution mode.""" + from omoi_os.workers.orchestrator_worker import get_execution_mode + from omoi_os.sandbox_skills import 
get_skills_for_upload + from omoi_os.config import load_daytona_settings + + # Get task details + task, ticket, project_info = get_task_details(task_id) + if not task: + return + + # Determine execution mode + auto_mode = get_execution_mode(task.task_type) + execution_mode = mode_override or auto_mode + + print("\n" + "=" * 60) + print("🧪 EXECUTION MODE SPAWN TEST") + print("=" * 60) + + print(f"\n📋 Task Details:") + print(f" Task ID: {task.id}") + print(f" Task Type: {task.task_type}") + print(f" Status: {task.status}") + print(f" Phase: {task.phase_id}") + if task.description: + desc = task.description[:100] + "..." if len(task.description) > 100 else task.description + print(f" Description: {desc}") + + if ticket: + print(f"\n📝 Ticket Details:") + print(f" Ticket ID: {ticket.id}") + print(f" Title: {ticket.title}") + if project_info: + print(f" Project: {project_info.get('name', 'Unknown')}") + + print(f"\n🎯 Execution Mode:") + print(f" Auto-detected: {auto_mode}") + if mode_override: + print(f" Override: {mode_override}") + print(f" Using: {execution_mode}") + + # Get skills for this mode + skills = get_skills_for_upload(mode=execution_mode) + skill_names = set() + for path in skills.keys(): + parts = path.split("/") + if "skills" in parts: + idx = parts.index("skills") + if idx + 1 < len(parts): + skill_names.add(parts[idx + 1]) + + print(f"\n📦 Skills to Load:") + for skill in sorted(skill_names): + marker = "📝" if skill == "spec-driven-dev" else "🔧" + print(f" {marker} {skill}") + + has_spec_skill = "spec-driven-dev" in skill_names + print(f"\n spec-driven-dev included: {'✅ Yes' if has_spec_skill else '❌ No'}") + + if dry_run: + print("\n🔍 DRY RUN - Not spawning actual sandbox") + print(" Would spawn sandbox with:") + print(f" - execution_mode={execution_mode}") + print(f" - runtime=claude") + print(f" - skills={sorted(skill_names)}") + return + + # Check Daytona settings + daytona_settings = load_daytona_settings() + if not daytona_settings.api_key: + 
print("\n❌ DAYTONA_API_KEY not set - cannot spawn sandbox") + return + + print(f"\n✅ Daytona API Key: {daytona_settings.api_key[:12]}...") + + # Import and create spawner + from omoi_os.services.daytona_spawner import DaytonaSpawnerService + + db = get_db() + spawner = DaytonaSpawnerService( + db=db, + event_bus=None, + mcp_server_url="http://localhost:18000/mcp/", + ) + + # Build extra_env similar to orchestrator + extra_env = { + "TICKET_ID": str(ticket.id) if ticket else "", + "TICKET_TITLE": ticket.title if ticket else "", + } + + # Add project info if available + if project_info: + github_owner = project_info.get("github_owner") + github_repo = project_info.get("github_repo") + if github_owner and github_repo: + extra_env["GITHUB_REPO"] = f"{github_owner}/{github_repo}" + extra_env["GITHUB_REPO_OWNER"] = github_owner + extra_env["GITHUB_REPO_NAME"] = github_repo + + agent_id = f"test-agent-{uuid4().hex[:8]}" + + print(f"\n🚀 Spawning sandbox...") + print(f" Agent ID: {agent_id}") + print(f" Execution Mode: {execution_mode}") + + try: + start_time = time.time() + sandbox_id = await spawner.spawn_for_task( + task_id=task_id, + agent_id=agent_id, + phase_id=task.phase_id, + agent_type="worker", + extra_env=extra_env, + runtime="claude", + execution_mode=execution_mode, + ) + spawn_time = time.time() - start_time + + print(f"\n✅ Sandbox spawned in {spawn_time:.1f}s") + print(f" Sandbox ID: {sandbox_id}") + + # Get sandbox info + info = spawner.get_sandbox_info(sandbox_id) + if info: + print(f" Status: {info.status}") + + # Verify skills were uploaded + sandbox = info.extra_data.get("daytona_sandbox") if info else None + if sandbox: + print(f"\n🔍 Verifying skills in sandbox...") + result = sandbox.process.exec("ls -la /root/.claude/skills/ 2>/dev/null || echo 'Skills dir not found'") + print(f" Skills directory:") + for line in result.result.strip().split("\n"): + print(f" {line}") + + # Check environment variables (use . 
instead of source for sh compatibility) + result = sandbox.process.exec(". /root/.bashrc && echo $EXECUTION_MODE") + print(f"\n EXECUTION_MODE env var: {result.result.strip()}") + + # Also check /tmp/.sandbox_env file for persistent env vars + result = sandbox.process.exec("cat /tmp/.sandbox_env | grep EXECUTION_MODE || echo 'Not in file'") + print(f" EXECUTION_MODE in env file: {result.result.strip()}") + + print(f"\n🎉 Sandbox ready for testing!") + print(f" You can connect to: {sandbox_id}") + + # Ask if user wants to terminate + print(f"\n⚠️ Remember to terminate the sandbox when done:") + print(f" await spawner.terminate_sandbox('{sandbox_id}')") + + except Exception as e: + print(f"\n❌ Failed to spawn sandbox: {e}") + import traceback + traceback.print_exc() + + +def main(): + parser = argparse.ArgumentParser( + description="Test spawning sandboxes with different execution modes", + formatter_class=argparse.RawDescriptionHelpFormatter, + epilog=__doc__ + ) + + parser.add_argument("--list-tickets", action="store_true", help="List recent tickets") + parser.add_argument("--list-tasks", metavar="TICKET_ID", help="List tasks for a ticket") + parser.add_argument("--task", metavar="TASK_ID", help="Task ID to spawn sandbox for") + parser.add_argument( + "--mode", + choices=["exploration", "implementation", "validation"], + help="Override execution mode (default: auto-detect from task_type)" + ) + parser.add_argument("--dry-run", action="store_true", help="Show what would happen without spawning") + + args = parser.parse_args() + + if args.list_tickets: + list_tickets() + elif args.list_tasks: + list_tasks(args.list_tasks) + elif args.task: + asyncio.run(spawn_sandbox( + task_id=args.task, + mode_override=args.mode, + dry_run=args.dry_run, + )) + else: + parser.print_help() + + +if __name__ == "__main__": + main() diff --git a/backend/scripts/testing/test_validation_flow.py b/backend/scripts/testing/test_validation_flow.py new file mode 100755 index 00000000..70a2cf98 
--- /dev/null +++ b/backend/scripts/testing/test_validation_flow.py @@ -0,0 +1,455 @@ +#!/usr/bin/env python +"""Manual test script for the validation flow. + +This script tests the complete validation workflow: +1. Creates test data (project, ticket, task) +2. Simulates implementer completion +3. Requests validation +4. Simulates validator result (pass or fail) +5. Verifies final task state + +Usage: + # Test passing validation + uv run python scripts/testing/test_validation_flow.py --pass + + # Test failing validation + uv run python scripts/testing/test_validation_flow.py --fail + + # Test full retry loop (fail, then pass) + uv run python scripts/testing/test_validation_flow.py --retry + + # Dry run (don't actually spawn sandboxes) + uv run python scripts/testing/test_validation_flow.py --dry-run --pass +""" + +import argparse +import asyncio +import os +import sys +from uuid import uuid4 + +# Ensure we can import from omoi_os +sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))) + +# Set environment before imports +os.environ.setdefault("TASK_VALIDATION_ENABLED", "true") +os.environ.setdefault("MAX_VALIDATION_ITERATIONS", "3") + + +def create_test_data(db): + """Create test project, ticket, and task.""" + from omoi_os.models.project import Project + from omoi_os.models.ticket import Ticket + from omoi_os.models.task import Task + from omoi_os.models.user import User + + with db.get_session() as session: + # Create or get test user + user = session.query(User).filter(User.email == "validation-test@example.com").first() + if not user: + from omoi_os.services.auth_service import AuthService + auth = AuthService( + db=db, + jwt_secret="test-secret", + jwt_algorithm="HS256", + access_token_expire_minutes=15, + refresh_token_expire_days=7, + ) + user = User( + email="validation-test@example.com", + full_name="Validation Test User", + hashed_password=auth.hash_password("TestPass123!"), + is_active=True, + is_verified=True, + 
attributes={"github_access_token": "ghp_test_token_for_validation"}, + ) + session.add(user) + session.commit() + print(f"✅ Created test user: {user.email}") + else: + print(f"✅ Using existing user: {user.email}") + + # Create project + project = Project( + name=f"Validation Test Project {uuid4().hex[:6]}", + github_owner="test-owner", + github_repo="test-repo", + created_by=user.id, + ) + session.add(project) + session.commit() + print(f"✅ Created project: {project.name} (ID: {project.id})") + + # Create ticket + ticket = Ticket( + title="Implement Feature for Validation Test", + description="This ticket tests the validation flow", + phase_id="PHASE_IMPLEMENTATION", + status="in_progress", + project_id=project.id, + ) + session.add(ticket) + session.commit() + print(f"✅ Created ticket: {ticket.title} (ID: {ticket.id})") + + # Create task + sandbox_id = f"impl-sandbox-{uuid4().hex[:8]}" + branch_name = f"feature/validation-test-{uuid4().hex[:6]}" + task = Task( + ticket_id=ticket.id, + phase_id="PHASE_IMPLEMENTATION", + task_type="implement_feature", + description="Implement user authentication flow", + status="running", + sandbox_id=sandbox_id, + result={"branch_name": branch_name}, + ) + session.add(task) + session.commit() + print(f"✅ Created task: {task.description}") + print(f" - Task ID: {task.id}") + print(f" - Sandbox ID: {sandbox_id}") + print(f" - Branch: {branch_name}") + + return { + "user_id": str(user.id), + "project_id": str(project.id), + "ticket_id": str(ticket.id), + "task_id": str(task.id), + "sandbox_id": sandbox_id, + "branch_name": branch_name, + } + + +async def test_validation_pass(db, event_bus, test_data, dry_run=False): + """Test validation that passes on first attempt.""" + from omoi_os.services.task_validator import TaskValidatorService + from omoi_os.models.task import Task + from omoi_os.models.validation_review import ValidationReview + from unittest.mock import AsyncMock, patch + + print("\n" + "=" * 60) + print("TESTING: Validation 
PASS Flow") + print("=" * 60) + + validator = TaskValidatorService(db=db, event_bus=event_bus) + + if dry_run: + # Mock the spawner + with patch.object(validator, '_spawn_validator', new_callable=AsyncMock) as mock_spawn: + validator_agent_id = str(uuid4()) + mock_spawn.return_value = { + "sandbox_id": f"validator-sandbox-{uuid4().hex[:8]}", + "agent_id": validator_agent_id, + } + + await _run_validation_pass(validator, db, test_data, validator_agent_id) + else: + # Real spawner (requires Daytona) + print("⚠️ Real spawner mode - ensure Daytona is running") + validation_id = await validator.request_validation( + task_id=test_data["task_id"], + sandbox_id=test_data["sandbox_id"], + implementation_result={ + "success": True, + "branch_name": test_data["branch_name"], + }, + ) + print(f"📋 Validation requested: {validation_id}") + print("⏳ Waiting for validator to complete (check Daytona logs)...") + print(" When validator finishes, run:") + print(f' curl -X POST http://localhost:18000/api/v1/sandbox//validation-result \\') + print(f' -H "Content-Type: application/json" \\') + print(f' -d \'{{"task_id": "{test_data["task_id"]}", "passed": true, "feedback": "All tests pass"}}\'') + + +async def _run_validation_pass(validator, db, test_data, validator_agent_id): + """Run the pass validation flow (used in dry-run mode).""" + from omoi_os.models.task import Task + from omoi_os.models.validation_review import ValidationReview + + # Step 1: Request validation + print("\n📤 Step 1: Requesting validation...") + validation_id = await validator.request_validation( + task_id=test_data["task_id"], + sandbox_id=test_data["sandbox_id"], + implementation_result={ + "success": True, + "branch_name": test_data["branch_name"], + "commit_sha": "abc123def456", + }, + ) + print(f" Validation ID: {validation_id}") + + # Verify status + with db.get_session() as session: + task = session.get(Task, test_data["task_id"]) + print(f" Task status: {task.status}") + assert task.status == 
"pending_validation", f"Expected pending_validation, got {task.status}" + print(" ✅ Task is pending_validation") + + # Step 2: Simulate validation passing + print("\n✅ Step 2: Simulating validation PASS...") + await validator.handle_validation_result( + task_id=test_data["task_id"], + validator_agent_id=validator_agent_id, + passed=True, + feedback="All checks passed! Tests: 42/42 pass, Build: success, PR: created", + evidence={ + "tests": {"passed": 42, "failed": 0, "skipped": 0}, + "build": {"status": "success", "duration_s": 12.5}, + "pr": {"url": "https://github.com/test-owner/test-repo/pull/1"}, + "git_status": "clean", + }, + ) + + # Verify final state + with db.get_session() as session: + task = session.get(Task, test_data["task_id"]) + print(f" Task status: {task.status}") + assert task.status == "completed", f"Expected completed, got {task.status}" + print(" ✅ Task is completed") + + reviews = session.query(ValidationReview).filter( + ValidationReview.task_id == test_data["task_id"] + ).all() + print(f" Validation reviews: {len(reviews)}") + assert len(reviews) == 1 + assert reviews[0].validation_passed is True + print(" ✅ ValidationReview record created") + + print("\n" + "=" * 60) + print("✅ VALIDATION PASS TEST COMPLETED SUCCESSFULLY") + print("=" * 60) + + +async def test_validation_fail(db, event_bus, test_data, dry_run=False): + """Test validation that fails.""" + from omoi_os.services.task_validator import TaskValidatorService + from omoi_os.models.task import Task + from unittest.mock import AsyncMock, patch + + print("\n" + "=" * 60) + print("TESTING: Validation FAIL Flow") + print("=" * 60) + + validator = TaskValidatorService(db=db, event_bus=event_bus) + + if dry_run: + with patch.object(validator, '_spawn_validator', new_callable=AsyncMock) as mock_spawn: + validator_agent_id = str(uuid4()) + mock_spawn.return_value = { + "sandbox_id": f"validator-sandbox-{uuid4().hex[:8]}", + "agent_id": validator_agent_id, + } + + await 
_run_validation_fail(validator, db, test_data, validator_agent_id) + else: + print("⚠️ Real spawner mode - run with --dry-run for testing") + + +async def _run_validation_fail(validator, db, test_data, validator_agent_id): + """Run the fail validation flow.""" + from omoi_os.models.task import Task + + # Step 1: Request validation + print("\n📤 Step 1: Requesting validation...") + await validator.request_validation( + task_id=test_data["task_id"], + sandbox_id=test_data["sandbox_id"], + implementation_result={"success": True, "branch_name": test_data["branch_name"]}, + ) + + with db.get_session() as session: + task = session.get(Task, test_data["task_id"]) + print(f" Task status: {task.status}") + assert task.status == "pending_validation" + print(" ✅ Task is pending_validation") + + # Step 2: Simulate validation failing + print("\n❌ Step 2: Simulating validation FAIL...") + await validator.handle_validation_result( + task_id=test_data["task_id"], + validator_agent_id=validator_agent_id, + passed=False, + feedback="Tests failing: 3 unit tests failed. 
Build passes but tests must be fixed.", + evidence={ + "tests": {"passed": 39, "failed": 3, "skipped": 0}, + "failed_tests": ["test_login", "test_logout", "test_session_expiry"], + "build": {"status": "success"}, + }, + recommendations=[ + "Fix test_login: Expected 200, got 401 - check auth middleware", + "Fix test_logout: Session not invalidated - check session cleanup", + "Fix test_session_expiry: Token still valid after expiry - check JWT validation", + ], + ) + + # Verify state + with db.get_session() as session: + task = session.get(Task, test_data["task_id"]) + print(f" Task status: {task.status}") + assert task.status == "needs_revision", f"Expected needs_revision, got {task.status}" + print(" ✅ Task is needs_revision") + print(f" Revision feedback: {task.result.get('revision_feedback', '')[:50]}...") + print(f" Recommendations: {len(task.result.get('revision_recommendations', []))} items") + + print("\n" + "=" * 60) + print("✅ VALIDATION FAIL TEST COMPLETED SUCCESSFULLY") + print("=" * 60) + + +async def test_validation_retry(db, event_bus, test_data, dry_run=False): + """Test validation that fails then passes on retry.""" + from omoi_os.services.task_validator import TaskValidatorService + from omoi_os.models.task import Task + from omoi_os.models.validation_review import ValidationReview + from unittest.mock import AsyncMock, patch + + print("\n" + "=" * 60) + print("TESTING: Validation RETRY Flow (Fail -> Pass)") + print("=" * 60) + + validator = TaskValidatorService(db=db, event_bus=event_bus) + + if not dry_run: + print("⚠️ Retry test requires --dry-run mode") + return + + with patch.object(validator, '_spawn_validator', new_callable=AsyncMock) as mock_spawn: + # First validation - FAIL + validator_agent_id_1 = str(uuid4()) + mock_spawn.return_value = { + "sandbox_id": f"validator-1-{uuid4().hex[:8]}", + "agent_id": validator_agent_id_1, + } + + print("\n📤 Step 1: First validation request...") + await validator.request_validation( + 
task_id=test_data["task_id"], + sandbox_id=test_data["sandbox_id"], + implementation_result={"success": True, "branch_name": test_data["branch_name"]}, + ) + + print("❌ Step 2: First validation FAILS...") + await validator.handle_validation_result( + task_id=test_data["task_id"], + validator_agent_id=validator_agent_id_1, + passed=False, + feedback="Tests failing: 3 tests failed", + recommendations=["Fix the tests"], + ) + + with db.get_session() as session: + task = session.get(Task, test_data["task_id"]) + assert task.status == "needs_revision" + print(f" Task status: {task.status}") + print(f" Iteration: {task.result.get('validation_iteration')}") + + # Simulate implementer fixing and re-running + print("\n🔧 Step 3: Simulating implementer fix...") + with db.get_session() as session: + task = session.get(Task, test_data["task_id"]) + task.status = "running" # Back to running + session.commit() + print(" Task reset to running") + + # Second validation - PASS + validator_agent_id_2 = str(uuid4()) + mock_spawn.return_value = { + "sandbox_id": f"validator-2-{uuid4().hex[:8]}", + "agent_id": validator_agent_id_2, + } + + print("\n📤 Step 4: Second validation request...") + await validator.request_validation( + task_id=test_data["task_id"], + sandbox_id=test_data["sandbox_id"], + implementation_result={ + "success": True, + "branch_name": test_data["branch_name"], + "fixed_tests": True, + }, + ) + + with db.get_session() as session: + task = session.get(Task, test_data["task_id"]) + print(f" Iteration: {task.result.get('validation_iteration')}") + assert task.result.get('validation_iteration') == 2 + + print("✅ Step 5: Second validation PASSES...") + await validator.handle_validation_result( + task_id=test_data["task_id"], + validator_agent_id=validator_agent_id_2, + passed=True, + feedback="All issues resolved! 
All 42 tests pass.", + ) + + # Verify final state + with db.get_session() as session: + task = session.get(Task, test_data["task_id"]) + assert task.status == "completed" + print(f" Final status: {task.status}") + + reviews = session.query(ValidationReview).filter( + ValidationReview.task_id == test_data["task_id"] + ).order_by(ValidationReview.iteration_number).all() + print(f" Total reviews: {len(reviews)}") + assert len(reviews) == 2 + assert reviews[0].validation_passed is False + assert reviews[1].validation_passed is True + + print("\n" + "=" * 60) + print("✅ VALIDATION RETRY TEST COMPLETED SUCCESSFULLY") + print("=" * 60) + + +async def main(): + parser = argparse.ArgumentParser(description="Test the validation flow") + parser.add_argument("--pass", dest="test_pass", action="store_true", help="Test passing validation") + parser.add_argument("--fail", dest="test_fail", action="store_true", help="Test failing validation") + parser.add_argument("--retry", action="store_true", help="Test fail->pass retry flow") + parser.add_argument("--dry-run", action="store_true", help="Don't spawn real sandboxes") + args = parser.parse_args() + + if not any([args.test_pass, args.test_fail, args.retry]): + parser.print_help() + print("\n⚠️ Please specify at least one test: --pass, --fail, or --retry") + sys.exit(1) + + # Initialize services + from omoi_os.services.database import DatabaseService + from omoi_os.services.event_bus import EventBusService + + db_url = os.getenv("DATABASE_URL", "postgresql+psycopg://postgres:postgres@localhost:15432/app_db") + redis_url = os.getenv("REDIS_URL", "redis://localhost:16379") + + print("🔧 Initializing services...") + db = DatabaseService(db_url) + event_bus = EventBusService(redis_url) + print(f" Database: {db_url}") + print(f" Redis: {redis_url}") + + # Create test data + print("\n📦 Creating test data...") + test_data = create_test_data(db) + + # Run requested tests + if args.test_pass: + await test_validation_pass(db, event_bus, 
test_data, dry_run=args.dry_run) + + if args.test_fail: + # Need fresh test data for fail test + test_data = create_test_data(db) + await test_validation_fail(db, event_bus, test_data, dry_run=args.dry_run) + + if args.retry: + # Need fresh test data for retry test + test_data = create_test_data(db) + await test_validation_retry(db, event_bus, test_data, dry_run=args.dry_run) + + print("\n🎉 All tests completed!") + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/backend/tests/integration/__init__.py b/backend/tests/integration/__init__.py index a313ef86..c66cd71b 100644 --- a/backend/tests/integration/__init__.py +++ b/backend/tests/integration/__init__.py @@ -1 +1 @@ -"""Integration tests for OmoiOS components.""" +"""Integration tests package.""" diff --git a/backend/tests/integration/test_validation_integration.py b/backend/tests/integration/test_validation_integration.py new file mode 100644 index 00000000..7ea9a252 --- /dev/null +++ b/backend/tests/integration/test_validation_integration.py @@ -0,0 +1,565 @@ +"""Integration tests for the complete validation flow. + +Tests the full validation workflow from task completion to validation result. 
+""" + +import os +import pytest +from unittest.mock import AsyncMock, MagicMock, patch +from uuid import uuid4 + +from omoi_os.models.agent import Agent +from omoi_os.models.task import Task +from omoi_os.models.ticket import Ticket +from omoi_os.models.project import Project +from omoi_os.models.user import User +from omoi_os.models.validation_review import ValidationReview +from omoi_os.services.database import DatabaseService +from omoi_os.services.event_bus import EventBusService, SystemEvent +from omoi_os.services.task_validator import TaskValidatorService + + +@pytest.fixture +def integration_db(db_service: DatabaseService) -> DatabaseService: + """Database service for integration tests.""" + return db_service + + +@pytest.fixture +def integration_event_bus(event_bus_service: EventBusService) -> EventBusService: + """Event bus service for integration tests.""" + return event_bus_service + + +@pytest.fixture +def test_project(integration_db: DatabaseService, test_user: User) -> Project: + """Create a test project with full GitHub configuration.""" + with integration_db.get_session() as session: + # Update user with GitHub token + user = session.get(User, test_user.id) + user.attributes = {"github_access_token": "ghp_integration_test_token"} + session.commit() + + project = Project( + name="Integration Test Project", + github_owner="integration-owner", + github_repo="integration-repo", + created_by=test_user.id, + ) + session.add(project) + session.commit() + session.refresh(project) + session.expunge(project) + return project + + +@pytest.fixture +def test_ticket(integration_db: DatabaseService, test_project: Project) -> Ticket: + """Create a test ticket linked to the project.""" + with integration_db.get_session() as session: + ticket = Ticket( + title="Integration Test Ticket", + description="Full validation flow test", + phase_id="PHASE_IMPLEMENTATION", + status="in_progress", + project_id=test_project.id, + ) + session.add(ticket) + session.commit() + 
session.refresh(ticket) + session.expunge(ticket) + return ticket + + +@pytest.fixture +def implementer_task(integration_db: DatabaseService, test_ticket: Ticket) -> Task: + """Create a task simulating an implementer's work.""" + with integration_db.get_session() as session: + task = Task( + ticket_id=test_ticket.id, + phase_id="PHASE_IMPLEMENTATION", + task_type="implement_feature", + description="Implement user authentication", + status="running", + sandbox_id=f"impl-sandbox-{uuid4().hex[:8]}", + result={"branch_name": "feature/auth-implementation"}, + ) + session.add(task) + session.commit() + session.refresh(task) + session.expunge(task) + return task + + +@pytest.fixture +def task_validator( + integration_db: DatabaseService, integration_event_bus: EventBusService +) -> TaskValidatorService: + """Create TaskValidatorService with validation enabled.""" + os.environ["TASK_VALIDATION_ENABLED"] = "true" + os.environ["MAX_VALIDATION_ITERATIONS"] = "3" + return TaskValidatorService(db=integration_db, event_bus=integration_event_bus) + + +# ------------------------------------------------------------------------- +# Complete Validation Flow Tests +# ------------------------------------------------------------------------- + + +@pytest.mark.asyncio +async def test_complete_validation_flow_pass( + task_validator: TaskValidatorService, + integration_db: DatabaseService, + implementer_task: Task, +): + """Test complete flow: completion -> validation request -> pass -> completed.""" + task_id = str(implementer_task.id) + sandbox_id = implementer_task.sandbox_id + + # Mock spawner to avoid external calls + with patch.object(task_validator, '_spawn_validator', new_callable=AsyncMock) as mock_spawn: + validator_agent_id = str(uuid4()) + mock_spawn.return_value = { + "sandbox_id": "validator-sandbox-123", + "agent_id": validator_agent_id, + } + + # Step 1: Request validation (simulating agent.completed event) + validation_id = await task_validator.request_validation( + 
task_id=task_id, + sandbox_id=sandbox_id, + implementation_result={ + "success": True, + "branch_name": "feature/auth-implementation", + "commit_sha": "abc123def", + }, + ) + + assert validation_id + + # Verify task is pending_validation + with integration_db.get_session() as session: + task = session.get(Task, implementer_task.id) + assert task.status == "pending_validation" + assert task.result.get("validation_iteration") == 1 + assert task.result.get("validator_sandbox_id") == "validator-sandbox-123" + + # Step 2: Handle validation result (simulating validator completion) + await task_validator.handle_validation_result( + task_id=task_id, + validator_agent_id=validator_agent_id, + passed=True, + feedback="All checks passed. Tests pass, build succeeds, PR created.", + evidence={ + "tests": {"passed": 42, "failed": 0}, + "build": "success", + "pr_url": "https://github.com/owner/repo/pull/123", + }, + ) + + # Verify final state + with integration_db.get_session() as session: + task = session.get(Task, implementer_task.id) + assert task.status == "completed" + assert task.result.get("validation_passed") is True + assert task.result.get("validated_at") is not None + + # Verify review record + reviews = session.query(ValidationReview).filter( + ValidationReview.task_id == task_id + ).all() + assert len(reviews) == 1 + assert reviews[0].validation_passed is True + + +@pytest.mark.asyncio +async def test_complete_validation_flow_fail_and_retry( + task_validator: TaskValidatorService, + integration_db: DatabaseService, + implementer_task: Task, +): + """Test flow: completion -> validation -> fail -> needs_revision -> retry -> pass.""" + task_id = str(implementer_task.id) + sandbox_id = implementer_task.sandbox_id + + with patch.object(task_validator, '_spawn_validator', new_callable=AsyncMock) as mock_spawn: + # First validation attempt + validator_agent_id_1 = str(uuid4()) + mock_spawn.return_value = { + "sandbox_id": "validator-1", + "agent_id": validator_agent_id_1, + 
} + + await task_validator.request_validation( + task_id=task_id, + sandbox_id=sandbox_id, + implementation_result={"success": True, "branch_name": "feature/auth"}, + ) + + # First validation FAILS + await task_validator.handle_validation_result( + task_id=task_id, + validator_agent_id=validator_agent_id_1, + passed=False, + feedback="Tests failing: 3 unit tests failed", + recommendations=["Fix test_login", "Fix test_logout", "Fix test_session"], + ) + + # Verify needs_revision state + with integration_db.get_session() as session: + task = session.get(Task, implementer_task.id) + assert task.status == "needs_revision" + assert task.result.get("revision_feedback") == "Tests failing: 3 unit tests failed" + assert len(task.result.get("revision_recommendations", [])) == 3 + + # Simulate implementer fixing and re-requesting validation + with integration_db.get_session() as session: + task = session.get(Task, implementer_task.id) + task.status = "running" # Back to running for re-implementation + session.commit() + + # Second validation attempt + validator_agent_id_2 = str(uuid4()) + mock_spawn.return_value = { + "sandbox_id": "validator-2", + "agent_id": validator_agent_id_2, + } + + await task_validator.request_validation( + task_id=task_id, + sandbox_id=sandbox_id, + implementation_result={"success": True, "branch_name": "feature/auth", "fixed_tests": True}, + ) + + # Verify iteration incremented + with integration_db.get_session() as session: + task = session.get(Task, implementer_task.id) + assert task.result.get("validation_iteration") == 2 + + # Second validation PASSES + await task_validator.handle_validation_result( + task_id=task_id, + validator_agent_id=validator_agent_id_2, + passed=True, + feedback="All issues resolved. 
All 42 tests pass.", + ) + + # Verify completed state + with integration_db.get_session() as session: + task = session.get(Task, implementer_task.id) + assert task.status == "completed" + assert task.result.get("validation_passed") is True + + # Verify both reviews exist + reviews = session.query(ValidationReview).filter( + ValidationReview.task_id == task_id + ).order_by(ValidationReview.iteration_number).all() + assert len(reviews) == 2 + assert reviews[0].validation_passed is False + assert reviews[1].validation_passed is True + + +@pytest.mark.asyncio +async def test_validation_max_iterations_exceeded( + integration_db: DatabaseService, + integration_event_bus: EventBusService, + implementer_task: Task, +): + """Test that task fails after exceeding max validation iterations.""" + os.environ["MAX_VALIDATION_ITERATIONS"] = "2" + task_validator = TaskValidatorService( + db=integration_db, event_bus=integration_event_bus + ) + + task_id = str(implementer_task.id) + sandbox_id = implementer_task.sandbox_id + + with patch.object(task_validator, '_spawn_validator', new_callable=AsyncMock) as mock_spawn: + # First validation + mock_spawn.return_value = {"sandbox_id": "v1", "agent_id": str(uuid4())} + await task_validator.request_validation( + task_id=task_id, + sandbox_id=sandbox_id, + implementation_result={"success": True}, + ) + await task_validator.handle_validation_result( + task_id=task_id, + validator_agent_id=str(uuid4()), + passed=False, + feedback="First failure", + ) + + # Reset to running + with integration_db.get_session() as session: + task = session.get(Task, implementer_task.id) + task.status = "running" + session.commit() + + # Second validation + mock_spawn.return_value = {"sandbox_id": "v2", "agent_id": str(uuid4())} + await task_validator.request_validation( + task_id=task_id, + sandbox_id=sandbox_id, + implementation_result={"success": True}, + ) + await task_validator.handle_validation_result( + task_id=task_id, + validator_agent_id=str(uuid4()), 
+ passed=False, + feedback="Second failure", + ) + + # Reset to running + with integration_db.get_session() as session: + task = session.get(Task, implementer_task.id) + task.status = "running" + session.commit() + + # Third validation request should fail the task + result = await task_validator.request_validation( + task_id=task_id, + sandbox_id=sandbox_id, + implementation_result={"success": True}, + ) + + assert result == "" # Empty = failed + + with integration_db.get_session() as session: + task = session.get(Task, implementer_task.id) + assert task.status == "failed" + assert "Failed validation after 2 iterations" in task.error_message + + # Reset env + os.environ["MAX_VALIDATION_ITERATIONS"] = "3" + + +# ------------------------------------------------------------------------- +# Branch/Repo Info Propagation Tests +# ------------------------------------------------------------------------- + + +@pytest.mark.asyncio +async def test_branch_name_propagates_to_validator( + task_validator: TaskValidatorService, + integration_db: DatabaseService, + implementer_task: Task, + test_project: Project, +): + """Test that branch_name from task.result is passed to validator.""" + with patch('omoi_os.services.task_validator.get_daytona_spawner') as mock_get_spawner: + mock_spawner = MagicMock() + mock_spawner.spawn_for_task = AsyncMock(return_value="validator-sandbox-xyz") + mock_get_spawner.return_value = mock_spawner + + await task_validator._spawn_validator( + task_id=str(implementer_task.id), + original_sandbox_id=implementer_task.sandbox_id, + iteration=1, + ) + + # Verify branch name passed in extra_env + call_kwargs = mock_spawner.spawn_for_task.call_args[1] + extra_env = call_kwargs["extra_env"] + + assert extra_env["BRANCH_NAME"] == "feature/auth-implementation" + + +@pytest.mark.asyncio +async def test_repo_info_propagates_to_validator( + task_validator: TaskValidatorService, + integration_db: DatabaseService, + implementer_task: Task, + test_project: Project, 
+): + """Test that GitHub repo info from project is passed to validator.""" + with patch('omoi_os.services.task_validator.get_daytona_spawner') as mock_get_spawner: + mock_spawner = MagicMock() + mock_spawner.spawn_for_task = AsyncMock(return_value="validator-sandbox-xyz") + mock_get_spawner.return_value = mock_spawner + + await task_validator._spawn_validator( + task_id=str(implementer_task.id), + original_sandbox_id=implementer_task.sandbox_id, + iteration=1, + ) + + call_kwargs = mock_spawner.spawn_for_task.call_args[1] + extra_env = call_kwargs["extra_env"] + + assert extra_env["GITHUB_REPO"] == f"{test_project.github_owner}/{test_project.github_repo}" + assert extra_env["GITHUB_REPO_OWNER"] == test_project.github_owner + assert extra_env["GITHUB_REPO_NAME"] == test_project.github_repo + + +@pytest.mark.asyncio +async def test_github_token_propagates_to_validator( + task_validator: TaskValidatorService, + integration_db: DatabaseService, + implementer_task: Task, + test_project: Project, + test_user: User, +): + """Test that GitHub token from project owner is passed to validator.""" + with patch('omoi_os.services.task_validator.get_daytona_spawner') as mock_get_spawner: + mock_spawner = MagicMock() + mock_spawner.spawn_for_task = AsyncMock(return_value="validator-sandbox-xyz") + mock_get_spawner.return_value = mock_spawner + + await task_validator._spawn_validator( + task_id=str(implementer_task.id), + original_sandbox_id=implementer_task.sandbox_id, + iteration=1, + ) + + call_kwargs = mock_spawner.spawn_for_task.call_args[1] + extra_env = call_kwargs["extra_env"] + + assert extra_env["GITHUB_TOKEN"] == "ghp_integration_test_token" + assert extra_env["USER_ID"] == str(test_user.id) + + +# ------------------------------------------------------------------------- +# Event Integration Tests +# ------------------------------------------------------------------------- + + +@pytest.mark.asyncio +async def test_validation_events_published_correctly( + task_validator: 
TaskValidatorService, + integration_db: DatabaseService, + integration_event_bus: EventBusService, + implementer_task: Task, +): + """Test that all validation events are published in correct order.""" + task_id = str(implementer_task.id) + events_published = [] + + def capture_event(event): + events_published.append(event.event_type) + + with patch.object(task_validator, '_spawn_validator', new_callable=AsyncMock) as mock_spawn: + validator_agent_id = str(uuid4()) + mock_spawn.return_value = { + "sandbox_id": "v-1", + "agent_id": validator_agent_id, + } + + with patch.object(integration_event_bus, 'publish', side_effect=capture_event): + # Request validation + await task_validator.request_validation( + task_id=task_id, + sandbox_id=implementer_task.sandbox_id, + implementation_result={"success": True}, + ) + + assert "TASK_VALIDATION_REQUESTED" in events_published + + # Pass validation + await task_validator.handle_validation_result( + task_id=task_id, + validator_agent_id=validator_agent_id, + passed=True, + feedback="All good!", + ) + + assert "TASK_VALIDATION_PASSED" in events_published + + +@pytest.mark.asyncio +async def test_validation_failed_event_contains_feedback( + task_validator: TaskValidatorService, + integration_db: DatabaseService, + integration_event_bus: EventBusService, + implementer_task: Task, +): + """Test that TASK_VALIDATION_FAILED event contains feedback and recommendations.""" + task_id = str(implementer_task.id) + captured_event = None + + def capture_event(event): + nonlocal captured_event + if event.event_type == "TASK_VALIDATION_FAILED": + captured_event = event + + with patch.object(task_validator, '_spawn_validator', new_callable=AsyncMock) as mock_spawn: + validator_agent_id = str(uuid4()) + mock_spawn.return_value = {"sandbox_id": "v-1", "agent_id": validator_agent_id} + + await task_validator.request_validation( + task_id=task_id, + sandbox_id=implementer_task.sandbox_id, + implementation_result={"success": True}, + ) + + with 
patch.object(integration_event_bus, 'publish', side_effect=capture_event): + await task_validator.handle_validation_result( + task_id=task_id, + validator_agent_id=validator_agent_id, + passed=False, + feedback="Tests failing", + recommendations=["Fix test_foo", "Fix test_bar"], + ) + + assert captured_event is not None + assert captured_event.payload["feedback"] == "Tests failing" + assert captured_event.payload["recommendations"] == ["Fix test_foo", "Fix test_bar"] + + +# ------------------------------------------------------------------------- +# Edge Cases +# ------------------------------------------------------------------------- + + +@pytest.mark.asyncio +async def test_validation_with_missing_task( + task_validator: TaskValidatorService, +): + """Test validation request for non-existent task.""" + result = await task_validator.request_validation( + task_id="non-existent-task-id", + sandbox_id="sandbox-123", + implementation_result={"success": True}, + ) + + assert result == "" # Empty string indicates failure + + +@pytest.mark.asyncio +async def test_validation_result_for_missing_task( + task_validator: TaskValidatorService, +): + """Test handling validation result for non-existent task.""" + # Should not raise, just log error + await task_validator.handle_validation_result( + task_id="non-existent-task-id", + validator_agent_id=str(uuid4()), + passed=True, + feedback="This should be ignored", + ) + # No assertion - just ensure no exception + + +@pytest.mark.asyncio +async def test_spawn_validator_failure_continues_gracefully( + task_validator: TaskValidatorService, + integration_db: DatabaseService, + implementer_task: Task, +): + """Test that spawner failure doesn't crash the validation flow.""" + with patch.object(task_validator, '_spawn_validator', new_callable=AsyncMock) as mock_spawn: + mock_spawn.return_value = None # Simulate failure + + validation_id = await task_validator.request_validation( + task_id=str(implementer_task.id), + 
sandbox_id=implementer_task.sandbox_id, + implementation_result={"success": True}, + ) + + # Validation ID still returned, task still marked pending_validation + assert validation_id + + with integration_db.get_session() as session: + task = session.get(Task, implementer_task.id) + assert task.status == "pending_validation" + # But no validator info stored + assert task.result.get("validator_sandbox_id") is None diff --git a/backend/tests/test_task_validator_service.py b/backend/tests/test_task_validator_service.py new file mode 100644 index 00000000..204998e5 --- /dev/null +++ b/backend/tests/test_task_validator_service.py @@ -0,0 +1,681 @@ +"""Unit tests for TaskValidatorService. + +Tests the core validation service that spawns validators and handles results. +""" + +import os +import pytest +from unittest.mock import AsyncMock, MagicMock, patch +from uuid import uuid4 + +from omoi_os.models.agent import Agent +from omoi_os.models.task import Task +from omoi_os.models.ticket import Ticket +from omoi_os.models.project import Project +from omoi_os.models.user import User +from omoi_os.models.validation_review import ValidationReview +from omoi_os.services.database import DatabaseService +from omoi_os.services.event_bus import EventBusService +from omoi_os.services.task_validator import TaskValidatorService, get_task_validator + + +@pytest.fixture +def task_validator(db_service: DatabaseService, event_bus_service: EventBusService): + """Create a TaskValidatorService for testing.""" + # Ensure validation is enabled for tests + os.environ["TASK_VALIDATION_ENABLED"] = "true" + os.environ["MAX_VALIDATION_ITERATIONS"] = "3" + return TaskValidatorService(db=db_service, event_bus=event_bus_service) + + +@pytest.fixture +def task_validator_disabled(db_service: DatabaseService, event_bus_service: EventBusService): + """Create a TaskValidatorService with validation disabled.""" + os.environ["TASK_VALIDATION_ENABLED"] = "false" + validator = TaskValidatorService(db=db_service, 
event_bus=event_bus_service) + # Reset for other tests + os.environ["TASK_VALIDATION_ENABLED"] = "true" + return validator + + +@pytest.fixture +def sample_project(db_service: DatabaseService, test_user: User) -> Project: + """Create a sample project with GitHub info.""" + with db_service.get_session() as session: + # Update user with GitHub token + user = session.get(User, test_user.id) + user.attributes = {"github_access_token": "ghp_test_token_12345"} + session.commit() + + project = Project( + name="Test Project", + github_owner="test-owner", + github_repo="test-repo", + created_by=test_user.id, + ) + session.add(project) + session.commit() + session.refresh(project) + session.expunge(project) + return project + + +@pytest.fixture +def sample_ticket_with_project(db_service: DatabaseService, sample_project: Project) -> Ticket: + """Create a sample ticket linked to a project.""" + with db_service.get_session() as session: + ticket = Ticket( + title="Test Ticket", + description="Test description", + phase_id="PHASE_IMPLEMENTATION", + status="in_progress", + project_id=sample_project.id, + ) + session.add(ticket) + session.commit() + session.refresh(ticket) + session.expunge(ticket) + return ticket + + +@pytest.fixture +def running_task_with_sandbox(db_service: DatabaseService, sample_ticket_with_project: Ticket) -> Task: + """Create a running task with sandbox_id and branch_name.""" + with db_service.get_session() as session: + task = Task( + ticket_id=sample_ticket_with_project.id, + phase_id="PHASE_IMPLEMENTATION", + task_type="implement_feature", + description="Test task", + status="running", + sandbox_id=f"sandbox-{uuid4().hex[:8]}", + result={"branch_name": "feature/test-branch"}, + ) + session.add(task) + session.commit() + session.refresh(task) + session.expunge(task) + return task + + +# ------------------------------------------------------------------------- +# request_validation Tests +# 
------------------------------------------------------------------------- + + +@pytest.mark.asyncio +async def test_request_validation_creates_pending_status( + task_validator: TaskValidatorService, + db_service: DatabaseService, + running_task_with_sandbox: Task, +): + """Test that request_validation sets task status to pending_validation.""" + # Mock the spawner to avoid external calls + with patch.object(task_validator, '_spawn_validator', new_callable=AsyncMock) as mock_spawn: + mock_spawn.return_value = { + "sandbox_id": "validator-sandbox-123", + "agent_id": str(uuid4()), + } + + validation_id = await task_validator.request_validation( + task_id=str(running_task_with_sandbox.id), + sandbox_id=running_task_with_sandbox.sandbox_id, + implementation_result={"success": True, "branch_name": "feature/test"}, + ) + + assert validation_id # Non-empty + + # Verify task status updated + with db_service.get_session() as session: + task = session.get(Task, running_task_with_sandbox.id) + assert task.status == "pending_validation" + assert task.result.get("validation_iteration") == 1 + assert task.result.get("implementation_result") == { + "success": True, + "branch_name": "feature/test", + } + + +@pytest.mark.asyncio +async def test_request_validation_increments_iteration( + task_validator: TaskValidatorService, + db_service: DatabaseService, + running_task_with_sandbox: Task, +): + """Test that validation_iteration increments on each validation request.""" + task_id = str(running_task_with_sandbox.id) + sandbox_id = running_task_with_sandbox.sandbox_id + + with patch.object(task_validator, '_spawn_validator', new_callable=AsyncMock) as mock_spawn: + mock_spawn.return_value = {"sandbox_id": "v-1", "agent_id": str(uuid4())} + + # First validation + await task_validator.request_validation( + task_id=task_id, + sandbox_id=sandbox_id, + implementation_result={"success": True}, + ) + + with db_service.get_session() as session: + task = session.get(Task, 
running_task_with_sandbox.id) + assert task.result.get("validation_iteration") == 1 + # Reset status for next validation + task.status = "running" + session.commit() + + # Add a validation review to simulate completed validation + with db_service.get_session() as session: + review = ValidationReview( + task_id=task_id, + validator_agent_id=str(uuid4()), + iteration_number=1, + validation_passed=False, + feedback="First validation failed", + ) + session.add(review) + session.commit() + + # Second validation + mock_spawn.return_value = {"sandbox_id": "v-2", "agent_id": str(uuid4())} + await task_validator.request_validation( + task_id=task_id, + sandbox_id=sandbox_id, + implementation_result={"success": True}, + ) + + with db_service.get_session() as session: + task = session.get(Task, running_task_with_sandbox.id) + assert task.result.get("validation_iteration") == 2 + + +@pytest.mark.asyncio +async def test_request_validation_fails_after_max_iterations( + db_service: DatabaseService, + event_bus_service: EventBusService, + running_task_with_sandbox: Task, +): + """Test that task fails after exceeding MAX_VALIDATION_ITERATIONS.""" + # Set low max iterations for test + os.environ["MAX_VALIDATION_ITERATIONS"] = "2" + task_validator = TaskValidatorService(db=db_service, event_bus=event_bus_service) + + task_id = str(running_task_with_sandbox.id) + + # Add 2 validation reviews (max) + with db_service.get_session() as session: + for i in range(2): + review = ValidationReview( + task_id=task_id, + validator_agent_id=str(uuid4()), + iteration_number=i + 1, + validation_passed=False, + feedback=f"Validation {i+1} failed", + ) + session.add(review) + session.commit() + + # Third validation request should fail the task + result = await task_validator.request_validation( + task_id=task_id, + sandbox_id=running_task_with_sandbox.sandbox_id, + implementation_result={"success": True}, + ) + + assert result == "" # Empty string indicates failure + + with db_service.get_session() 
as session: + task = session.get(Task, running_task_with_sandbox.id) + assert task.status == "failed" + assert "Failed validation after 2 iterations" in task.error_message + + # Reset env + os.environ["MAX_VALIDATION_ITERATIONS"] = "3" + + +@pytest.mark.asyncio +async def test_request_validation_disabled_auto_approves( + task_validator_disabled: TaskValidatorService, + db_service: DatabaseService, + running_task_with_sandbox: Task, +): + """Test that validation disabled auto-approves the task.""" + result = await task_validator_disabled.request_validation( + task_id=str(running_task_with_sandbox.id), + sandbox_id=running_task_with_sandbox.sandbox_id, + implementation_result={"success": True, "output": "Done!"}, + ) + + assert result == "auto-approved" + + with db_service.get_session() as session: + task = session.get(Task, running_task_with_sandbox.id) + assert task.status == "completed" + assert task.result.get("auto_approved") is True + assert task.result.get("output") == "Done!" + + +@pytest.mark.asyncio +async def test_request_validation_stores_validator_info( + task_validator: TaskValidatorService, + db_service: DatabaseService, + running_task_with_sandbox: Task, +): + """Test that validator sandbox_id and agent_id are stored in task.result.""" + validator_sandbox_id = "validator-sandbox-xyz" + validator_agent_id = str(uuid4()) + + with patch.object(task_validator, '_spawn_validator', new_callable=AsyncMock) as mock_spawn: + mock_spawn.return_value = { + "sandbox_id": validator_sandbox_id, + "agent_id": validator_agent_id, + } + + await task_validator.request_validation( + task_id=str(running_task_with_sandbox.id), + sandbox_id=running_task_with_sandbox.sandbox_id, + implementation_result={"success": True}, + ) + + with db_service.get_session() as session: + task = session.get(Task, running_task_with_sandbox.id) + assert task.result.get("validator_sandbox_id") == validator_sandbox_id + assert task.result.get("validator_agent_id") == validator_agent_id + + +# 
------------------------------------------------------------------------- +# handle_validation_result Tests +# ------------------------------------------------------------------------- + + +@pytest.mark.asyncio +async def test_handle_validation_result_passed( + task_validator: TaskValidatorService, + db_service: DatabaseService, + running_task_with_sandbox: Task, +): + """Test that passing validation marks task as completed.""" + task_id = str(running_task_with_sandbox.id) + validator_agent_id = str(uuid4()) + + # Set task to pending_validation + with db_service.get_session() as session: + task = session.get(Task, running_task_with_sandbox.id) + task.status = "pending_validation" + session.commit() + + await task_validator.handle_validation_result( + task_id=task_id, + validator_agent_id=validator_agent_id, + passed=True, + feedback="All checks passed. Code is production-ready.", + evidence={"tests": "passed", "build": "success"}, + ) + + with db_service.get_session() as session: + task = session.get(Task, running_task_with_sandbox.id) + assert task.status == "completed" + assert task.result.get("validation_passed") is True + assert task.result.get("validated_at") is not None + + +@pytest.mark.asyncio +async def test_handle_validation_result_failed( + task_validator: TaskValidatorService, + db_service: DatabaseService, + running_task_with_sandbox: Task, +): + """Test that failing validation marks task as needs_revision.""" + task_id = str(running_task_with_sandbox.id) + validator_agent_id = str(uuid4()) + feedback = "Tests are failing: 3 unit tests failed" + recommendations = ["Fix test_foo", "Fix test_bar"] + + # Set task to pending_validation + with db_service.get_session() as session: + task = session.get(Task, running_task_with_sandbox.id) + task.status = "pending_validation" + session.commit() + + await task_validator.handle_validation_result( + task_id=task_id, + validator_agent_id=validator_agent_id, + passed=False, + feedback=feedback, + 
evidence={"test_output": "FAILED test_foo, test_bar, test_baz"}, + recommendations=recommendations, + ) + + with db_service.get_session() as session: + task = session.get(Task, running_task_with_sandbox.id) + assert task.status == "needs_revision" + assert task.result.get("validation_passed") is False + assert task.result.get("revision_feedback") == feedback + assert task.result.get("revision_recommendations") == recommendations + + +@pytest.mark.asyncio +async def test_handle_validation_result_creates_review_record( + task_validator: TaskValidatorService, + db_service: DatabaseService, + running_task_with_sandbox: Task, +): + """Test that validation result creates a ValidationReview record.""" + task_id = str(running_task_with_sandbox.id) + validator_agent_id = str(uuid4()) + feedback = "All checks passed!" + evidence = {"tests": "passed", "build": "success"} + + await task_validator.handle_validation_result( + task_id=task_id, + validator_agent_id=validator_agent_id, + passed=True, + feedback=feedback, + evidence=evidence, + ) + + with db_service.get_session() as session: + reviews = ( + session.query(ValidationReview) + .filter(ValidationReview.task_id == task_id) + .all() + ) + assert len(reviews) == 1 + review = reviews[0] + assert review.validator_agent_id == validator_agent_id + assert review.validation_passed is True + assert review.feedback == feedback + assert review.evidence == evidence + assert review.iteration_number == 1 + + +@pytest.mark.asyncio +async def test_handle_validation_result_publishes_passed_event( + task_validator: TaskValidatorService, + db_service: DatabaseService, + running_task_with_sandbox: Task, + event_bus_service: EventBusService, +): + """Test that passing validation publishes TASK_VALIDATION_PASSED event.""" + task_id = str(running_task_with_sandbox.id) + + with patch.object(event_bus_service, 'publish') as mock_publish: + await task_validator.handle_validation_result( + task_id=task_id, + validator_agent_id=str(uuid4()), + 
passed=True, + feedback="All checks passed!", + ) + + mock_publish.assert_called_once() + event = mock_publish.call_args[0][0] + assert event.event_type == "TASK_VALIDATION_PASSED" + assert event.entity_id == task_id + assert event.payload["feedback"] == "All checks passed!" + + +@pytest.mark.asyncio +async def test_handle_validation_result_publishes_failed_event( + task_validator: TaskValidatorService, + db_service: DatabaseService, + running_task_with_sandbox: Task, + event_bus_service: EventBusService, +): + """Test that failing validation publishes TASK_VALIDATION_FAILED event.""" + task_id = str(running_task_with_sandbox.id) + recommendations = ["Fix the tests", "Add error handling"] + + with patch.object(event_bus_service, 'publish') as mock_publish: + await task_validator.handle_validation_result( + task_id=task_id, + validator_agent_id=str(uuid4()), + passed=False, + feedback="Tests failing", + recommendations=recommendations, + ) + + mock_publish.assert_called_once() + event = mock_publish.call_args[0][0] + assert event.event_type == "TASK_VALIDATION_FAILED" + assert event.entity_id == task_id + assert event.payload["feedback"] == "Tests failing" + assert event.payload["recommendations"] == recommendations + + +# ------------------------------------------------------------------------- +# _spawn_validator Tests +# ------------------------------------------------------------------------- + + +@pytest.mark.asyncio +async def test_spawn_validator_gets_repo_info( + task_validator: TaskValidatorService, + db_service: DatabaseService, + running_task_with_sandbox: Task, + sample_project: Project, +): + """Test that _spawn_validator extracts repo info from project.""" + # Mock the spawner + with patch('omoi_os.services.task_validator.get_daytona_spawner') as mock_get_spawner: + mock_spawner = MagicMock() + mock_spawner.spawn_for_task = AsyncMock(return_value="validator-sandbox-123") + mock_get_spawner.return_value = mock_spawner + + result = await 
task_validator._spawn_validator( + task_id=str(running_task_with_sandbox.id), + original_sandbox_id=running_task_with_sandbox.sandbox_id, + iteration=1, + ) + + assert result is not None + + # Verify spawn_for_task was called with correct extra_env + call_kwargs = mock_spawner.spawn_for_task.call_args[1] + extra_env = call_kwargs["extra_env"] + + assert extra_env["GITHUB_REPO"] == f"{sample_project.github_owner}/{sample_project.github_repo}" + assert extra_env["GITHUB_REPO_OWNER"] == sample_project.github_owner + assert extra_env["GITHUB_REPO_NAME"] == sample_project.github_repo + assert extra_env["VALIDATION_MODE"] == "true" + assert extra_env["ORIGINAL_TASK_ID"] == str(running_task_with_sandbox.id) + + +@pytest.mark.asyncio +async def test_spawn_validator_gets_branch_name( + task_validator: TaskValidatorService, + db_service: DatabaseService, + running_task_with_sandbox: Task, +): + """Test that _spawn_validator extracts branch_name from task.result.""" + with patch('omoi_os.services.task_validator.get_daytona_spawner') as mock_get_spawner: + mock_spawner = MagicMock() + mock_spawner.spawn_for_task = AsyncMock(return_value="validator-sandbox-123") + mock_get_spawner.return_value = mock_spawner + + await task_validator._spawn_validator( + task_id=str(running_task_with_sandbox.id), + original_sandbox_id=running_task_with_sandbox.sandbox_id, + iteration=1, + ) + + call_kwargs = mock_spawner.spawn_for_task.call_args[1] + extra_env = call_kwargs["extra_env"] + + assert extra_env["BRANCH_NAME"] == "feature/test-branch" + + +@pytest.mark.asyncio +async def test_spawn_validator_gets_github_token( + task_validator: TaskValidatorService, + db_service: DatabaseService, + running_task_with_sandbox: Task, + sample_project: Project, + test_user: User, +): + """Test that _spawn_validator extracts GitHub token from project owner.""" + with patch('omoi_os.services.task_validator.get_daytona_spawner') as mock_get_spawner: + mock_spawner = MagicMock() + mock_spawner.spawn_for_task 
= AsyncMock(return_value="validator-sandbox-123") + mock_get_spawner.return_value = mock_spawner + + await task_validator._spawn_validator( + task_id=str(running_task_with_sandbox.id), + original_sandbox_id=running_task_with_sandbox.sandbox_id, + iteration=1, + ) + + call_kwargs = mock_spawner.spawn_for_task.call_args[1] + extra_env = call_kwargs["extra_env"] + + assert extra_env["GITHUB_TOKEN"] == "ghp_test_token_12345" + assert extra_env["USER_ID"] == str(test_user.id) + + +@pytest.mark.asyncio +async def test_spawn_validator_creates_agent_record( + task_validator: TaskValidatorService, + db_service: DatabaseService, + running_task_with_sandbox: Task, +): + """Test that _spawn_validator creates a validator Agent record.""" + with patch('omoi_os.services.task_validator.get_daytona_spawner') as mock_get_spawner: + mock_spawner = MagicMock() + mock_spawner.spawn_for_task = AsyncMock(return_value="validator-sandbox-123") + mock_get_spawner.return_value = mock_spawner + + result = await task_validator._spawn_validator( + task_id=str(running_task_with_sandbox.id), + original_sandbox_id=running_task_with_sandbox.sandbox_id, + iteration=1, + ) + + assert result is not None + agent_id = result["agent_id"] + + with db_service.get_session() as session: + agent = session.get(Agent, agent_id) + assert agent is not None + assert agent.agent_type == "validator" + assert agent.phase_id == "PHASE_VALIDATION" + assert "validate" in agent.capabilities + assert "validator" in agent.tags + + +@pytest.mark.asyncio +async def test_spawn_validator_handles_failure( + task_validator: TaskValidatorService, + db_service: DatabaseService, + running_task_with_sandbox: Task, +): + """Test that _spawn_validator returns None on failure.""" + with patch('omoi_os.services.task_validator.get_daytona_spawner') as mock_get_spawner: + mock_get_spawner.side_effect = Exception("Daytona unavailable") + + result = await task_validator._spawn_validator( + task_id=str(running_task_with_sandbox.id), + 
original_sandbox_id=running_task_with_sandbox.sandbox_id, + iteration=1, + ) + + assert result is None + + +# ------------------------------------------------------------------------- +# _build_validator_prompt Tests +# ------------------------------------------------------------------------- + + +def test_build_validator_prompt_includes_task_id(task_validator: TaskValidatorService): + """Test that validator prompt includes task_id.""" + task_id = "test-task-123" + prompt = task_validator._build_validator_prompt(task_id, 1) + + assert task_id in prompt + assert "Validation Iteration: 1" in prompt + + +def test_build_validator_prompt_includes_checklist(task_validator: TaskValidatorService): + """Test that validator prompt includes validation checklist.""" + prompt = task_validator._build_validator_prompt("task-123", 1) + + assert "Tests Pass" in prompt + assert "Build Passes" in prompt + assert "Changes Committed" in prompt + assert "Changes Pushed" in prompt + assert "PR Created" in prompt + assert "Code Quality" in prompt + + +# ------------------------------------------------------------------------- +# _auto_approve Tests +# ------------------------------------------------------------------------- + + +@pytest.mark.asyncio +async def test_auto_approve_marks_completed( + task_validator: TaskValidatorService, + db_service: DatabaseService, + running_task_with_sandbox: Task, +): + """Test that _auto_approve marks task as completed.""" + result = await task_validator._auto_approve( + task_id=str(running_task_with_sandbox.id), + result={"success": True, "output": "Implementation done!"}, + ) + + assert result == "auto-approved" + + with db_service.get_session() as session: + task = session.get(Task, running_task_with_sandbox.id) + assert task.status == "completed" + assert task.result.get("auto_approved") is True + assert task.result.get("success") is True + assert task.result.get("output") == "Implementation done!" 
+ assert task.result.get("completed_at") is not None + + +# ------------------------------------------------------------------------- +# get_task_validator Tests +# ------------------------------------------------------------------------- + + +def test_get_task_validator_factory(db_service: DatabaseService, event_bus_service: EventBusService): + """Test the get_task_validator factory function.""" + validator = get_task_validator(db=db_service, event_bus=event_bus_service) + + assert isinstance(validator, TaskValidatorService) + assert validator.db == db_service + assert validator.event_bus == event_bus_service + + +# ------------------------------------------------------------------------- +# Event Publishing Tests +# ------------------------------------------------------------------------- + + +@pytest.mark.asyncio +async def test_request_validation_publishes_event( + task_validator: TaskValidatorService, + db_service: DatabaseService, + running_task_with_sandbox: Task, + event_bus_service: EventBusService, +): + """Test that request_validation publishes TASK_VALIDATION_REQUESTED event.""" + task_id = str(running_task_with_sandbox.id) + sandbox_id = running_task_with_sandbox.sandbox_id + + with patch.object(task_validator, '_spawn_validator', new_callable=AsyncMock) as mock_spawn: + mock_spawn.return_value = {"sandbox_id": "v-1", "agent_id": str(uuid4())} + + with patch.object(event_bus_service, 'publish') as mock_publish: + await task_validator.request_validation( + task_id=task_id, + sandbox_id=sandbox_id, + implementation_result={"success": True}, + ) + + mock_publish.assert_called_once() + event = mock_publish.call_args[0][0] + assert event.event_type == "TASK_VALIDATION_REQUESTED" + assert event.entity_id == task_id + assert event.payload["sandbox_id"] == sandbox_id + assert event.payload["iteration"] == 1 diff --git a/docs/design/continuous_claude_sdk.md b/docs/design/continuous_claude_sdk.md new file mode 100644 index 00000000..afb5e6e4 --- /dev/null +++ 
b/docs/design/continuous_claude_sdk.md @@ -0,0 +1,849 @@ +# Continuous Claude SDK Design + +**Created**: 2025-12-29 +**Status**: Draft +**Purpose**: Design a Python implementation of continuous/iterative Claude Code execution using the Claude Agent SDK, equivalent to the `continuous-claude` bash script. Integrates with the existing OmoiOS sandbox worker infrastructure. + +## Overview + +This document describes a Python implementation that runs Claude Code iteratively with automatic session management, cost tracking, completion detection, and optional git integration. The implementation leverages the `claude-agent-sdk` Python package for native async interaction with Claude Code. + +**Key Integration**: This design builds on the existing `claude_sandbox_worker.py` architecture, reusing its `EventReporter`, `MessagePoller`, `WorkerConfig`, and session management patterns. + +## Problem Statement + +The original `continuous-claude` bash script provides: +1. Iterative execution with configurable limits (runs, cost, duration) +2. Shared context via notes files between iterations +3. Completion signal detection to stop when project is done +4. Git integration (branch creation, PR management, merge queuing) +5. 
Cost tracking and budget enforcement + +The goal is to replicate this functionality using the Python SDK for: +- Cleaner code with proper typing +- Native async/await patterns +- Direct access to SDK features (session resume, cost tracking, structured outputs) +- **Integration with existing OmoiOS infrastructure** (EventReporter, callbacks, session portability) + +## Architecture + +``` +┌──────────────────────────────────────────────────────────────────────────────┐ +│ Continuous Claude Runner (ContinuousSandboxWorker) │ +├──────────────────────────────────────────────────────────────────────────────┤ +│ ┌────────────────────┐ ┌────────────────────┐ ┌────────────────────────┐ │ +│ │ WorkerConfig │ │ IterationState │ │ Git Integration │ │ +│ │ (extends existing)│ │ Tracker │ │ (Optional) │ │ +│ └─────────┬──────────┘ └─────────┬──────────┘ └───────────┬────────────┘ │ +│ │ │ │ │ +│ └───────────────────────┼──────────────────────────┘ │ +│ │ │ +│ ▼ │ +│ ┌────────────────────────────────────────────────────────────────────────┐ │ +│ │ Main Iteration Loop Controller │ │ +│ │ - Checks limits (runs, cost, duration) │ │ +│ │ - Detects completion signals │ │ +│ │ - Manages iteration state │ │ +│ │ - Reports events via EventReporter │ │ +│ └──────────────────────────────────┬─────────────────────────────────────┘ │ +│ │ │ +│ ▼ │ +│ ┌────────────────────────────────────────────────────────────────────────┐ │ +│ │ Single Iteration Executor (reuses SandboxWorker._process) │ │ +│ │ - Builds enhanced prompt with context │ │ +│ │ - Calls ClaudeSDKClient.query() │ │ +│ │ - Processes messages (Assistant, Result, System) │ │ +│ │ - Reports events via EventReporter │ │ +│ │ - Tracks cost and session ID │ │ +│ └──────────────────────────────────┬─────────────────────────────────────┘ │ +│ │ │ +├─────────────────────────────────────┼─────────────────────────────────────────┤ +│ Reused from claude_sandbox_worker.py │ +│ ┌──────────────────┐ ┌──────────────────┐ 
┌──────────────────────────┐ │ +│ │ EventReporter │ │ MessagePoller │ │ FileChangeTracker │ │ +│ │ (HTTP callbacks)│ │ (injected msgs) │ │ (diff generation) │ │ +│ └──────────────────┘ └──────────────────┘ └──────────────────────────┘ │ +└─────────────────────────────────────┼─────────────────────────────────────────┘ + │ + ▼ +┌──────────────────────────────────────────────────────────────────────────────┐ +│ claude-agent-sdk │ +├──────────────────────────────────────────────────────────────────────────────┤ +│ ClaudeSDKClient │ ClaudeAgentOptions │ Message Types │ +│ (bidirectional) │ (configuration) │ (Assistant, Result, etc.) │ +└──────────────────────────────────────────────────────────────────────────────┘ + │ + ▼ +┌──────────────────────────────────────────────────────────────────────────────┐ +│ OmoiOS Main Server (callback_url) │ +├──────────────────────────────────────────────────────────────────────────────┤ +│ POST /api/v1/sandboxes/{id}/events │ GET /api/v1/sandboxes/{id}/messages │ +│ (receives iteration events) │ (provides injected messages) │ +└──────────────────────────────────────────────────────────────────────────────┘ +``` + +## Core Components + +### 1. ContinuousWorkerConfig (extends existing WorkerConfig) + +The continuous iteration config extends the existing `WorkerConfig` from `claude_sandbox_worker.py` with iteration-specific settings. This ensures full compatibility with the OmoiOS infrastructure. + +```python +class ContinuousWorkerConfig(WorkerConfig): + """Extended configuration for continuous Claude execution. + + Inherits all WorkerConfig features: + - sandbox_id, task_id, agent_id, ticket_id + - callback_url for EventReporter + - model, api_key, permission_mode + - resume_session_id, session_transcript_b64 + - enable_spec_tools, enable_skills, enable_subagents + + Adds iteration-specific settings from environment variables. 
+ """ + + def __init__(self): + super().__init__() + + # Iteration limits (at least one should be set) + # Environment: CONTINUOUS_MAX_RUNS, CONTINUOUS_MAX_COST_USD, CONTINUOUS_MAX_DURATION + self.max_runs: Optional[int] = self._get_int_env("CONTINUOUS_MAX_RUNS") + self.max_cost_usd: Optional[float] = self._get_float_env("CONTINUOUS_MAX_COST_USD") + self.max_duration_seconds: Optional[int] = self._get_int_env("CONTINUOUS_MAX_DURATION") + + # Completion detection + self.completion_signal = os.environ.get( + "CONTINUOUS_COMPLETION_SIGNAL", + "CONTINUOUS_CLAUDE_PROJECT_COMPLETE" + ) + self.completion_threshold = int(os.environ.get("CONTINUOUS_COMPLETION_THRESHOLD", "3")) + + # Notes file for cross-iteration context + self.notes_file = os.environ.get("CONTINUOUS_NOTES_FILE", "SHARED_TASK_NOTES.md") + + # Git/GitHub settings (reuses existing github_token, github_repo, branch_name) + self.enable_commits = os.environ.get("CONTINUOUS_ENABLE_COMMITS", "true").lower() == "true" + self.git_branch_prefix = os.environ.get("CONTINUOUS_BRANCH_PREFIX", "continuous-claude/") + self.merge_strategy = os.environ.get("CONTINUOUS_MERGE_STRATEGY", "squash") + + # Iteration mode flag + self.continuous_mode = os.environ.get("CONTINUOUS_MODE", "false").lower() == "true" + + def _get_int_env(self, key: str) -> Optional[int]: + val = os.environ.get(key) + return int(val) if val else None + + def _get_float_env(self, key: str) -> Optional[float]: + val = os.environ.get(key) + return float(val) if val else None + + def validate_continuous(self) -> list[str]: + """Validate continuous-specific configuration.""" + errors = super().validate() + + # At least one limit must be set + if not any([self.max_runs, self.max_cost_usd, self.max_duration_seconds]): + errors.append( + "At least one limit required: CONTINUOUS_MAX_RUNS, " + "CONTINUOUS_MAX_COST_USD, or CONTINUOUS_MAX_DURATION" + ) + + return errors +``` + +**Environment Variables for Continuous Mode:** + +| Variable | Description | Default | 
+|----------|-------------|---------| +| `CONTINUOUS_MODE` | Enable continuous iteration mode | `false` | +| `CONTINUOUS_MAX_RUNS` | Maximum successful iterations | None | +| `CONTINUOUS_MAX_COST_USD` | Maximum total cost in USD | None | +| `CONTINUOUS_MAX_DURATION` | Maximum duration in seconds | None | +| `CONTINUOUS_COMPLETION_SIGNAL` | Phrase to detect completion | `CONTINUOUS_CLAUDE_PROJECT_COMPLETE` | +| `CONTINUOUS_COMPLETION_THRESHOLD` | Consecutive signals to stop | `3` | +| `CONTINUOUS_NOTES_FILE` | Notes file for context sharing | `SHARED_TASK_NOTES.md` | +| `CONTINUOUS_ENABLE_COMMITS` | Enable git commits per iteration | `true` | +| `CONTINUOUS_BRANCH_PREFIX` | Git branch prefix | `continuous-claude/` | +| `CONTINUOUS_MERGE_STRATEGY` | PR merge strategy | `squash` | + +### 2. IterationState + +Tracks runtime state across iterations. This is reported to the main server via `EventReporter` after each iteration. + +```python +@dataclass +class IterationState: + """Tracks state across iterations.""" + + iteration_num: int = 0 # Current iteration + successful_iterations: int = 0 # Completed successfully + error_count: int = 0 # Consecutive errors + extra_iterations: int = 0 # Added due to errors + total_cost: float = 0.0 # Accumulated cost + completion_signal_count: int = 0 # Consecutive completion signals + start_time: Optional[float] = None # For duration tracking + last_session_id: Optional[str] = None # For potential resume + last_transcript_b64: Optional[str] = None # For cross-sandbox resumption + + def to_event_data(self) -> dict: + """Convert state to event payload for EventReporter.""" + return { + "iteration_num": self.iteration_num, + "successful_iterations": self.successful_iterations, + "error_count": self.error_count, + "extra_iterations": self.extra_iterations, + "total_cost_usd": self.total_cost, + "completion_signal_count": self.completion_signal_count, + "elapsed_seconds": time.time() - self.start_time if self.start_time else 0, + 
"last_session_id": self.last_session_id, + } +``` + +### 3. EventReporter Integration (Reused) + +The continuous worker reuses the existing `EventReporter` from `claude_sandbox_worker.py` to report iteration events back to the main OmoiOS server. + +```python +# Reused from claude_sandbox_worker.py - no changes needed +class EventReporter: + """Reports events back to main server via HTTP POST.""" + + async def report( + self, + event_type: str, + event_data: dict[str, Any], + source: str = "agent", + ) -> bool: + """Report event to main server with full context.""" + url = f"{self.config.callback_url}/api/v1/sandboxes/{self.config.sandbox_id}/events" + # ... existing implementation +``` + +**Iteration-Specific Event Types:** + +| Event Type | When Emitted | Payload | +|------------|--------------|---------| +| `iteration.started` | Before each iteration begins | `{iteration_num, prompt_preview, state}` | +| `iteration.completed` | After successful iteration | `{iteration_num, cost_usd, session_id, transcript_b64, output_preview}` | +| `iteration.failed` | After iteration error | `{iteration_num, error, error_type, retry_allowed}` | +| `iteration.completion_signal` | When completion signal detected | `{iteration_num, signal_count, threshold}` | +| `continuous.started` | At start of continuous run | `{config, limits, goal}` | +| `continuous.completed` | At end of continuous run | `{state, stop_reason, total_iterations, total_cost}` | +| `continuous.limit_reached` | When a limit is hit | `{limit_type, limit_value, current_value}` | + +**Example Event Flow:** + +``` +continuous.started → iteration.started → agent.tool_use → agent.message → +iteration.completed → iteration.started → ... → iteration.completion_signal → +continuous.completed +``` + +### 4. Prompt Enhancement + +Each iteration gets an enhanced prompt with: + +1. **Workflow context** - Explains the continuous loop and completion signal +2. **Primary goal** - The user's original prompt +3. 
**Previous iteration notes** - Content from `SHARED_TASK_NOTES.md` if exists +4. **Notes update instructions** - How to update the notes file + +```python +WORKFLOW_CONTEXT_TEMPLATE = """## CONTINUOUS WORKFLOW CONTEXT + +This is part of a continuous development loop where work happens +incrementally across multiple iterations... + +**Project Completion Signal**: If you determine that the ENTIRE project +goal is fully complete, include the exact phrase "{completion_signal}" +in your response... + +## PRIMARY GOAL + +{prompt} +""" +``` + +## Message Processing + +The SDK provides typed messages that we process in the iteration: + +| Message Type | SDK Class | Information Extracted | +|--------------|-----------|----------------------| +| System Init | `SystemMessage` | `session_id` | +| Claude Response | `AssistantMessage` | Text content, tool usage | +| Tool Results | `ToolResultBlock` | Tool execution results | +| Completion | `ResultMessage` | `total_cost_usd`, `is_error`, final `session_id` | + +```python +async for message in query(prompt=enhanced_prompt, options=options): + if isinstance(message, SystemMessage): + if message.subtype == "init": + session_id = message.data.get("session_id") + + elif isinstance(message, AssistantMessage): + for block in message.content: + if isinstance(block, TextBlock): + result_text += block.text + + elif isinstance(message, ResultMessage): + iteration_cost = message.total_cost_usd + if message.is_error: + # Handle error + pass +``` + +## Limit Enforcement + +The main loop checks limits before each iteration: + +```python +def should_continue(config: ContinuousClaudeConfig, state: IterationState) -> bool: + # Check completion signal threshold + if state.completion_signal_count >= config.completion_threshold: + return False + + # Check max runs + if config.max_runs and state.successful_iterations >= config.max_runs: + return False + + # Check max cost + if config.max_cost_usd and state.total_cost >= config.max_cost_usd: + return 
False + + # Check max duration + if config.max_duration_seconds and state.start_time: + elapsed = time.time() - state.start_time + if elapsed >= config.max_duration_seconds: + return False + + return True +``` + +## Error Handling Strategy + +| Error Type | Handling | +|------------|----------| +| Claude error (rate limit, auth) | Increment error count, retry | +| 3 consecutive errors | Fatal exit | +| Single error | Add extra iteration, continue | +| Cost/duration exceeded | Graceful stop | + +```python +if success: + state.successful_iterations += 1 + state.error_count = 0 # Reset on success +else: + state.error_count += 1 + state.extra_iterations += 1 + + if state.error_count >= 3: + print("Fatal: 3 consecutive errors. Exiting.") + sys.exit(1) +``` + +## SDK Features Leveraged + +### 1. Native Async Iteration + +```python +# SDK handles subprocess management internally +async for message in query(prompt=prompt, options=options): + # Process typed messages + pass +``` + +### 2. Built-in Cost Tracking + +```python +elif isinstance(message, ResultMessage): + state.total_cost += message.total_cost_usd or 0.0 +``` + +### 3. Session Resume (for future enhancement) + +```python +# Resume previous session +options = ClaudeAgentOptions( + resume=state.last_session_id, + # ... other options +) +``` + +### 4. Permission Modes + +```python +options = ClaudeAgentOptions( + permission_mode="acceptEdits", # Auto-approve file edits + # Or "bypassPermissions" for full automation +) +``` + +## Git Integration (Optional) + +When `enable_commits=True`: + +1. **Branch Creation** - Create `continuous-claude/iteration-N/YYYY-MM-DD-hash` +2. **Commit** - Use Claude to write commit message and commit +3. **PR Creation** - Create PR via `gh` CLI +4. **Wait for Checks** - Poll for CI/review status +5. **Merge** - Merge with configured strategy +6. **Cleanup** - Delete branch, pull latest + +This mirrors the bash script's behavior using `subprocess` calls to `git` and `gh`. 
+ +## ContinuousSandboxWorker (Main Implementation) + +The `ContinuousSandboxWorker` extends the existing `SandboxWorker` to add iteration loop logic. It reuses the message processing, event reporting, and hooks infrastructure. + +```python +class ContinuousSandboxWorker(SandboxWorker): + """Extended worker for continuous/iterative Claude execution. + + Inherits from SandboxWorker: + - EventReporter for HTTP callbacks + - MessagePoller for injected messages + - FileChangeTracker for diffs + - Pre/Post tool hooks + - Session transcript export/import + + Adds: + - Iteration loop with configurable limits + - Completion signal detection + - Cross-iteration context via notes file + - Per-iteration event reporting + """ + + def __init__(self, config: ContinuousWorkerConfig): + super().__init__(config) + self.continuous_config = config + self.iteration_state = IterationState() + + async def run_continuous(self): + """Main continuous execution loop.""" + self._setup_signal_handlers() + self.running = True + self.iteration_state.start_time = time.time() + + # Validate continuous configuration + errors = self.continuous_config.validate_continuous() + if errors: + logger.error("Configuration errors", extra={"errors": errors}) + return 1 + + async with EventReporter(self.continuous_config) as reporter: + self.reporter = reporter + + # Report continuous session start + await reporter.report( + "continuous.started", + { + "goal": self.continuous_config.initial_prompt, + "limits": { + "max_runs": self.continuous_config.max_runs, + "max_cost_usd": self.continuous_config.max_cost_usd, + "max_duration_seconds": self.continuous_config.max_duration_seconds, + }, + "completion_signal": self.continuous_config.completion_signal, + "completion_threshold": self.continuous_config.completion_threshold, + }, + source="worker", + ) + + # Main iteration loop + while self._should_continue(): + self.iteration_state.iteration_num += 1 + success = await self._run_single_iteration() + + if success: 
+ self.iteration_state.successful_iterations += 1 + self.iteration_state.error_count = 0 + else: + self.iteration_state.error_count += 1 + self.iteration_state.extra_iterations += 1 + + if self.iteration_state.error_count >= 3: + await reporter.report( + "continuous.completed", + { + "stop_reason": "consecutive_errors", + **self.iteration_state.to_event_data(), + }, + ) + return 1 + + # Report completion + stop_reason = self._get_stop_reason() + await reporter.report( + "continuous.completed", + { + "stop_reason": stop_reason, + **self.iteration_state.to_event_data(), + }, + ) + + return 0 + + def _should_continue(self) -> bool: + """Check if iteration should continue.""" + state = self.iteration_state + config = self.continuous_config + + # Check completion signal threshold + if state.completion_signal_count >= config.completion_threshold: + return False + + # Check max runs + if config.max_runs and state.successful_iterations >= config.max_runs: + return False + + # Check max cost + if config.max_cost_usd and state.total_cost >= config.max_cost_usd: + return False + + # Check max duration + if config.max_duration_seconds and state.start_time: + elapsed = time.time() - state.start_time + if elapsed >= config.max_duration_seconds: + return False + + # Check shutdown signal + if self._should_stop: + return False + + return True + + def _get_stop_reason(self) -> str: + """Determine why the loop stopped.""" + state = self.iteration_state + config = self.continuous_config + + if state.completion_signal_count >= config.completion_threshold: + return "completion_signal" + if config.max_runs and state.successful_iterations >= config.max_runs: + return "max_runs_reached" + if config.max_cost_usd and state.total_cost >= config.max_cost_usd: + return "max_cost_reached" + if config.max_duration_seconds and state.start_time: + elapsed = time.time() - state.start_time + if elapsed >= config.max_duration_seconds: + return "max_duration_reached" + if self._should_stop: + return 
"shutdown_signal" + return "unknown" + + async def _run_single_iteration(self) -> bool: + """Execute a single iteration. + + Returns True on success, False on error. + """ + state = self.iteration_state + config = self.continuous_config + + # Build enhanced prompt with context + enhanced_prompt = self._build_iteration_prompt() + + # Report iteration start + await self.reporter.report( + "iteration.started", + { + "iteration_num": state.iteration_num, + "prompt_preview": enhanced_prompt[:500], + **state.to_event_data(), + }, + ) + + try: + # Create SDK options with hooks (reuses parent's method) + pre_hook = await self._create_pre_tool_hook() + post_hook = await self._create_post_tool_hook() + sdk_options = config.to_sdk_options( + pre_tool_hook=pre_hook, + post_tool_hook=post_hook, + ) + + # Execute iteration + async with ClaudeSDKClient(options=sdk_options) as client: + await client.query(enhanced_prompt) + result, output = await self._process_messages(client) + + if result: + # Track cost and session + iteration_cost = getattr(result, "total_cost_usd", 0.0) or 0.0 + state.total_cost += iteration_cost + state.last_session_id = getattr(result, "session_id", None) + + # Export transcript for cross-sandbox resumption + if state.last_session_id: + state.last_transcript_b64 = config.export_session_transcript( + state.last_session_id + ) + + # Check for completion signal + output_text = "\n".join(output) if output else "" + if config.completion_signal in output_text: + state.completion_signal_count += 1 + await self.reporter.report( + "iteration.completion_signal", + { + "iteration_num": state.iteration_num, + "signal_count": state.completion_signal_count, + "threshold": config.completion_threshold, + }, + ) + else: + state.completion_signal_count = 0 # Reset on non-signal + + # Report iteration completion + await self.reporter.report( + "iteration.completed", + { + "iteration_num": state.iteration_num, + "cost_usd": iteration_cost, + "session_id": 
state.last_session_id, + "transcript_b64": state.last_transcript_b64, + "output_preview": output_text[:1000] if output_text else None, + **state.to_event_data(), + }, + ) + + return True + + else: + # Iteration failed (no ResultMessage) + await self.reporter.report( + "iteration.failed", + { + "iteration_num": state.iteration_num, + "error": "No result message received", + "error_type": "no_result", + "retry_allowed": state.error_count < 2, + }, + ) + return False + + except Exception as e: + await self.reporter.report( + "iteration.failed", + { + "iteration_num": state.iteration_num, + "error": str(e), + "error_type": type(e).__name__, + "retry_allowed": state.error_count < 2, + }, + ) + return False + + def _build_iteration_prompt(self) -> str: + """Build enhanced prompt with iteration context.""" + config = self.continuous_config + state = self.iteration_state + + # Read notes file if exists + notes_content = "" + notes_path = Path(config.cwd) / config.notes_file + if notes_path.exists(): + try: + notes_content = notes_path.read_text() + except Exception: + pass + + # Build enhanced prompt + prompt_parts = [ + f"## CONTINUOUS WORKFLOW CONTEXT", + f"", + f"This is iteration {state.iteration_num} of a continuous development loop.", + f"Work incrementally across iterations. Update the notes file to preserve context.", + f"", + f"**Limits**: max_runs={config.max_runs}, max_cost=${config.max_cost_usd}, ", + f"max_duration={config.max_duration_seconds}s", + f"**Progress**: {state.successful_iterations} successful iterations, ", + f"${state.total_cost:.2f} spent", + f"", + f"**Project Completion Signal**: If you determine the ENTIRE project goal is ", + f"fully complete, include the exact phrase \"{config.completion_signal}\" in your ", + f"response. 
This must appear {config.completion_threshold} consecutive times to ", + f"stop the loop.", + f"", + f"## PRIMARY GOAL", + f"", + config.initial_prompt or config.ticket_description or "No goal specified", + ] + + if notes_content: + prompt_parts.extend([ + f"", + f"## PREVIOUS ITERATION NOTES", + f"", + notes_content, + ]) + + prompt_parts.extend([ + f"", + f"## NOTES UPDATE INSTRUCTIONS", + f"", + f"Update `{config.notes_file}` with:", + f"- What you accomplished this iteration", + f"- What remains to be done", + f"- Any blockers or issues", + f"- Important context for the next iteration", + ]) + + return "\n".join(prompt_parts) +``` + +**Key Integration Points:** + +1. **Inherits `SandboxWorker`** - Reuses `_process_messages()`, hooks, file tracking +2. **Uses `EventReporter`** - Reports iteration events via HTTP callbacks +3. **Session Portability** - Uses `export_session_transcript()` for cross-sandbox resumption +4. **Notes File** - Provides cross-iteration context similar to bash script +5. 
**Configurable via Environment** - All settings from environment variables + +## CLI Interface + +```bash +# Install +pip install claude-agent-sdk + +# Run with max iterations +python continuous_claude.py -p "Add unit tests" -m 10 + +# Run with cost limit +python continuous_claude.py -p "Fix bugs" --max-cost 5.00 + +# Run with time limit +python continuous_claude.py -p "Refactor" --max-duration 2h + +# Combine limits (first reached stops) +python continuous_claude.py -p "Feature work" -m 20 --max-cost 10.00 --max-duration 4h + +# Without git integration +python continuous_claude.py -p "Experiment" -m 5 --disable-commits +``` + +## Comparison: Bash vs Python SDK vs OmoiOS Integration + +| Aspect | Bash Script | Python SDK (Standalone) | OmoiOS Integration | +|--------|------------|------------------------|-------------------| +| Subprocess management | Manual | SDK handles internally | SDK handles internally | +| JSON parsing | `jq` dependency | Native Python | Native Python | +| Type safety | None | Full typing with dataclasses | Full typing + Pydantic | +| Async handling | Background processes | Native async/await | Native async/await | +| Cost tracking | Parse JSON output | `ResultMessage.total_cost_usd` | Events to server DB | +| Session management | External tracking | Built-in `resume` option | Cross-sandbox via HTTP | +| Error handling | Exit codes | Exceptions with context | Events + retries | +| Integration | Standalone script | Importable module | Full OmoiOS ecosystem | +| Event reporting | None | Manual implementation | `EventReporter` (HTTP) | +| Message injection | None | Manual implementation | `MessagePoller` | +| File diff tracking | None | Manual implementation | `FileChangeTracker` | +| Dashboard visibility | None | None | Real-time UI updates | + +**Key OmoiOS Advantages:** + +1. **Real-time Monitoring** - Iteration progress visible in OmoiOS dashboard +2. **Event Persistence** - All iteration events stored in database +3. 
**Cross-Sandbox Resumption** - Session transcripts portable via HTTP +4. **Human-in-the-Loop** - Inject messages during iterations via `MessagePoller` +5. **File Diffs** - Track changes with `FileChangeTracker` for code review +6. **Existing Infrastructure** - Reuses proven `EventReporter`, hooks, MCP tools + +## Future Enhancements + +1. **Parallel Execution** - Run multiple Claude instances on different tasks +2. **Worktree Support** - Use git worktrees for parallel git operations +3. **Webhook Notifications** - Send status updates to external services +4. **Structured Outputs** - Use JSON schema for iteration reports +5. **Custom Hooks** - Add pre/post iteration hooks for custom logic +6. **Database Persistence** - Track iteration history in SQLite/PostgreSQL + +## Implementation File Location + +The implementation should extend the existing `claude_sandbox_worker.py` to maintain code reuse and consistency with the OmoiOS infrastructure. + +**Option A: Extend Existing Worker (Recommended)** + +Add continuous mode directly to the existing worker: + +``` +backend/omoi_os/workers/ +├── claude_sandbox_worker.py # Existing worker (add ContinuousWorkerConfig) +├── continuous_sandbox_worker.py # NEW: ContinuousSandboxWorker class +└── __init__.py # Export both workers +``` + +The worker can be invoked with `CONTINUOUS_MODE=true` to enable iteration: + +```bash +# Standard mode (existing behavior) +SANDBOX_ID=xxx CALLBACK_URL=http://... python claude_sandbox_worker.py + +# Continuous mode (new behavior) +CONTINUOUS_MODE=true \ +CONTINUOUS_MAX_RUNS=10 \ +CONTINUOUS_MAX_COST_USD=5.00 \ +SANDBOX_ID=xxx \ +CALLBACK_URL=http://... 
\ +python continuous_sandbox_worker.py +``` + +**Option B: Standalone Script** + +If standalone usage is needed (without OmoiOS backend): + +``` +scripts/continuous_claude/ +├── __init__.py +├── continuous_claude.py # Standalone implementation (copies EventReporter) +├── config.py # Standalone configuration +└── README.md # Usage documentation +``` + +**Recommended Approach**: Option A - extend existing worker to maximize code reuse. + +## Server-Side Integration + +The OmoiOS main server needs endpoints to receive iteration events: + +```python +# Already exists in api/v1/sandboxes.py +POST /api/v1/sandboxes/{sandbox_id}/events + +# Event types to handle: +# - continuous.started +# - continuous.completed +# - continuous.limit_reached +# - iteration.started +# - iteration.completed +# - iteration.failed +# - iteration.completion_signal +``` + +**Database Schema Extension (Optional):** + +If tracking iteration history is desired: + +```sql +CREATE TABLE continuous_iterations ( + id UUID PRIMARY KEY DEFAULT gen_random_uuid(), + sandbox_id UUID REFERENCES sandboxes(id), + iteration_num INTEGER NOT NULL, + status VARCHAR(20) NOT NULL, -- 'completed', 'failed' + cost_usd DECIMAL(10, 4), + session_id VARCHAR(255), + transcript_b64 TEXT, -- For cross-sandbox resumption + output_preview TEXT, + started_at TIMESTAMP WITH TIME ZONE, + completed_at TIMESTAMP WITH TIME ZONE, + created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW() +); + +CREATE INDEX idx_continuous_iterations_sandbox ON continuous_iterations(sandbox_id); +``` + +## References + +- [Claude Agent SDK Python](https://github.com/anthropics/claude-agent-sdk-python) +- [Original continuous-claude bash script](https://github.com/AnandChowdhary/continuous-claude) +- [SDK Documentation](https://deepwiki.com/anthropics/claude-agent-sdk-python) +- [Existing Worker Implementation](../backend/omoi_os/workers/claude_sandbox_worker.py) diff --git a/docs/design/frontend/command_workflow_modes.md 
b/docs/design/frontend/command_workflow_modes.md new file mode 100644 index 00000000..2e464a35 --- /dev/null +++ b/docs/design/frontend/command_workflow_modes.md @@ -0,0 +1,325 @@ +# Command Page Workflow Modes Design + +**Created**: 2025-12-30 +**Status**: Draft +**Purpose**: Design document for adding workflow mode selection to the Command Center page + +--- + +## Overview + +The Command Center (`/command`) is the primary entry point for authenticated users. Currently it supports a single workflow: describe → create ticket → spawn sandbox. This document outlines the design for supporting multiple workflow modes from the command page. + +--- + +## Workflow Modes + +### Mode 1: Quick Implementation (Default) + +**Purpose**: Fast path for simple tasks - directly spawn an agent to implement. + +**User Flow**: +``` +1. User types prompt: "Add a login button to the header" + ↓ +2. System creates ticket (PHASE_IMPLEMENTATION) + ↓ +3. Orchestrator spawns sandbox + ↓ +4. Redirect to /sandbox/:id for real-time monitoring + ↓ +5. Agent implements and creates PR +``` + +**Best For**: +- Small bug fixes +- Simple features +- Quick experiments +- Tasks that don't need extensive planning + +**Requirements**: Project must be selected + +--- + +### Mode 2: Spec-Driven Development + +**Purpose**: Structured workflow for complex features requiring planning and approval gates. + +**User Flow**: +``` +1. User types prompt: "Add payment processing with Stripe" + ↓ +2. System analyzes codebase and generates: + - Requirements (EARS-style WHEN/SHALL patterns) + - Design (architecture, data models, APIs) + - Tasks (discrete units with dependencies) + ↓ +3. User reviews/edits in Spec Workspace + ↓ +4. User approves Requirements → Design → Plan + ↓ +5. System creates tickets for each task + ↓ +6. Orchestrator assigns to agents + ↓ +7. Parallel execution with Guardian monitoring + ↓ +8. 
PRs created, user reviews and merges +``` + +**Best For**: +- New features +- Complex integrations +- Multi-component changes +- Production-critical work + +**Requirements**: Project must be selected + +--- + +## UI Design (Dropdown Approach) + +### Layout + +``` +┌──────────────────────────────────────────────────────────────────┐ +│ │ +│ What would you like to do? │ +│ │ +│ ┌──────────────────────────────────────────────────────────┐ │ +│ │ │ │ +│ │ Describe what you want to build... │ │ +│ │ │ │ +│ └──────────────────────────────────────────────────────────┘ │ +│ │ +│ [⚡ Quick ▼] [Project selector] [Model] [Submit →] │ +│ │ +│ ┌──────────────────────────────────────────────────────────┐ │ +│ │ ℹ️ Agent will immediately start implementing │ │ +│ └──────────────────────────────────────────────────────────┘ │ +│ │ +└──────────────────────────────────────────────────────────────────┘ +``` + +### Dropdown Options + +``` +┌─────────────────────────────────┐ +│ ⚡ Quick ✓ │ ← Default +├─────────────────────────────────┤ +│ 📋 Spec-Driven │ +└─────────────────────────────────┘ +``` + +Each option shows: +- Icon (visual distinction) +- Mode name +- Checkmark for selected + +### Behavior by Mode + +| Mode | Placeholder | Helper Text | Submit Label | +|------|-------------|-------------|--------------| +| Quick | "Describe what you want to build..." | "Agent will immediately start implementing" | "Launch →" | +| Spec-Driven | "Describe the feature to plan..." | "We'll generate requirements & design for approval" | "Create Spec →" | + +--- + +## Mode-Specific UI Elements + +### Quick Mode +``` +Placeholder: "Describe what you want to build..." +Helper text: "An agent will immediately start implementing your request" +Submit button: "Launch →" +``` + +### Spec-Driven Mode +``` +Placeholder: "Describe the feature you want to plan..." 
+Helper text: "We'll generate requirements, design, and tasks for your review" +Submit button: "Create Spec →" +``` + +--- + +## Data Flow & API Parameters + +### Parameter Differences by Mode + +| Parameter | Quick | Spec-Driven | +|-----------|-------|-------------| +| `workflow_mode` | `"quick"` | `"spec_driven"` | +| `phase_id` | `"PHASE_IMPLEMENTATION"` | `"PHASE_REQUIREMENTS"` | +| `auto_spawn_sandbox` | `true` | `false` | +| `generate_spec` | `false` | `true` | + +### Quick Mode (Current Implementation) +```typescript +// Uses existing ticket creation flow +POST /api/tickets +{ + title: prompt.slice(0, 100), + description: prompt, + phase_id: "PHASE_IMPLEMENTATION", + priority: "MEDIUM", + workflow_mode: "quick", // NEW: identifies mode + auto_spawn_sandbox: true // NEW: tells orchestrator to spawn immediately +} + +// Response: ticket created, orchestrator spawns sandbox +// Frontend: waits for SANDBOX_SPAWNED event, redirects to /sandbox/:id +``` + +### Spec-Driven Mode (New) +```typescript +// Creates a spec with auto-generated requirements/design/tasks +POST /api/tickets +{ + title: prompt.slice(0, 100), + description: prompt, + phase_id: "PHASE_REQUIREMENTS", // Starts in requirements phase + priority: "MEDIUM", + workflow_mode: "spec_driven", // Triggers spec generation + generate_spec: true, // Auto-generate requirements & design + auto_spawn_sandbox: false // Don't spawn until user approves +} + +// Response: ticket created with spec_id +{ + id: "ticket-123", + spec_id: "spec-456", // Generated spec attached + status: "pending_approval" +} + +// Frontend: redirects to /specs/:spec_id for review & approval +``` + +### Unified Submit Handler + +```typescript +const handleSubmit = async (prompt: string) => { + const basePayload = { + title: prompt.slice(0, 100) + (prompt.length > 100 ? "..." 
: ""), + description: prompt, + priority: "MEDIUM", + project_id: selectedProject?.id, + } + + let payload: CreateTicketPayload + + switch (selectedMode) { + case "quick": + payload = { + ...basePayload, + phase_id: "PHASE_IMPLEMENTATION", + workflow_mode: "quick", + auto_spawn_sandbox: true, + } + // Wait for sandbox, redirect to /sandbox/:id + break + + case "spec_driven": + payload = { + ...basePayload, + phase_id: "PHASE_REQUIREMENTS", + workflow_mode: "spec_driven", + generate_spec: true, + auto_spawn_sandbox: false, + } + // Redirect to /specs/:spec_id + break + } + + const result = await createTicketMutation.mutateAsync(payload) + // Handle redirect based on mode... +} +``` + +--- + +## Navigation After Submit + +| Mode | Destination | Purpose | +|------|-------------|---------| +| Quick | `/sandbox/:id` | Real-time agent monitoring | +| Spec-Driven | `/specs/:id` | Spec workspace for review/approval | + +--- + +## State Machine + +``` + ┌─────────────────┐ + │ Command Page │ + │ (Mode Select) │ + └────────┬────────┘ + │ + ┌──────────────┴──────────────┐ + │ │ + ▼ ▼ + ┌─────────┐ ┌───────────┐ + │ Quick │ │ Spec-Drv │ + │ Mode │ │ Mode │ + └────┬────┘ └─────┬─────┘ + │ │ + ▼ ▼ + ┌───────────┐ ┌───────────┐ + │ Sandbox │ │ Spec │ + │ Detail │ │ Workspace │ + └───────────┘ └─────┬─────┘ + │ + (After approvals) + │ + ▼ + ┌───────────┐ + │ Sandboxes │ + │ (Parallel)│ + └───────────┘ +``` + +--- + +## Implementation Phases + +### Phase 1: UI Foundation +- Add mode selector dropdown to command page +- Update prompt placeholders based on mode +- Add mode-specific helper text +- Track selected mode in state + +### Phase 2: Quick Mode Polish +- Ensure current flow works as "Quick Mode" +- Add mode indicator to created tickets +- Update sandbox detail to show mode + +### Phase 3: Spec-Driven Mode +- Create spec generation API endpoint +- Build Spec Workspace page (`/specs/:id`) +- Add approval gate UI +- Connect to task creation on approval + +--- + +## Open Questions 
+ +1. **Mode Persistence**: Should we remember the user's last-used mode? +2. **Mode Suggestions**: Should we suggest modes based on prompt analysis? +3. **Hybrid Flows**: Can users switch modes mid-flow (e.g., quick → spec-driven)? +4. **Keyboard Shortcuts**: Should the two modes have keyboard shortcuts (e.g., Cmd+1 for Quick, Cmd+2 for Spec-Driven)? + +--- + +## Related Documentation + +- [Command Center Design](../figma_prompts/prompt_4a_command_center.md) +- [Feature Planning User Journey](../user_journey/02_feature_planning.md) +- [Execution Monitoring](../user_journey/03_execution_monitoring.md) +- [MCP Spec Workflow Server](./mcp_spec_workflow_server.md) + +--- + +**Next Steps**: +1. Review and approve design +2. Implement Phase 1 (UI Foundation) +3. Iterate based on feedback diff --git a/docs/page_flows/03_agents_workspaces.md b/docs/page_flows/03_agents_workspaces.md index 0b477994..2f5f2d32 100644 --- a/docs/page_flows/03_agents_workspaces.md +++ b/docs/page_flows/03_agents_workspaces.md @@ -1,133 +1,256 @@ -# 3 Agents Workspaces +# 3 Sandboxes & Agent Execution **Part of**: [Page Flow Documentation](./README.md) +**Note**: The UI has shifted from an "agent-based" approach to a "sandbox-based" approach. Users interact with sandboxes (isolated execution environments) rather than directly spawning agents. Sandboxes are created automatically when tasks are launched from the Command Center.
+ +--- + +## Architecture Overview + +The sandbox-based workflow follows this pattern: + +``` +Command Center (/command) + │ + │ User describes what they want to build + ▼ + Create Ticket (API) + │ + │ Backend orchestrator receives ticket + ▼ + Spawn Sandbox (Automatic) + │ + │ Agent runs in isolated sandbox + ▼ + Sandbox Detail View (/sandbox/:sandboxId) + │ + │ Real-time events stream via WebSocket + ▼ + User monitors progress & can send messages +``` + +--- + +## UI Components + +### IconRail Navigation + +The vertical icon navigation includes these sections: + +| Icon | Section | Route | Description | +|------|---------|-------|-------------| +| Terminal | Command | `/command` | Primary landing - launch new tasks | +| FolderGit2 | Projects | `/projects` | Project management | +| Workflow | Phases | `/phases` | Phase management | +| Box | Sandboxes | `/sandboxes` | View all sandboxes | +| BarChart3 | Analytics | `/analytics` | Usage analytics | +| Building2 | Organizations | `/organizations` | Organization settings | +| Settings | Settings | `/settings` | User settings | + +### ContextualPanel (Sidebar) + +The contextual sidebar changes based on the current route: + +| Route | Panel | Content | +|-------|-------|---------| +| `/command` | TasksPanel | Running/Pending/Completed/Failed tasks grouped by status | +| `/sandbox/*` | TasksPanel | Same task list with selected sandbox highlighted | +| `/projects` | ProjectsPanel | Project list with favorites/active sections | +| `/phases` | PhasesPanel | Phase configuration | +| `/board/*` | ProjectsPanel | Project context for board view | +| `/health` | HealthPanel | System health metrics | +| `/graph/*` | GraphFiltersPanel | Graph filter options | + --- -### Flow 4: Agent Management & Spawning + +### Flow 4: Sandbox-Based Execution (Current Implementation) ``` ┌─────────────────────────────────────────────────────────────┐ -│ PAGE: /agents (Agent List) │ +│ PAGE: /command (Command Center) │ │ │ -│ 
┌──────────────────────────────────────────────────────┐ │ -│ │ Agents │ │ -│ │ │ │ -│ │ [Spawn Agent] [View Health] │ │ -│ │ │ │ -│ │ ┌──────────────────────────────────────────────┐ │ │ -│ │ │ Agent: worker-1 │ │ │ -│ │ │ Status: 🟢 Active │ │ │ -│ │ │ Phase: IMPLEMENTATION │ │ │ -│ │ │ Current Task: "Implement JWT" │ │ │ -│ │ │ Heartbeat: 5s ago ✓ │ │ │ -│ │ │ [View Details] [Intervene] │ │ │ -│ │ └──────────────────────────────────────────────┘ │ │ -│ │ │ │ -│ │ ┌──────────────────────────────────────────────┐ │ │ -│ │ │ Agent: worker-2 │ │ │ -│ │ │ Status: 🟡 Idle │ │ │ -│ │ │ Phase: INTEGRATION │ │ │ -│ │ │ Current Task: None │ │ │ -│ │ │ Heartbeat: 2s ago ✓ │ │ │ -│ │ │ [View Details] [Assign Task] │ │ │ -│ │ └──────────────────────────────────────────────┘ │ │ -│ │ │ │ -│ │ ┌──────────────────────────────────────────────┐ │ │ -│ │ │ Agent: worker-3 │ │ │ -│ │ │ Status: 🔴 Stuck │ │ │ -│ │ │ Phase: IMPLEMENTATION │ │ │ -│ │ │ Current Task: "Setup OAuth2" │ │ │ -│ │ │ Heartbeat: 95s ago ⚠️ │ │ │ -│ │ │ Guardian: Intervention sent 30s ago │ │ │ -│ │ │ [View Details] [Force Intervene] │ │ │ -│ │ └──────────────────────────────────────────────┘ │ │ -│ └──────────────────────────────────────────────────────┘ │ +│ ┌────────────────────────────────────────────────────────┐ │ +│ │ IconRail │ ContextualPanel (TasksPanel) │ Main Content │ │ +│ ├──────────┼───────────────────────────────┼──────────────┤ │ +│ │ │ │ │ │ +│ │ [Logo] │ [Search tasks...] │ "What would │ │ +│ │ │ [Filter] [Sort] │ you like │ │ +│ │ Terminal │ [+ New Task] │ to build?" │ │ +│ │ (active) │ │ │ │ +│ │ │ RUNNING │ ┌──────────┐ │ │ +│ │ Projects │ ┌────────────────────────┐ │ │[Prompt ]│ │ │ +│ │ │ │⟳ Fix authentication │ │ │[input... 
]│ │ │ +│ │ Phases │ │ running • 5m │ │ └──────────┘ │ │ +│ │ │ └────────────────────────┘ │ │ │ +│ │ Sandbox │ │ [Model ▼] │ │ +│ │ │ COMPLETED │ │ │ +│ │ Analytics│ ┌────────────────────────┐ │ ┌──────────┐ │ │ +│ │ │ │✓ Add user dashboard │ │ │📁 repo ▼ │ │ │ +│ │ Orgs │ │ completed • 1h │ │ │⎇ main ▼ │ │ │ +│ │ │ └────────────────────────┘ │ └──────────┘ │ │ +│ │ Settings │ │ │ │ +│ │ │ FAILED │ │ │ +│ │ │ ┌────────────────────────┐ │ │ │ +│ │ │ │✗ DB migration │ │ │ │ +│ │ │ │ failed • 2h │ │ │ │ +│ │ │ └────────────────────────┘ │ │ │ +│ └──────────┴───────────────────────────────┴──────────────┘ │ │ │ └───────────────────────────┬──────────────────────────────────┘ │ - │ Click "Spawn Agent" + │ User types prompt and submits │ ▼ ┌─────────────────────────────────────────────────────────────┐ -│ PAGE: /agents/spawn (Spawn Agent Modal) │ +│ PAGE: /command (Launching State) │ │ │ │ ┌──────────────────────────────────────────────────────┐ │ -│ │ Spawn New Agent │ │ │ │ │ │ -│ │ Agent Type: │ │ -│ │ ○ Worker (Execution) │ │ -│ │ ○ Planner (Planning) │ │ -│ │ ○ Validator (Testing) │ │ +│ │ ┌─────────────────────────────────────────────────┐│ │ +│ │ │ ⟳ Creating task... ││ │ +│ │ │ ⟳ Launching sandbox environment... ││ │ +│ │ └─────────────────────────────────────────────────┘│ │ │ │ │ │ -│ │ Phase Assignment: │ │ -│ │ [Select Phase ▼] │ │ -│ │ • PHASE_INITIAL │ │ -│ │ • PHASE_IMPLEMENTATION │ │ -│ │ • PHASE_INTEGRATION │ │ -│ │ • PHASE_REFACTORING │ │ -│ │ │ │ -│ │ Capabilities: │ │ -│ │ ☑ File Editing │ │ -│ │ ☑ Terminal Access │ │ -│ │ ☑ Code Generation │ │ -│ │ ☐ Testing │ │ -│ │ │ │ -│ │ Project: │ │ -│ │ [Select Project ▼] │ │ -│ │ │ │ -│ │ [Cancel] [Spawn Agent] │ │ │ └──────────────────────────────────────────────────────┘ │ │ │ +│ Steps: │ +│ 1. Create ticket via API │ +│ 2. Wait for SANDBOX_SPAWNED event via WebSocket │ +│ 3. 
Redirect to /sandbox/:sandboxId │ +│ │ └───────────────────────────┬──────────────────────────────────┘ │ - │ Click "Spawn Agent" + │ Sandbox created, redirect triggered │ ▼ ┌─────────────────────────────────────────────────────────────┐ -│ PAGE: /agents/:agentId (Agent Detail View) │ +│ PAGE: /sandbox/:sandboxId (Sandbox Detail View) │ +│ │ +│ ┌────────────────────────────────────────────────────────┐ │ +│ │ IconRail │ ContextualPanel (TasksPanel) │ Main Content │ │ +│ ├──────────┼───────────────────────────────┼──────────────┤ │ +│ │ │ │ │ │ +│ │ [Logo] │ [Search tasks...] │ ← Back to │ │ +│ │ │ │ Command │ │ +│ │ Terminal │ RUNNING │ │ │ +│ │ │ ┌────────────────────────┐ │ 🤖 Task Name │ │ +│ │ Projects │ │⟳ Fix authentication │←──│ [Running] │ │ +│ │ │ │ selected • running │ │ │ │ +│ │ Phases │ └────────────────────────┘ │ sandbox-id │ │ +│ │ │ │ │ │ +│ │ Sandbox │ COMPLETED │ [🟢 Live] │ │ +│ │ (active) │ ... │ [Refresh] │ │ +│ │ │ │ │ │ +│ │ Analytics│ │ [Events] │ │ +│ │ │ │ [Details] │ │ +│ │ Orgs │ │ │ │ +│ │ │ │ ┌──────────┐ │ │ +│ │ Settings │ │ │ agent. │ │ │ +│ │ │ │ │ thinking │ │ │ +│ │ │ │ │ ... │ │ │ +│ │ │ │ │ │ │ │ +│ │ │ │ │ agent. 
│ │ │ +│ │ │ │ │ tool_use │ │ │ +│ │ │ │ │ Read() │ │ │ +│ │ │ │ └──────────┘ │ │ +│ │ │ │ │ │ +│ │ │ │ ┌──────────┐ │ │ +│ │ │ │ │[Message ]│ │ │ +│ │ │ │ │[to agent]│ │ │ +│ │ │ │ └──────────┘ │ │ +│ └──────────┴───────────────────────────────┴──────────────┘ │ +│ │ +└─────────────────────────────────────────────────────────────┘ +``` + +--- + +### Flow 5: Sandbox Detail View Features + +``` +┌─────────────────────────────────────────────────────────────┐ +│ PAGE: /sandbox/:sandboxId - Events Tab │ │ │ │ ┌──────────────────────────────────────────────────────┐ │ -│ │ Agent: worker-1 │ │ -│ │ Status: 🟢 Active │ │ -│ │ Phase: IMPLEMENTATION │ │ +│ │ ← Back to Command │ │ +│ │ │ │ +│ │ 🤖 Build Authentication System │ │ +│ │ [Running] sandbox-abc123 │ │ +│ │ [🟢 Live] [⟳] │ │ │ └──────────────────────────────────────────────────────┘ │ │ │ │ ┌──────────────────────────────────────────────────────┐ │ -│ │ Tabs: [Overview] [Trajectory] [Tasks] [Logs] │ │ +│ │ [Events] [Details] │ │ +│ │ ^^^^^^^^ (active) │ │ │ └──────────────────────────────────────────────────────┘ │ │ │ │ ┌──────────────────────────────────────────────────────┐ │ -│ │ Overview Tab │ │ +│ │ Events Stream (real-time via WebSocket) │ │ +│ │ │ │ +│ │ ┌────────────────────────────────────────────────┐│ │ +│ │ │ 💭 agent.thinking ││ │ +│ │ │ "I'll analyze the codebase structure..." 
││ │ +│ │ └────────────────────────────────────────────────┘│ │ │ │ │ │ -│ │ Current Task: "Implement JWT" │ │ -│ │ Progress: 60% │ │ -│ │ Heartbeat: 5s ago ✓ │ │ +│ │ ┌────────────────────────────────────────────────┐│ │ +│ │ │ 🔧 agent.tool_completed - Read ││ │ +│ │ │ src/auth/login.ts ││ │ +│ │ │ ▼ Show content ││ │ +│ │ └────────────────────────────────────────────────┘│ │ │ │ │ │ -│ │ Recent Activity: │ │ -│ │ • Started task "Implement JWT" 10m ago │ │ -│ │ • Committed changes 5m ago │ │ -│ │ • Guardian intervention 2m ago │ │ +│ │ ┌────────────────────────────────────────────────┐│ │ +│ │ │ ✏️ agent.tool_completed - Write ││ │ +│ │ │ src/auth/jwt.ts (new file) ││ │ +│ │ │ ▼ Show diff ││ │ +│ │ └────────────────────────────────────────────────┘│ │ │ │ │ │ -│ │ [View Trajectory] [Send Intervention] │ │ +│ │ ┌────────────────────────────────────────────────┐│ │ +│ │ │ 💻 agent.tool_completed - Bash ││ │ +│ │ │ $ npm install jsonwebtoken ││ │ +│ │ │ ▼ Show output ││ │ +│ │ └────────────────────────────────────────────────┘│ │ +│ └──────────────────────────────────────────────────────┘ │ +│ │ +│ ┌──────────────────────────────────────────────────────┐ │ +│ │ [Send a message to the agent...] 
│ │ +│ │ [____________________________________] [Send] │ │ │ └──────────────────────────────────────────────────────┘ │ │ │ +└─────────────────────────────────────────────────────────────┘ +``` + +``` +┌─────────────────────────────────────────────────────────────┐ +│ PAGE: /sandbox/:sandboxId - Details Tab │ +│ │ │ ┌──────────────────────────────────────────────────────┐ │ -│ │ Trajectory Tab │ │ +│ │ [Events] [Details] │ │ +│ │ ^^^^^^^^^ (active) │ │ +│ └──────────────────────────────────────────────────────┘ │ +│ │ +│ ┌──────────────────────────────────────────────────────┐ │ +│ │ Task Information │ │ +│ │ │ │ +│ │ Task ID: task-xyz789 │ │ +│ │ Sandbox ID: sandbox-abc123 │ │ +│ │ Status: [Running] │ │ +│ │ Priority: Medium │ │ +│ │ Task Type: implementation │ │ +│ │ Phase: PHASE_IMPLEMENTATION │ │ +│ │ Created: Dec 30, 2025 10:30 AM │ │ +│ │ Started: Dec 30, 2025 10:31 AM │ │ │ │ │ │ -│ │ Alignment Score: 78% │ │ -│ │ ┌──────────────────────────────────────────────┐ │ │ -│ │ │ Timeline: │ │ │ -│ │ │ │ │ │ -│ │ │ [10m] Started task │ │ │ -│ │ │ [8m] Analyzing requirements │ │ │ -│ │ │ [6m] Writing code │ │ │ -│ │ │ [4m] Guardian: "Focus on core flow" │ │ │ -│ │ │ [2m] Adjusted approach │ │ │ -│ │ │ [now] Testing implementation │ │ │ -│ │ │ │ │ │ -│ │ └──────────────────────────────────────────────┘ │ │ +│ │ Description: │ │ +│ │ Build an authentication system with OAuth2... │ │ +│ └──────────────────────────────────────────────────────┘ │ +│ │ +│ ┌──────────────────────────────────────────────────────┐ │ +│ │ Event Summary │ │ │ │ │ │ -│ │ [View Full Trajectory] │ │ +│ │ Total Events: 47 │ │ +│ │ Tool Uses: 23 │ │ +│ │ File Edits: 8 │ │ │ └──────────────────────────────────────────────────────┘ │ │ │ └─────────────────────────────────────────────────────────────┘ @@ -137,195 +260,163 @@ ## API Integration -### Backend Endpoints +### Backend Endpoints (Sandbox-Based) -All agent endpoints are prefixed with `/api/v1/`. 
- ---- - -### GET /api/v1/agents -**Description:** List all registered agents - -**Response (200):** -```json -[ - { - "agent_id": "uuid", - "agent_type": "worker", - "phase_id": "PHASE_IMPLEMENTATION", - "status": "idle", - "capabilities": ["python", "analysis"], - "capacity": 2, - "health_status": "healthy", - "tags": ["python"], - "last_heartbeat": "2025-01-15T10:00:00Z", - "created_at": "2025-01-15T09:00:00Z" - } -] +#### Task Creation (via Command Center) ``` - ---- - -### POST /api/v1/agents/register -**Description:** Register a new agent +POST /api/v1/tickets +``` +Creates a ticket which triggers the orchestrator to spawn a sandbox. **Request Body:** ```json { - "agent_type": "worker", + "title": "Build authentication system", + "description": "Full prompt text...", "phase_id": "PHASE_IMPLEMENTATION", - "capabilities": ["python", "javascript", "analysis"], - "capacity": 2, - "status": "idle", - "tags": ["frontend", "backend"] + "priority": "MEDIUM", + "check_duplicates": false, + "force_create": true } ``` **Response (201):** ```json { - "agent_id": "uuid", - "agent_type": "worker", - "phase_id": "PHASE_IMPLEMENTATION", - "status": "idle", - "capabilities": ["python", "javascript", "analysis"], - "capacity": 2, - "health_status": "healthy", - "tags": ["frontend", "backend"], - "last_heartbeat": null, - "created_at": "2025-01-15T10:00:00Z" + "id": "ticket-uuid", + "title": "Build authentication system", + "status": "OPEN", + "phase_id": "PHASE_IMPLEMENTATION" } ``` --- -### GET /api/v1/agents/{agent_id} -**Description:** Get specific agent details +#### WebSocket Events (Real-Time Updates) -**Path Params:** `agent_id` (string) +The frontend subscribes to events via WebSocket at `/api/v1/events/stream`. 
---- +**Sandbox Events:** +- `SANDBOX_CREATED` / `SANDBOX_SPAWNED` / `sandbox.spawned` - Sandbox is ready +- `TASK_STARTED` - Task execution began +- `TASK_SANDBOX_ASSIGNED` - Sandbox assigned to task -### PATCH /api/v1/agents/{agent_id} -**Description:** Update agent properties - -**Request Body (all fields optional):** -```json -{ - "capabilities": ["python", "go"], - "capacity": 3, - "status": "busy", - "tags": ["high-priority"], - "health_status": "degraded" -} -``` +**Agent Events (streamed to sandbox detail page):** +- `agent.thinking` - Agent reasoning +- `agent.tool_use` - Tool invocation started +- `agent.tool_completed` - Tool finished with result +- `agent.file_edited` - File modification +- `agent.message` - Agent text output --- -### GET /api/v1/agents/health -**Description:** Get health status for all agents +#### GET /api/v1/tasks +**Description:** List tasks (used by TasksPanel) **Query Params:** -- `timeout_seconds` (optional): Custom timeout for stale detection (default: 90) +- `limit` (optional): Max tasks to return +- `status` (optional): Filter by status **Response (200):** ```json [ { - "agent_id": "uuid", - "health_status": "healthy", - "last_heartbeat": "2025-01-15T10:00:00Z", - "seconds_since_heartbeat": 15, - "is_stale": false + "id": "task-uuid", + "title": "Build authentication system", + "task_type": "implementation", + "status": "running", + "sandbox_id": "sandbox-uuid", + "ticket_id": "ticket-uuid", + "created_at": "2025-12-30T10:30:00Z", + "started_at": "2025-12-30T10:31:00Z" } ] ``` --- -### GET /api/v1/agents/{agent_id}/health -**Description:** Get health for specific agent +#### GET /api/v1/tasks/by-sandbox/:sandboxId +**Description:** Get task associated with a sandbox + +**Response (200):** +```json +{ + "id": "task-uuid", + "title": "Build authentication system", + "description": "Full prompt...", + "task_type": "implementation", + "status": "running", + "sandbox_id": "sandbox-uuid", + "priority": "MEDIUM", + "phase_id": 
"PHASE_IMPLEMENTATION", + "created_at": "2025-12-30T10:30:00Z", + "started_at": "2025-12-30T10:31:00Z" +} +``` --- -### GET /api/v1/agents/statistics -**Description:** Get comprehensive agent statistics +#### GET /api/v1/sandboxes/:sandboxId/events +**Description:** Get historical events for a sandbox (WebSocket provides real-time) **Response (200):** ```json -{ - "total_agents": 10, - "by_status": { "idle": 5, "busy": 3, "maintenance": 2 }, - "by_type": { "worker": 8, "monitor": 2 }, - "by_health": { "healthy": 8, "degraded": 1, "stale": 1 } -} +[ + { + "id": "event-uuid", + "sandbox_id": "sandbox-uuid", + "event_type": "agent.thinking", + "event_data": { + "content": "I'll analyze the codebase..." + }, + "created_at": "2025-12-30T10:31:05Z" + } +] ``` --- -### POST /api/v1/agents/{agent_id}/heartbeat -**Description:** Send heartbeat from agent +#### POST /api/v1/sandboxes/:sandboxId/message +**Description:** Send a message to the agent in a sandbox **Request Body:** ```json { - "agent_id": "uuid", - "sequence_number": 42, - "health_metrics": { - "cpu_percent": 45.5, - "memory_percent": 60.2, - "disk_percent": 35.0 - }, - "current_task_id": "task-uuid", - "checksum": "sha256-hash" + "content": "Can you also add password validation?" } ``` **Response (200):** ```json { - "received": true, - "acknowledged_sequence": 42, - "server_timestamp": "2025-01-15T10:00:00Z", - "message": "Heartbeat acknowledged" + "success": true, + "message_id": "msg-uuid" } ``` --- -### GET /api/v1/agents/search -**Description:** Search for agents by capabilities - -**Query Params:** -- `capabilities`: List of required capabilities -- `phase_id` (optional): Limit to specific phase -- `agent_type` (optional): Filter by agent type -- `limit` (default: 5, max: 20) - -**Response (200):** -```json -[ - { - "agent": { "agent_id": "uuid", "...": "..." 
}, - "match_score": 0.85, - "matched_capabilities": ["python", "analysis"] - } -] -``` - ---- - -### GET /api/v1/agents/stale -**Description:** Get list of stale agents - ---- - -### POST /api/v1/agents/cleanup-stale -**Description:** Mark stale agents for cleanup - -**Query Params:** -- `timeout_seconds` (optional): Custom timeout -- `mark_as` (default: "timeout"): Status to mark stale agents +## Component Summary + +### TasksPanel (`/components/panels/TasksPanel.tsx`) +- Displays tasks grouped by status (Running, Pending, Completed, Failed) +- Search/filter functionality +- Highlights selected sandbox when on `/sandbox/:sandboxId` route +- "New Task" button links to `/command` + +### Command Page (`/app/(app)/command/page.tsx`) +- Primary prompt input for describing tasks +- Repository/branch selector +- Model selector +- Shows launch progress states +- Automatically redirects to sandbox when ready + +### Sandbox Detail Page (`/app/(app)/sandbox/[sandboxId]/page.tsx`) +- Real-time event streaming via WebSocket +- Events tab: Shows agent activity (thinking, tool use, file edits) +- Details tab: Task metadata and event summary +- Message input: Send messages to agent +- Connection status indicator (Live/Disconnected) --- diff --git a/docs/page_flows/10_command_center.md b/docs/page_flows/10_command_center.md index bd2c6f78..73b084c6 100644 --- a/docs/page_flows/10_command_center.md +++ b/docs/page_flows/10_command_center.md @@ -2,7 +2,34 @@ **Part of**: [Page Flow Documentation](./README.md) -**Design Reference**: Cursor BG Agent - warm cream background (#F5F5F0), minimal header, left sidebar with agent history, centered prompt input. +**Note**: The Command Center is the primary landing page for authenticated users. It follows a sandbox-based approach where users describe what they want to build, and the system spawns an isolated sandbox environment with an AI agent. 
+ +--- + +## Current Implementation + +The Command Center (`/command`) is implemented in `/frontend/app/(app)/command/page.tsx` and uses: +- **TasksPanel** in the ContextualPanel (sidebar) showing running/completed/failed tasks grouped by status +- **PromptInput** component for the main text input +- **RepoSelector** for selecting project/repository context +- **ModelSelector** for choosing the AI model + +### Layout Structure + +``` +┌─────────────────────────────────────────────────────────────┐ +│ IconRail │ ContextualPanel (TasksPanel) │ Main Content │ +│ 56px │ 256px │ flex-1 │ +└─────────────────────────────────────────────────────────────┘ +``` + +### Submission Flow + +1. User types prompt and submits +2. Frontend creates a ticket via `POST /api/v1/tickets` +3. Frontend subscribes to WebSocket events, waiting for `SANDBOX_SPAWNED` +4. Fallback polling every 3s checks for tasks with `sandbox_id` +5. When sandbox is ready, redirects to `/sandbox/:sandboxId` --- @@ -10,14 +37,12 @@ | Element | Specification | |---------|---------------| -| Background | Warm cream #F5F5F0 | -| Sidebar Width | 220px (collapsible) | +| Background | bg-background (theme-aware) | +| Sidebar Width | 256px (collapsible to 40px) | | Prompt Max-Width | 700px (centered) | -| Agent Card | Status icon + Task name + Time + Line changes (+X -Y) + Repo | -| Line Changes | JetBrains Mono, green (#22863A) for +, red (#CB2431) for - | -| Time Groups | TODAY, THIS WEEK, THIS MONTH (11px uppercase, muted) | -| Header | Minimal - right-aligned 🛡️ Guardian indicator + Dashboard link + Profile avatar | -| Guardian Indicator | 🟢 Active (monitoring) / 🟡 Paused / 🔴 Issue - clickable to /health | +| Task Card | Status icon + Task name + Time ago | +| Status Groups | RUNNING, PENDING, COMPLETED, FAILED | +| Header | Minimal header with MinimalHeader component | --- ### Flow 33: Command Center (Primary Landing Page) @@ -289,68 +314,63 @@ --- -### Flow 35: Recent Agents Sidebar +### Flow 35: TasksPanel 
Sidebar (Current Implementation) + +The sidebar uses `TasksPanel` component which groups tasks by status: ``` ┌─────────────────────────────────────────────────────────────┐ -│ COMPONENT: Recent Agents Sidebar │ +│ COMPONENT: TasksPanel (ContextualPanel sidebar) │ │ │ │ ┌────────────────┐ │ │ │ │ │ -│ │ Search agents │ │ -│ │ [🔍________] │ │ -│ │ │ │ -│ │ + New Agent │ ← Quick action to start fresh │ -│ │ │ │ +│ │ [Search tasks.]│ │ +│ │ [Filter][Sort]│ │ +│ │ │ │ +│ │ [+ New Task] │ ← Links to /command │ +│ │ │ │ │ │ ─────────── │ │ -│ │ Today │ │ +│ │ RUNNING │ │ │ │ │ │ │ │ ┌──────────┐ │ │ -│ │ │ ✓ Auth │ │ ← Completed (green check) │ -│ │ │ system │ │ │ -│ │ │ 1h │ │ │ -│ │ │ proj-a │ │ ← Repo indicator │ -│ │ └──────────┘ │ │ -│ │ │ │ -│ │ ┌──────────┐ │ │ -│ │ │ ⟳ Fix │ │ ← In Progress (spinner) │ -│ │ │ API │ │ │ -│ │ │ 29 -10 │ │ ← +lines/-lines indicator │ -│ │ │ proj-b │ │ │ +│ │ │⟳ Fix auth│ │ ← Running (spinner icon) │ +│ │ │ running │ │ │ +│ │ │ 5m │ │ │ │ │ └──────────┘ │ │ │ │ │ │ │ │ ─────────── │ │ -│ │ This Week │ │ +│ │ PENDING │ │ │ │ │ │ │ │ ┌──────────┐ │ │ -│ │ │ ✓ Tests │ │ │ -│ │ │ 87 -34 │ │ │ -│ │ │ 1d │ │ │ +│ │ │⏳ Setup │ │ ← Pending (clock icon) │ +│ │ │ pending │ │ │ +│ │ │ 10m │ │ │ │ │ └──────────┘ │ │ │ │ │ │ +│ │ ─────────── │ │ +│ │ COMPLETED │ │ +│ │ │ │ │ │ ┌──────────┐ │ │ -│ │ │ ✗ DB mig │ │ ← Failed (red X) │ -│ │ │ Errored│ │ │ -│ │ │ 2d │ │ │ +│ │ │✓ Add dash│ │ ← Completed (check icon) │ +│ │ │ completed│ │ │ +│ │ │ 1h │ │ │ │ │ └──────────┘ │ │ │ │ │ │ │ │ ─────────── │ │ -│ │ This Month │ │ -│ │ ... more │ │ +│ │ FAILED │ │ │ │ │ │ -│ │ ─────────── │ │ -│ │ Errored │ ← Quick filter for failed agents │ -│ │ Expired │ ← Quick filter for timed out │ +│ │ ┌──────────┐ │ │ +│ │ │✗ DB mig │ │ ← Failed (X icon) │ +│ │ │ failed │ │ │ +│ │ │ 2h │ │ │ +│ │ └──────────┘ │ │ │ │ │ │ -│ │ ─────────── │ │ -│ │ [Settings ⚙]│ │ -│ │ [Help ?] 
│ │ │ └────────────────┘ │ │ │ -│ │ Click on agent card │ +│ │ Click on task card │ │ ▼ │ │ │ -│ Navigates to: /agents/:agentId (Agent Detail View) │ +│ Navigates to: /sandbox/:sandboxId (Sandbox Detail View) │ │ │ └─────────────────────────────────────────────────────────────┘ ``` @@ -507,7 +527,7 @@ ## Navigation Summary -### Main Routes (Updated) +### Main Routes (Updated for Sandbox-Based UI) ``` / (Landing - unauthenticated) @@ -519,14 +539,11 @@ ├── /reset-password (Password reset confirmation) ├── /onboarding (First-time user) │ -└── / (Authenticated - Command Center) ← PRIMARY +└── /command (Authenticated - Command Center) ← PRIMARY │ - ├── /analytics (Analytics Dashboard) ← SECONDARY + ├── /sandbox/:sandboxId (Sandbox Detail View) ← MAIN WORKFLOW │ - ├── /health (System Health Dashboard) ← MONITORING - │ ├── /health/trajectories (Active Trajectory Analyses) - │ ├── /health/interventions (Intervention History) - │ └── /health/settings (Monitoring Configuration) + ├── /sandboxes (Sandbox list) │ ├── /projects (Project list) │ ├── /projects/new (Create project) @@ -535,21 +552,22 @@ │ ├── /projects/:id/specs (Specs list) │ └── /projects/:id/specs/:specId (Spec viewer) │ + ├── /phases (Phase management) + │ ├── /board/:projectId (Kanban board) │ └── /board/:projectId/:ticketId (Ticket detail) │ ├── /graph/:projectId (Dependency graph) │ └── /graph/:projectId/:ticketId (Ticket graph) │ - ├── /agents (Agent list) - │ ├── /agents/spawn (Spawn agent) - │ ├── /agents/:agentId (Agent detail) - │ └── /agents/:agentId/workspace (Workspace detail) + ├── /analytics (Analytics Dashboard) │ - ├── /workspaces (Workspace list) - │ └── /workspaces/:agentId (Workspace detail) + ├── /health (System Health Dashboard) + │ ├── /health/trajectories (Active Trajectory Analyses) + │ ├── /health/interventions (Intervention History) + │ └── /health/settings (Monitoring Configuration) │ - ├── /commits/:commitSha (Commit diff viewer) + ├── /organizations (Organization management) │ └── 
/settings (User settings) ├── /settings/profile (User profile) @@ -560,14 +578,12 @@ ### Key User Actions (Updated) -1. **Command Center Flow**: / → Select Project → Type Task → Submit → Agent Detail View -2. **Quick Project Start**: / → Select Unconnected Repo → Type Task → Auto-Create Project + Spawn Agent -3. **Connect New Repo**: / → Connect New Repository → OAuth → Creates Project → Type Task → Spawn Agent -4. **Analytics Access**: / → Click "Analytics" in nav → Analytics Dashboard -5. **Agent History**: / → Click agent in sidebar → Agent Detail View -6. **Return to Command**: Any page → Click logo or "Command" → / -7. **System Health Access**: Any page → Click 🛡️ Guardian indicator → Quick Status Popover → View System Health → /health -8. **Quick Intervention**: Any page → Click 🛡️ indicator (🟡/🔴) → See agent status → Send Intervention +1. **Primary Flow**: /command → Type Task → Submit → Wait for Sandbox → Auto-redirect to /sandbox/:sandboxId +2. **Monitor Sandbox**: /sandbox/:sandboxId → View Events Tab → Watch real-time agent activity +3. **Send Message**: /sandbox/:sandboxId → Type message → Send to agent +4. **View Task History**: /command → Click task in sidebar → /sandbox/:sandboxId +5. **Return to Command**: Any page → Click Terminal icon in IconRail → /command +6. 
**System Health Access**: Any page → Navigate to /health via IconRail or links --- diff --git a/docs/user_flows_summary.md b/docs/user_flows_summary.md index 18aeb56e..676c2d65 100644 --- a/docs/user_flows_summary.md +++ b/docs/user_flows_summary.md @@ -311,10 +311,24 @@ This document summarizes the comprehensive updates made to user journey, user fl --- -## Navigation Structure +## Navigation Structure (Updated for Sandbox-Based UI) + +The main navigation uses an IconRail (vertical icon bar) with the following sections: + +| Icon | Section | Route | Description | +|------|---------|-------|-------------| +| Terminal | Command | `/command` | Primary landing - launch new tasks | +| FolderGit2 | Projects | `/projects` | Project management | +| Workflow | Phases | `/phases` | Phase management | +| Box | Sandboxes | `/sandboxes` | View all sandboxes | +| BarChart3 | Analytics | `/analytics` | Usage analytics | +| Building2 | Organizations | `/organizations` | Organization settings | +| Settings | Settings | `/settings` | User settings | + +### Full Route Structure ``` -/ (Landing) +/ (Landing - unauthenticated) ├── /register ├── /login ├── /login/oauth @@ -322,12 +336,18 @@ This document summarizes the comprehensive updates made to user journey, user fl ├── /forgot-password ├── /reset-password ├── /onboarding -└── /dashboard - ├── /organizations - │ ├── /organizations/new - │ └── /organizations/:id - │ ├── /organizations/:id/settings - │ └── /organizations/:id/members +│ +└── / (Authenticated - redirects to /command) + │ + ├── /command (PRIMARY - Command Center) + │ └── User types prompt → Creates ticket → Spawns sandbox → Redirects + │ + ├── /sandbox/:sandboxId (Sandbox Detail View) + │ ├── Events tab (real-time agent activity stream) + │ └── Details tab (task metadata, event summary) + │ + ├── /sandboxes (Sandbox list) + │ ├── /projects │ ├── /projects/new │ ├── /projects/:id @@ -342,20 +362,33 @@ This document summarizes the comprehensive updates made to user 
journey, user fl │ ├── /projects/:id/phase-gates │ └── /projects/:id/settings │ └── /projects/:id/settings/github - ├── /board/:projectId + │ + ├── /phases (Phase management) + │ + ├── /board/:projectId (Kanban board) + │ └── /board/:projectId/:ticketId + │ + ├── /graph/:projectId (Dependency graph) + │ └── /graph/:projectId/:ticketId + │ ├── /diagnostic/:entityType/:entityId │ ├── /diagnostic/ticket/:ticketId │ ├── /diagnostic/task/:taskId - │ └── /diagnostic/agent/:agentId - │ └── /board/:projectId/:ticketId - ├── /graph/:projectId - │ └── /graph/:projectId/:ticketId - ├── /agents - │ ├── /agents/spawn - │ ├── /agents/:agentId - │ └── /agents/:agentId/workspace - ├── /workspaces - ├── /commits/:commitSha + │ └── /diagnostic/sandbox/:sandboxId + │ + ├── /analytics (Usage analytics) + │ + ├── /health (System health dashboard) + │ ├── /health/trajectories + │ ├── /health/interventions + │ └── /health/settings + │ + ├── /organizations + │ ├── /organizations/new + │ └── /organizations/:id + │ ├── /organizations/:id/settings + │ └── /organizations/:id/members + │ └── /settings ├── /settings/profile ├── /settings/api-keys @@ -363,15 +396,40 @@ This document summarizes the comprehensive updates made to user journey, user fl └── /settings/preferences ``` +### Contextual Sidebar (ContextualPanel) + +The sidebar content changes based on the current route: + +| Route | Panel | Content | +|-------|-------|---------| +| `/command` | TasksPanel | Running/Pending/Completed/Failed tasks | +| `/sandbox/*` | TasksPanel | Same tasks, selected sandbox highlighted | +| `/projects` | ProjectsPanel | Project list with favorites | +| `/phases` | PhasesPanel | Phase configuration | +| `/board/*` | ProjectsPanel | Project context | +| `/health` | HealthPanel | System health metrics | +| `/graph/*` | GraphFiltersPanel | Graph filter options | +| `/analytics` | AnalyticsPanel | Analytics filters | +| `/settings` | SettingsPanel | Settings navigation | +| `/organizations` | OrganizationsPanel 
| Organization list | + --- ## User Flows +### Primary Sandbox Flow (Main User Journey) +1. **Command Center** (`/command`) → User describes what they want to build +2. **Submit** → Creates ticket via API, orchestrator spawns sandbox +3. **Auto-redirect** → User is redirected to `/sandbox/:sandboxId` +4. **Monitor** → Real-time event stream shows agent activity (thinking, tool use, file edits) +5. **Interact** → User can send messages to the agent while it works +6. **Complete** → Task completes, user can review results + ### Registration Flow -1. Landing → Register/Login → Email Verification → Onboarding → Dashboard +1. Landing → Register/Login → Email Verification → Onboarding → Dashboard (redirects to /command) ### Organization Setup Flow -1. Onboarding → Create Organization → Configure Limits → Dashboard +1. Onboarding → Create Organization → Configure Limits → /command ### Kanban Board Flow 1. Project → Board → View Tickets → Click Ticket → Ticket Detail → (Details/Tasks/Commits/Graph/Comments/Audit) diff --git a/docs/user_journey/00_overview.md b/docs/user_journey/00_overview.md index 23a1fb8c..7bdbbaf9 100644 --- a/docs/user_journey/00_overview.md +++ b/docs/user_journey/00_overview.md @@ -72,72 +72,91 @@ OmoiOS follows a **spec-driven autonomous engineering workflow** where users des ## Dashboard Layout +### Current Implementation + +The UI uses a three-column layout with IconRail navigation and route-aware contextual panels. 
+ +**Key Components:** +- `frontend/components/layout/IconRail.tsx` - Primary navigation +- `frontend/components/layout/ContextualPanel.tsx` - Route-aware sidebar +- `frontend/components/panels/TasksPanel.tsx` - Sandbox tasks grouped by status + ### Main Dashboard Structure ``` -┌─────────────────────────────────────────────────────────────┐ -│ Header: Logo | Projects | 🛡️ Guardian | Search | Notifications | Profile │ -├─────────────────────────────────────────────────────────────┤ -│ │ -│ ┌─────────┐ ┌──────────────────────────────────────┐ │ -│ │ Sidebar │ │ Main Content Area │ │ -│ │ │ │ │ │ -│ │ • Home │ │ ┌────────────────────────────────┐ │ │ -│ │ • Board │ │ │ Overview Section │ │ │ -│ │ • Graph │ │ │ • Total Specs: 5 │ │ │ -│ │ • Specs │ │ │ • Active Agents: 3 │ │ │ -│ │ • Stats │ │ │ • Tickets in Progress: 12 │ │ │ -│ │ • Agents│ │ │ • Recent Commits: 8 │ │ │ -│ │ • Cost │ │ │ • Guardian: 🟢 Monitoring │ │ │ -│ │ • Memory│ │ │ • System Health: 94% │ │ │ -│ │ • Audit │ │ │ • Budget: 78% used │ │ │ -│ │ • Health│ │ └────────────────────────────────┘ │ │ -│ │ │ │ ┌────────────────────────────────┐ │ │ -│ │ │ │ │ Active Specs Grid │ │ │ -│ │ │ │ │ │ │ │ -│ │ │ │ │ ┌──────────┐ ┌──────────┐ │ │ │ -│ │ │ │ │ │ Spec 1 │ │ Spec 2 │ │ │ │ -│ │ │ │ │ │ Progress: │ │ Progress: │ │ │ │ -│ │ │ │ │ │ ████░░ 60%│ │ ██████ 80%│ │ │ │ -│ │ │ │ │ └──────────┘ └──────────┘ │ │ │ -│ │ │ │ └────────────────────────────────┘ │ │ -│ │ │ │ │ │ -│ │ │ │ ┌────────────────────────────────┐ │ │ -│ │ │ │ │ Quick Actions │ │ │ -│ │ │ │ │ [+ New Spec] [+ New Project] │ │ │ -│ │ │ │ └────────────────────────────────┘ │ │ -│ └─────────┘ └──────────────────────────────────────┘ │ -│ │ -│ ┌──────────────────────────────────────────────────────┐ │ -│ │ Right Sidebar (Collapsible) │ │ -│ │ Recent Activity Feed │ │ -│ │ • Spec "Auth System" requirements approved │ │ -│ │ • Agent worker-1 completed task "Setup JWT" │ │ -│ │ • Discovery: Bug found in login flow │ │ -│ │ • 🛡️ Guardian intervention 
sent to worker-2 │ │ -│ │ • 🔄 Monitoring cycle completed (5 agents checked) │ │ -│ └──────────────────────────────────────────────────────┘ │ -│ │ -└─────────────────────────────────────────────────────────────┘ +┌────────────────────────────────────────────────────────────────────────┐ +│ │ +│ ┌──────┐ ┌─────────────────┐ ┌──────────────────────────────────┐ │ +│ │Icon │ │ Contextual │ │ Main Content Area │ │ +│ │Rail │ │ Panel │ │ │ │ +│ │ │ │ │ │ Route-specific content: │ │ +│ │ Logo │ │ Route-aware │ │ │ │ +│ │ │ │ sidebar that │ │ /command → Prompt input + │ │ +│ │ ──── │ │ changes based │ │ loading state │ │ +│ │ Term │ │ on pathname: │ │ │ │ +│ │ Proj │ │ │ │ /sandbox/:id → Event stream + │ │ +│ │ Phas │ │ /command → │ │ agent chat │ │ +│ │ Sand │ │ TasksPanel │ │ │ │ +│ │ Anal │ │ │ │ /projects → Project grid │ │ +│ │ Orgs │ │ /sandbox/* → │ │ │ │ +│ │ │ │ TasksPanel │ │ /phases → Workflow phases │ │ +│ │ ──── │ │ │ │ │ │ +│ │ Sett │ │ /projects → │ │ /sandboxes → All sandboxes │ │ +│ │ │ │ ProjectsPanel │ │ │ │ +│ │ │ │ │ │ /analytics → Metrics dashboard │ │ +│ │ │ │ /phases → │ │ │ │ +│ │ │ │ PhasesPanel │ │ /organizations → Team mgmt │ │ +│ │ │ │ │ │ │ │ +│ └──────┘ └─────────────────┘ └──────────────────────────────────┘ │ +│ │ +└────────────────────────────────────────────────────────────────────────┘ +``` + +### IconRail Navigation + +| Icon | Section | Route | Description | +|------|---------|-------|-------------| +| Terminal | Command | `/command` | Primary entry point - describe what to build | +| FolderGit2 | Projects | `/projects` | Project management and selection | +| Workflow | Phases | `/phases` | Workflow phase configuration | +| Box | Sandboxes | `/sandboxes` | List of all sandbox executions | +| BarChart3 | Analytics | `/analytics` | Usage metrics and performance | +| Building2 | Organizations | `/organizations` | Team and org management | +| Settings | Settings | `/settings` | User and system preferences | + +### ContextualPanel Mapping 
+ +| Route Pattern | Panel | Content | +|---------------|-------|---------| +| `/command` | TasksPanel | Sandboxes grouped by status (Running, Pending, Completed, Failed) | +| `/sandbox/*` | TasksPanel | Same as above, with current sandbox highlighted | +| `/projects` | ProjectsPanel | Project list and quick actions | +| `/phases` | PhasesPanel | Phase configuration | +| `/sandboxes` | TasksPanel | Full sandbox history | + +### TasksPanel Structure + +The TasksPanel groups sandbox tasks by execution status: + +``` +┌─────────────────────┐ +│ Running │ ← Currently executing sandboxes +│ ├─ Payment API │ +│ └─ Auth System │ +├─────────────────────┤ +│ Pending │ ← Queued for execution +│ └─ Database Setup │ +├─────────────────────┤ +│ Completed │ ← Successfully finished +│ ├─ User Profile │ +│ └─ API Routes │ +├─────────────────────┤ +│ Failed │ ← Execution errors +│ └─ Image Upload │ +└─────────────────────┘ ``` -**Dashboard Sections:** -- **Overview Section**: Key metrics (total specs, active agents, tickets in progress, recent commits, Guardian status, system health) -- **Active Specs Grid**: Cards showing all active specs with progress bars -- **Quick Actions**: Buttons for common actions (+ New Spec, + New Project) -- **Recent Activity Sidebar**: Chronological feed of recent events including monitoring cycles (collapsible) -- **Guardian Status Indicator**: Real-time monitoring system status in header (🛡️ icon) - -**Managing Multiple Specs:** -- Dashboard shows grid view of all active specs -- Each spec card displays: - - Spec name and description - - Progress bar (0-100%) - - Status badge (Draft, Requirements, Design, Tasks, Executing, Completed) - - Last updated timestamp - - Quick actions ([View] [Edit] [Export]) -- Filter options: All | Active | Completed | Draft -- Search bar to find specs by name +Clicking any task navigates to `/sandbox/:sandboxId` for detailed monitoring. 
--- diff --git a/docs/user_journey/03_execution_monitoring.md b/docs/user_journey/03_execution_monitoring.md index 4bab470c..0c00371d 100644 --- a/docs/user_journey/03_execution_monitoring.md +++ b/docs/user_journey/03_execution_monitoring.md @@ -104,7 +104,86 @@ - Progress dashboard: Metrics update live ``` -#### 3.2 Monitoring Views +#### 3.2 Sandbox-Based Monitoring (Frontend) + +The frontend provides real-time monitoring through the sandbox detail view at `/sandbox/:sandboxId`. + +**Current Implementation:** +- `frontend/app/(app)/sandbox/[sandboxId]/page.tsx` - Sandbox detail view +- `frontend/hooks/use-sandbox-monitor.ts` - WebSocket event streaming +- `frontend/components/sandbox/EventRenderer.tsx` - Event visualization + +**Sandbox Detail View Structure:** + +``` +┌────────────────────────────────────────────────────────────────────────┐ +│ IconRail │ TasksPanel │ Sandbox Detail View │ +│ │ (grouped by status) │ │ +│ │ │ ┌──────────────────────────────┐ │ +│ │ Running: │ │ Tabs: [Events] [Details] │ │ +│ │ ├─ Current sandbox │ ├──────────────────────────────┤ │ +│ │ │ (highlighted) │ │ │ │ +│ │ │ │ │ Real-time Event Stream: │ │ +│ │ Completed: │ │ │ │ +│ │ ├─ Payment API │ │ 10:23 agent.thinking │ │ +│ │ └─ Auth System │ │ "Analyzing requirements" │ │ +│ │ │ │ │ │ +│ │ │ │ 10:24 agent.tool_use │ │ +│ │ │ │ Tool: Read file.ts │ │ +│ │ │ │ │ │ +│ │ │ │ 10:25 agent.file_edited │ │ +│ │ │ │ Modified: src/api.ts │ │ +│ │ │ │ │ │ +│ │ │ └──────────────────────────────┘ │ +│ │ │ │ +│ │ │ ┌──────────────────────────────┐ │ +│ │ │ │ Message Input │ │ +│ │ │ │ [Type a message to agent...] │ │ +│ │ │ └──────────────────────────────┘ │ +└────────────────────────────────────────────────────────────────────────┘ +``` + +**WebSocket Event Types:** +| Event | Description | +|-------|-------------| +| `SANDBOX_SPAWNED` | Sandbox created and agent assigned | +| `agent.thinking` | Agent reasoning/planning | +| `agent.tool_use` | Agent using a tool (Read, Edit, Bash, etc.) 
| +| `agent.tool_completed` | Tool execution finished with result | +| `agent.file_edited` | File was modified | +| `agent.message` | Agent output message | +| `TASK_COMPLETED` | Sandbox execution finished | + +**EventRenderer - Beautiful Event Display:** + +The `EventRenderer` component (`frontend/components/sandbox/EventRenderer.tsx`) provides specialized rendering for each event type: + +| Card Type | Used For | Features | +|-----------|----------|----------| +| **MessageCard** | Agent/user messages | Markdown rendering, "Thinking" indicator with amber styling | +| **FileWriteCard** | Write/Edit tool results | Cursor-style diff view, syntax highlighting (oneDark theme), language icons (🐍 Python, ⚛️ React, 🦀 Rust, etc.), line numbers, +/- stats | +| **BashCard** | Terminal commands | `$` prompt styling, stdout/stderr parsing, exit code badges, dark terminal theme | +| **GlobCard** | File search results | Tree-style directory listing with folder icons, file counts | +| **GrepCard** | Code search results | Grouped by directory, match counts, expandable | +| **TodoCard** | Task lists | Status icons (✓ completed, ▶ in progress, ⏱ pending), progress tracking | +| **ToolCard** | Generic tools | Collapsible with input/output JSON display | + +**Syntax Highlighting:** +- Uses `react-syntax-highlighter` with Prism + oneDark theme +- Auto-detects language from file extension +- Supports 40+ languages (Python, TypeScript, Rust, Go, etc.) 
+ +**User Interactions:** +- View real-time event stream as agent works +- Switch between Events and Details tabs +- Send messages to agent while it's running +- Navigate between sandboxes via TasksPanel +- Expand/collapse code blocks +- Copy code with one click + +--- + +#### 3.3 Extended Monitoring Views (Planned) **Kanban Board View:** ``` @@ -206,7 +285,7 @@ Workspace Isolation Features: - Merge conflict logging: All resolutions logged for audit ``` -#### 3.3 Discovery & Workflow Branching +#### 3.4 Discovery & Workflow Branching ``` Agent working on Task A discovers bug: @@ -265,7 +344,7 @@ Agent working on Task A discovers bug: - Discoveries saved for future reference - Collective intelligence improves over time -#### 3.4 Collective Intelligence & Memory System +#### 3.5 Collective Intelligence & Memory System **How Agents Learn from Each Other:** @@ -344,7 +423,7 @@ Agent A encounters PostgreSQL timeout error: > 💡 **For user-facing memory management flows**, see [11_cost_memory_management.md](./11_cost_memory_management.md) (Memory Search, Patterns, Insights). -#### 3.5 Guardian Interventions +#### 3.6 Guardian Interventions ``` Guardian monitors agent trajectories every 60 seconds: @@ -373,7 +452,7 @@ Guardian monitors agent trajectories every 60 seconds: - Add constraint: Add new requirement - Inject tool call: Force specific action -#### 3.6 System Health & Monitoring Dashboard +#### 3.7 System Health & Monitoring Dashboard ``` User can view System Health at any time via header indicator or sidebar: diff --git a/docs/user_journey/06_key_interactions.md b/docs/user_journey/06_key_interactions.md index ee24e80b..4e11957a 100644 --- a/docs/user_journey/06_key_interactions.md +++ b/docs/user_journey/06_key_interactions.md @@ -5,6 +5,95 @@ --- ## Key User Interactions +### Primary Flow: Sandbox Interaction + +The main user interaction pattern centers on creating and monitoring sandboxes: + +**1. 
Create Sandbox (Command Center)** +``` +/command page: +┌──────────────────────────────────────────────┐ +│ What do you want to build? │ +│ │ +│ [Large text input area] │ +│ │ +│ User types: "Add payment processing with │ +│ Stripe integration including webhooks" │ +│ │ +│ [Submit Button] → │ +└──────────────────────────────────────────────┘ +``` + +**2. Wait for Sandbox Spawn** +- Submit creates a ticket via API +- System shows loading state while orchestrator processes +- WebSocket listens for `SANDBOX_SPAWNED` event +- Fallback polling every 3s if WebSocket misses event + +**3. Auto-redirect to Sandbox Detail** +- Once `sandbox_id` received, redirect to `/sandbox/:sandboxId` +- TasksPanel highlights the new sandbox as "Running" + +**4. Monitor Agent in Real-Time** + +Events render beautifully via the `EventRenderer` component with specialized cards: + +``` +/sandbox/:sandboxId page: +┌──────────────────────────────────────────────┐ +│ [Events] [Details] │ +├──────────────────────────────────────────────┤ +│ │ +│ ┌──────────────────────────────────────────┐ │ +│ │ 🤔 Thinking │ │ +│ │ "Analyzing Stripe integration patterns" │ │ +│ └──────────────────────────────────────────┘ │ +│ │ +│ ┌──────────────────────────────────────────┐ │ +│ │ $ Terminal │ │ +│ │ $ pnpm add stripe │ │ +│ │ + stripe@14.0.0 │ │ +│ └──────────────────────────────────────────┘ │ +│ │ +│ ┌──────────────────────────────────────────┐ │ +│ │ + Created 📜 payment.ts +45 │ │ +│ │ ─────────────────────────────────────────│ │ +│ │ 1 │ import Stripe from 'stripe'; │ │ +│ │ 2 │ │ │ +│ │ 3 │ export const stripe = new Stripe( │ │ +│ │ 4 │ process.env.STRIPE_SECRET_KEY! 
│ │ +│ │ 5 │ ); │ │ +│ └──────────────────────────────────────────┘ │ +│ │ +└──────────────────────────────────────────────┘ +``` + +**Event Card Types:** +- **MessageCard**: Agent messages with Markdown, thinking indicator +- **FileWriteCard**: Cursor-style diffs with syntax highlighting, language icons +- **BashCard**: Terminal with `$` prompt, stdout/stderr, exit codes +- **GlobCard**: Tree-style file listings +- **GrepCard**: Search results grouped by directory +- **TodoCard**: Task progress with status icons + +**5. Interact with Agent** +``` +┌──────────────────────────────────────────────┐ +│ [Message input] │ +│ "Make sure to use webhook signature │ +│ verification for security" [Send] │ +└──────────────────────────────────────────────┘ +``` +- Messages sent to running agent as context +- Agent incorporates feedback into its work + +**6. Navigate Between Sandboxes** +- TasksPanel shows all sandboxes grouped by status +- Click any sandbox to view its detail page +- Current sandbox highlighted in panel + +--- + ### Command Palette (Cmd+K) Quick navigation to: - Create new ticket @@ -16,6 +105,8 @@ Quick navigation to: ### Real-Time Updates All views update automatically via WebSocket: +- **Sandbox events**: Agent thinking, tool use, file edits stream in real-time +- **TasksPanel**: Sandbox status changes (Running → Completed/Failed) - Kanban board: Tickets move as agents work - Dependency graph: Nodes update as tasks complete - Activity timeline: New events appear instantly diff --git a/frontend/app/(app)/command/page.tsx b/frontend/app/(app)/command/page.tsx index 8da01003..4893ea4a 100644 --- a/frontend/app/(app)/command/page.tsx +++ b/frontend/app/(app)/command/page.tsx @@ -3,7 +3,7 @@ import { useState, useMemo, useEffect, useCallback, useRef } from "react" import { useRouter } from "next/navigation" import { toast } from "sonner" -import { PromptInput, ModelSelector, RepoSelector, Project, Repository } from "@/components/command" +import { PromptInput, 
ModelSelector, RepoSelector, Project, Repository, WorkflowModeSelector, getWorkflowModeConfig, type WorkflowMode } from "@/components/command" import { useProjects } from "@/hooks/useProjects" import { useConnectedRepositories } from "@/hooks/useGitHub" import { useCreateTicket } from "@/hooks/useTickets" @@ -30,8 +30,12 @@ export default function CommandCenterPage() { const [selectedRepo, setSelectedRepo] = useState(null) const [selectedBranch, setSelectedBranch] = useState("main") const [selectedModel, setSelectedModel] = useState("opus-4.5") + const [selectedMode, setSelectedMode] = useState("quick") const [launchState, setLaunchState] = useState({ status: "idle" }) + // Get mode configuration for dynamic UI + const modeConfig = getWorkflowModeConfig(selectedMode) + // Fetch real data const { data: projectsData } = useProjects({ status: "active" }) const { data: connectedRepos } = useConnectedRepositories() @@ -168,14 +172,33 @@ export default function CommandCenterPage() { try { setLaunchState({ status: "creating_ticket", prompt }) - // Create a ticket with the prompt as title/description - const result = await createTicketMutation.mutateAsync({ + // Build payload based on selected mode + const basePayload = { title: prompt.slice(0, 100) + (prompt.length > 100 ? "..." : ""), description: prompt, - phase_id: "PHASE_IMPLEMENTATION", - priority: "MEDIUM", + priority: "MEDIUM" as const, check_duplicates: false, // Don't check for dups on command prompts force_create: true, + project_id: selectedProject?.id, + } + + // Mode-specific parameters + const modePayload = selectedMode === "quick" + ? 
{ + phase_id: "PHASE_IMPLEMENTATION", + workflow_mode: "quick" as const, + auto_spawn_sandbox: true, + } + : { + phase_id: "PHASE_REQUIREMENTS", + workflow_mode: "spec_driven" as const, + generate_spec: true, + auto_spawn_sandbox: false, + } + + const result = await createTicketMutation.mutateAsync({ + ...basePayload, + ...modePayload, }) // Check if we got a duplicate response instead of a ticket @@ -185,9 +208,18 @@ export default function CommandCenterPage() { return } - // Ticket created, now wait for the orchestrator to spawn a sandbox - toast.info("Launching sandbox...") - setLaunchState({ status: "waiting_for_sandbox", ticketId: result.id, prompt }) + // Handle navigation based on mode + if (selectedMode === "quick") { + // Ticket created, now wait for the orchestrator to spawn a sandbox + toast.info("Launching sandbox...") + setLaunchState({ status: "waiting_for_sandbox", ticketId: result.id, prompt }) + } else { + // Spec-driven: redirect to spec workspace + toast.info("Creating spec...") + // For now, redirect to tickets page until spec workspace is built + // TODO: Update to /specs/:id when spec workspace is implemented + router.push(`/tickets/${result.id}`) + } } catch (error) { toast.error("Failed to create task. Please try again.") @@ -216,7 +248,7 @@ export default function CommandCenterPage() { What would you like to build?

- Describe what you want, and we'll spawn an AI agent to build it + {modeConfig.helperText}

@@ -224,7 +256,8 @@ export default function CommandCenterPage() { {/* Launch Status */} @@ -247,16 +280,22 @@ export default function CommandCenterPage() { {/* Controls Row */}
- +
+ + +
@@ -138,19 +139,29 @@ export default function SandboxDetailPage({ params }: SandboxDetailPageProps) { // Collect all tool_completed content keys for deduplication const completedToolKeys = new Set() const completedFileContentKeys = new Set() - - // First pass: collect keys from tool_completed events + // Collect subagent prompts to filter out duplicate user messages + const subagentPrompts = new Set() + + // First pass: collect keys from tool_completed events and subagent prompts for (const event of events) { if (event.event_type === "agent.tool_completed") { const toolKey = getToolUseKey(event) if (toolKey) completedToolKeys.add(toolKey) - + // Track file content keys const contentKey = getFileContentKey(event) if (contentKey) completedFileContentKeys.add(contentKey) } + + // Track subagent prompts to filter out duplicate user messages + if (event.event_type === "agent.subagent_invoked") { + const data = event.event_data as Record + const toolInput = (data.tool_input || {}) as Record + const prompt = (data.subagent_prompt || toolInput.prompt) as string | undefined + if (prompt) subagentPrompts.add(prompt) + } } - + // Track which content keys we've already rendered const renderedContentKeys = new Set() @@ -159,13 +170,20 @@ export default function SandboxDetailPage({ params }: SandboxDetailPageProps) { .filter((e) => { // Skip hidden event types if (HIDDEN_EVENT_TYPES.includes(e.event_type)) return false - + + // Skip user messages that are subagent prompts (already shown in SubagentCard) + if (e.event_type === "agent.user_message") { + const data = e.event_data as Record + const content = (data.content || data.message) as string | undefined + if (content && subagentPrompts.has(content)) return false + } + // Skip tool_use events if there's a corresponding tool_completed if (e.event_type === "agent.tool_use") { const key = getToolUseKey(e) if (key && completedToolKeys.has(key)) return false } - + // Skip file_edited events if we have a tool_completed with the same 
content if (e.event_type === "agent.file_edited") { const contentKey = getFileContentKey(e) @@ -436,8 +454,11 @@ export default function SandboxDetailPage({ params }: SandboxDetailPageProps) { )} {task?.description && (
-

Description

-

{task.description}

+ {/* Only show label if description doesn't start with its own heading */} + {!/^#/.test(task.description.trim()) && ( +

Description

+ )} +
)}
diff --git a/frontend/app/(auth)/layout.tsx b/frontend/app/(auth)/layout.tsx index 8d8c17a1..f95588f5 100644 --- a/frontend/app/(auth)/layout.tsx +++ b/frontend/app/(auth)/layout.tsx @@ -1,5 +1,6 @@ import Link from "next/link" import { Card } from "@/components/ui/card" +import { OmoiOSLogo } from "@/components/ui/omoios-logo" export default function AuthLayout({ children, @@ -9,11 +10,8 @@ export default function AuthLayout({ return (
{/* Logo */} - -
- O -
- OmoiOS + + {/* Auth Card */} diff --git a/frontend/components/command/PromptInput.tsx b/frontend/components/command/PromptInput.tsx index 1f2fa626..8b6694df 100644 --- a/frontend/components/command/PromptInput.tsx +++ b/frontend/components/command/PromptInput.tsx @@ -10,6 +10,7 @@ interface PromptInputProps { onSubmit?: (prompt: string) => void isLoading?: boolean placeholder?: string + submitLabel?: string className?: string } @@ -17,6 +18,7 @@ export function PromptInput({ onSubmit, isLoading = false, placeholder = "Ask Cursor to build, fix bugs, explore", + submitLabel, className, }: PromptInputProps) { const [value, setValue] = useState("") @@ -76,17 +78,20 @@ export function PromptInput({
diff --git a/frontend/components/command/WorkflowModeSelector.tsx b/frontend/components/command/WorkflowModeSelector.tsx new file mode 100644 index 00000000..ea4fad1a --- /dev/null +++ b/frontend/components/command/WorkflowModeSelector.tsx @@ -0,0 +1,96 @@ +"use client" + +import { + Select, + SelectContent, + SelectItem, + SelectTrigger, + SelectValue, +} from "@/components/ui/select" +import { cn } from "@/lib/utils" +import { Zap, FileText } from "lucide-react" + +export type WorkflowMode = "quick" | "spec_driven" + +export interface WorkflowModeOption { + id: WorkflowMode + name: string + description: string + icon: React.ComponentType<{ className?: string }> + placeholder: string + helperText: string + submitLabel: string +} + +export const workflowModes: WorkflowModeOption[] = [ + { + id: "quick", + name: "Quick", + description: "Immediate implementation", + icon: Zap, + placeholder: "Describe what you want to build...", + helperText: "Agent will immediately start implementing your request", + submitLabel: "Launch", + }, + { + id: "spec_driven", + name: "Spec-Driven", + description: "Plan first, then build", + icon: FileText, + placeholder: "Describe the feature to plan...", + helperText: "We'll generate requirements & design for your approval", + submitLabel: "Create Spec", + }, +] + +interface WorkflowModeSelectorProps { + value?: WorkflowMode + onValueChange?: (value: WorkflowMode) => void + className?: string +} + +export function WorkflowModeSelector({ + value = "quick", + onValueChange, + className, +}: WorkflowModeSelectorProps) { + const handleChange = (newValue: string) => { + onValueChange?.(newValue as WorkflowMode) + } + + const selectedMode = workflowModes.find((m) => m.id === value) || workflowModes[0] + + return ( + + ) +} + +// Helper to get mode config +export function getWorkflowModeConfig(mode: WorkflowMode): WorkflowModeOption { + return workflowModes.find((m) => m.id === mode) || workflowModes[0] +} diff --git 
a/frontend/components/command/index.ts b/frontend/components/command/index.ts index 3a4fcc88..e1a4cd89 100644 --- a/frontend/components/command/index.ts +++ b/frontend/components/command/index.ts @@ -3,3 +3,4 @@ export { ModelSelector, type Model } from "./ModelSelector" export { RepoSelector, type Project, type Repository } from "./RepoSelector" export { RecentAgentsSidebar, type RecentAgent } from "./RecentAgentsSidebar" export { CommandPalette } from "./CommandPalette" +export { WorkflowModeSelector, workflowModes, getWorkflowModeConfig, type WorkflowMode, type WorkflowModeOption } from "./WorkflowModeSelector" diff --git a/frontend/components/layout/AuthLayout.tsx b/frontend/components/layout/AuthLayout.tsx index a2ece88b..924f08e2 100644 --- a/frontend/components/layout/AuthLayout.tsx +++ b/frontend/components/layout/AuthLayout.tsx @@ -2,6 +2,7 @@ import Link from "next/link" import { Card, CardContent, CardDescription, CardHeader, CardTitle } from "@/components/ui/card" +import { OmoiOSLogo } from "@/components/ui/omoios-logo" interface AuthLayoutProps { children: React.ReactNode @@ -23,11 +24,8 @@ export function AuthLayout({ return (
{/* Logo */} - -
- O -
- OmoiOS + + {/* Auth Card */} diff --git a/frontend/components/layout/IconRail.tsx b/frontend/components/layout/IconRail.tsx index c7dcc094..2832ea15 100644 --- a/frontend/components/layout/IconRail.tsx +++ b/frontend/components/layout/IconRail.tsx @@ -18,6 +18,7 @@ import { TooltipProvider, TooltipTrigger, } from "@/components/ui/tooltip" +import { OmoiOSMark } from "@/components/ui/omoios-logo" export type NavSection = "command" | "projects" | "phases" | "sandboxes" | "analytics" | "organizations" | "settings" @@ -81,9 +82,9 @@ export function IconRail({ activeSection, onSectionChange, className }: IconRail {/* Logo */} - O + {/* Main Navigation */} diff --git a/frontend/components/sandbox/EventRenderer.tsx b/frontend/components/sandbox/EventRenderer.tsx index 42b6aa5f..67dcb8ac 100644 --- a/frontend/components/sandbox/EventRenderer.tsx +++ b/frontend/components/sandbox/EventRenderer.tsx @@ -37,6 +37,16 @@ import { Plus, Minus, File, + Loader2, + Plug, + Database, + FileSearch, + Send, + Key, + Zap, + BookOpen, + DollarSign, + Hash, } from "lucide-react" import type { SandboxEvent } from "@/lib/api/types" import { Markdown } from "@/components/ui/markdown" @@ -1064,15 +1074,646 @@ function TodoCard({ todos, timestamp }: TodoCardProps) { ) } +// ============================================================================ +// Ask User Question Card - Beautiful question display +// ============================================================================ + +interface QuestionOption { + label: string + description?: string +} + +interface Question { + question: string + header?: string + options: QuestionOption[] + multiSelect?: boolean +} + +interface AskUserQuestionCardProps { + questions: Question[] + answer?: string + status?: "running" | "completed" + timestamp?: string +} + +function AskUserQuestionCard({ questions, answer, status = "completed", timestamp }: AskUserQuestionCardProps) { + const isWaiting = status === "running" && !answer + + return ( +
+ {/* Header */} +
+ + + {isWaiting ? "Waiting for Response" : "Question"} + + {isWaiting && ( + + + Pending + + )} + {timestamp && ( + {timestamp} + )} +
+ + {/* Questions */} +
+ {questions.map((q, qIdx) => ( +
+ {/* Question header badge */} + {q.header && ( + + {q.header} + + )} + + {/* Question text */} +

{q.question}

+ + {/* Options */} +
+ {q.options.map((opt, optIdx) => ( +
+
+ {/* Empty circle/checkbox */} +
+
+

{opt.label}

+ {opt.description && ( +

{opt.description}

+ )} +
+
+ ))} +
+
+ ))} + + {/* Answer if provided */} + {answer && ( +
+
+ +
+

Response

+

{answer}

+
+
+
+ )} +
+
+ ) +} + +// ============================================================================ +// MCP Tool Card - Clean display for MCP server tool calls +// ============================================================================ + +// Parse MCP tool name: mcp__server__tool → { server: "server", tool: "tool" } +// Handles server names with underscores like "spec_workflow" +// Pattern: mcp____ where server/tool can contain single underscores +function parseMcpToolName(toolName: string): { server: string; tool: string } | null { + if (!toolName.startsWith("mcp__")) return null + + // Remove "mcp__" prefix and split by double underscore + const withoutPrefix = toolName.slice(5) // Remove "mcp__" + const lastDoubleUnderscoreIndex = withoutPrefix.lastIndexOf("__") + + if (lastDoubleUnderscoreIndex === -1) return null + + const server = withoutPrefix.slice(0, lastDoubleUnderscoreIndex) + const tool = withoutPrefix.slice(lastDoubleUnderscoreIndex + 2) + + if (!server || !tool) return null + + return { server, tool } +} + +// Get display-friendly server name +function formatServerName(server: string): string { + return server + .split("_") + .map(word => word.charAt(0).toUpperCase() + word.slice(1)) + .join(" ") +} + +// Get display-friendly tool name +function formatToolName(tool: string): string { + return tool + .split(/[-_]/) + .map(word => word.charAt(0).toUpperCase() + word.slice(1)) + .join(" ") +} + +// Get server-specific icon and color +function getMcpServerConfig(server: string): { icon: typeof Plug; color: string; bgColor: string } { + const configs: Record = { + spec_workflow: { icon: FileSearch, color: "text-violet-500", bgColor: "bg-violet-500/10" }, + database: { icon: Database, color: "text-cyan-500", bgColor: "bg-cyan-500/10" }, + api: { icon: Send, color: "text-blue-500", bgColor: "bg-blue-500/10" }, + auth: { icon: Key, color: "text-amber-500", bgColor: "bg-amber-500/10" }, + } + return configs[server] || { icon: Plug, color: "text-indigo-500", 
bgColor: "bg-indigo-500/10" } +} + +interface McpToolCardProps { + server: string + tool: string + input: Record + output?: string + status?: "running" | "completed" | "error" + timestamp?: string +} + +function McpToolCard({ server, tool, input, output, status = "completed", timestamp }: McpToolCardProps) { + const [isOpen, setIsOpen] = useState(false) + const [copiedInput, setCopiedInput] = useState(false) + const [copiedOutput, setCopiedOutput] = useState(false) + + const config = getMcpServerConfig(server) + const Icon = config.icon + const isRunning = status === "running" + const hasOutput = !!output + + const handleCopyInput = (e: React.MouseEvent) => { + e.stopPropagation() + navigator.clipboard.writeText(JSON.stringify(input, null, 2)) + setCopiedInput(true) + setTimeout(() => setCopiedInput(false), 2000) + } + + const handleCopyOutput = (e: React.MouseEvent) => { + e.stopPropagation() + if (output) { + navigator.clipboard.writeText(output) + setCopiedOutput(true) + setTimeout(() => setCopiedOutput(false), 2000) + } + } + + // Format input as clean key-value pairs + const inputEntries = Object.entries(input) + const hasManyInputs = inputEntries.length > 3 + + // Try to parse output as JSON for nicer formatting + let parsedOutput: Record | string | null = null + let isJsonOutput = false + if (output) { + try { + parsedOutput = JSON.parse(output) + isJsonOutput = typeof parsedOutput === "object" && parsedOutput !== null + } catch { + parsedOutput = output + } + } + + return ( + +
+ + + + +
+ {/* Input Section */} +
+
+ Input + +
+ {hasManyInputs ? ( + // Show as JSON for complex inputs +
+                  {JSON.stringify(input, null, 2)}
+                
+ ) : ( + // Show as clean key-value pairs for simple inputs +
+ {inputEntries.map(([key, value]) => ( +
+ + {key}: + + + {typeof value === "string" ? value : JSON.stringify(value)} + +
+ ))} +
+ )} +
+ + {/* Output Section */} + {hasOutput && ( +
+
+ Output + +
+
+                  {isJsonOutput ? JSON.stringify(parsedOutput, null, 2) : output}
+                
+
+ )} + + {/* Running indicator */} + {isRunning && !hasOutput && ( +
+ + Waiting for response... +
+ )} +
+
+
+
+ ) +} + +// ============================================================================ +// Skill Invoked Card - Shows when a skill is invoked +// ============================================================================ + +interface SkillInvokedCardProps { + skillName: string + input?: Record + timestamp?: string +} + +function SkillInvokedCard({ skillName, input, timestamp }: SkillInvokedCardProps) { + // Format skill name for display + const displayName = skillName + .split(/[-_]/) + .map(word => word.charAt(0).toUpperCase() + word.slice(1)) + .join(" ") + + return ( +
+
+ + + Skill Invoked + + + {skillName} + + + {timestamp && ( + {timestamp} + )} +
+
+

+ Executing {displayName} skill +

+ {input && Object.keys(input).length > 0 && ( +
+ {Object.entries(input).map(([key, value]) => ( + + {key}: {String(value)} + + ))} +
+ )} +
+
+ ) +} + +// ============================================================================ +// Subagent Card - Shows subagent invocation and completion +// ============================================================================ + +interface SubagentCardProps { + subagentType: string + description?: string + prompt?: string + status: "running" | "completed" + timestamp?: string + result?: string + usage?: { input_tokens?: number; output_tokens?: number } + costUsd?: number + durationMs?: number +} + +function SubagentCard({ subagentType, description, prompt, status, timestamp, result, usage, costUsd, durationMs }: SubagentCardProps) { + const [isOpen, setIsOpen] = useState(false) + const isRunning = status === "running" + + // Format subagent type for display + const displayType = subagentType + .split(/[-_]/) + .map(word => word.charAt(0).toUpperCase() + word.slice(1)) + .join(" ") + + // Format duration for display + const formatDuration = (ms: number) => { + if (ms < 1000) return `${ms}ms` + if (ms < 60000) return `${(ms / 1000).toFixed(1)}s` + return `${(ms / 60000).toFixed(1)}m` + } + + // Format cost for display + const formatCost = (cost: number) => { + if (cost < 0.01) return `$${cost.toFixed(4)}` + return `$${cost.toFixed(2)}` + } + + return ( + +
+ + + + +
+ {/* Metadata row - duration, cost, tokens */} + {!isRunning && (durationMs || costUsd || usage) && ( +
+ {durationMs && ( + + + {formatDuration(durationMs)} + + )} + {costUsd !== undefined && costUsd !== null && ( + + + {formatCost(costUsd)} + + )} + {usage && ( + + + {usage.input_tokens?.toLocaleString() || 0} in / {usage.output_tokens?.toLocaleString() || 0} out + + )} +
+ )} + + {/* Prompt section */} + {prompt && ( +
+ Prompt +

{prompt}

+
+ )} + + {/* Result section */} + {result && !isRunning && ( +
+ Result +
+

{result}

+
+
+ )} +
+
+
+
+ ) +} + +// ============================================================================ +// Read Card - File content display with syntax highlighting +// ============================================================================ + +interface ReadCardProps { + filePath: string + content?: string + numLines?: number + timestamp?: string + status?: "running" | "completed" +} + +function ReadCard({ filePath, content, numLines, timestamp, status = "completed" }: ReadCardProps) { + const [expanded, setExpanded] = useState(true) + const [showFullContent, setShowFullContent] = useState(false) + const [copied, setCopied] = useState(false) + + const language = getLanguageFromPath(filePath) + const fileName = filePath.split("/").pop() || filePath + const isRunning = status === "running" + + // Split content into lines for display + const lines = content?.split("\n") || [] + const maxLines = 30 + const displayContent = showFullContent ? content : lines.slice(0, maxLines).join("\n") + const hasMoreLines = lines.length > maxLines + + const handleCopy = (e: React.MouseEvent) => { + e.stopPropagation() + if (content) { + navigator.clipboard.writeText(content) + setCopied(true) + setTimeout(() => setCopied(false), 2000) + } + } + + // Get file icon based on extension + const getFileIcon = () => { + if (language === "python") return "🐍" + if (language === "javascript" || language === "typescript") return "📜" + if (language === "jsx" || language === "tsx") return "⚛️" + if (language === "rust") return "🦀" + if (language === "go") return "🐹" + if (language === "docker") return "🐳" + if (language === "json" || language === "yaml" || language === "toml") return "📋" + if (language === "markdown") return "📝" + if (language === "html" || language === "css") return "🎨" + return null + } + + const fileIcon = getFileIcon() + + return ( +
+ {/* Header */} +
setExpanded(!expanded)} + > + + Read + + {isRunning && ( + + + Reading + + )} + +
+ {fileIcon && {fileIcon}} + + {filePath} + +
+ + {numLines && ( + + {numLines} lines + + )} + + {content && ( + + )} + + {timestamp && ( + {timestamp} + )} + {expanded ? ( + + ) : ( + + )} +
+ + {/* Content */} + {expanded && content && ( +
+ + {displayContent || ""} + + + {/* Show more button */} + {hasMoreLines && !showFullContent && ( + + )} +
+ )} + + {/* Running state */} + {expanded && isRunning && !content && ( +
+ + Reading file... +
+ )} +
+ ) +} + // ============================================================================ // Bash Command Card - Clean terminal-style display // ============================================================================ interface BashCardProps { command: string + description?: string output?: string exitCode?: number timestamp?: string + status?: "running" | "completed" | "error" } // Parse Bash output which may be JSON with stdout/stderr fields @@ -1137,15 +1778,16 @@ function parseBashOutput(output: string): { stdout: string; stderr: string; exit return { stdout: output, stderr: "" } } -function BashCard({ command, output, exitCode: providedExitCode, timestamp }: BashCardProps) { +function BashCard({ command, description, output, exitCode: providedExitCode, timestamp, status = "completed" }: BashCardProps) { const [expanded, setExpanded] = useState(true) const [copied, setCopied] = useState(false) - + // Parse the output const { stdout, stderr, exitCode: parsedExitCode } = parseBashOutput(output || "") const exitCode = providedExitCode ?? parsedExitCode const hasOutput = stdout || stderr const isError = (exitCode !== undefined && exitCode !== 0) || !!stderr + const isRunning = status === "running" const handleCopy = (e: React.MouseEvent) => { e.stopPropagation() @@ -1154,47 +1796,55 @@ function BashCard({ command, output, exitCode: providedExitCode, timestamp }: Ba setTimeout(() => setCopied(false), 2000) } + // Truncate command for header display + const truncatedCommand = command.length > 60 ? command.slice(0, 60) + "..." : command + return (
{/* Header */} -
setExpanded(!expanded)} > Terminal - {isError && ( + {isRunning && ( + + + Running + + )} + {isError && !isRunning && ( {stderr ? "Error" : `Exit ${exitCode}`} )} - - {command.length > 50 ? command.slice(0, 50) + "..." : command} - - + {/* Show description if available, otherwise truncated command */} + + {description || truncatedCommand} + + {hasOutput && ( - )} - + {timestamp && ( {timestamp} )} - {hasOutput && ( - expanded ? ( - - ) : ( - - ) + {expanded ? ( + + ) : ( + )}
- + {/* Terminal content */} {expanded && (
@@ -1205,7 +1855,7 @@ function BashCard({ command, output, exitCode: providedExitCode, timestamp }: Ba {command}
- + {/* Output */} {stdout && (
@@ -1214,7 +1864,7 @@ function BashCard({ command, output, exitCode: providedExitCode, timestamp }: Ba
)} - + {/* Stderr */} {stderr && (
@@ -1223,13 +1873,21 @@ function BashCard({ command, output, exitCode: providedExitCode, timestamp }: Ba
)} - - {/* No output message */} - {!stdout && !stderr && ( + + {/* No output message - only show for completed commands */} + {!stdout && !stderr && !isRunning && (
Command completed with no output
)} + + {/* Running indicator */} + {isRunning && !stdout && !stderr && ( +
+ + Running... +
+ )}
)}
@@ -1293,6 +1951,12 @@ export function EventRenderer({ event, className }: EventRendererProps) { if (event_type === "agent.message" || event_type === "agent.assistant_message") { const content = getString(data, "content") if (!content) return null + + // Skip messages that are actually subagent prompts (they're rendered by SubagentCard) + // These typically come through as agent.message but should be suppressed + const isSubagentContext = data.subagent_type || data.tool === "Task" + if (isSubagentContext) return null + return
} @@ -1316,6 +1980,9 @@ export function EventRenderer({ event, className }: EventRendererProps) { const toolInput = (data.tool_input || {}) as Record const toolResponse = getString(data, "tool_response") + // Skip Task tool - this is handled by agent.subagent_invoked/completed events + if (tool === "Task") return null + // Write tool - show full file content if (tool === "Write") { const filePath = getString(toolInput, "filePath") || getString(toolInput, "file_path") || "" @@ -1366,13 +2033,71 @@ export function EventRenderer({ event, className }: EventRendererProps) { ) } + // Read tool - file content display + if (tool === "Read") { + const filePath = getString(toolInput, "filePath") || getString(toolInput, "file_path") || "" + // Parse toolResponse to extract file content + let fileContent = "" + let numLines: number | undefined + + if (toolResponse) { + // Try to extract content from Python dict format: {'type': 'text', 'file': {'content': '...'}} + // Use regex to extract the content field directly + // Use [\s\S] instead of . 
to match across newlines (since 's' flag not available) + const contentMatch = toolResponse.match(/'content':\s*'((?:[^'\\]|\\[\s\S])*)'(?:,|\})/) + if (contentMatch) { + fileContent = contentMatch[1] + // Unescape common escape sequences + .replace(/\\n/g, "\n") + .replace(/\\t/g, "\t") + .replace(/\\r/g, "\r") + .replace(/\\\\/g, "\\") + .replace(/\\'/g, "'") + .replace(/\\"/g, '"') + } else { + // Try standard JSON parsing + try { + const parsed = JSON.parse(toolResponse) + if (parsed.file?.content) { + fileContent = parsed.file.content + numLines = parsed.file.numLines || parsed.file.totalLines + } else if (typeof parsed === "string") { + fileContent = parsed + } + } catch { + // Not JSON, use raw response + fileContent = toolResponse + } + } + + // Try to extract numLines/totalLines + const numLinesMatch = toolResponse.match(/'(?:numLines|totalLines)':\s*(\d+)/) + if (numLinesMatch) { + numLines = parseInt(numLinesMatch[1], 10) + } + } + + return ( +
+ +
+ ) + } + // Bash tool if (tool === "Bash") { const command = getString(toolInput, "command") + const description = getString(toolInput, "description") return (
@@ -1418,6 +2143,36 @@ export function EventRenderer({ event, className }: EventRendererProps) { return
} + // AskUserQuestion tool + if (tool === "AskUserQuestion" && toolInput.questions) { + const questions = toolInput.questions as Question[] + return ( +
+ +
+ ) + } + + // MCP tool calls - detect by mcp__ prefix + const mcpParsed = parseMcpToolName(tool) + if (mcpParsed) { + return ( +
+ +
+ ) + } + // Default tool card return (
@@ -1431,6 +2186,39 @@ export function EventRenderer({ event, className }: EventRendererProps) { const tool = getString(data, "tool") const toolInput = (data.tool_input || data.input || {}) as Record + // Skip Task tool - this is handled by agent.subagent_invoked/completed events + if (tool === "Task") return null + + // Read tool - show running state + if (tool === "Read") { + const filePath = getString(toolInput, "filePath") || getString(toolInput, "file_path") || "" + return ( +
+ +
+ ) + } + + // Bash tool - show running state with BashCard + if (tool === "Bash") { + const command = getString(toolInput, "command") + const description = getString(toolInput, "description") + return ( +
+ +
+ ) + } + // Glob tool - show running state with GlobCard if (tool === "Glob") { const pattern = getString(toolInput, "pattern") @@ -1463,6 +2251,36 @@ export function EventRenderer({ event, className }: EventRendererProps) { ) } + // AskUserQuestion tool - show waiting state + if (tool === "AskUserQuestion" && toolInput.questions) { + const questions = toolInput.questions as Question[] + return ( +
+ +
+ ) + } + + // MCP tool calls - show running state + const mcpParsed = parseMcpToolName(tool) + if (mcpParsed) { + return ( +
+ +
+ ) + } + return (
@@ -1546,6 +2364,70 @@ export function EventRenderer({ event, className }: EventRendererProps) { ) } + // Skill invoked events + if (event_type === "agent.skill_invoked") { + const input = (data.input || {}) as Record + const skillName = getString(input, "skill") || getString(data, "skill_name") || "unknown" + return ( +
+ +
+ ) + } + + // Subagent invoked events + if (event_type === "agent.subagent_invoked") { + const toolInput = (data.tool_input || {}) as Record + const subagentType = getString(data, "subagent_type") || getString(toolInput, "subagent_type") || "unknown" + const description = getString(data, "description") || getString(data, "subagent_description") || getString(toolInput, "description") || "" + const prompt = getString(data, "subagent_prompt") || getString(toolInput, "prompt") || "" + return ( +
+ +
+ ) + } + + // Subagent completed events + if (event_type === "agent.subagent_completed") { + const toolInput = (data.tool_input || {}) as Record + const subagentType = getString(data, "subagent_type") || getString(toolInput, "subagent_type") || "unknown" + const description = getString(data, "description") || getString(data, "subagent_description") || getString(toolInput, "description") || "" + const prompt = getString(data, "subagent_prompt") || getString(toolInput, "prompt") || "" + + // Extract result data from completed subagent + const result = getString(data, "subagent_result") || undefined + const usage = data.subagent_usage as { input_tokens?: number; output_tokens?: number } | undefined + const costUsd = typeof data.subagent_cost_usd === "number" ? data.subagent_cost_usd : undefined + const durationMs = typeof data.subagent_duration_ms === "number" ? data.subagent_duration_ms : undefined + + return ( +
+ +
+ ) + } + // Skip noise if (event_type.includes("heartbeat")) return null From f0eff30b79d31dae0701c47b77ae48064567397c Mon Sep 17 00:00:00 2001 From: Kevin Hill Date: Tue, 30 Dec 2025 13:23:06 -0300 Subject: [PATCH 008/290] Add cursor-based pagination and noise filtering to trajectory API Backend: - Add cursor-based pagination to trajectory endpoint (cursor, direction params) - Filter noisy explore subagent events (Glob **, Grep searches) - Add next_cursor, prev_cursor, has_more to TrajectorySummaryResponse - Change default limit from 500 to 100 per page for better performance - Fix duplicate validation sandbox spawning (orchestrator polls instead) - Add validating/pending_validation status handling Frontend: - Update TrajectorySummaryResponse type with cursor fields - Update sandbox API client for cursor pagination - Implement infinite scroll in useSandboxMonitor hook - Add "Load older events" button to sandbox detail page - Add validating status to TasksPanel and TaskCard - Reset pagination state when sandbox changes This fixes the issue where events weren't showing for sandboxes with more than 100 events - the old offset-based pagination with oldest-first ordering meant newest events were cut off. Cursor-based pagination prevents page shift issues when new events arrive. 
--- backend/omoi_os/api/routes/sandbox.py | 197 ++++++++++++++---- backend/omoi_os/services/task_queue.py | 93 +++++++++ backend/omoi_os/services/task_validator.py | 26 +-- .../omoi_os/workers/orchestrator_worker.py | 168 +++++++++++++++ .../app/(app)/sandbox/[sandboxId]/page.tsx | 23 ++ frontend/app/(app)/sandboxes/page.tsx | 14 +- frontend/components/custom/TaskCard.tsx | 18 +- frontend/components/panels/TasksPanel.tsx | 50 ++++- frontend/hooks/useSandbox.ts | 118 ++++++++--- frontend/lib/api/sandbox.ts | 7 +- frontend/lib/api/types.ts | 4 + 11 files changed, 630 insertions(+), 88 deletions(-) diff --git a/backend/omoi_os/api/routes/sandbox.py b/backend/omoi_os/api/routes/sandbox.py index 82d0498f..576b7eb9 100644 --- a/backend/omoi_os/api/routes/sandbox.py +++ b/backend/omoi_os/api/routes/sandbox.py @@ -919,13 +919,18 @@ class TrajectorySummaryResponse(BaseModel): - Excluding heartbeat events from the main event list - Providing a summary of heartbeats (count, first, last) - Filtering out noise to show meaningful trajectory events + - Supporting cursor-based pagination for infinite scroll """ sandbox_id: str events: list[SandboxEventItem] heartbeat_summary: HeartbeatSummary total_events: int # Total including heartbeats - trajectory_events: int # Count excluding heartbeats + trajectory_events: int # Count excluding heartbeats and noise + # Cursor-based pagination + next_cursor: Optional[str] = None # ID of oldest event in this batch (for loading older) + prev_cursor: Optional[str] = None # ID of newest event in this batch (for loading newer) + has_more: bool = False # Whether there are more events to load def query_sandbox_events( @@ -1110,7 +1115,7 @@ async def get_sandbox_events( def query_trajectory_summary( db: DatabaseService, sandbox_id: str, - limit: int = 100, + limit: int = 500, ) -> dict: """ Query trajectory events with heartbeat aggregation (SYNC version). 
@@ -1119,11 +1124,12 @@ def query_trajectory_summary( - Excluding heartbeat events from the main list - Aggregating heartbeat info (count, first, last) - Returning only meaningful trajectory events + - Returning events in descending order (newest first) so users always see latest Args: db: Database service sandbox_id: Sandbox identifier - limit: Maximum non-heartbeat events to return + limit: Maximum non-heartbeat events to return (default 500) Returns: Dict with events, heartbeat_summary, and counts @@ -1160,11 +1166,12 @@ def query_trajectory_summary( last_heartbeat = heartbeat_stats.last if heartbeat_stats else None # Get non-heartbeat events (the actual trajectory) + # Order by DESCENDING (newest first) so we get the most recent events trajectory_events = ( session.query(SandboxEvent) .filter(SandboxEvent.sandbox_id == sandbox_id) .filter(~SandboxEvent.event_type.in_(heartbeat_types)) - .order_by(SandboxEvent.created_at.asc()) # Chronological order + .order_by(SandboxEvent.created_at.desc()) # Newest first .limit(limit) .all() ) @@ -1198,30 +1205,37 @@ async def query_trajectory_summary_async( db: DatabaseService, sandbox_id: str, limit: int = 100, + cursor: Optional[str] = None, + direction: str = "older", ) -> dict: """ Query trajectory events with heartbeat aggregation (ASYNC version - non-blocking). 
This provides a cleaner view by: - - Excluding heartbeat events from the main list + - Excluding heartbeat and noisy events from the main list - Aggregating heartbeat info (count, first, last) - Returning only meaningful trajectory events + - Supporting cursor-based pagination for infinite scroll Args: db: Database service sandbox_id: Sandbox identifier - limit: Maximum non-heartbeat events to return + limit: Maximum events to return per page (default 100) + cursor: Event ID to paginate from (cursor-based pagination) + direction: "older" to load events older than cursor, "newer" for newer Returns: - Dict with events, heartbeat_summary, and counts + Dict with events, heartbeat_summary, counts, and pagination cursors """ - from sqlalchemy import func, select + from sqlalchemy import func, select, and_, or_ from omoi_os.models.sandbox_event import SandboxEvent async with db.get_async_session() as session: - # Define heartbeat event types to exclude from main list - heartbeat_types = ["agent.heartbeat", "heartbeat"] + # Event types to exclude from trajectory + # - Heartbeats: Just noise, summarized separately + # - Explore subagent tool_use/tool_result for Read/Glob/Grep: Very noisy file reads + excluded_event_types = ["agent.heartbeat", "heartbeat"] # Get total count of all events total_result = await session.execute( @@ -1239,7 +1253,7 @@ async def query_trajectory_summary_async( func.max(SandboxEvent.created_at).label("last"), ) .filter(SandboxEvent.sandbox_id == sandbox_id) - .filter(SandboxEvent.event_type.in_(heartbeat_types)) + .filter(SandboxEvent.event_type.in_(["agent.heartbeat", "heartbeat"])) ) heartbeat_stats = heartbeat_result.first() @@ -1247,17 +1261,104 @@ async def query_trajectory_summary_async( first_heartbeat = heartbeat_stats.first if heartbeat_stats else None last_heartbeat = heartbeat_stats.last if heartbeat_stats else None - # Get non-heartbeat events (the actual trajectory) - trajectory_result = await session.execute( - select(SandboxEvent) - 
.filter(SandboxEvent.sandbox_id == sandbox_id) - .filter(~SandboxEvent.event_type.in_(heartbeat_types)) - .order_by(SandboxEvent.created_at.asc()) # Chronological order - .limit(limit) + # Build base query for trajectory events + base_filter = and_( + SandboxEvent.sandbox_id == sandbox_id, + ~SandboxEvent.event_type.in_(excluded_event_types), ) - trajectory_events = trajectory_result.scalars().all() - trajectory_count = len(trajectory_events) + # If cursor provided, get the cursor event's created_at for pagination + cursor_timestamp = None + if cursor: + cursor_result = await session.execute( + select(SandboxEvent.created_at).filter(SandboxEvent.id == cursor) + ) + cursor_row = cursor_result.first() + if cursor_row: + cursor_timestamp = cursor_row[0] + + # Build pagination filter + if cursor_timestamp: + if direction == "older": + # Get events older than cursor (created_at < cursor's created_at) + pagination_filter = and_( + base_filter, + SandboxEvent.created_at < cursor_timestamp + ) + else: + # Get events newer than cursor (created_at > cursor's created_at) + pagination_filter = and_( + base_filter, + SandboxEvent.created_at > cursor_timestamp + ) + else: + pagination_filter = base_filter + + # Query events - newest first for "older" direction, oldest first for "newer" + if direction == "older": + trajectory_result = await session.execute( + select(SandboxEvent) + .filter(pagination_filter) + .order_by(SandboxEvent.created_at.desc()) + .limit(limit + 1) # +1 to check if there are more + ) + else: + trajectory_result = await session.execute( + select(SandboxEvent) + .filter(pagination_filter) + .order_by(SandboxEvent.created_at.asc()) + .limit(limit + 1) + ) + + trajectory_events = list(trajectory_result.scalars().all()) + + # Check if there are more events + has_more = len(trajectory_events) > limit + if has_more: + trajectory_events = trajectory_events[:limit] + + # For "newer" direction, reverse to maintain newest-first order + if direction == "newer": + 
trajectory_events = list(reversed(trajectory_events)) + + # Filter out noisy explore subagent events + # These are tool_use/tool_result events with Read/Glob/Grep from explore subagents + def is_noisy_explore_event(event: SandboxEvent) -> bool: + if event.event_type not in ("agent.tool_use", "agent.tool_result"): + return False + if not event.event_data: + return False + # Check if it's from an explore subagent + tool = event.event_data.get("tool", "") + tool_input = event.event_data.get("tool_input", {}) + # Filter Read/Glob/Grep that are clearly explore operations + if tool in ("Read", "Glob", "Grep"): + # Keep if it's the main agent (no subagent context) + # Filter if it looks like bulk exploration + if isinstance(tool_input, dict): + # Glob with ** patterns is exploratory + pattern = tool_input.get("pattern", "") + if tool == "Glob" and "**" in str(pattern): + return True + # Grep searches are usually exploratory + if tool == "Grep": + return True + return False + + # Apply noise filter + filtered_events = [e for e in trajectory_events if not is_noisy_explore_event(e)] + + trajectory_count = len(filtered_events) + + # Build cursors for pagination + next_cursor = None # Cursor for loading older events + prev_cursor = None # Cursor for loading newer events + + if filtered_events: + # Oldest event in batch is cursor for "load older" + next_cursor = str(filtered_events[-1].id) if has_more else None + # Newest event in batch is cursor for "load newer" (when scrolling back up) + prev_cursor = str(filtered_events[0].id) return { "sandbox_id": sandbox_id, @@ -1270,7 +1371,7 @@ async def query_trajectory_summary_async( "source": e.source, "created_at": e.created_at, } - for e in trajectory_events + for e in filtered_events ], "heartbeat_summary": { "count": heartbeat_count, @@ -1279,45 +1380,63 @@ async def query_trajectory_summary_async( }, "total_events": total_count, "trajectory_events": trajectory_count, + "next_cursor": next_cursor, + "prev_cursor": prev_cursor, + 
"has_more": has_more, } @router.get("/{sandbox_id}/trajectory", response_model=TrajectorySummaryResponse) async def get_sandbox_trajectory( sandbox_id: str, - limit: int = Query(default=100, le=500, ge=1, description="Max events to return"), + limit: int = Query(default=100, le=1000, ge=1, description="Max events to return per page"), + cursor: Optional[str] = Query(default=None, description="Event ID cursor for pagination"), + direction: str = Query(default="older", description="Load 'older' or 'newer' events from cursor"), ) -> TrajectorySummaryResponse: """ - Get trajectory summary for a sandbox with heartbeat aggregation. + Get trajectory summary for a sandbox with heartbeat aggregation and cursor-based pagination. This endpoint is optimized for viewing agent activity by: - Excluding heartbeat events from the main event list - Providing a summary of heartbeats (count, first timestamp, last timestamp) - - Returning events in chronological order (oldest to newest) + - Filtering noisy explore subagent events (Glob/Grep) + - Supporting cursor-based infinite scroll pagination + + Pagination: + - Initial load: No cursor, returns newest events first + - Load older: Pass `cursor=next_cursor` with `direction=older` + - Load newer: Pass `cursor=prev_cursor` with `direction=newer` + The frontend should reverse the order for chronological display if needed. Much faster than fetching all events when there are many heartbeats. 
Args: sandbox_id: Sandbox identifier (from URL path) - limit: Maximum number of trajectory events to return (default: 100, max: 500) + limit: Maximum number of trajectory events to return per page (default: 100, max: 1000) + cursor: Event ID to paginate from (for infinite scroll) + direction: "older" to load events older than cursor, "newer" for newer events Returns: - TrajectorySummaryResponse with filtered events and heartbeat summary + TrajectorySummaryResponse with filtered events, heartbeat summary, and pagination cursors Example: - GET /api/v1/sandboxes/sandbox-abc123/trajectory?limit=50 + Initial load: GET /api/v1/sandboxes/sandbox-abc123/trajectory?limit=100 + Load more: GET /api/v1/sandboxes/sandbox-abc123/trajectory?limit=100&cursor=event-id&direction=older Response: { "sandbox_id": "sandbox-abc123", - "events": [...], // Non-heartbeat events only + "events": [...], // Non-heartbeat, non-noise events only "heartbeat_summary": { "count": 150, "first_heartbeat": "2025-12-19T10:00:00Z", "last_heartbeat": "2025-12-19T10:25:00Z" }, "total_events": 165, - "trajectory_events": 15 + "trajectory_events": 15, + "next_cursor": "event-id-123", // For loading older events + "prev_cursor": "event-id-456", // For loading newer events + "has_more": true } """ try: @@ -1327,6 +1446,8 @@ async def get_sandbox_trajectory( db=db, sandbox_id=sandbox_id, limit=limit, + cursor=cursor, + direction=direction, ) return TrajectorySummaryResponse( sandbox_id=result["sandbox_id"], @@ -1334,17 +1455,19 @@ async def get_sandbox_trajectory( heartbeat_summary=HeartbeatSummary(**result["heartbeat_summary"]), total_events=result["total_events"], trajectory_events=result["trajectory_events"], + next_cursor=result.get("next_cursor"), + prev_cursor=result.get("prev_cursor"), + has_more=result.get("has_more", False), ) except Exception as e: - logger.error(f"Failed to get trajectory for sandbox {sandbox_id}: {e}") - # Return empty response if error - return TrajectorySummaryResponse( - 
sandbox_id=sandbox_id, - events=[], - heartbeat_summary=HeartbeatSummary(count=0), - total_events=0, - trajectory_events=0, + import traceback + error_details = traceback.format_exc() + logger.error( + f"Failed to get trajectory for sandbox {sandbox_id}: {e}\n{error_details}" ) + # Re-raise to let FastAPI handle the error and return a proper 500 response + # This makes debugging easier - the frontend will see the error instead of empty events + raise # ============================================================================ diff --git a/backend/omoi_os/services/task_queue.py b/backend/omoi_os/services/task_queue.py index d3ae2d79..c49140b5 100644 --- a/backend/omoi_os/services/task_queue.py +++ b/backend/omoi_os/services/task_queue.py @@ -1707,3 +1707,96 @@ def get_next_task_with_concurrency_limit( session.refresh(task) session.expunge(task) return task + + def get_next_validation_task( + self, + max_concurrent_per_project: int = 5, + ) -> Task | None: + """ + Get next task that needs validation (status = 'pending_validation'). + + This method is used by the orchestrator to spawn validation sandboxes + for tasks that have completed implementation and need review. 
+ + Args: + max_concurrent_per_project: Maximum concurrent tasks per project (default: 5) + + Returns: + Task object or None if no tasks need validation + """ + from omoi_os.models.ticket import Ticket + + with self.db.get_session() as session: + # Get pending_validation tasks + query = session.query(Task).filter( + Task.status == "pending_validation", + ) + tasks = query.all() + + if not tasks: + return None + + # Filter by project concurrency limits + available_tasks = [] + project_running_counts: dict[str, int] = {} + + for task in tasks: + # Get project ID for this task + ticket = session.query(Ticket).filter(Ticket.id == task.ticket_id).first() + if not ticket or not ticket.project_id: + # Tasks without a project are allowed (no limit) + available_tasks.append(task) + continue + + project_id = ticket.project_id + + # Check/cache running count for this project + if project_id not in project_running_counts: + count = ( + session.query(Task) + .join(Ticket, Task.ticket_id == Ticket.id) + .filter( + Ticket.project_id == project_id, + Task.status.in_(["claiming", "assigned", "running", "validating"]), + ) + .count() + ) + project_running_counts[project_id] = count + + # Skip if project is at capacity + if project_running_counts[project_id] >= max_concurrent_per_project: + logger.debug( + f"Project {project_id} at capacity ({project_running_counts[project_id]}/{max_concurrent_per_project}), " + f"skipping validation task {task.id}" + ) + continue + + available_tasks.append(task) + + if not available_tasks: + return None + + # Get oldest pending_validation task (FIFO for validation) + task = min(available_tasks, key=lambda t: t.updated_at or t.created_at) + + # Atomic claim: set status to 'validating' + result = session.execute( + text(""" + UPDATE tasks + SET status = 'validating' + WHERE id = :task_id + AND status = 'pending_validation' + RETURNING id + """), + {"task_id": str(task.id)} + ) + claimed_row = result.fetchone() + session.commit() + + if not 
claimed_row: + logger.debug(f"Validation task {task.id} was claimed by another process, skipping") + return None + + session.refresh(task) + session.expunge(task) + return task diff --git a/backend/omoi_os/services/task_validator.py b/backend/omoi_os/services/task_validator.py index c0cd24a3..bceb5a14 100644 --- a/backend/omoi_os/services/task_validator.py +++ b/backend/omoi_os/services/task_validator.py @@ -154,23 +154,15 @@ async def request_validation( f"Task {task_id} marked as pending_validation (iteration {iteration})" ) - # Spawn validator agent - validator_info = await self._spawn_validator(task_id, sandbox_id, iteration) - - # Store validator info in task result for later lookup - if validator_info: - async with self.db.get_async_session() as session: - result = await session.execute( - select(Task).filter(Task.id == task_id) - ) - task = result.scalar_one_or_none() - if task: - task.result = { - **(task.result or {}), - "validator_sandbox_id": validator_info["sandbox_id"], - "validator_agent_id": validator_info["agent_id"], - } - await session.commit() + # NOTE: We do NOT spawn the validator here anymore. + # The orchestrator worker polls for pending_validation tasks and spawns + # validation sandboxes via get_next_validation_task(). This ensures: + # 1. No duplicate sandbox spawns (orchestrator uses atomic claims) + # 2. Consistent validation flow through the orchestrator + # 3. Proper concurrency limits are respected + # + # The _spawn_validator() method is kept for manual/testing use but is not + # called in the normal flow. 
# Publish event if self.event_bus: diff --git a/backend/omoi_os/workers/orchestrator_worker.py b/backend/omoi_os/workers/orchestrator_worker.py index 88c8df44..19aa009e 100644 --- a/backend/omoi_os/workers/orchestrator_worker.py +++ b/backend/omoi_os/workers/orchestrator_worker.py @@ -661,8 +661,176 @@ async def orchestrator_loop(): log.info("task_assigned", agent_id=agent_id) else: + # No pending tasks - check for validation tasks log.debug("no_pending_tasks") + # Also check for tasks needing validation (in addition to pending tasks) + # This spawns validation sandboxes for tasks in pending_validation status + if sandbox_execution and daytona_spawner: + validation_task = queue.get_next_validation_task( + max_concurrent_per_project=max_concurrent_per_project, + ) + if validation_task: + val_task_id = str(validation_task.id) + val_phase_id = validation_task.phase_id or "PHASE_IMPLEMENTATION" + + val_log = log.bind( + task_id=val_task_id, + phase=val_phase_id, + ticket_id=str(validation_task.ticket_id), + task_type="validation", + ) + val_log.info( + "validation_task_found", + task_status=validation_task.status, + original_task_type=validation_task.task_type, + ) + + try: + # Extract task data for validation sandbox + import json + import base64 + + extra_env: dict[str, str] = {} + user_id_for_token = None + + with db.get_session() as session: + from omoi_os.models.ticket import Ticket + from omoi_os.models.user import User + + ticket = session.get(Ticket, validation_task.ticket_id) + if ticket: + extra_env["TICKET_ID"] = str(ticket.id) + extra_env["TICKET_TITLE"] = ticket.title or "" + extra_env["TICKET_DESCRIPTION"] = ticket.description or "" + + # Include task data with validation context + task_data = { + "task_id": str(validation_task.id), + "task_type": validation_task.task_type, + "task_description": validation_task.description or "", + "task_priority": validation_task.priority, + "phase_id": validation_task.phase_id, + "ticket_id": str(ticket.id), + 
"ticket_title": ticket.title or "", + "ticket_description": ticket.description or "", + "validation_mode": True, # Signals this is a validation run + "implementation_result": validation_task.result or {}, # Previous implementation result + } + + task_json = json.dumps(task_data) + extra_env["TASK_DATA_BASE64"] = base64.b64encode( + task_json.encode() + ).decode() + + if ticket.project: + if ticket.project.created_by: + user_id_for_token = ticket.project.created_by + extra_env["USER_ID"] = str(user_id_for_token) + owner = ticket.project.github_owner + repo = ticket.project.github_repo + if owner and repo: + extra_env["GITHUB_REPO"] = f"{owner}/{repo}" + + if user_id_for_token: + user = session.get(User, user_id_for_token) + if user: + attrs = user.attributes or {} + github_token = attrs.get("github_access_token") + if github_token: + extra_env["GITHUB_TOKEN"] = github_token + + # Register validation agent + from omoi_os.models.agent import Agent + from uuid import uuid4 + + agent_id = str(uuid4()) + with db.get_session() as session: + agent = Agent( + id=agent_id, + agent_type="validator", + phase_id=val_phase_id, + capabilities=["validation", "code-review", "test-runner"], + status="RUNNING", + tags=["sandbox", "daytona", "validation"], + health_status="healthy", + ) + session.add(agent) + session.commit() + + sandbox_runtime = os.environ.get("SANDBOX_RUNTIME", "claude") + + val_log.info( + "spawning_validation_sandbox", + agent_id=agent_id, + runtime=sandbox_runtime, + ) + + # Spawn with validation execution mode + sandbox_id = await daytona_spawner.spawn_for_task( + task_id=val_task_id, + agent_id=agent_id, + phase_id=val_phase_id, + agent_type="validator", + extra_env=extra_env if extra_env else None, + runtime=sandbox_runtime, + execution_mode="validation", # Force validation mode + ) + + # Update task with sandbox info + queue.assign_task(validation_task.id, agent_id) + + with db.get_session() as session: + task_obj = ( + session.query(Task).filter(Task.id == 
validation_task.id).first() + ) + if task_obj: + task_obj.sandbox_id = sandbox_id + task_obj.status = "running" # Move from validating to running + session.commit() + + stats["tasks_processed"] += 1 + val_log.info( + "validation_sandbox_spawned", + sandbox_id=sandbox_id, + agent_id=agent_id, + ) + + # Publish event + from omoi_os.services.event_bus import SystemEvent + + event_bus.publish( + SystemEvent( + event_type="VALIDATION_SANDBOX_SPAWNED", + entity_type="sandbox", + entity_id=sandbox_id, + payload={ + "sandbox_id": sandbox_id, + "task_id": val_task_id, + "ticket_id": str(validation_task.ticket_id), + "agent_id": agent_id, + "execution_mode": "validation", + }, + ) + ) + + except Exception as spawn_error: + import traceback + + error_details = traceback.format_exc() + stats["tasks_failed"] += 1 + val_log.error( + "validation_sandbox_spawn_failed", + error=str(spawn_error), + traceback=error_details, + ) + # Reset task status to pending_validation for retry + queue.update_task_status( + validation_task.id, + "pending_validation", + error_message=f"Validation sandbox spawn failed: {spawn_error}", + ) + # Hybrid wait: event-driven with polling fallback # - If TASK_CREATED event fires, wake up immediately # - Otherwise, poll every 5 seconds as fallback diff --git a/frontend/app/(app)/sandbox/[sandboxId]/page.tsx b/frontend/app/(app)/sandbox/[sandboxId]/page.tsx index 5062a91f..297890de 100644 --- a/frontend/app/(app)/sandbox/[sandboxId]/page.tsx +++ b/frontend/app/(app)/sandbox/[sandboxId]/page.tsx @@ -132,6 +132,9 @@ export default function SandboxDetailPage({ params }: SandboxDetailPageProps) { sendMessage, isSendingMessage, refresh, + hasMore, + isLoadingMore, + loadMoreEvents, } = useSandboxMonitor(sandboxId) // Filter and sort events, deduplicating redundant events @@ -348,6 +351,26 @@ export default function SandboxDetailPage({ params }: SandboxDetailPageProps) { {/* Events scroll area */}
+ {/* Load More button at top for loading older events */} + {hasMore && ( +
+ +
+ )} {isLoadingHistory ? (
diff --git a/frontend/app/(app)/sandboxes/page.tsx b/frontend/app/(app)/sandboxes/page.tsx index c376f9b9..c60f44b3 100644 --- a/frontend/app/(app)/sandboxes/page.tsx +++ b/frontend/app/(app)/sandboxes/page.tsx @@ -24,7 +24,7 @@ import { useSandboxTasks } from "@/hooks/useTasks" import { useQueryClient } from "@tanstack/react-query" import { taskKeys } from "@/hooks/useTasks" -type TaskStatus = "pending" | "running" | "completed" | "failed" +type TaskStatus = "pending" | "running" | "completed" | "failed" | "pending_validation" | "validating" function normalizeStatus(status: string): TaskStatus { const lower = status.toLowerCase() @@ -44,6 +44,10 @@ function normalizeStatus(status: string): TaskStatus { case "error": case "cancelled": return "failed" + case "pending_validation": + return "pending_validation" + case "validating": + return "validating" default: return "pending" } @@ -69,6 +73,8 @@ const statusConfig: Record { - if (!tasks) return { pending: 0, running: 0, completed: 0, failed: 0 } + if (!tasks) return { pending: 0, running: 0, completed: 0, failed: 0, pending_validation: 0, validating: 0 } return tasks.reduce( (acc, task) => { const status = normalizeStatus(task.status) acc[status]++ return acc }, - { pending: 0, running: 0, completed: 0, failed: 0 } + { pending: 0, running: 0, completed: 0, failed: 0, pending_validation: 0, validating: 0 } ) }, [tasks]) @@ -164,7 +170,7 @@ export default function SandboxesPage() { > All - {(["running", "pending", "completed", "failed"] as TaskStatus[]).map((status) => { + {(["running", "validating", "pending_validation", "pending", "completed", "failed"] as TaskStatus[]).map((status) => { const config = statusConfig[status] const count = statusCounts[status] return ( diff --git a/frontend/components/custom/TaskCard.tsx b/frontend/components/custom/TaskCard.tsx index 112a2821..10713420 100644 --- a/frontend/components/custom/TaskCard.tsx +++ b/frontend/components/custom/TaskCard.tsx @@ -2,9 +2,9 @@ import Link from 
"next/link" import { cn } from "@/lib/utils" -import { Loader2, Check, X, AlertCircle, Clock } from "lucide-react" +import { Loader2, Check, X, AlertCircle, Clock, ShieldCheck } from "lucide-react" -export type TaskStatus = "pending" | "assigned" | "running" | "completed" | "failed" +export type TaskStatus = "pending" | "assigned" | "running" | "completed" | "failed" | "pending_validation" | "validating" interface TaskCardProps { id: string @@ -43,6 +43,16 @@ const statusConfig = { iconClass: "text-destructive", label: "Failed", }, + pending_validation: { + icon: ShieldCheck, + iconClass: "text-purple-500", + label: "Awaiting Validation", + }, + validating: { + icon: Loader2, + iconClass: "animate-spin text-purple-500", + label: "Validating", + }, } export function TaskCard({ @@ -105,6 +115,10 @@ function normalizeStatus(status: string): TaskStatus { case "error": case "cancelled": return "failed" + case "pending_validation": + return "pending_validation" + case "validating": + return "validating" default: return "pending" } diff --git a/frontend/components/panels/TasksPanel.tsx b/frontend/components/panels/TasksPanel.tsx index 5a18a575..c1cca33c 100644 --- a/frontend/components/panels/TasksPanel.tsx +++ b/frontend/components/panels/TasksPanel.tsx @@ -43,6 +43,10 @@ function normalizeStatus(status: string): TaskStatus { case "error": case "cancelled": return "failed" + case "pending_validation": + return "pending_validation" + case "validating": + return "validating" default: return "pending" } @@ -89,10 +93,18 @@ export function TasksPanel({ pathname }: TasksPanelProps) { normalizeStatus(t.status) === "failed" ) - const pendingTasks = filteredTasks.filter((t) => + const pendingTasks = filteredTasks.filter((t) => normalizeStatus(t.status) === "pending" ) + const pendingValidationTasks = filteredTasks.filter((t) => + normalizeStatus(t.status) === "pending_validation" + ) + + const validatingTasks = filteredTasks.filter((t) => + normalizeStatus(t.status) === 
"validating" + ) + return (
{/* Header */} @@ -155,6 +167,42 @@ export function TasksPanel({ pathname }: TasksPanelProps) {
)} + {validatingTasks.length > 0 && ( +
+ Validating + {validatingTasks.map((task) => ( + + ))} +
+ )} + + {pendingValidationTasks.length > 0 && ( +
+ Pending Validation + {pendingValidationTasks.map((task) => ( + + ))} +
+ )} + {pendingTasks.length > 0 && (
Pending diff --git a/frontend/hooks/useSandbox.ts b/frontend/hooks/useSandbox.ts index 848440e7..952061fa 100644 --- a/frontend/hooks/useSandbox.ts +++ b/frontend/hooks/useSandbox.ts @@ -3,7 +3,7 @@ */ import { useQuery, useMutation, useQueryClient } from "@tanstack/react-query" -import { useCallback, useState, useMemo } from "react" +import { useCallback, useState, useMemo, useEffect } from "react" import { sandboxApi } from "@/lib/api/sandbox" import { useEvents, type SystemEvent } from "./useEvents" import type { @@ -66,20 +66,22 @@ export function useSandboxTask(sandboxId: string | null, options: { enabled?: bo /** * Fetch sandbox trajectory (events without heartbeats, with summary) + * Supports cursor-based pagination for infinite scroll */ export function useSandboxTrajectory( sandboxId: string | null, options: { limit?: number - offset?: number + cursor?: string | null + direction?: "older" | "newer" enabled?: boolean } = {} ) { - const { limit = 100, offset = 0, enabled = true } = options + const { limit = 100, cursor, direction, enabled = true } = options return useQuery({ - queryKey: [...sandboxKeys.trajectory(sandboxId || ""), { limit, offset }], - queryFn: () => sandboxApi.getTrajectory(sandboxId!, { limit, offset }), + queryKey: [...sandboxKeys.trajectory(sandboxId || ""), { limit, cursor, direction }], + queryFn: () => sandboxApi.getTrajectory(sandboxId!, { limit, cursor, direction }), enabled: enabled && !!sandboxId, }) } @@ -184,11 +186,18 @@ export function useSandboxMessages(sandboxId: string | null, options: { enabled? // ============================================================================ /** - * Combined hook for monitoring a sandbox with real-time events and message sending + * Combined hook for monitoring a sandbox with real-time events, message sending, + * and cursor-based infinite scroll pagination. 
*/ export function useSandboxMonitor(sandboxId: string | null) { const queryClient = useQueryClient() + // State for infinite scroll pagination + const [olderEvents, setOlderEvents] = useState([]) + const [nextCursor, setNextCursor] = useState(null) + const [hasMore, setHasMore] = useState(false) + const [isLoadingMore, setIsLoadingMore] = useState(false) + // Real-time events const { events: realtimeEvents, @@ -201,6 +210,7 @@ export function useSandboxMonitor(sandboxId: string | null) { }) // Historical events (for initial load) + // Request 100 events per page (events are returned newest-first from API) const { data: historicalData, isLoading: isLoadingHistory, @@ -210,29 +220,73 @@ export function useSandboxMonitor(sandboxId: string | null) { enabled: !!sandboxId, }) + // Reset pagination state when sandbox changes + useEffect(() => { + setOlderEvents([]) + setNextCursor(null) + setHasMore(false) + }, [sandboxId]) + + // Update cursor and hasMore when initial data loads + useEffect(() => { + if (historicalData) { + setNextCursor(historicalData.next_cursor) + setHasMore(historicalData.has_more) + } + }, [historicalData]) + // Send message mutation const sendMessageMutation = useSendSandboxMessage() - // Memoize historical events reference - const historicalEvents = useMemo( - () => historicalData?.events || [], + // Memoize historical events reference (initial page) + // API returns events newest-first, so we reverse to get chronological order for display + const initialEvents = useMemo( + () => (historicalData?.events || []).slice().reverse(), [historicalData?.events] ) - // Combine historical and real-time events with memoization - // Real-time events are newer, so they go first - const uniqueEvents = useMemo(() => { - const allEvents = [...realtimeEvents, ...historicalEvents] + // Load more older events (for infinite scroll) + const loadMoreEvents = useCallback(async () => { + if (!sandboxId || !nextCursor || isLoadingMore) return - // Deduplicate by event 
type + created_at (rough dedup) - return allEvents.reduce((acc, event) => { - const key = `${event.event_type}-${event.created_at}` - if (!acc.some((e) => `${e.event_type}-${e.created_at}` === key)) { - acc.push(event) - } - return acc - }, [] as SandboxEvent[]) - }, [realtimeEvents, historicalEvents]) + setIsLoadingMore(true) + try { + const data = await sandboxApi.getTrajectory(sandboxId, { + limit: 100, + cursor: nextCursor, + direction: "older", + }) + + // Reverse to get chronological order (oldest first within this batch) + const newEvents = data.events.slice().reverse() + // Prepend older events (they go before the initial events) + setOlderEvents((prev) => [...newEvents, ...prev]) + setNextCursor(data.next_cursor) + setHasMore(data.has_more) + } catch (error) { + console.error("Failed to load more events:", error) + } finally { + setIsLoadingMore(false) + } + }, [sandboxId, nextCursor, isLoadingMore]) + + // Combine all events: older (loaded via infinite scroll) + initial + realtime + // Order: oldest first (chronological) + const uniqueEvents = useMemo(() => { + // olderEvents are already chronological (oldest first) + // initialEvents are already chronological (oldest first within initial page) + // realtimeEvents are newest first, so we need to handle them carefully + const allEvents = [...olderEvents, ...initialEvents, ...realtimeEvents] + + // Deduplicate by event ID (most reliable) or fallback to type+timestamp + const seen = new Set() + return allEvents.filter((event) => { + const key = event.id || `${event.event_type}-${event.created_at}` + if (seen.has(key)) return false + seen.add(key) + return true + }) + }, [olderEvents, initialEvents, realtimeEvents]) // Memoize sendMessage callback const sendMessage = useCallback( @@ -246,18 +300,27 @@ export function useSandboxMonitor(sandboxId: string | null) { [sandboxId, sendMessageMutation] ) - // Memoize refresh callback + // Memoize refresh callback - also clears loaded older events const refresh = 
useCallback(() => { if (sandboxId) { + setOlderEvents([]) + setNextCursor(null) + setHasMore(false) queryClient.invalidateQueries({ queryKey: sandboxKeys.trajectory(sandboxId) }) } }, [sandboxId, queryClient]) + // Clear all events including loaded older events + const clearAllEvents = useCallback(() => { + setOlderEvents([]) + clearEvents() + }, [clearEvents]) + return { // Events events: uniqueEvents, realtimeEvents, - historicalEvents, + historicalEvents: [...olderEvents, ...initialEvents], heartbeatSummary: historicalData?.heartbeat_summary, totalEvents: historicalData?.total_events || 0, @@ -268,10 +331,15 @@ export function useSandboxMonitor(sandboxId: string | null) { isLoadingHistory, historyError, + // Pagination + hasMore, + isLoadingMore, + loadMoreEvents, + // Actions sendMessage, isSendingMessage: sendMessageMutation.isPending, - clearEvents, + clearEvents: clearAllEvents, // Refresh refresh, diff --git a/frontend/lib/api/sandbox.ts b/frontend/lib/api/sandbox.ts index ba85acab..366cf97b 100644 --- a/frontend/lib/api/sandbox.ts +++ b/frontend/lib/api/sandbox.ts @@ -40,17 +40,20 @@ export async function getSandboxEvents( /** * Get trajectory summary for a sandbox (excludes heartbeats, provides summary) + * Supports cursor-based pagination for infinite scroll */ export async function getSandboxTrajectory( sandboxId: string, options: { limit?: number - offset?: number + cursor?: string | null + direction?: "older" | "newer" } = {} ): Promise { const params = new URLSearchParams() if (options.limit) params.set("limit", options.limit.toString()) - if (options.offset) params.set("offset", options.offset.toString()) + if (options.cursor) params.set("cursor", options.cursor) + if (options.direction) params.set("direction", options.direction) const query = params.toString() return api.get( diff --git a/frontend/lib/api/types.ts b/frontend/lib/api/types.ts index c3f727a2..3ac4360d 100644 --- a/frontend/lib/api/types.ts +++ b/frontend/lib/api/types.ts @@ -911,6 
+911,10 @@ export interface TrajectorySummaryResponse { heartbeat_summary: HeartbeatSummary total_events: number trajectory_events: number + // Cursor-based pagination + next_cursor: string | null // Cursor for loading older events + prev_cursor: string | null // Cursor for loading newer events + has_more: boolean // Whether there are more events to load } export interface SandboxEventsListResponse { From 3c96363c951761208f11f0f7ef0410c5bcc204b3 Mon Sep 17 00:00:00 2001 From: Kevin Hill Date: Tue, 30 Dec 2025 13:24:56 -0300 Subject: [PATCH 009/290] Add Claude settings.local.json to sandbox with allowed commands This fixes the issue where sandbox agents were asking for approval to run common development commands like `npm run type-check`. The settings.local.json file now allows: - npm/pnpm/yarn run commands - pytest, uv run, python, node - git, gh (GitHub CLI) - Common shell commands (ls, cat, mkdir, rm, cp, mv, grep, find) - cargo, go, make for other language support This file is uploaded to /root/.claude/settings.local.json when the sandbox is created with Claude runtime. 
--- backend/omoi_os/services/daytona_spawner.py | 35 +++++++++++++++++++++ 1 file changed, 35 insertions(+) diff --git a/backend/omoi_os/services/daytona_spawner.py b/backend/omoi_os/services/daytona_spawner.py index 47bbe659..0c8a4fdd 100644 --- a/backend/omoi_os/services/daytona_spawner.py +++ b/backend/omoi_os/services/daytona_spawner.py @@ -658,6 +658,41 @@ def escape_env_value(v: str) -> str: # Create skills directory sandbox.process.exec("mkdir -p /root/.claude/skills") + # Create settings.local.json with allowed commands + # This allows common development commands without requiring approval + settings_content = """{ + "permissions": { + "allow": [ + "Bash(npm run *)", + "Bash(pnpm run *)", + "Bash(yarn run *)", + "Bash(npx *)", + "Bash(pytest*)", + "Bash(uv run *)", + "Bash(python *)", + "Bash(node *)", + "Bash(git *)", + "Bash(gh *)", + "Bash(cd *)", + "Bash(ls *)", + "Bash(cat *)", + "Bash(mkdir *)", + "Bash(rm *)", + "Bash(cp *)", + "Bash(mv *)", + "Bash(grep *)", + "Bash(find *)", + "Bash(cargo *)", + "Bash(go *)", + "Bash(make *)" + ], + "deny": [], + "ask": [] + } +}""" + sandbox.fs.upload_file(settings_content.encode("utf-8"), "/root/.claude/settings.local.json") + logger.info("Uploaded Claude settings.local.json with allowed commands") + # Get skills based on execution mode # - exploration: spec-driven-dev (for creating specs/tickets/tasks) # - implementation: git-workflow, code-review, etc. (for executing tasks) From a556951db407f8575b5bbedfc3b01922fc57f066 Mon Sep 17 00:00:00 2001 From: Kevin Hill Date: Tue, 30 Dec 2025 13:32:17 -0300 Subject: [PATCH 010/290] Add explicit git push and PR instructions to implementation mode The sandbox agent was completing work but not pushing code or creating PRs. The implementation mode system prompt now explicitly instructs agents to: 1. Run tests after coding 2. Stage and commit changes 3. Push to remote (MANDATORY) 4. 
Create a Pull Request with gh CLI Added CRITICAL warning that work is NOT complete until code is pushed and PR is created. The validator checks these requirements. --- .../omoi_os/workers/claude_sandbox_worker.py | 20 ++++++++++++++++--- 1 file changed, 17 insertions(+), 3 deletions(-) diff --git a/backend/omoi_os/workers/claude_sandbox_worker.py b/backend/omoi_os/workers/claude_sandbox_worker.py index 71a31a5f..fb13ce58 100644 --- a/backend/omoi_os/workers/claude_sandbox_worker.py +++ b/backend/omoi_os/workers/claude_sandbox_worker.py @@ -795,14 +795,28 @@ def __init__(self): 1. Execute the assigned task 2. Write code to implement features or fix bugs 3. Run tests to verify your implementation -4. Create commits and pull requests when done +4. **MANDATORY: Complete the git workflow** (commit, push, create PR) **DO NOT create new specs, tickets, or tasks.** Focus on executing this specific task. -Before coding: +### Before coding: 1. Read the task specification carefully 2. Check for existing patterns in the codebase -3. Understand the requirements and acceptance criteria""") +3. Understand the requirements and acceptance criteria + +### After coding is complete (MANDATORY): +You MUST complete these steps before considering your work done: + +1. **Run tests**: Ensure all tests pass (`pytest`, `npm test`, etc.) +2. **Stage and commit**: `git add -A && git commit -m "feat(scope): description"` +3. **Push to remote**: `git push` (or `git push -u origin ` for first push) +4. **Create a Pull Request**: Use `gh pr create --title "..." --body "..."` + +**CRITICAL**: Your work is NOT complete until code is pushed and a PR is created. 
+The validator will check for: +- Clean git status (no uncommitted changes) +- Code pushed to remote (not ahead of origin) +- PR exists with proper title and description""") # Note: MCP tools are automatically available when we register MCP servers # No need to explicitly add them to allowed_tools - the SDK handles this From da47fb80d487f8351ccd8cb24a82a9ced767bcb5 Mon Sep 17 00:00:00 2001 From: Kevin Hill Date: Tue, 30 Dec 2025 13:58:07 -0300 Subject: [PATCH 011/290] Add ContinuousSandboxWorker for iterative task execution Implements the continuous_claude_sdk.md design to ensure agents complete all work (code pushed, PR created) before stopping. New components: - ContinuousSandboxWorker: Extends SandboxWorker with iteration loop - ContinuousWorkerConfig: Extended config with iteration limits - IterationState: Tracks progress across iterations - Git validation: Checks clean status, pushed state, PR existence Key features: - Iterative execution until task truly complete - Completion signal detection (TASK_COMPLETE phrase) - Git validation after completion signal - Auto-retry when validation fails (code not pushed, no PR) - Configurable limits: max_runs, max_cost_usd, max_duration - Notes file for cross-iteration context - Full event reporting for dashboard visibility DaytonaSpawner updates: - Added continuous_mode parameter to spawn_for_task - Added _get_continuous_worker_script method - Sets CONTINUOUS_MODE and default limits in env vars Usage: spawner.spawn_for_task( task_id="...", runtime="claude", continuous_mode=True, # Enable iterative execution ) --- backend/omoi_os/services/daytona_spawner.py | 49 +- backend/omoi_os/workers/__init__.py | 16 + .../workers/continuous_sandbox_worker.py | 845 ++++++++++++++++++ 3 files changed, 909 insertions(+), 1 deletion(-) create mode 100644 backend/omoi_os/workers/continuous_sandbox_worker.py diff --git a/backend/omoi_os/services/daytona_spawner.py b/backend/omoi_os/services/daytona_spawner.py index 0c8a4fdd..ad80a68f 
100644 --- a/backend/omoi_os/services/daytona_spawner.py +++ b/backend/omoi_os/services/daytona_spawner.py @@ -135,6 +135,7 @@ async def spawn_for_task( labels: Optional[Dict[str, str]] = None, runtime: str = "openhands", # "openhands" or "claude" execution_mode: str = "implementation", # "exploration", "implementation", "validation" + continuous_mode: bool = False, # Enable continuous iteration until task complete ) -> str: """Spawn a Daytona sandbox for executing a task. @@ -150,6 +151,9 @@ async def spawn_for_task( - "exploration": For feature definition (creates specs/tickets/tasks) - "implementation": For task execution (writes code, default) - "validation": For verifying implementation + continuous_mode: If True, runs iterative loop until task is complete + (code pushed, PR created) or limits are reached. Only works with + runtime="claude". Returns: Sandbox ID @@ -187,6 +191,15 @@ async def spawn_for_task( "SANDBOX_ID": sandbox_id, } + # Add continuous mode settings if enabled + if continuous_mode and runtime == "claude": + env_vars["CONTINUOUS_MODE"] = "true" + # Default limits for continuous mode (can be overridden via extra_env) + env_vars.setdefault("CONTINUOUS_MAX_RUNS", "10") + env_vars.setdefault("CONTINUOUS_MAX_COST_USD", "20.0") + env_vars.setdefault("CONTINUOUS_MAX_DURATION", "3600") # 1 hour + logger.info("Continuous mode enabled for sandbox") + # Add agent type if specified if agent_type: env_vars["AGENT_TYPE"] = agent_type @@ -787,7 +800,11 @@ def escape_env_value(v: str) -> str: # Upload the appropriate worker script if runtime == "claude": - worker_script = self._get_claude_worker_script() + if continuous_mode: + worker_script = self._get_continuous_worker_script() + logger.info("Using continuous worker script for iterative execution") + else: + worker_script = self._get_claude_worker_script() else: worker_script = self._get_worker_script() sandbox.fs.upload_file(worker_script.encode("utf-8"), "/tmp/sandbox_worker.py") @@ -2271,6 +2288,36 @@ 
async def terminate_sandbox(self, sandbox_id: str) -> bool: logger.error(f"Failed to terminate sandbox {sandbox_id} via direct API: {e}") return False + def _get_continuous_worker_script(self) -> str: + """Get the Continuous Sandbox Worker script content. + + Reads from backend/omoi_os/workers/continuous_sandbox_worker.py + This worker runs Claude Code in an iterative loop until: + - Task is complete (code pushed, PR created) + - Limits are reached (max runs, cost, duration) + - Consecutive errors occur + + Features (extends base Claude worker): + - Iterative execution with completion signal detection + - Git validation (clean status, pushed, PR exists) + - Cross-iteration context via notes file + - Automatic retry on validation failure + - Per-iteration event reporting + """ + # Try to read from file first (development mode) + worker_file = ( + Path(__file__).parent.parent / "workers" / "continuous_sandbox_worker.py" + ) + if worker_file.exists(): + logger.info(f"Loading continuous worker script from {worker_file}") + return worker_file.read_text() + + # Fallback to standard Claude worker if continuous worker not found + logger.warning( + "Continuous worker file not found, falling back to standard Claude worker" + ) + return self._get_claude_worker_script() + def mark_completed(self, sandbox_id: str, result: Optional[Dict] = None) -> None: """Mark a sandbox as completed (called when task finishes).""" info = self._sandboxes.get(sandbox_id) diff --git a/backend/omoi_os/workers/__init__.py b/backend/omoi_os/workers/__init__.py index 0e7b3592..7aef59ed 100644 --- a/backend/omoi_os/workers/__init__.py +++ b/backend/omoi_os/workers/__init__.py @@ -5,6 +5,11 @@ with the main server via HTTP callbacks (webhook pattern). Uses the Claude Agent SDK (claude_code_sdk) for Claude interactions. 
+ +Workers: +- SandboxWorker: Base worker for single-run task execution +- ContinuousSandboxWorker: Iterative worker that runs until task is complete + (code pushed, PR created) or limits are reached """ from omoi_os.workers.sandbox_agent_worker import ( @@ -15,10 +20,21 @@ process_sdk_response, ) +from omoi_os.workers.continuous_sandbox_worker import ( + ContinuousSandboxWorker, + ContinuousWorkerConfig, + IterationState, +) + __all__ = [ + # Base worker "EventReporter", "MessagePoller", "SandboxWorker", "WorkerConfig", "process_sdk_response", + # Continuous worker + "ContinuousSandboxWorker", + "ContinuousWorkerConfig", + "IterationState", ] diff --git a/backend/omoi_os/workers/continuous_sandbox_worker.py b/backend/omoi_os/workers/continuous_sandbox_worker.py new file mode 100644 index 00000000..bc1ff570 --- /dev/null +++ b/backend/omoi_os/workers/continuous_sandbox_worker.py @@ -0,0 +1,845 @@ +#!/usr/bin/env python3 +""" +Continuous Sandbox Worker - Iterative Claude execution until task completion. + +This worker extends the base SandboxWorker to run Claude Code in a continuous +loop until the task is truly complete (code pushed, PR created) or limits are hit. + +The iteration loop: +1. Executes the task with enhanced prompt including context +2. Checks for completion signal in output +3. Validates work is done (tests pass, code pushed, PR exists) +4. If not complete, continues with updated context +5. 
Stops when: completion signal threshold reached, limits hit, or validation passes + +Environment Variables (extends base worker): + CONTINUOUS_MODE - Set to "true" to enable continuous iteration + CONTINUOUS_MAX_RUNS - Maximum successful iterations (default: 10) + CONTINUOUS_MAX_COST_USD - Maximum total cost in USD (default: 20.0) + CONTINUOUS_MAX_DURATION - Maximum duration in seconds (default: 3600) + CONTINUOUS_COMPLETION_SIGNAL - Phrase to detect completion (default: TASK_COMPLETE) + CONTINUOUS_COMPLETION_THRESHOLD - Consecutive signals to stop (default: 2) + CONTINUOUS_NOTES_FILE - Notes file for context sharing (default: ITERATION_NOTES.md) + CONTINUOUS_AUTO_VALIDATE - Auto-validate git status after completion signal (default: true) + +See docs/design/continuous_claude_sdk.md for full design documentation. +""" + +import asyncio +import os +import subprocess +import time +from dataclasses import dataclass, field +from datetime import datetime, timezone +from pathlib import Path +from typing import Any, Optional + +from omoi_os.workers.claude_sandbox_worker import ( + SandboxWorker, + WorkerConfig, + EventReporter, + MessagePoller, + ClaudeSDKClient, + SDK_AVAILABLE, + logger, +) + + +# ============================================================================= +# Continuous Worker Configuration +# ============================================================================= + + +class ContinuousWorkerConfig(WorkerConfig): + """Extended configuration for continuous Claude execution. + + Inherits all WorkerConfig features: + - sandbox_id, task_id, agent_id, ticket_id + - callback_url for EventReporter + - model, api_key, permission_mode + - resume_session_id, session_transcript_b64 + - enable_spec_tools, enable_skills, enable_subagents + + Adds iteration-specific settings from environment variables. 
+ """ + + def __init__(self): + super().__init__() + + # Enable continuous mode flag + self.continuous_mode = os.environ.get("CONTINUOUS_MODE", "false").lower() == "true" + + # Iteration limits (at least one should be set for safety) + self.max_runs: Optional[int] = self._get_int_env("CONTINUOUS_MAX_RUNS", 10) + self.max_cost_usd: Optional[float] = self._get_float_env("CONTINUOUS_MAX_COST_USD", 20.0) + self.max_duration_seconds: Optional[int] = self._get_int_env("CONTINUOUS_MAX_DURATION", 3600) + + # Completion detection + self.completion_signal = os.environ.get( + "CONTINUOUS_COMPLETION_SIGNAL", + "TASK_COMPLETE" + ) + self.completion_threshold = int(os.environ.get("CONTINUOUS_COMPLETION_THRESHOLD", "2")) + + # Notes file for cross-iteration context + self.notes_file = os.environ.get("CONTINUOUS_NOTES_FILE", "ITERATION_NOTES.md") + + # Auto-validate git status after completion signal + self.auto_validate = os.environ.get("CONTINUOUS_AUTO_VALIDATE", "true").lower() == "true" + + # Git validation requirements + self.require_clean_git = os.environ.get("CONTINUOUS_REQUIRE_CLEAN_GIT", "true").lower() == "true" + self.require_pushed = os.environ.get("CONTINUOUS_REQUIRE_PUSHED", "true").lower() == "true" + self.require_pr = os.environ.get("CONTINUOUS_REQUIRE_PR", "true").lower() == "true" + + def _get_int_env(self, key: str, default: Optional[int] = None) -> Optional[int]: + val = os.environ.get(key) + if val: + return int(val) + return default + + def _get_float_env(self, key: str, default: Optional[float] = None) -> Optional[float]: + val = os.environ.get(key) + if val: + return float(val) + return default + + def validate_continuous(self) -> list[str]: + """Validate continuous-specific configuration.""" + errors = super().validate() + + # At least one limit must be set for safety + if not any([self.max_runs, self.max_cost_usd, self.max_duration_seconds]): + errors.append( + "At least one limit required: CONTINUOUS_MAX_RUNS, " + "CONTINUOUS_MAX_COST_USD, or 
CONTINUOUS_MAX_DURATION" + ) + + return errors + + def to_dict(self) -> dict: + """Return config as dict including continuous settings.""" + base = super().to_dict() + base.update({ + "continuous_mode": self.continuous_mode, + "max_runs": self.max_runs, + "max_cost_usd": self.max_cost_usd, + "max_duration_seconds": self.max_duration_seconds, + "completion_signal": self.completion_signal, + "completion_threshold": self.completion_threshold, + "notes_file": self.notes_file, + "auto_validate": self.auto_validate, + "require_clean_git": self.require_clean_git, + "require_pushed": self.require_pushed, + "require_pr": self.require_pr, + }) + return base + + +# ============================================================================= +# Iteration State Tracking +# ============================================================================= + + +@dataclass +class IterationState: + """Tracks state across iterations.""" + + iteration_num: int = 0 # Current iteration + successful_iterations: int = 0 # Completed successfully + error_count: int = 0 # Consecutive errors + extra_iterations: int = 0 # Added due to errors + total_cost: float = 0.0 # Accumulated cost + completion_signal_count: int = 0 # Consecutive completion signals + start_time: Optional[float] = None # For duration tracking + last_session_id: Optional[str] = None # For potential resume + last_transcript_b64: Optional[str] = None # For cross-sandbox resumption + validation_passed: bool = False # Whether git validation passed + validation_feedback: str = "" # Feedback from validation + + # Track what's been accomplished + tests_passed: bool = False + code_committed: bool = False + code_pushed: bool = False + pr_created: bool = False + + def to_event_data(self) -> dict: + """Convert state to event payload for EventReporter.""" + elapsed = time.time() - self.start_time if self.start_time else 0 + return { + "iteration_num": self.iteration_num, + "successful_iterations": self.successful_iterations, + 
"error_count": self.error_count, + "extra_iterations": self.extra_iterations, + "total_cost_usd": self.total_cost, + "completion_signal_count": self.completion_signal_count, + "elapsed_seconds": elapsed, + "last_session_id": self.last_session_id, + "validation_passed": self.validation_passed, + "tests_passed": self.tests_passed, + "code_committed": self.code_committed, + "code_pushed": self.code_pushed, + "pr_created": self.pr_created, + } + + +# ============================================================================= +# Git Validation +# ============================================================================= + + +def check_git_status(cwd: str) -> dict[str, Any]: + """Check git status for validation. + + Returns dict with: + - is_clean: No uncommitted changes + - is_pushed: Not ahead of remote + - has_pr: PR exists for current branch + - branch_name: Current branch + - status_output: Raw git status output + - errors: List of validation errors + """ + result = { + "is_clean": False, + "is_pushed": False, + "has_pr": False, + "branch_name": None, + "status_output": "", + "errors": [], + } + + try: + # Get current branch + branch_result = subprocess.run( + ["git", "rev-parse", "--abbrev-ref", "HEAD"], + cwd=cwd, + capture_output=True, + text=True, + timeout=10, + ) + if branch_result.returncode == 0: + result["branch_name"] = branch_result.stdout.strip() + + # Check git status + status_result = subprocess.run( + ["git", "status", "--porcelain"], + cwd=cwd, + capture_output=True, + text=True, + timeout=10, + ) + result["status_output"] = status_result.stdout + result["is_clean"] = status_result.returncode == 0 and not status_result.stdout.strip() + + if not result["is_clean"]: + result["errors"].append("Uncommitted changes detected") + + # Check if ahead of remote + status_verbose = subprocess.run( + ["git", "status"], + cwd=cwd, + capture_output=True, + text=True, + timeout=10, + ) + status_text = status_verbose.stdout + + # Check for "Your branch is ahead" 
message + if "Your branch is ahead" in status_text: + result["is_pushed"] = False + result["errors"].append("Code not pushed to remote") + elif "Your branch is up to date" in status_text or "nothing to commit" in status_text: + result["is_pushed"] = True + else: + # If we can't determine, assume it's pushed + result["is_pushed"] = True + + # Check for PR using gh CLI + try: + pr_result = subprocess.run( + ["gh", "pr", "view", "--json", "number,title,state"], + cwd=cwd, + capture_output=True, + text=True, + timeout=30, + ) + if pr_result.returncode == 0 and pr_result.stdout.strip(): + result["has_pr"] = True + else: + result["has_pr"] = False + result["errors"].append("No PR found for current branch") + except FileNotFoundError: + # gh CLI not installed + result["errors"].append("GitHub CLI (gh) not available to check PR status") + except subprocess.TimeoutExpired: + result["errors"].append("Timeout checking PR status") + + except subprocess.TimeoutExpired: + result["errors"].append("Timeout running git commands") + except Exception as e: + result["errors"].append(f"Git validation error: {str(e)}") + + return result + + +# ============================================================================= +# Continuous Sandbox Worker +# ============================================================================= + + +class ContinuousSandboxWorker(SandboxWorker): + """Extended worker for continuous/iterative Claude execution. 
+ + Inherits from SandboxWorker: + - EventReporter for HTTP callbacks + - MessagePoller for injected messages + - FileChangeTracker for diffs + - Pre/Post tool hooks + - Session transcript export/import + + Adds: + - Iteration loop with configurable limits + - Completion signal detection + - Git validation (tests pass, code pushed, PR exists) + - Cross-iteration context via notes file + - Per-iteration event reporting + """ + + def __init__(self, config: ContinuousWorkerConfig): + super().__init__(config) + self.continuous_config = config + self.iteration_state = IterationState() + + async def run(self): + """Main entry point - runs continuous or single mode based on config.""" + if self.continuous_config.continuous_mode: + return await self.run_continuous() + else: + # Fall back to standard single-run mode + return await super().run() + + async def run_continuous(self): + """Main continuous execution loop.""" + self._setup_signal_handlers() + self.running = True + self.iteration_state.start_time = time.time() + + logger.info("=" * 60) + logger.info("CONTINUOUS SANDBOX WORKER") + logger.info("=" * 60) + logger.info("Configuration: %s", self.continuous_config.to_dict()) + + # Validate continuous configuration + errors = self.continuous_config.validate_continuous() + if errors: + logger.error("Configuration errors", extra={"errors": errors}) + return 1 + + if not SDK_AVAILABLE: + logger.error("claude_agent_sdk package not installed - Run: pip install claude-agent-sdk") + return 1 + + # Setup workspace + Path(self.continuous_config.cwd).mkdir(parents=True, exist_ok=True) + + async with EventReporter(self.continuous_config) as reporter: + self.reporter = reporter + + # Report continuous session start + await reporter.report( + "continuous.started", + { + "goal": self.continuous_config.task_description + or self.continuous_config.initial_prompt + or self.continuous_config.ticket_description, + "limits": { + "max_runs": self.continuous_config.max_runs, + "max_cost_usd": 
self.continuous_config.max_cost_usd, + "max_duration_seconds": self.continuous_config.max_duration_seconds, + }, + "completion_signal": self.continuous_config.completion_signal, + "completion_threshold": self.continuous_config.completion_threshold, + "validation_requirements": { + "require_clean_git": self.continuous_config.require_clean_git, + "require_pushed": self.continuous_config.require_pushed, + "require_pr": self.continuous_config.require_pr, + }, + }, + source="worker", + ) + + async with MessagePoller(self.continuous_config) as poller: + # Main iteration loop + while self._should_continue(): + self.iteration_state.iteration_num += 1 + + # Check for injected messages (user intervention) + messages = await poller.poll() + for msg in messages: + content = msg.get("content", "") + msg_type = msg.get("message_type", "user_message") + + if msg_type == "interrupt": + logger.info("Received interrupt message") + self._should_stop = True + break + elif content: + # Store user message for next iteration + self._inject_user_context(content) + + if self._should_stop: + break + + success = await self._run_single_iteration() + + if success: + self.iteration_state.successful_iterations += 1 + self.iteration_state.error_count = 0 + + # If validation passed, we're truly done + if self.iteration_state.validation_passed: + logger.info("Validation passed - task complete!") + break + else: + self.iteration_state.error_count += 1 + self.iteration_state.extra_iterations += 1 + + if self.iteration_state.error_count >= 3: + await reporter.report( + "continuous.completed", + { + "stop_reason": "consecutive_errors", + **self.iteration_state.to_event_data(), + }, + ) + return 1 + + # Report completion + stop_reason = self._get_stop_reason() + await reporter.report( + "continuous.completed", + { + "stop_reason": stop_reason, + **self.iteration_state.to_event_data(), + }, + ) + + logger.info( + "Continuous worker completed", + extra={ + "stop_reason": stop_reason, + "iterations": 
self.iteration_state.iteration_num, + "successful": self.iteration_state.successful_iterations, + "total_cost": self.iteration_state.total_cost, + } + ) + + return 0 + + def _should_continue(self) -> bool: + """Check if iteration should continue.""" + state = self.iteration_state + config = self.continuous_config + + # Check if validation already passed + if state.validation_passed: + return False + + # Check completion signal threshold + if state.completion_signal_count >= config.completion_threshold: + # If auto-validate is enabled, only stop if validation passed + if config.auto_validate and not state.validation_passed: + # Continue to allow agent to fix issues + logger.info("Completion signal reached but validation not passed - continuing") + state.completion_signal_count = 0 # Reset to allow more iterations + else: + return False + + # Check max runs + if config.max_runs and state.successful_iterations >= config.max_runs: + return False + + # Check max cost + if config.max_cost_usd and state.total_cost >= config.max_cost_usd: + return False + + # Check max duration + if config.max_duration_seconds and state.start_time: + elapsed = time.time() - state.start_time + if elapsed >= config.max_duration_seconds: + return False + + # Check shutdown signal + if self._should_stop: + return False + + return True + + def _get_stop_reason(self) -> str: + """Determine why the loop stopped.""" + state = self.iteration_state + config = self.continuous_config + + if state.validation_passed: + return "validation_passed" + if state.completion_signal_count >= config.completion_threshold: + return "completion_signal" + if config.max_runs and state.successful_iterations >= config.max_runs: + return "max_runs_reached" + if config.max_cost_usd and state.total_cost >= config.max_cost_usd: + return "max_cost_reached" + if config.max_duration_seconds and state.start_time: + elapsed = time.time() - state.start_time + if elapsed >= config.max_duration_seconds: + return 
"max_duration_reached" + if self._should_stop: + return "shutdown_signal" + return "unknown" + + def _inject_user_context(self, content: str): + """Store user message to include in next iteration prompt.""" + # Append to notes file for next iteration + notes_path = Path(self.continuous_config.cwd) / self.continuous_config.notes_file + try: + existing = notes_path.read_text() if notes_path.exists() else "" + timestamp = datetime.now(timezone.utc).isoformat() + new_content = f"{existing}\n\n## User Message ({timestamp})\n\n{content}\n" + notes_path.write_text(new_content) + except Exception as e: + logger.warning("Failed to inject user context", extra={"error": str(e)}) + + async def _run_single_iteration(self) -> bool: + """Execute a single iteration. + + Returns True on success, False on error. + """ + state = self.iteration_state + config = self.continuous_config + + # Build enhanced prompt with context + enhanced_prompt = self._build_iteration_prompt() + + # Report iteration start + await self.reporter.report( + "iteration.started", + { + "iteration_num": state.iteration_num, + "prompt_preview": enhanced_prompt[:500], + **state.to_event_data(), + }, + ) + + try: + # Create SDK options with hooks (reuses parent's method) + pre_hook = await self._create_pre_tool_hook() + post_hook = await self._create_post_tool_hook() + sdk_options = config.to_sdk_options( + pre_tool_hook=pre_hook, + post_tool_hook=post_hook, + ) + + # Execute iteration + async with ClaudeSDKClient(options=sdk_options) as client: + await client.query(enhanced_prompt) + result, output = await self._process_messages(client) + + if result: + # Track cost and session + iteration_cost = getattr(result, "total_cost_usd", 0.0) or 0.0 + state.total_cost += iteration_cost + state.last_session_id = getattr(result, "session_id", None) + + # Export transcript for cross-sandbox resumption + if state.last_session_id: + state.last_transcript_b64 = config.export_session_transcript( + state.last_session_id + ) + + 
# Check for completion signal + output_text = "\n".join(output) if output else "" + if config.completion_signal in output_text: + state.completion_signal_count += 1 + logger.info( + "Completion signal detected", + extra={ + "count": state.completion_signal_count, + "threshold": config.completion_threshold, + } + ) + + await self.reporter.report( + "iteration.completion_signal", + { + "iteration_num": state.iteration_num, + "signal_count": state.completion_signal_count, + "threshold": config.completion_threshold, + }, + ) + + # Run git validation if auto-validate enabled + if config.auto_validate: + await self._run_validation() + else: + state.completion_signal_count = 0 # Reset on non-signal + + # Report iteration completion + await self.reporter.report( + "iteration.completed", + { + "iteration_num": state.iteration_num, + "cost_usd": iteration_cost, + "session_id": state.last_session_id, + "output_preview": output_text[:1000] if output_text else None, + **state.to_event_data(), + }, + ) + + return True + + else: + # Iteration failed (no ResultMessage) + await self.reporter.report( + "iteration.failed", + { + "iteration_num": state.iteration_num, + "error": "No result message received", + "error_type": "no_result", + "retry_allowed": state.error_count < 2, + }, + ) + return False + + except Exception as e: + logger.error("Iteration failed", extra={"error": str(e)}, exc_info=True) + await self.reporter.report( + "iteration.failed", + { + "iteration_num": state.iteration_num, + "error": str(e), + "error_type": type(e).__name__, + "retry_allowed": state.error_count < 2, + }, + ) + return False + + async def _run_validation(self): + """Run git validation to check if work is truly complete.""" + state = self.iteration_state + config = self.continuous_config + + logger.info("Running git validation...") + + git_status = check_git_status(config.cwd) + + # Update state with validation results + state.code_committed = git_status["is_clean"] + state.code_pushed = 
git_status["is_pushed"] + state.pr_created = git_status["has_pr"] + + # Determine if validation passed + validation_errors = [] + + if config.require_clean_git and not git_status["is_clean"]: + validation_errors.append("Uncommitted changes exist") + + if config.require_pushed and not git_status["is_pushed"]: + validation_errors.append("Code not pushed to remote") + + if config.require_pr and not git_status["has_pr"]: + validation_errors.append("No PR created") + + if not validation_errors: + state.validation_passed = True + state.validation_feedback = "All validation checks passed" + logger.info("Git validation PASSED") + else: + state.validation_passed = False + state.validation_feedback = "; ".join(validation_errors) + logger.info( + "Git validation FAILED", + extra={"errors": validation_errors} + ) + + # Update notes file with validation feedback for next iteration + self._update_notes_with_validation(validation_errors, git_status) + + # Report validation result + await self.reporter.report( + "iteration.validation", + { + "iteration_num": state.iteration_num, + "passed": state.validation_passed, + "feedback": state.validation_feedback, + "git_status": { + "is_clean": git_status["is_clean"], + "is_pushed": git_status["is_pushed"], + "has_pr": git_status["has_pr"], + "branch_name": git_status["branch_name"], + }, + "errors": validation_errors, + }, + ) + + def _update_notes_with_validation(self, errors: list[str], git_status: dict): + """Update notes file with validation feedback for next iteration.""" + config = self.continuous_config + notes_path = Path(config.cwd) / config.notes_file + + try: + existing = notes_path.read_text() if notes_path.exists() else "" + + feedback_section = f""" + +## VALIDATION FAILED - Iteration {self.iteration_state.iteration_num} + +The completion signal was detected, but validation checks failed. 
+**You must fix these issues before the task is truly complete:** + +### Issues Found: +""" + for error in errors: + feedback_section += f"- ❌ {error}\n" + + feedback_section += f""" +### Git Status: +- Branch: {git_status.get('branch_name', 'unknown')} +- Clean working directory: {'✅ Yes' if git_status['is_clean'] else '❌ No'} +- Code pushed to remote: {'✅ Yes' if git_status['is_pushed'] else '❌ No'} +- PR exists: {'✅ Yes' if git_status['has_pr'] else '❌ No'} + +### Required Actions: +""" + if not git_status["is_clean"]: + feedback_section += "1. Stage and commit all changes: `git add -A && git commit -m \"...\"`\n" + if not git_status["is_pushed"]: + feedback_section += "2. Push code to remote: `git push`\n" + if not git_status["has_pr"]: + feedback_section += "3. Create a pull request: `gh pr create --title \"...\" --body \"...\"`\n" + + feedback_section += f""" +**After fixing these issues, include `{config.completion_signal}` in your response again.** +""" + + notes_path.write_text(existing + feedback_section) + + except Exception as e: + logger.warning("Failed to update notes with validation feedback", extra={"error": str(e)}) + + def _build_iteration_prompt(self) -> str: + """Build enhanced prompt with iteration context.""" + config = self.continuous_config + state = self.iteration_state + + # Read notes file if exists + notes_content = "" + notes_path = Path(config.cwd) / config.notes_file + if notes_path.exists(): + try: + notes_content = notes_path.read_text() + except Exception: + pass + + # Get the primary goal + primary_goal = ( + config.task_description + or config.initial_prompt + or config.ticket_description + or "No goal specified" + ) + + # Calculate remaining budget + remaining_cost = None + if config.max_cost_usd: + remaining_cost = config.max_cost_usd - state.total_cost + + remaining_runs = None + if config.max_runs: + remaining_runs = config.max_runs - state.successful_iterations + + # Build enhanced prompt + prompt_parts = [ + "## CONTINUOUS 
WORKFLOW CONTEXT", + "", + f"This is **iteration {state.iteration_num}** of a continuous development loop.", + "Work incrementally. The loop continues until all requirements are met.", + "", + ] + + # Add progress info + prompt_parts.extend([ + "### Current Progress:", + f"- Successful iterations: {state.successful_iterations}", + f"- Cost spent: ${state.total_cost:.2f}", + ]) + + if remaining_cost is not None: + prompt_parts.append(f"- Remaining budget: ${remaining_cost:.2f}") + if remaining_runs is not None: + prompt_parts.append(f"- Remaining runs: {remaining_runs}") + + prompt_parts.extend([ + "", + "### Completion Requirements:", + "For your work to be considered complete, you MUST:", + "1. ✅ Implement the requested changes", + "2. ✅ Run tests and ensure they pass", + "3. ✅ Commit all changes (no uncommitted files)", + "4. ✅ Push code to remote (`git push`)", + "5. ✅ Create a Pull Request (`gh pr create ...`)", + "", + f"**Completion Signal**: When ALL requirements are met, include the exact phrase:", + f"**`{config.completion_signal}`**", + "", + "The system will validate your work. 
If validation fails, you'll continue", + "with specific feedback about what needs to be fixed.", + "", + ]) + + # Add primary goal + prompt_parts.extend([ + "## PRIMARY GOAL", + "", + primary_goal, + "", + ]) + + # Add previous iteration notes if they exist + if notes_content: + prompt_parts.extend([ + "## PREVIOUS ITERATION NOTES", + "", + notes_content, + "", + ]) + + # Add notes update instructions + prompt_parts.extend([ + "## NOTES UPDATE INSTRUCTIONS", + "", + f"Update `{config.notes_file}` with:", + "- What you accomplished this iteration", + "- What remains to be done", + "- Any blockers or issues", + "- Important context for the next iteration", + "", + ]) + + return "\n".join(prompt_parts) + + +# ============================================================================= +# Entry Point +# ============================================================================= + + +async def main(): + """Entry point for continuous sandbox worker.""" + config = ContinuousWorkerConfig() + worker = ContinuousSandboxWorker(config) + return await worker.run() + + +if __name__ == "__main__": + import sys + exit_code = asyncio.run(main()) + sys.exit(exit_code) From 5757eee91df5de0fc619eb1b226385d2c43ecc8c Mon Sep 17 00:00:00 2001 From: Kevin Hill Date: Tue, 30 Dec 2025 14:08:46 -0300 Subject: [PATCH 012/290] feat(workers): Integrate continuous mode directly into SandboxWorker Implements continuous iteration mode in the base claude_sandbox_worker.py to ensure tasks complete fully (code pushed, PR created) before stopping. 
Key changes: - Add IterationState dataclass for tracking across iterations - Add check_git_status() function for validation - Add continuous mode settings to WorkerConfig: - continuous_mode: Enabled by default for implementation/validation - max_iterations, max_total_cost_usd, max_duration_seconds - completion_signal, completion_threshold - require_clean_git, require_code_pushed, require_pr_created - Update SandboxWorker.run() with iteration loop: - Detect completion signal (TASK_COMPLETE) in output - Run git validation when signal detected - Write validation feedback to ITERATION_NOTES.md - Continue iterating until validation passes or limits reached - Update __init__.py to export from production worker This addresses the issue where agents would stop before completing the full git workflow (commit, push, PR) due to context length limits. --- backend/omoi_os/workers/__init__.py | 26 +- .../omoi_os/workers/claude_sandbox_worker.py | 620 +++++++++++++++++- 2 files changed, 625 insertions(+), 21 deletions(-) diff --git a/backend/omoi_os/workers/__init__.py b/backend/omoi_os/workers/__init__.py index 7aef59ed..1075dfe9 100644 --- a/backend/omoi_os/workers/__init__.py +++ b/backend/omoi_os/workers/__init__.py @@ -7,34 +7,42 @@ Uses the Claude Agent SDK (claude_code_sdk) for Claude interactions. 
Workers: -- SandboxWorker: Base worker for single-run task execution -- ContinuousSandboxWorker: Iterative worker that runs until task is complete - (code pushed, PR created) or limits are reached +- SandboxWorker: Production worker with integrated continuous mode support + - Single-run mode: Execute task once and wait for messages + - Continuous mode: Iterate until task truly completes (code pushed, PR created) + - Enabled by default for implementation and validation execution modes +- ContinuousSandboxWorker: Deprecated - use SandboxWorker with continuous_mode=True """ -from omoi_os.workers.sandbox_agent_worker import ( +# Main production worker (claude_sandbox_worker.py) +from omoi_os.workers.claude_sandbox_worker import ( EventReporter, MessagePoller, SandboxWorker, WorkerConfig, - process_sdk_response, + IterationState, + check_git_status, ) +# Legacy compat: process_sdk_response from simple worker +from omoi_os.workers.sandbox_agent_worker import process_sdk_response + +# Keep continuous worker for backwards compatibility from omoi_os.workers.continuous_sandbox_worker import ( ContinuousSandboxWorker, ContinuousWorkerConfig, - IterationState, ) __all__ = [ - # Base worker + # Base worker (with integrated continuous mode) "EventReporter", "MessagePoller", "SandboxWorker", "WorkerConfig", "process_sdk_response", - # Continuous worker + "IterationState", + "check_git_status", + # Continuous worker (deprecated - use SandboxWorker with continuous_mode=True) "ContinuousSandboxWorker", "ContinuousWorkerConfig", - "IterationState", ] diff --git a/backend/omoi_os/workers/claude_sandbox_worker.py b/backend/omoi_os/workers/claude_sandbox_worker.py index fb13ce58..f338b902 100644 --- a/backend/omoi_os/workers/claude_sandbox_worker.py +++ b/backend/omoi_os/workers/claude_sandbox_worker.py @@ -137,6 +137,152 @@ class AgentDefinition: MCP_AVAILABLE = False +# ============================================================================= +# Iteration State Tracking 
(Continuous Mode) +# ============================================================================= + + +@dataclass +class IterationState: + """Tracks state across iterations for continuous execution mode. + + When enabled, the worker runs in a loop until: + - Validation passes (code pushed, PR created) + - Limits are reached (max_runs, max_cost, max_duration) + - 3 consecutive errors occur + """ + iteration_num: int = 0 # Current iteration + successful_iterations: int = 0 # Completed successfully + error_count: int = 0 # Consecutive errors + total_cost: float = 0.0 # Accumulated cost + completion_signal_count: int = 0 # Consecutive completion signals + start_time: Optional[float] = None # For duration tracking + last_session_id: Optional[str] = None # For potential resume + validation_passed: bool = False # Whether git validation passed + validation_feedback: str = "" # Feedback from validation + + # Track what's been accomplished + tests_passed: bool = False + code_committed: bool = False + code_pushed: bool = False + pr_created: bool = False + + def to_event_data(self) -> dict: + """Convert state to event payload.""" + import time + elapsed = time.time() - self.start_time if self.start_time else 0 + return { + "iteration_num": self.iteration_num, + "successful_iterations": self.successful_iterations, + "error_count": self.error_count, + "total_cost_usd": self.total_cost, + "completion_signal_count": self.completion_signal_count, + "elapsed_seconds": elapsed, + "last_session_id": self.last_session_id, + "validation_passed": self.validation_passed, + "tests_passed": self.tests_passed, + "code_committed": self.code_committed, + "code_pushed": self.code_pushed, + "pr_created": self.pr_created, + } + + +# ============================================================================= +# Git Validation (for continuous mode completion checking) +# ============================================================================= + + +def check_git_status(cwd: str) -> dict[str, 
Any]: + """Check git status for validation. + + Returns dict with: + - is_clean: No uncommitted changes + - is_pushed: Not ahead of remote + - has_pr: PR exists for current branch + - branch_name: Current branch + - errors: List of validation errors + """ + result = { + "is_clean": False, + "is_pushed": False, + "has_pr": False, + "branch_name": None, + "status_output": "", + "errors": [], + } + + try: + # Get current branch + branch_result = subprocess.run( + ["git", "rev-parse", "--abbrev-ref", "HEAD"], + cwd=cwd, + capture_output=True, + text=True, + timeout=10, + ) + if branch_result.returncode == 0: + result["branch_name"] = branch_result.stdout.strip() + + # Check git status + status_result = subprocess.run( + ["git", "status", "--porcelain"], + cwd=cwd, + capture_output=True, + text=True, + timeout=10, + ) + result["status_output"] = status_result.stdout + result["is_clean"] = status_result.returncode == 0 and not status_result.stdout.strip() + + if not result["is_clean"]: + result["errors"].append("Uncommitted changes detected") + + # Check if ahead of remote + status_verbose = subprocess.run( + ["git", "status"], + cwd=cwd, + capture_output=True, + text=True, + timeout=10, + ) + status_text = status_verbose.stdout + + if "Your branch is ahead" in status_text: + result["is_pushed"] = False + result["errors"].append("Code not pushed to remote") + elif "Your branch is up to date" in status_text or "nothing to commit" in status_text: + result["is_pushed"] = True + else: + # If we can't determine, assume it's pushed + result["is_pushed"] = True + + # Check for PR using gh CLI + try: + pr_result = subprocess.run( + ["gh", "pr", "view", "--json", "number,title,state"], + cwd=cwd, + capture_output=True, + text=True, + timeout=30, + ) + if pr_result.returncode == 0 and pr_result.stdout.strip(): + result["has_pr"] = True + else: + result["has_pr"] = False + result["errors"].append("No PR found for current branch") + except FileNotFoundError: + 
result["errors"].append("GitHub CLI (gh) not available") + except subprocess.TimeoutExpired: + result["errors"].append("Timeout checking PR status") + + except subprocess.TimeoutExpired: + result["errors"].append("Timeout running git commands") + except Exception as e: + result["errors"].append(f"Git validation error: {str(e)}") + + return result + + # ============================================================================= # File Change Tracking # ============================================================================= @@ -911,6 +1057,38 @@ def __init__(self): # Use this to provide a summary of previous conversation self.conversation_context = os.environ.get("CONVERSATION_CONTEXT", "") + # ================================================================= + # Continuous Mode Settings + # ================================================================= + # When enabled, the worker runs in a loop until task truly completes + # or limits are reached. This ensures tasks don't stop due to + # context length issues. 
+ + # Enable continuous mode by default for implementation and validation + # These modes need to ensure work is ACTUALLY completed + continuous_default = self.execution_mode in ("implementation", "validation") + self.continuous_mode = ( + os.environ.get("CONTINUOUS_MODE", str(continuous_default)).lower() == "true" + ) + + # Iteration limits + self.max_iterations = int(os.environ.get("MAX_ITERATIONS", "10")) + self.max_total_cost_usd = float(os.environ.get("MAX_TOTAL_COST_USD", "20.0")) + self.max_duration_seconds = int(os.environ.get("MAX_DURATION_SECONDS", "3600")) # 1 hour + self.max_consecutive_errors = int(os.environ.get("MAX_CONSECUTIVE_ERRORS", "3")) + + # Completion detection + self.completion_signal = os.environ.get("COMPLETION_SIGNAL", "TASK_COMPLETE") + self.completion_threshold = int(os.environ.get("COMPLETION_THRESHOLD", "1")) + + # Notes file for cross-iteration context preservation + self.notes_file = os.environ.get("NOTES_FILE", "ITERATION_NOTES.md") + + # Git validation requirements for implementation mode completion + self.require_clean_git = os.environ.get("REQUIRE_CLEAN_GIT", "true").lower() == "true" + self.require_code_pushed = os.environ.get("REQUIRE_CODE_PUSHED", "true").lower() == "true" + self.require_pr_created = os.environ.get("REQUIRE_PR_CREATED", "true").lower() == "true" + # Append conversation context to system prompt if provided (for hydration) # NOTE: Must be after conversation_context is initialized above if self.conversation_context: @@ -958,6 +1136,16 @@ def to_dict(self) -> dict: "fork_session": self.fork_session, "has_session_transcript": bool(self.session_transcript_b64), "has_conversation_context": bool(self.conversation_context), + # Continuous mode settings + "continuous_mode": self.continuous_mode, + "max_iterations": self.max_iterations, + "max_total_cost_usd": self.max_total_cost_usd, + "max_duration_seconds": self.max_duration_seconds, + "max_consecutive_errors": self.max_consecutive_errors, + "completion_signal": 
self.completion_signal, + "require_clean_git": self.require_clean_git, + "require_code_pushed": self.require_code_pushed, + "require_pr_created": self.require_pr_created, } def get_custom_agents(self) -> dict: @@ -1359,7 +1547,12 @@ def setup_github_workspace(config: WorkerConfig) -> bool: class SandboxWorker: - """Main worker orchestrator with comprehensive event tracking.""" + """Main worker orchestrator with comprehensive event tracking. + + Supports both single-run and continuous modes: + - Single-run: Execute task once and wait for messages + - Continuous: Iterate until task truly completes (code pushed, PR created) + """ def __init__(self, config: WorkerConfig): self.config = config @@ -1370,6 +1563,9 @@ def __init__(self, config: WorkerConfig): self.reporter: Optional[EventReporter] = None self.file_tracker = FileChangeTracker() + # Iteration state for continuous mode + self.iteration_state = IterationState() + def _setup_signal_handlers(self): """Setup graceful shutdown on SIGTERM/SIGINT.""" @@ -1841,6 +2037,238 @@ async def _process_messages( return None, final_output + # ========================================================================= + # Continuous Mode Helper Methods + # ========================================================================= + + def _should_continue_iteration(self) -> bool: + """Check if iteration should continue in continuous mode.""" + import time + state = self.iteration_state + config = self.config + + # Check if validation already passed + if state.validation_passed: + return False + + # Check completion signal threshold + if state.completion_signal_count >= config.completion_threshold: + # Only stop if validation passed + if not state.validation_passed: + # Continue to allow agent to fix issues + logger.info("Completion signal reached but validation not passed - continuing") + state.completion_signal_count = 0 # Reset to allow more iterations + else: + return False + + # Check max iterations + if 
state.successful_iterations >= config.max_iterations: + return False + + # Check max cost + if state.total_cost >= config.max_total_cost_usd: + return False + + # Check max duration + if state.start_time: + elapsed = time.time() - state.start_time + if elapsed >= config.max_duration_seconds: + return False + + # Check consecutive errors + if state.error_count >= config.max_consecutive_errors: + return False + + # Check shutdown signal + if self._should_stop: + return False + + return True + + def _get_stop_reason(self) -> str: + """Determine why the iteration loop stopped.""" + import time + state = self.iteration_state + config = self.config + + if state.validation_passed: + return "validation_passed" + if state.completion_signal_count >= config.completion_threshold: + return "completion_signal" + if state.successful_iterations >= config.max_iterations: + return "max_iterations_reached" + if state.total_cost >= config.max_total_cost_usd: + return "max_cost_reached" + if state.start_time: + elapsed = time.time() - state.start_time + if elapsed >= config.max_duration_seconds: + return "max_duration_reached" + if state.error_count >= config.max_consecutive_errors: + return "consecutive_errors" + if self._should_stop: + return "shutdown_signal" + return "unknown" + + async def _run_validation(self): + """Run git validation to check if work is truly complete.""" + state = self.iteration_state + config = self.config + + logger.info("Running git validation...") + + git_status = check_git_status(config.cwd) + + # Update state with validation results + state.code_committed = git_status["is_clean"] + state.code_pushed = git_status["is_pushed"] + state.pr_created = git_status["has_pr"] + + # Determine if validation passed based on config requirements + validation_errors = [] + + if config.require_clean_git and not git_status["is_clean"]: + validation_errors.append("Uncommitted changes exist") + + if config.require_code_pushed and not git_status["is_pushed"]: + 
validation_errors.append("Code not pushed to remote") + + if config.require_pr_created and not git_status["has_pr"]: + validation_errors.append("No PR created") + + if not validation_errors: + state.validation_passed = True + state.validation_feedback = "All validation checks passed" + logger.info("Git validation PASSED") + else: + state.validation_passed = False + state.validation_feedback = "; ".join(validation_errors) + logger.info("Git validation FAILED", extra={"errors": validation_errors}) + + # Update notes file with validation feedback for next iteration + self._update_notes_with_validation(validation_errors, git_status) + + # Report validation result + if self.reporter: + await self.reporter.report( + "iteration.validation", + { + "iteration_num": state.iteration_num, + "passed": state.validation_passed, + "feedback": state.validation_feedback, + "git_status": { + "is_clean": git_status["is_clean"], + "is_pushed": git_status["is_pushed"], + "has_pr": git_status["has_pr"], + "branch_name": git_status["branch_name"], + }, + "errors": validation_errors, + }, + ) + + def _update_notes_with_validation(self, errors: list[str], git_status: dict): + """Update notes file with validation feedback for next iteration.""" + config = self.config + notes_path = Path(config.cwd) / config.notes_file + + try: + existing = notes_path.read_text() if notes_path.exists() else "" + + feedback_section = f""" + +## VALIDATION FAILED - Iteration {self.iteration_state.iteration_num} + +The completion signal was detected, but validation checks failed. 
+**You must fix these issues before the task is truly complete:** + +### Issues Found: +""" + for error in errors: + feedback_section += f"- ❌ {error}\n" + + feedback_section += f""" +### Git Status: +- Branch: {git_status.get('branch_name', 'unknown')} +- Clean working directory: {'✅ Yes' if git_status['is_clean'] else '❌ No'} +- Code pushed to remote: {'✅ Yes' if git_status['is_pushed'] else '❌ No'} +- PR exists: {'✅ Yes' if git_status['has_pr'] else '❌ No'} + +### Required Actions: +""" + if not git_status["is_clean"]: + feedback_section += "1. Stage and commit all changes: `git add -A && git commit -m \"...\"`\n" + if not git_status["is_pushed"]: + feedback_section += "2. Push code to remote: `git push`\n" + if not git_status["has_pr"]: + feedback_section += "3. Create a pull request: `gh pr create --title \"...\" --body \"...\"`\n" + + feedback_section += f""" +**After fixing these issues, include `{config.completion_signal}` in your response again.** +""" + + notes_path.write_text(existing + feedback_section) + logger.info("Updated notes file with validation feedback") + + except Exception as e: + logger.warning("Failed to update notes with validation feedback", extra={"error": str(e)}) + + def _build_iteration_prompt(self, base_task: str) -> str: + """Build enhanced prompt with iteration context.""" + config = self.config + state = self.iteration_state + + # Read notes file if exists + notes_content = "" + notes_path = Path(config.cwd) / config.notes_file + if notes_path.exists(): + try: + notes_content = notes_path.read_text() + except Exception: + pass + + # First iteration - return base task with completion instructions + if state.iteration_num == 1: + prompt = f"""{base_task} + +--- + +## Completion Requirements (IMPORTANT) + +When you have completed the task, you MUST: +1. Ensure all tests pass +2. Commit all changes: `git add -A && git commit -m "..."` +3. Push to remote: `git push` +4. Create a PR: `gh pr create --title "..." 
--body "..."` + +**When all work is done and pushed, include the phrase `{config.completion_signal}` in your response.** +""" + return prompt + + # Subsequent iterations - include notes file and previous context + prompt = f"""## Continuing Task (Iteration {state.iteration_num}) + +This is a continuation of your previous work. Please review the notes below and complete any remaining work. + +### Original Task: +{base_task} + +### Previous Iteration Notes: +{notes_content if notes_content else "(No notes from previous iterations)"} + +### Current Status: +- Iterations completed: {state.successful_iterations} +- Validation passed: {state.validation_passed} +- Code committed: {state.code_committed} +- Code pushed: {state.code_pushed} +- PR created: {state.pr_created} + +### What to do: +1. Review the validation feedback above (if any) +2. Fix any issues identified +3. Ensure all work is committed, pushed, and a PR exists +4. Include `{config.completion_signal}` when truly done +""" + return prompt + async def run(self): """Main worker loop with comprehensive event tracking.""" self._setup_signal_handlers() @@ -1926,20 +2354,188 @@ async def run(self): ) if initial_task and initial_task.strip(): - logger.info("Processing initial task", extra={"task_preview": initial_task[:100]}) - try: - await client.query(initial_task) - await self._process_messages(client) - except Exception as e: - logger.error("Failed to process initial task", extra={"error": str(e)}, exc_info=True) - if self.reporter: - await self.reporter.report( - "agent.error", + # Initialize iteration state + import time + self.iteration_state.start_time = time.time() + + if self.config.continuous_mode: + # ================================================= + # CONTINUOUS MODE: Iterate until task truly completes + # ================================================= + logger.info("=" * 40) + logger.info("CONTINUOUS MODE ENABLED") + logger.info("=" * 40) + logger.info( + "Will iterate until: validation_passed OR 
max_iterations=%d OR max_cost=$%.2f OR max_duration=%ds", + self.config.max_iterations, + self.config.max_total_cost_usd, + self.config.max_duration_seconds, + ) + + # Report continuous mode start + await reporter.report( + "continuous.started", + { + "goal": initial_task[:500], + "limits": { + "max_iterations": self.config.max_iterations, + "max_cost_usd": self.config.max_total_cost_usd, + "max_duration_seconds": self.config.max_duration_seconds, + }, + "completion_signal": self.config.completion_signal, + "completion_threshold": self.config.completion_threshold, + "validation_requirements": { + "require_clean_git": self.config.require_clean_git, + "require_code_pushed": self.config.require_code_pushed, + "require_pr_created": self.config.require_pr_created, + }, + }, + source="worker", + ) + + # Iteration loop + while self._should_continue_iteration(): + self.iteration_state.iteration_num += 1 + iteration_prompt = self._build_iteration_prompt(initial_task) + + # Report iteration start + await reporter.report( + "iteration.started", { - "error": f"Initial task failed: {str(e)}", - "phase": "initial_task", + "iteration_num": self.iteration_state.iteration_num, + "prompt_preview": iteration_prompt[:500], + **self.iteration_state.to_event_data(), }, ) + + logger.info( + "Starting iteration %d", + self.iteration_state.iteration_num, + extra={"state": self.iteration_state.to_event_data()} + ) + + try: + await client.query(iteration_prompt) + result, output = await self._process_messages(client) + + if result: + # Track cost + iteration_cost = getattr(result, "total_cost_usd", 0.0) or 0.0 + self.iteration_state.total_cost += iteration_cost + self.iteration_state.last_session_id = getattr(result, "session_id", None) + + # Check for completion signal in output + output_text = "\n".join(output) if output else "" + if self.config.completion_signal in output_text: + self.iteration_state.completion_signal_count += 1 + logger.info( + "Completion signal detected (%d/%d)", + 
self.iteration_state.completion_signal_count, + self.config.completion_threshold, + ) + + await reporter.report( + "iteration.completion_signal", + { + "iteration_num": self.iteration_state.iteration_num, + "signal_count": self.iteration_state.completion_signal_count, + "threshold": self.config.completion_threshold, + }, + ) + + # Run git validation + await self._run_validation() + + # If validation passed, we're done! + if self.iteration_state.validation_passed: + logger.info("Validation PASSED - task truly complete!") + break + else: + # No completion signal - reset counter + self.iteration_state.completion_signal_count = 0 + + self.iteration_state.successful_iterations += 1 + self.iteration_state.error_count = 0 + + # Report iteration completion + await reporter.report( + "iteration.completed", + { + "iteration_num": self.iteration_state.iteration_num, + "cost_usd": iteration_cost, + "output_preview": output_text[:1000] if output_text else None, + **self.iteration_state.to_event_data(), + }, + ) + else: + # No result - iteration failed + self.iteration_state.error_count += 1 + await reporter.report( + "iteration.failed", + { + "iteration_num": self.iteration_state.iteration_num, + "error": "No result message received", + "error_count": self.iteration_state.error_count, + }, + ) + + except Exception as e: + self.iteration_state.error_count += 1 + logger.error( + "Iteration %d failed", + self.iteration_state.iteration_num, + extra={"error": str(e)}, + exc_info=True, + ) + await reporter.report( + "iteration.failed", + { + "iteration_num": self.iteration_state.iteration_num, + "error": str(e), + "error_type": type(e).__name__, + "error_count": self.iteration_state.error_count, + }, + ) + + # Report continuous mode completion + stop_reason = self._get_stop_reason() + await reporter.report( + "continuous.completed", + { + "stop_reason": stop_reason, + **self.iteration_state.to_event_data(), + }, + ) + + logger.info( + "Continuous mode completed", + extra={ + 
"stop_reason": stop_reason, + "iterations": self.iteration_state.iteration_num, + "successful": self.iteration_state.successful_iterations, + "total_cost": self.iteration_state.total_cost, + "validation_passed": self.iteration_state.validation_passed, + } + ) + + else: + # ================================================= + # SINGLE-RUN MODE: Execute once (original behavior) + # ================================================= + logger.info("Processing initial task (single-run mode)", extra={"task_preview": initial_task[:100]}) + try: + await client.query(initial_task) + await self._process_messages(client) + except Exception as e: + logger.error("Failed to process initial task", extra={"error": str(e)}, exc_info=True) + if self.reporter: + await self.reporter.report( + "agent.error", + { + "error": f"Initial task failed: {str(e)}", + "phase": "initial_task", + }, + ) else: logger.warning( "No initial task provided - worker will wait for messages" From 7e1a1fc74166e971569a5bf936ce26404690d102 Mon Sep 17 00:00:00 2001 From: Kevin Hill Date: Tue, 30 Dec 2025 14:13:42 -0300 Subject: [PATCH 013/290] feat(spawner): Auto-enable continuous mode for implementation/validation Updates DaytonaSpawnerService to automatically enable continuous mode for implementation and validation execution modes when using Claude runtime. Key changes: - Change continuous_mode parameter from bool to Optional[bool] - None (default): Auto-enable for implementation/validation modes - True: Force enable - False: Force disable - Update env var names to match WorkerConfig: - MAX_ITERATIONS, MAX_TOTAL_COST_USD, MAX_DURATION_SECONDS - Use base claude_sandbox_worker.py for all Claude workers - Continuous mode is now environment-driven via CONTINUOUS_MODE - Deprecate _get_continuous_worker_script() method This ensures tasks in implementation and validation modes iterate until truly complete (code pushed, PR created) without requiring explicit continuous_mode=True. 
--- backend/omoi_os/services/daytona_spawner.py | 69 +++++++++++---------- 1 file changed, 35 insertions(+), 34 deletions(-) diff --git a/backend/omoi_os/services/daytona_spawner.py b/backend/omoi_os/services/daytona_spawner.py index ad80a68f..e39e9061 100644 --- a/backend/omoi_os/services/daytona_spawner.py +++ b/backend/omoi_os/services/daytona_spawner.py @@ -135,7 +135,7 @@ async def spawn_for_task( labels: Optional[Dict[str, str]] = None, runtime: str = "openhands", # "openhands" or "claude" execution_mode: str = "implementation", # "exploration", "implementation", "validation" - continuous_mode: bool = False, # Enable continuous iteration until task complete + continuous_mode: Optional[bool] = None, # None = auto-enable for implementation/validation ) -> str: """Spawn a Daytona sandbox for executing a task. @@ -151,9 +151,12 @@ async def spawn_for_task( - "exploration": For feature definition (creates specs/tickets/tasks) - "implementation": For task execution (writes code, default) - "validation": For verifying implementation - continuous_mode: If True, runs iterative loop until task is complete + continuous_mode: Enable continuous iteration until task is complete (code pushed, PR created) or limits are reached. Only works with runtime="claude". 
+ - None (default): Auto-enable for implementation/validation modes + - True: Force enable + - False: Force disable Returns: Sandbox ID @@ -191,13 +194,27 @@ async def spawn_for_task( "SANDBOX_ID": sandbox_id, } + # Determine continuous mode: + # - None (default): Auto-enable for implementation/validation modes with Claude runtime + # - True: Force enable + # - False: Force disable + effective_continuous_mode = continuous_mode + if continuous_mode is None and runtime == "claude": + # Auto-enable for implementation and validation modes + # These modes need to ensure tasks complete fully (code pushed, PR created) + effective_continuous_mode = execution_mode in ("implementation", "validation") + if effective_continuous_mode: + logger.info( + f"Auto-enabling continuous mode for '{execution_mode}' mode" + ) + # Add continuous mode settings if enabled - if continuous_mode and runtime == "claude": + if effective_continuous_mode and runtime == "claude": env_vars["CONTINUOUS_MODE"] = "true" # Default limits for continuous mode (can be overridden via extra_env) - env_vars.setdefault("CONTINUOUS_MAX_RUNS", "10") - env_vars.setdefault("CONTINUOUS_MAX_COST_USD", "20.0") - env_vars.setdefault("CONTINUOUS_MAX_DURATION", "3600") # 1 hour + env_vars.setdefault("MAX_ITERATIONS", "10") + env_vars.setdefault("MAX_TOTAL_COST_USD", "20.0") + env_vars.setdefault("MAX_DURATION_SECONDS", "3600") # 1 hour logger.info("Continuous mode enabled for sandbox") # Add agent type if specified @@ -799,12 +816,11 @@ def escape_env_value(v: str) -> str: logger.debug("No GITHUB_REPO configured - skipping repository clone") # Upload the appropriate worker script + # Note: Claude worker has continuous mode built-in, controlled by CONTINUOUS_MODE env var if runtime == "claude": - if continuous_mode: - worker_script = self._get_continuous_worker_script() - logger.info("Using continuous worker script for iterative execution") - else: - worker_script = self._get_claude_worker_script() + worker_script = 
self._get_claude_worker_script() + if effective_continuous_mode: + logger.info("Using Claude worker with continuous mode enabled via environment") else: worker_script = self._get_worker_script() sandbox.fs.upload_file(worker_script.encode("utf-8"), "/tmp/sandbox_worker.py") @@ -2291,30 +2307,15 @@ async def terminate_sandbox(self, sandbox_id: str) -> bool: def _get_continuous_worker_script(self) -> str: """Get the Continuous Sandbox Worker script content. - Reads from backend/omoi_os/workers/continuous_sandbox_worker.py - This worker runs Claude Code in an iterative loop until: - - Task is complete (code pushed, PR created) - - Limits are reached (max runs, cost, duration) - - Consecutive errors occur - - Features (extends base Claude worker): - - Iterative execution with completion signal detection - - Git validation (clean status, pushed, PR exists) - - Cross-iteration context via notes file - - Automatic retry on validation failure - - Per-iteration event reporting - """ - # Try to read from file first (development mode) - worker_file = ( - Path(__file__).parent.parent / "workers" / "continuous_sandbox_worker.py" - ) - if worker_file.exists(): - logger.info(f"Loading continuous worker script from {worker_file}") - return worker_file.read_text() + DEPRECATED: Use _get_claude_worker_script() instead. + Continuous mode is now integrated into the base claude_sandbox_worker.py + and is controlled via the CONTINUOUS_MODE environment variable. - # Fallback to standard Claude worker if continuous worker not found - logger.warning( - "Continuous worker file not found, falling back to standard Claude worker" + This method now just returns the base Claude worker script. + """ + logger.info( + "Note: Continuous mode is now integrated into base Claude worker. " + "Using claude_sandbox_worker.py with CONTINUOUS_MODE env var." 
) return self._get_claude_worker_script() From 61dfeb4fc5b185125fbc336a45c951bb9d3db33b Mon Sep 17 00:00:00 2001 From: Kevin Hill Date: Tue, 30 Dec 2025 14:47:22 -0300 Subject: [PATCH 014/290] fix(spawner): Persist GITHUB_TOKEN in sandbox env files Previously, env vars were written to /tmp/.sandbox_env and ~/.bashrc BEFORE the GITHUB_TOKEN was added back to env_vars (after git clone). This meant the token wasn't available for: - Subprocesses spawned by the worker - Continuous mode iterations - gh CLI commands Now we: 1. Delay writing env file/bashrc until AFTER all env vars are finalized 2. Include GITHUB_TOKEN and GH_TOKEN in the persisted environment 3. Add belt-and-suspenders sourcing of env file in bashrc 4. Log the number of persisted env vars for debugging --- backend/omoi_os/services/daytona_spawner.py | 65 ++++++++++++--------- 1 file changed, 37 insertions(+), 28 deletions(-) diff --git a/backend/omoi_os/services/daytona_spawner.py b/backend/omoi_os/services/daytona_spawner.py index e39e9061..aab7e044 100644 --- a/backend/omoi_os/services/daytona_spawner.py +++ b/backend/omoi_os/services/daytona_spawner.py @@ -611,31 +611,15 @@ async def _start_worker_in_sandbox( github_owner = env_vars.pop("GITHUB_REPO_OWNER", None) github_repo_name = env_vars.pop("GITHUB_REPO_NAME", None) - # Build environment export string (without sensitive token) - # Properly escape values to handle quotes and special characters + # Helper function to escape environment variable values for shell export def escape_env_value(v: str) -> str: """Escape environment variable value for shell export.""" # Use shlex.quote to properly escape shell values return shlex.quote(str(v)) - env_exports = " ".join( - [f"export {k}={escape_env_value(v)}" for k, v in env_vars.items()] - ) - - # Also write env vars to a file for persistence and debugging - # For the file, we can use simpler escaping since it's not in a shell command - env_file_content = "\n".join( - [ - f'{k}="{v.replace(chr(34), chr(92) + 
chr(34))}"' - for k, v in env_vars.items() - ] - ) - sandbox.process.exec( - f"cat > /tmp/.sandbox_env << 'ENVEOF'\n{env_file_content}\nENVEOF" - ) - # Export to current shell profile for all future commands - # Use proper heredoc delimiter - sandbox.process.exec(f"cat >> /root/.bashrc << 'ENVEOF'\n{env_exports}\nENVEOF") + # NOTE: We delay writing env file and bashrc until AFTER all env vars are set + # (including GITHUB_TOKEN which gets added after git clone). + # See the "Persist final environment" section below. # Install required packages based on runtime # Use uv for faster installation if available, fallback to pip @@ -797,13 +781,9 @@ def escape_env_value(v: str) -> str: logger.info("Git configured for push access with GitHub token") - # Update env file with workspace path - sandbox.process.exec( - f'echo "WORKSPACE_PATH="{workspace_path}"" >> /tmp/.sandbox_env' - ) - sandbox.process.exec( - f'echo "export WORKSPACE_PATH="{workspace_path}"" >> /root/.bashrc' - ) + # NOTE: WORKSPACE_PATH, GITHUB_TOKEN, GH_TOKEN are now in env_vars + # and will be persisted to /tmp/.sandbox_env and ~/.bashrc in the + # "Persist final environment" section below. 
except Exception as e: logger.warning(f"Failed to clone repository via SDK: {e}") @@ -825,12 +805,41 @@ def escape_env_value(v: str) -> str: worker_script = self._get_worker_script() sandbox.fs.upload_file(worker_script.encode("utf-8"), "/tmp/sandbox_worker.py") - # Rebuild env_exports with any new variables (like WORKSPACE_PATH) + # Rebuild env_exports with any new variables (like WORKSPACE_PATH, GITHUB_TOKEN) # Use the same escape function defined above env_exports = " ".join( [f"export {k}={escape_env_value(v)}" for k, v in env_vars.items()] ) + # ========================================================================= + # Persist final environment (AFTER all env vars including GITHUB_TOKEN are set) + # This ensures the token is available for: + # - Subprocesses spawned by the worker + # - Continuous mode iterations that may source bashrc + # - gh CLI commands that read GH_TOKEN from environment + # ========================================================================= + + # Write env vars to a file for persistence and debugging + # For the file, we can use simpler escaping since it's not in a shell command + env_file_content = "\n".join( + [ + f'{k}="{v.replace(chr(34), chr(92) + chr(34))}"' + for k, v in env_vars.items() + ] + ) + sandbox.process.exec( + f"cat > /tmp/.sandbox_env << 'ENVEOF'\n{env_file_content}\nENVEOF" + ) + + # Export to current shell profile for all future commands + # This ensures env vars persist across shell sessions and subprocesses + sandbox.process.exec(f"cat >> /root/.bashrc << 'ENVEOF'\n{env_exports}\nENVEOF") + + # Also source the env file in bashrc for belt-and-suspenders persistence + sandbox.process.exec('echo "source /tmp/.sandbox_env 2>/dev/null || true" >> /root/.bashrc') + + logger.info(f"Persisted {len(env_vars)} environment variables (including GitHub tokens)") + # Create workspace directory (even if no repo cloned) sandbox.process.exec("mkdir -p /workspace") From 1efc3fe49c90524359b58c5d5fd13d40359d2856 Mon Sep 17 
00:00:00 2001 From: Kevin Hill Date: Tue, 30 Dec 2025 15:37:52 -0300 Subject: [PATCH 015/290] feat(spawner): Grant full permissions to sandbox Claude agents Update settings.local.json to allow all tool operations without prompts: - Bash(*) - all shell commands - Read(*), Write(*), Edit(*), MultiEdit(*) - all file operations - Glob(*), Grep(*) - all search operations - WebFetch(*), TodoWrite(*), Task(*) - all utility tools - mcp__* - all MCP server tools This is appropriate for sandbox environments where we want agents to execute autonomously without permission prompts. --- backend/omoi_os/services/daytona_spawner.py | 42 ++++++++------------- 1 file changed, 15 insertions(+), 27 deletions(-) diff --git a/backend/omoi_os/services/daytona_spawner.py b/backend/omoi_os/services/daytona_spawner.py index aab7e044..769cdab8 100644 --- a/backend/omoi_os/services/daytona_spawner.py +++ b/backend/omoi_os/services/daytona_spawner.py @@ -672,40 +672,28 @@ def escape_env_value(v: str) -> str: # Create skills directory sandbox.process.exec("mkdir -p /root/.claude/skills") - # Create settings.local.json with allowed commands - # This allows common development commands without requiring approval + # Create settings.local.json with FULL permissions + # This is a sandbox environment - allow everything without prompts settings_content = """{ "permissions": { "allow": [ - "Bash(npm run *)", - "Bash(pnpm run *)", - "Bash(yarn run *)", - "Bash(npx *)", - "Bash(pytest*)", - "Bash(uv run *)", - "Bash(python *)", - "Bash(node *)", - "Bash(git *)", - "Bash(gh *)", - "Bash(cd *)", - "Bash(ls *)", - "Bash(cat *)", - "Bash(mkdir *)", - "Bash(rm *)", - "Bash(cp *)", - "Bash(mv *)", - "Bash(grep *)", - "Bash(find *)", - "Bash(cargo *)", - "Bash(go *)", - "Bash(make *)" + "Bash(*)", + "Read(*)", + "Write(*)", + "Edit(*)", + "MultiEdit(*)", + "Glob(*)", + "Grep(*)", + "WebFetch(*)", + "TodoWrite(*)", + "Task(*)", + "mcp__*" ], - "deny": [], - "ask": [] + "deny": [] } }""" 
sandbox.fs.upload_file(settings_content.encode("utf-8"), "/root/.claude/settings.local.json") - logger.info("Uploaded Claude settings.local.json with allowed commands") + logger.info("Uploaded Claude settings.local.json with full permissions") # Get skills based on execution mode # - exploration: spec-driven-dev (for creating specs/tickets/tasks) From e413515dbb714ca421c25225ae7cfaaa367c052a Mon Sep 17 00:00:00 2001 From: Kevin Hill Date: Tue, 30 Dec 2025 15:53:27 -0300 Subject: [PATCH 016/290] fix(spawner): Fix NameError - pass continuous_mode to _start_worker_in_sandbox The `effective_continuous_mode` variable was referenced inside `_start_worker_in_sandbox` but was only defined in the calling function `spawn_for_task`. This caused a NameError that silently failed (caught by exception handler), preventing: - Worker script from being uploaded to /tmp/sandbox_worker.py - Environment file from being created at /tmp/.sandbox_env - Any events from being sent back to the server Fix: Added `continuous_mode` parameter to `_start_worker_in_sandbox` and pass `effective_continuous_mode` from the call site. 
--- backend/omoi_os/services/daytona_spawner.py | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/backend/omoi_os/services/daytona_spawner.py b/backend/omoi_os/services/daytona_spawner.py index 769cdab8..aba052cc 100644 --- a/backend/omoi_os/services/daytona_spawner.py +++ b/backend/omoi_os/services/daytona_spawner.py @@ -578,7 +578,11 @@ async def _create_daytona_sandbox( info.extra_data["daytona_sandbox_id"] = sandbox.id # Set environment variables and start the worker - await self._start_worker_in_sandbox(sandbox, env_vars, runtime, execution_mode) + # Pass effective_continuous_mode from spawn_for_task to control worker logging + await self._start_worker_in_sandbox( + sandbox, env_vars, runtime, execution_mode, + continuous_mode=effective_continuous_mode or False + ) logger.info(f"Daytona sandbox {sandbox.id} created for {sandbox_id}") @@ -596,6 +600,7 @@ async def _start_worker_in_sandbox( env_vars: Dict[str, str], runtime: str = "openhands", execution_mode: str = "implementation", + continuous_mode: bool = False, ) -> None: """Start the sandbox worker inside the Daytona sandbox. 
@@ -604,6 +609,7 @@ async def _start_worker_in_sandbox( env_vars: Environment variables for the worker runtime: Agent runtime - "openhands" or "claude" execution_mode: Skill loading mode - determines which skills are loaded + continuous_mode: Whether continuous iteration mode is enabled """ # Extract git clone parameters (don't pass token to env vars for security) github_repo = env_vars.pop("GITHUB_REPO", None) @@ -787,7 +793,7 @@ def escape_env_value(v: str) -> str: # Note: Claude worker has continuous mode built-in, controlled by CONTINUOUS_MODE env var if runtime == "claude": worker_script = self._get_claude_worker_script() - if effective_continuous_mode: + if continuous_mode: logger.info("Using Claude worker with continuous mode enabled via environment") else: worker_script = self._get_worker_script() From 99055eac65ca7a4012053da16f08c2f2c7ff2dad Mon Sep 17 00:00:00 2001 From: Kevin Hill Date: Tue, 30 Dec 2025 16:02:28 -0300 Subject: [PATCH 017/290] fix(spawner): Separate sandbox creation from worker startup error handling Previously, if _start_worker_in_sandbox failed, the exception was caught by the general try/except that was meant for sandbox creation failures. This caused the code to silently fall back to mock sandbox, leaving the REAL Daytona sandbox running but with no worker script uploaded. Fix: - Move worker startup outside the sandbox creation try/except - Add separate try/except for worker startup with proper error logging - Re-raise worker errors so callers know something went wrong - Add full traceback logging for debugging This ensures that when a real sandbox is created successfully but the worker fails to start, we get clear error messages instead of silent fallback to mock mode. 
--- backend/omoi_os/services/daytona_spawner.py | 32 +++++++++++++++------ 1 file changed, 23 insertions(+), 9 deletions(-) diff --git a/backend/omoi_os/services/daytona_spawner.py b/backend/omoi_os/services/daytona_spawner.py index aba052cc..4e00ce74 100644 --- a/backend/omoi_os/services/daytona_spawner.py +++ b/backend/omoi_os/services/daytona_spawner.py @@ -570,6 +570,7 @@ async def _create_daytona_sandbox( ) sandbox = daytona.create(params=params, timeout=120) + logger.info(f"Daytona sandbox {sandbox.id} created for {sandbox_id}") # Store sandbox reference info = self._sandboxes.get(sandbox_id) @@ -577,22 +578,35 @@ async def _create_daytona_sandbox( info.extra_data["daytona_sandbox"] = sandbox info.extra_data["daytona_sandbox_id"] = sandbox.id - # Set environment variables and start the worker - # Pass effective_continuous_mode from spawn_for_task to control worker logging - await self._start_worker_in_sandbox( - sandbox, env_vars, runtime, execution_mode, - continuous_mode=effective_continuous_mode or False - ) - - logger.info(f"Daytona sandbox {sandbox.id} created for {sandbox_id}") - except ImportError as e: # Daytona SDK not available - use mock for local testing logger.warning(f"Daytona SDK import failed: {e}, using mock sandbox") await self._create_mock_sandbox(sandbox_id, env_vars) + return # Don't try to start worker in mock sandbox here except Exception as e: logger.error(f"Failed to create Daytona sandbox: {e}") + import traceback + logger.error(f"Traceback: {traceback.format_exc()}") await self._create_mock_sandbox(sandbox_id, env_vars) + return # Don't try to start worker in mock sandbox here + + # Start worker OUTSIDE the try/except so errors are not silently swallowed + # This ensures we see exactly what fails during worker startup + try: + # Pass effective_continuous_mode from spawn_for_task to control worker logging + await self._start_worker_in_sandbox( + sandbox, env_vars, runtime, execution_mode, + continuous_mode=effective_continuous_mode 
or False + ) + logger.info(f"Worker started successfully in sandbox {sandbox.id}") + except Exception as e: + # Log worker startup errors clearly - don't fall back to mock! + # The sandbox exists, we just failed to start the worker + logger.error(f"Failed to start worker in sandbox {sandbox.id}: {e}") + import traceback + logger.error(f"Worker startup traceback: {traceback.format_exc()}") + # Re-raise so the caller knows something went wrong + raise RuntimeError(f"Worker startup failed in sandbox {sandbox.id}: {e}") from e async def _start_worker_in_sandbox( self, From a30c2b533d55093b04abc26237d9ac04665ed0c8 Mon Sep 17 00:00:00 2001 From: Kevin Hill Date: Tue, 30 Dec 2025 16:30:54 -0300 Subject: [PATCH 018/290] config: Update MCP server URL to production api.omoios.dev --- backend/config/base.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/backend/config/base.yaml b/backend/config/base.yaml index 89075e34..1191f3b2 100644 --- a/backend/config/base.yaml +++ b/backend/config/base.yaml @@ -103,7 +103,7 @@ daytona: sandbox_disk_gb: 8 # Disk space in GiB (max: 10) integrations: - mcp_server_url: http://localhost:18000/mcp + mcp_server_url: https://api.omoios.dev/mcp enable_mcp_tools: true github_token: null From 086126ac830cf232e097b9db15cc3d74ed14fa82 Mon Sep 17 00:00:00 2001 From: Kevin Hill Date: Tue, 30 Dec 2025 16:38:13 -0300 Subject: [PATCH 019/290] Update MCP server URL from Railway to api.omoios.dev Replace old Railway URL (omoi-api-production.up.railway.app) with new production URL (api.omoios.dev) in all configuration files and docs. This ensures Daytona sandboxes can properly report events to the API. 
--- backend/config/local.yaml | 2 +- backend/docker-compose.yml | 2 +- backend/docs/DISTRIBUTED_AGENT_ARCHITECTURE.md | 6 +++--- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/backend/config/local.yaml b/backend/config/local.yaml index 0e5f6252..a726e49c 100644 --- a/backend/config/local.yaml +++ b/backend/config/local.yaml @@ -25,7 +25,7 @@ workspace: integrations: # Production URL for Daytona sandboxes (they run in remote cloud) - mcp_server_url: https://omoi-api-production.up.railway.app/mcp + mcp_server_url: https://api.omoios.dev/mcp enable_mcp_tools: true embedding: diff --git a/backend/docker-compose.yml b/backend/docker-compose.yml index d7823dd8..64c7d845 100644 --- a/backend/docker-compose.yml +++ b/backend/docker-compose.yml @@ -123,7 +123,7 @@ services: - DAYTONA_API_KEY=${DAYTONA_API_KEY:-} # MCP Server URL for Daytona sandboxes to connect back # Using production URL since Daytona sandboxes run in remote cloud - - INTEGRATIONS_MCP_SERVER_URL=https://omoi-api-production.up.railway.app/mcp + - INTEGRATIONS_MCP_SERVER_URL=https://api.omoios.dev/mcp depends_on: postgres: condition: service_healthy diff --git a/backend/docs/DISTRIBUTED_AGENT_ARCHITECTURE.md b/backend/docs/DISTRIBUTED_AGENT_ARCHITECTURE.md index 9dfa22b9..0d91944e 100644 --- a/backend/docs/DISTRIBUTED_AGENT_ARCHITECTURE.md +++ b/backend/docs/DISTRIBUTED_AGENT_ARCHITECTURE.md @@ -57,8 +57,8 @@ Agents only need HTTP access to the MCP server. 
No direct database connections, ## Production Deployment ### URLs -- **API**: `https://omoi-api-production.up.railway.app` -- **MCP Endpoint**: `https://omoi-api-production.up.railway.app/mcp/` +- **API**: `https://api.omoios.dev` +- **MCP Endpoint**: `https://api.omoios.dev/mcp/` ### Services (Railway) | Service | Purpose | @@ -77,7 +77,7 @@ from omoi_os.tools.mcp_tools import register_mcp_tools_with_agent # Add all 27 MCP tools to an agent agent_tools = register_mcp_tools_with_agent( agent_tools=[], - mcp_url="https://omoi-api-production.up.railway.app/mcp/" + mcp_url="https://api.omoios.dev/mcp/" ) ``` From dcb4579a6faa342cc76d7c686a87a70b8db9e18f Mon Sep 17 00:00:00 2001 From: Kevin Hill Date: Tue, 30 Dec 2025 16:41:40 -0300 Subject: [PATCH 020/290] Set permanent production MCP URL in config/production.yaml Use hardcoded https://api.omoios.dev/mcp instead of relying on RAILWAY_PUBLIC_DOMAIN environment variable which was causing sandboxes to call incorrect URLs. --- backend/config/production.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/backend/config/production.yaml b/backend/config/production.yaml index e331c805..3127b3a1 100644 --- a/backend/config/production.yaml +++ b/backend/config/production.yaml @@ -88,8 +88,8 @@ daytona: sandbox_disk_gb: 8 # Disk space in GiB (max: 10) integrations: - # MCP server URL - this will be the Railway public URL - mcp_server_url: ${MCP_SERVER_URL:-https://${RAILWAY_PUBLIC_DOMAIN}/mcp} + # MCP server URL - permanent production domain + mcp_server_url: https://api.omoios.dev/mcp enable_mcp_tools: true github_token: ${GITHUB_TOKEN} From 4f23c2823c5a7efd24dd0a59a28c743e42239c08 Mon Sep 17 00:00:00 2001 From: Kevin Hill Date: Tue, 30 Dec 2025 16:47:39 -0300 Subject: [PATCH 021/290] Fix continuous_mode parameter passing to _create_daytona_sandbox The effective_continuous_mode variable was being referenced inside _create_daytona_sandbox but never passed as a parameter. Fixed by: 1. 
Adding continuous_mode parameter to _create_daytona_sandbox signature 2. Passing effective_continuous_mode from spawn_for_task call site 3. Using the local continuous_mode parameter in _start_worker_in_sandbox call --- backend/omoi_os/services/daytona_spawner.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/backend/omoi_os/services/daytona_spawner.py b/backend/omoi_os/services/daytona_spawner.py index 4e00ce74..c4ea7661 100644 --- a/backend/omoi_os/services/daytona_spawner.py +++ b/backend/omoi_os/services/daytona_spawner.py @@ -456,6 +456,7 @@ async def spawn_for_task( labels=sandbox_labels, runtime=runtime, execution_mode=execution_mode, + continuous_mode=effective_continuous_mode, ) # Update status @@ -505,6 +506,7 @@ async def _create_daytona_sandbox( labels: Dict[str, str], runtime: str = "openhands", execution_mode: str = "implementation", + continuous_mode: bool = False, ) -> None: """Create a Daytona sandbox via their API. @@ -516,6 +518,7 @@ async def _create_daytona_sandbox( labels: Labels for sandbox organization runtime: Agent runtime - "openhands" or "claude" execution_mode: Skill loading mode - determines which skills are loaded + continuous_mode: Whether continuous iteration mode is enabled """ try: from daytona import ( @@ -593,10 +596,10 @@ async def _create_daytona_sandbox( # Start worker OUTSIDE the try/except so errors are not silently swallowed # This ensures we see exactly what fails during worker startup try: - # Pass effective_continuous_mode from spawn_for_task to control worker logging + # Pass continuous_mode from spawn_for_task to control worker logging await self._start_worker_in_sandbox( sandbox, env_vars, runtime, execution_mode, - continuous_mode=effective_continuous_mode or False + continuous_mode=continuous_mode ) logger.info(f"Worker started successfully in sandbox {sandbox.id}") except Exception as e: From 14d712e8e0eb5f7307925a5e6ce849e861b5a327 Mon Sep 17 00:00:00 2001 From: Kevin Hill Date: Tue, 30 
Dec 2025 16:50:17 -0300 Subject: [PATCH 022/290] Fix Daytona SDK list() iteration in get_sandbox_logs.py PaginatedSandboxes returns tuples when iterated directly. Use .items attribute to get the actual list of Sandbox objects. --- backend/scripts/get_sandbox_logs.py | 13 ++++--------- 1 file changed, 4 insertions(+), 9 deletions(-) diff --git a/backend/scripts/get_sandbox_logs.py b/backend/scripts/get_sandbox_logs.py index ea0c2063..eb403892 100644 --- a/backend/scripts/get_sandbox_logs.py +++ b/backend/scripts/get_sandbox_logs.py @@ -45,7 +45,8 @@ def get_sandbox_logs(sandbox_id: str): # Try to find by label result = daytona.list() - sandboxes = getattr(result, "sandboxes", result) + # PaginatedSandboxes has .items attribute with the actual list + sandboxes = getattr(result, "items", None) or getattr(result, "sandboxes", result) target_sandbox = None for sb in sandboxes: @@ -166,14 +167,8 @@ def list_sandboxes(): daytona = Daytona(config) result = daytona.list() - # Handle paginated response - sandboxes = ( - getattr(result, "items", result) - if hasattr(result, "items") - else list(result) - ) - if hasattr(result, "sandboxes"): - sandboxes = result.sandboxes + # Handle paginated response - PaginatedSandboxes has .items attribute + sandboxes = getattr(result, "items", None) or getattr(result, "sandboxes", []) print(f"Found {len(sandboxes)} sandboxes:\n") for sb in sandboxes[:20]: # Limit to first 20 From 85febad33980f5c05a468031dee4959d90652f95 Mon Sep 17 00:00:00 2001 From: Kevin Hill Date: Tue, 30 Dec 2025 16:59:34 -0300 Subject: [PATCH 023/290] Use configured MCP server URL in test_spawner_e2e.py Instead of hardcoded localhost URL, use the URL from app settings so tests work properly with production API (api.omoios.dev). 
--- backend/scripts/test_spawner_e2e.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/backend/scripts/test_spawner_e2e.py b/backend/scripts/test_spawner_e2e.py index 2c0b3434..f3401c00 100644 --- a/backend/scripts/test_spawner_e2e.py +++ b/backend/scripts/test_spawner_e2e.py @@ -33,7 +33,7 @@ async def main(): from omoi_os.services.daytona_spawner import DaytonaSpawnerService # Check Daytona API key - from omoi_os.config import load_daytona_settings + from omoi_os.config import load_daytona_settings, get_app_settings daytona_settings = load_daytona_settings() if not daytona_settings.api_key: @@ -43,11 +43,14 @@ async def main(): print(f"✅ Daytona API Key: {daytona_settings.api_key[:12]}...") # Create spawner (no db/event_bus for this test) + # Use the configured MCP server URL from settings + settings = get_app_settings() spawner = DaytonaSpawnerService( db=None, event_bus=None, - mcp_server_url="http://localhost:18000/mcp/", + mcp_server_url=settings.integrations.mcp_server_url, ) + print(f" MCP Server URL: {settings.integrations.mcp_server_url}") # Test task info task_id = f"test-e2e-{int(time.time())}" From 1e872a60731e92c52062fa8f95d0f9323bed8087 Mon Sep 17 00:00:00 2001 From: Kevin Hill Date: Tue, 30 Dec 2025 17:21:28 -0300 Subject: [PATCH 024/290] Add OmoiOS logo assets (PNG and SVG) --- assets/omoios-logo.png | Bin 0 -> 151503 bytes assets/omoios-logo.svg | 34 ++++++++++++++++++++++++++++++++++ 2 files changed, 34 insertions(+) create mode 100644 assets/omoios-logo.png create mode 100644 assets/omoios-logo.svg diff --git a/assets/omoios-logo.png b/assets/omoios-logo.png new file mode 100644 index 0000000000000000000000000000000000000000..16d9ebbfaa4504250ede8932c02d312cb9a01c4a GIT binary patch literal 151503 zcmce-^TqWDW@HByvw9(nx;5& zb7=#wpY798ya)aJRayFmSK2+=c0n6v`diXzu*0w=QduCZX#?|REqSENsef@0lely* zGE8gn=*w#DjpVl>`AvvPbd(^pSeIL;7xdrN617De={jl-EuS7AlvxS#`b1>uKe$op z`#`ulBp=1aZHl=kpf`?$l1c4O%p^*Q!Y-u`TEv%5f)V=PhSbt@144Q?XZZnp{z`gk 
z5zM3qc&Z(V#hUC&d%Lgx_y0l?58rn*yP{i}-ZPVQeiRCb__J!n=cCnq-=V|~#csR^ zD4wvTWxdNTl?h<{(fb=J5JDeh`*ez<1>f>)2cXj!eHYG1M0Z^x-oqKCO9goB6{|EK z0~H@(#82P^=$smBmJlPZ+SH2HdL`2Lk~>jUdr1x2`b$>Bw$&0ejtMnR-i_MF;SZJ= z3VGP3SlXGE(fXx>-trO|QB3dn4lO4Dctp!qaM)fygU%s&F)T7e>}HYJLKxS>W}a;eVg}Z{y?m8?TOr z?5|&7%AR*JJ6=96fhf{vKbp{rYNO5Ldm&YFqPvpPqfV8)MyjrH{$&O5_;S!QF^vvI zeDzlFG5|=?kA#X(SSK9+!GR~xeo8DF?jGTDwedOMqoM|=>)@MDFU1*sGwpH3Zi5t%e6WwcjN&P_bmL?mQHn$Hx$-B$>5C82Ce$XnHU{?6g~ju|()Sm(FP%Zx7cw z3YYyo9t;AAFVYb0=j4i?Xysmx?^M@V;<+VWX33!;fu*{u0tkqTUp-@QB));(A7-{h zckYqHRsD;p74~e?Wf!74V2m3H6B0thcmC3b2K7VHx8(gMkqHD5Ow>J5Vat#ItC^C^ zW}QL0*2AYUoyzuEjEcJ@Q;P%~Q*09H?QH7%V(jZ&SG@*5WMdOfr$Z#tEAyu4r>NuO zi*&5$<8KtNiJ^|X@8?v}YCFJX0kGG+H7N&6rylMaP_i)*e--3|BrNU|$5}?B_fD_1 zO~bj&0~(RY66x1m^aR!X6aj`X=7Yq}*Deol-QJi)-fu4lWpmS*6PYg%QX?)O{I9^K z=1BbX*lh^8e6RBTtoG$TQ~>@KF5ub+d-LkWsOiWt_O=2%xfnEVi0|b1lj?(1O1a`t zs++Jr`Ys&bcXMV3ySmmLC*sBVDtbMK<>NRNc+i&Gl?!IoQaJ-Y_((i1(W+!R2~Fv^ zEKPM7F8dvdWr5Ie?Qh&wXy@MPg=!UqTH&r^igPVy!hjl1aH9F7`-SmQD65(N-?y$T z@y}uuD)FfizGDBoS;1d(AGgWvLq~DUbiwlvW`bB6D8RzA&tpza>Vu088I{(58sPyD znI;KVH8F=W@4KiXnS*h1gg|y^HrU$f2^~~+!k-U?Ld|++gfUNJ?WY3c9@!W{ zh|)>ZAK!j05YM^_+;b+Z$m~)u4@f65(Ei(rL)D3Py58;wB~c5J{LrPqR>E}RANa1Y zttLVN2SGV$mq)?RNIt9C`-0qhL3H^4wMy!@pTz}3q1m-X+nI{i8TFKjK0+^kF+1Mi zAIvN3-y|PurVHt(9&WRaqBXAaNWwtlx08anH(o$i4E^NceMi)0`-k+Q;Cs;eH6izR zd$ZBPCc?WYzK*QYP>Pa_E)~u#IP=rcqsS5cXevrPRpmuyZb5L$DV18jeleMyO1tps zB^N!RiZde~mq#Nq2r3I>c^0oiB4aV5PaYXwH#@>v}0d;|~x6=7`KFRfmj!osTX*Wu~ zRwSux9#@}rrpkWmz~?{lpc$R5LRZf;X@~}filZn{u2I#rZPlzE^zI31*N=T}YE`|v zi5iJLRE^gBc1BMU>g}jd5if-vZHgyfe4FZ&5O<$R^S>hJ3m3tQ7zBi3v_q>)Vez*s+VYo7pQ@$6z;0&XRh&+Uom+6c^jq*ST@t5}#AOiX=5~r8s~B6vK1lD;EG*7dntfZx$ED`AS%zb|%?7%4hDXR|Ib$zom- zC48O>3XLjL9PCP_qxho?h<-6m;OZewEX8Hr8zBE4v5s?)pW$oiu}`b1pHHX=^= z>W6>uW1#fIkx}l4cpYw5$$cZgr;Arx#7DmG(OKA{Rs^Oq9W)&|)U?z88TQAoi$T1Z z!09wyaqFJnUc9ur%Z#+sm341@K+xF67;2QDe0N{L#&lY;?SObU-YX;#=EJsdtRJ8> zI!zhK0~G^2OdvQLwSy*tHo_laWP)qn^oDC8oEpmn*Di4q 
zJS&D|dIH{a)3}!u75^Ti^%goE%B7$b0$blt@w1{Bws>l2-}zFLf@G5G8GS_y2|o|_ zzd9$G>k1J+Bab$Gdy4-8@mbxwq8w=d3qrPDPEhw4*M;7RJH&kR2|d1RhsO_k#QX7F znzv+T5$2BSTVna2-j9=`Y|rk#EA%9dSk9fbR`M%1?}GYN<*q3XuB)Daoao6G2qIsy zw+mIW8oYw~<@3}m)MzA~2sRR%7YhxOT1MEGgcIP(qX|`uztR~voG^CBTRw$n(h^9u zfEs@g7thXeJ#!DEa63lA4%eezU>_soaO!mn>G&j|Zb)QBhTDYjfA~E2C@K4b#)}TO zZu*?}g-!V2rPKKF8}Z-M5V2p(BoKG?z~C>PBA@QQ9j-*U;gzL?e+71_A>D(VO@y<@ zvEi4n)^eX1QGHX@uZVO05Iz=a z)j3+PLXXc%5TV7hHVPe2p~p)hxW;+tLW6%~W%ZPz5xKDT1ivG_=q%``J6jR=E#6%s{BqsvdG zHolP_q1&d|4ttkSGebh&)-5_T>9_i}zTKh*y69*@T03)WgJ}~tR}O(49z-$ui~HRE zD4z9YtCSV9OG~=t_Aw^pyx6tE;>G|qb(dW0y(97q1YFRnK8(t|^u-&AW%Ez{wFOhA z)`QZzzZ`*?48`xajR+&$^9S*CU&C^^Ii?m_x@b*mlIBOkHW=&i){~@zolW)>AXem4 zz`fnAH%YqW_?OPO_Om~z+!So3*i6Tm)pwd`Ez!SS$Mk6kj5)YVA>1T6fYVb)0Wck| z+KxCA&VfyMG^imMhm`{#cDR)$=m>0Vy(yWUC9ZZ~ak@U$$d8k8zWT~)&6snB33$WT zMEno836+iM-w$MoIM2VWXjxr%kM1d}cHQm#Oou63MlJ;1hJ4g-xo3CFFHsQ(%xW)e z0yv>j_2#QZ{kplrZOqwcLaJBP2>E~++s7rKj!^CzI$>&GbLF%qih~ZnlG)XS6ol_> zkXI?#c+~&e{&Sa~Ebw9QjrZ0BIj)-xXor0mw#OdtkS)mrO8>>B`P z!sz7pZP4WB2UpTe7$=Puslb1kuk@l{-=kAa@I{-~qoBAC}7R#mH*r`%yO5Z=JW z=lgNHEja<#O$}tmSVs3+9EPp3k9wg#ItXr-pB;(YgbF zgvO4kKj+xwNS%?6-erzlHGR1Gk^*D)NckA3*#yM-p z1UoYlGBy+wopc+I_}81&ijg$jEJ(~Re*bxWMX*4LtiLcZHu?KN)gn@sv z3)BaYK)Qg01qJfi5mvz6kK;or9r~s7vv_VroL|fJw?jn!lA9?f;Euw5O$hC%BGTuH zN~RvsjI&}>sX^9!dD+mTjb3z-V*cdDJBmw3?cW3zaFYO`Egb0*P6;*XOqzn&^dOE8 z`hgQO{&5Y6ho!3=wowicO5v4O8po#;87?-%DrCbJ?Kp%<%-q2lwC_@wdA*^M?~<9; z47#dC;u4AHl5s(Zu(-M^-z z@A!;XqE)c$qbG;|kx`ak#Swxtt<5O$g*==Ca80fk^%=zq%GuTtaIriX*uVYh6WJMu zy@8z>SNejShMZ-pWvjuZn8W6M6%t1{CCkdDDrEX^`IrqpYoY5)p!EYha}5*Rk4NNc z-6~G(IuuTHc-49;PQF)K`T4}|e{xH0{_-;Na5w$KbBS@OH!;tFv_OtQJ^*qCiF8dv zGsIhWEKMrm5#Pb+|5D|y>ScYk9@OE^7P?^16iBY{W)j8nK#aqZL9AxvFZhP+DC?Q4 z)oadz3SPC+&THj+z3E}H6JJPty6HA7wUT-mqnQ5PQ%L<|MlPLW%2<|l`3dyo6GYyn zl`)sRd98g)&`x|Q{9^EL#@kjVN2ae@zk5Fnzicc>AfzK@86=m-lbmrh+eRksCM&+x zD|D5qo-kRh7BAo_9*cbDP-Xle9yAb;U7P5Hhk_uk$y?OZvPp7}qw3v%Oj^IK2y;?w z0imvG@3?&wv%IEJ^dr+Uyu2#kUQ6;F&?A1HeOq~4klVsqPC2*He|I^Y(}n8@mtD8* 
z5S5ehxUW*wLMgQCKD7T`@pS60kfMAi|4Vpi=&|0FGQ2}*$ITWPtmZ-9Ry~biJCj_# zD)5z)_ovhwdo_N1U4|yR0_ezvfO;dCDGBJ3P5s>lD1%KIaf@9k(`o7`x1_+tM{1x7 zGK1joXdLM53h_AAO1gYt$<)OALLj$0<^KhCMLguu=9IZbb9I{D+UC%W;Nq&|Hkf1Y z2^q-00>9F&nR_mPkZ=BJsmZY9)^H(7&TvkAAxR#I;*b-wi2o1hh(9pMCM+_dZ1uJ9 zrIJ>vRnPeD!RdaI<;|Ut?AgH2)wK)A9g}q@f;i-M0dh851s=?u|*Ws(H=5uN1Vos&0CdW_UuI_FURjkW(tTh0iwPo{$2? zZ%N7CXi{;Cig(<`l<@@Knp;mGNu+s|Wbn{kik$}6oUl|9X(JEZ`dt>ujI${<+ZP!( zf1EPvM|p_sJ9lS29}`@jlrQqY+8Z#!=nuiy9P@z2qwpCd!8=@N9994c;DD{r__D-hRX@NOyr4?ekrGu*y`i;=K{hXyUY_s-e z$FW=1BX#|+?9Ay8*09m_`@JsC0VdI=av}HD>}kXBLeRgg&*m>6Ht=|g^Q{mc?e+vV zv2Vf_Z(<%q9eoW-|K^Eb#hRXe8?gR(YfoJJ_nrOY0;W*C-t}LI%Zil_j%TSaoUVLq z?t8TLs=?T3(~9;U!UvS71DCug`rDHuZe(H17$Tz0)z}b>q5%z^O8@ISQVRZpXkN*v zH{57`q78jeCq^AU)ZQmSgM5T2SzV*KlnUZYEfqfu^oc2h@=0)Vbn=V=Oi#9=*_vNe z{OKrEZq6LM-}rG=`o^^=KC?b>bn9M{P|2hu)L#CZ(;0##D3V-)W*$#-$q_J*F-;c4 zsc!f1qB_rk=qe3O39-W)^6mdU1Chez;e1>yy zg-D~vqkPmhFNW*O!<|C?kH(s&rKiK(TUnZev|H-lseJ)2-E}fWGJ`Z$^J#jAJ3YX{ zL0565vSI|HDmAT*zRY3|RwBubKbJqhD_I`4ew*X~VW8lL^j9;xuU(jScF%c)4bgT8 zKK*zHu8g{~E@Zt;IvYsSl+){7Qw%vj+y&@Xsq2r$rGFRV@t(J0P;!fU>@7Q!`Nq{+ zX(1}DSQf!3Ot;$edPA3jW7rPB&u?#|1F{ihAl)^Q$nxYokA6UP#!j?RKg9QlEz}th zUwSg$3!+H-bFm5I%9M{4Hi5@NH2n7zovy)z)hcmQdvtM?%BsSP2eIUW!GH*>h9_!q ztE6!48C6;gp;l&RpA(%i4-IBH9sHH*Jngq0noO;XgqAlN9EUERoZ1e?e>TAd{q=y* zF~#)^eYkw%naWZHXs>llB){)h;!{QM{wKOP%tP+rI(9wQZBOn^F#~b4YK73@x~F;^ zjg$9Pw~UK7!&j?fBN83nf1SRyDnmu8AmjFLbo>qRbN^&MwAZIFw=!{FqqJP_2iy}_NEq+4N<6&sK9H*h}UdphhwT4(w-vuD3_dJlHSg|Y} zD)bc)YA7Xa6DOH<3IQmea<~iPmyRe+U2u6M#$zJhgUiIux%#+ylB1O1CwrG%psH7# zmDOpROT4lLu!i0d`JJyH3|dD>WnMrlR_S9D^5eZCi+>9e_y$L@5a3E=&{UKshDQ-Q zSNqj}VOAx9_2bC$-ZR1f*XR`=T$8sUV#Y%oMY0EPjV)00G+lzfPLj#BF=$V%o5Cr{ zy~O0GF(AW-9lc>Qrxf!P5NXEVIndqau(jz3n03TD9E3jTuVugy zPdw$Lf^>MYK~#`J$(*baO5bQhzkS+EyRQm%l|4Mb7N)^jUEPG60EYFiC(&>B% zvr81W6o_oL0FQ4tv@$Uf`NZ84k7dRJ3eIgHW7CJKqf4nHUOX{)d)r9*2g#DNEZ~-VZODDzzmK+0*Duh_~KbC(rH7 zfh+QEOmTh;51RC3aP!hUey80?*cm=$`{CxG2~(i3?(!3{+D?r^XUo4g6~c%gr{8@D 
zcte3*HrZr$1YKgZA6&yq)7nlRQ8Ef+Hc3S9*}#V^dO%NUgKQIF%)TtBscaZ?C@E?x zrZQec1M)~CSzEtfAa5emN>x@8DY{(y*I)EgXAez zL&ZAc1KB`S-*N^ziWl4z*7ahei~JmO7%0Pz%GTp4XKe2!h-m8D zkp`6{POC@^v5!6WOog}gCQfmfM;@m?_aXdHdcOkC64zvp z)T8h9(k?*$)PL#ewk}v|tCr{QocYyU`YrXd(s_NyE->q`bpkh4C>pHQhV(T)2_j?} zv}$eSVff&nTT!oLOp9;K*xD)N`c>pLNAi#5o{AOCH@92%$kGkdk-kyMKKE8+Pc z$tli{(KuKF#isXNSL>wpU)SM}H?RkoHU4<(f}?BNvkPTZO~p%}=o9T%I#PKdnTBqR zk!-1?fXDaqRDMAw!n&BwF=`rj)=L7nRWz6+g?MZFTcA*HLwETQnrI+m62>(he_U6! zM_LQ=&|nGTeKK{Q2`?jis*gOpsG`@)9>bY|LlfU6bV&Ah)j!5Sw<|d#Onc45C+;D( zZqEbu$#baAyC(H8_cFlPGwNLSTWJD3d?mzPSeoF_&(=m2+U(!a8wV41#`!{}LDyk3 zFZ(xsw2HiDOf;{e5yYE`0acrDvwr>F3{n;+u)8X%XsQQ|n5#2_!)KWOaTwAMKOzT0 z(TRoL^SXh2+DlD}$R--#?n;{7yvqHvIDV)!KVfy}mx|^TRqa2oZni#Ig`jqh_65+v zc2~%}Va%*994RA*Dlfc)u<4_p;*iJ@Eh)jR0Z-XA^aCh=^xdDLH=H1ih_~_4W`fGT zuf$cl)!y}f-4Rx^s*eY9%baNce_jA?OwFl{*dn!Q^KN zG9Vhflt)N|t|7Mij!HC+Gr6Z$kC$5nF$&F(B`BA~{kvM1^h3iGplOJ^$R7QUN%65W z7d~{+9-uMi#Ib*sHH2ooGwx*q_{Nj8GYHjH4uK0vA)iIub?xY4e!lfY=vQt5Ew{i= zDYYYB#bKD$M<7t465M6zhdnBBb$<=gAAA)82E&%(*|xI zqYo^RhHdwWWOfW~)ws=p;p5{^_u1Aq8$)%Me_W%jj57Mv@>2O5j-e`Qyu9r#Mt&5km}4nmz4E8F1Q z`?ti|rLrX~>ZGe{FAm(a?yghrPRgf&WPo<^a!lD1v=KKZtVdy0;HlB?F_bI_cvs)=1nB8WK!+jSEOZ$h0uhvw%JaqqIxZF!F z=I_Y+lwsi_(`JVOR?wwdi{+~EH6a=>yY^k7%?7nnv|v~1UJ?ce(plE(Uk2pZ(#I6( zHLUt%Umv}%uqOcU6rkC$NZi``kYN7F_4=KSX+e?4&-CwqVnRh7 zb%j|ARHBv8f-1V3IuM>`A8xdbB3|)sbPdo0LyscTF@;x1!gK0fAjF|zwe>x?*l)vB z2#i$x?FOOws5L$H>+1H1cJrFnuS@B$D-UYCd=YrYuCFOp%=lQOa zXDBhtMTh6_?hor@A^I~59f>{BuaXZv5=N|*|3Zcjg_v+k1b}#F`lE-kgy&tR4#0sZ zez3L7A)9*k;%-HQLWvC__w}b*HV-BRAyt0=ml*Xa(4H2U_0#5gm%BU)V1N4t^o`{fe#!CmO?Aa zAUn{n2k(w6R>eb9BIZx@MywOmjcEZrltV2B4ZA{sWUd*VJ0n5N7X{SqWL$aaDsWj3 ztV;4UZr+1+xGQZ%$!MTa4z3L%M4BcID)K`Uq9*De_+7KXQ}Mx%%r`LIWQ;yK8>G^iqi zBP8ffg$~0%f?*SN=n}WSWm5s7wL=iS9$tUvCnbV>2xQYJUdvSztG{&vD8aB)FcDm6 z<_-6^k5FI@S*Vu@K&O3>2g;PVr9dG#r|t!ch0ahVmtEVyPr2PWzbELYh9*oFVU$%u z*F9|eG4JEyZ~Im+u8UWJn&({tv7kHpjDOKqegH4G2R_`b{dh^OiD66}URCEr3lOQf zmk0&WEJt+gk?n!XYbiKXF8APH)Z0u&&6R5<8v~4?1Jz`r$4wM&oW4%iuAqEVdWDWf 
zSZ6JDd*T7Uax z8+R4&h!A>r-7p?{Mmx4PH(r3c=R~Q1g^Q+=2#bpjmhF6kv%kEU+OQxtNY}ZjO7Q-w z=f!P4K=&kq_Y*0;t_*hh64B9B0G+SVO5gDC@!pTJY$e$5=|6~4f@VR1PzL-20f5ef zL7by){dT-S8`>@1Y!-%*7{F7022e&0wdQk9B04D;BiiU;@Ta;^;-=;dr%Ny?z84EX zuX?DOkDmrDdE@}B-hzZVZefbY!qmMm-LGeu_b9(d_?=M9D7MExZ_u%rXH9hZeb4D# zAO{Ch3bvjf=eHW-LwChk+*eM+p^^2xBOlU* zOhH8!6A}T%6j39e08i-*u%;}FF>d`ISQ6`gx)8H!QAn<;S20agbQzvsR_z8zjL1mL zMtB>K;fp-rv2t2Ozjdk5LE!LJ40J3=d}7jia-zX;2>Hvah3M~}xxJf>WWvvKj&)Im zH>M|f(M1*1l0LJ)GRzXe|C)c=H+_d+c6l}MgqDXb!S$ZZNIxfI*RMZ73#{~Kyc8aI3@f5BtHHJDZ zd;)gA@r5XMki*t3}muf}Nlb}&ekL$VQ z3~&P<%M%%mO5~W2W}qVIgsB*8PeGClBc^~Y_!)>cN@-iFZ!O{p`xKXO_y$OY;a;FY z*^bTL8X*PUJlB|m9k9TU>U;~*;H(psBu(~6sc)iXQu1YF1342PmWkIBmFi^)Rkxdz zeO){$eaXb#7r=uon!d>9alvv_fEv}SJIbdF1%)25tW_YN==Kbk+$2*<+Vi<6%@7+as92fo7+1q7#PlJqxna?D~GBV z6nJ!l<}y3AFxO`Vnm66AN%i)`hhpp}iwA$PjEj*vT?dj|hV8=kgE={n^;6rKQF;*a zz0(~H)iy_!g$^>PR$tO;itz)nq!{|pXyp64vh9$F2fbkju3aQ=aoc2zBBH9hM zgn8`P{q=jy zRa65I@Dy87Rxq33JCFNU2H=+&5q(PE;1Rj4^0?p|5WA3yy({i`TniR41EySJg~ZYU zjApXZq9^z#%6SX3e8wGA3JzCO~4={q%AR zY*~y~4J#&z(tzK~mdtub+V>>}z+aXu6f)4mPQfwSYp@VV!7&!idr5$xhiIDCaf$f5 z^#-cbYr%7qnAZw=LhM9geY%_9<**8AE>?$j(ceg7x=&gxtA|kFn0Vh5K!s!xq+w!_ zGz9T(j782`T)vLDCzXAyRT@kdWGnW1YpwjcW)%z^2I8h`gCi%x*_ZwVT7M<%%^M36!y z&n4NY;h8}lg2iHWlPdS`8y{pyJZG2p-qlZyY<(t$4sY18FD{1_^VD%}+@r}5Y;`Rj zFHMKeqUg+*#ifUHxP{e{TxjjYOaXb8i9Ft7S`9{|c?FMoyhSuKZCUa-bQ!WrCluIb znaZc^Ud6iCik~FkHNlZXv|U8!CzormO)oa7#h|SG&iLokKE|&4TA$u3RW@s{FHCsO zxKf+Y;^tN2^5Y$db{MoQa?1Z@@~X8BODb+< z3XBMMU*7c8*%?7axHmE@V8XO!ggBC8nr~80s=H1W@PqP5rC9e zKuH^ZlcwMrUrmrk)J-|%rPsS13xRoZ;@@>HBM>Vg3*g!1R*ks7 zs2OmTas`GtF}H6N?@D1{hBjaC#@K z*)dL-&x^*Nu}d{!)o-_36#jcc&M-v7pzgONi^ERt~&oQszT}+=&~7L zd=d|uy^{Du0((J8(~j%grfl%tjXt<@@4JGEx~&X$;s8G88OXGU)BD}XE{r=a-pz2J ze^4D8`rot=l&Ld$vbR|6wPVUK2V9cr=E=_arNggSQUxKV5}w@5iH#+a@xx&fNA+Z} zR94T`pjwE;nGkG74j3#1{OZaLBRb#ObfUi0{4_6}9G#GS?@769VZGnz@j$eAdIv~v z>7u8DG;y?Klgg4#BmZXD{)cjqx_t}Kk%^wgXO&;6f?aPm!W+Ss3#gc`OqTZDXFDn? 
z061L^YgTD-mEs*N9d`z<{OPZIBoqt)?HB+(A%KBn7s5dWYV(u(M(-~jz8E#QY zpZk)=K&C-H2GJJF$j~=@ zfsp^Plo&0UU8aRU}W$hQhOF*rYXQv+So4;qC49=xJ1-GkVW?NaziNtYO!4YdIt=VsQ)`A>umBP# z5w2#!F48YS&G@Ulmb47=0!K3sbQkzwkxTFwFe?fQe~?^uQ3hUP^ob@;<_mohYyHT@ z;$v-5?h5{N=3r@V%t2~>Q{RNoocfg#-*4XHL&R4oBFcUvJqFNMDW*y@?(yB(u1JH8cI)pG^Kv zwa;-Kxz`r=@nMZtxlY$(YWc6I7M8?{XJweHYa#){YyIIyJm!J%2@tfXx3ES7iPR8Y zEK^*_%**zeizjMcHXnuFnerF+Gy{~2z`6DF`h_>X8Q`@0aqe@APK{#PBZ!Atu6pY8 zA2lqtU*R3_g6GfV+b>)L&hY8LkS0eW>!T@f;s+Q|aNVYrX&3JQO{VjcB4o(m zT=shTP?iS8nAoBe*AP@A_r+sE3jA)TL8=RFbSMcd{%Txgi&;B6SR7&12pw#6l)K-059U@o%N&wxk>7wxA0ez6I7BY1EEw z>jhpCHLr@)vymsq@pr+E89@EJ$GB^~&}Fo=Mc9s6R*k;q?MHr#Wu~MmMP8E-ES=;^ zt~UNij{w`njcIX>DY>K8woSA|L~%!$u^h$ppA%F6TcZ)M#B$1qyCxP?|CV?TDmo1O z>J)dTZ2)>7JNu&$tH<(ZVDzZz9vPTe{%5<_8>sYSS!A&yf-$*l_uUp`A_J((c3X9q z9}m%im=neg)}u18k^I>aJK(<^re?R*>cl!Su&$X;K9^6slEsf!HxJ>d*7&_y#KFgx z5St=`JdM}3-mx#7&V$%msL)3d@~F5COe|43q=5`a`Li_9lRvh0dZ7+GrBawaFHxBU z+}|QJ3AkSQ;Y#=qQ+0@Xwc?DiYiXxe+_$N;VBrnhd52pq_T9;M!iWqPk1vE+l(#$V zDGp5@$2oUHTf7TXEnjc~m)@9HPn>s$*X-fk-aBNFS~;fhJupfdwhcEmc1012Ri~=w z?&I!|S8!X0bK^A=O5<{4jdGOMl-#riwmV6cmMUKbxDP34J`)VyNm382W->fiqLUQE zXZU^l{GQyL57XjitxjRfQCl=`bYIVossoT)}XX475B2R|q-2A+)vqt*G== z27P=wGGz;Y@wve)N+D?1`XP&&iCit+~_>J*Zhz83;V%rN%#&-A$I_eAE5pLOXKnrGcESKTvKy-^0AMQRswW z>twB;B6gyr>SsH4S<@A3zlCRVVxd7*4z!@z@B9 zPy=j@rTz>CZ#V^4i3QIqn|}3yDnAXr@3LUNCBHzeBd0dll0<$Ahg|*YRu({uTO;-1 zgB_Fcm^(EghFX7Z$E_rQLqFJ17p*pL{pq?k<%XJ+1v_A{Gub1xReE3Bkh!q?RX0V@~i;*TBI!!Xb{K@q1W5DQ@)DoNera7z^-oI~#^o9rZqHdo`^j(7?_{}8z$+VY*?5{J1sYy_eTGrE%8 z&mBMDI2H$b0HuXL)}(3P?x4&yaY(~OI_W%pgSkcg35sKxap!LDMJzEQmNvNmOJo1l zldi3A@f;iKe?x`}SIV{SHj28WE~uB&%!g)HV9rKrnsw#ed{HRo-YAy>BM2$DCVKX!F0k!M{*Fq458tpA z@%H1<^xl#g<$UKKFLe6*&foRG3~E~H@%m}u*W)X~$)C~3oeHS(`e|!grX$|aIYZ!3xZj*SJa81WU3~i57{$kZ)%Qk?=1QI%0QM^TtZRcmuZ>!>m z=N>8MF+J+GNufR;?mpmwG|-|wlENzo6I7pW5h)%gEVru&L*g3qf%@m*j~Sw@OS}?U zVee^yQNn*s?_HO>2%@3tGn{Mj;4RNbTJbk&MSW{MvM|n_n5BUyW**kVSNrCLu=hv6 z{xb3C$BS`c#14A-9^G08D!&&awdVR-15X!`JMgY){7{B(V9mnK203U;j2ohKHNxTi 
z`&*pt3&~Bxw|%6M782Bzri3enj{WV_CQMixeGubOjU%>ic#Uc$Z4x%h?Y!#<`$kh| z665NDn@7@o>$IVX6Ns-v^3V)2_<<66hJNs>9XLeFoUH|&zasjv?4jSebyAXl!%`sE zsEB6V&Id<&CoLH?^W-t(zNuJZEiuF);RQL7l5~c%KJm|TwJMbo5&k#sVBQh?Xkpv-KN@0K~i>xU9<&H?}E4QKMd#$Ka3F@wt5|R&avX^ zh#t3PmfHX-Xx)jhbYV>+Sb6DGlY(m}9t?L9bmtuDhD9*ve9`wjmu|@Y=!)SHJ98o1 zb_rkMCOj9y;HAt?ErGSy#Lq^cC!9Cts|mx9)H8eS+rcp6T;wZPaU3^{tt0O#FNixr zdpj`qsY&=p%5UCeI}fz>wY(oLaB9W186(ed4BC74!fk-XLY?1{z8^(1PC1?&uAVen&J(?>2;dONI3?EGYFYW;>oq@^Rt ze|KU1mjc+>HAOAvEugZQ(1ihK<0DpfF8SDPa#84N(%dMH<<L!L?$5`Jza96k!9!R3yS1l1gto*M=9|ei66ES<@=uCrZADFPDvlVNDVmcNAW>#eD%%JMKf; zLHZY{3$wI5lAOAKZt99ro4MB@3Z-k1n5VJ8&Ae{PG$ab}iu379#7mB|hkTq4Xkf*nB5> z)Q$ErEqA(d=nOwz^hZ$Byow;6^LTac>+(?CMzxnx#E;MThvd#3FLop6C+E{Sw)o&k z6Ku#$UTI=QEr%yzy?9=4OJ&|=FRbr0%w9*|0a0hd2Zz-487c=CErg%m#8U*SeTUSC zMcm!{i+#gQ_imq_7NZLv9DoM=l^#W_xb4wD4K)_v>4B3tAnD%&3poQuofyR)EG0zb zj0V^L_Lpz~nED?L<=zIfR6d~NN#iVhan6;x4coS!&{I;9@nA$QzvHr`Ep8IwcI9MP z!1tIeNb-OAw+}G?b8a0`B&SCrXIC;H$tKlCK6a{YrtkoksxA-bKo5??`reW@2Z|AL(viv(Ufo@W#x*v)3-fkIhoD$BTMF_95Rz5(I z!+z6Y0Z1PbuO?T%__a*ov2ZT_r+|0rrPJ0qC$-m>$vk17YO&gU&dn``ZwUM(ocUdL zzT`Pq=oJF)i1I+=tG~_8Xc^1Ty>mWNE7nnjez=jz05;(~q{B4i0_1~O@e@derL2eI zA%(3&*Vn#Tw~k7Y(2TpcR|S5UUBFSKQnLf15IwRAy)Vb?m{Ofn8?Lu)3tNn&uzk{a zqL`8qeImGg0*SvPgcDc{?GrM|NqWd2Ik>!NxuXZKu&V6(N$xE{(Jx0eZjW1qr9YYB zkK(4C8YVRwl!XZL2GqJggEX;~r#K_JyC~%0(TIA(GqHa*j-S+qb-aL+ai4x-?+$~u z`{b4rtIybO$({ZfrTOp5t(AUP^x9_8pP=TZ!@;suc_FxSqJ$&;7 zzki)HY@KoH(dwSvo4Pq((DFD?gbN=T!ien3QzkX&4^<94Pn%XnOLo(wt*kK>$dZf; ziTyt>K(pNt4p561WUG17nx_wXKv2Q!Ij+Dd>Q_F+%Y+x~J>`@+nbGlsrcO*F(-)Vu zdWbwi>u*xU!Wn%?*>0l0V_S*mk@ueG%@N0kJM7Lz@xdC)DY*_5H)N$-c>Jp=Mf`~F z>lPJ%VJ*I6?Gg{q#rOYS>Vxl}8vc-ndmve8d0*e&O`+G6M+*7&)&6}NAgarNV=whW zyLwjdpI^G9DHZF4IDRXH$*`T9N348}nW*qi?14ekzH?%OF7RBam&;K6`oatouW-ht zjOB5Cj1^s{bnnufz~Ip>NXsazdP3o}`_*64uEeETzXyxu8EVd7Xzoq{T5zb# z3e}%*4u;2CPR~35pV|V6cQK!8=oTwL)LIDw4)O4)wQSDP#tX z(?$%|>}B)to+e0z_3H)9uJES@sC1i!;cMa$G0Xymz~_JVNPVlroEIAyL<&aJV~q6wqv@){ntZ?aHoB#|QxFhI 
zB?gG3G=iWA(j_I*up!dj0>Y4xZb1ZLptRB;?da~>fW>d0-*tWe->&U_o)h=E&wbA8 zz#3BQ5Gmho59xQj9X0_QVzv5XZJuHiQ7N&XFkeY`=7N@#vU~o735Gm1?na_!;%p9M zCk9sa)f`7*xdW&|cl7gzjnx$f#7ew4A2xo_-cSwWf7L#U_~nxUc}@6) z9laA0TVy!8ob2=s_FCcSm_7$RGYHWpxTe^lI#SPW%xdD~VIkl?Qw9dyBg~vpc|}V4 zKH&qv+Wi5AoK<5MJ1z~PswIrVAVOYMyip0M$X)c4 zG%Fi<-l#m{#HomH)H49(mK8pGr68Si*9b646-#?x{IKqLA3Zya5GldYYe2skKHy-8fy8Ti>22)^ua#ra z!<=jDG^x3#3SHDYvfT#1r(QtuT62YtK~UsrldJ44l`xw6UF+?GMZ`=~=-2QcaW{jw_G ztb~WnmR}cf17B0&P-1r0aOE-L2S(NBJMo8JO$8P)TCb(D9uXDoYBB zwVywe248q4<&Vwk*Z;=#sbzY`J(<|Lb0OIIJEJoe6;*>9WI3hSFw_fg!(i<PvGR;G}c1x^VGcoC4-FW${G#&-GLV_jk-@W6P8-_N?s{gO|Sr*Eqk&M#(I< zwey6m`ET>@tsfd9Qp2;YKtyA0MUpk{R)esjSi)fRewr*IF^P1trO5Y$zd_1-!HU4H znTJ#LqOgnCU_`EGruHax9!oABBF$?`9`RDD$Hi_RN?_s z^h%X2pO zw*$dqmabirRiRe1H&@Zm8sb5mY#K2n#{Po;hU&|j9(dk+=0euMAHH4Jb4|3hLoQOt zbjCb>7?Rb(RDTB;tA3Fa4%?3oPwIT)RK_el}yo+xm zXcd27$;Lyac)NR{f;0&fZC{T>{uR(e+m~mi)Ump>w$@R|!lrJ;X89Eh=^XxmJ}a~Q zAbd#VL&bAqJ%j;i;gw9ZMxI;vU^ru-#UL)*?Sm;7pylaX*TPN*)V_TVjqoeiNxGJ5 z{;v_9P=sGM(w8lE&Dge|>gdov=)Z7-@%k67v`fb>wig$k&mSa_94N<`u<7^cd&C$MMa%}%tKm@aIBev{coX9yzzz3ptAG%URe=~@f zG~ZVj7^CkZKXvDYJlb$ZiOg=7`{GVkVa3VE=dcZ`aE~Ec`1fNe*6sWY_lSRk6>DlU zG`_1-OJUWB#VNNd_p8vP+J9xkpseZ297940(QCtNpQ$l&rzbtkswM9)slh!)7l?Ox z7{7GL2T_O7rJhb%=kGC|M z-!ZWvU)n~4&Ce{DfgKO;?9vkgz3l^d7wNsjb;xz)Iurb3aNotwyxy@q-n_C|pOyZ$ zo?}?sGv=J%AZIgU@WSliZ1q7XCm*zhRD-W5otn<-yV3Xi+6!zVef%8@7IQ}y+l{f} zC|d5Fk_2>AIyf%sAEB5-&C9_-nSmYGEtD5ckmrK`E#z_vUNmtj`xVt0{rN%<57DL3 zm5!KSU$HroNMv73qbO0fg8{fy{oI5x6>_tBbtFVfWT zQ{BA%11<$)7a8&@6hW6)EDdFLkyAdmLx_QTt1vy4+YCU6Fmq!pLKOJ^&K!8HiYw#8uH!!O!(^@BxWs1>og(v#pz!fn3H|7pSH>M)iOurxj3+qRAhz^q%D(~ z_q}O~u&g%1MCq(LjJ;F;erRu28eE;?x*3DzJ1eh&DSe%mTr zoMic~8C()_Bc9g@Nj^eRQkF5lM%@<%L_cAf(7?VQN@>D6KMcT~^e`K@1W1b^DM&6| zcF)HG|JX0JA@D_J_^!V8Pgw30E)gqaZfOJ#M@rOx)5L+6Nfh^T zl7(Ai`)qq*NAc)f04d1^FQV8^OTb_xC^~7tm>B&riNt_`E$Mv?n|g__;qGeWcM6}^ z*Gx+dfn+0c+v6f;1U0^vU&d&|KWk2(a_=5M*_ocSwXffksK-3CI>d%rY)Gm)A(=Po z;`$|>-`Ir%=m68hZFcQZ-!YbYssf;1b=7+y7YjV+h@T(X@Oa7hAE2;4^SZKm-{U90 
zyok}T4+mXOnG_YqUM6Y}Nx|%!2)C1F*Y_j>LN6DNzeJ(oftQ9mq$+qR2yHaAb&t`F zV;O*DLLJS&_6pbNk*H#5i6g;FDMj%GI{N3QYx>D;*!Za-W~n*a-P^I5UDG67UL3E~ zpo63gFRv!X4%I^5Yvf=}rw&UkuEPl5@+>numii$!75VP7T$2vXY^uQ_fa zahyce)^jebw4~M@19J=tt`GoiQJeV5=%>Dle_K(S)ik`#qZ%2Ga%{B{#s3}{mTlBq zlBNRM5+5aqwK{oCOhL z-=oB%Z(i;q9H_{1ELWzuk@|*PxZ&do`T~N!&n6<1@z0tdZ#Kqsv2K>&T(pZ9_?ZZK z($?L9o5BWPxYHD3;rK2m&T=}(#)JBa9pbA3CWa0h+37m@k-LR4Hg)LbknX^gVYaS` zUHvI5!XDYTpokw8eF!0lZ;e*v!g8DP;nZZ$oJq@0yFKN&v|kao`+rY?-}Uy5BSJ9U z`9h@4Q}F5s)9i@Jghv=PmAPfbw{~c7^~TbX}+^{wq#HHH2At@?l8U>&;VebPHZtQhyrPDg3{N> zsAtj(6wXtg$EE$^YPYyO!vr_CRyin+5IG3kHM?aebh$M!GY55Aa`$->~>gTRqR zm*0D%Ug~x>^|JXD?%dA`;Wf^+^0Sw$_PC7ho43+2z4;Mr{30M2`(O)ooFMusyQS-~ z4y5;4vytF6J?&HrSVc1(xz=7C3uu=V)t~yvgI7=@pe9p1gTEjUyf+jrE9dAFe=2M_ z(0M>)@IyN>ZV&Q$3iNe4u9YlZr{T{u*X3WhX2v@qwtLMJMI~&iwb1ouP?7+tQR+33wI+uWFeU)1n=bw2#0 zJGX8CswgyvBwoHYQv=FCb6f#a#p?5l$>4c+_UkEMm<+cQUCqh!-r#KkKvRy%!!XvS zB)JmBs}|S&Ycc9q11mkgBGg$Gsqp83SUoKNS`F@@#J=RxkLWmhKJL|0m6nI+!iO}b z4jU%2n1U%ptU1i0JPNHvhwB~2+MNl1?y0@#+@7@-6cbc8X9iAFr^aArKVZTc*DUxU z@0B*>>K1<_PBqJ~^?vG0RBD@AcTv8`xLz>1BuRWj@6bWeHO9#sHgb-}c_Jp3mr z4&ZddPurcLLIzNebLDwlnMXQk`Rx-u5Y4tPTp=saS8{(_^4xlO7;DN4kz@L}l_-=G zCGtWlgC0O}@!YWB2Ag!0hT6M?FJmeY`;cFQt4@P7<_(kJStK_ML$Lv_I=I}n-S~da zqI{LsG>eZpc)9a%Zun|B4Rnd2t>&GV&Sdq{UG&}~LTwYJakWwXdn*#xl)Imst2K}2=O z*`H^y$1d-FNaKO3$1U@Q5@&os_z7jm%MGHy$iiRWrG0|3XxWh^WD(ViVc*lic#Hc; zEak!kJo-alq{4?Hgdc04Hl#v2kL@VSJM_B_#xmf1BWdEK#ZPa}srn@0=O1%Y_d&_}+rh^7%Lnc{Vis~R zsjTUL(UW;wtEFQ{!@J<&2lyV^sDzFyYRc#e!UJ#S0TCJkSP8(Bh!1YR%_yTWW1Jb_ z$M?tg11MOdv+*l>R!G{$-fySfCyTEJZcr(}ed#@)|47vA67>GH2im)L?!Ei~Mg~+P ze7P7#Cd>9jRnh98>v8peT^VR_{)<4vF*-PURrD|U9hj=l)>gF>bzA!{_RSxpB5OJ} zIiwbW}>xViYaN^59ZixTEo0| zKmM{AzLAw2`RtJxn|gy~rFc5QZ0U#Bs`KGiOBY@IZl&@&%TE;>SC6@_(B=0+ccdol z?Nf}sC_Lx7D1+wSjBM27VI40Xp%Q&A4K-w;KY%|=yF>6| zv_^53%WUqFe|t*8pc>ySz7;lcViS6x>{efeoY<2VOToGqSD*W^SDYCT=@hpFP%HO} zvviL1W1eS#*dIglhR1iaIu3G4Mdpuw$94s}cPr2ul?s7e3{`uA?eWKRx-tZlLno2z zg~y^ZS~B1b%PDC%UPXVAJIln1Y|n9bHMu-Kd9D(FjpDH12>UjV6PBcwKI8uK$A;p1 
zN)t}|t_0Bx?|zrE>=AA~hmdr{tgAczgC$yig}{O@^d+=>qz|vA@m%=^IFIt{b)oGd zQrj%nVskP~Wn~}i`3TCzuB>Urbg}xQ*2U*G&~1aeCYr~MXi2knrWrMIKvk3ZboTjI zZrgr(4z^DGkpQxmTzL&o^OoCQWAx0aDx7Da32mOaF;=p)M(*LPvwsr2-&jc1o$?j^ zbIZ&>g+D$MmYGNNXX%eMor7FL(}X~6#*@cy>o*y1L=dhCzOM*VZy^u(iac_p-33XV zN$_Ha_z*@>#mT-6#J*Ga-&_2_5T4CGyK;^Ja4sZRiFtbFV{LN*&*k=U&c}+~#qG^k zVc4;waGg?SV9!4a6XNK9z5m!-dp+_xU-N2a+^ftM=>Nz<<^Lq!kI?mf{9IqWpX@}h z$645rQ@~Vf#WC*T_CLiXRwr905vDDbu^_qXUj=8sg2sZzLjFZgj~aMf>ha}z{lz+j z!fnt9MX%*2o0FjBcnH-n?u+gY^H9|b-*bMc{H>SpU2ayJcQ##XgWU6*M?QNAl;0*Q ztkD@0c3?7e{mAf>niBb|MWln1$nFFXa;O44+|%U#Hh=k3j$Kf~;Lig#bXl9qp>XL1 zYXZw+*U!I7c7GQ9#Z-~V428R@E5o+8iTter2T6PgM-j5+l(kd^4jLVXbm$6$0bw%H zmsJ;NFrCpxP9z8p%KQ+{55V641-0Q4jzND|F6dt~ZSF#$ScU(Bj6mH#4SaUZx)8R{ z=)9k6DXQv%DjZdH0K243w~AG*q~v_YrmyYVt?r=XutMz_6(Lp!Q>njTawqlHOwlr4 zFE$jvZVv%ngQ3n?#Y@$i1o^Jeb&siFY$*zItYs#lT>R`*k_5;8U>Mi$-r;>6MVd8k zc(f(l>?n&_PshB{o(4Axm|Y+FUubTAlGZ22o#jZL6RTMLd>j|Gw0cad*F8OwvOw0_ z^V9t(lI2$QWr2Uo1isgFQvVT{IR?-bF4Rn{{kGs@%U(b}{`ixn4UhIrK-ad$C(`*S zdR~$b0z3j_10YlHd-oZ&9Cf-Ip-&lZoJ+bTnXkK|6l6EbADwW&W0L&={%;5{bemKCKzw`8L>>DeedyIS8V*IhKj5 z8uRw`4I8%kHFEqKHdNwUS>dI>2h+kP-nkv>fd~J?@!7Aq9Qf9lc5f@?t<=ks#WFFI zPPikSrnpZ#HP>$vkJ(9ZxeGlo`$yjQHpCC^ya39$@(OCCtZbh)NYsMVDg_%R`dNnv z(8e51tb(5#6}U2?YekWl>Cj>*CBrqhi6BrbG{MJd=a|?6ONWASiE4Zqc(Fvz6Uj3v z>fP~PtwVxr@RVX!k!&R!7)OeUwzn`0d0pFdm+Zc^c)BqX&0QCL9{R?pMd@YH85IDs zcg;;sOZak0eMiy*yY}!#BDv-Ud>~C1Fw1I)kw$s7r7Tpub&JQbx&E~Q;j)JKP!yuL zy(}+&c@HStrWf7K?vFjE)w>Uibu~+|Q!lmFh>dZYA{a8%T{mVdi5AIN-w}1)oOrmg zZbBH>>06_7^%YLi?7x(MJ8G?scfd4^(zp_NRE|3o{A7L_mO5=usM2!y`wj7t;5Yq9 zp2&GXrJyQ<(nC@%szgnn`+~qz1tz~c1Ci-IGWQvuM7WBi(zLX`*m2{eVY&&5HI&jqGAgoCocU}0d_W0bSL4s_S;3ZhE*_aY&y zv_a!~(zde)u1kn(+Wh5X_o0&wWoo3ZX8*3H>3AdRg?lm!^^M((WZ#G^qi7#o=4fEW zP#4mOfIODDy7s+%_0s0PMIe@%dTy#ucbxy@2mJI4mXtqedG7G|Mo%lz1HMrY`O~^qiRnmF0$n}#-yDLP{*sq9xZVdq?n)#MIY#gSLn=g0CJcWr*gt#pb|$NxI*)?gqKED^c?B#V%N*r6&N3POZ3kXW!iyVmn@6ER}R z{A~*SDyEvZcSlokKOH)ve3(|k;p9!1y2^R)o`K2oR3_oaI;Rt4xg=|GujJUX;^0k< zS2KF?;U-t|#Mn}Lo=zws_JB= 
zg+x7TbK?R|IsiJ$IR(f+d4K)?&>#uZeBVtnc%HOT8=>e(5y{m&R?#9+OZ|SD?vCc! z%hb;-N$D$x3S6OpClPT9Y?-Bngeb@2ad`@@(6>e_dd1zxW-E~3w%7QB_T3>UZP#p4 zyVD||eMf{0^#SpYmuzmKlK_ci*8{oOK?g{(OjFktXNN&HN@jk@Nzft2Iu9eO8hzwAPkHv}74Odqqy}eJfF3`xL@k>qXiXgsayF z;X3xiA3xhLmYslEZE|Oo?jdJ?T+t?{oS4sFllOBN`q-V3B6GPbb0K!Rq;oE=`X*y{ zrX_CNGrHlyN8e)iWeGz(ZmTpP7a?aKC3=k6X-biF_S?Op*bjBTjN}d#e|1Chf?pO~ zQ28qootE}UNPkA&vV7I%dgRW?yC~eC=ERXXx5zI1UghWM%5x4X-IZ*BFc9l1a+Z@0 z^{bv?}TM(H3qUgUEqWK*?ku&?y6uu&*N~0@)(th>Kn4TOF~El5rF!(_oNPxXYkzFwbl+Oa9W&6SbFvD=PH1g z%$5cqMaFYt$jon=O~_$aQbeZBGM%3RU@FbK7pb4ayB_YFV6!kPj6}{7VGquP>n>_z z*&^+%4oZO7s%#MMI}e&4vftqjktRo#q0UC~*}@<|w3o(A944>}?918|@@_jE+`|2Q z)^-oM|F%-RC+#3y2bHraMJ>;Sb848hVWf!oJ$2QzSn%UmlJq|J{<#r*Rw+_o7udRR z*W2IhlX)3FtHLD+Ao1=QkjHK?pFg4>2_r^IC{WX%*^^QC)O^OnIjHWWLrZ`|p9mQS z)1gW>zT{|=q%&crmBd24v+$wi4-RAu1W6fQVm=)61<|QOxE7~~a>MP-bN~}r*Lc<{ zLMWd;KiS|JYfJ&Aom5lJy71?xb_CVbam=pUoc3cjB>zoUaGy)oZ{50{U`^_*j;k5`!Z-F@J>l%jbxSZy=4_Ko+XT(8bxfO?J?Z1&1XcvQm?c1T!Cr_U~%7>QDZ52kMMpZ5Yp2ZRpS$csBbu(b{Cs>b0q03XSpC_vVa%y%Gn88PB!D2B8?*q$>SHc@V~(#B10b3rks=8)g|(bG{;v5!b&HC zT7ZHM{)s;JZwOKMygl%ZIp<|P5c_@-OkB=KY+s+y+&(uZgYf!Pq^|@%+hpdz4O1Q? z^;Y0_+GH>Q|HYsO`Q2Cw!>!TV;F1iPu5bEoM2SjsE^r%mlnsopx3AG+jyE-%Xdg*lR<$b6)~ydX{HKH-y|Mf{CVYlF;cp{ezhb?7n?RuBhO*LxcAAE%9u0Zzcu z^9+cW?mZ$wU}Yq6)41g0hfmz;RD%I`$p+0dc@~}NCd1SlD-*KZl*R?fWvm|!V6;ym zVQDvQusTbeC>h$qptt3kyw=^f>|Q=L5nGsv94a6_nS(VdL%v{VG4aEL>OLFCcic9! 
zz;Di8w6NlL#G*re-*TH0FMZvEX^vIcZ8w4+_t+4If5C2tMt+d^{4^%x@QnW7FY_s* z@equL2hqV>%AS_HHu98MT0DR(_IQw5p4jWZ53F&KZv^NYf-1ZWY@dpSBHtm^jaP*5 zb%DkVwZaD_1joiTQ*q!)eG&oA=-o<=)*e(lbSWGFhJ7M33JVxc2RuTAauW#yOK*hr z0aI8je9oA6=;oF5!dgxR7VS<}p@fGE!__3Zmg-;k!CN;~65h`VXw%K;@KA_MZw+IVr+#7wGa}CG~x_4%s4pH$Thi&b^rj)-+=KSsD;CURbiX=ArL$ zJdT_bi*=x>v3E7FzBIh8SA56t6!Pl%6h1$lUj=|J;fbMG>A-125N@yH-JsFc_T9+Z zg4&jcx5@`&I4QJAO*MAtV*6rFHX3>B=o9wW*pt`jftpSqy)H(6ft;sdYg485UzFZ$|5|o~C(Mwl5V4fw1wr$4{o~*u+G6*wfIO!546QR^r>#`v}3u{ipKV`#l5(pAc z{}{TVCf3pfXwT-D2w)edc_2VGc)}VpT`2L#`{M=R&AFS=g++^pM!_27bFwAw$-8Fk zY)+D@LD9;bP|)^oi0fx#<;hm~Mx9C<#$TWQ0|aTzY5BfN`|~lr=F7`EUTX;O%Brmo z{c}u4lEYMwop)a9BDqpT6}cHC`K-VJ6$MByhYl^<7<+Kms-YiFj=Of;WlC@L%OxIIkw(8RK;_^l!ofL0Pv#2PzR7icI1B)#UJkFLuYg+H5Oz5 zdXT1t1%wNG`wl9Xbi>wMT&U!7Z}DH6zutWcvI|~3<>LVbFk#|!pshmy9fpsGit73d zA06zTEKZlT978l-$L*E%s6UZJqGms%>KU9dlEaDNgHH^FYN}@yvx9LtHo+yXKf#69 zW1C%&kzq4Nlm%XU_CPtf$_c%hkAAv>@GD9W#+CPXzLr6mW*Q_o+Mo-Pt?vj++_>7S zmdTmu$ebHgOT;!_^vqqExCs65ZD)l(7#nTN2UE?#v|Y_1lXGh07ZY2m!CL zm4oMsy|vJi9-BQvh&kpzXg|4ZE=5uFkhTavn<+=cN|dGrRmlLRix@4iHcXU~xOA+H zt%$^9wY#;{RX`ByV(^F$d@a_~tr^Q!72G2E@cS8Vba69S@@gWn!yOkjTozT4{6~Ez zUdBv|lfebISg~>TN7HU3-ju`5W%S7~w7Lezq2O z{v4~@BozWWLeG}N$gqN#mAz8#BoMACPIb4UI=1no-~YF#674u~Ms>E7W=1U|7C&h3 zaX8MYg?_azP;~!3EfLmr5ILgG6+(cn?it3FQlEfnsTqiDHshc);vJ2o#&Zl6kH9Sg z(4??-QZ8V~ji2K>x3v!RCBw%rv8vHX4Tr!W?{u~G6rqk4x$K#$?9;jY)jx@^Dy$Y7 zc2clU%TP9QX;JSd+h8*L^}w%g`*uVA@~!xsn*7l@^b!tiyR`bQim5M}=_0W_wE2E7 z{@Zn0`pXBX&o(afDEVEJw;2Asye(`BRj#uPYE}YZV2C2^I9LuG@iJMwgWVM;706$) zHuuCs;W#{mGB|;W^vX(6hy1a(fr?+f@A&l5R`~#Vs}V>vm;v}Fw9^cViXib8IN>2j zPkm?fmT6Fz9JZlHC-f=IIdk2DQjd7rvDdDS!lZ-JOSjcR1f>G1btKy6e^jdm{7Qoi zp0v+8l!^)4LU4ueO^aMq9q5Q!KX-wjarONw@RoK<#1_M|=z}yG(nf-+_R{=-r9Xvd z!B)uI&3wT}T9__(LNq)GCvPUsyJ{m;HMHq<2{gNxyb`dhb3ivuZTm?wpwh7Z8dLS=*XyRAG4dd{c#tWBblRE<{m_v*UFtm{ARbAkP9r8Z z80MHp!gWEl#?7G><4CXWUBiW=fs_|?$sQK&Z@6yU#)Pk7rs-;5m4dL`+$xL+&o&G0 
zC9ubjv+X^|rL38z)8{<7+uC@ozMqdy%0GPQXVG6l+{;{^T>lC!S@W-ziA%coC;gfQ0x(uivjV?7`>aHvqYuDq3i9X^9p^PHoGn0};v;DB! zk0rsCDOJbycW*L8AwdY7=ClX{wZDr;$gAZXwr5<+;%VcJ^y!bNR?Snl^a^W;BMJmg*qJrCje95QNx~@EJug=&+aT zb%pgwg7J!^8Sc398ScGhCRwUXC(}-I+H>f7&psOoj_eYT7tTJy=3l2~kV*5dK4<}2 z;U7yktR&2E1N9>r1lQQOc3RQyp6Tzg(c=)7vYi{7^I(3T#H1V~A3<#|T>FRc@&buG zihjBo_~p8CTT&3H(?p1OWQEdC(6Gqh>RsT|TIc|7^3gF2{FT7sJU9VsZqPgPuue%_ z)poJlrC{%RM{05h0c5tw9I>|HMuDek&~VFJMbV)XXU7M1lHuGsVK)s!DWzBP5s+r< zciXc^DOD3--+e`}Yp0qW_#w(-1X~z_%bS7k*2sX7kC<%pgCrw}Sa5LSNbrEd%Jc5q zg9jq41aWe|X>L1Jg7TmehLxIx^7B_}$!DA!*TVb*!t8yK5P24KnYQVVl*>cwXZ@;= zM(YbjjI1~^_m28!!m8|aBXv}pJ`$D&rSv3ws{dAdOmpI`;yN{hKF1Q~$^KSvgBgZ} zS>x|)NMd`W`4-t+OC+^#{(Kf~QL0wy)O<;3JaQQj8&4BJqkY&c0Wso~LM;y^K=w^!Z;&jh&a0n?Ss$y3hBR~!%K!KmR!p_GOv;@#dKvJc(4p4D&SjhX7VlYVYBzMR!UR!upomtbS0vuh=m4 zXe;@-GjpNn{Rl|HOSf&|INTWq_V}=vB={~y7SH!yjcFgqe#rUfB7)<-3JEK<1muy< zdH!R+Rix@`Ifcv29nTVD87%r$RCQswsOcI5N*B9VRzhT{{ioV3hA#-^7dloO#s&t@ z1({ah!4{YS2V9un3Bke9!)7U|qETz&i$L8qn!7PRQLvgQBJIgy5^djijQX)pOCk(4 z5HQW}+HC>;#T3Z1%fs8*8kU6jKAdUw%Uc%v=enbiUY1nUv8q`um1~|V^XRfTZ3<#P~qRnV8>N^tM4!i5A*Y1TY^{K zc!lyoRjOI`M^S$!>7Ap6SShg<1LQuU7d)-c+h-dkg`{i~gn$>z8pJBF=K-3DCGt&K zFMMQB(XUqO<9oWLq^NH>9Hdr)*_rEh*uDLy?~*Ugm-A1C4eZsD{0Cf1UyUwk5^#@} zd$i|iVoYVVeg{|N)q6Pnv+=xoyEhg_RFB^x@Qqf_TK4Ao8OHESL_3Q}3)~s`b)>Dn4qxXe#^-c?Q*;6m zKn%rw+jm(lA9m<;`bBvfD zrni3*)!9R2v>f-*mOZ+~w0Gn7Sa`{ma>z%aV+v>Uc~@p10_#y> zb>$0#+@_dE6%aepD)YpR5oqugW`ANpV4Qzg{_haIyu~KD1SBsW!kGyt#3Jg5hPOO4 zL$zo)ipr!4_5r>B#q&xSOySmW8q;F=Jvafgv0`Za08Mq^$1<2f_<*r}_ki-_Pg^4R zr7FXX);`0nEFqS``DatZ%;c{PvhjEwz_Wj)Bs}tPs_a6Ajmuom6}*Qv4PK3{h17gI z`U6r{W>RvUAsCk%dwNX!=n^o*C&tqPGRdTGc>I-I?mMB%O^y{#s~GG0%&Vo4A2Rs^ z*XU~f&}Y2l7_o8y@%_dV+KlCDAq2G_B*S#_5#17++;gY{e6A$3hYduB1P= z@{ev|I}h$K7ruCg*eP!uwn?liBb(bVwOvINN|+0Q&tp_^%rq$v$kFawpfXyrIoItT zb@jSO`Xvy3HnLK3?8Qnc@h#@E8zSTVBr&w_Ecgvz%JS=A*&)x@@;x7(N-WKzZ3!O;t-`I@(SLfXI&1#Q zD4B+bZluT&=&Im?k6bK{ITPF=^z9eFcoB^#|7EY>I813w?r78= zA~f9eq&}GJWnvi3!Tw52-ghwmpE61={ghzmKG+*y{+iMCaJ2U)@eA<|`|9tK5=rg$ 
z_fa;iL@zl`3A*khe>o*^g@ToVy?(OK{>&fUH9v-U%yaIKgc-90YXNZgF?cAXJ?J&J zcb|BF-^cYN z_iWg%J5avx_B8N>l?AK|?!fU_oCK6Y7*iK}y!~r5j5q@P3}**2YILBXD_EYp7}(&L zZ6}ub`ZDehggo+<+<&G8U=}_$eux{Y(x1BCg$#9l#r48?!?I<@Z0Oh)@$3LSYBOyH zH|9b9wIz%sg9E$lUopJ9BmS7251cJHyXTnBV)9{ zteP2K69VZ$X-P%>XJb5GL;~@(05?u;Vzsw;e5SQ#GIpf#3s$USJxvMuT8S0hv&osM zSAm;du;?=E1nGFGmITW*1%zR3yl*A5g7H<>3>b4f@p`N&k|z$QJ?L^%>^6}wW^7V$ z+~TuUrkwNTXtDz#Ndk$#ckKKyj^_qJC^M4ru~G?mqwLIpGG!p~7}z$C#P@A2WwTzp zRkw^@r?I3~Av={}rUYo>2pc>E2vXbBz6#7ge#{?D*q^Y7N%_}jr6^t!K~|`yeV!*^ zcEhFvz{tl4keVeAbR8Aw6RV|c98_v!OljbD*G2`=;Jj(bW&DKF-6F@n`tZESJ3VsQ znK+I4oY5O;Hyiz)W&FBW3UpZ&zDK(V`5yJ$*`|wGid^WXEo*5z7)x{Y-0F4yV|<52 z64Llbt2A7WN8G_wLUx(stf*y>hJKi|R8=#r zzGDB)*P$IpYU^2WViY2byTXcN;Hu{-4rw-ltG(TAok^1xRy=6kkNVK{%SigTuxCYb z9OvV~UGrM`wgIti52jL;c|eA0ebK&drfr8Wci!O1ZPyJ*pwGOH5{C>@Gv>PPz4+H?C#hyT0|zmb`g)(gnn%Rym77 zxxOKmsf?SffkIwTtq(Vng*zV>Dtz32f~}*$U#U`CL2P=M<{phP9Bn`Lkpj=>7hYx> z;rNd*EWNb0;zrAgKx9{#@ossr5mkfD*?|mu>cgMcmdNGw;|!95A0LOqt^N?lKW4Ve z(T9A&=^y5ADRZ-}mKb-vL<;@~{7jsaa1RuH|WBUEwNv#~mV z(ZQD)Va>gV+lp^gkQ+AMymBVJG`tt#Afxd`N~YRI4g&Q2kr%t?nA>)lBZRI*syP3KY+-#toOvqi&Yr@}$_jXK)7j z;&@u&Je-lby$S9cs_ZXLcnrJT`mZ9ea?P z4)$yoE-cEn|9}$+_+wULf0~S(4BXiRYnfrnANi?9H-SBCZW8eiwaHrXG7k@pkN0Pe zEfSDWG0AW!fkfSbz})Z_xC+8>cIOL6sqp~L*=+Fm^X5aV_0HWlxDG^@@LGFxk<$_6 zVh{NnZ=chHGz}dDg4tir-7D`>El{cIV)+nB|5;!xj;CMZL+%HyL8j;X*Ddb-=Tkm7 z^AIl?IW05ghu58m@H>;r@_SFN-TTwVZ?%!5TG0W-O__0nX5q;r$(YmEx70OzIaa{v zP?(lG3*oL9>C@k4<&0$ z#3j*{v7uD*HVKbqpW)IhE8-p@?xWm+;dkTJ`2x|BLla$sHyKpnjifJre6aQ5PdZKw z6B5*JDjItVOT|%Q){8>O+?K&EN;#vFRa?P`;H108V}+k?4L*qfr9nGaYh8+|ehDE= z!5z9qox7JGU!O9YT8}fAs_^1i!^Y~Px}RiKOgQUGqh)@>mfSTSX#$4FnyY|8x*)d&s)p)Y%HR7x@>&ubu?Y?)|3?vSGK$@-zY#{^DS1_*ZRqEfFdkHJpovb5gA`CRc zz*zT-gwXDvZjff@H}cUpDhv7_r zhXGl-3zjbyeXx>{@y?w>Kd)fw&rxR#;BO7vMMrwNgQYFuQnI_u+BrD*$ty)Wtr#_t zq&ZgxltmYcKLwYTz@s?Q^7Fc}J+JG;ahtisCtSe0cvwjD5m?W?^F5asX2~h(JRz-s zTR8M-MC;sj8*}yzYIX7ij;}8n`mj={KMRafV+yCYuD~x})Qai@!Vj?|AueVH zN?=fd{$wGo8iOJ~BcT}J9FOV0{Z&+_E+ew}w{0!UWl}VJDmCG;C7gk$?qlQ{` 
zwinK{yi>qX+X({q5scBDGkJ^{W^(Rfyw7ZvkJWd^n_(XYPP?vo2s5YZ+~o~TDk&n7t!`w zwV6M|&s4Q|(p?&k>5yZ7t`@ePE>KU`#c%Jy?dEwiB}ac9e(RbQy)q>Tr@}`U)@MI4 zxUfA8BvOU@jcpG=7^EEbn!+>S@Y#yNBVt{J!2zhKMX*<+zrm-KaS=Q90RwcNi=fnZ zj-!B`e(58Iki(FQD157lCHNlx4W;*%Baaxp7LD5iNJtuWTj|p|^~9c~tJdpNOJuTe zOi_uB?BU?~e{m`n#TIZpu4b@s&bE!2!{vzMT(i$Jpz8bAmR3GL)n1(Y=gw1XJ;wj) zcxr|5^N7XVkbjH!I_x?e;P+5mO1oBZ5s{e4P};wt_A=#nDBVV;rO%6~m5m>gshqzy4h zSsk!?w+bu!e?y&?EZ;8LUK?D@x0 z9Y8Z69uPk2xYooo_1yN{X3vib7}?1>s_Ew*I!|+n;V&=pza|X-ku3`kHpN|2ALibG z$6F}evb9S5FM)Mfck|NEzB+Ql%$w``$ncrE3!7>JG=gz$VW~DYfUCFrHzg4T{pHZB z&EFE(?x8e}gI&yIj`Xw5rEQ_=7a>{)?|hiL=Rb=@27G!edEm46n?IK1QJWpSGk-;E zWg{*u`Bm(@i~d(6g7{{gts#lc)$lCS|LzD^lOa6TpP?*qIT$pQW@=OTy?k5fl(Un` zca>iA^x(S1sNk;UGmB? zZ)`lIkNe^X;Kae=a-(zV+s;msKg{*qiJ9VLQ`15GqlVSgP|<6A$;#$qocQ)JyuX}D zH+&h*MkP<^hDK{Nlq0(={lhgPrWE+U)6yx>8%>3S&V*2dHIj1qmjnG*b#pG8(fZj( zsJTgc?iYcp^b}qmUybZCzSGF%-L4V51$#`A-bmkc(CkVv>4v(jj)3)$MIjT)yZ>b3 zwZ@)|>BJFq5`#9A3>D8SxCBYeDotfa$kGGDO-#l!&olf0++Vprm_11h&w^A0Z;)W( z&7P=C{%A_8drJQt03#T@2&Xv4+wZJFT%uGh)MbJlnvD~1JtbD)Ar=H?0(!;1@EA)0 z^KblQ$v`i5CRMJ=8>SaeQ&`@M<}TNNb`O`W#OX;I^d!yTmq?2OXT(g5XmjI+%VLZh z+ei^LVMg3&1RhVpgP0W_zUEpa9oTP8nX!6(!b~kFV_j(HX8qBl!6X7RYQXs&R9^L| zpMoq9sYuxy?kxXG{;6tbeF=9{*1C_~AJEIZQ;9lkkk~MOm;3b(4>O5-Y9o{G6-j7v z`D{o`{%`m|EhCts@eDtzCVKLW!TW9Baocya^U}*JX}6s4l!d`#qmB(RGs(8-;qQWN zSYNq1{Nke=XV1Rtq2smQ*z3VFwgDadNd}8&`r+9iksq43g_Oe`ivD&+Wyqf;@3PI& zTr{$UcUbNLp6+lZ?>$7a6G0xHy!}DRKsaLh2IszmUT{?ev#y-uV_0Q(VNWuZcl&7| zFYwr4KVr^tvpTXGo+qzk%?1Gdad#Asm(>Mi2j$b!xLZIp?kbZYSK~C~Q~JnrScRiGsjzDnUZ4ZVml< zHO2$#=J2K8@(k#QnoExp-+cV6uG(^*p=v{avfF9=L_9({f-{Wx!VM;|HIqpZjep|5ky1@=?;;QR$vH7r$NJp zfRwZtbf+*Hlo%i-AStbEbi?RcJoo$ad%pkj2lux7I_F&HRTqmkcG*Yh9%p}qo)K3N zO>YFUI~H@`V~;tPWyeePm-8c6R*lFdSN%Um5JH#`ltNyRs!HbVIdlz-Zc=tg9#SK%AS zyb0r=P>syl5T1kdpW2SyyP&J4!)Jja6;v_`$X#utQ}(mF#8gV|gRMHQ&8k2-ls=N* zSdxVTi5e=sne@d#;T;NKJJwE5(Pf2Og5&Pw^_<UTv%q^E1mWSBPgh?<_odee|BGB-uUtcv~PazF2` zSQ;xRYIw2|g`mJsN&Hjy_-vFZ_ 
zl0I=&RGtbPXA{G5)zi_wXi+%ocmXuJ&j@qWeLdzrs`-jwBcjmDOWikkQg*b7y_%`K z*`E_znXjh~RbVr8k8H>|Ki3w~hCsi&ASIC$P>|r8OHg2q5YWT-&<)J$Q!5`6jlz|M zL2r!<1Pxqbm%9jB#+%$+6hGkBga?iyN@~_9xr;x3_oknV)OJmzSrI`zGL}j;fsnrF z#XH#%3jFkuKHD2J2FF-(>)*RIU2SaA3-4=Nq3XD>K;{7fn?TyFIIf~V-lw&ouS&iH zm`g}*3$@0Bn@~~?Voez|P3KgH7#J_F!8dYu)PHVUqC)CMD9_$ovVV4 zX%??2)Oxsip*;!u$Te1%TS=wZ_#366EER)#mTTONETo+74d~#G_giyX~ z-k-JKkC=D?<)p^5X@f`>dC^Qe;rjhlZ)z)E7BZb|J*B;Q zrYnx0E_kS?DEUZIV*LtaTZl`$50?Lv|DVhkOaU5&7<+Et5KJiZ`$bJ#JBqrG^7JSe zzw~^R&-3uzkn#i-zLCt!9KZF7u)S$_ayyOaB5Y8m(NhW?|lFi_BOnhv1Mx zoGkIjm^DuS?NQ)bOYn9Ch>_N(_do^2Ku^6``XEd=Tq`jls$x+1nL@R?f63taBf4d! zYyRGQS(#_I2HA?x>w}bkZI7XU1BqIWgKQpfUT8lA;qQDPB4pmjY*-dB#D{an<0Inc zSXvaM*oFJsMsjeDf7Rz9)t}1Qy#B=oubx=d`hIBHP=zq*#%S@Mek|WE3AWD2@wx{P zWM9p{F@oyy>9ccXn{JYYRxsW+=GXg@N>VWBZ<*rNN%c!HddYG&v*q4h5{pvvyyZDU z!Lr<+C}QlhPGp;0CrpnBt^7*`^BEwqD{ffyVaeFuTc8~+?^A}@_AeUzl>M(X64s0* zvXnR>FU_wXZ=nLKu8*zOcw0rU^dX>CPNgc|2QY@P5 z;>)-oneF{BCL#9h5smt%e;Ry3x>ynv4)b0hV4O&5S}yn7x#eTM#IR;;}oXB zMYT8Xv3wmAVkLIE*zXSy6STq%AiCf-;4yk)#=KGyt=i?>bOP~3^|mg&2O<=pua;HW z{eX2gbxJu2=lbnxO75)LLqMLmA`+HhL3q}VcfG4TQ%UDlg{N+;TsCSy;K(-6~mO@Q`S8Hy4uw;%Jc2evz zkf%F4-UyQe*R=~01WQJrPSEH`?J<6#_hEuVVR*Gdd*vq>L>PsvOqfhP<4K6`r|A&C ziA!U%$KA4e`XZ|NgBF&otMOtJQD|q;Jb}R zJX^X0U6a33($oG^ASBBgZR5N`1@zX9C!Nyucx&S{9Roem&HAFzk1jS952AEIbj;TW zQUDNW@IGgSM?sM+=}{9?WY<%}T#>!s%a;s!hdppjy6PhvTJqRPK-Y>Y&tq+W&hF^#= zDnHJZ+Pk25!fOb@kQu_WPORcWRK#Z!NYAe13xKNBxo>w9&VTPFc#&wtzw4aE^5TotBTE5c@JsM_wijmP5i{_=c z)>q?5?}=u=Cu1euQFIIER1 zqnDSAXQyr_2XB0&9xDQa?=@41d6=%asU6HEhJOWpU+zT!fbQq*0@+;}1BEi~F2Prc z-LgEARE(RGsQy{9WlKxuyH7^cdtbxU*Nz3yLxpmksF*O=>j;9G4I0(S0nOM^X8#l& zGAhzzNp@7mW5_k|1^l#Zl8{=Kh*#>DdL}W5G!-17ZBO{U-a)<% z%Syg*ywhX45PR+z^&Deao~&pob$lOOB5J%-Szj3qTkV7nu_6WYVd_mV9!64)CBYq) zX*#3(ox`0b5GfV7KH<9hrA(XiR&0;z__J`R#hPIXFXbvJ?j7?eL=r69;|Vu?DLT=X zmQtdx@!nf3RgtaEmOmpzE-Qz@8AroR{6pb4g+MHp9Bp+{8byLuew{ferh?_;#fRRn z1^u?h<#Mjgx|~WR;Kl>c06`oj$wd?idwSO2Ao+X6-?wK@_r(R^`ufu$YD6f)3Lo;` 
z1POt@S!VsD=#)A5_g~dgs@XDYuV4A58>%M4gCG46%&%8AFtGkGsL9@Pd}VlXCDA$a z3Jv&+yqlRXI2R7QRWYSy#q`Y=Q^S|+7S1e7tC)vb)J9WJJRuqH^E%{DQM&3~Zxl`! z_tcC|kB}U!04heP_zYL~l8AHgQ{#N6&FOi4WS{@<3@<`y{b3@OT?Do-3_d9y1L317 z@qK64nH1QEHn_Wq7wpbz@kX1Cq|d(*1yZNX6ZcU2sM$>?X}%p1`-Dj@ip z0&Sq;?S$CgO1m4r8Cr3o+ObE_5h*$$MV!aMj``BWu&B(t;i#Z+FK5&lGv zZaO5^_2PrR;>R2|Yavm0Kj6mOPs;ITh40Utz6}^|O>s9O!ai_T()yf2d;h>H`0&p^ zg%9>xm04#+4EFYljuHAmfR=ljNa#&&q-;NZkDLg@!DbY0L`hcKkyVtfbQ#DOz6DD4D4ZnPq!!A{pK$CX53lzgi8uSrcuV1&g`3L@jnyOr61jH#{D&(E%z5HZ4%Fyx|gFvHaAk(R-1Mv&!ZY zPtoqYWN9QiLW`3haR=j~A$WBjI?T+2^V{V3*w5#?yOLA@D_lF>o!JGmxi<1>h*eSD zB*KN~_Jg8y%(%4U1jugNT4jNX;>1)c?vs=cY7|{_sbq{sTh8Rm8}jH#cRi>sL-5@z z`Nunlj_!l3Fo&x}A;_p|Mt_d>KyCz1?ff)3h51@*o1G%ucM*C=*GU}F-Cf%bu=nJI zRVl*S>m2pH%7!h8@Nz=MTDdjO1Y*=El*R;))BIi*tAO}_Fl6%#mZsJ~ihjYDD>dlD z&nR|xyDv=U7!){-&JLjck#ECHt#XG|L%WL)l0ofg8K*1M|sAbrjlTt z!;w{DxFwwVrRdWZmSngo%93X;p0yqPi5*QtFK%#m%#ttj(OchD9^@ZNmyaZy?tmQt zag{_pR(T~H3KA!~@3d=p7b_UlBH{t^`Zhn(jpRtjNTm0|rc$=b=$YVyQF3KAR!;XW zV8^URt;@ISKDzsahJn41hn1i!j!q~YmUH`DnYb9a_z~s~N9MSPRN7vW6f;C8?>{12 z*egX}Bv%0xPw$$mOv=1B%}A1jO7Ax)`$=&b*?)TnuV69k@jIk~UNkOlCHyjGuYt7m zr`LZty;Ve$UHuyK?tucCHc_3A->ih_n0Fdlx#eXS^zPXZ9iC;&_HuT)fpjjc84xD7 z(etSXKIsMNP^iRx0SQS_`Ov%SF%tKoR}6?|Ir;Q#=x3`X^^*ZwtibRLvrMUM4h+jd zwju#c!0pt~zRH2g#;EblJKHJP@3`6liUt|4T?vi9)Y`|ZBX`Rzxo(SHr%(_o1SXA` zZVQ3ZA4?fEZYC##zEyPp^QR^vAW#smYCJCHC~oA-XqX=Rl>a+SUE7Je3h8b><>t&k`a_b zg3;5e@E4d!uhluR{C11{fuESqe++t1f@d;`>^&C_LTHvfr0Mh(YnTD@AF(HX;u-F< zOdY1wt$g@I`|p{s8G%$De_BK$mnbXKMJHq7N=^`%H0{#uOnqZ2KBBSK{jT&BFsst`E__= z0N#5H?hRne&Nyc;3gTYhhH_&klXuhKn;tM11>EHSC(pw^sCZEj%}WJw6&jzd?WAd1 zSJ)YqB0TGZ++>B{ct=!3a)a3gDCDQOT$W1U^xQGQ@lw@FRa|}MBDl=a{a&%S3ykl3 zwS=lOO(5oDF?;MvVMJQp*1%>KAD793P1rwg<}Zdh;PAk%1{C2!-UOfrHvBYeh^j#H z1I#}dn}Ie=*$$WtGQ}^sC_zteqRaux^}Qevj1&yV;cp^j}F;Q z;v!TJb474go)z8_1vi@FBtDxF9+O}Ywi(^VjY2HnJC2R&zcsuVBzUr8fTLWwpsAxO z|HPXaOXG9q{uU!MJ$?S-4z6EX@M5R7Z-yA3lr%ef12a>)ZQ%e48CiM#ndI#ttm2~G zvLfs&$g#E5XHsmM*mVz|dGk^N2l)7yw4K~e~^J`@MC_??)q{iQX 
z#yX%V%rS(a!u%8D_am^^-D~i}`07G?FNW%lWO(JMm-^P9Q(5^J4-*reTHi2>tXF6B5e?ytQ=cvz z@PFui@c2NP={Bx}zj{bS=q-D=V)oD4-;dx9hW$QxDSYeVWb!=0YPCf|CFRw_M3QQ4 zU5s=V&EwARFEmBCtr8y>!aCO^>7H5dmLlqb@nd-5_X;Y$vvjhouehVYnggd#RGMCK z(gs#($6dP#on!)nerJ1VlI!fc0;RmQ7Uoqx-7rM9=?!^5Z9l{8x@;V8ed$Jh%uXvv zcfT@asgcKO&{K9d6W#_F;Gz)KHXfjp^~(~Tl(n0TmecBm>;i%veb5>3C(HBO!;szmaoxtxojs5T z0zGLJgq_Eb(+a%n9lJT}=0`(N;O$<{qSh>I(Y447me?}ePaj0tFHCgM(E44zicdT$ zO*;X*{uywInUH^NXq1{BL85R!U;0Q|bc1E(=(yF#56i(EIjrK5WVE}59vbp<3Fc$p z*xkDqV?mb2#Q&9fd>@>Vn^eEkD|OP4Z3xhN%qKVQcjwcDt@dcg`)%5W7|KW0jFm=6 zc#jR4jdcDRcQ<-OTSnnELQDy+Bv=TjI(nvB67!#bwg=>E%r z01|0j*?zCUM+Ex2;&Czp~5kBgnK4<%-?bJ1x?<;-iSlIu2_K|4b!*K=TD%~OBbaCdlfDshS1 zxTfhzu<*~57NvbS6txX7w@*(E(_yD|5wbF&B=*_wC5bRooP>o(Xz#oUK+VHzB!?un zaQ4(Z90;U7ag=SC(e27>n|T2uk_XF*m*xL{*WeGIrc86-3EWB(mrT%1?G>*TYGXJ4 zFxrz$U{`$4^H^E^Q_CB=A8b}Dsa5y?W784mzzacy+frQ~9=yV+s(m(MjvoGbd3(WU z?=r0{=h$Jg3pE=?h^MpI^%q|zv*xmRH>uvdTw~93CL39|L=Tn}XPoy^RIwCw98Z&3 z9d}ZFB~vtdC3T41q#l_6WRB|lGFRIirmmXA@2qge{wz$rm3q^5^^5j-$p+dpWI60w ze3~fI@#oVn&a2IbJUkER`iYvBhS6y)50?9d3sBzF7oVx|U`^GV17=>DuaJR|vF9R( z414@F9Fa*2bar;api&ZbUVv(Qsor4z+LWT(vRz1CfiY^ogN2mYD|-T_8Wr^UFsRL4 zlKT3(ikS&6^pF)k57NydCjYOmrY#@boBjJ;8KyNZK$YxBOr?SfK8zdxyOT844cSe1 z1rM;nLx@*9KndjZ3&X`u-GYI5D75HL>pApM|5Vu`^6!c8uU__-@vLYqQ=8M#KgwXF z7SKpPU}uT<7cB25?|GD@71Cecf?JmmuQ-_jUct^3tibJsq@J^fN;Q<&UjCw@GpFL> z-$?r`?dQnELje_fODUhp4Li?Yv~gL?bD6$(-ZR~q|9Y2HV%(LlsDFIacnnwoZw&pM z8=6)(3O0&1)`fmH2_kRlQO9V`I!_wv80|89%%*M0u{ig^;O_MXc}{1ebkHqA$PgT6 z)uU|usN=@X7o5pk99?EOb9wM?c^t}VhH62)_3%EIUIlp;#q3smuEK6$Z9+iU^M)+m zz|6{#xJm~8GoA6Z+hHHpFUS9X;YrZlAcG(;+gXQ&6t*4u7>0Ni^Y*7tF<-y)(w=>Z zQ$y1EJlZAJ)5cbO5)4bgHA)Iz>53*MS;C39`)lYzrY212%(h^! 
z=bl&x-=-DT5?8U`0Fh#exUHk!cAY7j+8L(j5uZ@^oH;Fb3xb(vfdu`Z%H`7Qy4P)M z{Lu1B_7ne%q|#uW%<0X=6Rs2Mr*)kO)xSB~(03;sTaApJY0 z`>SCgA7LYxs^dbtq+m&_2k@n zVx3w*xP_?J!vRQ*EBzDVfDbSC0MIvPK0-R1_>aZ%Lgv!Zoqg*$3-JUrJK_C0UZO*x z%xfPtu$`+96J@(wU5PL6=lwdEt_)UAxH2|15*zGX`|QUz9Y{Iy1V`MA!uG2hU!kmh z$zCFQ3hu4S?fPSudR6~a3O~zMNdrYRPkf*C=@k!;;@VsTNsztO%j*%+lcIiP@jH3H z@~fB*1%xR6%MG2(xiWzQny?ewqDZ(oj#0fc144GQo(Q0T)Gv=&exnY&`7uc8HQJHH z51LI2uVZQtm++5H7Xs&K!0+FLwOyI7U@wNQmvW&EeV4kO0@=LYv-M4@r-Og-;^N@x8yBz3SP22#4)V`eyl3SSKkg?@1ufTuBd)w_3jAF8lr;v`9 zncHLF{`Wst`YP2}(x+fP6bgL$+>l_XDsE$(8>OoQ! zO_IOq0EBDij}|H5cc%@+bN+-FnHf&C-~I0#WTZ+)KV(F_ZCXdsJTv_J$qLly5m)&% zSjsx&J8$LH&|6c2TMmJ$J;N0D_!IIam0;91%u1l5rC$OLODD!s)f-C^?hS5iEMH~) zY&l*>J%6%$zP3$PmeBU$*aX$JSlu-{GaeCq$+&r(7#zea&HA@;ia6vCLUdkn`SY}! zrW8L~(@>h#nt##_dYc_EjTdq9@z0Ns&G-gxTbZ^8pjNm!9YOh28HQd&<%jygO?6?cNr)?1xZoJ^-x;*}5@#)YE7-?d*t9 z$i-9rJWzg6OeoK%Gvcy-ufD%i+bhilsEQ8Vg`QbMLWAf|Y$GKlmaql6@Z(AS?NthJ zJ-alkN7Z9{_59TRM1LK5yRF3YDxze-S5t@8GopWm=o3 z3UXY@bG8vk*3gGLQ>HS<>KrEas-EA(&Toa@7=job(eLd44h`ErmqPE@f$bHA1)S`l z1|`Hz#d~$23jGSxLv7O4tQ^*|7_M6G*ENJnf`ISOq!m9_fr-rsTlU{4t zzf8>qeuaKfM!5Ph!-flS=*OtIC;`mqot~rqh*J3@%R^+(_QE(0UxV_^)XtWj>f4AfVNz zs0f}9pQDc03BA9TG-YRBv)g+B+*N*DuI+*}r?dI|r28kA&?jRqcQ%B>DZnCz{K+*b zT1?{qYUf0WpMUFf@GgBRaw5?kwM6i2uYhgBQZ`H?xf6$KetAHUtQ8l5t0R41)6c|W zm|bz3(mr8(9J0zhzJ=zh!DRol0x&i3!f9A-FTNcEMtwQ~iHi+Z64>p!2wuL)b6UoK zR4JQxg&<^#b+QzBex0hCqO_u^$*U|$%xhjD({oIfy2p8_g1jIx&_UAHdZ!+&;)r7R z+>L>Vk*X%w>fH;=m!7{eDPD@Hv^Kpd$KBWMQ!dTjiJf0#0<hN~+uVFO;eIm+YP;1ub?iV|WApEF+*!lyY-1gN zF&sk87}S@5VdFq^bdmtB&5zTtV^yS_thW;hj(~JJ*|0lbhh!9%3U#zBd$bl=h`qD3i#zM1G zu}EYv8Ep>NNa7L-aD2@OL}P2P65rzMb(iSa+%L8VpG-l@PTcqKJ$88A^AYO(ReUod zJVnJGy!=<)v`?i6(ojsCmvfQivO8Ir%|6@6K^Ulj>CwA=et$Hr#H0EYJMOXuyDYFU zZcVKqMCjuzV$eXqq-Zks+XL=>ej-+heksaH8$?;AN0$eFVARE`piG! 
zGrR64&*o^Kt)_vV;=vOeLS_WV#QMpbH;U6zc`GKMN+{S1{k!&D>KBdU?nN_$6dnr= zq-OF+mqRMk3j8limw2;#aYU)Sv69Le2NIM6Yd}&+aLlrFvst~L=g-&{1E*+sLaNYJ zmX3l>8nF;pNPipKwBkzm`9kyyLMNS%?B=yog{Np%~E3GtVocu;1Cqgp=sYJ!cPBe}>_*_Nl<&IQ$JBf9~ntUINj|!zRBdo;> z7_~bJG*jx_8|d!p+e!<14e)xv#E)N7FmVgQZ4tn@BSP3Yj`zJz$TiI)yrnw3J`PzI zoaX;n8CG!hD-G@vRx+XS+`*&~JVu zo??c)t#aCVPA{9O8Rj_q z(+v4#RtHI6B9Z>SQSjr~_(s#ee9j~Y>jm=NAX}J=89+L?hy0rNA0+S(t=!zLd*z_5 zoRQ=l-QJ`sJaavkx*za>^2;MM3$S7$Mk-Ju81Gxpp+`p&>{yuW?D0*HhF2>y z`iM~V1v%ucCUSh|$s_R1n}!F^PCB0fYTcbj&AnaFIDG<)-`LkYomeQy$>$DV?+%DI zrUP`u4Zdplq}!t{21+7+-;wQHlAohGv3oU_awkfUz7&XXzaz=h+!*>QjnTf)cEw2 z&%VMx!jQ=Co}p_FVZz#C>)W|)=ejHgxfJcTlWe*x!wjLe4@h+l!X^E3L}-)L)~Jw; z851Zz%xBM|JMkCXnyQt7u5<}R6Ty5-AMYI#dMMv*Qlu^Z*<@j0o5;fPNW`eC|QC%kzLH_^)8i z%xi(B^S$57ljc3}(;nELA%rz+$t}DIC+_(@A~SoyCkCFwZ_8bk)?XoymWLG1w513S zHVgqcvVhWmar(xgJIQlYJ=080ujjXHCSehc%M}VdiOXG#usZuZKc5HhN%!1%W>g@- z-24h4L*G`09OOG<`gf^R;zKsujnvc+_9%*ocEXi2*TxDsk8Ll8yQ!i&GL0W$@q%>t zx4NB*TxVBbLcuN%lCdYnLTCyFq!V5dFzE9f!$k};vwoCdzq!k7-8zMaJ~oN4(wCTn1Ve+EtXlaMpO(4nTieeoPG~%`M&KJ zI*3g72z{LUFHa9tmaB8Hql+VPF~B6%zFwYcYA|nkqcUIqCaYD?Uu4#3;|bPhTI81<>STwlAsMd z^mXAcC{)@OqDgP#Xh;D|$!G1t{^W#WXtXlz|t+algFum zRFe2bE|)-0N@s{i-PuZ{mL4SXNqiFSve5-Xk9(ut2Dr>p6`Go@rw~L~sh26s!JpIM z#F~$qA@-#D{jG*?fm=x5ED>z6kSRhwTmgvfp{g-qB%;lTc#2sGo|FdzQYtfvLr{rz|hd5 zer1X=3jAs=viA{vsoG;6xngAgf1wAFJ#E66*tdM%prT^AtRBTB=w=_@fPU(?6vnU> zqn0m(h!6)tfguEa$)~su^|t;OJ;5acs(}NcSHPnd5a4tws=!c*uXs$Hh4j@WY;XRemKSlhHy#W0sHIvLf-`cP zFd7{^WdlF+@00&T3Fr($%h0*()S`jT@P}iZ{Q`QM|zXRTvFY(RXq>@V>y~S6?0h-!*Pg;XF6d5C0rcr z-}z~@iF)e4ldG#w6z*oQ;WyyEK(_XJdU>(iUcvIvCgKk601&3+RC(K=4G#~8SQJQg zC@9$pSR`?$&nOfQ^V@Lwi1aCaag8&9F9<4!)}fV~STVddOwBp13B$C;&u*dF^&xIm z*t7+Wl~*U+@p)LoXqXcZ=3Isv8z+=^;OXJcbV~RFf5Zq6Xl0FQ-id4W#MLSn>9Jg} z5r<6Mp`py8nyO>|vs*SFtPu|_yY3_{xM~EI)Pi0~@_C9Ei-*r~u)-HWchh9=+uRcP z`4%$qH-`I0g6VmTa-%Rj=i*?nM!Ib*I-9V*WkE1|?cM{sc}(<#g>c)Nuo*a@tXywG zm-XSsa46y08=p&Rb_F}^<86^t;w~*P&!oZAeFEnq@i5B)t;}7WQocYCw*MB0GQZ3k 
z({JXxK>pG2$6Ov7`f!UbD_*Bx6*09Qi+g~EO7B~kez;a=x%j*TSMC==Km1s>D;Fp!^k`33M1~zraj!^ zOe(r?dQv5#jq@VpMg3P%$5(&jn|BiZJ>LNdSM3#pu*Wm!DdWN{z-#5WJ=U z1v!>hzAe6{dWEmeb%E$`I*}tK$8}bajmW8m>`;d<9&96g4-+q>QUhR11DKE;Hp&#M zRDJ5%)lp1wi{t~Op(#H5(pC)L+H5Bl(e4G|V!qtGr!GvZmHy9eCdA zm(l~@cet_3ega&OD|U#zh>~yIo9o>%{C>;w@h>KvVP}@$G(NS(yW>5c4Q&WfNw`SD z!{#32o-h!$!pX+BioT7y?z~eBwf7+;AUH};2r$9W_ImODU!ASe1Z=R zQ__la$afBW1lJ;tGK5FvKpjF{yHeWDe$jquBKA6vwqm(Bpu^F4FHmc?nF%d6(e{&h zYQONd)BT6$rQM}MaAVJ1)8G<^enU5Up0{FpAm6c5zG0-j@Ht!D*LC4>-dd0VI4`*w z43K%4b1JrOjX>1$Z zJ*JtWi!#$Tpgr(;$?3}l2#tM8wUpgc34hirH4OO^N3M>lKEjGuk-LLx;|ZEYV*UwRA46EBTy{`Vc(9Q^ECaePIyngU&S zIoJx)jHF9`ECTD|l|E1FT8ihrJ=$cyae}{1VNF6<*hxmflj^s3el5;S(>ebO!Y7~w&nd>X@EYObwOFx|1Fh~p1)eLQ-O^1 zo6c}zHMuAUf4V(aZlWm}oaq^_Q(j;bjx}Xxis@MRMig=8l`CeZM|h_KS?BxKu+__q zlNbNNxA;RQp7OOKlM%DIr|E9Pae)(ayHa*IE| z&v<{MQ4s?*g74!UJwbcXMC0f0@W|_V6>%H_1Y^e;))UUxqh0rm%R}wHT{*ii-6ELW zxf=4Cq>T#}8^wz5MP7>sq>qXY<~KoP@|k@0k>z zAeBxaQslbgxD>mMAyS!AHA39cIFjA;Y9}1Yb@8?{*MZpQZV1!$+9ZS{=wApTr{ite z;^lZMTTp_j6)&AN3T}^7HrhSED?Ll>kd`emJxA)p=yPj1wSoc3#7|=BzVW-$kbj?y9=+STo;HVMLqY#DO`NqQ(l0^ntE%(k~jb>699F`p#qar*i*YP0bR6;srAO z`UclKM5q-IZo}T64Z-Hp=T2y0qgzkFcg{SsFMKC$mW`ROGL36dN=%)|WC4vh{qx_$ z(rvCYPwr4Ycoi08-x)>Qq~lNLoT2<%g%i&BPT8GPB?+%ARp*d>g0#GTtOWkCaOfzb zo`3Oq^g3$zk|mE7ntNUme5vmau1Xq4T*=sRzN z65mdT`tq-PSFB&tT_Bvcb4{=QWC}maAT+ooocjHflN<1V9&nL5@rAGudeojvG-Tl$X7B zNE8=FqKG^!a<84WUc~5vDEU*qv~{Ytvi|1O<8e|QsWDQW*StR`L9rv;cxF#f#0f#X z4*xLxzxwyt+l93$;Se%+Kjm4S)K?aAj!L|=(w^r$NnQw$69JJa#o zj4=(C;R60?MLEHP9%x2%m6u}(`a%nVx;y@B8#5Y@kO>XOH7~unD83X<%pEY?f7Z`8 zx0pT`>3Nnd^dkp8_L@~Mmrx$|zj)G(?@;4i)@b5jnGTCLqYq336ROC@fKhETb-(fj zrRQ5;cq3eavmuv;2#*WX2s`*XYq!zsNm8&QGyC=>W^vjLRDA;bHu%@sV)Uy*h7Q~? 
zS5nN{;3FWVfy{%ExOds*&q0rs>L%2Zki(@ZqsQfhehlZ=-8CLeb62HnT< zmCRQpPnbZJ&-(IYcWXtTql$Q>z8)pbWbuNlHos3^wmZxuJg1ITbjrVXAso>Ob)g2< zFj9pnI0DvN;^k{O607~Yn~!Lt)Pv}OYvTEQ^MiSShD^ix)Gf#s1$d?q9nbK4bSn4yXNTmUsPO zbLX-wQY>G&6~3$s59U53&4|=6ReI2&?Fr714yZY)4MZbir2rddr6;dzdzI7riIu_| zrjC=5qzd1P?($cQ8~tcdd*QEdTRbK#S|{FmcZ63P zl2m&m`zt&`lP*SI+TFq!*0}wy2n{l*n5Ap&FL3OkJB1LQKKaXWC4Q8Z^HVxnX_o=R z{Bezxh2VHn|N94>-y6>NJ47As^tGtsFqFgRV^a^0rLH_fy&%YL<5_mi$RIaKwfc;% z)&FS$j;G|VPlG=^5ik6*Us}(m_4t{D|L=~Via+J{s5wPe=Yp^@wu{GNAa;___{fpb zC88arXJ<>wD$Hdppb2NrSbklr-1P^B|8612`YosK$8Cib;G+T)u~?B`d1oNAT?!iq z;~OUtu4b?@zyx0_jCuaei@=ph%y3QbeSxJ8f(Qjhx-NivAoLfdE&_8e1%yH6JcTK1 zl12-p0iPB$f=&phhuL57!g0ACVE}ae^0=>HyEmof{`l8QDWYj4r?VmK^6>If^4e}Dp*OtP zflx9yNqCARCJojD#pE~>ak*=U?R$zV)QrAl)YA@mUO8)+duH(twsr5e7qy6HL1>xZ zefY<<%LHLc(=1*^nRZf+^A#(?m$<74WpLnd^MG&809u0LgzGYg@vQ*`s1cM%aCyxIMiPEqaRx zzgnOY1xgN|93+wtO*Ngn0*JNPfCZ2rqqX_arSYrIH|^e$NzRUA=C&Q1;cR@9B zV7C{W=z1xEhK;Spl7MBN?(8wZT$lSBYI4F=pgEwTfqBp`1Mb`{X}IbY@*C?F@*^W$ zZpG(Z^Vik6OL9s7mOyBYYXxUS0g}6_wO;n1@N--Pn$R3HdlgoT7Tp!Gh3?#;% zST8pYK%;9oOP)js5w~40M?Jg7QDR{2xAsTA=dnQ634u}Ypbc$tx(TIZ*@NjX1wnUk z04~18Pa9-5N*0)S)>z796Z4p2pv0Rhykifh_C)y zBP$l$VGFc$c4Is_)B(9Cq@M)0Jruxs*%FixiOQoB6q8RPNnk^*L`T!H;CA+ncO}_! 
z6tjO{3IH*0h*lJ@Js0-eO<*CIZqa4EKZuB^K&986ePI=O(-^8v=IAyLB0l>;0dJ#8 z-rqmgZw8+X!(vL1rTK)nQv_oCTK%m&j}h6ANLh}xV>Rk=!n1ATj#XhZ!K+rO(DZ^M z!c(>jci_y|v%QocPzc-Gvtu>aBs!sIk1hbkQ4Odz{h`UMY1il8k}ouP=G+dfViIHB zKusYLv^DBO;KM(k@t40eV4q39RUk_Vj(PUoh9YMPQ|5TBPN;7f^&jyCDIF;H$AZD> z<~OV73VJ&2u*0mLyD@rCg}~R>`dYs*(Wk%+h187--}^2%>uFDBUm4FaxsiyBcxNYW z9(*BGyayA$VBzH29MAstr0K-0njajxej8DfXBVoX<@JFX`uQ~5FC?-2|f^mbh2fn{#_ zqXq~e;NtPwl<+nS+UCVL=6Rp;9d8AM0I`D92)A(j3N?4jhCrvpXi~FtyQEd(oJA(F z#3#fTx*5c9y#7a^#Z@3xoSXz{ZNY3#VYhp^o?f` zQ2LD5TV^;yB`H>S59am%(R7tzQMJ)_hDJK1OF=0CMM`P_NtF^11Ox?@l3d#|Bqe?Vf&pj_aeYIi;CYXjxJCzw#d_sj z|FW=>zUaVb$U+6p3&*`krpEZG(J<`z`%(M$!-6RGElh8@upk~29_!vlP7 zDU58+6eeDkZoSvuDNgIvYoq98Nk zspu&gk%qg`VBGss(Q_S}uW@qd0=Y*PvrWPy$~sraL(5J7ayyY0sr~}NRT+H0?|7sT z-~{WA%!ujb45E~+0xjkI?&S%TEVWv~zhOWma! z$pQyalDKF2cI*4@tFKjBu6n}%@@sd5b%Nd(ol~;P2D=vw+`4a! zX2*)u_NU`@+1AvsU)7lN@@B9Wf-t73C{=(}Fecj!Ho*EUm&I9$>w*&p3RkMBe-7zU zrD`&(RV9@AaWUQCJ#7MA6ECGXE_I?Qb-j0b{o2Oj*{`e9S>F( zZPO9WF9@9ls&*`OzXuFM&W4d=;=q1UWy<^tMCR~MX%`ulK182# zC_I0-ml0YuJl}S4p0^*qNhAn|G(!dtHOR@u{ujk3my7MS*#H z4)gN@y5#3YbYG{(k20*8tZyS1%`MmOsZ9J%x0{-aW`>a9XKP+yqzZxFmz3Ny_~IOF zv8^V_tc?C5u+ZCa!i@7=1zyTl8pf>O0hAMFY^jLPRp84t6$Ws1?R2(xSB$LSA%}RU$s_Vy7cs`ed;Bw#qEOLyPd6fEN zbv!i8PohXK`~$J!2>-+^mpDm*e8CGbK6KqE3Jg2yn?U z9XbUDU)#!n>8OED+VWj1co|dsiK;W#$rsrT9=Y%dZ3s-+Oj@FbQ|!_oO-^gyf=J zQ(d>#`G7rTwU)tM7TY1+v}e(&7rTT%o`GyuUeT4#r%PVq?76NnI1~4joM6b$;c%h2 zlxnW+0iZ$_0w@R%ACnk8q#;O+HED=Hn z2CK|N`%!O3efUtYXeC3b9QRq&2zb)bnsVp)MG@BLnwPJgBmnnub3TS&yukU9@wZ@L z@talW?qNd&koQ=}#I^q9^$WWJYiXLp4k$1d6b^h5aV9qvTzqxMg?6EOzX)0f>)LM( zAtF<`t%Of^7Wpq~z?3&-J+FBRKM5=0-b--wj<@PU zK9YZYd>2@>Qi$k>2MFf+P1qd$`2u}x`SI>&C7p=qrTEJXC+X)Osfl21qZ8|x2y4=J zAwSC!=o~&x5p~L+)SjiRdTaTNc~y!Mo@gHKO=>%K0p$reB|un5nV0b0(J1sQk|m^1LHjSmnBkoQjUYdVa$UkiS)G z+rD}5YX@bhm%4(?Q-&F<7@u^iLM9}q*6W63FzMkakHgvR+HC&FJyfhBw?qup_2~YH z@)-|l4cDJ-$#&x)V0zx3G_~Nw;oe=ijZb|`u_Xa12)lFV_zM&s=aasQ=jb)dMshF} ziDhpFSqbwA_x+WWqbIi2MuPSOkic-*65;gM*SW@eKc{2`m|lFI4Ea@$VjEPo6XYJf 
zh+f|FXRYOQPd7=7JESDc)Q^#rBqAdkmR>nBr3*wQQTsp4>?WlCfjWZZES+RQ!e}tmP%?jAn)cQWqR6chAD=; z0m0wIg@QKgKP*EmnC(+w--zU|FQ|)iDAuY(GZHIsdNDYJe?jCIpt2w zd1cM%`el6}jC}3I;~DuM8-l6y0xcCVRU0EWxigcwEG*~944HI}L?;w!EM|1&TdE15 zd}wZXAcZ`WILwI_yL@m0YFcC!5w9c&p1}b>z)SCm`!Sfj!q{E&S7uG5B;bkJSEG^C zr^y_VyieX>PLtiYcCg%}@WeK>C+hvoBQ=#`64v92JmO}x(@Iq<1}=JG_1uRePJcr3 z#!jj@X4T$w>D*~0xo&fq0_8o7?f*A~k?B)8KO+I25dQWt_U^wruU?BG3%QjNZQGaD zMG#b_W=^gCcsP40ptG3dxD(*u0NFBDFO)$wGG9OJ9Qxm{fs1VZmg|iuFgmqg5;t#N ze93VL_SouSP?L}|Z!)MJh+(auhexCHbZR6pihu-qkS^0xtVCqkLu>nLO^HH+sl!X$f@2I-<4m4__FNRV$7pJ#;|ZPd>n>?9sq~^ zIt?d3E#>yEbikk$7Ucz3XV;MVHmqhhm1ibqNI#j3(mo{Cf}~oNuIIP?=dBYCQ*0I~ zgwb>3+WUfDO&7H4t7H8Abtjk6hQ@a`8F*;2`{RP^7RCdXYEgEd^vqCTIyU)s!rnK&*PC3`|b*=3|z-7QogHA%L1P_J4_LmA~{vYVP;Lo zA2)g5V4MN|ZF1uGY4@sLSJxDzElbOhNurZw?X$f8?K@hR&rEpeb=RmPeEStYj=gZq?($NwVXP?VK5nH)Z=BhlM;JeR@BW&&P6Q&_ zn#C;M0>kz(67UgS8~L8y{c)4hjxgpkQvQ|@8gj{DSN;c~@b~=h6elG@jfR(*0+5^X zH3MNN&>2%C@Cxa4g)(wn4zZN}b|7ivktOK80nFMPeqqCTHbb)aQ6DY?rWK?1liuRF zOL$!!bbNU8%S;9$mkILzEl%YvhdjEcJ~Ra%X%(278x0On_GEZ>mnFL~W52b+t_Q|k zeIG-^?953j&fd=v4aRBkqV%|m7aah_@_MljFnY*S03=bYmw;F!XP~-S&!`V5dr*FP zU6?jy!NKa8JL_3_WerH>f$RkBojCvLYhTNr9~9k+4Ac$-=th9=%EGxD@G738Hrtx^ zXk|hNN!}TS0<5yc>hb*~hf$XO`n~9!F!_HW=0R2A4s*j;rMNK1&%WrJ`fo`e{*%8u zl8(vIH}v-}XK+x;C=ku71uVJtZ)*VF0K82kt&iTc0bn|Fp|e-?Q*+btx8|)c@;d-* zeIo&fcvO4;)QPfIa$!D^+_(Jjvsj6n9ZPq=RGIJ1CR_#$5!pf;BC}7dnWGr03vVyr zx6>;5slwe-1O{LGGvf(H*H^3Uaqv^;65FUE&{6Ym{ciZ~X`2Y=dCR{Tv;6}XCx-|> z8YNZ^xfHwP^&l#^j4|&^Qa_u4j4g6$0^`hc8uniPDCKdIg@XN(%ERwkU^C{e?mJae z!FnwSBuK7<|41?-krmKx<~rVhKb(g@WqExxYzwBb-o}g167;{0Y3EJ6{d?}2x_BF8 zo4EpnTeMjNgjR~24~iA|%o42A&8vU^rrVcG9Z33OPeZ`SAAC}Pk*L@8UAust-3Q}4 zBc8X3(u+Gke89cD{QX_(OU76yH}2|QKztlySS?<{6>Cs+IH6`IzEr!8g|RLp#unwl zZV%Y%G9q@cTOsds$o=A8CnseHujl@Irgv=&!+;_+qWUka690r~b@9}7_L7pKf*{J@ zRyp8^`xi!PCuAD}Q1-&r$FoTrWRw%lfa59YeAAZ7C#~#=PSzTp7UNsXj=O1K7-IJn zbB*)kLhO;wv)otqDb&mRT<|&g!O!mU#8BfqtK4RUFeq_#8KrHo&QUoOwNVxR0pqA2 z*KPwT?W|%@btZDzCNwREtC|GZ^BYf&JZM@t-;~f|zJ%*DAocLZ$d{sR+Y_L+f^D~W 
zxL;*+I_k5MiIrN_-L7-%NUH9yma04jKHGrde$Az*F7b~GYXMAfz1S#;<^o=9lC5_S zKA%Ufs`HEfXs0z1fVV8%2B-TKs1@wV*K#9RLQ%%#L}8Ju;e|}L_O~q!v1^x<(TBD)wA+NsSbAcM}^KrEz`dF z1cItM?Z|WC2h=)uAu&cxyfg+u~cDgirvJs)$ToHr{*+sAe$ju=(>B@UwT1)X0mf)(Y_2!POLec3E%;&-9a{=L-jh2x z?{N7j*;7=)GVz@r>~eHl33#FwW>jkozRkZ;Z=lIn`vp0{=>K!%b55WOSUwP1N$rF> zbt1t())tfL5iAgFweBfTY(i*utsdDm$cHt?vJxI$eZ%R{b7lBv>^2wBbX)4)maoei zK%KUF8=xfB#>8JnWi}7#@Z-Y`ZHOFZHQ0RDPKuT9`=2VxWsmJ6tx=T{9*>G|f|+yD zOYq_9k|H9q4gTO2WK_Gbo!{W+m%oc_idnQzeUra5uaoEeyXpi2#~cpPE=;87kIK8{ z#p!y6?aUhbgp}O`4Jn3Q!O_^ak12)fY1X^)QzQii9Vkwi-|?Z@*?)S!jU#MRm?cIvrUU~=e@~ZJ3VKukRa}7}N6CQ*401|2rc-eHDLzaT* zAP0)FGizGEya0J9t~Nuj`#tQN&i6J?wTs}0Z8D+8gkOG1#=5>oqg(%7j=V4}0JYOW ztUU!Db0`F}RvKW_(uhwo?-2s}Kf`)HOtSO0cs+bSWrzR>9k2qsHFeYe`N%Amb(i-5 z-IP_;7fVOywuQuucpn}9v@Tvb15myXXEM*V3LlemQ(n7n-Fssf#OPD&|)jlDZS(@e1>-rp;CXmqTS84ia%4Zjj~svB#mO z^)u0bIuS``;iwpz&?GNubqDp9eM@xJ8eS&UpTxy$Ly6t<8yJ?ZY)KB{OvJDWc z54eZ{WOLRIuy6f;KzP@xk z9N*oLW?+H} zdAAi}<_g;l3R8$kWRAU3(VYg2WA_bHg9qYNakU~mjfz^`?g=(r zbz>vfm|5dz<0z9nxYAm!+r4!9z+_OEPlC__4*mP`=KPY!)x#7q@a>+c=gJtUss23^ zHMwZna5rxW@2L4M1l!XDcgN`0h`_Uu3UZjKkI4^C01Q!6r%44bCpKaXF#R>j}g zG9e`fhNB12&aPXE+`Sb~4z!Q_q#50Rrx82%a%vSJ1^6F_>DM5$GKz@jSf*y(?uP`( z@g!idRk;hKP8kY8vdJDGV-7i&J^l9_=bk9g;*_mC03;+)H6&I;*fz`yk8|~m3OayC zmH>lbhgEh9yP3C1b$w*bBbCa>a$`@iYV~w#qmaPey9b;18U7_ihHT26SN4Z>hnYw% zL4UAGrj$~6=2L;%u<5f)Aky&k^`JKa?20PFzW|oA5E;7U1u?QuzxXrBh?X$;vu)g* z{f;kJ#bwe7PzzB3X#<>&?l4<%@17poMiB5ShM1!#E zs&r~~tuLy{K{BG4l}4AgAs%YvN|k2KEFp#)EhGMT@ofLiXv%%l6P#9B67JHESAqer zzUC2;Pq{TcdF{9muAx<@Qa3@99nUZOk`$iRSH6DmRE|^$a3&vW1TPKhA^3l!4{D98 zIIY}iEUpn&GbSGfW()G?op!2JG4NY!;;z1L(Uw`c%E7ie3|6-?ug%;f%ez6mU9+_w z15KpZ@8nXFVq51tcph+dpTAZ6vG~(kdt09Pv)?PFw(^NuB-1UnFSNtOOle*~k9D>) z{j@{8`PqPfg(KdDrfC==lq|RJTwMtDlOx8){|zpy`*0?Kc95WZq>$K*fOC-n&59t} zds%Eg?djoJb86`T19@Dgyb5?4gmJzq3)1Zi8Iw%LK`nl3#@GNzRn1bi)A4QK_F3u$ z%q=1f(#IFI?YV#_>_Db|TOQ)v$4QlD$S_CO_`iUMyY$YaF`qxx`UG?(e}REyR!|R>cF_Rr?I=+jVLJlVt)6z$OSr?B 
z;9K|Yr5ZR&JW_`cvz0CkY1BZoCOyM@fA~x~v<)CRr&C%8wMW3HaDnLuh!KFY8{o(V zglhyYF!>DHr5;C$C+GpW@H5wGhwCRe)J+H|PFK}9e@m0Og-YH8;vTys&(zUi*ESN%j7{c?>SZ`*i>8sMSuP0lp~5!p!EBe5DBdW=HR4MXXZ40jWOns8ksS_&yKOZ z^GBYwF=_SMou3EADXKf5(&n{GyGAzk6Y%#v6W9q1T%bqwfhFMbWc_^en%Q&4@Bid6 zcM>`)!cqUSEpdWs+avCtd?WhbUcI(k??iQxHOB?@LJjqIO*S>p zFw;NpzR)KScWHoU7 zN_9E`2Pm(x%d1O48419*JD_61&?P``)nEZThC|)?D^b&lU%>z!UYOJqyD4E55I+s= z6~chgQTd2JyW=Nprm<_3p_ZnebzksvKd+));d{zB!h$&+(V-CD`T2wK&K0A0hP#Dd z{y0U>H~FcG&`#xF!Y4Ujodl^K=ncoKYWw>ldv)S1i^n60UwQ_)n&!LMD= zEMfRb1l)h>jQr7ky3l5amKh|~^@YW`F5)f-#Jon3Uu9cgwPZu|h3k^4z154e0%m+6 zp`Fi~wnfVKmz}{qh$H|?-Wt~s$S-=3m!K_`8vqQvg&05-X6lv};4iZvAs0dK_1*)d zbeP^YApRPlF@0=eO)&2h(HNhbFUdqk()_ycmeuRgCQSl3TZFS2r)Qly5UbT%U|A~u z0z^ap6Y1TK?4>s;f__^rqUeJk>m=3Chfvg#~Rw(9CI9JIpB$H;4u4~LzNeY z8 zoMjcTq=}(0!$|doTzazp_bsd%Vw!JabTa4-xOtmJ0;^E;6+m{bNf>k4grPPY+3J{B z3b64_?Kf%5o+0DI-j?$%*Mh1j$5T|LREvwCU=}tj*e+?3qNX%#ULfn)A$i_JgzLA0 zTUGsz`?gku;BsR+BKei1>iq41iZjD+=vO>{*IfI_D21h8JxSipilqe=sA{DSw`w4d z7yP$D(t`xxg+6rMQf0VbZk<6MFg^j5nyp2z<+jjmLxS{wPAst8f$A2Jp+j%h)3?7- zkJp800!Y)k^o#nJ-uzpCO{#4Fi${Tj%K^H<2}YH&gM_2T#dN968>gevkjw_unR%L_ zQrkFux{pxu@dx|+8h2tf>*KB`4CUd_OEWWz7->FbbIwmI*I>v&6uZAIP0r=?hAIVzn zxyif=Pgpn^9yHpAd&hf#SD^F8Z(F`DjX}GL9I1MN*8w}Cracr@Ngr_KZcK@Sf8Zd% z*SR3UkqlNVVU*ZBLL0|nPZnn-ei}0#CV&^N`$#3eJ`?A~A2yxJ0&G5EkeR&N3GMj> z>&4xD9=0ZqaXiuM{2&hyHwV8dCOq;M;CHO z^-VBwj$*?s72>*mw~3@vpl3HQWU58>rF=ClFCrImaA+Z1x%cI_a4kv~vO=O}M=7?> z@JyX=c-oYZy1f{C@b}EF!@lN%aM&PU6;TF!FlL~`v zY(Yvi41M9884EwU4}R-3;#7;8*agRTMWj{4FH@uG?J@j${6C0Ef7QFFEj0HKMdV+d zdQz6;iaHrf{Pjtab>aIeP(8_iv&P;PQ`}IeDhVa+CP%Dn4-!I$fk{2cYsgohmkPoV zrRhJF++H*1n7w!{Uqu`zDKN+mM0~UOav_COABRIr*LJ{%fX6yC?DhA2!@F7X%ulxx z$VNhx6UZL*mxHMkEB0k*Z^X|VUAdS1(ZwTz{_j*9L^<;l;PN`G8RPrKrC?v<=9E}xYtJh zD=IJfoT=uX6j!TVy1-?8y*dt($y+pG`d-61PO0>=Du?4BP5i?b1|EGIFz`@GjV2Nw z+a+c1^#|7(SlIK&{rUsH4d#Nz97f$)BuE6Tm2*ok+?Q{DKK_OU@mlrIgCmxCcM&L~2AWR#ZeH;;g;6s#xX>YwVl*HnB}7>gbm?@V z$2mFw=jtEQ?vvcOi*`&KH|iq;vA3u}sfy>*Xi|aCg?d`oWxZn{TX`-E8TG4e0dIYt 
zFbY)pYOB^Hh)b%}7o3Knm_Wk8+qJIfox2NE?A$Mrc4j)K&=~Y!{C6hh4tcds7P|{1 zQj2-Ik;o7DvGM;x%#R`0N5`q(kb{Go@0I6SD$B`fFhK668EQZpDNYDex0}5(^^~PA zWyFt45{v6_#jJ0Dz6;G@N}Q2-;aK5qlfF0NKk6&9%`kSvWu+15;TeW&)a3mLL z;c5MgB*b10v?O7K?s&i%#AUq?4SUxSh?oy^GY=8O(xW&gy{ao_$h52zaoaHh{S4rXPSUQ$6b)UPZCGO<6yZy!F(@PP+8y)@T2L z!g!I;KQc*k##ZVdxn=82_=cByR?>6|;r3+@*l;~y~(TZ<7t!T=O}dlP9?Eo zBXzGxAh2C8&4`EH2cli?UaEs-d|?;pMBRRoK1BCe*qwA}QGOKl<87N<{ra?W3^StI z@UKecM~DCv^GBWc@w}ATIR!JqY1#OeH?7$ETlBz(-IUon8ZWD+Wz#X!kiRc$#SQ@c z3H)ddWlCUo*@W)_gFS5t)hsE?XPN{+A7#j!b<2XqF?F%6g%fKU4s_LoC%rUZ_F1{V zBR%!e=G@DyhQnQXM}_l!YMDM8jv7-e!jHY}=QFYB`xMGQqx~2{va2}@nd9-OayyuW zp!VcohbJ)1UuGPh#UoRasyu@#PfLA;#^V)w@MR>*=)%D_lqTbeIp#OQ8LWJYt~m&O z-|O-Q4tprPJBi&D?p3ocr6M5iyec*=4sTigvBIeg%lyA@DBvAHzCrV0lBne^%;=P_ znmxQ-LkJtvzY~UvA=x9CUtN$>vm&rGs8kGYRcA(N%S`C*be^iFrmSAugpAB#uZ~wx ziSBhU(_s9#eh>jUZoIe5U-e0g8?{~zpZ~mH+x^<_%TY$~WkK|3k_m*rBh4h+m|6Wo z1HDX-5YlcUYDncmZi+y%6`5X)c6pL`IBY?BPO1u7l89yl5;6?<^!uRbNAzGgf)b1- zZ$>uXoh6L}4}kSQd)cnp8| zjc<*qp!JT-2#FTqr!Gj(8xQ7ImLksY>7cJ0>O(}XLw>)!I3z<%K#c+*4{~A-r>Mcw z?xV@Q4ZsIYBA}sIiL5 zvs+5Kj>WBFY;O^7oi-+Rf;~!cu@p2jK90%cj4Yr?c5%bpjd|7GHDQ1Ht2GUYhMYTy%!LJ1L`#5y zO4jWvp{l(+v*P0E*T^eY^dY`cSr`}fIM=TU7S0$X;8L{?ZE(Q z6#ungz>TC&6wCo3qWuM|60&;x$oNh$rIH9eY`h}#$Jr=TylFUmH2`xO<5JEqa9}?e(985{T+|vw z!rM}Q$PS-YF0cx4XJc_{(uJ#Trb)U+U~N5Nj#A}Bd0y$;z6yWfFR;J&3vLm#E^ znb92=`4k$tESY6LGES0D!4*1h*6E7^R|LmfzzG^(_9#D+>w2jq@+|K6ffWZ=_|v=f zlLtZGQgxJ{zmPgZ89`a$?E`G)>x4`p1dtPVy}s4ZmpShZ{RkC=I$*Cy0#R(xzAKul zu(5yb8yV)oSMDTY3Kwx9q7c0N1zvul@xVRVCDh?2&PdK)>bJwL_Uevxnt|=liZ^5# zkOQYA(jIc375a~x8}WziiT^746h!|1v;w=|Dl2s~FQV0?SFa%4FbwKiBzH}vj8A3a z0zjeAKlrG)qi@6e0NtgRLSE5OeuB@`EsOZn-oc(;nD{R!ncMGG7r=%)GsHI)2Mq83 zs$|;^-ktXtl4><>Ll$)7z#m(8)v0J53KbK3WDk5+x?aW;Ry;*ksA{OlihQp7zBRS{ znSP2tBW->hZ1Xp3m#qp6VW(&O?Z4L+41cqL6*00CAUu@glq|FytYdqHCfL0l3eW}FtW+gwA*FbSWI`m$AJwp+%KpR zPoZ_r*P$cNKcEfmnbnk_0yhv0(Y2-3kX$(%DQF&ine6F>0;|9O%fGg6=^`Pm$HJ-Y zVT~BzykOB-`a}p^WIeom3)QM;R5rx4Zzd}4&YgxkVO%tjD769R1q&~KbPgBOlN6js 
zh|KonOdI^evxD`I&^eBba8?~#12G?tq&E#ORW9J-2|Kly?w-F!I-6e@-lQdhgTXLJ z5NIScS5z#l>j>o7jZ^gxP==S_G;L%Fok^}cV77GXWFI!^9geMy@}MpA~#4lVIsZWn5$U_xP!6|t}lxatSNo!F_=?0NW{CBs?V?j|W$_5PP>`7wcvM|5#gO24Y~k4q1z6 z9R-Z4tF z*XwY#UOKtGB-z5CfyS8*sY5~dRX}Lr3>zNm5Vlji5^Gx(XL*CV;8HGG=*w`pf`<_c zXA7IG4>!WLxKc%Ono#-GOXpz{`IH0VqN;3BD(Oa&Q1kE{&0l?`BYXLG>nO1+n6=!< zlu9ryr&s$9Er~GnQhGy+qy%wPjPHETn1cM<3eZE*7=#K@Xc9%B{>lRa*zPNVJF+l{B(p5rU z+&;~-d&##qFz~aDv8r+VrY~d&)3CQ(%hUWXZxao8IxA{dwu4aFVzj-+1t+X9QgZ! zO~+*fKz_s$YyU`{gN*YIEb8Nf7%28Q@fGVq@zk`p*OMg_*Pk+NtcJ=(?Ld`l?SW7ze z0>*udoOm8?do+@FS#Drtt>rqKci2#tU*0t{FuVMd6Xi3gtWRQu_G|3{amdx%*PJIb|XUy8gzAa9b+PS7>~FPl{?<*(9mKgk$Xv!*S*t0 zhCUHpa6Pzd);dfk;71~UX`bFLOQSzoNd`Ud6wk&SVlp8T=(9w_DzCdGsdSE#YgU+esrNiq zaWYof)p+-cp=gG(FytrpKiC3ySXtNgqSDNK;;%>Z_xOafwCjh((ArRVe|X|Wvzs&s zTv8X$Te|`p5ssmmWx&wExv1FftT%=Prv)4u2$wm<=D*MS-g&kul_-t9zmM020=fDEalcx@H&XO8IwTtF8u9n@ zD~4B6Eqr--RkH%g*Jq#2LRQXmz&Sy^QQ!ya=YA*1GLH4yI6hvdtPp(4(!+F%<-rU20a5Ce&s2w_=Y;1SsKGNSalAuNt _|1c(4z-?SQ-qw zr~3jZ89NcVSIpbat4|_u0j%EkD-2nqH+=x7+}u@?UAy$uEL8Rx(*v3m$0r`s?Wtg& zgKs#X61Rz7Rd@NJF}W*T782=LcKKcr^UU{`^FNsuGyWxxBM91xBP4a}-w&tYumd@X zz(T%?sg@}$C}L&zyzx}RLF{xF2Umy;a2>jd}RU50^|1b z4=t!HVp!|13V{g>o|WKQ;|vD@c-NND@Pa5QbXc z&HXKeXk$V*d0#c=lh0&<%x?j8F`}s+S7m4)??xK4rGs;eMDHz0{uvk(1j7kDQxo*b zy!qrFM9g7zX(BDk`f+dYpAT3HlS-QFPv=+V$o!YX{$->66jGeb_b^AC8RyCxrm%N&W$Jj z0+$p$I0j$2>*}tgD~=ETxn^#7c;7Q@%;B@k%)+a0Vh2+6l!wi!Ozx{~f?B`@72g7XH}DOK8oUr|2JJTS!GH^ix7Wjc1R6$P$}F(ZWPuey=C z`b%jS%rXfz+CCD3e2##1OsHm}&lc~Lhd>G(;+f)M% zkdrtw%!#ow5iT53;&}WD2lma0flQE#{jGz)y)aB7^aACRnFd0Txv03ucN$df@S}AQ zIV(n87vI}f!IJ(^(W>sR`8Nt`#_ictqHLXW~* z03EZBbNlICIqNVG!2g*p0usV>lc^HWnRUm!G}fQE@LJZJfP}OnL&tR&_Rl}`Zd})N z%RIV0MPcE<@;ZJPi{v;%#$NPs5UO11oagp7YDeFOJ&ev*;66zT0xKt4g8y3SY8wIvHf6D5~cnr030E-|9zw2@_0r*>CJ$!2g6oPm|x%| zEcq3FPz2F(Ma>IYe@8FBNFy6bP6asUkO{nbbId?p$znnEaq9<757_t&8me%yJ#E^) zBGka6^I`vP4d}fwUB(k%F8H+M&YTm&vzzgcekrt>)xuoK;rq7YkOcyWPxKBLYCyzgMk_nA>m z)0a{Uoa0ODpNlG;o^H7L6=_F;{we*SLu!t*db=e@uxXnK{h`IfO!h2Zgd3h!zjN(- 
znw-=~%Mg?))uxc+PN8RurjEmkFE1jFI;oEWh^r=Z{o%@gV*{RWf$F(^_NnB<2Hue; zv5x%Rt-;levi~9y>gMqe4qxzNufA@6x=g-x9eOASF`MDuX$CLkek{m1RimxL67?^R zQ6$P60F(H)na}J=G`VpiX5-){R}b~NDu3^+qg-@R_qIZKl&jA`gfN0uiz=;wAcX4( z|Egm#>I``_qi__E&zTq$_xd4%PoK5?>t>Yy&aY&Cqtbgj!U&{Y@#2f-@jlxT(V_Fl zi3e2!dXSlk`r$9IhhTeiTo>O2ROrY4FTeV$uKu%0nv;>bK6m0|2Af_+!awsT|G`m- zThezGyd+M|IK)Mem%L*2@Jn4wP`p&K4)u6p!EHJamisD9i^Ri37}`Mm`FxH`DMKE3 z!b?bejDg#-z3?;#F`k7eRAhY%1$p5aY;@~xCPaa8&*V1r;P?ije}P575^CDAm=IjT zh2QVE=jk%`%s@n!RToE6GOv~=UN-X}AoTZN)a}s4e|&`w-e#;6U{KCFMh!DuQiPE> z7j$`pr~QP;&`$~{wIQxw@U6X$&}gVOyyXch z56~h>mNI#p`YlHj{ylHqLf5mU4e(jyuWlK=nR^w}{_`6zV5(2bhGwhN-jgh)q zJIIaa(5%W*cv46X&p%@?bk{hpA8l$_z##dL1yx;PZ@<7B2Z}eaRnE*$Q_Nh-_gA82 zs$7c#)~@u}GrSXldPubUO@gaDpS0`Nq3fX%vFm2;Oc@5h28!=Pu>s99H>kLCu0M0B zVt;!bV#n%{<4tK$Bm#B3ab0VW6f-sDCc+|!811}`>19D=rC$FtQ6$=MjMrTGPNUQD z_Uh;bK@kH?y8z|+fIWj_89b0gKp*m4z_5-KA?8Y>4`Iccd?$&yXW+COUPJh>wD5Kd zoR1NNmLdh3G8<%kl(NW%iO>|*jE`g@Xt2?|bfJoF-o*=L3xR2EmLO9`?#*x;F4Z}F zB+3Vc9X>)o+Xu%ZmduowE+!*^zvx|8X3g(PUpAvm@{EX={52Yj$r?>PZMU#B&oGno zYYr4=$RuJ0XRPlMUgj8e%x5gt`Qx5&4L>k;5efH^J)HmG^acJy=VuLZA*J4P>Uq~h ztoYv25VD^FAbp0GL_V>Yo=ctIeywCLp$LZC?OcAd&F817%Q>JqG1AUZR~95DetFTI z@B^-(JW`{3vYdC?QM6x)MKo9G6B%0n9^{=h_DWAJm7d!}?x#w%P(0D;;dm8ytR!gP z7X7{1zrpxydorR1f>Z(FZ}r~{-z)?@V>Zg1uOYwz*i zcw9H|g8Pp{7rtxfh(&47NemORNDB5zU$Ptz?#5OrbE-M09oL)AgR>f*bCbYahsn2J z5x{X`cE3ysUax6IUV&A9=xH@nm~deV^sQmgf_x~KJVeA0(}M^{l7096Ud zH_OI_U%NK%C|g`a|FnnUXn(U?xNwV*V*(bB^&_w zdD~lWHg?99Gt?FUpM->s{=5pG73kwJ)9SS-{QBx=c})|OebrC;LY2q3hq7lb`k@y< z;#DkrAl|L)e`xydc&gw3{nt5;aqK;^M<^kitRpkoAt8GtgpBNSNcN0EWRz8sm6dUh zWMqWwk#jP#N7gaV`Muts@8kCe9uI#Q_v?P$_w#vO*YmozE*|Fq385mRzu&O(D6Ta% zYJf;N+ZlxX!m|F2{m!LifsK$_7AjpC`=lw`w+FR|wFQAjKVIHjI(s{S#wFFUKAteO zdO_W;eq~+ed7)>cmjaNm9MoJb2iEY3w~4r;w(mmYj#QsYTJrRk-u(BY?4u9vT6ZHO zFvw?^h_)s6&Q_9`4Q%uWZ_KDt#@K-6m{T zf%8J<0u_K&uNij}UD#G+)8`&jd10;*UMdB>`=$X)rKfz#9Fsd9+!O z5daelkB||+m~DAE;{^Eyef@BZ|3hDl{UX6>NOdpV_t^_o;5YZFK5)r+{*Dd9 
z_d@NZTd?48dgATu+UfZ^g(m(rDb5qyL(D^hhGbMdbAX|3)d4y0LuE)J^9=_X{_pst zLbkN-SHZ1+Kc$yj0xA33>r8Q;B$4+(2L+HEB|U9-cXFnpBePyPy~B4=Y~eY{x8++f zQUp%ok9yz0sJ#~&FvAVbAizH&t$_VQPzx?lP9KQ*Z zpu6|e=wsq5brDroi&CQIxmrf(qTFt=NEX-l7sy({FKTq4k5FK)iNf9{3LS4=ero0T zTNi5Eg?3Nux6k?AHuHr_gm*4%P)DOreP{>28BHv}*w=p8Wd8B4`9aw6M3cz``p$dQ zxaG{wSN3Z>EqcF%fVuxcV~neQdfoCe^Y`xA-|Z8wwFO2hzGy;Tg+I@O zde7nqwKhyDcJ7-+#N8nAFX|#l13OVII9J}-NZjLl-rx9Atna|1^EE2m&59Ig_1gb7 zc}Lf(FOz3}j11YLBYE1E8S(_Qhxy#xV34=?OvmoDo)`U!T>e>Jax^XuoVm5$qx-C5 zR`|+lbUu?e?*bO{Cyn$YVUvrrWZ4yzjq9zcGJW7t(hI`A*q((U3Se8h?`)(d1Lk6= zvAq9{ISjQ+GhqKJecF|}H%FE(0&u#(F|1R4I!Dl(YY-D@|Dr6wV8R&4Ar#UlRXV#n zS0B*NzeScBm4f2Sd*(lzqc#OI{dA3p#;-ut|1{JH=r6kq$GmcUZ)G^_?Do1A);BKM zEm_V^Bcuwnz!t7Nw5pu0me{NQrF9y!TZ4@dAh{(j`11bCH({mpWN;!;NSSy3iXS-b zCCN)N&;E45goyogE>OLt{Yn_Fa_=0f{K-{0ACX!yTJBO; zHSw>$K5wJBF8ki2qEY5Nhd$$T<>08xRgqg~4??N0B)tc-o2FrwpvTwlI2j)V>(=@x zpGI7s@}eOcomxA*5{cz6Ki4vXt|^slCr?uM1XN|QkN6iPG>`Lv7pTCN3jWV}?Wt1m z_3zdYg2!)>MbJ>{2=p5oMbr}2J~bvf;W7B&BQN(Ssv~NUR5$KDjr!D!U0J-*ChD3V z5@8LYgqnpsBECiIo<$bqHti0kX{X&uGi0I$qXBGaL)7f`S+%11QmW-9q!}87NQKep zm4PUVoA+l=oqRA?!3)o+i5L>J{l4~F@LN;E7dYeTdHL-@X#3DZ;dgu|=b_Ju3l4m9 zV)k?R)eLmdkof3cd81@^yFhxxi^%GSx`Gd0|2H-OW+SKLtYctJB-6O87@?*)H;8(i zWXbQ%g=QE>3&D6 z0tTG*Bz8|!sE8;4CH*Vf}HN6`hrMQev;mzcon~aJ|Bt>eS+l277p9|%uUNmZe>X=6? 
zYF{{?PXtMM-O^%h;k9b1E2h7^BMv!h!f&_z>v$_U2W7g45R6+cCq^04pD%J!O3(fQ zcXGp#`w~}ZGN__Fa5h_M1E;MXo{IiUXN>!tFBA`pdK9W39)HI{6869h;T82rq%V*% z&~OfuPKy!y0nSf~QlBRF;`Ih~Up09dM+c1?-&BY`#{P0;q>S#Ux{tO;5slD{ZO*&` z;%6a^XM~1O$VkJdZI0*6!(K+f%XyGOY4nVKiPD<7oc<+7Zj=8kmxi$3;X-jJ_j;PT z=WNHn;T9_J7910+wj>9~_~&^0Og(Z#gbE{0xw%bPLF={hv{7R~xC66<94De>pQ`QW z>;54u_JT7$ms2R`KgjO4Y{&ZkMCrX%n8Qa9=P(%1TuHvcb$k%XssbcOctjbmK`Nl- zugQ*Z_z-H%mE~Q~_=4&`A-3Ed3}{*TrXMqixYN1^mE22G)v+5L9Ne&M2!h+1@$Ydtx|jL zv~PAn=^7_1{$YYdZ)x9vA{Hf-gUEh109_JZdS-O&b~NZg7bx5|3f7+G+R9u^L$xnF`8XlD!4_!|H4ea{ucT27nDx0gH#2UHRI{p^pQ90ioA4W!M9sjF*!-`j4Ob(cCw82+)=6uHw zfa~GLOEJTS`Nu>GcPP3i7lU33z>(zB5IEdPkUcX+WcgHNy}95Q_(krLIff*k`b632 z44IM{iT4{{74C00UaIMZWVJl$(pWVVt$7eT&z})Lb=0L3Va##ISEntNB$GsXu2>)H zK0`eXy;k%XVnMmquFu!ocC)&uO)jH@U8{-mRT+;`vqO#{WB1 zhY6bQp}_Hgmch!|Yy1?(XXFMAh;%_JK`QQR0L?`85Xk3PiO%lPN?9mT(jJ-o&QbDx zWKc`n*09uFiVR>r&n7vIZA+t*p6VD_bF%^DgdwoV{1^Bu(cyyRT=<-%G|5@_xydU8_fqCInsWMYK<@Wa^8Iep36%D*7L%9Z1s^LTB_f62%UWs^~;}31qRJ? zSEy#9)BoIs6dDAH){E5Lwu9OPdbgz1c4J;eMOOcNlpkOnoumc^0XIV}-tQK<&0|dz z{wRza!!}B!ilxk}_Plp*^Gbc}xs0`WPl)x@c;?rMS4nMUE}rk8c=aCX4W`rF!K@XU zZq!&+$fi^AH*l{Vjn?W~R*yDIotd_F5;rPz`_@z`*KZwDpJ_q$s1T2dP6PLSubq^q zLWsDyY4=uM^87^8KYkFA%bh=7IOJ0Jo=@{l=)NM>AfHLlO6sRPlY0rnID7e3zw3Jn z`FljM)YI|BnpM*+-h$Nd&2GJ2g*OSEI^Oqx7Qt&2GW=*QsJ_JZ8AP1Ny_jfP$t0>a_BjV`(c-={=#?t2`U?_gXm{{tL3y1 zBY z&V66kd4c)VnOQJEjpKrNLAG~VhkiHhMZ3cQNlzev!t3U{Fo!2_Ce*p4Pj3gL>+8}L zTo7F1m3GvW&gY);dfo=m0Htqqmym`0^9f%$zf}HI38H4f-5l-Y!pCHOYq7uYi~P|! 
z3r77)1=Ts#)3A{K&`q7Ej%Cb;ACCCkz;Kh){?qZw^3^s?Z1ey=Yv$g%Dsn_LKQMx3 z3K~@Wc${WDnaDf-LbgZ^Mt$5r`ee_iysG*seNhwJMYj}s^F85(bnE>V77f>@W*hff z)C?6U?(0O3sqOL7*isUx%mPRr{4t*FNch_LiM+^E-L(NNd=;5URcCU7gs(j#JrSda zl!Bq7DCbDx^Da9DUreA#kWDqhY?ndhxp7d(xQ@eJI})ZR+$NUBGl~sHFuJ%h7le5} zm4LQPc*Sb>N_Zd&ZOaDA^xm%YP@Sb!FNxE@C#WKEsHgc4D8EJgr*uDiEmls4>AU7#k){*S>t&3*JIqP%;1iUw6ts5AO|p6O7$?sZk{+l9r0P1m8peMr~l@z5g3*t2H;nn{lObg%Y z{~5n4l36$)$Qwj)A{KWYw`+t$UN@oe^R5n$@WBY0pM=LUrfG@>SjByTEalRt%kWxI zSS9O;lWaqtO7k7|YD@E(cT}Vx4o)^3ew+ODv+&-N#FHdGq;qUS(cCe+{1r*9x*35Io1Yr769y&y9|0}kmsgoIq|Z10I~kRKE?FrP1!|lu zN3D6&x$4bTK!vEj;>rpX&{?63v&TQs5Ri88`GTkPEKib@4v3F}a`OR`aevYx>SwDw z-`kb6o7LT!j_I!S!%$~asxGjap?8S>{*cs;YNh9afmR7;IPBmRY+9Lo(NkM~_f7MC z5nTCJUPn1Y$Lz=z@NZn4t#~-^RJ1P(?^{!fUsS!e<~~pt&bm5kDch>l%#)Kg=e&6Z zV$XO~g+e}*!4ZY-65b5r7053(Bo14o${_CHiRq7T>H}WwaK-hIA~0dd^p9z>%jG{k zDGHs$pL*uaD{_7(n`V1-JE42onZO+`y5M9=w4}-3AbRB+_T6z`BR~CIZ3%wNHnR3M znL6I-HY?=UGQ<%2s1>Zt3mTuJ?VYoN&H0Un^RiD(Ccj42J$=Y2EEJaS@7Gsd+=W3_ z`%7g~e!O84Rvec|n0-RFoD;87E^rJjx&ydE`3PQ1m>1Jd_hvs$8(VB;b<3Au@shPe zG1qtN)doM%Am;-WgVNCS$Ie&3h~CNsOgOUDzE_qj;uZ!nDsojzJxCGEK&*Jz9rWqR zVgtItcK<$TD{#L@eRst_AL<4)s_5#X=0o zCO4UHda_u@F%oog;2HS&#W5Q-HnMXLGFs7IU%Q`hiGIQz9wT0iRo zgGUwZ2$);QFw14bBQmWwWCB17?XIA)8(CImG%zx}hUScGOEj-H|8M*IoG47+bWwY0 zn9Xl-heDJQ;2{l&*Lv3yQ9c7vm?1Hdi%{HH)sX@V69GU7_>}x6OgIQX~HA6IpJ z{tA?Jee))jNDb8_Be>2V+~))8ufP#QhYF-@hzB1|ukiub!Dmdp9Eb?&{Nh+Ezwxy4 zlF*1kvp@T`^L{#1x{-rS`egSeb$9z(oOlwr%<5+Gnv;@(##j=Yuo-DwJ*qle9Di_- z9PanaTj@#44K{>2^=%b$VvzTKEBnc{eX`FOqfES9|JjNZWgq96MBO<$v7W#i7(ZPN z-+NXcNnCeBp;d9!Tr835@5h&*$J3u(r4e5tkC`YqPj0wi!<_2>iGhbV6HsXXuh@1>R)TK5riI-@-aUYpO`0H50|uaLxOR z&mm%S=KI(W%%`8xi_fE=5WV`GYCc#NBTz?TR3EUz3>|S%NufZja4Eck6@_jXn0F6$ z4)u{X-%1GYw4b85ARbziB*q$am-_AYVbBI^r%ZdpD#>TLPzWC#D(x@pJGXu_=FF+^ z;ir)CV3qXHqto`P89|(pxy2j?o#Gk3tAk8i`dL|9b)T6sPJDvmVpH%+O{EX>KJ{~} z55|`l`F0D6e-%N)ao*0~BZa=kun1S&eKO+DHM}}0PV)6db#gc=!qUdq+x+<%4!j=* z1UYM_R7lves3Qw3_jcDlp~WK!E0OBO-?wiz<_!I*Q+>5QwH_iBEOMBr(Sp=Wx{UPT 
z%Fqf&Yg?6bCWx2$6b-Jczn!SFKj+6gwPME@IIpWK^RuCAL~SJ+bz?DBlW!c})(XvXir2up=Ld)d7eSiit! zq6drsJ7jx~^r#yz@6sm>ccOp9WBGU0i!oAEn_=32G=KYs7s=4XMIg;V&|RKWgbyps z8>eitrE9un7Z(^2y+@nE(O*U#deUrP33F(P0a1y_mIu&uaX~UO_*fJUzt+* zMJ(!@_j-BWL)%k=s$KC5>eh2@oX)hcFu)urlP3ZwXqMmPON14X&j6m`wEL#*3-Y2j zc@kPp1FNzxCLXNGiqa-vLmF(H16H!nr%&YU%} zTv&A1g9#fian&^w!+5M{kZRmGWcS_p(s0%5i?oV(k|)R=dK-J!J?u!B3)9~2pQXsE z$Vo;B{WeO*t4zQ+0q}Z7$^d{v@3Kk3ZN)56-ar4hU}oa}8D7V?%!b(HLCxE%X-K)v zDSUDCGavBGo&24h5O9waY9ixG_P7OllkHVTl5gNQ&WgMadvCBXR$sk-d`P7k81NAR zxLspJwaR!1psY#AeUV1|pHt{o7J}&-Ad@FI^o4|qd>w!d0iVVeZ1v|V9~jcN1+pb5y254oICjiOZ_Do*Z$=l?>(R0EB3E&yOt@Iq)^fbULgOltluY%kzabK>;&06XBxG-k!p0uw| zP=Y=bq28)|PY38p(Wz}F$E~w#n~8an$l*W6wgQS_NEnN@5R<1>ZT)MR0Go;3`pqjL7qFA8CofuE-gX_fNvFW~)MHSZ-ZrxTrzr-cqH%HvqOk4zSEn*z&QlR&1a~o<5!cV=bhap@aE;Z+l9b{RdO<<$Yo} z3r5Nb34u`-jqH&O&fu*}Ei1!ut?flM^LN1*#7mWy%iVK+6^bq@qy0 z0FKx`w3>kNd2vXj*{2w@A!kE>*uyW-wh&v6dWP2XRNdd>T;m`@9{@@%d?q8K0N5E{ zITBs)6<*V&2fU^6;P1N;&qY$5+K=h2O|xb>_ZZ*Zh2zu-3?F+Vlln98zN#l&5@)P` zqVbmbN3$`R5|w`Ae*CmnM%`+ezFV9s5K_$^!QPv|gRhZXhnGA2OSS%cFlL%045a>p z;$jU&??cO_l3wBbqlLtS4((>HVA?62`Q8*$?7OFf@5`w!TFusZ4=Ku_9o>HDu!=pU zxUL;-(ZIRuUOV+RDXOojG8DY7*TuyOdSYNMwCrg@UBJR$T@CEmusmH3s!CHJUB>Hr z&;yCZIaZ}7-!!tTL<1$i@mKpRZiMmnhNHWGpmZPR9jR;V`BY(z7YNE*V>Olo5BG%A zhfdX*lKD5^D4=GXJc$_Md8D`;iwZBq@~zmB6-;+6l*e*Ch=JWl>Jqq-wX?B`i~!g(g~cP$R9k49JF zu%?Y24NFMfqa5P56kJmCa^5iT(1|C>s|(?>5hAFGeg2R<o9)r+yP zYJR^Iqs1L&*QO~J>R~!6tSV;rO*8M*oRr8(OqIjNKDmq6;JjQ@^JwLjJRCjHX>+7+ zeDmEY?e}9Y-=SW|>LkUDbNja9YMa*=$`luO+X!^ZUXv6H1ee{)G0>-Wk+Iqxb^#7N zQ+@RcZ1bpLYgF20p+H6W!*G0c+fV;}J+|`Vx-j~`qops9@HGR-`KkHlbW0ett?y@?63uD9sm1?Lhkor%-JxnOd)AEj_iMvd&FsOXQ##>zt?!Ylkc(k5 z0#k2<#QQ;U&M&F1SP&Zjps2eVETaQ7{r2ubW)4G&-6uUbR|3Pd7Mj zxpg^nJKH-T@Vur;?L}~qQ@naPmH}D#1O%YAKTlox_PXtm{Y#25AN8cy)Lob7eUK_Z z*r}!~oImkx(_;E6i}^|9)DaG=p-&_!zzR4|4}7y!C%H$a@}YhAp8RSx1H?*w#E=i5 z9b!UN9h28~M=CR+nxm>4DGAEv-y8oB5sNi)da{u$9%TnC6o`*2vL*c#h{mq(KShA6 z(okPyr_j7l3XmK^ZvW||cp?%leS&P^QrwW`Z9Plre7oN2`^=IG<`9bjQbTUZK#H`r 
zRfAc4<1zrhD0=T>L5F^Yr-Ry^6CJc=+{_Zru&T=BixGH}O*VS03_Y*< z+L&ML4uYffV_L8%8HgN2p-MnF0%oBT&px_&+tbWrK%l)aVSFd@&(+jsx7!!b>}2J% zl6=73i`T~A!5gM&{Ex4Ddqa>Sz%al&ASG|zn&xfHP(lHNy|;>RI6orN7IAxt&DzHvLwjQV={ zdli7i78B~NRj~}&?s1qlSS$7U6KL~dJ3!P?NQ?n>BQ9M+2sqHpy9&6FghwnswdHAN zMOyzui$5nOmVuapn=+O1d5X^OFRD+WzV~wh0}--oIiDJHm@-BuzhAV5JIXjOd4E4=*tuurZ;;lF z5EO(j9&2W>?=?K8S@blXRM7qSndVmjS1!E^$7%8RNVw5C^P>JmAwueD z|Io%RxQi3ca(_@Q35SgVZiqlfu*G1ekM?cg>DIHQz_+8pO}ktTok(R1ARp-jO?POs zH3G~m=urmeBcBC8 zdfc94F@>xEMlVWRh&0Rlmw(afOCt&AHtCsYF<8kpz0rf)#Xp$2@PjL)>c1PY)i8oP z3A)F0IAYpEDzp9~g{-RuYvd9K6!!cOZ7o~<+5Z&uQin$1Rn^IKXHFTce)$)#eiw`E zc2%40`d3`QRu^ZaQS2|F->$YDJ|rBVLiA(5BMjo!ire7 zW5>n#s*O+ZjXq1gPmI*Ssc-O3wL%k(SxT%l2Oo{b*A8V68L#aPfTV`K z{C35#rrI&Dik8TgCv0JnDMOlU{}%cEKF~cR zW|F-uI6XZrOo1V=(Qu4>oRboy&r$=Z^G?bEKK&|uCx0RXVJDb?T(AKf->6rrJ~aIa z=RzP1w~$Yv4W%FBf&|Y$MuR-XwII6WbPM2E0UTC`LYm)41I!$Gsv>|Pn)y1!Nf{;W z!ASO(J^6boM8?l~`RaVq<2A)njV1*rqH5%!^MNnVf#p65467^Dq4Et-K>`XeRil7D z8=!WG3DAI@n0W6I$E0g+Sua3yLQK~1cPTL38Gr#B1?GSi6)KMUALg`oeM6HtxzgAn zx5R@>BsykUNj&zaLgW}enT4hwO0*inAGCdLYy>LqFO-lvG*XDP(JnpROZ#oKjuhU5 zq&D;R&}M4K-+v4dk{^Snzl3;D6^Z2Adx`@sJcwV=KIsQB5nXgkHTblL0>_|x^)Q-0 zqEGtM#6jc&)Z{rZlKWRy7x3bQBVLz~7D=3}`jO)JnNTTJ^e72$NDYOr^*yL>SREg< zO~nrcG3}BhUtj+EzrDIFfbk%$D3qW_DTC;-^)(+(7wDcXY+9Is4^KQ&^_@`XzzgsM zDZ8WRoI844i4lsQgrLknfNN`o@(g^}{#Yac&vJX~DE4mS%70s~SF2#bf5?;T-+Y`Z zt4xSNYD!qZ_Oc7=yPKgFO7hT)p-7R1j|F0ejHthJBMG?MrsnOBCf)(f65s%BC^2!n z5q<+2epdi0bTdQ(YAy~9FK?m&t18-aPB?1Yk@gy7T7U`C;i{XN9XdzSZvsuhk~)y?x!n8ysVrHaQTW;4r?>ZHlOblg>Ro-jabB;83#42F?9-f=SUHuRQ-e$4%P zHv^zQ>E+RR%Lq}Fzi44F@z+6|#&)Hm3*TRwR3`LA+ntu5`}UZOzMikd9Jzw zy5l#!i!nCRKTqwuCo#7b)PL-b#B;!|v4YYGTLVbI_6u0vd4 zhYT;tUN|7a{-}A^Pw?crulcK8Du3h;(~KpL;x4YwJcj|N15Rsf#j^1U!O0UI8xCMqA%p z5t7E~OsF}1Sdp|7&u2sH*rfOOm+D{GgNL#G4bq=yR0F7;?_Nw47}*IJPJ#i&F`Ny7 zU;RmB)?er!K($#eJ^3#AyJr%A=hMF`9y~=;&jsmJw8YZ8U88*82=da`V>#@4*$`9Y zTZB?qUsMC>ynpr$l9v)N{md692j(-p0IC%Poq2h^<(G=%O){Vb>LrVMy>Zn#K9t&U36_uq~ 
z-&o*j>cL+Fi1+##M8E%VTa5W=QrpoqA=d%90uY&)&jVRc?m9Q!gHJvR`48xh&vxx`Z%?r8|Y)_fv z-Hf7Wpv1<|5u}o2V&RxfwQOO8Cafr^r{XUhF|A@t^B`L6Jyz@TQ__F1PgMBId$~v}R}OnYdkiKs1^?%=Y>}$3j2}9v>#J2V7Bi25Hc&pn#lIjnoLRwh{+7 zg;y(MiM3A;<@TMvnp+>iYNN!4NBg42Vp`Tq>gCX1ff=VVut{)X5;CX>Aca)C<%!?$ z7_DH0BeFnF54^=2?j-q!402ZFn$5qPHU@o(#tqT27YmLR)OT6N30gpzC?iT)$`wQz zyj?``9l5}?Y6A@6`qvnTZpaotrl!ye)N^ezIm7ZCsO}^q=RbfHd+t$_#@5slnBrcG z_20W+iM8LDLp@(m54?7gWQwy+XFEG+qErh);r#-KT#mL+^_`VWdEO5`Z)_`CAzQ8|5sE zXE9bg$a`S|V~g7I-XZ-9IoHDoPJitkR2b#>pDEj}+skeEr+klW(g{y^=jiEo7030s z_wsxOMs&B@D%W2pa$VSv6U}y!peFx;5RGk;6JFhOd{;8y@?95b4l_+Py=j@&ydbzD zgu6BcL#cWGph)Q9O90wFtI~VO-vj$&$5-Id;3a4`k%(wqS_NMV?ROy(@-z|LaAB$$ zvSuY{y4OlC%)#hBJ<8@@yNR+YrG?Zz6V;IGOaNsDU`rQ5n%&zTE4H+hXK|)L4&IGW zwTFAEK}RXyR!04xC%v!XFjSO%JP1iN%dNYTX=VT;TO0Qd%gk?{7MrRcr-CVvJDr_X z&(Uu#31{h8-=EHGv~xlr;ZV(SPEvPN&fj~JI;6eJch;NfNyCl^S`4xIW7fOJ)Zq@HOR_nEuv&B9OEGzzgpbIXeM-P1Z zaRc{xI}fN)V!Nw{`KqR|AB4(+skpjnfRP0+bjYmAVtT=RxB90bAaHWC`FKLxHJS6V zGbbG5{eT4pN33z#7JdP;d#f-?#8=|k7ZxbuY7%OgRldl#Pu7;g`uQ!8^kmnD@>K!m zvEt59liR62#%vIiIy{S3j`_AjCl-MmH~6$;vWJ%WQ&>?G!PG*QA$sMjmSC;@&}aDLCm|1-_IIO3{ONk(PVuq=U=sjV{*n2ysS$^35Nl zCh|&Si);l*7xGd^8amRU^gwR0TneI>#ejnOD-@W}54|ZU@%Cqe`l`A$BmR4h5EjeS z=CMOPLnEin(tj+tc-43dB_pSu{=*fNSfMMa)qj$AISl~Oxok$1HK>r2S>fAoejew~ z{r|N9_~KjphF-gJF=315rNR0XD1p<^KicxA{8c0=XBo3-;pNb|^Gk1l_;FNjT2%#67SVP7$5s#gTp1@-rR+t9|j&nv$nF1NMRE?lg& z{wg0{T8kT(aG>9rqKT-fYO6}_Z$gXQzX74cB(11T?v(mS4`$O~XltCkE9bIYsO3Co zIb#dOZDTkbT)Fu9k;AU1Rs4N*5mYwYc(|K7BQ*UQKakt#dg8FB-!|ssTKQl9RJe2@ zw!p1es0w5~1QGxt(D1voJ|E~imRD(E)6xg}yjFEeNFJBz$nygwBo)+bI*@7utJoz_ z>oXIL1~qQX+0Qfo6XB46DW>E4aEwb7Fs{`;Y+1eBO~|503FC~bvbjMmm9tb*sDi)r|*&~i_GCy@FB8H3(rN>Ak6&^GHi%XAsP#{Vg(HbVAi zNQptOF9zB=deySWrrj=IQy)H@Nk|8Xzq4bd<6n+(U1oa)Pd#pR$}`N@aY8$#p>Ljy zMYO$N!c2Df=@6#3ZAbZWl08pW>!k~UyFt`hvt;U|R55$eC5!q=MUPC)VjtQj!B80W zu4DR1ns63Q!(acYu)NBB_6V5Mcm=z%UnB@2;D>yMo@~=@I21t{X5H)`7|p_jdhG%WAv7ufGfT$KK0@%e#cPk2WhAFLYIO 
zS~lfN){_y5MmJv`^)AJqcLs@F4%p(IGYP>$YcRn(?lAp_2fo4;e_E!iS@zVI57TBe!dt-xUBXVBuNUTCrvIs9eTUb@D zq~ITQOM2f3_h83wO+5NUfAv>t!a*P_e$442X2RvH# zD~*)=6#RqTX6t3TxBWP%fN4}6#2CX+9rjZ`{AN}Cv$p%$?!>MrgYzrjX&zt%QOl5Y z7d5K98*Y4~PPgQLP$238shLoU_rz9)Qj9?Zaj1!u2`8IQjsj@@@E*m)_NRn+d%U~g zQ7sm98B<4+RwPwvC3J_68>xN6RfDGv-Fvrv{&6D-jqNO_al;=NJCF=uot@Nq7teDc zoJA%1Vd9 ziHz>AMF&dm?K+hbyK(lW8((M>LKoT|rX4fTTNNI*k3K0yzL*O9zL|^>j-YdVRb8Nv z_;%4(e(!!t9bs$;KpK-1yE@R|ZO6p?HOnDxyLEc}*$w=G()nbAY2Ts8H3lDfml|dQ zPfTz^?`}g?)?=|Vb#d4v>5&Djn2uy%kv`-hIY_JN$8?b1C$FP^8&XUsj_2RTC>4g=(sabyI1|7R z&QRKuG_U`_>b>D<_jW75{h^J}`h$a~?_j?X`S3HF*4~T%hFEC6MuivNsk?gX(2CS@ z!1vbPDy`UQ;TptAo#BG+TumnzkM+eYU7>uqUpw?vY4IF(2f)(qjxO?(FnyZ1GMGoM zRG)qMP%fH)><=9z7Mnb_b9RUg47F3lo!(FJia6L}YB?M}IBP$wP2|b^NPX+F=C8Jl zFnMm-fXUWB|5i}HG_u4XEpF-D&?sX$dfoBT&iJ`0y%w})kJMY(^l-k8J4h7dm*9}; z8<9TuQY}L3YbH*S|2?kYF@x}%0Sv}~*USIpSQ&uOgKmM&TVx*^8T7bRVydghD1cY* z*N5%`VGEV&Okj1MnMV(>LSqMej^I?j9*2zi2S^Z_EI(Pg=oP)PFa7vkzp2b=JoID$ zjtKYwj?erz%d$+fHI@lSEjD9ove^|s7#JbK2Txh%X^3cA2=OaieKDs?MVO@pMDrbS z`zml6```@qPoiSwUTxnTGu#?GmU#KA@f41k1y6WC$ZPVLN%Ohg4v74MfBTWlDUh#z zie8Do$UFQn)Fd$45?IA^c2~X2EbwT?*{4=^Hu*)2D}VxrCJ7Rv*!#$?QXG1*4asd3 zTA=-5OpU8XvgZ{8opK2lDFr5sH{`_0YPt(;=J_89bz)!%w~qU^T3*U zVJUykOU@S@WV3k1H0C%?-Tc8hlsMXJr@VM%{O`u@Y0p-=tDjeWrz?4MFA0gx5OiSf zbXwh+bfJ&QVhCrqgl8s}q0q z3e;Huu0)T?6)TTD=|&}WOVBhoKMGIY5lhqcnr|UMjd9R@pdNV*qW7g>5aytbCOn1i zmHc=O>frCZsr$9mqcg;a%5#)4QSxAY5;NGf^6ze~j}^0;{b6(W|7iUV;A9Dki|Hk= zNEUTw9N_{RqITYA&Y<}6cd1A20veOz1Ahxm;Vf z@{jN+_4Qk#oNS#r0nD%tSTn39kcp>>^5P?PWIs3S0W%P#2x(26OonpM!y#|Mz>BSr znq)pVlb^_JqJD5^LWat)lz+pvMA19hpM#?hRM$jzWMd}T{8-P^4$Z3j1I(m zN>NPB%)uo0L2Xh)hW($}3iQ%!{I0L__;9n@hWkAGWd*~D$BW&HLO*ZtThfclwj4*# zY-tIUY$Ur&ZA6;TD8t$_4;BD^d%N%4a-lbL^yfFU^)4*QJBjVe4 zK-S;B`+goT#`W)9xjzD}`2su4)qBA?b^MDG`@TcnpX=W#ZCp4pwX4Ivczjb$U&W{B z`z7qSmHEAIutbou7owos%!}trEZtE!``QPt;at@!T49H)4INR}Y4xf$B0EhA_kRts+l<-^;@bmTz`_@ z?)vM;q{Pqtz7D%gT21-YhKl?*2qr8BYqD?s!x6ny9-{SiBbRDl>QQ8SZ@T#=cWeF4 
z9u?aaxXBh!B*FyeNvEu*NE`hNu6^C(!<2&JS@PLWN^D;X`0RzH;NZRMOmh=-y?W#C zryi*?WQ$y)tw@~`qL^5EhaWYGAC)9P%n}-d9*RFM)49fs@e}lk9qy_cZEDo?kvXBy ziy!$5nEYdoYC6Mxp0O5+ZiqiFW~+n*$3t|OpvstU7Sw*CPHxCKiUNUi*KRbHI92U_ z_4|2T#F*ePDp-`j``~UJE%Bu}O93_iJs6(m`zicN#D{YP#3+E!&`Y!eHJ4~Ko3!AL zo{VV3*E{brGNJpbI$A}>lsYnk4#IGXT$H*;`Y+F$N%TtGZ}&=~4Cu>K(o2{7I0Gu8 zXvRP&xO#Wkn=DEyM9^d;+c2hrkvNoue&eIv@cyFUq6mycrjvF0iPgNo8FfEP_y7*K0bxA_wgQ&NSEC)@9b})?}2f z(7Q6HgDg8!QbMtI7b>n5uHDk^76Gbwa38nS-Jjvy4BTq^XbQYf&PJDRN2mO*o+{r> zWjNNTHJ@kU-w56Jrfjs*Y1(7mtqf>HP_-x<&i_Th4*}r{mexGlew9JFk(G29qMp z42xMr9Ov~65+SJXhO(f4pXJseUjG{oJE|S;aO1SjbQ0k)|n$H@e|6jAfhO~ z@i8c=1=TGCUc*Qqrq=U~Sus;N`i&b*ez_ z1ys{?gxX)Phc=n}3&j(^UsVdvu zM9ccC9lUL!#*qqa+>Cn(`R@uGx!1F6>w{s&u=KH|eP>eUr0snGm~7uY2Mst_2D~2z zDBd7L+g9qFw|7r!>7?OejkcegHm)^j7sqN^x$^muo47sCdt>%`0zXtSs>Ws*_2fN5 zgj`6aVDe=`su!({qaL$(_WM)n3^JHfV$MJQ8ranU`JOv5J4ps{2I^}n%v1(H>z5w4 zGrd^>>nzZRqY(E*(Va-zlD)EGA^<-EbnYsypDlPT#Lyoe!P)jOyB3YRbA)?io06KP z2C%PL!(D~|1+VOq^-4~Mtp=zg9`PcbO@;&5%H3D7%c(#YjhyYA7j)Wu+`p%F@lpNKYz59rzYxI zb+KCNn|j}dxT#`_J3IYF*RFqQcT6@C{#vNIL<-*O`<_hb@!c{hLBMtxq4BEmYBCpQ zY|Ry7%}~j=QcR62CtS(fdMC>3@(jV9{9lc>#412Ky4dxLYf;#a=sild=5UcZ2px=@P7`q912z2mESf0BI0#QiCrYo1rME@Ivi_C~YX0M-?R zqdz25X48acJbUnW*Pl$NVokjkaRA-r#W7Y!YH-)>}gPg!Yy{*|v-^f$`AvAkC5 zg5AB->sV=u#{v28y296cD3e`#}U+>9&orTqc`ZCDsIi`i}Qc z9W!K~($D>>bEk2r@Ok>&b|~{H)VMy9#`feQ{XuX#6{4AxAaMawM5wAEk6=XB6@kWP zeh6#T6eXGQHbo6#62nrt3swTzzN7lq(`ZA2==c)U z3gg5zhGi{n6|mbn zw`SAl?n6@Heu{>F^(=`a8*pZh7_7%B*E~%-mbw`Ad)Nf$$QtZVrsc^P;qvof0gJo(U_5m|9VzP1OF{de+rq^i%U_M`yJ|RZA**~fmj%mr4+O7%iI#W0# zu6;f&odVECzkqq?{L&wySfP=)klvdHw;)rr3l$>J3Fg9&8naJ1snz)&(}KWB&_!a+ zDxJnPpQXx^q%#Mz4Moi8l@`)6EDBDxE zqrze8U=le++y}0J7KhttD1qDW*I{JdVVOC6BQ7Z#d-sUxiJ*X$#oAYqh)S;G1ifrR zHJO9+gSHp+1>ew&<<@MOxsK6MoTrP(3tnG1KJ5`=dhayDX`v(1XZ?&S+GH`g{G$oP zxTrC+ZBmVuV7Q2$*i2kdaB}(W;XTp0}1ThLy96nd7AaAbozaUf{Enbt$^dQceq zVBBHG<}54;S$$U~^tvS(A^YKvN$j~a=)Rqk)@C3Bz_^G*?oX>PWU62bRJWumI!*o9 z5K#~vJ}na+2^LJVM0hQ_yeXOkp~OdK`Sw(-;A%VXtM!r4s`lA<@|13ri~UKzGn6lb 
zS`te$-2mb9HW#xf6+0S?+Y6a}g}amT3x7A|lCwz9U-NL?MXgoi13a`A%95w`)YOQcF>6$`zS`^t0z<0vIynOm8rbE)6~xZ^4&$; zb12BwkOmPSmp2wN?tAiaNH(&89Xmr5YityV)H3g8xH{!U-Ur{pB`usp_WR_M@TgCZ zB+DAi-mTqmOwT;$_#9hjc+cf;Ed}RE_mr2#?`IIRlYE~@*oV8T>B~n(*&whkV)tt4 z=Ns5wl%Z4_kWf%#3@N#RkyGm@xB2sy3%Ye(mL!WgN-ZPPyeb-EX2!kH(`7p~JHl

NU|K(7V?Ak@$_YkOF_K@{=ErlCPb6~2-h&?R-#|)?4=fBg>;*`%&?xTdy->|O#qMtsJywiW;5E1v1 zV5M@w`$W1!Z+p>T;_(yI!6~(aaOK7q3cp>=xjqK{)=f(iw9`!PC4H#1Y~ezN=JX?5 zhC8pWp(S$oO@z#XTJDA`2dVLa0Xj(otsjAvQAZ=$xFj7Km{?hd(+2uUM`M0x5*Dr~|S5y~2p|vh>@Mp4zzI8KF&t^#);)OG^wl`jH zLdsiOZA(cS6%N|KPO87_M1JTv|Im2SGG=zqOJ4etTXiwsPV3uka8@e1t@~HIU~~gK zlwDo}9(y3e4O!2wry5?z)#Op)EqG2ZE489!>$PlV9@i2%;mWgg@GT51ru&^Z}>r|7xdMP$y z;HSW@^SV)#&Cfw28=^1?h&EvKAQoO)D}0dL^}5j(4hR#5@+wS#1Q;+Yq&8bzNh5#* zjr8-yiufddHG*h`Sguy!l+1th@WhQhP?)8h4j%uzJhD;6U*+V8>purDyIv7Bx#O1f z9$lXR4&K%|EI2CdE9Lk_#HOI)Ek_B>wNXgnuT=#k0&yB+V)h5KANcS_cTZp2485>D zIhj(#bex&n=}KL$(&vdcod0AcjHKb|z_}skZVf2!P)$-m+l#}@H)-n%B`?%7Vj~W1 zk+g*FKDqqvWz3i&<|wPHBEE6zc^*m10o8I8!_1#`$DIonS#spd23VKJGd>&L$1Z>o zBHKsSsOqC5S1A2VXf%e0rkjXuMsHROr{S{HVK0(7oYF3|Q~pKwO{C4ddr=D;Vs(;^ zF1kP+)@a~)^G=nc_VaB`kOdy~VU-N%5kM{Qrm*Jq}*q#vDJa zIPwd1h1Re(Ejg-E6)tk?Kpz?CBNiMU!m&fPSW1G6{obr_$noj-e?Re$aL-JgV|351 z3z(voYK^GvXfm_z%g>stVnzR;CHpNrh~=woK}nbQ4v(b$=R1p0M{oU2Mv#KH80o5( z+%loMrU~2k*DRJ$?EdFFTWh^FY2TenDME*oB!J9EX#>DFo>Oqp+-)&i^C^8er5SMF zEA0{S^hOSU_UYQ`;~d&rQ!hTeMivE1Cg>YUXsY{u^J}#ldeOkkft_Gfx+y3(5Aj~g z@UVMEdliP5Mpv*s0Ln6)fz`XOi2pSAFQaOlIIaB1DF(Yy1Eh!?2&hG%Mll5}%+7Rw4w zLNqCX^gA|RI+2)yKeM|80p^H}8tIAcn92L{x1P_^yG^kRck1y#SqFd*I*^Y@q3I2& zugXL_?}d>d*e9O0&QBaMbvnIN{yoKkOPQ8t)KY;WUXbgCV>NStnBH` zdQ*a&DR8!M~^fSp3eqw)dI z^&d^T$3nDcd)dERc7;Ct6ipfpy#<}R7-7KzwXG}>(-$tOX{+~j_kN0FwseXue3m$D{EzFRjM89_cYhC z8EhHT27#WqdSIV_co9gkYL4pY5a>S(6pjB92y<%kaRi4LJQ}Q>VFf=3t5@!n6MP-| zK_pNVZ((+UnE;X=E&i-cpe2HzfhOz`Qw-^#!AGUKhr7?K{j6Wg+4!^Wb;LWDCnYjc zB0{7<00d1fc?r7I(dI^H3ua8s^-`;yH!hCFD}a>ie`Yn8FKhg+tfkyUi| zUERC+xWmX_Z3*(Yh}wV7S1UN1G#%?BPl9X>Jy@RixydsbbNYByuh;Gp+L@^yX5{>-6oMH3t(yY3 zLZp&XB#SfnoU))JiPXGHQHSQu$Mc(D|45PZXQm71krgv$RAS2aarf0-Lc8$Zeh$Cg z)+uOKmWX_)uJfQT)G{_l=TC{NF?jG9iX}f93qUer8&%IAS>&sxF5s;!dkB!muVvSFFM3dvak~UBW|0jQ-qz^}YbrzUsNEtD&Y0O54 zD5=2B@eQsvtT$5dR>d!{SM)W4g!=zI?$~{%z&XS^gf^v6UjsdlY>CTY*T(5eUz&+< z=tP|Fa6sFK`yJ1$_t?L&m%lpZw@L)n^3zpCqARJ3hP;$mUrM0+N7BOQnK 
zL=FDW5}Tl?OXLn*aJbL$t4_J@g=0!J+wIhl5{d%vm?nBMuy((p7*lZ-VFjLh@4HmtyKA=8S5Z{ZbWrox zq*L_JrpwOwAq*jm=_q$!6+=?6FUFt{^=So&w9#ZRj6;y7DQ@Q_@I$Dh_ONVq)>Lnd-%C7TSTw` zq{#0@*gEYm5dwF3;n3H%3LG}gJ%uGxnpUn)Yk z^53uYoO<*v+?d;4vn3|uqRcw2zFY~cTPW$zG~kqUgk<(?Np+Jxyi0*RcrI~^#~vSEPDrF{ZZ$piU5V4yL{CtHi!b<-Z+__)ozj`R z*y+)^Q`jOE24r?_H?P0 zN7tD!nNfUwo1Uq&k#WmfquC(EFdK?ays*G(|LLj+OCQ*)LbmcO%hU?PI5Xb6(OL0A z6M7{B)^kDorjS^MTVYk@vKiiPY2%%57JO`CeqW+wHbe=j#fky=wnuaMy0Ixqf~rR< z@$V2(pXnApeFCc=dC6X=bxg&y1ty*`+k+C^D?Ishuf zip$LV5<{=bosq0f2|M?xgNRwH$pY3$ck8JqqNrb}4VCp}mz+Mz>;%0O z7&=dHA>==fj0U~ZQs+SiG{{kONO=Fdb6P-Rcr(Lb3&> z!&?JDcWGvbKl8%9cdKlBmk7j zMIj$57Dyg(`NZaEJ?N9MG*wL;FpM*2yO)?%>+)a1RLToM_N`idHJav8W}8B^+pjpH!lAot$dVApVUzSB%^( zIxdMzB-nyB#RDHf7im&;y|4Q#zq&AzM*YzT4v)xq{3L!+5(Y1yi66K0>O6aSbi_4w z8SE$o-e<3EKBx!!dt|0|XA79{9L?yo2FHU3EP{0eK%@^_PLZD4lTd{LeP|V55v*7s z0wZU9YNTVI3SK#&QRzr{I?l8~+FDI^D_D5;>lF{h_x40&Mm%uGtJ<|PAm%p?nX$g7 z5e{VH)8c#LIwQEKFw>UgC>4CR$paXpFK4WXXYhRzDP!c*6#SQd5nL?Oukys?6{&n# z)CYQuBy57Kkyn;;QY_`q7$LQdeTxc_O_(49M&3fw751zfRTgO_#qtmj49F_9QVS$Y zXdARoi$y;|#99g8?opPTX_jpk7;E(QLESa(Hqa0}dr&bNk+3@5k=ysr(a{f{#;)G7 zfb6_LzdK)$PNHeMYM|uz%9oNgs5Q89tIF{Bx$H43F9pxb7k?SdiV`i zGR?DafKEJSBHMQ??R5Ot1#sJBmYR6dJH~On7U4;meQ^_dGryytCJ{193h{-OP{rPO z3wad8yij`TG@+w=6Dsf{9|nD^OxGaVB@1ojWlAN?#VC7Tm09pVwZ%U*)y{6ucYTe3 zXl4;98$|89s;J8RJeOoFV_yzZ;-C}HIF^3LuRC~C{U>rEh6U6$q$f=A_S+~dB{7hN z+poi)6N_3A7I;lmb6TnSxhj;3Gyi2O97j?0u;{UU%s)`U^GLeI4r4&;ExGl47-KS= zMj$F}hAu{ou7U~ijEQc_{fn{LX%b5Tv9C^Y5chs z^UawJ@#8E_&dBp*NBV@&ptpb>sfT{(k((mKMcvlCeOdw#V-JN}+n-EZA}abl)#q!d z@HAE!WbK{7%t}a+ikWrf-ChGy%*1Ph+N^#S$hs^{AQ2G1Kz?cN<0EEa(622>=D2TW zcB5ZjN>A{`vu>J++%tYymoE#Q5&>r;oMb-^ib5=9!~7*ix7YkE6;KyWBvEZ z5X;!E0#$kHUWey(R?;b?KD&QX!Y$xFb@TH!CvUoF1s@B;d#m=#`~y0uEKSuDE7G+a zFM;WBhT4i9FYo_=;9WR152E&3;?Z2VH2Y||3|ET(6JOD$gsZQ+%Ig@I)}t}4*My=4 zPNIu2tO{J?#Cq(1M;fXYSA{9=n| z>!!nQ)>GDoJeB0&OzSmD=r=Qp0t<~h$H?NapU#`CV6*19ad3;rpz4{a*=56d#0t55 zb`@`9+WgzS5B&=xRJf1@m$e`|cO0KQbNTp%yCX{Wa^JxntfEMPYgD+Vy+J7f5xHDN 
zOCT&NvuC&OtVZ@x!xAVM7(bF;n$_+vxddc^eU_JGUiuZkIcw?M^@lz{0@Y#>*>us- zHIgtic&n~)3Mk;~gRMcz8u9LFr#_icSP{$kB21bs9H$EN}PfFe*xo zj@z};T^M1a^m=OF{ue>hyh|tm3#!#N>NCE$j8)Xj3qS{;`H5zu1}kTkpT>LP~cRIz^iYc*RA-9B5B`-Zcf3Ml$Wjf?WjHF zkSr~6nGT#0S$>Nxz=_^-6{(G@g>bFvJ9Wm#v7k)F zb1W)3E0rY!&Z&Fm3GRJeEzlk|c+*t;?uit8!5>2TZEL!fwu3}soifkCDGYkqQL%qfZ)bw#!|6WqZueY9-iE~QOnN}e#UP4}qQRS_ zh{{Ze$%qa}i$5-N_Yi?hX1Xu_?gDjS?M;~N&x58?7cy|d!p!^ABdD9y1eGrI2s`MA zh#kS*2X_unZ;B1n{dx(8L39~R5rCVo0}hnBeN(x(4zyD1!>hYw$QRe1*Z4D zT_}o_+e!Ch6fU=Ijm(xDKT;B|vg2G8e%%W>1_@Ishs7KD=f|HvyT|Fjb^+ZHiF}*n zJ#}%As$bhKwJ*z`_&bk|p-w`^)fw$HlGs4X#k6UqEzKw$3v?8muTmUpcD^OPC)FXE z2D(~t=mUzO5szy(d}#VKK3(@pp2i9V5izKM0i>Wx65zl2Do$5_k4<@8AQ)6p8 z3_jl2$b^-%2rm!M-w{{B`kBf2T~T7I%l3@|N6!%9;!Yj z*?ljSuR=@6rkpnuEKqX$gFwirB7WpoA_oTP2=QT|x!Fdo^kQUTc7>(!?5T>6iDoj$ zo{||CaHLDVQ8Mr@^onwF{&Ev0HnAOuNk9bJD4q^dJ=Ogg!EseSEH8g=@p>ZED93$a zcAvqR9Pr#wX36=`4tu(+gGqJCl^jTsM80g4wgh$7UPVo}7$QH9DwyIYUwD}x=esT@ z6+@c_Rxe!noxXL_lB2U0#iXIQ^W{5AjH+QjjDhGc?$c;k{j8;q`~W`n||o=a5| ze`17^1Pq;O;8b>~enQ4LNml7GW{MuaGJe5P2{`aIfiLRUN8*;6)lMJQa#nNLjn>mp zx~=#6?M30Jw?+0Pv6b8fDUj|AM;<6J&O$Gq63=c8KThA4xh^^2ADMXp$OFhDcnXo~ zC*Yp-3Y)gSfSq22-lfltI+J9mCQ=b3J~@a|vtsjz@T-sst^z8If^hDH`&=Xpp<(sn zCY9fh;G1*d4<&unFL`2G)uD}o*`x*eO)H8y5?kBa%#X?hMC~8pQe93jX1Qs86V0XW9#)+_K9-R zg~7_?Ylz=^di$&E zz6W^qtA7^ad*J>ZW-rE$@FFo*jTZ`M;FH@F;)P(}J$}G;aO0KQfJ1(-ekR|)FFMJ2 zed^b|^_{`s!|yGB?~{CyTkzivO$70T`u3*pek&&e&tr^z`VP_9@xyYHkVkj0j(mUG z*6mxf6;H%0`#wv>+K{*~48W*o{%kc&f=!*u+{0%pJ@*QeWpCuACF{>g4aXL=LLfIuI#L`;IoSSoj7!U zB1&vjTw6_8Q*|SpS^r3<-45iTS8mUSO}`Y%z5mM`hW!Z$FuI4#7D ztvMnOhB>aevy0qQskFsN-`hU*B*CgvYaka=s>hWPd42KRknt_CJp#Jz!kiuW)0n57 zI>Y9*e`#pFzU$6|_dUQf9u8^UEURG=<6KcdWGpg^9s_@E^w~;?pc7kbk~Ii zI?1yOCX(Bevf>KHbSf_9)hB%RU@9y{;@OT&RvV~TegMJ7v&pGOg&?(Up z6e=`+I5h{bxaOe&pqmsTezF;3CX%81g!mrO<~)1gyaDNfr*Zd>EVde2nD3~^gK}+VthSKf%=rCeM^4s@FXW(Q_Cw9CloE3J6nGrB zVXwcd3uiTQO8gAEcfK_I1G>h0DcsQ39M99e0Nft~T;fb#>K)H#{>}cQNzn!K`a-L% z-BAM`NZsWr^g?H(&`$^MhRm@SAY?4-f)hTgm0PZdf7yh%o$PWLawq 
zK0}J)-LoD!%}*h)$xZ|u6xrpO=~Dj8?t_hvVa)F>Ac-F15hRS~3Xg`276=PiO*fR8JmiI3g>b22 zN;pR4ec0zr@}h$CQLoF$A7wDgP#lGq{1Rg?Q1p4_yqEvr3IYC30zBLRN6PXeA&eeu zaDQ$*&P?TZP5rMwT=twsrlIoZ!PnO1GS4r0Se}(9ObMy{VLMIUnD^^&RpcRKpHpUO zybVTL|Er~`GgL3lygX(kYR2^d#d~UdYxCW2NB-@`g9Y}T*`#s0QsqBPSA1%pETy2` zJFEBK7mA!kxA~29C8T#qgd~qEvCa=<%1XbmYViOD;i{=Yb^$O_ExIu$sfud&E zz@pE z1nGi-)vOm}qExuxGnWl3OtckLzI6{UgZV}N zCuMJxm5|L+R_I}jspSiPt0D&b_MFa*-6_pmTr*LW7GY=c?3 z-J-TgS*Qeafv0+OB##z&|HBHqBb5Je>j^3*l1TM^z3GY)`_xh};)3iGMOu4Z zJa(N7t&Kw)v&R+#BodEhoPtCCV5``>z5VZ%@)NXENPr?7a__5cx#i`lz#Hw{!|dX` zkOJLXl^=VZ8i4uVpc`&i69XK4L9{VI1Kw_5 zZ?9>ZsH+KbXQacq}lUT%>;D)u}w;?@4|87K>(mH8B7(x{^ zbY8lWz8;eiqX6ynAMK&Bc6t4Uj(G1CmQ@auRywOc-e}aQxWnor!r%QxrZR*leX_d3 zV~XC3#GrR;}#>#uDIF+oE`UO7xJpbm0dFeEk!uL`F-|sYDPkV+AC1Li{JV$~T z)uO&j6A!z*yGFWYe-LnO__uj)kb=*_an6&5XMRVr#M#G*Ntp@~N22bWr*u3%M6h1G z`B!6Tqu92ZkS@6D|6Yr65UaS;F`a_wJC?^u8M}ms$SNY8C|q7oe+Ogt-ymxgPz@=- z6-Ret4lhy^S6>GHKNrC5VIfPNQWvR>Ch^VoSN5hI{hvR9t6nEWa~V*|Avq*79C&GO z_CnBd|1Tj&c6KR67IHg-6Y`<1vNs2tO26Rsx!8&FrMkB7|9)psYvo<^cCi;eI9{XR zONXWIj__m)R4@enJNNDdXrT?)mh(ZLU$0J4O9JDzUu1{o7XMmwWW_Cl^5ekJGG!pa z4tV4PTvE+*&hEa60WSI(%r<0GOoFK067e_wd*HNXQk?h_Njh=$KN)KLmv*fGM@Fkf zeH%uD0KCi6FT*Mb-MQn@Wm(hZ27VX(r~lw7rd;84>yp79MePW(V(m+s;h5sk`#{K} z&~G@v4Nt+$i#)9?3!A_*mgF;u!Azgc|1V*Z^DzA)vrBvE-gZ^*3(Rc?BgwFm@-}rK zK29_SsG}?p+OjK|(s4<7W#N7@6j+XiyLkjlt#&82}vdJge(LSdO_StE`*LUNr&c5C?kmGJ|#kbPe6iA*#x$MFD57T$Gb1sww$|tU=izu}^nAR*k zHLe%hTa6c*Y`1=AZgX7?PQve`x)FMtr#uZUzf!ut!2g4pjR{9Tc~z>TxasWfi(2~P zj0BH@)#iZ??z5Ac0+FMCOS2v@Hl5V${fAboemVv-4tHW*)A zwJAAR`O!juLW=pC2DMs~5bkv?w&PT69{oKp>UH9wNO^8|4~j~J?Z(-t5!k+4g&s6J zMs-dZsoIC1TPFY|u<`pOef^yF_0;qUxF@%GA(w^oIkK-kP(>FC%#bgPW)o0( z5xhT;k4_6a40`BA--T1UNx5s%R6DWR&?J|N@Y9^- z6kXRZdb^lywB53R{u22z@@t^LuW4@c9~MDo)NE zdzYNBQBJE7XBl+KBI)P)f^kPimgXkchrhSVDO*cu!G)QK=;>MH9Q7j;N{PE}#)0D$ zpJJ6x8Ud|;RVB4@Z`Ct}Tse#C0MrqW^}Xg#^FZN(<27<%$;_!?SLGp7yvdqi0N_0< zKb!^$tcz7rRQ=zRtYLYs^AQK6Z^!FaDZJ0~6%QYz-hXC4Nxh<+-}!KNm(`r;veKB_ 
z8F47k=9tl1=oZ5aDLU+*oPSG1H*a2%2wek_%NSLNBI#{OnX=`Y% z>6uwLIlsu|ka!*FrwiuT6!~-at~DEEqsLKg0-1!F+S)d!3MZZG&NUDVACBYKst#ln zW0P{(oVttYIs0)r-p6OVb3YZx7TT*xyzF`V18%b6*!sk#gm^8+2;>p1y6gy}S6JEG zISUI%D{mFd&nidAn(EC?nx8k25w=!qPL8Da;m-p9Y6*??SYmqOKFKpr!O7X2gH6B+ z{a|nRO0ox4+7$Vk0JT+Fp!TV>IWTv18wO|#o@S^I+RG>Iz4E;IkumkWq9Ge&Yv6M$Q<>V$*Dy=PBN48v#0fz=}r$L<}^+V{C5?31jP zD0{J&dcRcoNAFR~J7UAJj@2kNSd*FaJ9I3;_f^A3*Bm{SQY2;Po8fYT;9g34)qoB< z<@|>beRU243+U5%7ljF^Ax3Z4wg`y4`wy1PEWFw=;Mi&UlnX7FGhMljGE{L~`ATV3 z$G`?c_U{1tl6M#NZ&P<>;(sNG(jaPNE~}g1sia$0v_zf^hT3tDD0w#A>Fk{$E-!tv z?i)61h}Nm5;SP%I9QHzyqr!fibTqu1euTGW z-#2!P5O#0Zdci{Au39aosN%n6g7dO)&E zbQhVTJggzmNou2)V9SZGEEe4#nUW3Rsi!6#<9)O#73uWjEaN;Mcj48gtGnRH%dq8- z%Fw5@V*(7((to&~!*}}lUq1_wL3?oE^Ha}Z>zA%?qtd1SdI;N&DZ{zGF>^3Jpik~j z=!0`J5W_&IM{K#lLTqtDhG51FX|_f>b<;%r->iQ_Yj3(+8W*8Dx(rTC2^;(!Umr4p6CG7v4jS zHGtbR>yLF-YTbVPGe~;vf|F-;=~wDDr6Psu-)}EhFOCy0e?vfsUC&y0Q1WF|3bQgS zJ}8xZK(mi*8^*c2VxzZk=f8&1D0B-WhYPCHC+ywx5oE(|4OiEQ;ahAC=tY{J*oY-1=*s1fr9UpQiIuI&_rVj6)|hzehGl6VIAfJS zLF)l6Y`5wlWga=>7shS8z^nVhM6gvX3A3PT&a4CUbR{L~(LQOn$8Q~3$2lPo@5 zXp4!9QY!K%?K3;47hj_sK_AYq=cpv5bKO081jSobk9bAv@=D;0y{*qr5>YtGKSM@5 z(;NzMUjf(P=N2}cxWonk`Lr0>9HFBVfRnI3Zz@-pcL^+?X|A1y30>CcQ%-*3D3BTE zpZuu`;iF*9#5|iWM}|bPV0!k){UgUn$-_5dJ{3FVqd}aUG-|?P-g_I9OA-y0+l7;7Gs^OkOy90MTy@6z-mW?gQ1Zht5Ifs9saN#1yG_x7|2o6% zI%+-Ly{p~e$+?xU?jfHZ_IK%c9i2kzj%)ZijG-VTf>78d30j7F+i{UvquZr6!m6(0 z)QHAN=;X406CF4C_urTz-~8c=J*1}Uv=NaQ$Sl$%w;-A(1jeK|)jM1F7wMwFNtoo$ zFS~@UIp9uG`+paX4>j1y?(>V26JddF-*P9Jh(EqCm;P`a1F&wh{2tXTN9O>x6Ud8Y zEqiD1{tQjwm~sZ>C6lS5AY#$_2j%nE&y*LYrXQJ!K~h}vz`NEzq}|f~gj;YO^6bJa zjqQ_FtpKG|U=Aj&Y}Nt{y(8)fhwq|y*OOSFLh<4SQ}|%W4q>~8YQ&1ACd^Rx7J!y0!Op= zhHKJP8laRd7SG$t!8X;we2u(pV2$cAOJT zWW`ve2%E)AB9ESPjXVUcaTkS;&w;XX7Dy3bk(;mGylL5@(P8>n@W}Gqag&T)Q@Q+H z7!71wh}F_#Y#`O=EYC;_Bo@ggDrX>c6XfwH4{8NY5cr#nQ>2e+Mla2LbIUJ&zm@NF z!G6V|D;W7r&-mV(jyN@>a9IDIpf(fuKbpQfp6dUP_nc#8CVOv*jAUk=Qwe30Q1*yy z4V!Z$lC2V1M|L74t8kE2WMoD-Hpe>l;f&Aye1G@eKmFnT;Cu7u`iW9N9VP^pRyB6vD#nm 
znS>9f+R_|TfV&q6j`5Uw7P7+3bcyI$k$0(g884Z%U^L?YRhcS$ z-|(f9y~^0DIzRs&oSx;h!h2wKP7Y%Sao{v(epI*Y4Sv^3N%MHkQO8M!p%v-LO|WcH zl8Jpl-xV^Do`>$^c|`{5i^y3N(~J>7`U^Fp7xd32;-oURg-$ikLMRTGryLnPWEWVEkggS37m8mxW) z^OI|<-(2wAMH;JA$8>!SPhI0gO9a;%bz5Uos1qeR#kt{@9e{;a0BGZxaS}E`X{h{t z?_^>l`=iBLDA1QW!%UIt0abV+Q;Yf1YnmtKVPOrj!ZOa9a7B^ z8Yli3JMkzR=Rci>K#`+Y~hZ`_nzf*};bu&F&f2g<9dHD?B-9 zB78LP-qC;#peL)w*z>FH`8<$2CrR?IV@5;w3jMk&f{clJnqSsn|PJ2vGB*V`t`wrRnJS#Lm*@tpwBb>MB=BzXvI)Q--wDfa-% z?T>*|nE-4h3Z=KzGg{ouJj>_{itVLyW2)trX-j7m5=3Y~Y|t$AO1VmQVipX{;(l z+c}z^HDa|JRA%&#f*o380@yL~yDB)nw7O(U!@43b(|?pC#jB#Dw}j-rIRW-plPRM* z)rL60;SSL6&nLX7Tbo#gYAwpwv+o3d8Dk>IEv8U&=}M}uJJ(GK_vY6Jh8KE)mPua8 z<594(cbS}&Ji>%TxxhpkzQ(r|@4oq#uZxuG9Bkkx4e8F`>WF{!FYL-=n(Cixy(b_2 zXngqglv7?J!MRJfxSFc_Fn}SRzg?he7)jPeaC#MYh zOhWN=e+2IxL=JEIFRK>}o{qqT&LSS?(( z{o7`YWYdHwK0y2oT}}x;lz?oGFCi}r+9}?g1u_+1MTYGg>%F1ZK(S3mGwo3CQy)*$ zLnLY53YfhPxM-x!a*16l{v;SN!BC(R{*3TALaWtQfq?X(CLpzF2%ZeXtabb|RWVui z1+_Qt+)pxsDZj&-jhl>zN7|KulUNl>_)pTy7pZh%3S|Ggm$6OrkL?6!e9X2c`SATq zPw{@L^kPBL`g3C@PxMF~o)S`b>&tA3OHV_@LGWj{m@3q)Uw@!j#orLTT20SM+V1~OcFveMt#1h;H_2Qq z(SCpS_GP61Rj^0KYyWb&Q8-2K7F_?CY8Z09c@ce&WKrMNPXv#~Tq@>09lV@%<{=C{ zIz6($%{%dl=&QLlFM?lVE2tD@Y0>+;lOo}ySm{io7%Ado<_X?2o;4r zQDGuYV}~jLbMO$LdAiDsWvcLs10g%%y1f7(UNdO|nH%8)Hs#33z0&k%P-$q)&PJ1s ztaKKHL}L7msAE1((XxJ$3cZYKPuE*M_tifMJ`M&UI)Xda6d(uOQSuVIYQtgR^~_7f zjP3QGtFC_Qv6*MRGwx*Ifh#cMhR5VFDfdi%c_#l}_(<#k4Jsikf;Hf{baqecTX|k! 
ztQ?b2EfN!A|(Cg+RbjZT| z+vcVT0NXBe6YFFpBc-+nY~Vus6z`Eo$-nH`H2=t3g?mN!?hnM6@r6{_V-*ID)Lg6e zrybsRoh-GpmY8*VMVYOomB-o1K#YHJ*fw*Dtn4I#*1-)Rh* zPU2pz>0=G6VyuZ8HR}pLoT(ThbBrs_K>4;<{}In8%4wAQntld`a zaKpNIay3j5@L$BjQd)B-!e6s?Y=N&W+uDo4ZoU zbbR4ai^uSae+P73MH&?JMexp>kz8Mc}uNJqM8xpe)7x&YqTb6*5Y#6Z1(vT z{*-zyovEeP5jVU7Bi)@Jl%JD6FXU=^nt1FP|4#p@PuFy8TguT$KuThfSMX{!9}-rA zIu{2uF+wA>BA4y56CIrvVv+~>|AR%U+JG)Lt6ar~c94W8A4W}itKMR=lO;Z{`(X$U zCb3WlOvh%0jh|GX6)^p&p?eR0fBns>>U$_fkm?XK1xS5x^5+G>%wxu>^;)0oxm-PB z#xD{``#E{kmnxnfRV){<)6aR8m)f^}uag&@i60q5E6V_>Nf0C~Dm3EC9$oH1XtD>s z@wk!u-q~Z}#%GOJ&v;?VxLNd(y^j__su2Yu4+l&_K#}T;D=dFCFA0nLvtbj6bB~r& z2D}mk9XrQgppB@ID)|1Qixj&2`7&NoGI^UpEQQ$yj2`MrtX2SThdu`y_e2Nq-6D{c_+c*djKZ zLizRN+9NXPsCERz2UCeVV^QlKsm-fV2E5nK$Iij|wl^Hh=?K?akX;EM6!uHucYMBn zL|%J1GAT_jdWH@oZa(+`gf9`$$Hp-YQg>cYLneEqHhrlA*jUvVndy~A)E{bmzNm}y zrSjw-7|!B^s#tAjid%x8%PKf=sEP*k-@G6MEC#EuoU(Z{F-brdujA{74~i0k68{EJ z@}~PD!5({+;BV&1J1GBr*-&KQ)=7TSP<#&9F%%3co%~P)IL4A6-2!Ta!O;jtUF!gk zGUko1%B}fx_4zE>2-J{9wtfFwjkZ5ley^HWVN4B_khhxk(MtPk>H?(3ASgTr-b`+; z^mrn$f9Cl2bZydJZR057wl<4_Ulilnt(xy>P-0R$;;UA7j{4ibcaK&`%UDzGv-%&% zlpAP_H0UebN2G{PKDlw-0XT49lX8gBIYY*{Dq926zJGw9M!?BLP+HHMHBL;--SXS# z{`gXL4X&1_hF`w}DQ7nO>__E!v`|9iJYhD&Ne`KmF3)ky05+`w-v^|EI}Xy5eG|xEm&8th>c;>YmEWEv?4On(oTCh- zrpyR>EJX}7Zi7)Fq2TIKwxhnE+_3jwnkSvv*m5D}<4~*S2rBFj;)uk zwUnn*4}1Tt{;07bB8a%{RP+<);-U#!amDJ%z;PC&zWno5I26sNoY&&O3T5paxH0VNFx@u^2DB z4@XEOSgc+pyyIrgk3Te;w6_*oUkUS*mP((YWV&;SD0;aQax`67HiKGrnZ)&Y|Y1)UXk- zl9f8Mb;1t@w7b1g$>fx}5($x8K~YLy2UBMWm4YF;H8MMfZ@|H&m=k}a(4};dhcW9R zVMFm50Lx4CLd4;uAdDjQ1CEF@*7Z0cCxf}apFP85@`e)GT~s)Sg%XxaPKk?3)*}A3 zU_-3+iXluOo|*;1$ERChnQ8d5RBfHHQk9WYH+VwzF*@>g&24RD=n7gJ=K*HyXD?Ae z8|r*kN5~5zWm@gytllnn#lN2;3X{*uz-g2gn2sYSlCBZGuQ5I?8_g9~E zZm(6+i#UtaGH>0zBq<5YkYj+Hun5JRyw3SAL*3bJPW?kddaFrmzC#OtKmDjRHM@I< zG`S5!K7_J$^hdC$Q*xuJ9F3} zLbsHKf2H2$APee>9T%F4QCwExfBK5}5xhW|l0|*#_1$ zjS|p#v3bhczhLgJ;G4Eo2>NyzM*oI66;6_L$_pLcUnt$%=PsRiWtw^qyZ&+Y|BB@8 z?l{q)CY`7L@8sg^X$lD&od=r})F?zU;!PM~Q-4tDf6YN;yjm5zq-a4%)V?{~9Iwwo 
z&R$R`YA5EKjQh(HxZ|FaR31AC3>y*3}JHE(LA<6BTvkr8b;yCgY$ac}O?e5l?Lv3}{^AF6zz z0-3BeU|aarDKOy&mI`G1iq1Np?V6EJ@YH(x6ENfyrXrt_PSm$lQ`d-(bg$SCotkt0 zW<>p4m4*R{wdd0`otPN;UilW-M&uu;oq*=+*VMTW?1>%lxPho0*RrMlT+iaHzqf_X zE=~&v|2o~P&-$x)#qvBxY%%J4yJPm=d7{t*fi>s)W;m7V$R728gpN_IVE;~(?{f?v zVI>`Yrt(OU)haP@Y?tn#^4*v$L6Lf(bmU2ZI^jF$Q&)&3<{HV?;~xq?_tV0BZu>cv z5ydpPym}IMOY1xFJ&f{T93)hD|8Wu7BV9V@dE3)KJhbvySdlKL72|Dg*rs_~Q}zqGutEFh!Sz8| z;UtMa;_YP>8I9+rZIpEUP-UW}=+08S<4@Vi>z8B4ani)w-hzmovx8Pd1wK?iKuw!YJlb^nXNw z9(mpWPCU5b7U?-e=L{JZ{TpppDsYo2dHMX;Y%h$TXg&7y?jGN`dReOS%6TmK#_k$vYM( z{E2Ea$?_0FJyXU>SrWnVp|Ay!od9pw3^IIg9#V32FiQB&`Wrcs5~P$(anyMn0^a^n z19X4P7xl^YP17&`Pt3!t+;ZEr1^G=2=JLkoB2ek*ql|d1XqsUvif{vzGVLAlr~#uP zk=Gq}_v7wW;hwiwlebph_@kbQ@GY8vDR2Jut($gv5|H9GVmt$6h9l!uA);c8zS5L> z^*t@+e!If_s(u}L1-h}-*2~qh`Xg&qQV(I*9e#DI7|e-*SHD6xxafVvbURy)-d#{% ze3QUf3xKwraDFO*j=?euy z6q!V70R~SnKJPx^QR?-G%Uu zLX3{~A}I{Timw3wj@prJb*}ex0jX%2sN*>`;T~To+nR5U=`FC>2i^5g=Wf8#R?Se? zR-*@G*DsKHRR?jS=i!^KvCQ(ddSHGXTnev0n=e;ISo$Dxd{LV?Rmk6S{GL>vTC{E# zm9+HzSGqn&81LyPJ zA5!mZdR9KGtm|HACI9#AwI%j1{!35{vfmD=Wb%TmZpyU$kG|3p0;IX)^AcNpuM*xQ zI=*+Lzgqb^{n3S?SBUq3d~T}u&J}$1Idxfe@*`WpleF`6c?aT%5_4@B?k2?@ENRTa zie)Z|YEII|T`{f$@I9cP+O~fPq6-`}b6qnJ7g`2hzJ`ci2=K^k7j)2+c!^1Y0QL?q zuYD9DI*s567x1UnhdQFe6509p&R&=J=M$lCP-rcp^uD&AOTMjo^=yd*>Nyv2K#lQw zX+MkoOStdwiHZ$`#Vy|Qz?*8Flje0uS{_W!zIOB~Y&XUGDg&u`4fq;`&rChGH!SaLpafPj zir~-{C%zN(P1h)?V6)l%w;*H6YoR;wu}~n?DW~vi*izyf=vS=NkPOF~_)w1SX=8oQ z$x}DZFT~zjc~m*FaFf1Cp^H@e44*m*mdjK-{OP6HDMp(ffm&ZHZoZ|65v=3vmt(K< zesRC1ZKPcKBx=zOB1gEb$apId8@hBx)(blmKEym9pBSyZb0nauxsn5xO5LKEUWUXF zp>A<-BmIuUM5SFORO?q$}xB|9cDc@Q|YR-Z5DlOqaXhB=f+)tJ{0nd#D*_nX5wA7=R(EggC*RpR{3B_`|q~ucVOPeR6v6knF`>4M8z%sWL?1staV-wqig;|u)TXA zQN!tvD|n>NLvVii9B=?={cKy@H-1%jyA~4Ep1yglTi7V6-~K@XEKLP;;Lg#f5(B$A zsB2`G+OZ%$)zGLZRRXcW{%Gx!_#{Y^`fBnvm%?<3@+`mT-&tmqN9%!l%pshNDW(Jq zofwP&v@?)DeyG|}8@xUzrVFNP4#UIvz|oWCzb8%U>YQaUdbRf-{2TVb{xIEjgB*=N zO4@aq(rn5jm9apTxgK7_b+3>Ja^OT$zIj36i9(yLGyPUcz#d+h&}t;N5#Qn=Q6!y0M3R 
z9LIKewUDT{)Mk#6RVDluK4O8ot?j*874tkXXz>pxY2Fx`l7-)48I(9s;J_TS<`Rpu zU7nJ=88~b$$Uq^Is$KJB5*{%5zn&{zLpx@gz|4lCK8LY6eR8aOgso1IRDDw0zGYh} z@Qvxz!I9Vcl*mU%$0Qw##Z}&&BST+Q`8#Ta8zp!fCl>`{Au9uVt{8nTpy+P&NZa)fitc^nb!wVJBLyCI)4N5RSBX~|W>gak3 zWX|51lcQW4Pp!Hsj3rSeQ%h;C8iY}5#6T*DUlh1@z)4hY=ablv8OpUm2)8L~?x08l zb+);A)>|M!cvq;>7WbA?@&V{f$6`Q4RB`|UZo_*&g}eU()V*9~A>5pyjT%|^umaRu zN*-sHGKGOwH>VQqVs!yNd(GZpl&loWe#Lxz3zT|Qvs@sqd=ej8@^-t1?s|RwXXmt2 z{rZs2R{9VHoD_V@BR_0tJ`3K%ilz#XtGTo2`{;*r*jyfzLJK`UNZ(hYX9Z_xuiiX3 zpuR$|>Ebb4(M^bC+aA|nDNsCS@{@`zas(IB;=L2e*=$kX5##7FHse?5f_9k~-7ivW zXlkU@1UakJEA&xWa0wwR^W7X;ik><#FPTvF6I)%^(@510=uY{%oi`YU0uFaCtb-uX zjXN+^rg;5FeMGR$DmA10w}ER22o}75^kirrP`zg>wT?n6XkZ3gZo8P@Uoq6v{yN<_ zOjH8HlGx-y@HR**8U}ZNd!((tO+GaZ#k)nno2}_sEA>By|F)eX=__MI_k_cN?_G~O zhiC2qeBq-tdK@zlw-dtyiUdLY7slZ-w4nNxOS_@7&}*LI`0;oPPBBNmr-S}7pSv@Q zM2}_tB*`UKc&PyE#SMQ3kgt~9TB!&tzZZNGI^bV*5!ywao9crDk-U3blYg;DEfTFN zOVX`7yEMSPq1600ebfRhUbuUB|J{plG8ReYeb%c3MgCAbG!I{5FaM&C31w9c9m}F7 z19AQlZzq}ZnS`cMyhvR*65piaAp^&z;wkw#kTlagS- z6bSpcd&+(BDHNu`t0mBj7`K0D@7FsahHDUZkE**FtPa`nYL7Y6#t$C`Rn83sYEJ9m zt!oCP9Lx3rr5I#fzhbc_w@dW>qxqgb$4`qGj?7xV9N;Mr*{;V?XPKcsw> z?)f)nGj_qQ$S6^0R>=>TomC<bx`<;dU3eT)Uh9Hrm2D47?6}0Yo>2NCuZhb#C2rlaiSHRy zjSX3*1}@K08=siuz$4Y@n^?1BwfKd(zCGH$h;rb`DTo$?QdaNl(jb1Y? 
zib~}nG`^=Eju-jeCwwUFYdt=-;QEGVYi(QaYbjjsaTPD@!vq@H<`9*^?rD$1X{F%(0fQr!r z(n(*Nzj9?bec!8z>O|wTdZ@B3>hcIkP7hFdsFA&R(K$7*s)47*m%U+%sM~?reRuJh7s){>2c!E$j`tFjlAP5}6sCj7tOK7BG>k5Yzh*-1`e8y}>P>rvA&R+{8n!Uw{*y z$|rEhSzI)XWib~J4~xpnhWPNbHaxkEbkuf@a`@dBMC9O2u`&qO_&tU!-Mmmd4cNJk znDW!SpTi+v<;hv(N%2Zj3EPVK6%z|HJKU7(F z2yt_(u8DO~vT9l79S!J=)tp45uKtJ7I52#K)jIAhYYF@u9=P!GpS8%i7OFbW z{$LF2{G$CR9rQrnapO1oPSwyX`y;)Es4Yy2iPCL5V3?Cc9Jh^KzKBO|jObvF|49^Z z@Y;s8p7VS(NXfh4|DPF><0v&i*Y}u=V5WG0?>6@%VAR-ZQD`PGQ z*E6Lv8`LyD)-ypfsi*1jm_Az9e#~}E&+3EDs)k=O@X_*&yo-A!UgWr>h}f4AkwR=Q zuPUPX{YHJ_d}FL)YC5K&?^mOy)(BVSdm?6CGbm+Zn9k253VZk9)3*?lCeO=zyCn2_ z70v80-%G2h?t$Xlc8a?IJ^5SjbAVD%ou;xcIfv6vj9i~yIUYzQoYu1w9w1&L-aXv< z`5n1Zb1iRjce0e+N=Y%w-1Nf36Q8cV=?nOXc(>_Xe62;^3Mk}ve85y^Lrz?TdqJy$ z@B9g%C`o`%Aipd1sC+!Se0Ykp72fusXy_HxQSYNSZ6#bL^L&4?P{FqjX=DD@F(iS z`e?M;`d^zT$UH4b`I!_P;EdsVnmjc`mcN>dOZ!XM3{pj{4+97SKxCQ

mba5dkp$8z@7(w zw+>=0o(ZZLi^@|!j37km%M-tW{`y{ahNKpdk3q`|O+MvXd*0xBfYSB(!s50|w982! zpk*Ds&ef-znc&m=MiTw%aQsy69?H~lbB*+lr52mf;(eCfC6gg#;Zs;U{~e~^nf+amlz1b%c816? z@zDie8p}P0Q+r+I^QdIkH7x4PkFP-CUN?2pSJF!qx+2PA@(m4M>i}}RH}8awf$dgn z2$Bx|0jQ^CUd8Fy)XRWn-_y^!rTyk^--PrkMlH0*G0#Wfy|VNz4INvi|J(7y;G-5g$eUK^K8to92b28xKxi)omEk6(5rNXo5e3; z%l(|qS=~!bZJ@1;o?{sj)r5Ndt#2|c-8#*nAA#jbG;(Y{a9D|d(<;!Q7Jn<@$n)bq za^R6`%$n2{Xq!z*!Vj(!ySFb$W6tg1D4&ijcxv+3=5xZ2tBj5}|N9wj zm-ZtPpy^W=V0Zr*q-~rBI|&A?+TGf|fM0qaCea#I`|}c;Cm!D;lAe|&f{|4r8h%86 zbv6_rcdK;fdZiFw67Yqmle1a&qxV7R=+FqU-DV!yd@%p`*%6=~58UUbI^IaUc~dq( zr|(rq9wNWJZ34n*#%)N178a)?zHqC)qbawlgDkTc$Rg~Q9}K{1M6Mk3;9N|~XP_Qt z{LfMzz@Cjv21b(33(`@Rj(U&w5Uz{XTg)}CQ|b??_pOBf@rj^OcH;=Y^YN7^EZ-%q zlio|b!ylC;+)qU^+F^(6HYnzI>3uc1FMN+b1*qcsKQ|sF6T}(xs}L0{5^e7|zFmBF zWI8F+5b|OTzYJ2JMY*tYDvbCBcM1Qkex3$~m(Sa_Xnz5z27+H;{lBFawxiN*D)yJX zi@YNqM`ah7TFFMgx7n^kww+!nJ>%9{NugPdwt1*2^!Kx7t?j+N;D;;dwLR^t_Mblz z$!j)vDHK2-eHJWzQ^N{iF>E^Hwg-&(75#b1Gguzqv2|fddg&s8YmY*5yO7gr(lKIO z2s`$FtePp0C=)iQo~(_g3kOWUv|nbsF%_rMToRW(F8pUbej^Zupzw9DPzoww~W@ zTHS{T@_j7^f0?}eLcBoiotPQ%;!KeXkk zLgyD8l(wiY(%|*_t~B>_Nkf7KCxHq!Ec7)<968xn#pbmH`=c= z)UC3jX_WRC8_7Ts_u)qK7nZJH@ARQ{16v`*DKdcaCnvWDQU!gk%%I8(o6&0Tix=Mo zgc-lbP&wEdGz!IVd~$jS^E{O69x&m7e*NNpW#nPd^4eq~8mKeQJ3GC8u*%gT0E!6|tz)I}J}3~}lHKON{8Yrmz*bj?}n_le1I*-5RJIbUp}`Gp{79~9O36ea&% zsR$flCEvafEB~DGtL^`uWt|->I!WQ&I;m=3NDH4WJj?{8(*cG8Oti;zb3cNrAG`f+ zO_F@LFmSHr!+v{Cq+o1AY&UC^-xjU9L5YLt3~|V`a-aTd{G1fDSl|$x9yT`u-)woJ z6iM#O%^9wVC#~|0N53Dp#x~Mbu~kH-aA7B@ksrY5Jo_>>h9t3q`GW(hoT+N(+9%)) zp+}}|fXf%rPZZ&PYQ(OA2y`;T4E`C<&r|~`RAeO^S(OV@g(kqb5WU=p-(VS7h43|! z^m0upy14p6gWgPk?6w`#Y|M?#wSZrmbZX#+UrFqIG3U`MB*IEp@s1tE@(!(Pg;ujM zLQK7|^U-URjGIH@&BVC4_Xie&AA+TEbjp`GkX?^z#I`$W~74$oL=~sW`%M5Y`jW00gCK0IakP>TuY`{z0 zKmwB21$?}Sf;L~*7DQp>!$}jLS=jz*ZI&rB;puOtxVtN(En4+DXUQ%JXqm{uQ~E=- z>EE3W=+;XtXEz&DPCAx?pW3la8b}AEC8@fYWH&Yk9xV5*<>xC|^gHDD*k>LxEi#tMzbJB0k?F#AX5j? 
zH|Gy$Qo&K9#1q({Ud(q3FMeawx92Wsf6R8wgrrv)IRcww7v+j2uf<(6PKRHgpDW)= z^W_`>gJy>2;1nG4E#OT7!86nGbU7g5yhy&L%OG+y(9wp= zcK?9gasFD^plSL2BuxyjUcPW-z#o+>?SJp@)te)8t1}uFkNCTXK>rp-;Y#K6i_h9~ z1i4+&ro4Ba!*^RQV9b_K^LIhkoJgW}H0Fouki~M6!;2MV8BeYxNiLlh-n9EP)Y)%I zX5&WH%&mHfZfjDg)HI%R42Of7aM3+h6h-pyX&{Y{wj$Ak>(d4*QeCchZ4+Ra<)K6WDNc^ru(k$x<@HN& z$UvmyN6-c1RH2bRx+x2-rAggfN1NMbsI%E{U+Cm)(@`wwgN@x9K-iBd z=bk^XBCh9rlPHg+>8lfrzU+JusM5ystdhpl8A^@iZpWx*U!;)uJ!9Ts+?-FpC08zx9cKW?`N->&%PmzXg0 znWANk3-dRgiR?mQE-*L6>)2(ltZP)s=Txh1^$)#2*nR9Do^}ToTH#5*{{ELW^P=~E zYS>BL$#-5G&t)^{9?M4X;+15@YkC7!f04lz)P)zgU3X7-9?Ty<2c)aic~7Z+-Q)}C zlj44`<>8#?P9457dd|7x7nQf1ea)X~^E(WlFCkXhhxgfP0*LJs;3}`9Q~HduYsjfY zr_uXgNxAs&#nV?Jrm5FcyYIGnu@bS>s{9>Jrjrf-{=`1uSN^4s!u+`k3q7v;yNkJ0 za6$6JNqADTX*v|IMoTC;C-xE{dQ3|}3F;^buIc-GuB^kOu3xsR zPR#~X{=0^^I`-m6`;RPzp_J!Q7Yuk+`Tq`v_MP~2aiNJ*7wO1;FeL6|Xj_f_$pE@X z;J~d|bDLX_Xt1Cl3GL-2jpklXzxauwrari`riBq4hqsn~EF#~pqNp@buF=BZ^}rKs zAQpr-f6yGjbPr+x_QK-E6?SX!vLeMGv4ptuX2mqD!PKnwQEtWDIM)-bm)&hfM%F3? z=p}vQ0rOnEW895TdzWgWxQtyfxIYz+{3L!RUHe7tyZUMiKI<%R!v^#hP}DwOnU;zR z+B>;zxg6~5EnN`zVP3*%7cLx4euPN{T=a!r<6yI4k3n2V6)9MZ&6lB9=4*MxDtAk~ z<_1!(=L_AW4*T%sWKpOMa_rm(YWqQ2c%W8@)Zi_h{1me~vdhkwGf1|N zurHpRdTa#C#^G0WV3)s{el9V8_4WUA0oI9s4G}}Bz6+9vc>1m1oi%<47=wUAuLC+y zog;)<|Hkf-0r(p4lF!;5HIHea->G(-%lz3@!*>uVTt)00_+nCFh7!}2HcR#R)Y%-n+k(tP{WEQf#!mJ|y;K@&pD0nz+*wy6^ z7g{Gb_y)*4Td)2&x}>9yf~sAMHcdD}9DdwGhcgjeo}R-K`Dmk-dP$V$%@2oMQcgo! 
zTpR7K90f{G8bF%%GpzvL&ysylXOnL=mv1IbMlCeuXVTEV0?>7gPa3O zCLpy`V3M$Z9{D5x)(0G~vLxk$cL&mE2J2X@z$T2nL*j1Yuu4_>Kxek_ouzwFK@5q| z`Xc+Ic0}eyw0iF}`Z7M5YpznRAi>Un<6j7~f+W*eVkR9j8MAf~^8Hr~GPBZuqwVnk znI^4~VWC&7V1A7xcBgf6Ap{T2P4;Z>&QdTFJgK>vd3u)@Ldj>L?5m6FEmPgqjfnn& z4dWmRw+o#-o7%#Qfy07cul@kg4`pE)oT0oGNvoHC#MmUZrrVJ`iGSSbv@$>rVK`H( zV0s_VhFbGrr@RUWLO) zY6LpaNaM`KznyApp9q_2*)(4dgj`G6Ow+*KlC6*Tkn3=%W2IeG8z9$a$pm~6ychaq zDvd2xO?Gz5>?IrRAO60W|0U=JwB)U}UvGB7t&F>)TJgfJ>9*?cY@|fo*Aoq%*uB&` zp}Hi|-FCt~wMsOj!fOhUG&@J<`KQ+R!Ut5sX%nl^x4s=*FoEdOB4AOrN2aClyX3N@ zunW~$%)LG|pOO1GcvbQorsWK@cRB?>^H>^@>Dpi3#c?3RhD=R)CVGQS!~^P8B(=~4 zR%&0xN|v27y+n?ggLES!ba1ZcKV7F}y`?^9{^~ML0(-}Z=KIM7Dl=igc8C^GykV!p zTJM@2C`pl*?~RCKG*?mQ4&v~*8MF`*pehq_Z>fd|qup#xsjG1(NcaeAfJBDWTPZRL zF_ixtnns5H6UZXh%7?t}64tP~!}7TdmqG0PzFj=!KWMX1;{)$FJ!@fR<(`VwmIoVN zzY#_XTWsP@FvVr6WDcX1uoc20EhV?bX*Gq}+togJLPB6479DkXul+ zkalBN#t|GqO+_O`>gf)9^AS$LPD(+i^W7}-Lp9^yX>(>%#fDtDRlm83&pcB3qc7)F zW5TP$c$vbgz5I%Hkxi+fX`|aRa{Bf$y%J{(U)fPIm?-3o?wUj?+%1 zt>-Gx5JOyGMErzl3msw9-k<;RzTg#zvcLG2Wl>6O#-*0lB1F3+Q0&p`OX|vAP%jt1 zR_jY_PnW(#hq!gXzdh*ynz%ssxJM!>kMv)#a?%#sIrgC(G!J3O z%T~xwSm855G^TAzumB#!8=vPw@a|!u5``iaXpmpdem`xEcBfipL=16YE)vT6dqDsE z{S#J;t=E4)q?Ph_9sQ2vxCD&m!jeBzCN;aT8Bo&-RdDN@*Z=vzRXmeOj!HR;+h5A1=8dF9FbmBF&_9+H};CkrW{iTf9l@e`8{f$7_b z`}EHgbh^uvk)^-M{o%mJ&z7~*e$>{$PdObbQ#Qz(dWd{`J;W5#ehSEM*br#G6q0F75aPZ1c(lo-?EDhN8$%l6V z<>SniFvw*({d2T>4fn2|rvO7=aY&S8^`6SNGf-`@ktj8e)U-hv`u!N;Ibb?DTaiW7 zYsilPiL!{dpz7;2%bE6wSkpxxMVw0_7}R^Tk<47E4nX3wZm!-lLg-izxv{VU@anWz zPKlh7FTps}qCd97d!Ax+=0_N>b&;Y9WXI7iuWT)U2j-;s%wLMpj9sXt0~WBNUZ93ZM5Fhaq_;d{g%mC&)XUiv-MN!#XD|>N62d=Ciz(V_KYme zgiFBm)V$MtUq(4Y^^G4{M8TrNT^N3Mn@mv}fftA^b1>+)Cfpr=GPT=gZ@%Kld(7{{ z*T7Z+wNmE|1x6qMKL7lGIsm7#V9q(-uM9*I@>BW#(380Y_lKkD8e3Ldc#YV|>+q-N zR{);p$hOJzeF2|4EoQ9x_6Sw~adyAh!Fy;;AcIaZNi?jnNI6&J_bc-gKREgw?KAKH zH|^y4SFwgk*+&bCpzi|PF3@rrxU?&@d>SKR2zhFQnvzyuxuQG8=JBtSCa<H=T>*>>J58;McQyd>q{1A=y(Fxu1yPyqhH#9{|Ya$<*M0* zV^I@5wczHd>X~*7V+Mst6_s4IlT$kpCi5=GxZ`mmcfghP>I6?f_Pe`{S9-iVSuBLw 
zKlYZ^zP&|lFF>_*-fO3sOmSeMH0NXVvX>sVIBRnr43s+gVYq94ZdPB6LFTKa?T|jt zQ6s{)cJoGt4yNV}C&?!WJAtK_EVCaaXV4B}L0=rk9-jO1~ivRA$_Sr?{!0B3LPO>BW| zneotb@KhZBQ$kl1_2C1bGVyApz<%Y8+MhC6$zh(VfQ1{4)H&^nd2j_RV036Nz^Ie3 zol;jjC-cHL$Wm^0Dn9<-k!Lt|bz$f-?gX9Sr&qfD)Zrw@h4a@8W#U2Ho=?;xC-8*S zqCfL@vHr}Y#OFIMnM3BwQ$r66N`0gKb7HSLRe#NoE=+rPOB;B5eW=z~!S?tyH|+P& zo7i28I@aP*o?>g7W7>$Bx^@hA0(rl6M{H*0(mm#!qYT&=Iqa? zRYq2FsaY8Vq!%SB#7&B1NLuo7j-B6Sh<8#53x5{Im!jM4L_w2qkL97D%vb0aqyoKJ+ zvZBzhD7Vw6s)u(tMQhDMYj(A_(~29+EI)&y@Bvvwpa-`62#Q`V*=noTM-6>q01png}a_644`N4 zLRD1y)*fbdQ{wyC`#-z87qIijE|`d2jA$>JL3OIS=Y|!B0!udVmQH~!dvx+uRoq&3 ztjph;Nh27D*0!LM^UtT@p3*-uS(9K!HQ*ISw?*zTC$n$8^R41YCd(}bMKgHoUBhd^ ze;+m4k~Osh$(zbtSc|*r)`UzDKFb`JMX2OZC9nLAa)BDza_FYsu8g*04UvqvfOAO* zl&oMHH@|TiTpezBp)635Bp?MD`(X{P;oiWo{6A?S`#HBFL*Vz9`Xx8izr%H6j!$90 zK)7^8;ZTwMLJk?;r9+(*nZH-{Dqh-}45>va1Q2$&lz?2F{ZUj2!Xa&Ff2Z8@q2 zcXiN{`h&p+vOQAJ;x|=I&vZ1n3SQSH&Q(hGKvYWS00s2Ug(G^OIZSpA08(On(C2v~ zXB>wePnGYz!ezYvIA>%b(apkD%cx6d(=E@z)*8%KDK@@EXk_oL73&REPzb8LRLiOV zF0b!(zIy`EaqS5fcJN=v)|2DE9>U8n*9v5usqX*w7NvG1hBJWVvKuSNtm^7;Ht{rDsM-kecJ3TV~yDOaj2YJky`-^gI z`uBMGNK2<-%(-~_!=KadJ`oEZwG^Qo3?I3=pp~g6)r!`M*5198#hSpmzxnHO5LMx+cWT)LQDxOSopSSd zNyCn+Q2q$<=7tXVRgU;v@L{C4_zi{+KVCK0K16qBu|? 
zr#ZI_Dc9~i8xM)>x!j`c8Jc$cKBv~bL~E)-Lj6+7&8I-mi-0R4bh-zV?{K(QD#}+V zXpdk@QQ+uu#8MX+%&e7D65SRYYbjSIQb$2c;6(h8NB{g}p@muTgLYSBG5+K(ncGvS zBU{>X90z3kFbJ@>ql{#b6EHV9w;Jn<&`eHQl*Bm!8;<46j3N!}8UnE2CyU#*NBNqCYkNEYkO%$$)~ep`E;wbyW-g}@%=WLOO$4zSnAhKd=ppn55Y8bS z3OftWFdeiL%mO)Ag=gcc)N}qOG(H5F{O3=Pn9oz%?vXt_0Q#~zWSrjvMn20%7x>@F zxclY;K2Bk1E+*;gXtkyPL(_N1Q~kg1pL48i5t3Q<&dA6(hsa(H3OQCr$jXj$tZcF> zduBxvQNmG(?7jCU9NTf`@Adxt9^b$G!Ff5a=eVEu^SZD5x|GwPb%;<~4>3W=X;`sQ zl-5*}RdeXqGgf_)|GyBCD_;-OxQs>q(6u4RZC@EZ>UtmxO9-hjf`t(phyfQFmlgVp z@stkYvwP5c7L!v=xud9G=fdMTh&L>il1deG@hze!b&|uqA(So`?eFT2j0;WL4SCyF z&oUCNAYT;RRiy`tTFEnamQLLMkX&8}Kg9nCm;1iUKCuGBe_Y!dQBZ^2hRE9Na58oM z@89y^O8KYN?bOd*8dtsS#gjS1-srDQHF05h4RI_CrFgoZXObeZGx2_QCv~#Jy;fwd zl}wV?A4CuzbOCgfLk7yn&4`w_B0UkuQD|U<;(Gxv9)Yl^-Cm;QdDq0``pwlh}zb>`+ zXf#}(T60)_oua&0)FTC+Ph5CP`tutKIeYLP*EBppVCe^a3)W3(k0X;@UoqFXD?Rs@ z7aG%w;iO}AX>3Vp>`xiV^m{%oLUOA)_52Fg$lGwpz1^15o1qw_vOIa@`$cg0PYzjS zZ@EVVex8i~gHQR392-uEb-;xQRt_l|yqL@WAv}E(anZNDWva9><6M(VLDr5&p$bEx0{c+@*UMs&6C z;@fc8GkVRtfmKXp{{g9e)Bo#JxWXcrF3{G*BEVN}6e(}7Q~UQt7r|HdxC2>vByDvPAX$6IrJjvOTJujCh zc-Ii44^1ACw6Qp4JJV035w*ydbY|<$Nq-HGema1yyCKrf(1_2SJIsMtzAKNCZ#{r$ z5EigKk6Pq#MlUH^)asMw0Lz1AJ>(wm4=U1ShZ1hVk3z>adRK|tgoo*0p;x8PRbhSG z-$M?;Ocq~Mzlzk8;ULbpMMnMAXa6)wE75P*Ib}r8dIzpr%kSD~ymwU`bo5`b9!xo$ z%uVqMTC`Zoo<`%VYrXt8YrXFNi`i}rC>}5!d_~NU^EJEnLdWdG(+B%80!O}LT)Z8u zDhsqWP%Z_V-HpD}HmYIXSBQ^Oai8x%jA!X@B z(YSBf7Tl|Riq_cRmEbX>k6qLTY- z+thlOh^)IFD9yZ`7^T$7IET26X>J@%qe*VxEw$Y-PX=R$-Q0W@HO1iMQ|qi*Mo#oL zDg>ya_a+8^ov$x{+{uF0tU2RE%@cZy*?lQY_>5CTm!zA}ECGK(X1%8xCR4_&HMUf- zW=zyzkF}D@2tN&{b;{Ygo9>I9BqCYpe%YVJ8JLm80< zmyU>S#TsY)$&|}ODFjN1pIK05)yXO)X@_Y2V_BA_&HXi$nJ=eyVdN02L2d5^-|iyt zYpV-0+6CCbdlfq(5mjWm+E$DZY(5FLI)==0JP&B~YKXNQwCyFKD|QmcAqKx7fj4MX->db>u!&fL+fF07 z#3RZ0z5Zayn;=qK4Yz4w_AosvFODS?u4=BA6w+}W-ykTtMN|*}KK|M3k$~}(oR`r9 zO;{pjICUK-YkU+E6y2X#;iu8x75?Q3rRfXK42p6uR-;crqB4bxmm?K1BOktS?e9=* zH*$4}hVUCZ=l}V!ci|c3{5 z7A(*A!;7IfvfRPfjODV%C3CZ}41Hz|Wm_decFK#ffw2e$u-Y@)XDrV+!)2bixjvD2 
zh}G5@<0#EUN@Fii=i}N~S)17RlGnan8{;|sjz1Av{QEhKIE`(qd^V^@lR$d?9kEIl z^UD(p(`;Y9u1YZ%QDz_+Sy$K3e3-qsd@WA2@Ea=Je~;^Z^k{61w^Eq0SUQcVgHrt6 zd;Gge!}d5q%e{iazeH-_M@iY(xygJM3_RyOO+xgBYF{ zqb4O{HCHn$b&i}Ks4vFClaFI#zC5>PBU2kWdNlL!@xZIF3iAhJR9Z*Ag)~dt%eChR zMEctb4iVR1$7)QoH2sEJ!B1G-8FKnqC|(=USub*Q1hycgsKIJmei~z!cb=#yyi12z zj{THU(zd6Gkf8hOM)viK$7)AUie<8o5%^q)s|y^;&mffdrzqpXOJtvnOAjZQd|myQ zS^Vjcw*ka$Jp-jAHSGP2o#bK( zDPEP5cMfBvG2cB+reLpBAD@dyJp3>y`~|L{@J=)|n1Q_FG}q;w&5JBd0BIY?MrG3V zRVA%X(kFT-6WW!Z@kwOZ`W-oP*5=xSvszD$y3af+{OlOz6B4`eZzvCLY~cLGUP@PZ z^X2?kK&`$XUO7P$;T67J#u#c3o~8FzJoi6!Jm`GBX1z(#g4kkzDT?S`in)EU)5P0-a70;FPrZC)dZjDp9W(!tot8%E zek2X-s=cPZ&6Vv~e<8D*G8E4N2g|PlQWCu74$B`EG@p9ih&(#D&0kNoQDmMGHM{P3 zlkEALtn!tXOqoM8$LQ3X+3z|)A@7#*sJQ7JlT$Tb*@@w?r9O?bYB{x}sc^85y4saBK_7+-hhp$nC|IN^6ha%O;l89oiy z7;=UIzlvvK8NI{&#CwhpFiIJbh~nhzTh+AZy%JcN1Y=bcyfPP&IL+p`sjgy%&Xpi{ zvbC}mqKZ4X(yn|ZsuOiGHNAnZ(@(kTxNf7Sc0T#mQS>TDxkI!%oD3YgOjkBuSykrt z(QVMzmsA>tgJlA7Xbo^-(5aOiIpB;0E+Y?i-j^dXkN7|L>@@G(_&9|l9qqKPB`I>SfXgzAKaKIP5U&aP{!P9g#mrvGH4NYR@{NOxuKZN&1=Z z$=M>9{U6@M`jEIT;;ST@g&rJQaD**S-B?iN895L{swHz4|YDC)HEAFBWj9Vz2`T+CsvlSqd6Qm(`&yc9T!P!kX@$+FqJ{(?p8l|X^ zk9yPgt}UkRJx?T00#ACBw>?)%m=M9enX;J;;_OW#pZ@s(%l`u*QBBLZMh(&fVIu9i z!a^3bo{l1J6<|rT>Xq0jg%T}zTxNQIzf|mV=;A{XVnqON#ib0s%nngYCX6uiecV9% zmeQtvLP$T=J;Ma%2Cw|S^ksw1@@k4k z$;}4YCvYWqCO%`dTADO|ROY@zEytl2Eq=z11M(s*iTij6@6`eBAwu^H?>=1Q(l0d{8Gg0jSUyXv zl`1T{o;M56#5&|lnmQuZn9oD01D{XY`o!|<*$!u*dF7~W$U|$^aE@N(-Rov8G8znT z*)Kd-zWqcbGrC}d9xWUx*}xQxEs4uO!oX!5{P1AF|LyJoZjABID8FD-Z{m!qEY^ z*uSPkuz`f|a>rRjv_{dpDiY;h?CZozl5@vz?s1g#Z`4NzVsG8PXF20LG$6( zEH8i~1WCMwE#&bXw&a3LnP_3b;A18iy*SO*iwIcv&_~Dc4`kS3+mUC1@`KLqe?Ddz zG1I~t+m#V(7n%ik2OiFs2>YQrA7JSD?x|Cm@ER5;@u^=Cs3I_wGa=VRDM6up{eCkc3cvEC~%_KXrEw`31yKa%*tn*-h?8z#<59v?hZ^Fsp^wj>3 zALk@SLGHt(L09E~m_mPN zCC{%Hx=rhK^Ph_0CChd}DhpCB-n{)sb+p8lJvj~mOUuFe*Dom&Qxz!FdfNndq^t*B z-#$f#wE!QEp&+4u9-X2-oT&c;l`j7962?tJb6;+B)Jj_zj(0O8JTC|z>@UE5`_nXm z1^%QW!1%(1?S19N5NQpQS2C5RUg+AHoET6=hftHi{G3|_pq-nvyW8HeLKkMAit?|{ 
zLtp4f(8R7hhi$|m_U|0h0(gu8MvZ0__X8_Ed1v*TXh8jEjY{lAG!{U<5s$jiu|~X; zM=8X-D8#8>kaw`(9@#AUjV|*a?kwziZcJwYqgap|-8&C{vBF)OzB(#|ur+8U22X`-3nltoC5v z4-+cD1^)Z9fD4>sw67is#bKO7lUI-GKj<-Cc#_`*oryfTf%Phn&%1xo_@-YoX@j;_ z6!DzP$aV9oDsvY#6^wCAVV9!|r*lm$iCu~@E4oz=j(K$HD?rvoje>=#DwDq1hwRC+ zUHkeO_W3vABXLUG#F-Orl8or}KwQo$r`^mD!{f}8*Mw;F^O3pzVLhiYV4HjzulMAp z5wN)5UH!^oL1ZFU;A(5e{D_Y9Q>FJm*xBpvdb@6>w2~o?_t_D>Bl#`yI!7*}FrNla zzwoqR%GMNO8r&^^g;DQMTq*EYs92f>u`s$|i8Er-B4OtCohLr~C0$RB+z1ea<>Ed6 z*3;{yQNl*y*|zWkoDQD>{~LJ{%HfZt^RfvyCFVIWW8*|g3lhZG4FO-`*izDvULXd}jgOkk*io@-DuLO$S(DppwgPeJg<3FTok zYx@{^gZFJO$`{4HUh1E~TlIs>tv^3Az%Q4yrI3SjIILWDATeJ7d`#C8@U$lQzKE$k z_O>H)2up@xTg(FC4Kpsta&7MIVrO4itkJTJ+8OaOgbU?d{jPvX3{8D|^@-0X_%hss zDA#EGJ!fkv%jh~wg{(Xf_EXi+%$9vM9%pU47D&=Tspm9a7(Rv^Sdj}F<256-1rvS_ z;ysg$dHgp4aCh#8vzIXb;gXJG1mD|sOik9w)4eSt@nw8}j43a)3vCWE;vXIoq=j2< z&1D|nl5+H0T?j&Pvf&qMj#}j};4_9X=a5)$*f;$jWijYAjc5j2=L&jm4qhzKZNHyI ziuYKP{rDWp&r8*kmy)iV9=h56;w|TgxEEtOz#`Pv#+p2Z@?l9~*FlxtDY%QY*CZRR zPruUI4fl+(JskLF*FhG`>}Kx{MJ-5HPAz;jEDTu!7AuGy#WR%OtmC>X?{PfV;x4iB zH;VHSlkQsR{@;;66`Yxlcp;pJ;*k)zpP{D)SDT+>#JG;EJbZ+nEo=m_wC9w;VT)Opb4DMKDyuw>Dg-?W(A{=U($z5d4?w>Qf%Ic z3dUO(qw`n^(DeV>*va%AP#JE1%CK9|A4-{o7d0o02J$6QKUgiKA-zmoBkvU0eDIlW zQypa>zMS~e9JUELy^+rS5d3H&WX^=yNmiZ}#;T&c%$;r9{bP@zKn!uUq4DC;eQwXQ zhjXAQ4woy{k>$4odE>0sIWf+RTF-*Ht&XZZK&X`T>38ZCf8*X(d#4E9!BP;fMVm?2 z#UMYBELiIQ>5y2SnVce0%SOcO3d8}StL`TIrxkg^)NC=vrh$ZzrHg0@kx8He@fZ^j*$C;Wh{@Z$CA@4u+1k+raW`45Nfh=H-Liu#QZir*Z%KlKW&oap z%$Y_@$kzKvZ3GXJ)T;6kDm`$i>p}L~;E~+O`TFL2s1)t9rv?98H5|NlG0f^oRqxf!j3Z zEsgt|kZj>h^juym49ZvpjlYMw4ZT{T`J#I>4H<^hwC=G+K4jZ^25zgSXFFM6UvCmt zVy$tl4SIaZ7_SmO#^KR&>%C!kk5_Ekjc@ip)Yp&=9)K9E7_$Py96Ep^>?-|EO|F)y z>@Nm~7o6>LYn@diZaTCcE__r@`XH~gv~>kCc){Z9yp!Gj%^=O}@m zh+9T6aG-7djy>5s@^UXUeAfBULcQiP(>;@n+lI7wow(jNj);?rM43E03tO7^;KPN*}^N>Jfns> zfBKSuycFdt48Vvaz)I0NBgMd0rm*7HW2PTrjqR5!%G==vdh?Jz8%7!Es?wV23R7~7Qt%Od*IsqjcD0MDC zN}0+)$@a1!QXYeqpQxJmT?{204QTKqG7?Q!zudgba7j~3im)YA85>WDp8621hsmnpAC~WLeW7AyCadHT 
zJRNhDAkoY3_ZB1Ala6o84Q}Le04z9aB3A%dZL$M@eN=4GOGi5BLo3BDhuwtUg-JEl zOPPYB_~U@2LHEL2M}F?~#v2PDO~21VUW*Gm5k&qXzaL%^O`m^f#u=CNz1lCYO8K*riJ#P6^hzJ;IXiMnJ;_EB62|ApQTwxM6a(xl7TK9yBwK?x*!1zUH3>3XGv)zmDcRnoN z?!i<3EMc#*hoKn}UbNUJ{Y3VGC$TK=K81xNJ{|=;;eGqxs*4NCymovO^Rma$oLEbr zlW@y}-M`%Wta)ju0dXZk~U==u3LMcgu5~90F`v4Gh-v-vud~^83 zyNYk8!y=ZHEEEvDfDTCFkE^LBmTy35&QMq^JaWjgDsARe_!UHY=joka$#{%|h!H;8 zWVtpm0%Ly8_(T>@@SVFT*OFEAp^RTog?hFUefga$-awlkh$CeYRhzE#w<`%iHRc6s z$Vj|hl)9&lDu!Pu_!E`aaL?(Pz#_j6%5wiCaO?A@jt9igfXadEVuAPFdfxKA7_Y!I zj_2X2)l`L(Q7`Vx>^c`F$*JkuFl59kuIB4loIge*;ea6Q({<`Y!FZvigck2$cR@-W zKT59iZ^BGPQ!nL52S=j^&9oqim7s z2_?-p!gL4~mtqRWLGQGqB!o_1ljhF}uKrMykQ6ffiFG@4Xdrv3=BTp&#+6z` zCKg3u2^`*W-+sNXck(SD<%SlGUj*nuu-2!b@|GqV|Kille30h=F^dI*`bU9DP|7*+ z%^lp+Ebw4l{E*!n45|{E5LlkQC~S)9^5#Vi5aob%-}$#;txB#iC1g$Si%ur4s8kHq zUa?6-SDlI2=6;ls`nYv$aP~RKXryO1EXgwclirulqbG zu6kITNt1UC50qlOZhh>RJ}h_nMq-Ozk>lnMe{iePy!hT@63}(%nwzXcsxm|Yy8vo> zX;5bZ?`I(w&nv$T9G<~_;ZUo!H!=>`aUqBO4}_ zXsuYEm$>LBTsZlCu`)qMvTUs!w+!WhNDFqzg4JVH(Lu*h z$OduPGlQU-&0*>{EcL&;I#Fj(YPI z&3hi!-VRGPTHE3as5U=lHVwW0nFI5%tG_Qy^P5QGQuwWB;xrAdB9ny3rDga#jIs@V zR!2R_Af^rRvNq<=d81DE(Q*FDCV}?aON7d*AHH=rduh&A{%zICVj|t=gSg#*2{Kd2 zautmMq!?jNR#Ud6g7cy^7I7mbG(Hxcsx(%{#*XL=&?<*QVZ!GL?rvLbG zwN5xNKAYUy!31=Z(p?_FLWuiMVFrJ?r#bQ?C1q&~&r238NT2Qr)gKLE-)2?F5@&^b zQTkVP(p2)Q5ff4WX2VIx%c4*2C@=&aS(#RNYJ@j(Qg}O??mj13nvkAPK z_$~8Oo!_l@9+vssL5pEIt{t$@FiG%6W*W4l}|<4a9Ym%gxVDV zqyD@(qQ!OTmsrqALALudt3%-=?XdaaYdbk+75}ut@UGW(h~7^Qr7aS1%G#r>)j4?X zmRCBrphTusXj3tCl;-Ua$}^O78F=TeHTUIDZXGgC=JGYe$$LC!Abm5wOR`sUU*E7$ z+`UePRW6+sRzw|}iVtqJpi|FzY4FkmWKx5d%&n8O+w~Vk()DD7r%Btg8>{W(fJ!mu z@h83Z*yNIO-HQ}*$2l_lFOtLYLVY+M9N_>7*GeELPTvh<^G_VpV*!)#x;L$bKENA4 z7!?yGRbHS(9oedq3NTbefJ@(&bVq~D{YtENZ1?yKX@nI|eF$e}6OKgi)7;_iNkQKW zo2LM-U!%3Xa7+A&0-Ax3{g?B?67-@NB~#Qh=EQJ(d7*@^4PmlcD{JPwduO^2^+*Bgl z5_xiaUDDG)eNKB84P@t{X`l(2VRQ%YZT#J|gM0pzs%1}i0i9E4h({Leaj$y*6#)1C z#$IYZ>haV=35XaM(itRxJ97+RPYig`|4OO5ZygtJeg9pj_mGv8^rr&2%WSzS$YR)C 
z=S-QF67b*UL`DedOUoulv!@o*mnPK&zaNN5Ed)7H02?AH)+Iht@Zmx@soj5O*8-S6 zdMmKL8ps2DEf}N^sn`W*F{ewdljS4MAmFC*G3xP@4KHZ3pp>o51Af&nB~Odkt!nt}!!3C$ahKm|5JIFlys1bGR2{8u%)~nP`D;2UuZh_M$<^qzw3e@dH^J|BzcK5=r~DS z4K*e>MYqd!QfS@!yFQGm4-tc3(*KG0?*<*XI%s` zEN7ju@EXPWlYBp|CIjZ-_h(GOEVmp(7BvU~!ZI9!95~l7s1k*T>zwLCqVGopYF!Ha zL8lFb(n{Ij3ic@X>RpT;J2%fAbcdx#^2YF=8tWll%qsMdL)|;Xk?S0n(tB5oHJ<-g zK(+sOvfcNmDsBAj(ahie*E8IPtoYyGGqjga4zM|0DfQPB9iDm7Oi~zGu;0E!%$v_Z z@Oq_B3kMtyrAHf!O~;}mFb{iDRAQJ3wf7$pJag(e42b%cIPv?Wc|@6hG4w0<;x*QA z)%|<-?h@$lpiQ#Vt^F2BjVdqCKmxmN+3M7UtPAB+5c&DS%QWP}$?{>z56FI;m#Ion z#a~c)Dr}bV3si#Vd6B<$E&-)>j*9^fp9J4FjoST??Qi)SDK(6(=f^X848Q5XnIK~$$bAQax94Q*(OnK!h1`I@@ID7#ZXpsdQ|CZvR<`gX4_I*Y^Xg)L z-DsZB!%<)1FG9U-e8;Yja@x)-0ShmZ!yCk(*><{WPEPb80X4w<$?PZe^yy9PKY`>B zQGlSd_Ebd%+%4sF?jHKH)O-#7*dhuIW^F}lc-Zl12=|b7=j4T1c29x-hMAk z<}&m43$4EqBtrtkLB>i|UaaQVcesq$vOPq&eZIEk6c)kLNAb!EH{$TmPahJz|9qM^ z^LH~DGvBCR(2o&QwT>?!M>;L>uusfQfz{am#&6QFa1hVaNIv_$1{OyV!Z44GfC;bg zsySzmlxwRs9HBp~P!Rzu_4u#TN!A3~HQtE}gyUWX2*JE+1`XSf61euyAh*2$OQbL) zz+tYdo;twH$4&Q4pl%b33ZyMIUj9PJ%;5hlDeE4gNA`41t{gjQ$@&(?6JMg&w0u)} zkk-VCa>Sp)?yTzW57gVf9pv8)Ai;pdQ4*in%MDK22#V|cb)b*3v|eGGL!ni8AWhpI zz{oU9ZVfxACB3v_43@pCL|y0)^ZdiZm2BuRn_bT2_(pTUUnhma*uv_ zuMWstwnF8gNQ-5}UkZ$<6f^DZe+I9tq)PgH*cKWeZr25<0DFQXX}g}cGj*Dd7$l%6 z@TMc7=FRR=1O=aDyUYS5u!FVI3D7U0tJdyC+8PbjPCh*~^Y#ABp32mu-s@P$kk}7@ib-j5-Wk+(F-`b zb;F)_d-hRBVQL+yHx4N@2L-aBeuyJzqHHkHMtpRN`s0;8h_kIh=j)7ZajJ0CHjd>CUQBX{0<0ea zFW?ENAh-MrCEGu1bi`L4Dyw4xFkVW0W-;pZJ{QlO$G_leHVF{%GLzp=acYriAd_@3 z;atb8pqjfuNml?24Eu}x*Wed-3=SvfKiQZO*%t&r?3>owDf>F}G<<4EH|F7-k*NGc zOZjqW+;61Fq7h%v2qWO%dVSiOePy<>aA*2PT4JQP!SK7FiT~2_M)h2AgrVEhb%80 z^cSgX54JbE;iQ2$52|fn0%N5RP(>VK*tM;*1}XK=m5t(j`sc8uFj$RaV0)liCniXc zrn~!#8(#{w&$mkBE0{G}SfQ}+4mk+Ar%p?zY0pF4SgLh%?h(}&u?~>H?)+F#9I@cFq#`exo z;^)l0%p_-~3fw9QUa{{yg?j(z&)orn_^+k9Wnl%%pyO(k@S#OKG5+-8lQClVXqQel zi!YKFd;7CNvXiNWF(q9l`73c%h^4$4Fz+KyOb0#<{L3!m9W8s^oQDCOs?>Px+(GO| zz!mtpO0=<+bo2hg_Sb;o?!`BYiB6}E3WW>D*&VlIyUDxhvYAW~GV{D!7k}27x|5Lr 
z8EGW#D0=EK>Su+qn^yxw*X28U(gk?i336F2$#gwNGg3@?b|@Q8bj@gux6WadZ12)U zuPffl>YlezXIQTc)b8+M;S8TRA!la2963&u#Q=(u%xCbumz&VPA&1fic>c*KXnQ{C zw4QpMLJg=4md;-|O~R>(84~XKJo^IJhtRe0wH>9(^GrYoSRIoHc*duLR;~^^ghI;Y zcwpati?|@E-vV87TkoTFN?Ux&*__EIP*W}+j>^QUMhM9^ZX!%Wx z3h}p@#Cws@hoo8F64e>RF5v30=Pg02-%Sf1NgG1`PPU*XI-ZuyA6O`LR-?wg7Z7FK zA5>pFW+m+&ozuCp&)dm<8BQTb9HfD;Dx{{k1_-S(wNj=x^hH{^vLKjwc=4tO4P91Z zg!kRO0(n|?C(^9Sp+y7u3NYbWXRLQ6JWIxW81g2=Ud>{Mpa0V+Z{_4ZH-0rcH{#)` zksqO z5`P~$hw9#Hj9yBJKV8CVU-~XBX_Y~F9ejHX*OMn?JwkJ&&5tXW&|>HO8-zEN93rFp z&WFX-d6(*2Yk!IuVXlj7I@upRk!<6XovDj6 zv80s@hYswrgcj7I;Fj}iUEg`B$@KViH8qMb79L>}axd1xo!!liA)SGRveerRstF8h za$I>n%)`#1x#`F%r4`&~LkQ$My9uA+CM@aikMBwsRTj&&8>_8Z<@z=NO@AN?@Q#jnvdFkc zH;-SVpup{<^oZGpw!y)m;$1qerOs=X(b-5W@znZCwJ+JeQvUftY&*$l>~TitXYWM9 zh);Hol38=|g=U?yr=|#*bR0oJM=fzRN-Sn{i5v5934Y`9XYAJ~lUyvJEdnY-HRn|Q zE6vt*FXbDRESYF~!G}2dhvi?~3YzMP$=)a>72({fRk&Ko{r5*ybVig>F-0BfZo`hS zm<^6sHwrd@a_Q;xK^OIm?31bnv{xe60}7k#Y%#^A;=%P&!IZ$|3Oc7uz5N#>-0)P& zOT4hdUtb?mt+w6ZU($D94@M=tqV`bw|F{6)g>#`-&f`Urp@RD5ou8UPP@UNdXOyzU zyD5sPY$+@~(P*9qlq|U4UB-99r`B!+@iIm!KAJJh)C1hE&rEWN_aVe3e#bdFj(b+!z%sv6-~dwCoAP1iXnx^!@pB9CGgw=^6xY zk=|8%o^SUHA(Da+EGZ0oQ|bJuo9q)%O>&=nmMl=%jm;oCol_G4KjR2EaVY}aVlL@# zw4dT$K4l8D#kMLb{tH-)MCB-{Y;nGtB~k9bJ8>x&X!oJXo3znk9(MhgmIiX;40a#b z{;F6wU>U1;>tWt2a1|Vhj%dgKy(pf+ssQo4c!YT8yj!CCk1g3nf7kEA9N4w(;bEn; zk}qC5ODOqZ!4Lv&49yOx+Y;p5=};;3QBN?6_gRUTFAnYzh^#B3BBKe>>JLzG2uX?x z^YfgLAdir{Rg2KWTyU62EyZXD2pqly7w-r*BS3gpM`?IS6MDH8#quMSW!xKjW{nd- zR$Y!YhK#&F`aX_UR2ByB-xC0HH~#cevM=VRF6=HjhfqVZ;u#eR@3gji9zt~-!hMm% zrzH0McNfEU>M;xk&{@gy;e#~tS5IQ;UGA0F0P;^#b9Rpe2JOZQRcEA;>YbSmiM!Hi zYf8aYuNW&c@4=exOe5FyD8j%f+1+YVba5r4!#$uXMRDKjF~3&0q6_n~K;cJ5-~Ze- zca+zSg2*%fnCJkujUf%SzOr?eb_LZmq&H>wE+MGQlF6p}oUn7>U>Gn4v4ksl+Gp+X z<4b4^9&=&(?i4}zjMd&te9fH@Nk1Q4$W1o-Ey)i_`me+=wX`9JVX=Z$htm*uf4t-Y zk58eS7P{LHfIFsJ)>a0&mm$N7`fIxiO2Hz9Q)fbZz~TF~7F~!_*cfs9GIf!LN zjAE~u15vawroXeyn_7aVw5?{^#y@g_Ot=gBAIb8W+~Zw&G%(>ES0hOy<7m5Qg%c|m 
zCcNiWho0L*mALhf%37Dw08!y1cx3w5rn)fY8SdFHxF~ilh!nMyQgK`^G7_4?{a-QL zjsQ-%NTT+lL=31d{et`Q{-YRwia4`AflG<>zrNC*vsU)CC*>N6{4mc(2&~rl^E)@J z-9*jCp7jaSb@@CiaJRE6(zhOZ!hz=oY8J}0N||T8E_a@3{k)U_W}RCP_VXJRw=uA{ z?@H8cHz<_!nWwOg1@9#dGrpj%I0!e^ISLn7CTHF@6EAd}C39&E-BiRRR2W0iOccx; zl0sxJ3sZ&Iqko*<9sc$ipmUBatz^iBUEs923`pDxf8|IGSufLn%N$S!(!bO-fD3Mz z{4w|`&qP4HUF^E~+f?Qy9P*aj2Yd zXKwJWelu};s?K4mi1-9-PK+F1Up&I{$Y26nR62V#5?V=go$!3=b;Cw*qFJBtlOoI_ z6X+{7sCX>vp%DMd(+dSRB^ncmH{0Q|xXWmV@gex3@C395E4cgLTZ*POaBAR%%`STL zQNjqsG03YQ`;W&q1tjxN`u>NBA%QdN3xPs&MY7g&s#bmA!VN3fSLce$5*k=ZzsZ;+ zPhqkGP%$W8IiF(a@yWL7g#|?uufQIgaxT6`5Qz+sE-(l`Sh(N$XVJT8mAA6d1J*Gg zVS~}7k4=*zRYS<$`XKtI11a}e-sE2mM?`X0G{L)4qnPvQ_}JB6+_^H<;CXol2<~&T zM4a7B$K236LYY`a>7Pto1+zv5S#frrFJ^Yc*2C zZf`)ZVQ9M$spt17(W$5Sr)RZ0?Zq&0Y23{rN`2$)R!vAdfcJj2Kz%*FpuvA*tWzoc zZ^PTAA@egtP94=%-IbJ(90I9O0hpKt0f#< zFq)E3?Qh=B%o{URk>+x-ykB=MZs4l(9W}!nO&?tT%UO6xlv=KRglB(2V%Xq+gnyVJ zM1&-0I@RwMRFEt>e*XwPng3wwo~t_WNf}{rI5ef+9|C@KXfZ^eO0N4y{Qt;Kn~&lUZT(Y?xU5r)TMzQq%<9XQ=eV&{J0HGlS1zFGUcH%kaX>tNy|S z*Y!|RsdTE!Mvw%J@y0*<6`^mQu^Wl;wlPrnFO(JlycDl-Lo_E=Ez}gPnZ0gF^0Wd` zZ~v5}tEvlk3%N3(CzA_eqUGZ_=`7*vi7FeP@_sq^7zIG~2%u(h8t6L{`DrM*i9$HAr&%@06NSb!)9S07Ub2or@%q;XRqS6N6u_t!0qw9)ukq7YcAU z@23PCLS_?(yiA#gox(;_$;SnTvzNW7t;zTUEkH?>X$ecZ?FErSWIpOTzF3Zitij@e zSF@?)4L<}xK=_+RnrG2(H{1b|zH4{l7{3O9#}>XSzA;tghe~t% zu9!91=Ql<1ddeoPVLB7MLDKPOG29s>Qx^}7xaX4{}^ z^wiY3pf8HBz-u&`XlYXJ3V`*E+V51v zQ-(tc)sp&$1?Q?_i3Q0t7+yUbwL)^=?`)OzHk9g_S57#}N*w;NlJN{X6;*z>il|wI z_2a1udruCl#30nlxHqT~*I5sx^)Mk4v^~F8RnnA{i?R^E3rDm1t(Uha5zWk z3@IsHkJUxyyeRq%#7sN9dz0ElCZM=$KAg>f)AD(^ZOpz=uDojUZ{&4K6&X>!;8#sg zt!&?lG6-q|)?t>q%rv)nG+WP__#vdb%Ep#qDPMcgK1+F>0U6|nPbOdmujOsf8oor$ z{UIB?#*Rq5brj7ST89FkQbrOU!qw92`Al7)Y8IjTicpy;lgIKSI~jo+I?$VESnY{g z&a&MA0768g#6L4y-4ZiX(9c9XT}9YG#zY1xE72!F?5FF)(=uzkDQ!mnHID{VC?kY_ z!C!GT-JM60_7B5MhXy{>h^HgjjIK5BG4@UnG@AgXE4ihS3FB=r5DZ@oI!eNVY@_6F zTzAxZj_*^FJ4xXOkm50*ocubRX5U7>bW%40#z+lI;2Q9`HT=R==Tii|i!A%9SIgts zWOmt^m0K0ps27Ev{xonCL0SpaQrx${=OyOf-4;tZ#|vb#T;}!=_3p(F7D0EBf6#$y 
zN1fgb-R)P~79-O*!uH<&1uSD%`CdOE{oF}x-u2*p)t2O}>rD+2R0HEwNg zy~8FfpZSnBwZQ9y#k}pGP?lNgz`e@tGa$^k1x#Nyn-eJFP0dNQ-Q#vA8EPU1_}8!6 zp*sfdADTbli0kLwblQiH*1Mg0?4i{7t`AFIfR({+lZH9&({gQojuf?5HpB4!V&OG# zZ5q;F8OU>A#|V>Kj&0d3MRCk~N^_JuBapM514?rLdSe0H6+ZaBVmPDt0X#zCA)_$Z z@KRh8iHbEB_pD|Q;z*uG6hxw2q<;v)uS{B7f%;$z%Q-3);w5Fu@LNDvOUwpaS(^(RBFis?f zlM2k3>+|QRA1#PA;TI_N*7*ET57sXr-z+1_j;u-N+gqmDrb+e>F#oJ59}^-)NmB3M zG9x9gZg6er!INk(Xk)BgqmFw8)paV>@40%kAEq4+~1-LI_pZ%w;GO+yT`VwQ!;9347P68o$jO z+x&r(!iz#bCClJlrv}^J7;E7u<}6keL5WyKQl1G9Iu1!T7{_q>+TF? z_nMC8+l5jFu?-1Lq7vgKQr;)Lyd0uEo8RS+Xn<9W`PV+0>C*#Q;1>nJWij4-x5A%Y z(nLIb3s5ZgCmVrOwyQD=w$%U0MgFwV5${`JG$TGjwB9g~5xII>Z$wNOELp}NpEWz| z{!*K^9(?8^>(NbgsV|X)ng6h-9`z_@^vchAU*l8xK)AfUcl@0h2$4no#ipxnopO;I zMjO==gS2EM!%fC~9HojQh=lX;6^E%NWqDj_u!FB2X)qv&5u}{^wg^o!Vb*?$;j|A{ z@KZ%cKsXjFI%B_LZA)%$WbB*@ch!`%AP8dFu9;2L`Y!$BrNiH9J`6qux77vS+Vu5wil&?ZZeeCddR3H$7AFt$<{~a{dx>v%?>4B$7hbb2;%*zS@3IQ z`kRYFlae4f_sWN#TmYj<$bmgODbGdLPgf85gQMmhWy2HWWyf9F46W_FGKz{`-x9{} z6JaBHu*at(ar)&H&^gF{g?uiD490*Od5zVTJ&=sIX60imdG+L$dKJsb>iJy?zBs~B z>0((opIrXgO}PcBa}H_@uYOxQZKQc3_Y?nDv?c+ILVtT$MKTBtoVWE@#gY#MOYfXwOZ5b41SsF9I&7#>3Ts_66r zaM9)x`JYEZ_HA!UlGQ0bp-{I$>LjlVHWJG2b5P-L^pBuyD(?o}gg!T_%sDg;9&|<=mWAy!Wy{mu1wF+m( z4|K^(8{6$3390ptLF8xJg%pmbmv^mHu3qS{We{O~5^S-ouP;;O=#!au^s!k{(ja4g+O& z_Xv@-B*mj3Fs}%XiO01mx_JF&M#&z2{^hXZ1pL6sTRftOGGwzIgj==dpq^$o-E5AKb? z=h(PQ7}K=QUnNVHP13YY`AG$?Fk3x7_8ia}VSoCd=BUgQ{lJ;rW z7cI)Gq{9+drv)vUpV2&dtZlQ~-l3_*N_2ffVt`8Pj_sBX6begY7zF-0bG-M|!9%3@ z>_9KDYKeYseg!-VJ%O*Q*L+?Dz)UuDdPMFKN0pPBO<&{Z9|3SZ>{Su-$0&5~%?}*M zb+KL07-aP{2Uj_k#PnOK(B>=<2leKWtVt1W*vb5MHwbky9VlMhG7848fGMCscwyBN z59tQ191C*)pC5xgy0+nGbT4SJdal@6vyY@Kr(M8f=#;fX7r>Xl!(BXhMa_$G;u#qv{(WK%`F9vtWq&cP>UB;zh&#oV}0*K{Yzi*cP@ z|Ni1W1P2}8pni!B-m>TPv&UBw@e{oHa+5Bt&si0CA3OM@yyj8dbwO* zRC+toEiGt1jD&d{1|@mJUST8RaFFH>bShShKRillaLx;iKNt^(AvxP8LEI4VQZ)z= zCXP|ML}8uyf+NiyxM9D3_RI{1hen5yq=71{fWHUF z2!-NESg^+rZpixCY}9-G`nB}KEbQ-w_>!X~`T#3pqmubENk$T*O)pK_G`xSLFtSw! 
zu!;bh_NZ_90=C!}AZ8EOi%s%5LWCWtpD|s$4)MD`f<(Jso3mp>uh8@1H~- zUkP4-!Ge|KXIO0iv|~{wonEZRrlxIrUy(A#Pri7J;E<))W=trLtR66XYGqf-yUp8+ zk1PfpBXrHoM z;@UvB=O;JIN?jW9a{&Goll2G4ZmSbouZ!5|a-GQps#CFR^EYM%NEu8f?~Yopih#vW zj2U?=BK`lSgy7?Bv#O#QD4%cgwsNT=BDihYDV(!3o&(A?i+kr%aeW0D$H$$y!yr4{ zuCoiJe%c+sH8*J8NYJ?RvmSE?xEquoRnmisW`6N`y;LzkSQ^`s9`Z92qWUosIf0^m ztLAd`yfY8^b3Y1vNY-SBIDSQeh@-x3#|k&Op&P2xOh86!Y*b<+(JbI7aLGbu2M80f zB7Gb`Iy_U1*TlWg;hx`2;y{3syWePdU=pI)U=w3&7*>)|JqRgA;6qlLmW1{sv}W_g>-%y8+MhB;ym}itWlyP^50N49OhHdxs*(mras515B3Cq@!Nsoh zo?4fQ9vdnd&^F8OX7E2#`G_FBMRt;(!mU*M z>w&$-_s3xt%onl&g8{{gcLhK^l={ho1*0EX4!T&|9l;q%3JJ&fLnHQS21*=0vYKYY zgo*stbJ`p{ClS2RtMle_$)FFJRc%_?@&$__AfJyQJ;H{eURTwAlt?xYA#|f(nAEp% z=lsq3iUqz30jKG;Iw1g_1MWSk#dzs#!6K;#3&p(_r_Zpfq^9`3;o_eirBK2be*vA2 zwUwR#4hpM`PF1_6&$3KDu;gAr>zEbyzWK(H`Vg|4-@aA^SNhbi8YcUgBN)59oj0IF`v`ZdEy+ z?}7K+eipl>%U?0-7I>#VAO#rUY=Z6zVIl19ABWIRZpUevX z)nh~oetV(uMxF7xY_Iid7YCesvkGx&5Irl=#kEOemP~o^_=?9ew){T32Xu@H$Gb;R zp3%9!It%t=%}EQ1uR&JW5j6=SsRMYe8T>bRTZ%iszr;oPXR;2Ye#~v+y3zWK%qD;+ zjDjHO@gFz87AJb|4>FnU!>fNl(Ypydrs@;9^D~LE`3w3 zXga_qm8zo}`W8_7cRF5S^H`dPcAW6f$kjN7soS^i4XFnogyd7ZkW98cXM)IWLwG=HivaPm* zyT$Uw)3cj2xe?<2-Gj+DI%AyDJd_)!Pybv$B)MSOeoE2N%YF_riWay;i};R%_L%mh6z+flBnnNv-p8#;8_HR}8o$#OGkkyxm9 zYaenJdYl%Hc^xYQ^mNTEE9~t`RqIabx8^i1Sc_S?GMsDY29SK#o+~(-wiW6>pTlua z&{y&muce4o^Pw|q2FOqh?hoJ+NU%1AI)$YZgh(=Z^&y6tNDBr)r*Z6eO$)9buPZ&rf8xw{Jya-C`a@Ee7kG%)t7GCGC-XlF$|AeSS-f$&%Ml$OVp0*~`$ ztZbPWKnLTPS2YC@jtL>i;AT5mF4lK@|# zH8ugeObUaAQ7G2rU5NDw)wsz0!`$Z}rF*%ogLN+qmGN`I7mutF&|s8C>a~>ZQS0kY zw6E!e7cQkKJ`PH& zyf|oJy-qe}=JUf$`Mm5bE#l=$@O>61p1U}Q2kQf!fmcD`GYneh8f0kH?C63Nbrt`+^(y6k-L? 
zH~<{Z6ltaCQ5t7=5@!?CG^rduk);1Uo#{LOP_5$he#41YmEs)(bW3xlV2R-?xt%d_JR2enqo_)pkhp7ZqP=Dx!B#Gj&j9=_-bwW4?bBO!))=S>9sV4QjpEuY1I z|L}IXru=Rd6Ai7kv^BIOfg--rD3lR43j7trMLyvmYUDZ;9i>!m|J-Br28Q^~TFZoD z{;FL+a0L9-2>`3z366(dNIL3Laoj${EE(Z|(Ix%EHXP#fjsPD9seUVLs`lf*)vKQ~eBsTe`=T)5|24u~M_RkgZa>3#aV>y}hK17v2NrT?)LP$*=|4+gl1Y za=a%@qx9&O`w*dag3;ml1~AeZw}RQu;oE55!i0vA>Q&%GP0&rftf4GB{~qR_4x;R_ z4H+0$xTTL_fH20*vgNSD6Vo=BcQC2K&52>vn<b(MlK(dsmSI(--(-O0cA2rRU1OmGX2k z@TR25y3`K;yy}w8xdJ5eFy(2}2}iyr`w5vMMh~B{)i8?HC$*E-Xv8?eYHm{A=lE-| z8ku;7`u;Bql`)srWPjmUOAw4-gVkJ--RpC0#3{G)^4rmCOnJ<1ge-CDDjoT zApvUbyHUx$4lyv5LF@9|jASpdJ`!I11*!yYoK|1(j)H2>$20dHUih+ISMAB)<78>a zmGQ6sb0(Pktdbz#Z(1dzcp^<8n{lt`YGSj&4f%{!O=Vte0wdLFZAT}+8i5n@8BySz zkTY}bkfB^zmwlbPR+=T+W{fA9Ak`OJ9&?}lgKH5+^uf%%s2m?>3gaf1XH=j(}>P*m6zZTXF}D^sj(K`Kf}$+#|$2C;8570s4727w^A{)q{>u# zb-#eikcVkO2j>gWUyGKZT+^yJ81*;1M=Qv!{#a7>=6Z>vJVcD~1$nTNgEO(=R^gf- z@FS^0WAUSd_k(;e(f!<4_M|myvB_e6Q1V5zRr`vk`j>7kTx3Aj`{KK#R;5(0ipQU` z(A|d$G}YwC`5}Doe6Ct7^T2xnZo{^Y(#_|Gr1gKl0mu3suBJ>j4 zi9>f-e60mhm}P7mAI}9yD?;PN$HQw$x^f%dX?YQX1S15j6_oITpK>yI(b(VTk$(?k zvHlYFQ_O_%cUd&|BF>e$(KGGvF#z=ux^8L0-4P#bsJ$V$(u=LS$u2emR-L>+gs~HD zg=Da;N7Q9Fy!S^WSx_Q~zfLSgsh_0PC-|SHXTb4}k= zz-pS(jPIy+Cc}|e9UWu0XvBc^I+Um^60g!=mp=mc5^zII?yg!_z^%$pO5K6sN?^7x zU91kH8qYz?H^6lWk<57}tquU;uPE)dO`)6drNqv2kuBTTnAIluokp8)gP~l8^qN=C zGX!Fu;a1NC8y7AJ$7Kb>XA@Q#g+2kJ)#KU{mJQ#8bK%YDEU$11(Sz-+=G1xj^@3wL+s0M253 zK$bh1!65v*_t&D+58EVs@nMt>9ie#prwcNr`k417ZR`9ofGg|XHMx7{3=x!Nv0MxS z{?a|KD9vHpX5d~IeI+Q6)Olj)w}QinKM}CzL*8ug7Y}J^ z{ld~z_!k`|8Fjs4bq|7g{s>nMf&99(;x)7|Y6{V6Gv+8aI`mXMG0wgc5TIPxdXI3c z(hH_yVSru!SU;tJx8Oq7ttD4bM7$)Po2qk9k5kqXo||+sO>AgdsZ`Gz`hI$*AVnUl zYz8d9DUol>?Kc%ScGy>KD=GxWtic5?men#1wZTu$`GLJHjN?f5;Y&Ur4LTlH*%9FhLhHLkae@ZimA(Jup3TpGt7cD zub;Y}9|>Fu&M1xwhQ^G7D*h`lm~5Z-v^`N}i+;;IXWKs9i!1rw7As2_la)r_V#pLQ z?8oG#52G^Jy`9PnyY7ep*XT*Egr^+N`O22p^#H?!QPznqRfY;tqpcEE&yRFothl5e zwwf}hmiLIJQyZ%rXD^Hc^A;+e;jRGl))GgrXw8`NjUAjdD@riu59X`%mm|1zjY`@3 
z1~7yvBc;SOR3RF=m@htYiltlOnc#bE9V!cbg2i>4V<5GC?um5YI;a5mTeCflUO)9b zsgXn$Ebc1HV8bgFhNt~DyjFCU3Z`NaAN>)3UgBDvyEaD1U^ScCs!H-=U*)Fpz8B|OaKcPJ~tnK+X5S+y;UHp1I+9xat z>*|+33twhUC?@>LpZ?PwFR~lTDLU7CV3i!#uq9ePw!a>JIg(T%hs@+lR!9?nY-rkh zg%LJ=Br?~Tw8$CYGjAjS7Tfx!_ZeaR@CZN<(WtBZ{DtzUEuuJB+Twlvh$gV41$(6D zKx(|niLl3MB`sOExqDtoZ)jcw0`uU6sl4?)=h1DlpI4Kqbaj?5QNd__RSq$K{yR{~ z5M08nqB}0r<+N0Mhf}~*q*&luQNVPvMBrM87n_s6DvJs$gWAl5{$94)Bm|hTk~G@N zYF6%F##u*{{iU$#*@>!ym4d*mm0UIPcxrSp=R3C40~G3}8%wrc-YBzUrR_sX69MUm z><-Xjxw|{nm@U2g*w!Zu`zR{u2_I6)(YmG4x*Q>1Vk}%HJpE2gM?2f>^?O-r@qYxuurpTpK}T zt3hA}@q)*Ds_TN7^+e%+o>Loex#}k>Hz{HGX}pF*D}Mow_zT2=w&S7%ZdA?7lZp8R zx^rLrR8{o0Uwvm9a({1>w$*8X1=<5<&eG zh2ZlyRA$(i%&&aYzak-x zg_~DH?Txf0ZVrK{qE`RfU#Ns+^a2t0iT9Re&@jq?inPSGC7pN}q-TnOW^Q<>Ue}Q% zvaVI4*b6sxY69aalWh^TXfr0>Qbu3N8l<=xSIu8Z{%@Ug#?4mao)$CoYW!~gLBtp_ z9=hu|N*WqfNr=Qt{TrVA?-SY7L9j$0HMX2RxgT^J{$ZkJ-wUcW6^4ts$l&+gkZWHt zhIEe~yHKRGKZsIok-+b}jqPXl{cuvXTl6PU*lLrr`+jXDTFXhn2XP};;T(@**v zmi;z~RIg0_=pP>#vpqjJWtAi8uDy0%=pgqqrCNu^{k2+;phdluP-;~=n{@W-Oml6g zT5G*tlJ{X;c(OXCk6A|*t~fZJPnai1|F371nwM42+wQkPW|L>1dX^(iKd+Q%6K}~a z|759E5UKKxZ0H41^1RQpW5JWkjTWeS_M@bLe?~)N;rtz?ef!DlkFis^jxa`!n~ED5 zAV(NPV8*)HHwwH`9Ao-Mr6FG+Q zap@SzYA`VgtnB)=d#WeVwUqmnGGs_OnU~jHT`Px7`3~Rp?;cPX24TFzE;CY25IE3+ zmz!37Ff22Xj}kZ#s&;n*{v3E@uJ(JmVxt{%;*jM8>$Avs6tI^gP~1M^SOF)BAB34b zW9zF7C4}DS%*i4n<@d;{d?Uq#5IpCwzX#zbAz8Yf_C~G*0p~2p;5rx#2-dw3b}#g- z`ZPaV6~yqVl7roAzOPwr%7RPZ-x>BHf$Q$}DxX@moGBveA0GWrY`+6^|87_u`-_aK zbxOdUck6GNvL_+Dk)$d`PZ50eRj~Cfu&>GhXm55Pf%zK7HXGEs-ymyp=EVFIPKkG^ ztP%KcST_BVLS6}#T>Jnf_k+b1A6ff-M^doVf1}4rl~MQWN(fji!2iCk-E%F0y?V;N ze~%OS4^$$!r{TAYq{X{X)(S-K?N6YnNa;eE03d%ZB+op4FFtx%P4%aMDUnK&dyu)H9sG^aLkghJ50cvG*oRF9;uR$h} zwD#GLY(W05B*FtFGhI})zyTCk7aGmDV{cXiEwOr_lv;rT_Yi`Ig|=_soX)^}m<6^# z94N++nWLtVKcv!&Mh8gV8FQ`VTT89S-S+V6>yUS|n+B7Pk92Pe8z4P2A4$$WrwpkD zDyQyDyjg~5v=e=#klnCM_PG7fNP%`IZ?Wb1nTmyUDmx=t_~tlNQXyePD|r;oeG)Nj z)*rIKulpJ@HBtQi7>&oOllFGB*JsYetecig{r$kDO88z}H|r>y;<=|kiq+xE4j;oU zx7BNu&(A?M$2K53}A&*sjxX3x%Ko+>-BIK+1d!s|cW5d-M 
z!PWH!Qg9IM^XA0HC+}M0?{To}2XK4OO4RT1tqR@&iYB^<;x6|@xuyzR$nlht`n1_O znTnO7u~tm{syVpGv&>8T4J(Q2owSSPLhx3yaTQIk3OKlsFW;7eeap{OXd2Gw3ggb` zZd49ciWeXHdDxBUkev~_ci-Aov>mibesAcwg)R%Mms`H~>7*N7W!)3>j2e8-a11;9|{Ba3i%uo>sK&uS*c zVvz{IVYaDr`eZ2@e+=IVll%cwb(17bz#E`IH&qG zZ^y&34)t5p6TU+NmwiXf9F0z@I00Mn-gkh4S;zwX!l^ zLJ362^Le~qU6@0elzJt*+PTD8dmgeh^pH$7YJ)wRw9kql47aFhi6{KD31($*ex1 zDUFZ(jC1CebEoE$F@sPoIQPXU!7BDuIxBi`qVM6(Y*qGT=}YA}s~wuT(T38O_YKZ- z%9=?>HNP|vt4g3F5y>4e!2neH_Qfb{5Lrv=&iDiOhlz|m_)z5pC@$W;(u<}Tq~IQpT82(@7D^7Pi%itUfKij}DfayoN0v&Iq66OX z0}lJ~`F>5;D<(o(6CH4@Dz2TI->JSkG?~-4__c7sDu{4()_nAqeqcE)TT`LmTFD?iKalopcRP8#wulQq{tyI`&$4fLvW`}5QYCJ|J-OLkQb`f<$=w| zWy}RjWW4jAt5MO(yd-nJ4popGM2T15K4hVKGA(%<-yJi$Gd_w2e13;tdFK+sR&kfI z+ilS|xp+4&@yO)KIBT8>B?qGq2yxCZaojJ!qSp^BXDfE6)C%-+gw%4kF}2t5$$#Qw z&pV9axBe2Gep;tp&`DbN^}6bpMd~BhUJtaLYPHB$_T^P&FCFT=DN^fmLvF_!^il>} z5mdZ+aWHapqpEK#S=0w=pRrmeJs_)!`<;@|a%`WD_w|K@b?EA0B^fAP$m?G-= zEHT%JjT0az^Of7rJUiGz8VX6l3m(j4K{!SLQW%pmM_`Fd56T3K*@hSdF+Fd}6U#I(gP20%aY+=!Dg7gQ@HlvHZwS5=OUTHO`n z;SSkGAo|PJD7nVI(*$?8+jn9vF!Ww0YX#I> zdXB;*PVxM95uUB>;c>LXlhfrz8kskg`B^Q_q)CtblGP%mx(Woa0LqjV0!jKg(}xTx zV4-YFjpXAk7~2+-PGN`YOiMD82c#R`)v=ljnE3A11kP!1ssv z(|c|Y=lHPp-n2u+!#kY!p@{2EdDydCB7A(}_h41~1^R%mJsE#2y_q7M9UsS91>`c^FDuQ+c;6 zk#eY1)rr%o-B2>&6$VK+-s#dp)yx3E_zF?7@XDq0LXl49wyb^2cDpDONwp*Ex3?N> zl0(=>{tJ0u6wxk5_Aq<9Dii8dn+;NB*(Nai#mm5NZ(P|Ghr4$=wT~c^Z0|lYUqO99 z*DI=s1L~gVcd(Ly8>(Xl7@eE;pQINFrcIpZw2I1a#1n@nde*KgHuJmXX3}`>r=!CLjwP@2oh|ps94-fVR4_3kB96)E7wxe-e0a8sf8zVe(hW)iIvF+$)Cz9Mt=cM_ zfw@f$Bwx|pnn0$nynFWMCKLfr42LE*zr9%;LE{o_te8&Z)^aNSC4G}#t!F2;Zhi&AuD&?JBqI$8Jab*+Y+I71l8|qfKI<-gDa>FT)FZ;6*_yfXeC|p z>q*O}l>n2@&vR3uLS4W2il94t|CS%o+AZ@y>;8+Li8dU# zM@LTn-T4u7I$2dTjG6R1Pn-l`_1|5NHpG)&vZauVAr`W&rY(+RFd91Nguq`rQd}zB z>YMn>e5^mwe^ahb&AU1#j^j7xVQ??dDxi#QJz1u38^%0_GH2#TIG!!s?}kOC{xPCB z>rA5^_N~gTHZHp1`$`5)6xO9`l9OX&GF4m>{S#rpMeVNJXs;1TQb5&Q*8Dz;=!t!j z@ds&|Y=&82ffC*IO`I`ropx6z1IZ|>WVH=En|HeSj-H97&Dlp%T-uf8Od-nI&#&ZX 
z(8mnq@nWQN@u?i6hM60%@{ax}n-^Fa46HkjFj`9%`0Z-8%%<)#^g7Eye;C`a*czCimXW~A65}%zAk2#z;OAqI zppwd~nc`MTkgmhQTm+)a`LfwdT6te^%)d?}FVRyX=CKFLxC@wiVDDlw9#OZ`Tks@` zaWx3EsroNhHvIvo!$(C;bT4<`3l<~2=h7;h+Skxltaek%8AfxnE{at^$=mY4Ub6n- zQ`9)WVJ%y|c*%NJdO}?S)!F-*_wM=(Q6&AI1KV8n!#Qp`x136bf8-IkL#zFsn%<)Z z$NonjhTr(2nMH+Cx?h&kvxdG)djq(-hKB~OL>Hep4sfGh2iJMCbnK3nOWv?v+0;Gwe> z)|yGkY;u_ES}N!uTq7_l75afgGXpe3yIx^fjO(>|)x5G=-3^ZGoCvTcEJpt1h@54; zET<}L+@dTD{dYaH8cxYn3%xLFR9~e(Ihkt2LqYD>FxpG*I zRdV^F4#&a9jvbX&BjPI7WQ@Qn3X4d^V_IC1#w1!T!v~Y<*5GIRQqGo|cOubXKa2ifnbdTP0T2AgoAUFe9L1aj znFRI|+jY@zYp#Dd| z-zw3vyaZ=S6)23>x?nBt*{SZS!5ht6^;^wbr%>AMOCgDAxZ;&!@PALs;M9O8QXj{Y zn9r@z@#6q?ZZj^mo$0QvgU{1ntm#1Zh|fZOhvfy9gvh3H-7(p-^5KLl=EOe-Qq*0u zQu|hE`&-=?r-c?``u*a;D%OA4(5Kcx{pEf*cT&ldJA1FkFvEQQ$u-5~ch1kmEaTLXS z0ra@4uSV$ogjC~vpRVjE0HJwUr~8sT&5kC~`rH(2AF>izbX_(_KI&t6GQX)3ST1(Idi6;0bsGr;m*|q-;CAbMkfsM-FrPPhec>z*8h)L7yg{p&O+tSE!HV z;piaxYXIMDY7X)PLr3v8CPj6s3!-)Lj)%+E0hcow(m~H+RZf5}zZbKgp91m0V{|us z@5#(WYqwROQHC4My-)2zrCXl-Z0VO)Z|BJDWU147@T7=`XBdgVZDLQyiaIik|Y2P0q2?vsR?!?yFwUKc0pQ2|BN$=s1grV)Xio}=a7NZ7B zQZ}(N4==NRJBnqHdaJ>r*Q>kN_24Hc$Y^b;vG4lzG*|faGCITc3Zln+ZhaJj&G>?@ zV^v}*PXbRZpV4d5NgGme?axInC0MKofplQkxsKqI|CW^?_Zrjr;mf-6$(O=wQoSXB z3GpFz)!vhsDA1zuI{wo?VXHa4vNR?k`uo=l(+w{qJvoB0?tR(b+O$r-aqy~bX2-46 zxx3xcBS*lb5^6HM*)^I9^<#>H*f-a61$x~IC|4>b2ZC5f z;sC|d1#g)F{cEDl|H6S#0nSkhR-Cp?oTH`Iw}+?LzK&)661jPp{rG+#TL9efQ7nq` z;W5YJ7q3Q^{#o=Ss}fC1;jt8fRSSFx1zO%Z;&yfr6oU{pN%5=Cr|L@A=qV7ks8_FD z_RY1q#Z9N}>Bn|(DVK4yr6a@%wmo|4g%}*Z@(WS&+9|I)t61ejRm$T|4!^CZtTG7v zlBa)UFCF^Nnu951Ga_v~$iKbQi=|Q$SSSB}b0({520@~&>4F8hY(GXg778D}k#tKc z&5tm=6>1Inrx}$`1`3e>MVQXO$C^nwkN>)_%BI+J;Kg&%vC8Z-(>c$0J!=GCy|}8H+KteB6G%@sgb%o~u2oCVvnj=cC;vJI9Ve+G zKFK-ra&3H~Gdwk2Rmo>?o?fm}M7z=HG1VG-WeF@Z9%tWi?H?hy`E$BerVj6iSIM8E zZyKw2wjt=3A-zbM?|&%F=hkyKn7Xk1L?>PxBL5qyiN;h)UR0fj5hl8H3uQ*}|NTHL|vMvWeXtw~rre zy?;<+2Y8bgp`FR$yc3Mer=S{!^=^W#S7fJDNFqw=@tg&r9mTee!&Y-6@*Eorx+PP2 zlX4Hs-Z0HF`f|NBM*I>a()1YbznDqSRB8lxC)KxjdX5J1lvB>Lmny|))x!8f!`3^S 
z&Hrbh7f#r4WlSzX)jqwN*jIC*3zGC+<92Vx3ot2tDFbK^j?~vj7$DA$#SJEyp7<*) zfu*{_sxDs##Id!hHanQO_HOH%v8RU~o~oIy8f!vHVy0d1!Vl&!pbsL{7#CTVca$GS`Sz2$zlN47wdF_A?WB<&!c%l)i_3& z+({}Gz?5J3t&veV50lcBw%61OB!eZ?x2LI`&k+AoV1q=h%pNl&O?yvKQem$Yy_3i5 zmz}-Wuwe3e26Sdeus?TS04@BY;yCBhv^Tz=Jf1x&+>E1U8e)~oYSx8rHpNsa7sO=j zKwk$;jvOuu`DC=YkgBR`FrkkJV&Omr9WP-eOJe-#R}z>l}GS(ABO1r3-QWROT0+ z7wYysmQAov1fLz1uNijoY1({AX1e=HZMYYFD;M3_RE9p z8019&q*@5^c@ z+Qji4Gi@ih2izE+Mo-g{`ZfMJ9LIGhJN+YgL=W zRGYUtXZ8u*(}SmNsE-U3T!WKoX9>>#8ES*`Ee^370fA#017EJmboovbb=NX6y#t`1 z{7eZemZ2(5ZH=5BUD{LA$Fg(k6_44wzuYSY7DhR~lSZt#OUiwgu%9ZXZhjXNaw45o znPz{m6<^z%Qt~b^R!y`3y9Fr9geyU!_D`g)evqica%p^zmj3UzTZUf&`x2+RY{Q)Q zCM$nFT8~6`YZmn~?rl+oD0iGTZ0ICi)Xh9>=7s%MT5_7~&uN{r*^{+MJ0tkdf0ENr z)nb4+8^y7dLl_G#J^l65_^6CCq07X^0mZpJx3lEYE!Y>FLp(utA4x9M=LCt28^(xQ zUKS<=`;&4>;I0TclDcLq7?&Apw+Tb9=|!W`a}o)|GC5$uA1Ar#VFv*V? zwMmNT{CSS=OEfh*%&op_@-Aldgq)CO{k{OY6pZbz|N47SYhxoksZBFKVW2))c|QII zMEm5w6}-~A+tpU4J^1~(-|d%^!rcR97{v+Nv*5ojtyS%GEC$}NZ*)faUz-N1Kv_gf z1Gxr`)NK}xGBD2jp-3aYGMR zt6o-?8s~F5t1<VDa>}X!1NB(+NyTv6^ z+KIw#&E6o??8o9T%7RdZRAcHF^-`TK55(dfXI|tA-)@gPd1+oWqXTQ`;U7`AIXSxz zmfvlCI6Y{6HKs`NV^+y;eQ1Fqwr^h9ys2{PJjWU=wh$)~z3N z%{IlM5xZYt0Pv*ldB%o_c!%XO19DRg}W!NYU z*MPg-7uylvi{o{wi8_&o($)fLMPKIao9^DG-P}eF>W$y(k&_E&j(S3)yuEv-4?`oY>s=z6|2x0pGFXj&#B4KI zink-K*DwX>rsQo$$t9h?l`KnTW)m+}vE>0ASgkC+p(5jBRk*BVIN!HGK=E>C2enf! 
zA0sgBJ^?%iXO8i<-hefgZJPGn;ISLIE2sAUp{{FXJeriDE0oV7*^IJ0o9yixQ6aSG zRg%l1$Dqi$WwUIMU^?J0954XnrP5DW(p#r^RQHtJJJW^`h`CleU>S(Kep^OU4EN~(=rx>9sP z!tO=mgm*8FT_dN_(~NEJ5xMB-^a|sM`P-o{vZOxjaKmX8q3xvN_II}6Ue=y`ocRow zV~uZZgm{2c1aaJSNqvQMz^5G+zL+izUZ|~1+sSrVA3eOKUY%_PNS$PCh=^8PjYoPa z7pUQPL(nP6e3H#_gmWwer2aOUiO~UTjd}{-`kf=L>j~-Hs8*GEWhwB7OtqLO6SnVe z4_pCMllB6|Yg5|_u4Tn2ofcF*2rlE8dj3}zee(tFi!GhH!vtQD|1}6~^husBnbf)M z+%P(YZV>%%r;y|K`z&?^u{?!cER0ZoH2>55MDD(iRUFrkR4KtCrnYpVXm}T1)8^uA z?4tDl|RWiE}^f1j!Faqh%ZieO_~AIr$MA5`n8V$)+kO&%Do4*86i3{Qkj^dXMp^JB}3ue?#LSRYJnSE{R|zzv9WOp!x#ZIna6Oc_D(TT zCt_k>RlN}BOEw2m=XNsTMS^#@2r&r)M6$BV<>V=#%4An;VW8B-Mujz}VB{17eQ%Zi z0r2sAjRepUIlJ0;yX>+p-Q~^LAayYA;xyF0_&uDy&w02&7O9rT3r6E z!fBd?{~Uf}GZlQP43V6W-Y9#2|5}^dlQzg#b3J#8sOi5WOHMBkA8h8)N_AbzlH#WP zmUjhHK`67h0H+k_A<7gN!TF>9>27Q>lO*S@#nyz*MN_J3>gC9GFY@+(M}d4F#@{H8 zySp7QK74FdI7N42ri}Dcjyh+>eyi+1B^#7pc^6h^$eQf$?(Lb8CUB*G{6Evzv-;W= zX~#QImomgA>9!Q>P~9k(zUR33FINBncm}xgy&Y(9^TRZc)HnY31?<=kyYCpJH+wYy j{nP(nU*C6|`%risI+DICFSS`P&@WvrL(O`1+sOX|U#cD& literal 0 HcmV?d00001 diff --git a/assets/omoios-logo.svg b/assets/omoios-logo.svg new file mode 100644 index 00000000..0d7e1507 --- /dev/null +++ b/assets/omoios-logo.svg @@ -0,0 +1,34 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + From 8623ab129362e8cbfb9ea3359c6de645d6987fd5 Mon Sep 17 00:00:00 2001 From: Kevin Hill Date: Tue, 30 Dec 2025 17:49:19 -0300 Subject: [PATCH 025/290] Add LLM-based task requirements analyzer for dynamic validation - Add TaskRequirementsAnalyzer service that uses LLM structured output to intelligently determine execution mode and git validation requirements - Integrate analyzer into orchestrator_worker to replace hardcoded task type mappings with dynamic analysis - Update daytona_spawner to accept TaskRequirements for fine-grained control over commit/push/PR requirements - Fix infinite loop on TASK_COMPLETE for research tasks that don't produce code changes - Remove AgentExecutor from services/__init__.py due to 
openhands.sdk compatibility issues (can still import directly if needed) --- backend/omoi_os/services/__init__.py | 7 +- backend/omoi_os/services/daytona_spawner.py | 60 ++++ .../services/task_requirements_analyzer.py | 340 ++++++++++++++++++ .../omoi_os/workers/claude_sandbox_worker.py | 63 +++- .../workers/continuous_sandbox_worker.py | 61 +++- .../omoi_os/workers/orchestrator_worker.py | 95 ++++- 6 files changed, 608 insertions(+), 18 deletions(-) create mode 100644 backend/omoi_os/services/task_requirements_analyzer.py diff --git a/backend/omoi_os/services/__init__.py b/backend/omoi_os/services/__init__.py index 88ead46c..58b7d8c1 100644 --- a/backend/omoi_os/services/__init__.py +++ b/backend/omoi_os/services/__init__.py @@ -1,6 +1,9 @@ """Core services for OmoiOS.""" -from omoi_os.services.agent_executor import AgentExecutor +# NOTE: AgentExecutor removed from top-level imports due to openhands.sdk compatibility issues. +# Import directly from omoi_os.services.agent_executor if needed (legacy code only). +# The new Claude sandbox workers don't use AgentExecutor. 
+ from omoi_os.services.agent_health import AgentHealthService from omoi_os.services.agent_registry import AgentRegistryService from omoi_os.services.agent_status_manager import AgentStatusManager @@ -37,7 +40,7 @@ "Executor", "Reflector", "Curator", - "AgentExecutor", + # "AgentExecutor", # Removed - import directly if needed (legacy) "AgentHealthService", "AgentRegistryService", "AgentStatusManager", diff --git a/backend/omoi_os/services/daytona_spawner.py b/backend/omoi_os/services/daytona_spawner.py index c4ea7661..90883dee 100644 --- a/backend/omoi_os/services/daytona_spawner.py +++ b/backend/omoi_os/services/daytona_spawner.py @@ -25,6 +25,11 @@ from omoi_os.services.event_bus import EventBusService, SystemEvent from omoi_os.utils.datetime import utc_now +# TYPE_CHECKING import for TaskRequirements to avoid circular imports +from typing import TYPE_CHECKING +if TYPE_CHECKING: + from omoi_os.services.task_requirements_analyzer import TaskRequirements + logger = get_logger(__name__) @@ -136,6 +141,7 @@ async def spawn_for_task( runtime: str = "openhands", # "openhands" or "claude" execution_mode: str = "implementation", # "exploration", "implementation", "validation" continuous_mode: Optional[bool] = None, # None = auto-enable for implementation/validation + task_requirements: Optional["TaskRequirements"] = None, # LLM-analyzed requirements ) -> str: """Spawn a Daytona sandbox for executing a task. @@ -157,6 +163,9 @@ async def spawn_for_task( - None (default): Auto-enable for implementation/validation modes - True: Force enable - False: Force disable + task_requirements: Optional LLM-analyzed TaskRequirements object. + When provided, these settings override execution_mode-based defaults + for git validation requirements (commit, push, PR). 
Returns: Sandbox ID @@ -217,6 +226,57 @@ async def spawn_for_task( env_vars.setdefault("MAX_DURATION_SECONDS", "3600") # 1 hour logger.info("Continuous mode enabled for sandbox") + # Set validation requirements based on task_requirements (LLM-analyzed) or execution_mode + # task_requirements takes precedence when provided, as it's based on intelligent analysis + if runtime == "claude": + if task_requirements is not None: + # Use LLM-analyzed requirements for fine-grained control + env_vars.setdefault( + "REQUIRE_CLEAN_GIT", + "true" if task_requirements.requires_git_commit else "false" + ) + env_vars.setdefault( + "REQUIRE_CODE_PUSHED", + "true" if task_requirements.requires_git_push else "false" + ) + env_vars.setdefault( + "REQUIRE_PR_CREATED", + "true" if task_requirements.requires_pull_request else "false" + ) + env_vars.setdefault( + "REQUIRE_TESTS", + "true" if task_requirements.requires_tests else "false" + ) + # Also pass output type for context + env_vars.setdefault("TASK_OUTPUT_TYPE", task_requirements.output_type.value) + logger.info( + "Using LLM-analyzed task requirements", + extra={ + "execution_mode": task_requirements.execution_mode.value, + "output_type": task_requirements.output_type.value, + "requires_code": task_requirements.requires_code_changes, + "requires_commit": task_requirements.requires_git_commit, + "requires_push": task_requirements.requires_git_push, + "requires_pr": task_requirements.requires_pull_request, + "requires_tests": task_requirements.requires_tests, + "reasoning": task_requirements.reasoning[:100], + } + ) + elif execution_mode == "exploration": + # Fallback: Research/analysis tasks don't need git validation + env_vars.setdefault("REQUIRE_CLEAN_GIT", "false") + env_vars.setdefault("REQUIRE_CODE_PUSHED", "false") + env_vars.setdefault("REQUIRE_PR_CREATED", "false") + logger.info( + "Exploration mode: Git validation requirements disabled " + "(research/analysis task)" + ) + else: + # Fallback: Implementation and validation 
modes require full git workflow + env_vars.setdefault("REQUIRE_CLEAN_GIT", "true") + env_vars.setdefault("REQUIRE_CODE_PUSHED", "true") + env_vars.setdefault("REQUIRE_PR_CREATED", "true") + # Add agent type if specified if agent_type: env_vars["AGENT_TYPE"] = agent_type diff --git a/backend/omoi_os/services/task_requirements_analyzer.py b/backend/omoi_os/services/task_requirements_analyzer.py new file mode 100644 index 00000000..9f3d94ac --- /dev/null +++ b/backend/omoi_os/services/task_requirements_analyzer.py @@ -0,0 +1,340 @@ +"""Task Requirements Analyzer Service. + +Uses LLM structured output to analyze task descriptions and determine: +1. What type of work this task involves (exploration, implementation, validation) +2. Whether the task will produce code changes +3. What validation requirements are needed (commit, push, PR) + +This replaces hardcoded task type mappings with intelligent analysis. +""" + +from enum import Enum +from typing import Optional + +from pydantic import BaseModel, Field + +from omoi_os.logging import get_logger +from omoi_os.services.llm_service import LLMService, get_llm_service + +logger = get_logger(__name__) + + +class ExecutionMode(str, Enum): + """Execution mode determines sandbox behavior and skill loading.""" + + EXPLORATION = "exploration" + """Research, analysis, planning tasks. Creates specs/docs, not code.""" + + IMPLEMENTATION = "implementation" + """Code writing tasks. Creates/modifies source files.""" + + VALIDATION = "validation" + """Review and testing tasks. Verifies existing code.""" + + +class TaskOutputType(str, Enum): + """What type of output the task produces.""" + + ANALYSIS = "analysis" + """Research findings, answers to questions. No files created.""" + + DOCUMENTATION = "documentation" + """Markdown docs, specs, designs. May create .md files.""" + + CODE = "code" + """Source code changes. Creates/modifies code files.""" + + TESTS = "tests" + """Test files. 
Creates/modifies test code.""" + + CONFIGURATION = "configuration" + """Config files, environment setup.""" + + MIXED = "mixed" + """Combination of documentation and code.""" + + +class TaskRequirements(BaseModel): + """LLM-analyzed requirements for executing a task. + + This model is populated by the LLM analyzing the task description + to determine what validation and git workflow requirements apply. + """ + + execution_mode: ExecutionMode = Field( + description=( + "The execution mode for this task. " + "'exploration' for research/analysis/planning that doesn't write code. " + "'implementation' for tasks that write or modify code. " + "'validation' for code review, testing, or verification tasks." + ) + ) + + output_type: TaskOutputType = Field( + description=( + "What type of output this task will produce. " + "'analysis' = findings/answers only, no files. " + "'documentation' = markdown/spec files. " + "'code' = source code changes. " + "'tests' = test files. " + "'configuration' = config files. " + "'mixed' = combination of docs and code." + ) + ) + + requires_code_changes: bool = Field( + description=( + "Whether this task requires modifying or creating source code files. " + "True for implementation tasks, False for pure research/analysis." + ) + ) + + requires_git_commit: bool = Field( + description=( + "Whether the task output should be committed to git. " + "True if the task creates files that should be version controlled." + ) + ) + + requires_git_push: bool = Field( + description=( + "Whether changes should be pushed to remote repository. " + "True for tasks that produce deliverables to share." + ) + ) + + requires_pull_request: bool = Field( + description=( + "Whether a Pull Request should be created for review. " + "True for code changes that need review before merging. " + "False for research, analysis, or draft documentation." + ) + ) + + requires_tests: bool = Field( + description=( + "Whether the task output requires tests to be written or run. 
" + "True for implementation tasks, False for research/docs." + ) + ) + + reasoning: str = Field( + description=( + "Brief explanation of why these requirements were determined. " + "Helps with debugging and transparency." + ) + ) + + +# System prompt for the task analyzer +TASK_ANALYZER_SYSTEM_PROMPT = """You are a task requirements analyzer for a software development system. + +Your job is to analyze task descriptions and determine: +1. What type of work the task involves (research, coding, testing, etc.) +2. What deliverables the task will produce (analysis, docs, code, etc.) +3. What validation requirements apply (commit, push, PR, tests) + +Guidelines for analysis: + +## Execution Mode +- **exploration**: Research, analysis, investigation, planning, creating specs/designs + - Examples: "How does X work?", "Analyze the codebase", "Create a design doc", "What billing system exists?" + - Does NOT write source code, only reads/analyzes + +- **implementation**: Writing or modifying source code + - Examples: "Implement feature X", "Fix bug Y", "Add endpoint Z", "Refactor module W" + - Creates/modifies code files that need to be committed + +- **validation**: Reviewing, testing, or verifying existing code + - Examples: "Review PR #123", "Run tests", "Verify implementation meets requirements" + - May run tests but doesn't write new features + +## Output Type +- **analysis**: Pure research with no file output (answers, findings, explanations) +- **documentation**: Creates markdown files, specs, designs (but not source code) +- **code**: Creates or modifies source code files +- **tests**: Creates or modifies test files +- **configuration**: Creates config files, env setup, infrastructure +- **mixed**: Combination of documentation and code + +## Validation Requirements +- **requires_code_changes**: True only if source code files will be created/modified +- **requires_git_commit**: True if ANY files should be version controlled +- **requires_git_push**: True if 
deliverables should be shared (not just local analysis) +- **requires_pull_request**: True only for code changes that need review + - Research/analysis tasks should NOT require PR + - Documentation-only changes may or may not need PR depending on context +- **requires_tests**: True for code changes that should have test coverage + +Be conservative with PR requirements - only require PRs for actual code changes that need review.""" + + +class TaskRequirementsAnalyzer: + """Analyzes task descriptions to determine execution requirements. + + Uses LLM structured output to intelligently determine what type of + task this is and what validation requirements should apply. + + Usage: + analyzer = TaskRequirementsAnalyzer() + requirements = await analyzer.analyze( + task_description="Analyze how billing works in this codebase", + task_type="analyze_requirements", # optional hint + ) + + if requirements.requires_pull_request: + # Enable PR validation + ... + """ + + def __init__(self, llm_service: Optional[LLMService] = None): + """Initialize the analyzer. + + Args: + llm_service: Optional LLM service instance. If not provided, + uses the default singleton. + """ + self.llm = llm_service or get_llm_service() + + async def analyze( + self, + task_description: str, + task_type: Optional[str] = None, + ticket_title: Optional[str] = None, + ticket_description: Optional[str] = None, + ) -> TaskRequirements: + """Analyze a task to determine its requirements. 
+ + Args: + task_description: The full task description/prompt + task_type: Optional task type hint (e.g., "analyze_requirements") + ticket_title: Optional parent ticket title for context + ticket_description: Optional parent ticket description + + Returns: + TaskRequirements with execution mode and validation settings + """ + # Build the analysis prompt + prompt_parts = ["Analyze the following task and determine its requirements:\n"] + + if ticket_title: + prompt_parts.append(f"**Ticket Title:** {ticket_title}\n") + + if ticket_description: + # Truncate long descriptions + desc = ticket_description[:1000] + if len(ticket_description) > 1000: + desc += "..." + prompt_parts.append(f"**Ticket Description:** {desc}\n") + + if task_type: + prompt_parts.append(f"**Task Type:** {task_type}\n") + + prompt_parts.append(f"**Task Description:**\n{task_description[:2000]}") + + prompt = "\n".join(prompt_parts) + + try: + requirements = await self.llm.structured_output( + prompt=prompt, + output_type=TaskRequirements, + system_prompt=TASK_ANALYZER_SYSTEM_PROMPT, + output_retries=3, + ) + + logger.info( + "Analyzed task requirements", + extra={ + "execution_mode": requirements.execution_mode.value, + "output_type": requirements.output_type.value, + "requires_pr": requirements.requires_pull_request, + "requires_code": requirements.requires_code_changes, + "reasoning": requirements.reasoning[:100], + } + ) + + return requirements + + except Exception as e: + logger.error( + "Failed to analyze task requirements, using defaults", + extra={"error": str(e), "task_type": task_type} + ) + # Return safe defaults that assume implementation + return self._get_default_requirements(task_type) + + def _get_default_requirements(self, task_type: Optional[str] = None) -> TaskRequirements: + """Get default requirements when LLM analysis fails. + + Falls back to the original hardcoded logic as a safety net. 
+ """ + # Known exploration types (from original EXPLORATION_TASK_TYPES) + exploration_types = { + "explore_codebase", + "analyze_codebase", + "analyze_requirements", + "analyze_dependencies", + "create_spec", + "create_requirements", + "create_design", + "create_tickets", + "create_tasks", + "define_feature", + "research", + "discover", + "investigate", + } + + validation_types = { + "validate", + "validate_implementation", + "review_code", + "run_tests", + } + + if task_type in exploration_types: + return TaskRequirements( + execution_mode=ExecutionMode.EXPLORATION, + output_type=TaskOutputType.ANALYSIS, + requires_code_changes=False, + requires_git_commit=False, + requires_git_push=False, + requires_pull_request=False, + requires_tests=False, + reasoning="Fallback: Task type is in known exploration types", + ) + elif task_type in validation_types: + return TaskRequirements( + execution_mode=ExecutionMode.VALIDATION, + output_type=TaskOutputType.ANALYSIS, + requires_code_changes=False, + requires_git_commit=False, + requires_git_push=False, + requires_pull_request=False, + requires_tests=True, + reasoning="Fallback: Task type is in known validation types", + ) + else: + # Default to implementation (requires full git workflow) + return TaskRequirements( + execution_mode=ExecutionMode.IMPLEMENTATION, + output_type=TaskOutputType.CODE, + requires_code_changes=True, + requires_git_commit=True, + requires_git_push=True, + requires_pull_request=True, + requires_tests=True, + reasoning="Fallback: Unknown task type, assuming implementation", + ) + + +# Singleton instance +_analyzer_instance: Optional[TaskRequirementsAnalyzer] = None + + +def get_task_requirements_analyzer() -> TaskRequirementsAnalyzer: + """Get the singleton TaskRequirementsAnalyzer instance.""" + global _analyzer_instance + if _analyzer_instance is None: + _analyzer_instance = TaskRequirementsAnalyzer() + return _analyzer_instance diff --git a/backend/omoi_os/workers/claude_sandbox_worker.py 
b/backend/omoi_os/workers/claude_sandbox_worker.py index f338b902..ee3ec22e 100644 --- a/backend/omoi_os/workers/claude_sandbox_worker.py +++ b/backend/omoi_os/workers/claude_sandbox_worker.py @@ -2110,7 +2110,18 @@ def _get_stop_reason(self) -> str: return "unknown" async def _run_validation(self): - """Run git validation to check if work is truly complete.""" + """Run git validation to check if work is truly complete. + + Handles two scenarios: + 1. Implementation tasks: Require clean git, code pushed, and PR created + 2. Research/analysis tasks: No code changes, so skip git validation + + Detection of research tasks: + - Working directory is clean (no changes made) + - Not ahead of remote (nothing to push) + - On main/master branch (no feature branch created) + - OR execution_mode is "exploration" + """ state = self.iteration_state config = self.config @@ -2123,7 +2134,55 @@ async def _run_validation(self): state.code_pushed = git_status["is_pushed"] state.pr_created = git_status["has_pr"] - # Determine if validation passed based on config requirements + # CRITICAL: Detect research/analysis tasks that don't produce code changes + # These tasks should pass validation without requiring a PR + is_research_task = ( + # Clean working directory (no uncommitted changes) + git_status["is_clean"] and + # Not ahead of remote (nothing to push) + git_status["is_pushed"] and + # On main/master branch (no feature branch was created) + git_status.get("branch_name") in ("main", "master", None) + ) + + # Also treat exploration mode as research (no code changes expected) + if config.execution_mode == "exploration": + is_research_task = True + + if is_research_task: + logger.info( + "Detected research/analysis task - no code changes needed", + extra={ + "branch": git_status.get("branch_name"), + "is_clean": git_status["is_clean"], + "is_pushed": git_status["is_pushed"], + "execution_mode": config.execution_mode, + } + ) + state.validation_passed = True + state.validation_feedback 
= "Research/analysis task completed - no code changes required" + + # Report validation result for research task + if self.reporter: + await self.reporter.report( + "iteration.validation", + { + "iteration_num": state.iteration_num, + "passed": True, + "feedback": state.validation_feedback, + "task_type": "research", + "git_status": { + "is_clean": git_status["is_clean"], + "is_pushed": git_status["is_pushed"], + "has_pr": git_status["has_pr"], + "branch_name": git_status["branch_name"], + }, + "errors": [], + }, + ) + return + + # Standard validation for implementation tasks validation_errors = [] if config.require_clean_git and not git_status["is_clean"]: diff --git a/backend/omoi_os/workers/continuous_sandbox_worker.py b/backend/omoi_os/workers/continuous_sandbox_worker.py index bc1ff570..23a2016f 100644 --- a/backend/omoi_os/workers/continuous_sandbox_worker.py +++ b/backend/omoi_os/workers/continuous_sandbox_worker.py @@ -623,7 +623,18 @@ async def _run_single_iteration(self) -> bool: return False async def _run_validation(self): - """Run git validation to check if work is truly complete.""" + """Run git validation to check if work is truly complete. + + Handles two scenarios: + 1. Implementation tasks: Require clean git, code pushed, and PR created + 2. 
Research/analysis tasks: No code changes, so skip git validation + + Detection of research tasks: + - Working directory is clean (no changes made) + - Not ahead of remote (nothing to push) + - On main/master branch (no feature branch created) + - OR execution_mode is "exploration" + """ state = self.iteration_state config = self.continuous_config @@ -636,7 +647,53 @@ async def _run_validation(self): state.code_pushed = git_status["is_pushed"] state.pr_created = git_status["has_pr"] - # Determine if validation passed + # CRITICAL: Detect research/analysis tasks that don't produce code changes + # These tasks should pass validation without requiring a PR + is_research_task = ( + # Clean working directory (no uncommitted changes) + git_status["is_clean"] and + # Not ahead of remote (nothing to push) + git_status["is_pushed"] and + # On main/master branch (no feature branch was created) + git_status.get("branch_name") in ("main", "master", None) + ) + + # Also treat exploration mode as research (no code changes expected) + if hasattr(config, "execution_mode") and config.execution_mode == "exploration": + is_research_task = True + + if is_research_task: + logger.info( + "Detected research/analysis task - no code changes needed", + extra={ + "branch": git_status.get("branch_name"), + "is_clean": git_status["is_clean"], + "is_pushed": git_status["is_pushed"], + } + ) + state.validation_passed = True + state.validation_feedback = "Research/analysis task completed - no code changes required" + + # Report validation result for research task + await self.reporter.report( + "iteration.validation", + { + "iteration_num": state.iteration_num, + "passed": True, + "feedback": state.validation_feedback, + "task_type": "research", + "git_status": { + "is_clean": git_status["is_clean"], + "is_pushed": git_status["is_pushed"], + "has_pr": git_status["has_pr"], + "branch_name": git_status["branch_name"], + }, + "errors": [], + }, + ) + return + + # Standard validation for 
implementation tasks validation_errors = [] if config.require_clean_git and not git_status["is_clean"]: diff --git a/backend/omoi_os/workers/orchestrator_worker.py b/backend/omoi_os/workers/orchestrator_worker.py index 19aa009e..960a2b06 100644 --- a/backend/omoi_os/workers/orchestrator_worker.py +++ b/backend/omoi_os/workers/orchestrator_worker.py @@ -16,7 +16,7 @@ import signal import sys import time -from typing import TYPE_CHECKING, Literal +from typing import TYPE_CHECKING, Literal, Optional # Configure logging before any other imports that might log from omoi_os.logging import configure_logging, get_logger @@ -29,9 +29,13 @@ from omoi_os.services.task_queue import TaskQueueService from omoi_os.services.event_bus import EventBusService from omoi_os.services.agent_registry import AgentRegistryService + from omoi_os.services.task_requirements_analyzer import TaskRequirements logger = get_logger("orchestrator") +# Task requirements analyzer (initialized in init_services) +task_analyzer: Optional["TaskRequirementsAnalyzer"] = None + # Services (initialized in init_services) db: DatabaseService | None = None queue: TaskQueueService | None = None @@ -55,15 +59,25 @@ # Task type categories for execution mode determination +# These task types are research/analysis focused and do NOT produce code changes. +# They get execution_mode="exploration" which disables git validation requirements. 
EXPLORATION_TASK_TYPES = frozenset([ + # Codebase exploration and analysis "explore_codebase", + "analyze_codebase", + "analyze_requirements", # Research task - doesn't write code + "analyze_dependencies", + # Spec and requirements creation (produces docs, not code) "create_spec", "create_requirements", "create_design", "create_tickets", "create_tasks", - "analyze_dependencies", "define_feature", + # Research and discovery + "research", + "discover", + "investigate", ]) VALIDATION_TASK_TYPES = frozenset([ @@ -75,7 +89,10 @@ def get_execution_mode(task_type: str) -> Literal["exploration", "implementation", "validation"]: - """Determine execution mode based on task type. + """Determine execution mode based on task type (fallback method). + + This is the fallback for when LLM-based analysis is unavailable. + Prefer using analyze_task_requirements() for intelligent analysis. This controls which skills are loaded into the sandbox: - exploration: spec-driven-dev skill for creating specs/tickets/tasks @@ -98,6 +115,46 @@ def get_execution_mode(task_type: str) -> Literal["exploration", "implementation return "implementation" +async def analyze_task_requirements( + task_description: str, + task_type: Optional[str] = None, + ticket_title: Optional[str] = None, + ticket_description: Optional[str] = None, +) -> "TaskRequirements": + """Analyze a task using LLM to determine its requirements. + + Uses the TaskRequirementsAnalyzer to intelligently determine: + - Execution mode (exploration, implementation, validation) + - Output type (analysis, documentation, code, tests, etc.) + - Git workflow requirements (commit, push, PR) + - Whether tests are required + + Falls back to hardcoded task type mappings if LLM analysis fails. 
+ + Args: + task_description: The full task description/prompt + task_type: Optional task type hint (e.g., "analyze_requirements") + ticket_title: Optional parent ticket title for context + ticket_description: Optional parent ticket description + + Returns: + TaskRequirements with all validation and execution settings + """ + global task_analyzer + + if task_analyzer is None: + # Lazy initialization if not already done in init_services + from omoi_os.services.task_requirements_analyzer import get_task_requirements_analyzer + task_analyzer = get_task_requirements_analyzer() + + return await task_analyzer.analyze( + task_description=task_description, + task_type=task_type, + ticket_title=ticket_title, + ticket_description=ticket_description, + ) + + async def heartbeat_task(): """Log heartbeat every 30 seconds to confirm worker is alive.""" heartbeat_num = 0 @@ -544,19 +601,27 @@ async def orchestrator_loop(): ticket_type=extra_env.get("TICKET_TYPE"), ) - # Determine execution mode based on task type - # This controls which skills are loaded into the sandbox: - # - exploration: spec-driven-dev (for creating specs/tickets/tasks) - # - implementation: git-workflow, code-review, etc. 
(default) - # - validation: code-review, test-writer - execution_mode = get_execution_mode(task.task_type) + # Analyze task requirements using LLM for intelligent determination + # This replaces hardcoded task type mappings with dynamic analysis + # Falls back to get_execution_mode() if analysis fails + task_requirements = await analyze_task_requirements( + task_description=task.description or "", + task_type=task.task_type, + ticket_title=extra_env.get("TICKET_TITLE"), + ticket_description=extra_env.get("TICKET_DESCRIPTION"), + ) + execution_mode = task_requirements.execution_mode.value log.info( - "execution_mode_determined", + "task_requirements_analyzed", task_type=task.task_type, execution_mode=execution_mode, + output_type=task_requirements.output_type.value, + requires_code=task_requirements.requires_code_changes, + requires_pr=task_requirements.requires_pull_request, + reasoning=task_requirements.reasoning[:100], ) - # Spawn sandbox with user/repo context + # Spawn sandbox with user/repo context and analyzed requirements sandbox_id = await daytona_spawner.spawn_for_task( task_id=task_id, agent_id=agent_id, @@ -565,6 +630,7 @@ async def orchestrator_loop(): extra_env=extra_env if extra_env else None, runtime=sandbox_runtime, execution_mode=execution_mode, + task_requirements=task_requirements, ) log.info( @@ -1018,7 +1084,7 @@ async def idle_sandbox_check_loop(): async def init_services(): """Initialize required services.""" - global db, queue, event_bus, registry_service + global db, queue, event_bus, registry_service, task_analyzer logger.info("initializing_services") @@ -1027,6 +1093,7 @@ async def init_services(): from omoi_os.services.task_queue import TaskQueueService from omoi_os.services.event_bus import EventBusService from omoi_os.services.agent_registry import AgentRegistryService + from omoi_os.services.task_requirements_analyzer import get_task_requirements_analyzer app_settings = get_app_settings() @@ -1046,6 +1113,10 @@ async def 
init_services(): registry_service = AgentRegistryService(db) logger.info("service_initialized", service="agent_registry") + # Task Requirements Analyzer (LLM-based task analysis) + task_analyzer = get_task_requirements_analyzer() + logger.info("service_initialized", service="task_requirements_analyzer") + logger.info("all_services_initialized") From 1f2faf9a935ae65af816806be8a4efe01bdb305a Mon Sep 17 00:00:00 2001 From: Kevin Hill Date: Tue, 30 Dec 2025 17:51:00 -0300 Subject: [PATCH 026/290] Upgrade default Fireworks model to minimax-m2p1 --- backend/omoi_os/services/pydantic_ai_service.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/backend/omoi_os/services/pydantic_ai_service.py b/backend/omoi_os/services/pydantic_ai_service.py index 6efd7c21..f3bd18cc 100644 --- a/backend/omoi_os/services/pydantic_ai_service.py +++ b/backend/omoi_os/services/pydantic_ai_service.py @@ -46,10 +46,10 @@ def _get_fireworks_model(self) -> str: """ Get Fireworks model name from settings. - Defaults to minimax-m2 if not specified. + Defaults to minimax-m2p1 if not specified. 
Returns: - Fireworks model string (e.g., "accounts/fireworks/models/minimax-m2") + Fireworks model string (e.g., "accounts/fireworks/models/minimax-m2p1") """ # If model is already a Fireworks model, use it if ( @@ -58,8 +58,8 @@ def _get_fireworks_model(self) -> str: ): return self.settings.model - # Default to GPT-OSS-120B (cost-effective alternative to kimi-k2) - return "accounts/fireworks/models/gpt-oss-120b" + # Default to minimax-m2p1 (upgraded from m2, cost-effective) + return "accounts/fireworks/models/minimax-m2p1" def create_agent( self, From e259f85ddedbb112752caddc3fb5fceb0a8b1a1e Mon Sep 17 00:00:00 2001 From: Kevin Hill Date: Tue, 30 Dec 2025 17:57:29 -0300 Subject: [PATCH 027/290] Add subtle renderers for iteration/continuous events in EventRenderer Replace raw JSON display with subtle inline renderers for: - iteration.started: Very subtle play icon with iteration number - iteration.completed: Shows iteration, cost, completion badge - iteration.validation: Pass/fail indicator with feedback - iteration.completion_signal: Shows TASK_COMPLETE detection count - continuous.started: Spinner with "Continuous mode started" - continuous.completed: Summary with reason, iterations, cost, time - SANDBOX_SPAWNED / VALIDATION_SANDBOX_SPAWNED: Subtle sandbox IDs All renderers use low opacity (40-60%) with hover:opacity-100 transition for a clean, unobtrusive display that reveals more detail on hover. 
--- frontend/components/sandbox/EventRenderer.tsx | 158 ++++++++++++++++++ 1 file changed, 158 insertions(+) diff --git a/frontend/components/sandbox/EventRenderer.tsx b/frontend/components/sandbox/EventRenderer.tsx index 67dcb8ac..e62cd5e5 100644 --- a/frontend/components/sandbox/EventRenderer.tsx +++ b/frontend/components/sandbox/EventRenderer.tsx @@ -2431,6 +2431,164 @@ export function EventRenderer({ event, className }: EventRendererProps) { // Skip noise if (event_type.includes("heartbeat")) return null + // ============================================================================ + // Iteration & Continuous Mode Events - Subtle inline display + // ============================================================================ + + // Iteration started - very subtle, just a small indicator + if (event_type === "iteration.started") { + const iterNum = getNumber(data, "iteration_num") + return ( +

+ ) + } + + // Iteration completed - show cost and brief summary + if (event_type === "iteration.completed") { + const iterNum = getNumber(data, "iteration_num") + const costUsd = getNumber(data, "cost_usd") + const outputPreview = getString(data, "output_preview") + const completionCount = getNumber(data, "completion_signal_count") + + // If there's a meaningful output preview, show it more prominently + if (outputPreview && outputPreview.length > 20) { + return null // Skip - the actual content will be shown by other events + } + + return ( +
+
+ + Iteration {iterNum} + {costUsd > 0 && ( + + ${costUsd.toFixed(4)} + + )} + {completionCount > 0 && ( + + complete + + )} +
+
+ ) + } + + // Iteration validation - show pass/fail status subtly + if (event_type === "iteration.validation") { + const passed = data.passed === true + const errors = Array.isArray(data.errors) ? data.errors : [] + const feedback = getString(data, "feedback") + + // If validation passed, be very subtle + if (passed) { + return ( +
+
+ + Validation passed +
+
+ ) + } + + // If validation failed, show a bit more info but still subtle + return ( +
+
+ + Validation: {feedback || errors.join(", ") || "checking..."} +
+
+ ) + } + + // Completion signal - very subtle, just shows detection + if (event_type === "iteration.completion_signal") { + const signalCount = getNumber(data, "signal_count") + const threshold = getNumber(data, "threshold") + + return ( +
+
+ + TASK_COMPLETE detected ({signalCount}/{threshold}) +
+
+ ) + } + + // Continuous mode started - subtle indicator + if (event_type === "continuous.started") { + return ( +
+
+ + Continuous mode started +
+
+ ) + } + + // Continuous mode completed - show summary + if (event_type === "continuous.completed") { + const stopReason = getString(data, "stop_reason") + const totalIterations = getNumber(data, "iteration_num") + const totalCost = getNumber(data, "total_cost_usd") + const elapsedSecs = getNumber(data, "elapsed_seconds") + + const reasonLabel = stopReason === "task_complete" ? "Task completed" : + stopReason === "max_iterations_reached" ? "Max iterations" : + stopReason === "validation_passed" ? "Validation passed" : + stopReason || "Completed" + + return ( +
+
+ + {reasonLabel} + + {totalIterations} iterations + {totalCost > 0 && ( + <> + + ${totalCost.toFixed(2)} + + )} + {elapsedSecs > 0 && ( + <> + + {Math.round(elapsedSecs)}s + + )} +
+
+ ) + } + + // SANDBOX_SPAWNED and VALIDATION_SANDBOX_SPAWNED - subtle system events + if (event_type === "SANDBOX_SPAWNED" || event_type === "VALIDATION_SANDBOX_SPAWNED") { + const sandboxId = getString(data, "sandbox_id") + const shortId = sandboxId ? sandboxId.slice(-8) : "" + const isValidation = event_type.includes("VALIDATION") + + return ( +
+
+ + {isValidation ? "Validation sandbox" : "Sandbox"} spawned + {shortId && {shortId}} +
+
+ ) + } + // Unknown events - show collapsed const hasContent = Object.keys(data).length > 0 if (!hasContent) return null From 015fa7f95e26032daf0484ed96b9079985d4c4e0 Mon Sep 17 00:00:00 2001 From: Kevin Hill Date: Tue, 30 Dec 2025 18:16:43 -0300 Subject: [PATCH 028/290] Switch to bypassPermissions mode for all sandbox workers Changed default permission_mode from "acceptEdits" to "bypassPermissions" across all sandbox worker implementations to auto-approve all tool uses. Files updated: - omoi_os/workers/claude_sandbox_worker.py - omoi_os/workers/sandbox_agent_worker.py - omoi_os/services/daytona_spawner.py - omoi_os/services/claude_agent_worker.py - scripts/test_spawner_e2e.py - scripts/test_direct_task_injection.py - scripts/test_sandbox_claude_sdk.py This eliminates permission prompts in sandboxed environments where the agent has full control of an isolated workspace. --- backend/omoi_os/services/claude_agent_worker.py | 2 +- backend/omoi_os/services/daytona_spawner.py | 2 +- backend/omoi_os/workers/claude_sandbox_worker.py | 2 +- backend/omoi_os/workers/sandbox_agent_worker.py | 2 +- backend/scripts/test_direct_task_injection.py | 2 +- backend/scripts/test_sandbox_claude_sdk.py | 4 ++-- backend/scripts/test_spawner_e2e.py | 2 +- 7 files changed, 8 insertions(+), 8 deletions(-) diff --git a/backend/omoi_os/services/claude_agent_worker.py b/backend/omoi_os/services/claude_agent_worker.py index 98cb4c60..c220d69d 100644 --- a/backend/omoi_os/services/claude_agent_worker.py +++ b/backend/omoi_os/services/claude_agent_worker.py @@ -239,7 +239,7 @@ async def track_tool_use(input_data, tool_use_id, _context): # Configure the agent options = ClaudeAgentOptions( allowed_tools=tool_names + ["Read", "Write", "Bash", "Edit", "Glob", "Grep"], - permission_mode="acceptEdits", # Auto-accept file edits in sandbox + permission_mode="bypassPermissions", # Auto-approve all in sandbox system_prompt=f"""You are an AI coding agent working on a software development task. 
Your workspace is at {workspace_dir}. You have access to tools for reading/writing files, diff --git a/backend/omoi_os/services/daytona_spawner.py b/backend/omoi_os/services/daytona_spawner.py index 90883dee..1ec06a00 100644 --- a/backend/omoi_os/services/daytona_spawner.py +++ b/backend/omoi_os/services/daytona_spawner.py @@ -1949,7 +1949,7 @@ async def track_tool_use(input_data, tool_use_id, context): "Task", # Subagent dispatch "Skill", # Skill invocation ], - permission_mode="acceptEdits", + permission_mode="bypassPermissions", system_prompt=f"""You are an AI coding agent. Your workspace is /workspace. Be thorough and test your changes. You have access to specialized subagents: diff --git a/backend/omoi_os/workers/claude_sandbox_worker.py b/backend/omoi_os/workers/claude_sandbox_worker.py index ee3ec22e..c4e8270a 100644 --- a/backend/omoi_os/workers/claude_sandbox_worker.py +++ b/backend/omoi_os/workers/claude_sandbox_worker.py @@ -838,7 +838,7 @@ def __init__(self): # SDK settings self.max_turns = int(os.environ.get("MAX_TURNS", "50")) self.max_budget_usd = float(os.environ.get("MAX_BUDGET_USD", "10.0")) - self.permission_mode = os.environ.get("PERMISSION_MODE", "acceptEdits") + self.permission_mode = os.environ.get("PERMISSION_MODE", "bypassPermissions") self.cwd = os.environ.get("CWD", "/workspace") # System prompt - use append pattern to extend rather than replace diff --git a/backend/omoi_os/workers/sandbox_agent_worker.py b/backend/omoi_os/workers/sandbox_agent_worker.py index 26c5a3a4..dc6f9054 100644 --- a/backend/omoi_os/workers/sandbox_agent_worker.py +++ b/backend/omoi_os/workers/sandbox_agent_worker.py @@ -88,7 +88,7 @@ def __init__(self): self.poll_interval = float(os.environ.get("POLL_INTERVAL", "0.5")) self.heartbeat_interval = int(os.environ.get("HEARTBEAT_INTERVAL", "30")) self.max_turns = int(os.environ.get("MAX_TURNS", "50")) - self.permission_mode = os.environ.get("PERMISSION_MODE", "acceptEdits") + self.permission_mode = 
os.environ.get("PERMISSION_MODE", "bypassPermissions") self.system_prompt = os.environ.get( "SYSTEM_PROMPT", "You are a helpful coding assistant working in a sandboxed environment. " diff --git a/backend/scripts/test_direct_task_injection.py b/backend/scripts/test_direct_task_injection.py index 827d18f1..0a712aef 100644 --- a/backend/scripts/test_direct_task_injection.py +++ b/backend/scripts/test_direct_task_injection.py @@ -181,7 +181,7 @@ async def run_agent(task: str): options = ClaudeAgentOptions( allowed_tools=["Read", "Write", "Edit", "Bash", "Glob", "Grep", "LS"], - permission_mode="acceptEdits", # Auto-accept file edits + permission_mode="bypassPermissions", # Auto-approve all system_prompt=f"""You are a helpful coding assistant working in {work_dir}. IMPORTANT RULES: diff --git a/backend/scripts/test_sandbox_claude_sdk.py b/backend/scripts/test_sandbox_claude_sdk.py index 8e23c225..516d4671 100644 --- a/backend/scripts/test_sandbox_claude_sdk.py +++ b/backend/scripts/test_sandbox_claude_sdk.py @@ -181,7 +181,7 @@ def test_claude_sdk(): # Test creating options object with GLM model options = ClaudeAgentOptions( allowed_tools=["Read", "Write", "Bash"], - permission_mode="acceptEdits", + permission_mode="bypassPermissions", system_prompt="Test system prompt", cwd=Path("/tmp"), max_turns=10, @@ -228,7 +228,7 @@ async def test_api(): options = ClaudeAgentOptions( allowed_tools=["Bash"], - permission_mode="acceptEdits", + permission_mode="bypassPermissions", system_prompt="You are a helpful assistant. 
Be concise.", cwd=Path("/tmp"), max_turns=5, diff --git a/backend/scripts/test_spawner_e2e.py b/backend/scripts/test_spawner_e2e.py index f3401c00..089210b0 100644 --- a/backend/scripts/test_spawner_e2e.py +++ b/backend/scripts/test_spawner_e2e.py @@ -173,7 +173,7 @@ async def test(): options = ClaudeAgentOptions( allowed_tools=["Read", "Write", "Edit", "Bash", "LS"], - permission_mode="acceptEdits", + permission_mode="bypassPermissions", system_prompt="You are a helpful assistant. Always write code to files before running them. Use relative paths.", cwd=workspace, max_turns=10, From c774302187788d7265b6c12432b9535c12299148 Mon Sep 17 00:00:00 2001 From: Kevin Hill Date: Tue, 30 Dec 2025 18:20:44 -0300 Subject: [PATCH 029/290] Simplify settings.local.json to use wildcard permissions Changed the Claude settings.local.json from an explicit tool list to using "*" wildcard in the allow array. This: - Allows ALL tools without prompts (Bash, Read, Write, WebSearch, etc.) - Future-proofs against new tools being added - Eliminates permission issues from missing tools in the allow list The previous explicit list was missing several tools like: WebSearch, Skill, LSP, NotebookEdit, AskUserQuestion, KillShell, EnterPlanMode, ExitPlanMode, etc. 
--- backend/omoi_os/services/daytona_spawner.py | 15 ++------------- 1 file changed, 2 insertions(+), 13 deletions(-) diff --git a/backend/omoi_os/services/daytona_spawner.py b/backend/omoi_os/services/daytona_spawner.py index 1ec06a00..ccfa70c3 100644 --- a/backend/omoi_os/services/daytona_spawner.py +++ b/backend/omoi_os/services/daytona_spawner.py @@ -757,21 +757,10 @@ def escape_env_value(v: str) -> str: # Create settings.local.json with FULL permissions # This is a sandbox environment - allow everything without prompts + # Using "*" wildcard to allow ALL tools including future additions settings_content = """{ "permissions": { - "allow": [ - "Bash(*)", - "Read(*)", - "Write(*)", - "Edit(*)", - "MultiEdit(*)", - "Glob(*)", - "Grep(*)", - "WebFetch(*)", - "TodoWrite(*)", - "Task(*)", - "mcp__*" - ], + "allow": ["*"], "deny": [] } }""" From 9c75525ad213c983b57340431d7d0c56b6c2794b Mon Sep 17 00:00:00 2001 From: Kevin Hill Date: Tue, 30 Dec 2025 18:21:28 -0300 Subject: [PATCH 030/290] Add explicit list of ALL Claude Code tools to settings.local.json Explicitly listing every known tool plus wildcards: - Bash, Read, Write, Edit, MultiEdit - Glob, Grep, LS - WebFetch, WebSearch - TodoWrite, Task, TaskOutput - Skill, LSP, NotebookEdit - AskUserQuestion, KillShell - EnterPlanMode, ExitPlanMode - mcp__*(*) for all MCP tools - * as final catch-all Belt and suspenders approach to ensure no permission issues. 
--- backend/omoi_os/services/daytona_spawner.py | 27 +++++++++++++++++++-- 1 file changed, 25 insertions(+), 2 deletions(-) diff --git a/backend/omoi_os/services/daytona_spawner.py b/backend/omoi_os/services/daytona_spawner.py index ccfa70c3..afb7e392 100644 --- a/backend/omoi_os/services/daytona_spawner.py +++ b/backend/omoi_os/services/daytona_spawner.py @@ -757,10 +757,33 @@ def escape_env_value(v: str) -> str: # Create settings.local.json with FULL permissions # This is a sandbox environment - allow everything without prompts - # Using "*" wildcard to allow ALL tools including future additions + # Explicitly listing ALL Claude Code tools to ensure nothing is missed settings_content = """{ "permissions": { - "allow": ["*"], + "allow": [ + "Bash(*)", + "Read(*)", + "Write(*)", + "Edit(*)", + "MultiEdit(*)", + "Glob(*)", + "Grep(*)", + "LS(*)", + "WebFetch(*)", + "WebSearch(*)", + "TodoWrite(*)", + "Task(*)", + "TaskOutput(*)", + "Skill(*)", + "LSP(*)", + "NotebookEdit(*)", + "AskUserQuestion(*)", + "KillShell(*)", + "EnterPlanMode(*)", + "ExitPlanMode(*)", + "mcp__*(*)", + "*" + ], "deny": [] } }""" From 19de9b3f6c453a54b2be5eea87fa1ff3bd7cb160 Mon Sep 17 00:00:00 2001 From: Kevin Hill Date: Tue, 30 Dec 2025 18:24:09 -0300 Subject: [PATCH 031/290] Add 'local' to default setting_sources for Claude SDK Updated default from "user,project" to "user,project,local" to load: - user: ~/.claude/settings.json (user-level defaults) - project: .claude/settings.json (project-specific overrides) - local: ~/.claude.json or .claude.json (per-user project state) This ensures all settings sources are loaded in the sandbox. 
--- backend/omoi_os/workers/claude_sandbox_worker.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/backend/omoi_os/workers/claude_sandbox_worker.py b/backend/omoi_os/workers/claude_sandbox_worker.py index c4e8270a..fa0fa423 100644 --- a/backend/omoi_os/workers/claude_sandbox_worker.py +++ b/backend/omoi_os/workers/claude_sandbox_worker.py @@ -1031,8 +1031,11 @@ def __init__(self): "append": combined_append, } if combined_append else None - # Setting sources for loading skills - setting_sources_str = os.environ.get("SETTING_SOURCES", "user,project") + # Setting sources for loading skills and settings + # - user: ~/.claude/settings.json (user-level defaults) + # - project: .claude/settings.json (project-specific overrides) + # - local: ~/.claude.json or .claude.json (per-user project state) + setting_sources_str = os.environ.get("SETTING_SOURCES", "user,project,local") self.setting_sources = [ s.strip() for s in setting_sources_str.split(",") if s.strip() ] From 20147560b77263af71a10b5d275eb07364da2662 Mon Sep 17 00:00:00 2001 From: Kevin Hill Date: Tue, 30 Dec 2025 18:50:16 -0300 Subject: [PATCH 032/290] Add pricing, FAQ sections and update landing page for pre-sales - Add PricingSection with Free/Pro/Team tiers focused on concurrent agents - Add FAQSection covering launch, billing, concurrent agents, BYO keys - Update WaitlistCTASection to "Reserve Early Access" with founding member benefits - Update WorkflowSection with "You Sleep. Agents Ship." 
messaging - Update pricing_strategy.md with cleaner pricing model - De-emphasize workflow limits, lead with concurrent agents as differentiator --- docs/design/billing/pricing_strategy.md | 47 ++-- .../frontend/landing_page_enhancement.md | 239 ++++++++++++++++++ frontend/app/page.tsx | 8 + frontend/components/marketing/index.ts | 2 + .../marketing/sections/FAQSection.tsx | 121 +++++++++ .../marketing/sections/PricingSection.tsx | 200 +++++++++++++++ .../marketing/sections/WaitlistCTASection.tsx | 59 ++++- .../marketing/sections/WorkflowSection.tsx | 57 +++-- 8 files changed, 681 insertions(+), 52 deletions(-) create mode 100644 frontend/components/marketing/sections/FAQSection.tsx create mode 100644 frontend/components/marketing/sections/PricingSection.tsx diff --git a/docs/design/billing/pricing_strategy.md b/docs/design/billing/pricing_strategy.md index 477cc1b6..760ae627 100644 --- a/docs/design/billing/pricing_strategy.md +++ b/docs/design/billing/pricing_strategy.md @@ -21,25 +21,27 @@ The core value exchange: **Users get powerful AI-driven engineering execution; O ## Pricing Tiers -### Tier 1: Lifetime Access (Early Adopter) +### Tier 1: Lifetime Access (Founding Member) -**Target Audience**: First 100-500 users who want permanent access at a one-time cost. +**Target Audience**: First 100 users who want permanent access at a one-time cost. | Attribute | Details | |-----------|---------| -| **Price** | $299-$499 one-time (TBD based on market testing) | +| **Price** | $299 one-time | +| **Concurrent Agents** | 5 agents running in parallel | | **Workflows/Month** | 50 workflows included | -| **Overage** | Must add own API keys (MiniMax, Z.ai, etc.) 
| -| **Limits** | 5 concurrent agents, 100GB storage, 1 organization | -| **Support** | Community + Email | +| **BYO API Keys** | Early access (before public Pro/Team release) | +| **Limits** | 5 projects, 50GB storage | +| **Support** | Priority email | | **Data Rights** | OmoiOS retains trajectory/agent data for training | **Value Proposition**: - "Pay once, use forever" - appeals to cost-conscious early adopters +- **Early access to BYO keys** - founding members get this before anyone else - Users who exceed limits bring their own API keys (no cost to OmoiOS) - OmoiOS gets valuable training data from power users -**Marketing Hook**: *"Founding Member: Lifetime access for early believers"* +**Marketing Hook**: *"Founding Member: Lifetime access + first dibs on BYO keys"* --- @@ -57,12 +59,18 @@ The core value exchange: **Users get powerful AI-driven engineering execution; O #### Standard Subscription Tiers -| Tier | Price/Month | Workflows | Agents | Storage | Best For | -|------|-------------|-----------|--------|---------|----------| -| **Starter** | $29/month | 20 | 2 concurrent | 10GB | Solo developers | -| **Pro** | $79/month | 100 | 5 concurrent | 50GB | Small teams (2-5) | -| **Team** | $199/month | 500 | 15 concurrent | 200GB | Mid-size teams (5-15) | -| **Enterprise** | Custom | Unlimited | Unlimited | Unlimited | Large orgs (15+) | +| Tier | Price/Month | Concurrent Agents | Workflows/Month | BYO Keys | Best For | +|------|-------------|-------------------|-----------------|----------|----------| +| **Free** | $0/month | 1 | 5 | No | Trying it out | +| **Pro** | $50/month | 5 | 100 | Yes | Individual developers | +| **Team** | $150/month | 10 | 500 | Yes | Growing teams | +| **Enterprise** | Custom | Unlimited | Unlimited | Yes | Large orgs (15+) | + +**Key Concepts:** +- **Concurrent Agents**: Primary differentiator. How many agents can run in parallel per project. 
+- **Task Queuing**: When users hit their concurrent limit, tasks queue up and run when a slot opens (no lost work). +- **BYO API Keys**: Pro+ users can bring their own LLM keys to bypass workflow limits. They pay the LLM provider directly. +- **Workflows/Month**: Secondary limit. De-emphasized in marketing but still enforced for free tier cost protection. **Usage Overages** (applies to all subscription tiers): - Additional workflows: $5-15 per workflow (based on complexity) @@ -84,8 +92,11 @@ The core value exchange: **Users get powerful AI-driven engineering execution; O **Free Tier** (Always Available): - 5 workflows/month +- 1 project +- 1 concurrent agent - 2GB storage - Community support +- Basic analytics - Resets on the 1st of each month --- @@ -200,12 +211,14 @@ user_credentials - BYO API key storage (anthropic, openai, z_ai, github) Products: ├── omoios_lifetime │ └── Price: $299-499 one-time -├── omoios_starter -│ └── Price: $29/month recurring +├── omoios_free +│ └── Price: $0/month (free tier tracking) ├── omoios_pro -│ └── Price: $79/month recurring +│ └── Price: $50/month recurring ├── omoios_team -│ └── Price: $199/month recurring +│ └── Price: $150/month recurring +├── omoios_enterprise +│ └── Price: Custom pricing ├── omoios_byo_platform │ └── Price: $19/month recurring ├── omoios_workflow_pack_10 diff --git a/docs/design/frontend/landing_page_enhancement.md b/docs/design/frontend/landing_page_enhancement.md index 6542ec57..67e7eb83 100644 --- a/docs/design/frontend/landing_page_enhancement.md +++ b/docs/design/frontend/landing_page_enhancement.md @@ -1233,8 +1233,247 @@ export const typewriter = { --- +## 13. ShadCN/UI Component Mappings + +This section maps design document components to the shadcn/ui primitives available in the codebase (`components/ui/`). 
+ +### 13.1 Available ShadCN Components + +The following shadcn/ui components are installed and available: + +| Component | Path | Common Usage | +|-----------|------|--------------| +| `Button` | `components/ui/button.tsx` | CTAs, form actions, navigation | +| `Card` | `components/ui/card.tsx` | Feature cards, pricing cards, content containers | +| `Badge` | `components/ui/badge.tsx` | Status indicators, tags, labels, plan names | +| `Dialog` | `components/ui/dialog.tsx` | Modals (video modal, signup forms) | +| `Tabs` | `components/ui/tabs.tsx` | Tab navigation, plan comparisons | +| `Tooltip` | `components/ui/tooltip.tsx` | Hover information, feature explanations | +| `Progress` | `components/ui/progress.tsx` | Progress bars, loading states | +| `Skeleton` | `components/ui/skeleton.tsx` | Loading placeholders | +| `Separator` | `components/ui/separator.tsx` | Visual dividers | +| `Switch` | `components/ui/switch.tsx` | Toggle options (monthly/yearly pricing) | +| `Accordion` | `components/ui/accordion.tsx` | FAQ sections, expandable content | +| `Alert` | `components/ui/alert.tsx` | Announcements, notices | +| `BentoGrid` | `components/ui/bento-grid.tsx` | Feature showcase grids (Aceternity UI) | +| `FlipWords` | `components/ui/flip-words.tsx` | Animated text transitions (Aceternity UI) | +| `TextGenerateEffect` | `components/ui/text-generate-effect.tsx` | Typewriter text (Aceternity UI) | +| `Sparkles` | `components/ui/sparkles.tsx` | Particle effects (Aceternity UI) | +| `FloatingNavbar` | `components/ui/floating-navbar.tsx` | Sticky navigation | +| `Announcement` | `components/ui/announcement.tsx` | Banner announcements | + +### 13.2 Component → ShadCN Mapping for Landing Page + +#### Hero Section Components + +| Design Component | ShadCN Components | Notes | +|------------------|-------------------|-------| +| `AgentTerminal` | Custom + `Skeleton` | Terminal effect is custom; use Skeleton for loading states | +| Hero Headlines | `FlipWords`, 
`TextGenerateEffect` | Animated text from Aceternity UI | +| CTA Buttons | `Button` (variants: default, outline) | Primary = default, Secondary = outline | +| "Watch it build" modal | `Dialog` | For video playback modal | + +#### Ticket Journey Section + +| Design Component | ShadCN Components | Notes | +|------------------|-------------------|-------| +| `TicketJourney` | `Card`, `Badge`, `Progress` | Card for ticket, Badge for phase labels, Progress for completion | +| `PhaseInstructionsPanel` | `Card`, custom styling | Content container | +| `FeedbackLoopArrow` | Custom SVG | No direct shadcn equivalent | +| Done Criteria Checklist | Custom with `Checkbox` | Use checkbox for tick items | + +#### Features Section + +| Design Component | ShadCN Components | Notes | +|------------------|-------------------|-------| +| `BentoGrid` | `BentoGrid`, `BentoGridItem` | Already using Aceternity UI bento components | +| `FeatureCard` | `Card`, `Badge` | Card for container, Badge for labels | + +#### Pricing Section (NEW) + +| Design Component | ShadCN Components | Notes | +|------------------|-------------------|-------| +| `PricingSection` | `Card`, `Button`, `Badge`, `Switch` | | +| Pricing Cards | `Card` with `CardHeader`, `CardContent`, `CardFooter` | One card per tier | +| Plan Badge | `Badge` (variant: secondary, default) | "Popular", "Best Value" labels | +| Monthly/Yearly Toggle | `Switch` + labels | For billing cycle selection | +| Feature List | Custom list with check icons | Use Lucide `Check` icon | +| CTA Button | `Button` (variant based on tier) | Primary for featured plan | + +#### Stats Section + +| Design Component | ShadCN Components | Notes | +|------------------|-------------------|-------| +| Stats Cards | `Card` or custom divs | Large number display | +| Animated Numbers | Custom animation | Use Framer Motion | + +#### CTA Section + +| Design Component | ShadCN Components | Notes | +|------------------|-------------------|-------| +| Final CTA | 
`Button`, `Card` | Button for action, Card for container | +| Email Input | `Input` + `Button` | Waitlist signup | + +#### Footer Section + +| Design Component | ShadCN Components | Notes | +|------------------|-------------------|-------| +| Footer Links | `NavigationMenu` or custom | Navigation links | +| Social Icons | `Button` (variant: ghost, icon size) | Icon-only buttons | + +### 13.3 Pricing Section Specification + +**New Component: `PricingSection`** + +Location: `components/marketing/sections/PricingSection.tsx` + +**Pricing Tiers** (Updated): + +| Tier | Price | Features | +|------|-------|----------| +| **Free** | $0/month | 5 workflows/month, 1 project, Community support | +| **Pro** | $50/month | 100 workflows/month, 5 projects, Priority support, Advanced features | +| **Team** | $150/month | 500 workflows/month, Unlimited projects, Dedicated support, Team collaboration | + +**Component Structure**: + +```tsx +// components/marketing/sections/PricingSection.tsx +import { Card, CardHeader, CardContent, CardFooter } from "@/components/ui/card" +import { Button } from "@/components/ui/button" +import { Badge } from "@/components/ui/badge" +import { Switch } from "@/components/ui/switch" +import { Check } from "lucide-react" + +interface PricingTier { + name: string + price: number | string + description: string + features: string[] + cta: string + popular?: boolean +} + +const pricingTiers: PricingTier[] = [ + { + name: "Free", + price: 0, + description: "Get started with autonomous engineering", + features: [ + "5 workflows per month", + "1 project", + "Community support", + "Basic analytics", + ], + cta: "Start Free", + }, + { + name: "Pro", + price: 50, + description: "For individual developers and small teams", + features: [ + "100 workflows per month", + "5 projects", + "Priority support", + "Advanced analytics", + "BYO API keys", + "Custom integrations", + ], + cta: "Get Pro", + popular: true, + }, + { + name: "Team", + price: 150, + description: 
"For growing teams that ship fast", + features: [ + "500 workflows per month", + "Unlimited projects", + "Dedicated support", + "Team collaboration", + "Role-based access", + "Audit logs", + "SSO (coming soon)", + ], + cta: "Get Team", + }, +] +``` + +**Visual Design**: +- Use `Card` with elevated styling for the "Popular" tier +- `Badge` with "Most Popular" text for Pro tier +- `Button` variants: outline for Free, default for Pro (featured), outline for Team +- Check icons (`Check` from Lucide) for feature lists +- Landing page color scheme (landing-bg, landing-text, landing-accent) + +### 13.4 Animation Patterns with ShadCN + Framer Motion + +All landing page animations use Framer Motion. Common patterns: + +```tsx +// Fade in on scroll + + ... + + +// Staggered children + + {items.map((item, i) => ( + + ... + + ))} + +``` + +### 13.5 Color Tokens for Landing Page + +Landing page uses custom CSS variables defined in `globals.css`: + +```css +/* Landing page specific colors */ +--landing-bg: 45 20% 96%; /* Warm off-white */ +--landing-bg-warm: 45 30% 94%; /* Warmer sections */ +--landing-bg-muted: 45 15% 92%; /* Muted backgrounds */ +--landing-text: 30 10% 15%; /* Dark warm text */ +--landing-text-muted: 30 5% 45%; /* Muted text */ +--landing-accent: 35 100% 50%; /* Golden amber */ +--landing-border: 30 10% 85%; /* Subtle borders */ +``` + +Usage with Tailwind: +```tsx +
+

...

+

...

+
+``` + +--- + ## Related Documents - [Project Management Dashboard](./project_management_dashboard.md) - Internal dashboard design - [Frontend Architecture](./frontend_architecture_shadcn_nextjs.md) - Technical stack reference - [Component Scaffold Guide](./component_scaffold_guide.md) - How to create new components +- [Pricing Strategy](../billing/pricing_strategy.md) - Billing tiers and pricing model +- [Design System](../../../design_system.md) - Color tokens and typography diff --git a/frontend/app/page.tsx b/frontend/app/page.tsx index f867c13b..127bff71 100644 --- a/frontend/app/page.tsx +++ b/frontend/app/page.tsx @@ -11,6 +11,8 @@ import { WorkflowSection, NightShiftSection, StatsSection, + PricingSection, + FAQSection, WaitlistCTASection, FooterSection, } from "@/components/marketing" @@ -55,6 +57,12 @@ function LandingPage() { {/* Stats Section */} + {/* Pricing Section */} + + + {/* FAQ Section */} + + {/* Waitlist CTA Section */} diff --git a/frontend/components/marketing/index.ts b/frontend/components/marketing/index.ts index 8c5f28dd..253ad4b4 100644 --- a/frontend/components/marketing/index.ts +++ b/frontend/components/marketing/index.ts @@ -8,5 +8,7 @@ export { FeaturesSection } from "./sections/FeaturesSection" export { WorkflowSection } from "./sections/WorkflowSection" export { NightShiftSection } from "./sections/NightShiftSection" export { StatsSection } from "./sections/StatsSection" +export { PricingSection } from "./sections/PricingSection" +export { FAQSection } from "./sections/FAQSection" export { WaitlistCTASection } from "./sections/WaitlistCTASection" export { FooterSection } from "./sections/FooterSection" diff --git a/frontend/components/marketing/sections/FAQSection.tsx b/frontend/components/marketing/sections/FAQSection.tsx new file mode 100644 index 00000000..063c47cc --- /dev/null +++ b/frontend/components/marketing/sections/FAQSection.tsx @@ -0,0 +1,121 @@ +"use client" + +import { motion } from "framer-motion" +import { + 
Accordion, + AccordionContent, + AccordionItem, + AccordionTrigger, +} from "@/components/ui/accordion" +import { cn } from "@/lib/utils" + +const faqs = [ + { + question: "When does OmoiOS launch?", + answer: + "We're launching in January 2025. Founding members get immediate early access to start using OmoiOS before the public launch.", + }, + { + question: "How does billing work?", + answer: + "Founding members pay a one-time fee for lifetime access with 5 concurrent agents included. After launch, we'll offer monthly subscriptions starting at $50/month for Pro. Pro and above can bring your own API keys to bypass usage limits.", + }, + { + question: "What are concurrent agents?", + answer: + "Concurrent agents is how many tasks can run in parallel. Free users get 1 agent (tasks run one at a time). Pro gets 5 agents running simultaneously. Team gets 10. If you hit your limit, new tasks queue up and run when a slot opens—nothing is lost.", + }, + { + question: "What if it doesn't work for my use case?", + answer: + "We offer a 30-day money-back guarantee for founding members. If OmoiOS doesn't work for your workflow, we'll refund you—no questions asked.", + }, + { + question: "What is 'Bring Your Own API Keys'?", + answer: + "Pro and Team users can connect their own LLM API keys (OpenAI, Anthropic, etc.). You pay the LLM provider directly for tokens, which lets you run unlimited workflows without worrying about our usage caps. Founding members get early access to this feature.", + }, + { + question: "Is my code safe?", + answer: + "Yes. Your code runs in isolated sandboxes that are destroyed after each task. We never store your source code or share it with anyone. You can also self-host for complete control.", + }, + { + question: "Do I need to babysit the agents?", + answer: + "No. That's the point. You approve the plan, then agents work autonomously—often overnight—until every task is complete. 
You wake up to a pull request ready for review.", + }, +] + +interface FAQSectionProps { + className?: string + id?: string +} + +export function FAQSection({ className, id }: FAQSectionProps) { + return ( +
+ ) +} diff --git a/frontend/components/marketing/sections/PricingSection.tsx b/frontend/components/marketing/sections/PricingSection.tsx new file mode 100644 index 00000000..a1a45884 --- /dev/null +++ b/frontend/components/marketing/sections/PricingSection.tsx @@ -0,0 +1,200 @@ +"use client" + +import { motion } from "framer-motion" +import { Check, Zap } from "lucide-react" +import { Button } from "@/components/ui/button" +import { Badge } from "@/components/ui/badge" +import { + Card, + CardHeader, + CardContent, + CardFooter, + CardTitle, + CardDescription, +} from "@/components/ui/card" +import { cn } from "@/lib/utils" + +interface PricingTier { + name: string + price: number | string + period: string + description: string + features: string[] + cta: string + popular?: boolean + href: string +} + +const pricingTiers: PricingTier[] = [ + { + name: "Free", + price: 0, + period: "/month", + description: "Try autonomous engineering", + features: [ + "1 concurrent agent", + "5 workflows per month", + "1 project", + "Community support", + "Tasks queue when limit hit", + ], + cta: "Start Free", + href: "/register", + }, + { + name: "Pro", + price: 50, + period: "/month", + description: "Ship faster with parallel agents", + features: [ + "5 concurrent agents", + "100 workflows per month", + "5 projects", + "Bring your own API keys", + "Priority support", + "Advanced analytics", + ], + cta: "Get Pro", + popular: true, + href: "/register?plan=pro", + }, + { + name: "Team", + price: 150, + period: "/month", + description: "Scale your engineering output", + features: [ + "10 concurrent agents", + "500 workflows per month", + "Unlimited projects", + "Bring your own API keys", + "Team collaboration", + "Role-based access", + "Dedicated support", + ], + cta: "Get Team", + href: "/register?plan=team", + }, +] + +interface PricingSectionProps { + className?: string + id?: string +} + +export function PricingSection({ className, id }: PricingSectionProps) { + return ( +
+
+ {/* Section Header */} + +

+ Simple, Transparent Pricing +

+

+ Start free, scale as you grow. No hidden fees. +

+
+ + {/* Pricing Cards */} +
+ {pricingTiers.map((tier, index) => ( + + + {/* Popular Badge */} + {tier.popular && ( +
+ + + Most Popular + +
+ )} + + + + {tier.name} + + + {tier.description} + + + + + {/* Price */} +
+ + {typeof tier.price === "number" ? `$${tier.price}` : tier.price} + + {tier.period} +
+ + {/* Features */} +
    + {tier.features.map((feature) => ( +
  • + + {feature} +
  • + ))} +
+
+ + + + +
+
+ ))} +
+ + {/* Enterprise CTA */} + +

+ Need more?{" "} + + Contact us for Enterprise pricing + +

+
+
+
+ ) +} diff --git a/frontend/components/marketing/sections/WaitlistCTASection.tsx b/frontend/components/marketing/sections/WaitlistCTASection.tsx index cc41aa32..4054e125 100644 --- a/frontend/components/marketing/sections/WaitlistCTASection.tsx +++ b/frontend/components/marketing/sections/WaitlistCTASection.tsx @@ -2,12 +2,27 @@ import { useState } from "react" import { motion } from "framer-motion" -import { ArrowRight, CheckCircle2, Sparkles } from "lucide-react" +import { ArrowRight, CheckCircle2, Sparkles, Star, Zap, Clock } from "lucide-react" import { SparklesCore } from "@/components/ui/sparkles" import { Button } from "@/components/ui/button" import { Input } from "@/components/ui/input" import { cn } from "@/lib/utils" +const foundingBenefits = [ + { + icon: Star, + text: "Lifetime access—pay once, use forever", + }, + { + icon: Zap, + text: "50 workflows/month included", + }, + { + icon: Clock, + text: "Early access before public launch", + }, +] + interface WaitlistCTASectionProps { className?: string } @@ -62,7 +77,7 @@ export function WaitlistCTASection({ className }: WaitlistCTASectionProps) { > - Limited Early Access + Founding Member Access @@ -74,7 +89,7 @@ export function WaitlistCTASection({ className }: WaitlistCTASectionProps) { transition={{ delay: 0.1 }} className="text-3xl font-bold tracking-tight text-white md:text-4xl lg:text-5xl" > - Ready to Stop Managing and Start Shipping? + Reserve Your Spot as a Founding Member {/* Subheadline */} @@ -85,9 +100,28 @@ export function WaitlistCTASection({ className }: WaitlistCTASectionProps) { transition={{ delay: 0.2 }} className="mt-4 text-lg text-gray-400" > - Get early access. Describe your first feature tonight, review the PR tomorrow. + First 100 members get lifetime access at a one-time price. Lock in your spot before we switch to monthly pricing. + {/* Founding Member Benefits */} + + {foundingBenefits.map((benefit, index) => ( +
+ + {benefit.text} +
+ ))} +
+ {/* Form */} setEmail(e.target.value)} className="h-12 flex-1 border-gray-700 bg-gray-900 text-white placeholder:text-gray-500" @@ -117,17 +151,16 @@ export function WaitlistCTASection({ className }: WaitlistCTASectionProps) {
) : ( <> - Join Waitlist + Reserve Early Access )} - {/* Counter */} + {/* Scarcity */}

- 847 engineers - already on the waitlist + 73 spots remaining at founding member pricing

) : ( @@ -137,7 +170,7 @@ export function WaitlistCTASection({ className }: WaitlistCTASectionProps) { className="inline-flex items-center gap-2 rounded-lg bg-green-900/50 px-6 py-3 text-green-400" > - You're on the list! Check your email. + You're in! Check your email for next steps. )} @@ -152,15 +185,15 @@ export function WaitlistCTASection({ className }: WaitlistCTASectionProps) { > - No credit card required + 30-day money-back guarantee - Cancel anytime + No credit card to reserve - SOC 2 compliant + Cancel anytime
diff --git a/frontend/components/marketing/sections/WorkflowSection.tsx b/frontend/components/marketing/sections/WorkflowSection.tsx index b3c53baa..743ba8d1 100644 --- a/frontend/components/marketing/sections/WorkflowSection.tsx +++ b/frontend/components/marketing/sections/WorkflowSection.tsx @@ -1,47 +1,52 @@ "use client" import { motion } from "framer-motion" -import { Search, Pencil, ListChecks, Code2, CheckCircle2 } from "lucide-react" +import { Pencil, ListChecks, Moon, CheckCircle2 } from "lucide-react" import { Card } from "@/components/ui/card" +import { Badge } from "@/components/ui/badge" import { TicketJourney } from "@/components/landing/TicketJourney" import { cn } from "@/lib/utils" const phases = [ { - id: "requirements", + id: "spec", number: "01", - title: "You Describe It", + title: "You Write a Spec", description: - "Write what you want in plain English. \"Add user authentication with Google login.\" That's all you need.", - icon: Search, - color: "bg-blue-500", + "Describe the feature in plain English. Add constraints like tech stack, architecture rules, or coding standards. The spec becomes the agent's guardrails.", + icon: Pencil, + color: "bg-purple-500", + highlight: "Spec-driven constraints", }, { - id: "design", + id: "plan", number: "02", - title: "You Approve the Plan", + title: "We Plan the Work", description: - "We show you exactly what will be built. You say yes, no, or make changes. Nothing happens without your OK.", - icon: Pencil, - color: "bg-purple-500", + "Your spec becomes tickets and tasks with clear dependencies. You see exactly what will be built and in what order. 
Approve or adjust before any code is written.", + icon: ListChecks, + color: "bg-blue-500", + highlight: "Tickets & dependencies", }, { - id: "tasks", + id: "execution", number: "03", - title: "We Handle the Details", + title: "Agents Work Overnight", description: - "The boring part—breaking it into tasks, figuring out dependencies, tracking progress—all handled for you.", - icon: ListChecks, + "Go to sleep. Agents work through tickets in isolated sandboxes—writing code, running tests, fixing issues. They don't stop until every task is complete.", + icon: Moon, color: "bg-amber-500", + highlight: "Runs until completion", }, { - id: "execution", + id: "review", number: "04", - title: "You Review the Result", + title: "Wake Up to a PR", description: - "A pull request appears with working, tested code. Review it, merge it, done. Ship faster without the busywork.", - icon: Code2, + "Morning: a pull request with working, tested code. Every change traced back to your spec. Review it, merge it, ship it.", + icon: CheckCircle2, color: "bg-green-500", + highlight: "Ready when you are", }, ] @@ -61,10 +66,10 @@ export function WorkflowSection({ className }: WorkflowSectionProps) { className="mx-auto mb-16 max-w-2xl text-center" >

- Here's How Simple It Is + You Sleep. Agents Ship.

- Four steps. You do two of them. We do the rest. + Write a spec, approve the plan, go to bed. Wake up to a pull request.

@@ -99,7 +104,15 @@ export function WorkflowSection({ className }: WorkflowSectionProps) {

{phase.title}

-

{phase.description}

+

{phase.description}

+ + {/* Highlight Badge */} + + {phase.highlight} + {/* Connector Arrow (hidden on last item) */} {i < phases.length - 1 && ( From 20edc9a7e2bf58ea9b77fa66250ab28677117f5c Mon Sep 17 00:00:00 2001 From: Kevin Hill Date: Tue, 30 Dec 2025 19:05:05 -0300 Subject: [PATCH 033/290] Fix permissions format in settings.local.json and add stderr capture The sandbox was failing to start with exit code 1 during Claude SDK initialization. Root cause was invalid permission format in settings.local.json. Fixed: - Changed "Bash(*)" to "Bash" - the (*) suffix is invalid syntax - Changed "mcp__*(*)" to "mcp__*" - wildcards don't use parentheses - Removed catch-all "*" entry as individual tools are listed Also added stderr callback to ClaudeSDKClient options: - Captures CLI subprocess stderr for debugging initialization failures - Logs CLI errors at WARNING level for visibility - Collects stderr lines for error reporting The correct permission format is: - "ToolName" for built-in tools (e.g., "Bash", "Read", "Write") - "mcp__server__*" for MCP tool wildcards - "Bash(pattern:*)" only for specific command patterns --- backend/omoi_os/services/daytona_spawner.py | 46 +++++++++---------- .../omoi_os/workers/claude_sandbox_worker.py | 35 +++++++++++++- 2 files changed, 56 insertions(+), 25 deletions(-) diff --git a/backend/omoi_os/services/daytona_spawner.py b/backend/omoi_os/services/daytona_spawner.py index afb7e392..841426a3 100644 --- a/backend/omoi_os/services/daytona_spawner.py +++ b/backend/omoi_os/services/daytona_spawner.py @@ -757,32 +757,32 @@ def escape_env_value(v: str) -> str: # Create settings.local.json with FULL permissions # This is a sandbox environment - allow everything without prompts - # Explicitly listing ALL Claude Code tools to ensure nothing is missed + # Format: "ToolName" for built-in tools, "mcp__server__*" for MCP tools + # NOTE: Do NOT use "Bash(*)" - that's invalid syntax. Use "Bash" instead. 
settings_content = """{ "permissions": { "allow": [ - "Bash(*)", - "Read(*)", - "Write(*)", - "Edit(*)", - "MultiEdit(*)", - "Glob(*)", - "Grep(*)", - "LS(*)", - "WebFetch(*)", - "WebSearch(*)", - "TodoWrite(*)", - "Task(*)", - "TaskOutput(*)", - "Skill(*)", - "LSP(*)", - "NotebookEdit(*)", - "AskUserQuestion(*)", - "KillShell(*)", - "EnterPlanMode(*)", - "ExitPlanMode(*)", - "mcp__*(*)", - "*" + "Bash", + "Read", + "Write", + "Edit", + "MultiEdit", + "Glob", + "Grep", + "LS", + "WebFetch", + "WebSearch", + "TodoWrite", + "Task", + "TaskOutput", + "Skill", + "LSP", + "NotebookEdit", + "AskUserQuestion", + "KillShell", + "EnterPlanMode", + "ExitPlanMode", + "mcp__*" ], "deny": [] } diff --git a/backend/omoi_os/workers/claude_sandbox_worker.py b/backend/omoi_os/workers/claude_sandbox_worker.py index fa0fa423..395cde20 100644 --- a/backend/omoi_os/workers/claude_sandbox_worker.py +++ b/backend/omoi_os/workers/claude_sandbox_worker.py @@ -1211,9 +1211,16 @@ def get_custom_agents(self) -> dict: } def to_sdk_options( - self, pre_tool_hook=None, post_tool_hook=None + self, pre_tool_hook=None, post_tool_hook=None, stderr_callback=None ) -> "ClaudeAgentOptions": - """Create ClaudeAgentOptions from config.""" + """Create ClaudeAgentOptions from config. + + Args: + pre_tool_hook: Hook to run before tool execution + post_tool_hook: Hook to run after tool execution + stderr_callback: Callback to receive stderr output from CLI subprocess. + If not provided, stderr is logged at DEBUG level. 
+ """ # Build environment variables for the CLI subprocess env = {"ANTHROPIC_API_KEY": self.api_key} if self.api_base_url: @@ -1223,6 +1230,13 @@ def to_sdk_options( # Convert cwd to Path if it's a string (SDK expects Path) cwd_path = Path(self.cwd) if isinstance(self.cwd, str) else self.cwd + # Create stderr handler to capture CLI errors + # This is critical for debugging initialization failures + def default_stderr_handler(line: str): + logger.debug("CLI stderr: %s", line.strip()) + + stderr_handler = stderr_callback or default_stderr_handler + options_kwargs = { "system_prompt": self.system_prompt, "permission_mode": self.permission_mode, @@ -1230,6 +1244,7 @@ def to_sdk_options( "max_budget_usd": self.max_budget_usd, "cwd": cwd_path, "env": env, + "stderr": stderr_handler, # Capture CLI stderr for debugging } # Only set allowed_tools if explicitly configured @@ -2367,6 +2382,21 @@ async def run(self): async with EventReporter(self.config) as reporter: self.reporter = reporter + # Collect stderr lines for error reporting + cli_stderr_lines = [] + + def stderr_collector(line: str): + """Collect CLI stderr output for debugging. + + This captures stderr from the Claude CLI subprocess. + Critical for diagnosing initialization failures. 
+ """ + stripped = line.strip() + if stripped: + cli_stderr_lines.append(stripped) + # Log immediately at WARNING level so we see it + logger.warning("CLI stderr: %s", stripped) + async with MessagePoller(self.config) as poller: # Create hooks and options pre_tool_hook = await self._create_pre_tool_hook() @@ -2374,6 +2404,7 @@ async def run(self): sdk_options = self.config.to_sdk_options( pre_tool_hook=pre_tool_hook, post_tool_hook=post_tool_hook, + stderr_callback=stderr_collector, ) # Report startup From 2bb933c4ba821d241243a54a164dd7756a850d55 Mon Sep 17 00:00:00 2001 From: Kevin Hill Date: Tue, 30 Dec 2025 19:17:23 -0300 Subject: [PATCH 034/290] Fix sandbox root permissions with IS_SANDBOX=1 environment variable The Claude CLI was failing with: "--dangerously-skip-permissions cannot be used with root/sudo privileges" Solution: Set IS_SANDBOX=1 environment variable which tells Claude Code it's running in a secure sandbox, allowing bypassPermissions to work as root. Also simplified settings.local.json - with IS_SANDBOX=1 and bypassPermissions, the allow list is not needed as the SDK skips all permission prompts. 
--- backend/omoi_os/services/daytona_spawner.py | 36 +++++---------------- 1 file changed, 8 insertions(+), 28 deletions(-) diff --git a/backend/omoi_os/services/daytona_spawner.py b/backend/omoi_os/services/daytona_spawner.py index 841426a3..9269cead 100644 --- a/backend/omoi_os/services/daytona_spawner.py +++ b/backend/omoi_os/services/daytona_spawner.py @@ -201,6 +201,9 @@ async def spawn_for_task( "CALLBACK_URL": base_url, # For EventReporter to use correct API URL "PHASE_ID": phase_id, "SANDBOX_ID": sandbox_id, + # IS_SANDBOX=1 tells Claude Code it's running in a secure sandbox, + # allowing --dangerously-skip-permissions to work even as root + "IS_SANDBOX": "1", } # Determine continuous mode: @@ -755,40 +758,17 @@ def escape_env_value(v: str) -> str: # Create skills directory sandbox.process.exec("mkdir -p /root/.claude/skills") - # Create settings.local.json with FULL permissions - # This is a sandbox environment - allow everything without prompts - # Format: "ToolName" for built-in tools, "mcp__server__*" for MCP tools - # NOTE: Do NOT use "Bash(*)" - that's invalid syntax. Use "Bash" instead. 
+ # Create minimal settings.local.json + # With IS_SANDBOX=1 and bypassPermissions, the allow list is not needed + # The SDK will skip all permission prompts automatically settings_content = """{ "permissions": { - "allow": [ - "Bash", - "Read", - "Write", - "Edit", - "MultiEdit", - "Glob", - "Grep", - "LS", - "WebFetch", - "WebSearch", - "TodoWrite", - "Task", - "TaskOutput", - "Skill", - "LSP", - "NotebookEdit", - "AskUserQuestion", - "KillShell", - "EnterPlanMode", - "ExitPlanMode", - "mcp__*" - ], + "allow": [], "deny": [] } }""" sandbox.fs.upload_file(settings_content.encode("utf-8"), "/root/.claude/settings.local.json") - logger.info("Uploaded Claude settings.local.json with full permissions") + logger.info("Uploaded Claude settings.local.json (bypassPermissions mode)") # Get skills based on execution mode # - exploration: spec-driven-dev (for creating specs/tickets/tasks) From 6093cce896583021df6a331b85077c0baf9ff55c Mon Sep 17 00:00:00 2001 From: Kevin Hill Date: Tue, 30 Dec 2025 19:48:00 -0300 Subject: [PATCH 035/290] Improve iteration event visibility and add continuous mode logging - EventRenderer: Make iteration/continuous events more visible - Increased opacity from 40-60% to full visibility - Larger text (text-xs vs text-[10px]) - Color-coded backgrounds with borders for each event type - Blue for iteration started, green for completed, purple for signals - Amber for validation failures, emerald for validation passed - Spawner: Add detailed logging for continuous mode decisions - Log continuous_mode_param, runtime, execution_mode at decision point - Log auto-determination result with reasoning - Log final ENABLED/DISABLED status with all settings - Worker: Add comprehensive continuous mode logging - Log config loading with env var values - Log full settings when continuous mode enabled - Log mode decision point before execution - Clear visual separators for CONTINUOUS vs SINGLE-RUN mode --- backend/omoi_os/services/daytona_spawner.py | 39 +++++- 
.../omoi_os/workers/claude_sandbox_worker.py | 70 ++++++++++- frontend/components/sandbox/EventRenderer.tsx | 111 ++++++++++-------- 3 files changed, 163 insertions(+), 57 deletions(-) diff --git a/backend/omoi_os/services/daytona_spawner.py b/backend/omoi_os/services/daytona_spawner.py index 9269cead..d9604b97 100644 --- a/backend/omoi_os/services/daytona_spawner.py +++ b/backend/omoi_os/services/daytona_spawner.py @@ -211,14 +211,27 @@ async def spawn_for_task( # - True: Force enable # - False: Force disable effective_continuous_mode = continuous_mode + logger.info( + "SPAWNER: Continuous mode decision", + extra={ + "continuous_mode_param": continuous_mode, + "runtime": runtime, + "execution_mode": execution_mode, + "task_id": task_id, + } + ) if continuous_mode is None and runtime == "claude": # Auto-enable for implementation and validation modes # These modes need to ensure tasks complete fully (code pushed, PR created) effective_continuous_mode = execution_mode in ("implementation", "validation") - if effective_continuous_mode: - logger.info( - f"Auto-enabling continuous mode for '{execution_mode}' mode" - ) + logger.info( + "SPAWNER: Auto-determined continuous mode", + extra={ + "effective_continuous_mode": effective_continuous_mode, + "execution_mode": execution_mode, + "is_implementation_or_validation": execution_mode in ("implementation", "validation"), + } + ) # Add continuous mode settings if enabled if effective_continuous_mode and runtime == "claude": @@ -227,7 +240,23 @@ async def spawn_for_task( env_vars.setdefault("MAX_ITERATIONS", "10") env_vars.setdefault("MAX_TOTAL_COST_USD", "20.0") env_vars.setdefault("MAX_DURATION_SECONDS", "3600") # 1 hour - logger.info("Continuous mode enabled for sandbox") + logger.info( + "SPAWNER: Continuous mode ENABLED", + extra={ + "max_iterations": env_vars.get("MAX_ITERATIONS"), + "max_cost_usd": env_vars.get("MAX_TOTAL_COST_USD"), + "max_duration_seconds": env_vars.get("MAX_DURATION_SECONDS"), + } + ) + else: + 
logger.info( + "SPAWNER: Continuous mode DISABLED", + extra={ + "effective_continuous_mode": effective_continuous_mode, + "runtime": runtime, + "reason": "not claude runtime" if runtime != "claude" else "continuous_mode=False", + } + ) # Set validation requirements based on task_requirements (LLM-analyzed) or execution_mode # task_requirements takes precedence when provided, as it's based on intelligent analysis diff --git a/backend/omoi_os/workers/claude_sandbox_worker.py b/backend/omoi_os/workers/claude_sandbox_worker.py index 395cde20..b544bbb0 100644 --- a/backend/omoi_os/workers/claude_sandbox_worker.py +++ b/backend/omoi_os/workers/claude_sandbox_worker.py @@ -1070,8 +1070,21 @@ def __init__(self): # Enable continuous mode by default for implementation and validation # These modes need to ensure work is ACTUALLY completed continuous_default = self.execution_mode in ("implementation", "validation") + continuous_env = os.environ.get("CONTINUOUS_MODE", "") self.continuous_mode = ( - os.environ.get("CONTINUOUS_MODE", str(continuous_default)).lower() == "true" + continuous_env.lower() == "true" if continuous_env else continuous_default + ) + + # Log continuous mode configuration for debugging + logger.info( + "WORKER: Continuous mode configuration", + extra={ + "continuous_mode": self.continuous_mode, + "continuous_env_var": continuous_env or "(not set)", + "continuous_default": continuous_default, + "execution_mode": self.execution_mode, + "task_id": self.task_id, + } ) # Iteration limits @@ -1092,6 +1105,31 @@ def __init__(self): self.require_code_pushed = os.environ.get("REQUIRE_CODE_PUSHED", "true").lower() == "true" self.require_pr_created = os.environ.get("REQUIRE_PR_CREATED", "true").lower() == "true" + # Log full continuous mode settings + if self.continuous_mode: + logger.info( + "WORKER: Continuous mode ENABLED with settings", + extra={ + "max_iterations": self.max_iterations, + "max_total_cost_usd": self.max_total_cost_usd, + "max_duration_seconds": 
self.max_duration_seconds, + "max_consecutive_errors": self.max_consecutive_errors, + "completion_signal": self.completion_signal, + "completion_threshold": self.completion_threshold, + "require_clean_git": self.require_clean_git, + "require_code_pushed": self.require_code_pushed, + "require_pr_created": self.require_pr_created, + } + ) + else: + logger.info( + "WORKER: Continuous mode DISABLED - running single iteration only", + extra={ + "execution_mode": self.execution_mode, + "continuous_env_var": continuous_env or "(not set)", + } + ) + # Append conversation context to system prompt if provided (for hydration) # NOTE: Must be after conversation_context is initialized above if self.conversation_context: @@ -2451,13 +2489,24 @@ def stderr_collector(line: str): import time self.iteration_state.start_time = time.time() + # Log mode decision for debugging + logger.info( + "WORKER RUN: Mode decision point", + extra={ + "continuous_mode": self.config.continuous_mode, + "execution_mode": self.config.execution_mode, + "task_id": self.config.task_id, + "initial_task_preview": initial_task[:200], + } + ) + if self.config.continuous_mode: # ================================================= # CONTINUOUS MODE: Iterate until task truly completes # ================================================= - logger.info("=" * 40) - logger.info("CONTINUOUS MODE ENABLED") - logger.info("=" * 40) + logger.info("=" * 60) + logger.info("WORKER RUN: CONTINUOUS MODE ACTIVATED") + logger.info("=" * 60) logger.info( "Will iterate until: validation_passed OR max_iterations=%d OR max_cost=$%.2f OR max_duration=%ds", self.config.max_iterations, @@ -2615,7 +2664,18 @@ def stderr_collector(line: str): # ================================================= # SINGLE-RUN MODE: Execute once (original behavior) # ================================================= - logger.info("Processing initial task (single-run mode)", extra={"task_preview": initial_task[:100]}) + logger.info("=" * 60) + 
logger.info("WORKER RUN: SINGLE-RUN MODE (no iteration)") + logger.info("=" * 60) + logger.info( + "WORKER RUN: Single-run mode - task will execute ONCE only", + extra={ + "execution_mode": self.config.execution_mode, + "task_id": self.config.task_id, + "continuous_mode": self.config.continuous_mode, + "task_preview": initial_task[:200], + } + ) try: await client.query(initial_task) await self._process_messages(client) diff --git a/frontend/components/sandbox/EventRenderer.tsx b/frontend/components/sandbox/EventRenderer.tsx index e62cd5e5..0576c710 100644 --- a/frontend/components/sandbox/EventRenderer.tsx +++ b/frontend/components/sandbox/EventRenderer.tsx @@ -2432,23 +2432,25 @@ export function EventRenderer({ event, className }: EventRendererProps) { if (event_type.includes("heartbeat")) return null // ============================================================================ - // Iteration & Continuous Mode Events - Subtle inline display + // Iteration & Continuous Mode Events - Visible progress indicators // ============================================================================ - // Iteration started - very subtle, just a small indicator + // Iteration started - clear indicator with iteration number if (event_type === "iteration.started") { const iterNum = getNumber(data, "iteration_num") return ( -
-
- - Iteration {iterNum} started +
+
+ + + Iteration {iterNum} started +
) } - // Iteration completed - show cost and brief summary + // Iteration completed - show cost and completion status if (event_type === "iteration.completed") { const iterNum = getNumber(data, "iteration_num") const costUsd = getNumber(data, "cost_usd") @@ -2461,18 +2463,20 @@ export function EventRenderer({ event, className }: EventRendererProps) { } return ( -
-
- - Iteration {iterNum} +
+
+ + + Iteration {iterNum} completed + {costUsd > 0 && ( - + ${costUsd.toFixed(4)} )} {completionCount > 0 && ( - - complete + + COMPLETE )}
@@ -2480,63 +2484,71 @@ export function EventRenderer({ event, className }: EventRendererProps) { ) } - // Iteration validation - show pass/fail status subtly + // Iteration validation - show pass/fail status clearly if (event_type === "iteration.validation") { const passed = data.passed === true const errors = Array.isArray(data.errors) ? data.errors : [] const feedback = getString(data, "feedback") - // If validation passed, be very subtle + // If validation passed - show success if (passed) { return ( -
-
- - Validation passed +
+
+ + + Validation passed +
) } - // If validation failed, show a bit more info but still subtle + // If validation failed, show prominently with details return ( -
-
- - Validation: {feedback || errors.join(", ") || "checking..."} +
+
+ + + Validation: {feedback || errors.join(", ") || "checking..."} +
) } - // Completion signal - very subtle, just shows detection + // Completion signal - shows progress toward task completion if (event_type === "iteration.completion_signal") { const signalCount = getNumber(data, "signal_count") const threshold = getNumber(data, "threshold") return ( -
-
- - TASK_COMPLETE detected ({signalCount}/{threshold}) +
+
+ + + TASK_COMPLETE signal ({signalCount}/{threshold}) +
) } - // Continuous mode started - subtle indicator + // Continuous mode started - clear indicator if (event_type === "continuous.started") { return ( -
-
- - Continuous mode started +
+
+ + + Continuous mode started +
) } - // Continuous mode completed - show summary + // Continuous mode completed - show summary with stats if (event_type === "continuous.completed") { const stopReason = getString(data, "stop_reason") const totalIterations = getNumber(data, "iteration_num") @@ -2544,27 +2556,32 @@ export function EventRenderer({ event, className }: EventRendererProps) { const elapsedSecs = getNumber(data, "elapsed_seconds") const reasonLabel = stopReason === "task_complete" ? "Task completed" : - stopReason === "max_iterations_reached" ? "Max iterations" : + stopReason === "max_iterations_reached" ? "Max iterations reached" : stopReason === "validation_passed" ? "Validation passed" : stopReason || "Completed" + const isSuccess = stopReason === "task_complete" || stopReason === "validation_passed" + const bgColor = isSuccess ? "bg-green-500/10 border-green-500/20" : "bg-amber-500/10 border-amber-500/20" + const textColor = isSuccess ? "text-green-600 dark:text-green-400" : "text-amber-600 dark:text-amber-400" + const iconColor = isSuccess ? "text-green-500" : "text-amber-500" + return ( -
-
- - {reasonLabel} - - {totalIterations} iterations +
+
+ + {reasonLabel} + + {totalIterations} iterations {totalCost > 0 && ( <> - - ${totalCost.toFixed(2)} + + ${totalCost.toFixed(2)} )} {elapsedSecs > 0 && ( <> - - {Math.round(elapsedSecs)}s + + {Math.round(elapsedSecs)}s )}
From 0c6debeef9c019d56ce8de2d621f9e48fd6e507d Mon Sep 17 00:00:00 2001 From: Kevin Hill Date: Wed, 31 Dec 2025 11:50:23 -0300 Subject: [PATCH 036/290] Add conversion-optimized onboarding flow and product screenshots MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Create 6-step onboarding wizard: Welcome → GitHub → Repo → First Spec → Plan → Complete - Add useOnboarding hook with localStorage persistence and step navigation - Integrate Stripe checkout for Founding Member ($299) tier - Add ProductShowcaseSection with interactive screenshot carousel - Update HeroSection with real Kanban board screenshot - Add product screenshots: kanban, project dashboard, command center, agent view - Document onboarding conversion strategy in docs/user_journey/ --- .../user_journey/01a_onboarding_conversion.md | 453 ++++++++++++++++++ frontend/app/(auth)/onboarding/page.tsx | 237 ++------- frontend/app/page.tsx | 4 + frontend/components/marketing/index.ts | 1 + .../marketing/sections/HeroSection.tsx | 40 +- .../sections/ProductShowcaseSection.tsx | 208 ++++++++ .../onboarding/OnboardingWizard.tsx | 100 ++++ frontend/components/onboarding/index.ts | 7 + .../onboarding/steps/CompleteStep.tsx | 137 ++++++ .../onboarding/steps/FirstSpecStep.tsx | 139 ++++++ .../onboarding/steps/GitHubStep.tsx | 108 +++++ .../onboarding/steps/PlanSelectStep.tsx | 327 +++++++++++++ .../onboarding/steps/RepoSelectStep.tsx | 222 +++++++++ .../onboarding/steps/WelcomeStep.tsx | 95 ++++ frontend/hooks/useOnboarding.ts | 317 ++++++++++++ .../public/screenshots/agent-task-view.png | Bin 0 -> 675890 bytes .../public/screenshots/command-center.png | Bin 0 -> 380795 bytes frontend/public/screenshots/kanban-board.png | Bin 0 -> 452182 bytes .../public/screenshots/project-overview.png | Bin 0 -> 634990 bytes 19 files changed, 2156 insertions(+), 239 deletions(-) create mode 100644 docs/user_journey/01a_onboarding_conversion.md create mode 100644 
frontend/components/marketing/sections/ProductShowcaseSection.tsx create mode 100644 frontend/components/onboarding/OnboardingWizard.tsx create mode 100644 frontend/components/onboarding/index.ts create mode 100644 frontend/components/onboarding/steps/CompleteStep.tsx create mode 100644 frontend/components/onboarding/steps/FirstSpecStep.tsx create mode 100644 frontend/components/onboarding/steps/GitHubStep.tsx create mode 100644 frontend/components/onboarding/steps/PlanSelectStep.tsx create mode 100644 frontend/components/onboarding/steps/RepoSelectStep.tsx create mode 100644 frontend/components/onboarding/steps/WelcomeStep.tsx create mode 100644 frontend/hooks/useOnboarding.ts create mode 100644 frontend/public/screenshots/agent-task-view.png create mode 100644 frontend/public/screenshots/command-center.png create mode 100644 frontend/public/screenshots/kanban-board.png create mode 100644 frontend/public/screenshots/project-overview.png diff --git a/docs/user_journey/01a_onboarding_conversion.md b/docs/user_journey/01a_onboarding_conversion.md new file mode 100644 index 00000000..4ed61b96 --- /dev/null +++ b/docs/user_journey/01a_onboarding_conversion.md @@ -0,0 +1,453 @@ +# Onboarding Flow - Conversion Optimized + +**Part of**: [User Journey Documentation](./README.md) +**Created**: 2025-12-31 +**Purpose**: Design an onboarding flow that maximizes conversion to paid tiers + +--- + +## Executive Summary + +The goal is to get users to: +1. **Experience the magic** (free tier value) +2. **Hit natural limits** (creates upgrade pressure) +3. **Choose paid tier** (with Founding Member as prominent option) + +**Key Insight**: GitHub connection is BLOCKING - nothing works without it. This should be the first real action. 
+ +--- + +## Onboarding Flow Overview + +``` +┌─────────────────────────────────────────────────────────────────────────┐ +│ ONBOARDING FLOW │ +├─────────────────────────────────────────────────────────────────────────┤ +│ │ +│ STEP 1: Welcome + Value Promise (5 sec) │ +│ ┌───────────────────────────────────────────────────────────────┐ │ +│ │ "Welcome, {name}! Ready to ship while you sleep?" │ │ +│ │ │ │ +│ │ Here's how it works: │ │ +│ │ 1. You describe what to build │ │ +│ │ 2. Approve a plan │ │ +│ │ 3. Wake up to a PR │ │ +│ │ │ │ +│ │ [Let's Get Started →] │ │ +│ └───────────────────────────────────────────────────────────────┘ │ +│ ↓ │ +│ STEP 2: Connect GitHub (BLOCKING - Required) │ +│ ┌───────────────────────────────────────────────────────────────┐ │ +│ │ 🔗 Connect Your Code │ │ +│ │ │ │ +│ │ OmoiOS needs access to create branches and PRs for you. │ │ +│ │ │ │ +│ │ [⚫ Connect GitHub] │ │ +│ │ │ │ +│ │ 🔒 We only access repos you explicitly select │ │ +│ │ 🔒 You can disconnect anytime │ │ +│ └───────────────────────────────────────────────────────────────┘ │ +│ ↓ │ +│ STEP 3: Select Repository │ +│ ┌───────────────────────────────────────────────────────────────┐ │ +│ │ 📁 Choose Your First Project │ │ +│ │ │ │ +│ │ ┌─────────────────────────────────────────────────────────┐ │ │ +│ │ │ ○ kevinhill/senior-sandbox ★ 12 TypeScript │ │ │ +│ │ │ ○ kevinhill/api-gateway ★ 3 Python │ │ │ +│ │ │ ○ kevinhill/marketing-site ★ 1 JavaScript │ │ │ +│ │ └─────────────────────────────────────────────────────────┘ │ │ +│ │ │ │ +│ │ [Continue →] │ │ +│ └───────────────────────────────────────────────────────────────┘ │ +│ ↓ │ +│ STEP 4: First Spec (Quick Win - Get to Value FAST) │ +│ ┌───────────────────────────────────────────────────────────────┐ │ +│ │ ✨ Describe Your First Feature │ │ +│ │ │ │ +│ │ What should we build tonight? 
(You can start simple) │ │ +│ │ │ │ +│ │ ┌─────────────────────────────────────────────────────────┐ │ │ +│ │ │ Add a logout button to the navbar that clears the │ │ │ +│ │ │ session and redirects to the login page │ │ │ +│ │ └─────────────────────────────────────────────────────────┘ │ │ +│ │ │ │ +│ │ 💡 Suggestions: │ │ +│ │ • "Add form validation to the contact form" │ │ +│ │ • "Create a dark mode toggle" │ │ +│ │ • "Fix the broken link in the footer" │ │ +│ │ │ │ +│ │ [Submit First Spec →] │ │ +│ │ │ │ +│ │ ⏱️ This will use 1 of your 5 free workflows │ │ +│ └───────────────────────────────────────────────────────────────┘ │ +│ ↓ │ +│ STEP 5: Plan Selection (Soft Upsell - Not Blocking) │ +│ ┌───────────────────────────────────────────────────────────────┐ │ +│ │ 🎉 Your first agent is working! │ │ +│ │ │ │ +│ │ Want to ship even faster? Choose your plan: │ │ +│ │ │ │ +│ │ ┌─────────────┐ ┌─────────────┐ ┌─────────────────────┐ │ │ +│ │ │ FREE │ │ PRO │ │ ⭐ FOUNDING MEMBER │ │ │ +│ │ │ $0/mo │ │ $50/mo │ │ $299 once │ │ │ +│ │ │ │ │ │ │ │ │ │ +│ │ │ 1 agent │ │ 5 agents │ │ 5 agents │ │ │ +│ │ │ 5 workflows │ │ 100/month │ │ 50/month + BYO keys │ │ │ +│ │ │ │ │ BYO keys │ │ Lifetime access │ │ │ +│ │ │ │ │ │ │ 87 spots left │ │ │ +│ │ │ [Current] │ │ [Upgrade] │ │ [Claim Lifetime →] │ │ │ +│ │ └─────────────┘ └─────────────┘ └─────────────────────┘ │ │ +│ │ │ │ +│ │ [Skip for now - Continue to Dashboard →] │ │ +│ └───────────────────────────────────────────────────────────────┘ │ +│ ↓ │ +│ STEP 6: Dashboard with Active Agent │ +│ ┌───────────────────────────────────────────────────────────────┐ │ +│ │ Your agent is working on: "Add logout button..." │ │ +│ │ │ │ +│ │ ████████████░░░░░░░░░░░░░░░░░░ 35% │ │ +│ │ │ │ +│ │ 📋 Planning → 🔨 Building → 🧪 Testing → ✅ PR Ready │ │ +│ │ ✓ Active │ │ +│ │ │ │ +│ │ 💤 Come back in the morning for your PR! 
│ │ +│ │ │ │ +│ │ [Set up notifications] [Explore dashboard] │ │ +│ └───────────────────────────────────────────────────────────────┘ │ +│ │ +└─────────────────────────────────────────────────────────────────────────┘ +``` + +--- + +## Step Details + +### Step 1: Welcome + Value Promise + +**Goal**: Set expectations, build excitement, minimal friction. + +**UI Components**: +- Animated hero with overnight workflow visualization +- Clear 3-step process explanation +- Single CTA button + +**Copy**: +``` +Welcome, {firstName}! 👋 + +Ready to ship while you sleep? + +Here's how OmoiOS works: +1. Describe what you want built +2. Approve a quick plan +3. Wake up to a pull request + +Your time: 5 minutes | AI work: 8 hours | Result: Feature shipped + +[Let's Get Started →] +``` + +**Technical Notes**: +- Pre-fill name from OAuth/registration +- Track `onboarding_started` analytics event +- Show skip button only after 3 seconds (prevents rushing) + +--- + +### Step 2: Connect GitHub (BLOCKING) + +**Goal**: Get GitHub OAuth connected. This is required - no skipping. + +**UI Components**: +- Large GitHub button +- Security reassurances +- Permission scope explanation + +**Copy**: +``` +🔗 Connect Your Code + +OmoiOS creates branches and PRs directly in your repos. +We need GitHub access to work our magic. + +[Connect GitHub] + +🔒 You choose which repos we can access +🔒 We never push to main without your approval +🔒 Disconnect anytime in settings +``` + +**Technical Notes**: +- OAuth flow with `repo` scope +- Store GitHub token in `user_credentials` table +- On callback, redirect to Step 3 +- If user already has GitHub connected, skip to Step 3 + +--- + +### Step 3: Select Repository + +**Goal**: Create first project linked to a real repo. + +**UI Components**: +- Repository list with search/filter +- Language/stars metadata +- "Create new repo" option + +**Copy**: +``` +📁 Choose Your First Project + +Select a repository for your first feature. 
+Don't worry - you can add more projects later. + +[Search repos...] + +┌────────────────────────────────────────────┐ +│ ● kevinhill/senior-sandbox │ +│ TypeScript • Updated 2 hours ago │ +├────────────────────────────────────────────┤ +│ ○ kevinhill/api-gateway │ +│ Python • Updated 3 days ago │ +├────────────────────────────────────────────┤ +│ ○ kevinhill/marketing-site │ +│ JavaScript • Updated 1 week ago │ +└────────────────────────────────────────────┘ + +[Continue →] +``` + +**Technical Notes**: +- Fetch repos from GitHub API using stored token +- Sort by recent activity +- Create `Project` record on selection +- Create default `Organization` if needed (personal workspace) + +--- + +### Step 4: First Spec (Quick Win) + +**Goal**: Get user to submit first feature request. This is the "magic moment." + +**UI Components**: +- Large text input +- Suggestion chips for easy starts +- Usage indicator (builds awareness of limits) + +**Copy**: +``` +✨ Describe Your First Feature + +What should we build tonight? Start simple - you can go bigger later. + +[Text area with placeholder: "Add a logout button that clears the session..."] + +💡 Quick starts: +[Add form validation] [Create dark mode] [Fix broken link] + +──────────────────────────────────────────── +⏱️ This will use 1 of your 5 free monthly workflows + +[Submit First Spec →] +``` + +**Technical Notes**: +- Create `Spec` record on submission +- Start agent execution immediately (async) +- Track `first_spec_submitted` analytics event +- Show loading state while agent initializes + +--- + +### Step 5: Plan Selection (Soft Upsell) + +**Goal**: Introduce paid options while agent is working. Non-blocking. + +**UI Components**: +- Three-column pricing comparison +- Founding Member highlighted with urgency +- Skip option clearly visible + +**Copy**: +``` +🎉 Your first agent is working! 
+ +While it runs, check out what's possible with more power: + +┌─────────────────────────────────────────────────────────────────────┐ +│ │ +│ FREE PRO ⭐ FOUNDING MEMBER │ +│ $0/month $50/month $299 one-time │ +│ │ +│ • 1 agent • 5 agents • 5 agents │ +│ • 5 workflows • 100 workflows • 50 workflows/mo │ +│ • 2GB storage • 50GB storage • 50GB storage │ +│ • BYO API keys • BYO API keys │ +│ • Priority support • Lifetime access │ +│ • Early features │ +│ │ +│ [Current] [Upgrade] [Claim Lifetime →] │ +│ │ +│ Only 87 of 100 spots left! │ +│ │ +└─────────────────────────────────────────────────────────────────────┘ + +[Skip for now - Continue to Dashboard →] +``` + +**Technical Notes**: +- Show real-time "spots remaining" count +- Stripe checkout for Pro/Lifetime +- Track `pricing_viewed` and `upgrade_clicked` events +- Skip goes directly to dashboard + +--- + +### Step 6: Dashboard with Active Agent + +**Goal**: Show the agent working, build anticipation, explain next steps. + +**UI Components**: +- Agent progress visualization +- Phase indicators (Planning → Building → Testing → PR) +- Notification setup prompt + +**Copy**: +``` +🚀 Your agent is working! + +┌────────────────────────────────────────────────────────────────────┐ +│ "Add logout button to navbar" │ +│ │ +│ ████████████████░░░░░░░░░░░░░░░░░░░░░░░░ 42% │ +│ │ +│ 📋 Planning → 🔨 Building → 🧪 Testing → ✅ PR Ready │ +│ ✓ Active │ +│ │ +│ Estimated completion: ~45 minutes │ +└────────────────────────────────────────────────────────────────────┘ + +💤 You don't need to watch this! Come back in the morning. + +┌────────────────────────────────────────────────────────────────────┐ +│ 📬 Get notified when your PR is ready? 
│ +│ │ +│ [Enable Browser Notifications] [Email me instead] [No thanks] │ +└────────────────────────────────────────────────────────────────────┘ +``` + +**Technical Notes**: +- WebSocket connection for real-time updates +- Request notification permission +- Store notification preferences +- Mark `onboarding_completed` in user record + +--- + +## Conversion Triggers (Post-Onboarding) + +After onboarding, conversion opportunities appear naturally: + +### Trigger 1: Workflow Limit Reached + +``` +┌────────────────────────────────────────────────────────────────────┐ +│ ⚠️ You've used 5 of 5 free workflows this month │ +│ │ +│ Your task is queued. It will run when: │ +│ • Your limit resets on Jan 1 (4 days) │ +│ • You upgrade to Pro ($50/mo for 100 workflows) │ +│ • You claim Founding Member ($299 once for 50/mo forever) │ +│ │ +│ [Upgrade to Pro] [Claim Founding Member] [Wait for reset] │ +└────────────────────────────────────────────────────────────────────┘ +``` + +### Trigger 2: Agent Queue + +``` +┌────────────────────────────────────────────────────────────────────┐ +│ 🕐 2 tasks queued behind your running agent │ +│ │ +│ Free tier runs 1 agent at a time. │ +│ Pro runs 5 agents in parallel - ship 5x faster. │ +│ │ +│ [Upgrade to Pro →] [Keep waiting] │ +└────────────────────────────────────────────────────────────────────┘ +``` + +### Trigger 3: Morning Email + +``` +Subject: ☀️ Your PR is ready! + a special offer + +Hey {name}, + +Your feature "Add logout button" is ready for review! + +→ View PR: https://github.com/... + +You've shipped 3 features this week with OmoiOS. +At this pace, you'll hit your free limit in 2 days. 
+ +🔥 Lock in Founding Member access ($299 once) before it's gone: +→ Claim Your Spot (87 left) + +Happy shipping, +The OmoiOS Team +``` + +--- + +## Analytics Events to Track + +| Event | When | Data | +|-------|------|------| +| `onboarding_started` | Step 1 load | user_id, source | +| `github_connected` | OAuth complete | user_id, github_username | +| `repo_selected` | Step 3 complete | user_id, repo_name | +| `first_spec_submitted` | Step 4 complete | user_id, spec_length | +| `pricing_viewed` | Step 5 load | user_id, current_tier | +| `upgrade_clicked` | Any upgrade button | user_id, target_tier | +| `onboarding_completed` | Step 6 complete | user_id, total_time | +| `onboarding_abandoned` | Left mid-flow | user_id, last_step | + +--- + +## Implementation Priority + +1. **P0**: Steps 2-3 (GitHub + Repo selection) - Blocking, required +2. **P0**: Step 4 (First spec) - Core value moment +3. **P1**: Step 5 (Plan selection) - Revenue opportunity +4. **P1**: Post-onboarding triggers - Conversion nudges +5. **P2**: Step 1 animation polish +6. 
**P2**: Morning email sequence + +--- + +## Files to Create/Modify + +| File | Action | Description | +|------|--------|-------------| +| `frontend/app/(auth)/onboarding/page.tsx` | Modify | Replace with multi-step wizard | +| `frontend/components/onboarding/OnboardingWizard.tsx` | Create | Main wizard container | +| `frontend/components/onboarding/steps/WelcomeStep.tsx` | Create | Step 1 | +| `frontend/components/onboarding/steps/GitHubStep.tsx` | Create | Step 2 | +| `frontend/components/onboarding/steps/RepoSelectStep.tsx` | Create | Step 3 | +| `frontend/components/onboarding/steps/FirstSpecStep.tsx` | Create | Step 4 | +| `frontend/components/onboarding/steps/PlanSelectStep.tsx` | Create | Step 5 | +| `frontend/components/onboarding/UpgradeBanner.tsx` | Create | Reusable upgrade prompt | +| `frontend/hooks/useOnboarding.ts` | Create | Onboarding state management | + +--- + +## Related Documentation + +- [Pricing Strategy](../design/billing/pricing_strategy.md) - Tier definitions and pricing +- [Page Flows - Authentication](../page_flows/01_authentication.md) - OAuth flow details +- [Billing Page](../page_flows/11_cost_management.md) - Post-onboarding billing UI + +--- + +**Next**: See [README.md](./README.md) for complete documentation index. 
diff --git a/frontend/app/(auth)/onboarding/page.tsx b/frontend/app/(auth)/onboarding/page.tsx index df122a50..a39c8344 100644 --- a/frontend/app/(auth)/onboarding/page.tsx +++ b/frontend/app/(auth)/onboarding/page.tsx @@ -1,222 +1,39 @@ "use client" -import { useState } from "react" -import { useRouter } from "next/navigation" -import { Button } from "@/components/ui/button" -import { Input } from "@/components/ui/input" -import { Label } from "@/components/ui/label" -import { CardDescription, CardTitle } from "@/components/ui/card" -import { RadioGroup, RadioGroupItem } from "@/components/ui/radio-group" -import { Progress } from "@/components/ui/progress" -import { Loader2, ArrowRight, Building2, User, Github, CheckCircle } from "lucide-react" -import { useAuth } from "@/hooks/useAuth" -import { api } from "@/lib/api/client" - -type Step = "role" | "organization" | "github" | "complete" - -export default function OnboardingPage() { - const router = useRouter() - const { user } = useAuth() - const [currentStep, setCurrentStep] = useState("role") - const [isLoading, setIsLoading] = useState(false) - const [formData, setFormData] = useState({ - role: "", - organizationName: "", - organizationType: "personal", - githubConnected: false, - }) - - const steps: Step[] = ["role", "organization", "github", "complete"] - const currentStepIndex = steps.indexOf(currentStep) - const progress = ((currentStepIndex + 1) / steps.length) * 100 - - const handleNext = () => { - const nextIndex = currentStepIndex + 1 - if (nextIndex < steps.length) { - setCurrentStep(steps[nextIndex]) - } - } - - const handleSkip = () => { - handleNext() - } - - const handleComplete = async () => { - setIsLoading(true) - try { - // Save onboarding data (API endpoint may not exist yet) - await api.post("/api/v1/users/onboarding", formData).catch(() => { - // Silently fail if endpoint doesn't exist - console.log("Onboarding endpoint not available, skipping...") - }) - } finally { - setIsLoading(false) 
- router.push("/command") - } - } - - const connectGitHub = () => { - const apiUrl = process.env.NEXT_PUBLIC_API_URL || "http://localhost:18000" - window.location.href = `${apiUrl}/api/v1/auth/oauth/github?onboarding=true` - } +import { Suspense } from "react" +import { OnboardingWizard } from "@/components/onboarding" +import { Skeleton } from "@/components/ui/skeleton" +function OnboardingSkeleton() { return ( -
- {/* Welcome message */} - {user?.full_name && currentStepIndex === 0 && ( -
- Welcome, {user.full_name}! 👋 -
- )} - - {/* Progress */} +
-
- Step {currentStepIndex + 1} of {steps.length} - {Math.round(progress)}% complete +
+ +
- +
- - {/* Step: Role */} - {currentStep === "role" && ( -
-
- What describes you best? - This helps us personalize your experience -
- - setFormData({ ...formData, role: value })} - className="space-y-3" - > - {[ - { value: "engineering_manager", label: "Engineering Manager" }, - { value: "senior_engineer", label: "Senior Engineer" }, - { value: "tech_lead", label: "Technical Lead" }, - { value: "developer", label: "Developer" }, - { value: "other", label: "Other" }, - ].map((option) => ( -
- - -
- ))} -
- -
- - -
-
- )} - - {/* Step: Organization */} - {currentStep === "organization" && ( -
-
- Set up your workspace - Create an organization to manage your projects -
- -
- setFormData({ ...formData, organizationType: value })} - className="grid grid-cols-2 gap-4" - > -
- - -
-
- - -
-
- - {formData.organizationType === "team" && ( -
- - setFormData({ ...formData, organizationName: e.target.value })} - /> -
- )} -
- -
- - -
+
+
+ +
- )} - - {/* Step: GitHub */} - {currentStep === "github" && ( -
-
- Connect GitHub - - Connect your GitHub account to access your repositories - -
- -
- -

- OmoiOS needs access to your repositories to create agents and manage code. -

- -
- -
- - -
+
+ {[1, 2, 3].map((i) => ( + + ))}
- )} - - {/* Step: Complete */} - {currentStep === "complete" && ( -
-
- -
-
- You're all set! - - Your workspace is ready. Start by creating your first project. - -
- - -
- )} + +
) } + +export default function OnboardingPage() { + return ( + }> + + + ) +} diff --git a/frontend/app/page.tsx b/frontend/app/page.tsx index 127bff71..bbe1c808 100644 --- a/frontend/app/page.tsx +++ b/frontend/app/page.tsx @@ -9,6 +9,7 @@ import { LogoCloudSection, FeaturesSection, WorkflowSection, + ProductShowcaseSection, NightShiftSection, StatsSection, PricingSection, @@ -48,6 +49,9 @@ function LandingPage() { {/* How It Works - Workflow Section */} + {/* Product Screenshots Showcase */} + + {/* Features Bento Grid */} diff --git a/frontend/components/marketing/index.ts b/frontend/components/marketing/index.ts index 253ad4b4..7edef078 100644 --- a/frontend/components/marketing/index.ts +++ b/frontend/components/marketing/index.ts @@ -6,6 +6,7 @@ export { HeroSection } from "./sections/HeroSection" export { LogoCloudSection } from "./sections/LogoCloudSection" export { FeaturesSection } from "./sections/FeaturesSection" export { WorkflowSection } from "./sections/WorkflowSection" +export { ProductShowcaseSection } from "./sections/ProductShowcaseSection" export { NightShiftSection } from "./sections/NightShiftSection" export { StatsSection } from "./sections/StatsSection" export { PricingSection } from "./sections/PricingSection" diff --git a/frontend/components/marketing/sections/HeroSection.tsx b/frontend/components/marketing/sections/HeroSection.tsx index 181a489a..e4e482aa 100644 --- a/frontend/components/marketing/sections/HeroSection.tsx +++ b/frontend/components/marketing/sections/HeroSection.tsx @@ -2,6 +2,7 @@ import { useState } from "react" import Link from "next/link" +import Image from "next/image" import { useRouter } from "next/navigation" import { motion } from "framer-motion" import { ArrowRight, Play, Loader2 } from "lucide-react" @@ -189,7 +190,7 @@ export function HeroSection({ className }: HeroSectionProps) { initial={{ opacity: 0, y: 40 }} animate={{ opacity: 1, y: 0 }} transition={{ duration: 0.7, delay: 0.5 }} - className="relative 
mx-auto mt-16 max-w-4xl" + className="relative mx-auto mt-16 max-w-5xl" >
{/* Browser Chrome */} @@ -204,34 +205,15 @@ export function HeroSection({ className }: HeroSectionProps) {
- {/* Dashboard Content Placeholder */} -
-
- {/* Kanban Columns */} - {["Backlog", "Analyzing", "Building", "Testing", "Done"].map((col, i) => ( -
-
- {col} - - {i === 2 ? 3 : i === 4 ? 5 : i + 1} - -
- {/* Task Cards */} - {[...Array(i === 2 ? 3 : i === 4 ? 2 : 1)].map((_, j) => ( -
-
-
-
- ))} -
- ))} -
+ {/* Real Kanban Board Screenshot */} +
+ OmoiOS Kanban Board showing tasks in Backlog, Analyzing, Building, Testing, and Done columns
diff --git a/frontend/components/marketing/sections/ProductShowcaseSection.tsx b/frontend/components/marketing/sections/ProductShowcaseSection.tsx new file mode 100644 index 00000000..da54a2cd --- /dev/null +++ b/frontend/components/marketing/sections/ProductShowcaseSection.tsx @@ -0,0 +1,208 @@ +"use client" + +import { useState } from "react" +import Image from "next/image" +import { motion, AnimatePresence } from "framer-motion" +import { + LayoutDashboard, + FolderKanban, + Terminal, + MessageSquare, + ChevronLeft, + ChevronRight, +} from "lucide-react" +import { Button } from "@/components/ui/button" +import { cn } from "@/lib/utils" + +const showcaseItems = [ + { + id: "command", + title: "Command Center", + description: "Describe what you want to build in plain English. Select your repo, branch, and model. Launch an agent with one click.", + icon: MessageSquare, + image: "/screenshots/kanban-board.png", + alt: "OmoiOS Command Center - describe what you want to build", + }, + { + id: "kanban", + title: "Kanban Board", + description: "Watch your tickets flow through the pipeline: Backlog → Analyzing → Building → Testing → Deploying → Done. Full visibility into agent progress.", + icon: FolderKanban, + image: "/screenshots/agent-task-view.png", + alt: "OmoiOS Kanban Board showing task pipeline", + }, + { + id: "project", + title: "Project Dashboard", + description: "One view for everything: active tickets, running agents, commit history, and GitHub integration. Know exactly where every project stands.", + icon: LayoutDashboard, + image: "/screenshots/command-center.png", + alt: "OmoiOS Project Dashboard with overview stats", + }, + { + id: "agent", + title: "Live Agent View", + description: "Watch agents work in real-time: see the code they write, commands they run, and decisions they make. 
Full transparency, zero babysitting.", + icon: Terminal, + image: "/screenshots/project-overview.png", + alt: "OmoiOS Live Agent View showing real-time code execution", + }, +] + +interface ProductShowcaseSectionProps { + className?: string + id?: string +} + +export function ProductShowcaseSection({ className, id }: ProductShowcaseSectionProps) { + const [activeIndex, setActiveIndex] = useState(0) + const activeItem = showcaseItems[activeIndex] + + const handlePrev = () => { + setActiveIndex((prev) => (prev === 0 ? showcaseItems.length - 1 : prev - 1)) + } + + const handleNext = () => { + setActiveIndex((prev) => (prev === showcaseItems.length - 1 ? 0 : prev + 1)) + } + + return ( +
+
+ {/* Section Header */} + +

+ See the Product in Action +

+

+ Real screenshots from real projects. No mockups, no Figma dreams. +

+
+ + {/* Tab Navigation */} +
+
+ {showcaseItems.map((item, index) => ( + + ))} +
+
+ + {/* Screenshot Display */} + +
+ {/* Browser Frame */} +
+ {/* Browser Chrome */} +
+
+
+
+
+
+
+ app.omoios.dev +
+
+ + +
+
+ + {/* Screenshot */} + + + {activeItem.alt} + + +
+ + {/* Description Card */} + + +

+ {activeItem.title} +

+

+ {activeItem.description} +

+
+
+ + {/* Pagination Dots */} +
+ {showcaseItems.map((_, index) => ( +
+
+ +
+
+ ) +} diff --git a/frontend/components/onboarding/OnboardingWizard.tsx b/frontend/components/onboarding/OnboardingWizard.tsx new file mode 100644 index 00000000..1886a45c --- /dev/null +++ b/frontend/components/onboarding/OnboardingWizard.tsx @@ -0,0 +1,100 @@ +"use client" + +import { useEffect } from "react" +import { useSearchParams } from "next/navigation" +import { Progress } from "@/components/ui/progress" +import { Button } from "@/components/ui/button" +import { ArrowLeft } from "lucide-react" +import { useOnboarding, type OnboardingStep } from "@/hooks/useOnboarding" +import { WelcomeStep } from "./steps/WelcomeStep" +import { GitHubStep } from "./steps/GitHubStep" +import { RepoSelectStep } from "./steps/RepoSelectStep" +import { FirstSpecStep } from "./steps/FirstSpecStep" +import { PlanSelectStep } from "./steps/PlanSelectStep" +import { CompleteStep } from "./steps/CompleteStep" + +const STEP_COMPONENTS: Record = { + welcome: WelcomeStep, + github: GitHubStep, + repo: RepoSelectStep, + "first-spec": FirstSpecStep, + plan: PlanSelectStep, + complete: CompleteStep, +} + +const STEP_TITLES: Record = { + welcome: "Welcome", + github: "Connect GitHub", + repo: "Select Repository", + "first-spec": "First Feature", + plan: "Choose Plan", + complete: "All Set!", +} + +export function OnboardingWizard() { + const searchParams = useSearchParams() + const { + currentStep, + progress, + canGoBack, + prevStep, + goToStep, + checkGitHubConnection, + } = useOnboarding() + + // Handle return from GitHub OAuth + useEffect(() => { + const step = searchParams.get("step") + const githubConnected = searchParams.get("github_connected") + + if (githubConnected === "true") { + checkGitHubConnection() + } + + if (step && isValidStep(step)) { + goToStep(step as OnboardingStep) + } + }, [searchParams, goToStep, checkGitHubConnection]) + + const StepComponent = STEP_COMPONENTS[currentStep] + + return ( +
+ {/* Header with progress */} +
+ {/* Back button */} + {canGoBack && currentStep !== "complete" && ( + + )} + + {/* Progress bar */} + {currentStep !== "complete" && ( +
+
+ {STEP_TITLES[currentStep]} + {progress}% complete +
+ +
+ )} +
+ + {/* Step content */} +
+ +
+
+ ) +} + +function isValidStep(step: string): step is OnboardingStep { + return ["welcome", "github", "repo", "first-spec", "plan", "complete"].includes(step) +} diff --git a/frontend/components/onboarding/index.ts b/frontend/components/onboarding/index.ts new file mode 100644 index 00000000..925970d3 --- /dev/null +++ b/frontend/components/onboarding/index.ts @@ -0,0 +1,7 @@ +export { OnboardingWizard } from "./OnboardingWizard" +export { WelcomeStep } from "./steps/WelcomeStep" +export { GitHubStep } from "./steps/GitHubStep" +export { RepoSelectStep } from "./steps/RepoSelectStep" +export { FirstSpecStep } from "./steps/FirstSpecStep" +export { PlanSelectStep } from "./steps/PlanSelectStep" +export { CompleteStep } from "./steps/CompleteStep" diff --git a/frontend/components/onboarding/steps/CompleteStep.tsx b/frontend/components/onboarding/steps/CompleteStep.tsx new file mode 100644 index 00000000..b786f667 --- /dev/null +++ b/frontend/components/onboarding/steps/CompleteStep.tsx @@ -0,0 +1,137 @@ +"use client" + +import { useEffect } from "react" +import { Button } from "@/components/ui/button" +import { CardTitle, CardDescription } from "@/components/ui/card" +import { Progress } from "@/components/ui/progress" +import { + CheckCircle, + ArrowRight, + Loader2, + Bell, + Mail, + ExternalLink, + Clock, +} from "lucide-react" +import { useOnboarding } from "@/hooks/useOnboarding" + +export function CompleteStep() { + const { data, completeOnboarding, isLoading } = useOnboarding() + + // Auto-complete onboarding after a short delay to show success state + useEffect(() => { + const timer = setTimeout(() => { + // Don't auto-redirect, let user click + }, 2000) + return () => clearTimeout(timer) + }, []) + + return ( +
+ {/* Success icon */} +
+ +
+ + {/* Header */} +
+ You're All Set! + + Your agent is working on your first feature. + {data.selectedPlan === "lifetime" && ( + + Welcome, Founding Member! + + )} + +
+ + {/* Agent progress preview */} + {data.firstSpecText && ( +
+
+ Your first feature + +
+

+ "{data.firstSpecText}" +

+ +
+ Planning → Building → Testing → PR + ~45 min +
+
+ )} + + {/* Notification prompt */} +
+

Get notified when your PR is ready

+
+ + +
+
+ + {/* What's next */} +
+

What you can do now:

+
    +
  • + + Watch your agent work in real-time +
  • +
  • + + Add more features to the queue +
  • +
  • + + Invite team members to collaborate +
  • +
+
+ + {/* CTA */} +
+ + +

+ You don't need to watch - come back in the morning for your PR! +

+
+
+ ) +} diff --git a/frontend/components/onboarding/steps/FirstSpecStep.tsx b/frontend/components/onboarding/steps/FirstSpecStep.tsx new file mode 100644 index 00000000..73b37f73 --- /dev/null +++ b/frontend/components/onboarding/steps/FirstSpecStep.tsx @@ -0,0 +1,139 @@ +"use client" + +import { useState } from "react" +import { Button } from "@/components/ui/button" +import { Textarea } from "@/components/ui/textarea" +import { CardTitle, CardDescription } from "@/components/ui/card" +import { Badge } from "@/components/ui/badge" +import { ArrowRight, Sparkles, Clock, Loader2, AlertCircle } from "lucide-react" +import { useOnboarding } from "@/hooks/useOnboarding" + +const SUGGESTION_CHIPS = [ + "Add form validation to the contact form", + "Create a dark mode toggle", + "Add a logout button to the navbar", + "Fix the broken link in the footer", + "Add loading states to buttons", + "Improve mobile navigation", +] + +export function FirstSpecStep() { + const { data, submitFirstSpec, isLoading, error, clearError } = useOnboarding() + const [specText, setSpecText] = useState(data.firstSpecText || "") + + const handleSubmit = async () => { + if (!specText.trim()) return + await submitFirstSpec(specText.trim()) + } + + const handleSuggestionClick = (suggestion: string) => { + setSpecText(suggestion) + clearError() + } + + const characterCount = specText.length + const isValidLength = characterCount >= 10 && characterCount <= 2000 + + return ( +
+ {/* Header */} +
+ + + Describe Your First Feature + + + What should we build tonight? Start simple - you can go bigger later. + +
+ + {/* Selected repo context */} + {data.selectedRepo && ( +
+ Building in + {data.selectedRepo.fullName} +
+ )} + + {/* Spec input */} +
+
+ +