diff --git a/examples/deepwork/README.md b/examples/deepwork/README.md new file mode 100644 index 00000000..0a51be7b --- /dev/null +++ b/examples/deepwork/README.md @@ -0,0 +1,38 @@ +# Deepwork Examples + +Production examples from the Deepwork multi-agent swarm — a Gas Town running 6 rigs with 15+ agents, private wasteland federation, and external contributor onboarding. + +## Packs + +### `packs/deepwork-org/` + +Full org config pack for running a multi-agent Gas Town with: + +- **Private wasteland federation** — DoltHub-based task board with bidirectional sync +- **Deterministic effort estimation** — pattern-based task sizing (trivial → epic) +- **Self-evolving knowledge** — 19 knowledge files, auto-harvested from closed beads every 6h +- **Gitea → GitHub mirror** — hourly sync with auto-release creation +- **10 agent roles** — mayor, deacon, witness, refinery, polecat, crew, coordinator, planner, reviewer, worker +- **7 cron jobs** — thread guardrail, log rotation, knowledge evolution, GitHub mirror, wasteland push, pack update, auto-release +- **12 wasteland governance rules** — enforced via deepwork-governance.yaml + +### Key Features + +**Wasteland Integration in Formulas:** +- `mol-polecat-work` — polecats auto-detect wasteland items at start (3 cases: direct link, fuzzy match, auto-create), claim them, and complete on `gt done` +- `mol-witness-patrol` — witnesses check wasteland each patrol cycle and dispatch unclaimed work +- Smart internal filter ensures only externally meaningful work reaches wasteland + +**External Contributor Onboarding:** +- Complete onboarding guide at `docs/wasteland/ONBOARDING.md` +- Task posting template at `docs/wasteland/POST_TEMPLATE.md` +- Effort/priority guide, reputation system, project catalog + +### Prerequisites + +This pack requires a [patched gt binary](https://github.com/gastownhall/gastown/pull/3501) that reads wasteland config from `mayor/wasteland.json` instead of hardcoding `hop/wl-commons`. 
+ +### Source + +- GitHub: [/deepwork-org-config-pack](https://github.com//deepwork-org-config-pack) +- Maintainer: [@](https://github.com/) diff --git a/examples/deepwork/packs/deepwork-org/crons/coordinator-crons.yaml b/examples/deepwork/packs/deepwork-org/crons/coordinator-crons.yaml new file mode 100644 index 00000000..e194975e --- /dev/null +++ b/examples/deepwork/packs/deepwork-org/crons/coordinator-crons.yaml @@ -0,0 +1,65 @@ +# Coordinator Crons — installed on planner/coordinator nodes +# These run on the parent GT that manages workers. + +role: "coordinator" +requires: + - cron + - bash + - dolt # for DoltHub sync + +crons: + dolthub-sync: + schedule: "*/2 * * * *" + script: "sync-dolthub.sh" + script_path: "$GT_ROOT/mayor/sync-dolthub.sh" + log: "/tmp/dolthub-sync.log" + purpose: "Sync DoltHub mail database for cross-GT communication" + + mesh-sync: + schedule: "*/2 * * * *" + script: "mesh-sync.sh" + log: "/tmp/gt-mesh-sync.log" + purpose: "Mesh heartbeat, knowledge sync, mail handler trigger" + + deacon-pr-review: + schedule: "*/30 * * * *" + script: "deacon-pr-review.sh" + purpose: "Auto-review and merge approved PRs across repos" + + mesh-improve-review: + schedule: "*/10 * * * *" + script: "mesh-improve.sh review" + log: "/tmp/mesh-improve.log" + purpose: "Self-improving loop: review and graduate improvements" + + mesh-autosync-digest: + schedule: "0 */2 * * *" + script: "mesh-auto-sync.sh digest" + log: "/tmp/mesh-autosync.log" + purpose: "Broadcast work digest to mesh peers every 2 hours" + + mesh-work-watchdog: + schedule: "*/5 * * * *" + script: "mesh-work-watchdog.sh" + purpose: "Check worker container sessions alive, restart if dead" + + mesh-mayor-daemon: + schedule: "*/10 * * * *" + script: "mesh-mayor-daemon.sh" + purpose: "Mayor daemon health check" + + mesh-github-sync: + schedule: "0 6 * * *" + script: "mesh-github-sync.sh" + purpose: "Daily sync to GitHub public repos" + + mesh-pack-updater: + schedule: "0 */2 * * *" + script: 
"mesh-pack-updater.sh" + purpose: "Update blueprint packs from DoltHub" + + cron-audit: + schedule: "0 6 * * *" + script: "cron-audit.sh --enforce" + log: "/tmp/cron-audit.log" + purpose: "Daily audit: verify crontab matches cron-registry.yaml" diff --git a/examples/deepwork/packs/deepwork-org/crons/release-automation.yaml b/examples/deepwork/packs/deepwork-org/crons/release-automation.yaml new file mode 100644 index 00000000..93e842b6 --- /dev/null +++ b/examples/deepwork/packs/deepwork-org/crons/release-automation.yaml @@ -0,0 +1,35 @@ +# Release & Documentation Automation — Blueprint Level +# Applied to ALL rigs via deepwork-base pack + +# Daily README check: regenerate if code changed +readme_update: + schedule: "0 6 * * *" # 6am daily + command: "mesh-readme-update.sh" + description: "Check for code changes and update README if needed" + scope: "all-rigs" + executor: "chad-ji" # Content work → Kimi + +# Release detection: check if enough work accumulated for a release +release_check: + schedule: "0 8 * * 1" # Monday 8am + command: "mesh-release-check.sh" + description: "Check if dev has enough changes for a release PR" + scope: "all-rigs" + executor: "mayor" + +# Changelog generation: auto-generate from git log +changelog_gen: + schedule: "on-release" # Triggered, not cron + command: "mesh-changelog-gen.sh" + description: "Generate CHANGELOG.md from conventional commits" + scope: "all-rigs" + executor: "chad-ji" + +# Daily status to Telegram +daily_status: + schedule: "0 9 * * *" # 9am daily + command: "mesh-daily-status.sh" + description: "Post daily status to Telegram Status topic" + scope: "coordinator" + executor: "mayor" + telegram_topic: 13 diff --git a/examples/deepwork/packs/deepwork-org/crons/town-crons.yaml b/examples/deepwork/packs/deepwork-org/crons/town-crons.yaml new file mode 100644 index 00000000..7c5b7631 --- /dev/null +++ b/examples/deepwork/packs/deepwork-org/crons/town-crons.yaml @@ -0,0 +1,73 @@ +# Town Crons — Active cron jobs for Gas 
Town +# Updated: 2026-04-01 +# Context: Single orchestrator (gt v0.13), local Gitea, wasteland federation + +# Active crons (in actual crontab) +active: + thread-guardrail: + schedule: "* * * * *" + script: "scripts/mayor/thread-guardrail.sh" + log: "logs/thread-guardrail.log" + purpose: "Kill orphan node/gt/bd processes before they exhaust ulimit (16384)" + + log-rotation: + schedule: "0 0 * * *" + script: "scripts/mayor/log-rotate.sh" + log: "logs/log-rotate.log" + purpose: "Rotate GT-related logs exceeding 10MB, keep 2 copies" + + knowledge-evolve: + schedule: "0 */6 * * *" + script: "scripts/knowledge/cron-evolve.sh" + log: "logs/knowledge-evolve.log" + purpose: "Scan closed beads for lessons, append to knowledge base and changelog" + + gitea-to-github: + schedule: "0 * * * *" + script: "scripts/mayor/gitea-to-github.sh" + log: "logs/gitea-github-sync.log" + purpose: "Mirror Gitea repos to GitHub org, create releases for 5+ commits" + + wasteland-push: + schedule: "*/15 * * * *" + script: "scripts/mayor/wasteland-push.sh" + log: "logs/wasteland-push.log" + purpose: "Push wasteland changes to DoltHub so federation friends see updates" + + pack-update: + schedule: "30 */6 * * *" + script: "scripts/mayor/pack-update.sh" + log: "logs/pack-update.log" + purpose: "Sync knowledge, changelog, formulas, docs to org config pack (Gitea + GitHub)" + + readme-release: + schedule: "0 3 * * *" + script: "scripts/mayor/readme-release.sh" + log: "logs/readme-release.log" + purpose: "Create GitHub releases for repos with 10+ new commits since last release" + +# Disabled crons (kept for reference) +disabled: + flywheel-v4: "Load balancer — obsolete (gt --agent handles providers)" + sync-dolthub: "DoltHub mesh sync — no remote GT instances currently" + batch-dispatch: "Batch dispatch — obsolete" + metrics-collector: "gascity-otel — removed" + healer: "Health checks — removed" + +# Recommended additions (not yet implemented) +recommended: + orphan-sweep: + schedule: "*/5 * * * *" + 
purpose: "Reset beads assigned to dead agents" + gate-sweep: + schedule: "*/1 * * * *" + purpose: "Evaluate timer/condition gates on beads" + wisp-compact: + schedule: "0 * * * *" + purpose: "TTL-based cleanup of ephemeral beads" + prune-branches: + schedule: "0 */6 * * *" + purpose: "Clean stale polecat branches" + spawn-storm-detect: + schedule: "*/5 * * * *" + purpose: "Detect beads stuck in crash loops" diff --git a/examples/deepwork/packs/deepwork-org/crons/worker-crons.yaml b/examples/deepwork/packs/deepwork-org/crons/worker-crons.yaml new file mode 100644 index 00000000..f241ce3f --- /dev/null +++ b/examples/deepwork/packs/deepwork-org/crons/worker-crons.yaml @@ -0,0 +1,42 @@ +# Worker Crons — auto-installed on mesh join +# These run inside worker containers/nodes. +# Variables: $GT_ROOT resolves to the GT workspace root. +# +# On install, cron-audit.sh reads this file and writes the crontab. +# On daily audit, cron-audit.sh ensures crontab matches this definition. + +role: "worker" +requires: + - cron # apt-get install -y cron + - bash + +crons: + worker-mesh-sync: + schedule: "*/2 * * * *" + script: "mesh-sync.sh" + log: "/tmp/mesh-sync.log" + purpose: "Mesh heartbeat + DoltHub sync" + + worker-inbox-check: + schedule: "*/5 * * * *" + script: "mesh-inbox.sh" + log: "/tmp/mesh-inbox.log" + purpose: "Check inbox for work assignments from coordinator" + + worker-watchdog: + schedule: "*/5 * * * *" + script: "mesh-work-watchdog.sh" + log: "/tmp/mesh-watchdog.log" + purpose: "Check for stale claims, idle sessions, restart if needed" + + worker-autosync-digest: + schedule: "0 */2 * * *" + script: "mesh-auto-sync.sh digest" + log: "/tmp/mesh-autosync.log" + purpose: "Broadcast activity digest to coordinator" + + worker-cron-audit: + schedule: "0 6 * * *" + script: "cron-audit.sh --enforce" + log: "/tmp/cron-audit.log" + purpose: "Daily audit: ensure crontab matches registry" diff --git a/examples/deepwork/packs/deepwork-org/docs/GLOSSARY.md 
b/examples/deepwork/packs/deepwork-org/docs/GLOSSARY.md new file mode 100644 index 00000000..378e693a --- /dev/null +++ b/examples/deepwork/packs/deepwork-org/docs/GLOSSARY.md @@ -0,0 +1,28 @@ +# Gas Town Glossary + +| Term | Definition | +|------|-----------| +| **Bead** | Unit of work tracked in Dolt. ID format: `prefix-hash` (e.g., `of-lj5`). | +| **Rig** | Self-contained project workspace with its own repo, beads DB, and agents. | +| **Town** | Top-level Gas Town deployment. One machine, one town. | +| **Mayor** | Human-facing coordinator. Dispatches work, reviews, merges. | +| **Deacon** | Automated patrol agent. Spawns witnesses, runs plugins. | +| **Witness** | Per-rig lifecycle agent. Monitors health, recovers orphans. | +| **Refinery** | Per-rig merge processor. Rebases, tests, merges. | +| **Polecat** | Disposable worker. Spawned per-bead via `gt sling`. | +| **Crew** | Persistent worker with domain expertise. | +| **Dog** | Short-lived helper for deacon. | +| **Sling** | Dispatching a bead to an agent. `gt sling `. | +| **Convoy** | Group of related beads tracked together. | +| **Molecule** | Instance of a formula — a multi-step workflow being executed. | +| **Formula** | Workflow template (TOML) defining steps for agents. | +| **Wisp** | Lightweight, ephemeral bead (patrol reports, status checks). | +| **Hook** | Bead attached to an agent — the agent's current work assignment. | +| **Plugin** | Deacon patrol task on a cooldown gate. | +| **Mesh** | Cross-town communication via DoltHub sync. | +| **Wasteland** | Shared federation board on DoltHub for collaborative work. | +| **bd** | Beads CLI for issue tracking. | +| **gt** | Gas Town CLI for agent orchestration. | +| **Tap** | Guard/hook system intercepting certain gt commands. | +| **Boot** | Ephemeral deacon watchdog agent. | +| **Ralph Loop** | Fresh-context-per-step execution for multi-step work. 
| diff --git a/examples/deepwork/packs/deepwork-org/docs/wasteland/ONBOARDING.md b/examples/deepwork/packs/deepwork-org/docs/wasteland/ONBOARDING.md new file mode 100644 index 00000000..d4b8d1b4 --- /dev/null +++ b/examples/deepwork/packs/deepwork-org/docs/wasteland/ONBOARDING.md @@ -0,0 +1,399 @@ +# Joining the Deepwork Wasteland + +The Deepwork Wasteland is a private, federated work board that connects your Gas Town with the Deepwork team. You post tasks, claim work, submit PRs, and build reputation — all coordinated via DoltHub and GitHub. + +``` +Your Gastown ◄──DoltHub──► Deepwork Gastown + │ │ + └──── GitHub () ───┘ + (code, PRs, reviews) +``` + +- **DoltHub** — Task board (post, claim, track reputation) +- **GitHub** — Code lives in the `` org +- **No VPN/tunnel needed** — public internet only + +--- + +## Setup (One-Time) + +### 1. Install Gas Town + +```bash +go install github.com/steveyegge/gastown/cmd/gt@latest +``` + +### 2. Patch gt for Private Wasteland Support + +The upstream `gt` binary hardcodes `hop/wl-commons` as the wasteland. Our private wasteland (`/`) requires a patched binary. Without this patch, `gt wl browse`, `gt wl stamps`, and `gt wl show` will try to clone from the wrong database. + +**Build the patched binary:** +```bash +cd /tmp +git clone https://github.com/steveyegge/gastown.git gastown-patch +cd gastown-patch +git checkout v0.13.0 +git cherry-pick # TODO: replace with PR link once merged + +# Or apply the patch manually: +# The patch makes all wl commands read from mayor/wasteland.json +# instead of hardcoding hop/wl-commons. +# See: https://github.com//deepwork-org-config-pack/tree/main/docs/wasteland/GT_PATCH.md + +VERSION=$(git describe --tags --always --dirty) +go build -ldflags "-X github.com/steveyegge/gastown/internal/cmd.Version=$VERSION" \ + -o ~/.local/bin/gt ./cmd/gt/ +``` + +**Verify:** +```bash +gt version # Should show v0.13.0 or later +``` + +We're working on getting this merged upstream. 
Once accepted, this step goes away. + +### 3. Install Dolt + +```bash +curl -L https://github.com/dolthub/dolt/releases/latest/download/install.sh | bash +``` + +### 4. Create Accounts + +- **DoltHub** — https://www.dolthub.com/ (get API token from Settings > Tokens) +- **GitHub** — Ask Pratham (@) to invite you to the `` org + +### 5. Initialize Gas Town + +```bash +mkdir my-town && cd my-town +gt init +gt up +``` + +### 6. Join the Wasteland + +```bash +export DOLTHUB_TOKEN="your-dolthub-api-token" +export DOLTHUB_ORG="your-dolthub-username" + +gt wl join / --handle your-name --display-name "Your Name" +``` + +### 7. Install the Org Config Pack + +Clone the Deepwork config pack for formulas, knowledge, and conventions: + +```bash +git clone https://github.com//deepwork-org-config-pack.git +``` + +This contains: +- 63 formulas (work lifecycle, reviews, releases) +- Knowledge base (patterns, anti-patterns, conventions) +- Automation scripts (sync, changelog, releases) +- Role definitions (polecat, witness, refinery, etc.) + +### 8. 
Clone Project Repos + +```bash +git clone https://github.com//.git +git clone https://github.com//.git +git clone https://github.com//.git +git clone https://github.com//website.git +git clone https://github.com//.git +git clone https://github.com//.git +``` + +--- + +## How the Board Works + +### Browse Available Tasks + +```bash +gt wl browse # All open tasks +gt wl browse --project # Filter by project +gt wl browse --type bug # Only bugs +gt wl browse --priority 0 # Critical only +gt wl browse --json # Machine-readable +``` + +### Understanding Effort Levels + +Every task has an effort level based on complexity: + +| Effort | Meaning | Typical Scope | Time Estimate | +|--------|---------|---------------|---------------| +| **trivial** | Config tweak, text update, delete unused code | 1 file | < 1 hour | +| **small** | Focused bug fix, add one component, simple feature | 1-3 files | 1-4 hours | +| **medium** | New page/endpoint, integration work, moderate refactor | 4-10 files | 4-12 hours | +| **large** | New system/module, cross-cutting feature, multi-component | 10+ files | 1-3 days | +| **epic** | New product area, architecture change, full deployment | Many files | 1+ week | + +**Pick tasks matching your skill level.** Start with `small` to learn the codebase, then move to `medium` and `large`. 
+ +### Understanding Priority + +| Priority | Meaning | When to Pick | +|----------|---------|--------------| +| **P0** | Critical — security, data loss, broken deploy | Pick immediately if you can | +| **P1** | High — important features, significant bugs | Your main work queue | +| **P2** | Normal — standard work | When P0/P1 are empty | +| **P3-P4** | Low/Backlog — nice to have | Only if interested | + +--- + +## Working on a Task + +### When to Take a Task + +- Browse the board and find something matching your skills and available time +- Check the effort level — don't claim a `large` task if you only have 2 hours +- Read the full description: `gt wl show ` +- Make sure it has a repo link, acceptance criteria, and clear scope +- If the description is unclear, post a question (see below) + +### How to Take a Task + +```bash +# 1. Read the full details +gt wl show w-abc123 + +# 2. Claim it (this locks it — nobody else can claim it) +gt wl claim w-abc123 + +# 3. Clone the repo (if you haven't already) +git clone https://github.com//.git +cd + +# 4. Create a branch +git checkout -b feat/short-description + +# 5. Do the work +# ... code, test, verify ... + +# 6. Push and create a PR +git push origin feat/short-description +gh pr create --title "Short title" --body "Resolves wasteland item w-abc123 + +## Changes +- What you changed and why + +## Testing +- How you tested it" + +# 7. Submit completion with evidence +gt wl done w-abc123 --evidence "https://github.com///pull/42" +``` + +### If You Get Stuck + +```bash +# Post a question on the wasteland board +gt wl post \ + --title "Question: How does X work in ?" \ + --project \ + --type docs \ + --priority 3 \ + --description "I'm working on w-abc123 and I'm unsure about... + +Specific question: ... +What I've tried: ..." +``` + +### If You Can't Finish + +If you claimed a task but can't complete it, there's no built-in "unclaim" yet. 
Post a note: + +```bash +gt wl post \ + --title "Unclaim: w-abc123 — not able to finish" \ + --project \ + --type docs \ + --priority 3 \ + --description "Dropping w-abc123. Reason: ... +Progress so far: ... +Branch with partial work: " +``` + +The Deepwork team will reset the item. + +--- + +## Creating Tasks + +You can post tasks that the Deepwork team (or their agents) will pick up. + +### Task Template + +Every task MUST include enough context for someone (human or agent) to complete it without asking questions: + +```bash +gt wl post \ + --title "Clear, actionable title" \ + --project "" \ + --type "bug|feature|docs|design" \ + --priority 0-4 \ + --tags "relevant,tech,tags" \ + --description "## Context +What is this project? One sentence. + +**Repo:** https://github.com// +**Stack:** Languages, frameworks +**Key files:** where the work happens + +## Task +What exactly needs to be done. Be specific. + +## Acceptance Criteria +- [ ] Criterion 1 +- [ ] Criterion 2 +- [ ] Tests pass +- [ ] PR submitted + +## References +- Related issues: +- Design doc: " +``` + +### What Makes a Good Task + +- **Externally actionable** — someone outside the team can do it with just a repo clone +- **Clear scope** — not "improve the dashboard" but "add dark mode toggle to settings page" +- **Has acceptance criteria** — how do you know when it's done? +- **Has repo link** — where's the code? + +### What NOT to Post + +- Internal infrastructure work (CLAUDE.md, witness config, patrol tuning) +- Tasks requiring access to private servers or databases +- Vague ideas without concrete scope ("make things better") +- Duplicate tasks — check `gt wl browse` first + +### Nudging the Deepwork Team + +Posted a task and want it picked up faster? The Deepwork witnesses automatically scan the wasteland board and assign work to available polecats. Higher priority items get picked up first. 
+ +To signal urgency: +- Use `--priority 0` for critical work +- Use `--priority 1` for important work +- Add clear tags so the right rig's witness picks it up + +--- + +## Reputation System + +Every completed task builds your reputation through stamps. + +```bash +gt wl charsheet # Your character sheet +gt wl charsheet alice-dev # Someone else's sheet +gt wl stamps your-handle # View stamps +gt wl scorekeeper # Compute tier standings +``` + +### Tiers + +| Tier | Requirements | Unlocks | +|------|-------------|---------| +| **newcomer** | Just joined | Browse, fork, claim work | +| **contributor** | 3+ stamps | Post wanted items, endorse others | +| **trusted** | cluster_breadth >= 1 | Direct branch writes | +| **maintainer** | Validated by trusted+ | Validate completions, stamp others | + +### How Stamps Work + +When you complete work, the Deepwork team reviews your PR and stamps it with: +- **Quality** (0-5): How good is the code? +- **Reliability** (0-5): Did you finish on time? Were there regressions? +- **Creativity** (0-5): Novel approach? Clean design? + +Your character sheet aggregates these into a reputation profile. + +--- + +## Projects + +| Project | Repo | Stack | Description | +|---------|------|-------|-------------| +| | [/](https://github.com//) | Python, TypeScript, Docker | ML shelf analysis with mobile app + dashboard | +| | [/](https://github.com//) | Python, TypeScript | AI with integration | +| | [/](https://github.com//) | TypeScript, Phaser 3 | GBA-style 3D agent visualizer | +| website | [/website](https://github.com//website) | TypeScript, Next.js | Deepwork company site (deepwork.art) | +| | [/](https://github.com//) | TypeScript | Product catalog | +| | [/](https://github.com//) | TypeScript | Media processing pipeline | + +--- + +## Syncing + +```bash +gt wl sync # Pull latest from upstream +gt wl sync --dry-run # Preview changes +``` + +Sync regularly to see new tasks and status updates. 
+ +--- + +## Contributing to the Org Pack + +The config pack (knowledge, formulas, conventions) is shared across all Gas Towns. You can contribute: + +```bash +git clone https://github.com//deepwork-org-config-pack.git +cd deepwork-org-config-pack +# Add learnings to knowledge/, update docs, improve formulas +git checkout -b docs/your-contribution +gh pr create +``` + +Or post a learning: +```bash +gt wl post --title "Learning: discovered X pattern in " \ + --type docs --project --priority 3 \ + --description "## What I Learned +... +## Why It Matters +... +## How to Apply +..." +``` + +--- + +## Troubleshooting + +| Problem | Fix | +|---------|-----| +| "rig has not joined a wasteland" | Run `gt wl join / --handle your-name` | +| "database not found" | Run `gt up` to start the Dolt server | +| `gt wl browse` clones hop/wl-commons | You need the patched gt binary (see Setup step 2) | +| Sync failures | Check `DOLTHUB_TOKEN`: `echo $DOLTHUB_TOKEN` | +| GitHub access denied | Ask Pratham (@) for invite | +| "wanted item not found" after posting | Run `gt wl sync` to pull latest | + +--- + +## Quick Reference + +```bash +# Browse +gt wl browse +gt wl show + +# Work +gt wl claim +gt wl done --evidence "PR_URL" + +# Post +gt wl post --title "..." --project "..." --type feature --priority 1 --description "..." + +# Reputation +gt wl charsheet +gt wl stamps + +# Sync +gt wl sync +``` diff --git a/examples/deepwork/packs/deepwork-org/docs/wasteland/POST_TEMPLATE.md b/examples/deepwork/packs/deepwork-org/docs/wasteland/POST_TEMPLATE.md new file mode 100644 index 00000000..6e2c2328 --- /dev/null +++ b/examples/deepwork/packs/deepwork-org/docs/wasteland/POST_TEMPLATE.md @@ -0,0 +1,71 @@ +# Wasteland Post Template + +Every wanted item on the wasteland board MUST include enough context for an external contributor (human or agent) to pick it up and complete it without asking questions. 
+ +## Required Fields + +```bash +gt wl post \ + --title "Clear, actionable title" \ + --project "project-name" \ + --type "bug|feature|docs|design|rfc" \ + --priority 0-4 \ + --tags "relevant,tech,tags" \ + --description "$(cat <<'EOF' +## Context +What is this project? One-line description. + +**Repo:** https://github.com// +**Stack:** Languages, frameworks +**Key directories:** where the work happens + +## Task +What exactly needs to be done. Be specific. + +## Acceptance Criteria +- [ ] Criterion 1 +- [ ] Criterion 2 +- [ ] Tests pass +- [ ] PR submitted to main + +## How to Work on This +1. Clone: `git clone ` +2. Branch: `git checkout -b feat/your-change` +3. Implement +4. Push + PR: `gh pr create` +5. Submit: `gt wl done --evidence "PR_URL"` + +## References +- Related beads: +- Design doc: +- Related PRs: +EOF +)" +``` + +## Priority Guide + +| Priority | When | Examples | +|----------|------|---------| +| P0 | Security vulnerability, data loss, service down | Hardcoded creds, CORS wildcard, broken deploy | +| P1 | Important feature, significant bug | Dashboard page, integration, mobile crash | +| P2 | Normal work | Refactor, docs, minor UI changes | +| P3 | Nice to have | Polish, optimization | +| P4 | Backlog | Ideas, research | + +## Anti-Patterns + +- **No description** — "Fix the thing" tells nobody anything +- **No repo link** — contributor can't find the code +- **No acceptance criteria** — how does anyone know when it's done? +- **Internal jargon without context** — "Fix vap-bap" means nothing to external contributors +- **Duplicate items** — check `gt wl browse` before posting + +## For Agents Posting Automatically + +When the `wasteland-on-create.sh` hook posts beads to wasteland, it MUST include: +- Bead ID (for traceability) +- Repo URL (from rig→GitHub mapping) +- Project description (from knowledge base) + +The `mol-polecat-work` formula auto-claims and auto-completes wasteland items on `gt done`. 
diff --git a/examples/deepwork/packs/deepwork-org/formulas/mol-do-work.formula.toml b/examples/deepwork/packs/deepwork-org/formulas/mol-do-work.formula.toml new file mode 100644 index 00000000..20a64a3f --- /dev/null +++ b/examples/deepwork/packs/deepwork-org/formulas/mol-do-work.formula.toml @@ -0,0 +1,72 @@ +description = """ +Simple work formula — read the bead, do what it says, close it. + +This is the minimal work lifecycle for coding agents. No git branching, +no worktree isolation, no refinery handoff. The agent reads the bead's +description, implements the solution in the current working directory, +and closes the bead when done. + +Use this for demos and simple single-agent workflows. For production +multi-agent setups with branch isolation and merge review, use +mol-polecat-work instead. + +## Variables + +| Variable | Source | Description | +|----------|--------|-------------| +| issue | caller | The work bead ID assigned to this agent | +""" +formula = "mol-do-work" +version = 1 + +[vars] +[vars.issue] +description = "The work bead ID assigned to this agent" +required = true + +[[steps]] +id = "do-work" +title = "Read assignment, implement, and close" +description = """ +You have been assigned a work bead. Read it, do the work, and close it. + +**1. Read your assignment:** +```bash +bd show {{issue}} +``` + +Read the bead's title and description carefully. This is your task. + +**2. Implement the solution:** + +Do exactly what the bead describes. Follow existing codebase conventions. +Make atomic, focused commits as you work: +```bash +git add +git commit -m ": " +``` + +**3. 
Close the bead when done:** +```bash +bd update {{issue}} --status=closed --notes "Done: " +``` + +If you get stuck or need clarification, check your inbox: +```bash +gc mail inbox +``` + +**Exit criteria:** The work described in the bead is complete and the bead is closed.""" + +[[steps]] +id = "drain" +title = "Signal completion" +needs = ["do-work"] +description = """ +Work is done. Signal the controller to reclaim this session: + +```bash +gc runtime drain-ack +``` + +Run this command and nothing else.""" diff --git a/examples/deepwork/packs/deepwork-org/formulas/mol-dog-wasteland-sync.formula.toml b/examples/deepwork/packs/deepwork-org/formulas/mol-dog-wasteland-sync.formula.toml new file mode 100644 index 00000000..49c7da5a --- /dev/null +++ b/examples/deepwork/packs/deepwork-org/formulas/mol-dog-wasteland-sync.formula.toml @@ -0,0 +1,113 @@ +description = """ +Sync beads ↔ wasteland wanted board. + +The Wasteland Sync Dog keeps the wasteland board in sync with local beads: + +1. **Bead → Wasteland**: Scan all rigs for P0/P1 open beads not yet on wasteland. + Post them as wanted items with repo links and project context. + +2. **Wasteland → Bead**: When a wasteland item is claimed or completed, update + the corresponding bead status (if a bead exists). + +3. **Closed beads**: When a bead is closed locally, mark the wasteland item as + completed (if not already). + +## Dog Contract + +This is infrastructure work. You: +1. Scan all rig beads and compare against wasteland board +2. Post new beads that aren't on wasteland yet +3. Sync status changes (claimed/done/closed) both directions +4. Push wasteland changes to DoltHub +5. 
Return to kennel + +## Variables + +| Variable | Source | Description | +|----------|--------|-------------| +| rigs | config | Comma-separated rig names to scan (default: all) | +| max_priority | config | Maximum priority to sync (default: 1 = P0+P1) | +| push | config | Whether to push to DoltHub after sync (default: false) | +| dry_run | config | Show what would sync without making changes (default: false) | +| github_repos | config | JSON map of project→GitHub repo URL | +""" + +[metadata] +type = "dog" +category = "wasteland" +schedule = "0 */4 * * *" # every 4 hours +ttl = "30m" + +[variables] +rigs = { source = "config", default = "" } +max_priority = { source = "config", default = "1" } +push = { source = "config", default = "false" } +dry_run = { source = "config", default = "false" } +github_repos = { source = "config", default = '{"":"https://github.com//","":"https://github.com//","":"https://github.com//","":"https://github.com//website","":"https://github.com//","":"https://github.com//","gt_mesh":"https://github.com//gt-mesh"}' } + +[[steps]] +id = "scan-beads" +title = "Scan all rigs for unsynced beads" +description = """ +For each rig, run `bd list --json --flat --no-pager` to get all open beads +up to max_priority. Compare bead IDs against existing wasteland items +(check description field for "Bead: " pattern). + +Collect list of beads that need posting to wasteland. +""" + +[[steps]] +id = "post-new" +title = "Post unsynced beads to wasteland" +needs = ["scan-beads"] +description = """ +For each unsynced bead, run: + gt wl post --title "" --project "" --type "" \ + --priority --tags "" \ + --description "Bead: \nRepo: \nProject: " + +Track the mapping: bead-id → wasteland-id for status sync. +""" + +[[steps]] +id = "sync-status" +title = "Sync status changes between beads and wasteland" +needs = ["scan-beads"] +description = """ +For each wasteland item that has a "Bead: " in its description: + +1. 
If bead is closed but wasteland item is open → mark wasteland item completed + (gt wl claim + gt wl done with evidence "closed locally") + +2. If wasteland item is claimed but bead is unassigned → update bead + (this is informational only for now) + +3. If wasteland item has completion evidence → log it on the bead +""" + +[[steps]] +id = "push-dolthub" +title = "Push wasteland changes to DoltHub" +needs = ["post-new", "sync-status"] +description = """ +If push=true, push the local wasteland database to DoltHub: + gt wl sync (which fetches upstream first, then we push) + +Or use: cd .wasteland// && dolt push origin main + +Skip if dry_run=true. +""" + +[[steps]] +id = "report" +title = "Report sync results" +needs = ["push-dolthub"] +description = """ +Log summary: +- N new beads posted to wasteland +- N status changes synced +- N items already in sync +- Push status (success/skipped/failed) + +Send report to deacon if any errors occurred. +""" diff --git a/examples/deepwork/packs/deepwork-org/formulas/mol-polecat-base.formula.toml b/examples/deepwork/packs/deepwork-org/formulas/mol-polecat-base.formula.toml new file mode 100644 index 00000000..9aa55d34 --- /dev/null +++ b/examples/deepwork/packs/deepwork-org/formulas/mol-polecat-base.formula.toml @@ -0,0 +1,251 @@ +description = """ +Polecat base formula — shared steps for all polecat work variants. + +This formula defines the common lifecycle steps that every polecat variant +shares: loading context, workspace setup (placeholder), preflight tests, +implementation, and self-review. Variant formulas extend this base and +override workspace-setup with their specific branching/worktree strategy, +then add a terminal step (submit, commit, etc). 
+ +## Variables + +| Variable | Source | Description | +|----------|--------|-------------| +| issue | caller | The work bead ID assigned to this polecat | +| base_branch | caller | Base branch to rebase on (default: main) | +| setup_command | rig config | Setup/install command. Empty = skip. | +| typecheck_command | rig config | Type check command. Empty = skip. | +| test_command | rig config | Test command. Empty = skip. | +| lint_command | rig config | Lint command. Empty = skip. | +| build_command | rig config | Build command. Empty = skip. |""" +formula = "mol-polecat-base" +version = 1 + +[vars] +[vars.issue] +description = "The work bead ID assigned to this polecat" +required = true + +[vars.base_branch] +description = "The base branch to rebase on and compare against (e.g., main, integration/convoy-id)" +default = "main" + +[vars.setup_command] +description = "Setup/install command (e.g., pnpm install). Empty = skip." +default = "" + +[vars.typecheck_command] +description = "Type check command (e.g., tsc --noEmit). Empty = skip." +default = "" + +[vars.test_command] +description = "Command to run tests (auto-detected from rig settings)" +default = "" + +[vars.lint_command] +description = "Command to run linting. Empty = skip." +default = "" + +[vars.build_command] +description = "Command to run build. Empty = skip." +default = "" + +[[steps]] +id = "load-context" +title = "Load context and verify assignment" +description = """ +Initialize your session and understand your assignment. + +**1. Prime your environment:** +```bash +gc prime # Load role context +bd prime # Load beads context +``` + +**2. Check your hook:** +```bash +bd list --assignee=$GC_AGENT --status=in_progress +``` + +The hook_bead is your assigned issue. Read it carefully: +```bash +bd show {{issue}} # Full issue details +bd show {{issue}} --json | jq '.metadata' # Check for existing metadata +``` + +**3. 
Check for rejection (IMPORTANT):** + +If `metadata.rejection_reason` exists, this bead was previously attempted +and rejected by the refinery. Read the reason carefully: +- Rebase conflict → you'll resume the existing branch and rebase +- Test failure → you'll resume the branch and fix the issue + +If `metadata.branch` exists, a branch already exists from the prior attempt. +You will use it in workspace-setup instead of creating a new one. + +**4. Check inbox for additional context:** +```bash +gc mail inbox +# Read any HANDOFF or assignment messages, then archive after absorbing context +# gc mail read → process → gc mail archive +``` + +**5. Understand the requirements:** +- What exactly needs to be done? +- What files are likely involved? +- Are there dependencies or blockers? +- What does "done" look like? +- If rejected: what specifically needs fixing? + +If blocked or unclear, mail Witness: +```bash +gc mail send /witness -s "HELP: Unclear requirements" -m "Issue: {{issue}} +Question: " +``` + +**Exit criteria:** You understand the work and can begin.""" + +[[steps]] +id = "workspace-setup" +title = "Set up workspace (override in variant formulas)" +needs = ["load-context"] +description = """ +Override this step in variant formulas to define the workspace strategy. + +Variants should set up an isolated worktree and working context appropriate +for their merge strategy (feature branch, direct commit, etc).""" + +[[steps]] +id = "preflight-tests" +title = "Verify pre-flights pass on base branch" +needs = ["workspace-setup"] +description = """ +Check if the codebase is healthy BEFORE starting your work. + +**Config: typecheck_command = {{typecheck_command}}** +**Config: lint_command = {{lint_command}}** +**Config: test_command = {{test_command}}** + +**Skip this step if resuming a rejected branch** — pre-flights were +already verified on the prior attempt. Close this step and proceed. + +**1. 
Run pre-flights (skip empty commands silently):** +```bash +{{typecheck_command}} +{{lint_command}} +{{test_command}} +``` + +**2. If pre-flights pass:** proceed. + +**3. If pre-flights fail on {{base_branch}}:** + +File a bead and proceed. Do NOT fix pre-existing failures — that's +not your assignment. + +FORBIDDEN: Pushing to {{base_branch}}. FORBIDDEN: Fixing pre-existing failures. + +```bash +bd create --title "Pre-existing failure: " --type bug --priority 1 +gc mail send /witness -s "NOTICE: {{base_branch}} has failing pre-flights" \ + -m "Filed: . Proceeding with {{issue}}." +``` + +**Exit criteria:** Pre-flights pass (or pre-existing bug filed), ready to implement.""" + +[[steps]] +id = "implement" +title = "Implement the solution" +needs = ["preflight-tests"] +description = """ +Do the actual implementation work. + +**Working principles:** +- Follow existing codebase conventions +- Make atomic, focused commits +- Keep changes scoped to the assigned issue +- Don't gold-plate or scope-creep + +**If resuming a rejected branch:** Read `metadata.rejection_reason` +from load-context. Focus on fixing the specific issue that caused +rejection — don't redo everything. + +**Commit frequently:** +```bash +git add +git commit -m ": ({{issue}})" +``` + +Commit types: feat, fix, refactor, test, docs, chore + +**Discovered work (outside scope):** +```bash +bd create --title "Found: " --type bug --priority 2 +``` +Do NOT fix unrelated issues in this branch. + +**If stuck (>15 minutes):** +```bash +gc mail send /witness -s "HELP: Stuck on implementation" -m "Issue: {{issue}} +Problem: +Tried: " +``` + +**If context filling up:** +```bash +gc runtime request-restart +``` +This blocks until the controller kills your session. The next session +resumes from context (re-reads formula steps, checks git/bead state). 
+ +**Exit criteria:** Implementation complete, all changes committed.""" + +[[steps]] +id = "self-review" +title = "Self-review and run tests" +needs = ["implement"] +description = """ +Review your changes and verify they work. + +**Config: setup_command = {{setup_command}}** +**Config: typecheck_command = {{typecheck_command}}** +**Config: lint_command = {{lint_command}}** +**Config: build_command = {{build_command}}** +**Config: test_command = {{test_command}}** + +**1. Review the diff:** +```bash +git diff origin/{{base_branch}}...HEAD +git log --oneline origin/{{base_branch}}..HEAD +git diff --stat origin/{{base_branch}}...HEAD +``` + +Check for: bugs, security issues, style violations, missing error handling, +debug cruft, unintended file changes. Fix anything you find. + +**2. Run quality checks (skip empty commands):** +```bash +{{setup_command}} +{{typecheck_command}} +{{lint_command}} +{{build_command}} +{{test_command}} +``` + +**ALL CHECKS MUST PASS.** If your change caused the failure, fix it. +If pre-existing, file a bead. + +**3. Ensure everything is committed:** +```bash +git status # Must be clean +git log origin/{{base_branch}}..HEAD --oneline # Must show your commits +``` + +If uncommitted changes exist: +```bash +git add -A && git commit -m ": ({{issue}})" +``` + +NEVER discard implementation changes with `git checkout -- .` + +**Exit criteria:** All checks pass, all changes committed, working tree clean.""" diff --git a/examples/deepwork/packs/deepwork-org/formulas/mol-polecat-work.formula.toml b/examples/deepwork/packs/deepwork-org/formulas/mol-polecat-work.formula.toml new file mode 100644 index 00000000..f49012d6 --- /dev/null +++ b/examples/deepwork/packs/deepwork-org/formulas/mol-polecat-work.formula.toml @@ -0,0 +1,873 @@ +description = """ +Full polecat work lifecycle from assignment through completion. + +This molecule guides a polecat through a complete work assignment. 
Each step +has clear entry/exit criteria and specific commands to run. A polecat can +crash after any step and resume from the last completed step. + +## Polecat Contract (Event-Driven Lifecycle) + +You are an event-driven worker. You: +1. Receive work via your hook (formula checklist + issue) +2. Work through formula steps in order (shown inline at prime time) +3. Submit your MR and enter awaiting_verdict state +4. Refinery processes your MR: + - MERGED → you self-clean via `gt done` (gone) + - FIX_NEEDED → you read failure, fix code, resubmit (loop) +5. You only die when refinery confirms merge + +**Event-driven:** After submitting your MR, you stay alive waiting for the +refinery verdict. If the merge fails, you fix the code in-place without +losing context. Only on MERGED do you self-clean. + +**Important:** This formula defines the workflow template. Steps are shown inline +when you run `gt prime` — there are no separate step beads to close. Work through +the checklist, await the verdict, then run `gt done` when merged. + +**Speed principle:** You run the full gate suite AFTER rebasing onto the target +branch (pre-verify step). This enables the refinery to fast-path merge your MR +in ~5 seconds instead of re-running gates. If pre-verification is skipped or +stale, the refinery falls through to normal gate execution. 
+ +**Lifecycle:** +``` +spawning → working → mr_submitted → awaiting_verdict + │ + ┌───────────────┴───────────────┐ + ▼ ▼ + FIX_NEEDED MERGED + │ │ + (fix & resubmit) (gt done → gone) + │ + awaiting_verdict (loop) +``` + +**You do NOT:** +- Push directly to main (Refinery merges from MQ) +- Close your own issue (Refinery closes after merge) +- Fix pre-existing failures on main (Refinery owns main health) + +## Variables + +| Variable | Source | Description | +|----------|--------|-------------| +| issue | hook_bead | The issue ID you're assigned to work on | +| base_branch | sling vars | The base branch to rebase on (default: main) | +| setup_command | rig config | Setup/install command (e.g., `pnpm install`). Empty = skip. | +| typecheck_command | rig config | Type check command (e.g., `tsc --noEmit`). Empty = skip. | +| test_command | rig config | Test command. Empty = skip. Rig must configure for its language. | +| lint_command | rig config | Lint command (e.g., `eslint .`). Empty = skip. | +| build_command | rig config | Build command (e.g., `go build ./...`). Empty = skip. | + +## Failure Modes + +| Situation | Action | +|-----------|--------| +| Build fails | Fix it. Do not proceed if it won't compile. | +| FIX_NEEDED from refinery | Read failure details, fix code, resubmit | +| Blocked on external | Mail Witness for help, mark yourself stuck | +| Context filling | Use gt handoff to cycle to fresh session | +| Unsure what to do | Mail Witness, don't guess | +| Max fix attempts exceeded | Mail Witness, enter stuck state |""" +formula = "mol-polecat-work" +version = 9 + +[[steps]] +id = "load-context" +title = "Load context and verify assignment" +description = """ +Initialize your session and understand your assignment. + +**1. Prime your environment:** +```bash +gt prime # Load role context +bd prime # Load beads context +``` + +**2. Check your hook:** +```bash +gt hook # Shows your pinned molecule and hook_bead +``` + +The hook_bead is your assigned issue. 
Read it carefully: +```bash +bd show {{issue}} # Full issue details +``` + +**3. Check inbox for additional context:** +```bash +gt mail inbox +# Read any HANDOFF or assignment messages +``` + +**4. Check for prior merge failure context:** + +If the bead notes contain "MERGE REJECTION", a previous polecat's work was +rejected by the refinery. Read the failure details carefully: +- **Failure-Type**: what broke (tests, build, lint, typecheck) +- **Error**: the actual error output +- **Branch**: the previous branch may still exist with the original work + +If a prior branch exists, check it out instead of creating a new one: +```bash +git fetch origin +git branch -a | grep +# If it exists: git checkout && git rebase origin/{{base_branch}} +``` + +This lets you make a targeted fix rather than starting from scratch. + +**5. Understand the requirements:** +- What exactly needs to be done? +- What files are likely involved? +- Are there dependencies or blockers? +- What does "done" look like? +- If this is a rework: what specifically failed and why? + +**6. Wasteland link detection:** + +Check if this bead is linked to a wasteland wanted item. 
There are two cases: +a) The bead description contains "Wasteland: w-xxxx" (direct link) +b) The bead's project/title matches an open wasteland item (fuzzy match) + +```bash +# Case A: Direct link in bead description +WL_ID=$(bd show {{issue}} --json 2>/dev/null | python3 -c " +import json,sys,re +try: + data = json.load(sys.stdin) + desc = data[0].get('description','') if isinstance(data, list) else data.get('description','') + m = re.search(r'Wasteland:\s*(w-[a-f0-9]+)', desc) + if m: print(m.group(1)) +except: pass +" 2>/dev/null) + +# Case B: Search wasteland for matching title/bead +if [ -z "$WL_ID" ]; then + WL_ID=$(dolt --host 127.0.0.1 --port --user root --password "" --no-tls sql -r csv -q \ + "USE wl_commons; SELECT id FROM wanted WHERE status='open' AND (description LIKE '%{{issue}}%' OR title LIKE '%$(bd show {{issue}} --json 2>/dev/null | python3 -c "import json,sys; d=json.load(sys.stdin); print(d[0]['title'][:40] if isinstance(d,list) else d.get('title','')[:40])" 2>/dev/null)%') LIMIT 1" 2>/dev/null | tail -1) +fi + +# If found, claim it so no one else picks it up +if [ -n "$WL_ID" ] && [ "$WL_ID" != "id" ]; then + gt wl claim "$WL_ID" 2>/dev/null && echo "Claimed wasteland item: $WL_ID" + export GT_WL_ITEM="$WL_ID" +fi + +# Case C: No wasteland item exists — decide if this work belongs on wasteland +# NOT all beads belong on wasteland. 
Only externally meaningful work: +# - Features, bugs, improvements that an outside contributor can understand +# - Has a clear repo, acceptance criteria, and doesn't require internal knowledge +# +# SKIP wasteland for: +# - Internal infra (CLAUDE.md fixes, witness wiring, refinery config) +# - Gastown/beads/mesh internal work +# - Patrol/lifecycle/wisp/formula maintenance +# - Anything with labels: infra, internal, gastown, patrol, lifecycle, wisp + +if [ -z "$WL_ID" ] || [ "$WL_ID" = "id" ]; then + BEAD_JSON=$(bd show {{issue}} --json 2>/dev/null) + SHOULD_POST=$(echo "$BEAD_JSON" | python3 -c " +import json, sys, re + +INTERNAL_KEYWORDS = [ + 'CLAUDE.md', 'witness', 'refinery', 'deacon', 'mayor', 'patrol', + 'wisp', 'formula', 'mol-', 'daemon', 'dog-', 'lifecycle', + 'handoff', 'session', 'tmux', 'dolt server', 'circuit-breaker', + 'merge queue', 'gt mq', 'gt mail', 'gt sling', 'gt hook', + 'beads health', 'orphan', 'zombie', 'guardrail', 'ulimit', + 'MiniMax', 'LiteLLM', 'mesh', '', 'Gas City', + 'reclassify crew', 'AGENTS.md', 'polecat-lease', +] + +INTERNAL_LABELS = [ + 'infra', 'internal', 'gastown', 'patrol', 'lifecycle', + 'wisp', 'chore', 'dog-work', 'maintenance', +] + +try: + data = json.load(sys.stdin) + item = data[0] if isinstance(data, list) else data + title = item.get('title', '') + desc = item.get('description', '') + labels = item.get('labels', []) + priority = item.get('priority', 9) + issue_type = item.get('issue_type', '') + + # Must be P0 or P1 + if priority > 1: + print('NO'); sys.exit() + + # Skip if any internal label + if any(l in INTERNAL_LABELS for l in labels): + print('NO'); sys.exit() + + # Skip if title contains internal keywords + if any(kw.lower() in title.lower() for kw in INTERNAL_KEYWORDS): + print('NO'); sys.exit() + + # Skip if issue type is clearly internal + if issue_type in ('epic', 'convoy', 'agent', 'message'): + print('NO'); sys.exit() + + # Skip wisp beads (ephemeral internal tracking) + if item.get('id', 
'').startswith('wisp-') or '-wisp-' in item.get('id', ''): + print('NO'); sys.exit() + + # Looks externally meaningful + print('YES') +except: + print('NO') +" 2>/dev/null) + + if [ "$SHOULD_POST" = "YES" ]; then + BEAD_TITLE=$(echo "$BEAD_JSON" | python3 -c "import json,sys; d=json.load(sys.stdin); print(d[0]['title'] if isinstance(d,list) else d.get('title',''))" 2>/dev/null) + BEAD_PRI=$(echo "$BEAD_JSON" | python3 -c "import json,sys; d=json.load(sys.stdin); print(d[0].get('priority',2) if isinstance(d,list) else d.get('priority',2))" 2>/dev/null) + + RIG_NAME="${GT_RIG:-$(basename $(pwd))}" + case "$RIG_NAME" in + ) WL_PROJECT=""; REPO_URL="https://github.com//" ;; + ) WL_PROJECT=""; REPO_URL="https://github.com//" ;; + ) WL_PROJECT=""; REPO_URL="https://github.com//" ;; + ) WL_PROJECT=""; REPO_URL="https://github.com//website" ;; + ) WL_PROJECT=""; REPO_URL="https://github.com//" ;; + ) WL_PROJECT=""; REPO_URL="https://github.com//" ;; + ) WL_PROJECT=""; REPO_URL="https://github.com//" ;; + *) WL_PROJECT="$RIG_NAME"; REPO_URL="" ;; + esac + + NEW_WL=$(gt wl post \ + --title "$BEAD_TITLE" \ + --project "$WL_PROJECT" \ + --type feature \ + --priority "${BEAD_PRI:-2}" \ + --description "## Context +Repo: $REPO_URL +Project: $WL_PROJECT +Bead: {{issue}} + +## Task +$BEAD_TITLE + +## Acceptance Criteria +- Implementation matches the task description +- Tests pass +- No regressions +- PR submitted to main branch + +## How to Work on This +1. Clone: git clone $REPO_URL +2. Branch: git checkout -b feat/your-change +3. Implement the change +4. Push + create PR +5. 
Submit: gt wl done --evidence PR_URL" 2>/dev/null | grep -oP 'w-[a-f0-9]+') + + if [ -n "$NEW_WL" ]; then + gt wl claim "$NEW_WL" 2>/dev/null + export GT_WL_ITEM="$NEW_WL" + echo "Created and claimed wasteland item: $NEW_WL for bead {{issue}}" + fi + else + echo "Wasteland: bead {{issue}} is internal work, not posting to wasteland" + fi +fi +``` + +Only externally meaningful P0/P1 work gets posted to wasteland. +Internal infra, patrol, lifecycle, and gastown-specific beads are skipped. + +**7. Verify you can proceed:** +- No unresolved blockers on the issue +- You understand what to do +- Required resources are available + +If blocked or unclear, mail Witness immediately: +```bash +gt mail send /witness -s "HELP: Unclear requirements" -m "Issue: {{issue}} +Question: " +``` + +**Exit criteria:** You understand the work, wasteland claimed if applicable, and can begin implementation.""" + +[[steps]] +id = "branch-setup" +title = "Set up working branch" +needs = ["load-context"] +description = """ +Ensure you're on a clean feature branch ready for work. + +**1. Check current branch state:** +```bash +git status +git branch --show-current +``` + +**2. Check for a prior branch from a rejected MR:** + +If the bead notes contain "MERGE REJECTION" with a branch name, check if that +branch still exists on the remote. Reusing it preserves all previous work: +```bash +git fetch origin +# Check for prior branch +git branch -r | grep +# If found, check it out: +git checkout -b origin/ +git rebase origin/{{base_branch}} +``` + +**If no prior branch, create a fresh one:** +```bash +git fetch origin +git checkout -b polecat/ origin/{{base_branch}} +``` + +**3. 
Ensure clean working state:** +```bash +git status # Should show "working tree clean" +git stash list # Should be empty +``` + +If dirty state from previous work: +```bash +# If changes are relevant to this issue: +git add -A && git commit -m "WIP: " + +# If changes are unrelated cruft: +git stash push -m "unrelated changes before {{issue}}" +# Or discard if truly garbage: +git checkout -- . +``` + +**4. Sync with {{base_branch}}:** +```bash +git fetch origin +git rebase origin/{{base_branch}} # Get latest, rebase your branch +``` + +If rebase conflicts: +- Resolve them carefully +- If stuck, mail Witness + +**5. Run project setup (if configured):** + +If setup_command is set, run it to install dependencies: +```bash +{{setup_command}} +``` + +This ensures dependencies are installed before you start work. +Empty setup_command means "not configured" — skip this step. + +**Exit criteria:** You're on a clean feature branch, rebased on latest {{base_branch}}, dependencies installed.""" + +[[steps]] +id = "implement" +title = "Implement the solution" +needs = ["branch-setup"] +description = """ +Do the actual implementation work. + +**Working principles:** +- Follow existing codebase conventions +- Make atomic, focused commits +- Keep changes scoped to the assigned issue +- Don't gold-plate or scope-creep + +**Persist findings as you go (CRITICAL for session survival):** +Your session can die at any time (context limit, crash, SIGKILL). Code changes +survive in git, but analysis, findings, and decisions exist only in your context +window. Persist them to the bead so they survive session death: +```bash +# After completing significant analysis or reaching conclusions: +bd update {{issue}} --notes "Findings so far: " +# For detailed reports, use --design: +bd update {{issue}} --design "" +``` +Do this BEFORE closing molecule steps, not after. If your session dies between +persisting and closing, the findings survive. If you close first, they're lost. 
+ +**For report-only tasks** (audits, reviews, research): your findings ARE the +deliverable. There are no code changes to commit. You MUST persist all findings +to the bead via --notes or --design. Without this, your entire work product is +lost when the session ends. + +**Commit frequently (for code tasks):** +```bash +# After each logical unit of work: +git add +git commit -m ": ({{issue}})" +``` + +Commit types: feat, fix, refactor, test, docs, chore + +**Discovered work:** +If you find bugs or improvements outside your scope: +```bash +bd create --title "Found: " --type bug --priority 2 +# Note the ID, continue with your work +``` + +Do NOT fix unrelated issues in this branch. + +**If stuck:** +Don't spin for more than 15 minutes. Mail Witness: +```bash +gt mail send /witness -s "HELP: Stuck on implementation" -m "Issue: {{issue}} +Trying to: +Problem: +Tried: " +``` + +**Exit criteria:** Implementation complete. Code tasks: all changes committed. Report tasks: all findings persisted to bead.""" + +[[steps]] +id = "self-review" +title = "Self-review changes" +needs = ["implement"] +description = """ +Review your own changes before the build check. Use `/review --branch` for +structured grading, or do a manual review. + +**Option A: Use /review (recommended)** +```bash +/review --branch +``` +This gives you a structured grade (A-F) with CRITICAL/MAJOR/MINOR findings +and specific fix suggestions. Fix any CRITICAL or MAJOR issues before proceeding. + +**Option B: Manual review** + +**1. Review the diff:** +```bash +git diff origin/{{base_branch}}...HEAD # All changes vs {{base_branch}} +git log --oneline origin/{{base_branch}}..HEAD # All commits +``` + +**2. 
Check for common issues:** + +| Category | Look For | +|----------|----------| +| Bugs | Off-by-one, null handling, edge cases | +| Security | Injection, auth bypass, exposed secrets | +| Style | Naming, formatting, code organization | +| Completeness | Missing error handling, incomplete paths | +| Cruft | Debug prints, commented code, TODOs | + +**3. Fix issues found:** +Don't just note them - fix them now. Amend or add commits as needed. + +**4. Verify no unintended changes:** +```bash +git diff --stat origin/{{base_branch}}...HEAD +# Only files relevant to {{issue}} should appear +``` + +If you accidentally modified unrelated files, remove those changes. + +**Exit criteria:** Changes are clean, reviewed (Grade B or better if using /review), +and ready for build check.""" + +[[steps]] +id = "build-check" +title = "Build and sanity check" +needs = ["self-review"] +description = """ +Verify your changes compile and pass basic sanity checks. The Refinery's +bisecting merge queue runs the full test suite — your job is to catch +obvious problems fast so you don't waste the MQ's time. + +**1. Build (REQUIRED — must pass):** + +If build_command is set: +```bash +{{build_command}} +``` + +If setup_command is set (ensure new deps are installed): +```bash +{{setup_command}} +``` + +If typecheck_command is set: +```bash +{{typecheck_command}} +``` + +Empty commands mean "not configured" — skip silently. + +**2. If build fails:** +- Fix it. Return to implement step if needed. +- Do NOT submit broken code to the merge queue. + +**3. Targeted tests (OPTIONAL — run if fast and relevant):** + +If you modified or added test files, run those specific tests to catch +obvious regressions. Do NOT run the full test suite — that's the +Refinery's job. + +```bash +# Example: run only tests in packages you changed +go test ./internal/pkg/you/changed/... 
+# Or for JS: npx jest --testPathPattern="changed-file" +``` + +If you're unsure which tests are relevant, skip this — the MQ will catch it. + +**4. Lint (OPTIONAL — run if configured and fast):** + +If lint_command is set: +```bash +{{lint_command}} +``` + +**Exit criteria:** Code compiles. Obvious regressions caught. Ready to submit.""" + +[[steps]] +id = "commit-changes" +title = "Commit all implementation changes" +needs = ["build-check"] +description = """ +Ensure ALL implementation work is committed before cleanup. + +**CRITICAL: You MUST commit all changes from implementation.** +NEVER use `git checkout -- .` or `git restore .` to discard implementation work. +ALWAYS commit ALL uncommitted changes from your implementation. + +**1. Check for uncommitted changes:** +```bash +git status +``` + +**2. If there are ANY uncommitted changes, commit them now:** +```bash +git add -A && git commit -m ": ({{issue}})" +``` + +**3. If working tree is already clean, that's fine — but you MUST still have commits (step 4).** + +**4. VERIFY commits exist (HARD GATE — do NOT close this step without passing):** +```bash +git log origin/{{base_branch}}..HEAD --oneline +``` + +This MUST show at least 1 commit. If it shows NOTHING: +- You have NOT completed your implementation. Do NOT close this step. +- Go back to the implement step and do the work. +- If the task genuinely requires no code changes (already fixed upstream, etc.), + run `gt done --status DEFERRED` and skip remaining steps. +- Do NOT proceed to cleanup or submit with zero commits. + +**5. VERIFY clean working tree:** +```bash +git status +``` +Must show "nothing to commit, working tree clean". + +**Report-only tasks (audits, reviews, research — no code changes):** +If your task produced no code changes, verify your findings are persisted to the bead: +```bash +bd show {{issue}} # Check notes/design fields have your findings +``` +If findings are persisted, proceed to cleanup. 
`gt done --cleanup-status clean` +handles the no-commit case for report-only tasks. + +**Exit criteria:** Working tree clean AND either (a) at least 1 commit ahead of origin/{{base_branch}}, or (b) report-only task with findings persisted to bead.""" + +[[steps]] +id = "pre-verify" +title = "Pre-merge rebase verification" +needs = ["commit-changes"] +description = """ +Rebase onto the target branch and run the full gate suite. This enables the +refinery to fast-path merge your MR without re-running gates (~5s merge +instead of minutes). + +**1. Fetch and rebase onto target:** +```bash +git fetch origin {{base_branch}} +git rebase origin/{{base_branch}} +``` + +If rebase conflicts: +- Resolve them carefully +- Re-run the build to ensure the resolution is correct +- If stuck, mail Witness + +**2. Run the full gate suite on the rebased result:** + +Run ALL configured gates (not just targeted tests — this is the full verification): + +If build_command is set: +```bash +{{build_command}} +``` + +If typecheck_command is set: +```bash +{{typecheck_command}} +``` + +If lint_command is set: +```bash +{{lint_command}} +``` + +If test_command is set: +```bash +{{test_command}} +``` + +Empty commands mean "not configured" — skip silently. + +**3. If any gate fails after rebase:** +- Fix the issue, commit, and re-run from step 1 +- Do NOT proceed with --pre-verified if gates failed + +**4. If all gates pass:** +You will use `gt done --pre-verified` in the next step. This tells the refinery +that you ran the full gate suite on a rebased branch, enabling fast-path merge. + +**Note:** If this step takes too long or the gates are not configured for this +project, you may skip this step and use `gt done` without --pre-verified. +The refinery will run gates normally (slower but still correct). 
+ +**Exit criteria:** Branch rebased onto latest origin/{{base_branch}}, all configured gates pass.""" + +[[steps]] +id = "submit-mr" +title = "Submit MR and enter awaiting_verdict" +needs = ["pre-verify"] +description = """ +Submit your MR to the merge queue but stay alive for the verdict. + +**Pre-flight: Verify you have actual work to submit (HARD GATE):** +```bash +git log origin/{{base_branch}}..HEAD --oneline +``` +This MUST show at least 1 commit. If it shows nothing, do NOT submit. + +**Report-only tasks (no commits — audits, reviews, research):** +If this is a report-only task with findings persisted to the bead, +run `gt done --cleanup-status clean` and skip remaining steps. + +**Push and submit MR:** +```bash +git push origin HEAD + +# Submit to merge queue (creates MR bead, does NOT nuke sandbox) +gt mq submit {{issue}} +``` + +Track the MR bead ID from the submit output — you need it for resubmission. + +**Enter awaiting_verdict state:** +After submission, you are in the `awaiting_verdict` state. The refinery will +process your MR and send you one of: +- **MERGED**: Your work was merged successfully → proceed to self-clean step +- **FIX_NEEDED**: Tests/build/lint failed → proceed to await-verdict step + +**Exit criteria:** MR submitted to merge queue, polecat in awaiting_verdict state.""" + +[[steps]] +id = "await-verdict" +title = "Wait for refinery verdict" +needs = ["submit-mr"] +description = """ +Wait for the refinery to process your MR and send you a verdict. + +**Check for verdict signal:** + +First check your mail for FIX_NEEDED or MERGED messages: +```bash +gt mail inbox +``` + +If no verdict yet, use await-signal to wait: +```bash +gt mol step await-signal --agent-bead \ + --backoff-base 30s --backoff-mult 2 --backoff-max 5m +``` + +**On MERGED signal:** +The refinery merged your work successfully. Proceed to self-clean step. + +**On FIX_NEEDED signal:** +The refinery found failures in your MR. 
The FIX_NEEDED message contains: +- Failure-Type: tests, build, lint, typecheck +- Error: the actual error output +- Attempt-Number: how many fix attempts so far + +**Fix procedure (on FIX_NEEDED):** + +1. **Read the failure details** from your mail: +```bash +gt mail inbox +# Read the FIX_NEEDED message for failure details +``` + +2. **Also check the bead for updated failure notes:** +```bash +bd show {{issue}} +``` + +3. **Fix the code:** + - Diagnose the failure based on the error output + - Make targeted fixes (don't refactor unrelated code) + - Commit your fix: + ```bash + git add + git commit -m "fix: address failure ({{issue}})" + ``` + +4. **Re-run the failing checks locally:** + - If failure was tests: run `{{test_command}}` + - If failure was build: run `{{build_command}}` + - If failure was lint: run `{{lint_command}}` + - If failure was typecheck: run `{{typecheck_command}}` + +5. **Rebase and push:** +```bash +git fetch origin {{base_branch}} +git rebase origin/{{base_branch}} +git push origin HEAD --force-with-lease +``` + +6. **Resubmit to merge queue:** +```bash +gt mq submit {{issue}} --resubmit +``` + +7. **Loop back to awaiting_verdict** — wait for the next verdict. + +**Max retry limit:** If Attempt-Number >= 3, do NOT keep retrying. +Instead, escalate: +```bash +gt mail send /witness -s "HELP: Cannot fix merge failure after 3 attempts" \ + --stdin <<'BODY' +Issue: {{issue}} +Failure-Type: +Error: +Attempts: 3 +I've tried to fix this failure 3 times and cannot resolve it. +Please advise or reassign. +BODY +``` + +**If your session is dying (context filling):** +Use handoff to preserve your state: +```bash +gt handoff -s "Awaiting verdict for {{issue}}" -m "MR submitted, waiting for refinery. 
+Attempt: +Last failure: " +``` + +**Exit criteria:** Received MERGED signal from refinery.""" + +[[steps]] +id = "self-clean" +title = "Self-clean after merge confirmation" +needs = ["await-verdict"] +description = """ +The refinery confirmed your work was merged. Self-clean and exit. + +**Verify MERGED signal:** +Confirm you received a MERGED signal (not just timing out): +```bash +gt mail inbox +# Should show MERGED message for your branch +``` + +**Wasteland completion (if wasteland is configured):** +Before self-cleaning, report completion on the wasteland board. This builds +your rig's reputation and lets other Gas Towns see the work was done. + +The wasteland item may have been claimed in load-context (GT_WL_ITEM), or we +need to detect it now. Not all beads map to wasteland items — some beads are +sub-tasks of a larger wasteland item, or purely internal work. + +```bash +# Use GT_WL_ITEM if set during load-context, otherwise re-detect +WL_ID="${GT_WL_ITEM:-}" + +if [ -z "$WL_ID" ]; then + # Search for wasteland item linked to this bead or its title + WL_ID=$(dolt --host 127.0.0.1 --port --user root --password "" --no-tls sql -r csv -q \ + "USE wl_commons; SELECT id FROM wanted WHERE (status='open' OR status='claimed') AND description LIKE '%{{issue}}%' LIMIT 1" 2>/dev/null | tail -1) + [ "$WL_ID" = "id" ] && WL_ID="" +fi + +if [ -n "$WL_ID" ]; then + # Get evidence: PR URL or branch merge info + PR_URL=$(git log --oneline -1 --format="%s" | grep -oP '#\d+' | head -1) + BRANCH=$(git branch --show-current 2>/dev/null) + EVIDENCE="Bead {{issue}} merged to {{base_branch}}." + [ -n "$PR_URL" ] && EVIDENCE="$EVIDENCE PR $PR_URL." + [ -n "$BRANCH" ] && EVIDENCE="$EVIDENCE Branch: $BRANCH." 
+ + # Claim if not already claimed by us + gt wl claim "$WL_ID" 2>/dev/null + + # Mark done with evidence + gt wl done "$WL_ID" --evidence "$EVIDENCE" 2>/dev/null \ + && echo "Wasteland: marked $WL_ID done" \ + || echo "Wasteland: could not mark $WL_ID done (may already be completed)" +else + echo "Wasteland: no linked item found for {{issue}} (OK — not all beads have wasteland items)" +fi +``` +This step is always safe — if wasteland is not configured, the dolt query fails silently. +If no item is found, the polecat just logs it and continues. + +**Run gt done:** +```bash +# With pre-verification (if you ran gates before submitting): +gt done --pre-verified + +# Without pre-verification: +gt done +``` + +You should see output like: +``` +✓ Work submitted to merge queue + MR ID: gt-xxxxx + Source: polecat/ + Target: {{base_branch}} + Issue: {{issue}} +✓ Sandbox nuked +✓ Session exiting +``` + +**You are GONE after this.** The refinery has already merged your work. +`gt done` at this point is just cleanup (nuke sandbox, close session). + +**Exit criteria:** Wasteland updated (if configured), sandbox nuked, session exited.""" + +[vars] +[vars.issue] +description = "The issue ID assigned to this polecat" +required = true + +[vars.base_branch] +description = "The base branch to rebase on and compare against (e.g., main, integration/epic-id)" +default = "main" + +[vars.setup_command] +description = "Setup/install command (e.g., pnpm install). Empty = skip." +default = "" + +[vars.typecheck_command] +description = "Type check command (e.g., tsc --noEmit). Empty = skip." +default = "" + +[vars.test_command] +description = "Command to run tests (auto-detected from rig settings)" +default = "" + +[vars.lint_command] +description = "Command to run linting. Empty = skip." +default = "" + +[vars.build_command] +description = "Command to run build. Empty = skip." 
+default = "" diff --git a/examples/deepwork/packs/deepwork-org/formulas/mol-scoped-work.formula.toml b/examples/deepwork/packs/deepwork-org/formulas/mol-scoped-work.formula.toml new file mode 100644 index 00000000..33346afb --- /dev/null +++ b/examples/deepwork/packs/deepwork-org/formulas/mol-scoped-work.formula.toml @@ -0,0 +1,172 @@ +description = """ +Graph-first worktree lifecycle. + +This is the built-in v2 workflow prototype for Gas City. It models work as an +explicit DAG with: + +- a durable `body` scope bead +- explicit worktree setup and teardown +- first-class step beads that can be routed independently +- continuation metadata for same-session execution + +Use this as the opt-in replacement for hierarchy-first single-session formulas. +""" +formula = "mol-scoped-work" +version = 2 + +[vars] +[vars.issue] +description = "The work bead ID or external issue reference" +required = true + +[vars.base_branch] +description = "Base branch to branch from" +default = "main" + +[vars.setup_command] +description = "Optional setup command (install deps, bootstrap tools)" +default = "" + +[vars.typecheck_command] +description = "Optional typecheck command" +default = "" + +[vars.lint_command] +description = "Optional lint command" +default = "" + +[vars.build_command] +description = "Optional build command" +default = "" + +[vars.test_command] +description = "Optional test command" +default = "" + +[[steps]] +id = "load-context" +title = "Load context and inspect the assignment" +description = """ +Prime the workspace, inspect the assigned work, and understand the current bead +metadata before doing anything destructive. 
+ +```bash +gc prime +bd prime +bd show {{issue}} +bd show {{issue}} --json | jq '.metadata' +gc mail inbox +``` +""" +metadata = { "gc.continuation_group" = "main", "gc.session_affinity" = "require" } + +[[steps]] +id = "body" +title = "Worktree body scope" +needs = ["workspace-setup", "preflight-tests", "implement", "self-review", "submit"] +description = """ +Terminal latch for the main worktree body. + +Normal work beads inside this scope can fail. Paired `scope-check` control +beads handle fail-fast skipping, close this bead with `gc.outcome=fail|pass`, +and allow teardown to proceed. +""" +metadata = { "gc.kind" = "scope", "gc.scope_name" = "worktree", "gc.scope_role" = "body" } + +[[steps]] +id = "workspace-setup" +title = "Set up a worktree and branch" +needs = ["load-context"] +description = """ +Ensure there is an isolated worktree for this work item. + +```bash +git fetch --prune origin +WORKTREE=$(bd show {{issue}} --json | jq -r '.metadata.work_dir // empty') +if [ -z "$WORKTREE" ]; then + WORKTREE_PATH=$(pwd)/worktrees/{{issue}} + git worktree add "$WORKTREE_PATH" --detach origin/{{base_branch}} + bd update {{issue}} --set-metadata work_dir="$WORKTREE_PATH" + WORKTREE="$WORKTREE_PATH" +fi +cd "$WORKTREE" +{{setup_command}} +``` +""" +metadata = { "gc.scope_ref" = "body", "gc.scope_role" = "setup", "gc.on_fail" = "abort_scope", "gc.continuation_group" = "main", "gc.session_affinity" = "require" } + +[[steps]] +id = "preflight-tests" +title = "Run preflight checks on the base branch" +needs = ["workspace-setup"] +description = """ +Run the configured checks before implementation. 
+ +```bash +{{typecheck_command}} +{{lint_command}} +{{test_command}} +``` +""" +metadata = { "gc.scope_ref" = "body", "gc.scope_role" = "member", "gc.on_fail" = "abort_scope", "gc.continuation_group" = "main", "gc.session_affinity" = "require" } + +[[steps]] +id = "implement" +title = "Implement the requested change" +needs = ["preflight-tests"] +description = """ +Make the code changes for `{{issue}}` in the worktree. Commit focused changes +as needed and keep the branch scoped to this work. +""" +metadata = { "gc.scope_ref" = "body", "gc.scope_role" = "member", "gc.on_fail" = "abort_scope", "gc.continuation_group" = "main", "gc.session_affinity" = "require" } + +[[steps]] +id = "self-review" +title = "Review the diff and run verification" +needs = ["implement"] +description = """ +Inspect the change and run verification commands. + +```bash +git diff origin/{{base_branch}}...HEAD +{{typecheck_command}} +{{lint_command}} +{{build_command}} +{{test_command}} +git status +``` +""" +metadata = { "gc.scope_ref" = "body", "gc.scope_role" = "member", "gc.on_fail" = "abort_scope", "gc.continuation_group" = "main", "gc.session_affinity" = "require" } + +[[steps]] +id = "submit" +title = "Finalize the work item" +needs = ["self-review"] +description = """ +Perform the city-specific finalization for this work item. Examples: + +- push the branch and hand off to a reviewer +- close the original work bead +- update metadata for downstream systems + +This is intentionally generic so cities can opt into the graph contract +without inheriting Gastown's exact human workflow. +""" +metadata = { "gc.scope_ref" = "body", "gc.scope_role" = "member", "gc.on_fail" = "abort_scope", "gc.continuation_group" = "main", "gc.session_affinity" = "require" } + +[[steps]] +id = "cleanup-worktree" +title = "Clean up the worktree" +needs = ["body"] +description = """ +Remove the temporary worktree after the body reaches terminal state. 
+ +```bash +WORKTREE=$(bd show {{issue}} --json | jq -r '.metadata.work_dir // empty') +if [ -n "$WORKTREE" ] && [ -d "$WORKTREE" ]; then + git worktree remove --force "$WORKTREE" || rm -rf "$WORKTREE" +fi +bd update {{issue}} --unset-metadata work_dir +``` +""" +metadata = { "gc.kind" = "cleanup", "gc.scope_ref" = "body", "gc.scope_role" = "teardown" } diff --git a/examples/deepwork/packs/deepwork-org/formulas/mol-witness-patrol.formula.toml b/examples/deepwork/packs/deepwork-org/formulas/mol-witness-patrol.formula.toml new file mode 100644 index 00000000..397b0154 --- /dev/null +++ b/examples/deepwork/packs/deepwork-org/formulas/mol-witness-patrol.formula.toml @@ -0,0 +1,67 @@ +description = "Per-rig worker monitor patrol loop.\n\nThe Witness is the Pit Boss for your rig. You watch polecats, nudge them toward\ncompletion, verify clean git state before kills, and escalate stuck workers.\n\n**You do NOT do implementation work.** Your job is oversight, not coding.\n\n## Persistent Polecat Model (gt-4ac)\n\nPolecats persist after work completion — sandbox is preserved for reuse:\n\n```\nPolecat lifecycle: spawning → working → mr_submitted → idle (sandbox preserved)\nMR lifecycle: created → queued → processed → merged (Refinery handles)\n```\n\nOnce a polecat calls gt done and submits an MR, it transitions to idle state.\nThe MR lifecycle continues independently in the Refinery. The polecat is NOT\nnuked — its sandbox is preserved for reuse by future slings.\n\n**CRITICAL**: Do NOT nuke polecats with pending MRs. The refinery needs the\nremote branch to exist to process the merge. Nuking deletes the remote branch\nand orphans the MR. See gt-6a9d.\n\n**Key principle**: Polecat lifecycle is separate from MR lifecycle. Polecats\ngo idle after work, they are NOT destroyed.\n\n## Restart-First Policy (gt-dsgp)\n\nThe witness NEVER nukes polecats automatically. 
When a polecat is stuck, hung,\nor has a dead agent process, the witness RESTARTS the session instead of nuking.\nThis preserves the polecat's worktree and branch, preventing work loss.\n\n- Dead agent process → restart session\n- Hung session (no output 30+ min) → restart session\n- Stuck in gt done → restart session\n- Done polecat (bead closed) → leave alone (sandbox preserved)\n- Polecat with pending MR → leave alone (refinery handles)\n\nNuking only happens via explicit `gt polecat nuke` command from a human or Mayor.\n\n## Design Philosophy\n\nThis patrol follows Gas Town principles:\n- **Discovery over tracking**: Observe reality each cycle, with minimal agent-bead state for duration tracking\n- **Beads over mail**: survey-workers discovers completion state from agent bead metadata (gt-w0br); inbox-check POLECAT_DONE is fallback only\n- **Persistent by default**: Clean polecats go idle, sandbox preserved for reuse (gt-4ac)\n- **Cleanup wisps for merge tracking**: Created when MR is pending in refinery\n- **Task tool for parallelism**: Subagents inspect polecats, not molecule arms\n- **Swim lane discipline**: Only close wisps YOU created. Wisp lifecycle for non-witness wisps is the reaper Dog's job. Report orphaned foreign wisps — never close them.\n\n## Patrol Shape (Linear)\n\n```\ninbox-check ─► process-cleanups ─► check-refinery ─► survey-workers\n │\n ┌──────────────────────────────────────────────────┘\n ▼\n check-timer-gates ─► check-swarm ─► patrol-cleanup ─► context-check ─► loop-or-exit\n```\n\nNo dynamic arms. No fanout gates. No persistent nudge counters.\nState is discovered each cycle from reality (tmux, beads, mail)." 
+formula = 'mol-witness-patrol' +version = 10 + +[vars] +[vars.wisp_type] +description = "Type of wisp created for this molecule" +default = "patrol" + +[[steps]] +description = "First, clean up YOUR OWN wisps from previous cycles (closed wisps + abandoned wisps):\n```bash\nbd mol wisp gc --closed --force\nbd mol wisp gc --age 1h --force\n```\n\n🚨 **SWIM LANE RULE: Do NOT close wisps you didn't create.**\nWisp lifecycle management (close, delete, gc) for non-witness wisps is the\nreaper Dog's responsibility, NOT yours. If you see wisps that look orphaned\nor stale but were NOT created by your patrol, **report them — don't close them**:\n```bash\ngt mail send deacon/ -s \"NOTICE: Possibly orphaned wisps\" -m \"Found wisps that may be orphaned:\n\nThese were NOT created by witness patrol. Reporting for reaper review.\"\n```\nClosing foreign wisps kills active polecat work molecules.\n\n## Step 0: Drain stale protocol messages (ALWAYS run first)\n\nBefore processing individual messages, bulk-drain stale protocol messages.\nThis prevents inbox backlog from consuming patrol context.\n\n```bash\ngt mail drain --identity /witness --max-age 30m\n```\n\nThis archives POLECAT_DONE, POLECAT_STARTED, LIFECYCLE:*, MERGED,\nMERGE_READY, MERGE_FAILED, and SWARM_START messages older than 30 minutes.\nHELP and HANDOFF messages are NEVER drained (they need attention).\n\nIf the drain reports > 0 archived messages, log the count and continue.\n\n## Step 1: Check inbox size and batch if needed\n\n```bash\ngt mail inbox\n```\n\n**Batch processing rule**: If inbox has > 10 messages after drain:\n- Process messages in batches by type, not one-by-one\n- Group POLECAT_DONE messages together: archive all at once\n- Group MERGED messages: close cleanup wisps, then archive batch\n- Process HELP messages individually (they need assessment)\n- Log summary counts: \"Processed 5 POLECAT_DONE, 3 MERGED, 1 HELP\"\n\n**If inbox ≤ 10 messages**: Process each individually as described below.\n\nFor 
each message:\n\n**POLECAT_STARTED**:\nA new polecat has started working. Acknowledge and archive.\n```bash\n# Acknowledge startup (optional: log for activity tracking)\ngt mail archive \n```\nNo action needed beyond acknowledgment - archive immediately.\n\n**POLECAT_DONE / LIFECYCLE:Shutdown** (FALLBACK — primary discovery is via survey-workers bead scan, gt-w0br):\n\n*PERSISTENT MODEL (gt-4ac)*: Polecats persist after work completion.\nThe polecat transitions to idle state — its sandbox is preserved for reuse.\nThe MR lifecycle continues independently in the Refinery.\n\nPolecat lifecycle: spawning → working → mr_submitted → idle (preserved)\nMR lifecycle: created → queued → processed → merged (handled by Refinery)\n\n⚠️ **CRITICAL (gt-6a9d): Do NOT nuke polecats with pending MRs.**\nThe refinery needs the remote branch to merge. Nuking deletes the branch\nand orphans the MR, causing work loss.\n\nThe handler (HandlePolecatDone) will:\n1. If pending MR exists: Create cleanup wisp, send MERGE_READY to refinery\n2. If no MR: Acknowledge completion (polecat is idle)\n\n```bash\n# The handler does this automatically:\n# - With MR: create cleanup wisp + send MERGE_READY → archive mail\n# - Without MR: acknowledge → archive mail\n# - Polecat goes idle in BOTH cases — no nuke.\n```\n\nDo NOT run gt polecat nuke on POLECAT_DONE (or any automatic trigger). The polecat is idle, not dead.\nArchive the message after the handler processes it.\n\n**MERGED**:\nA branch was merged successfully. The polecat's cleanup wisp can be closed.\nThe polecat remains idle (sandbox preserved for reuse).\n\nIf a cleanup wisp exists, close it:\n```bash\n# Find the cleanup wisp for this polecat\nbd list --label polecat:,state:merge-requested --status=open\n\n# If found, close the wisp (work is merged, cleanup tracked)\nbd close --reason \"merged successfully\"\n```\nDo NOT nuke the polecat. 
Archive after cleanup wisp is closed.\n\n**HELP / Blocked**:\nThe handler (HandleHelp) automatically classifies the request by category and\nseverity using keyword matching. The assessment appears in the handler output.\n\n**Assessment categories and routing:**\n| Category | Severity | Route to | Trigger keywords |\n|----------|----------|----------|------------------|\n| emergency | critical | overseer | security, vulnerability, breach, data corruption, data loss |\n| failed | high | deacon | crash, panic, fatal, oom, disk full, connection refused, database error |\n| blocked | high | mayor | blocked, merge conflict, deadlock, stuck, cannot proceed |\n| decision | medium | deacon | which approach, ambiguous, unclear, design choice, architecture |\n| lifecycle | medium | witness | session, respawn, zombie, hung, timeout, no progress |\n| help | medium | deacon | (default when no keywords match) |\n\nUse the assessment as guidance, but apply your own judgment:\n1. **Can you resolve it directly?** (e.g., lifecycle issues, simple guidance) → Help and archive\n2. **Need to escalate?** → Route to the suggested target:\n```bash\ngt mail send / -s \"Escalation: needs help\" -m \"Category: \nSeverity: \n\"\n```\n3. **Override assessment if needed** — the heuristic is a starting point, not gospel.\n\nArchive after handling (escalated or resolved):\n```bash\ngt mail archive \n```\n\n**HANDOFF**:\nRead predecessor context. Continue from where they left off.\nArchive after absorbing context:\n```bash\ngt mail archive \n```\n\n**SWARM_START**:\nMayor initiating batch polecat work. 
Initialize swarm tracking.\n```bash\n# Parse swarm info from mail body: {\"swarm_id\": \"batch-123\", \"beads\": [\"bd-a\", \"bd-b\"]}\nbd create --ephemeral --wisp-type patrol --title \"swarm:\" --description \"Tracking batch: \" --labels swarm,swarm_id:,total:,completed:0,start:\n```\nArchive after creating swarm tracking wisp:\n```bash\ngt mail archive \n```\n\n**Hygiene principle**: Archive messages after they're fully processed.\nKeep only: active work, unprocessed requests. Inbox should be near-empty."
+id = 'inbox-check'
+title = 'Process witness mail'
+
+[[steps]]
+description = "Process cleanup wisps (merge tracking and dirty state handling).\n\nCleanup wisps are created for two reasons:\n1. Pending MR: HandlePolecatDone creates a wisp in 'merge-requested' state\n when a polecat has work in the refinery queue. These are resolved when\n the MERGED signal arrives.\n2. Dirty state: When a polecat has uncommitted changes or unpushed commits\n that need manual intervention.\n\n```bash\n# Find all cleanup wisps\nbd list --label cleanup --status=open\n```\n\nIf no wisps, skip this step (most common case in ephemeral model).\n\nFor each cleanup wisp, investigate and resolve the dirty state:\n\n## State: pending (needs investigation)\n\n1. **Extract polecat name** from wisp title/labels\n\n2. **Diagnose the problem**:\n```bash\ncd polecats/\ngit status # What's uncommitted?\ngit stash list # Any stashed work?\ngit log @{u}..HEAD # Any unpushed commits?\n```\n\n3. **Resolution options**:\n - **Uncommitted changes**: Commit and push, then restart the session\n - **Stashed work**: Pop and commit, or discard if not valuable\n - **Unpushed commits**: Push to origin, then restart the session\n - **All valuable work lost**: Escalate to Deacon for recovery\n\n4. **If resolvable locally**: Fix and restart the session\n```bash\n# Example: push unpushed commits\ngit push origin HEAD\n\n# Then restart the session (restart-first policy, gt-dsgp — never auto-nuke)\ngt session restart /\n\n# Close the wisp\nbd close --reason \"Resolved: pushed commits, session restarted\"\n```\n\n5. 
**If needs escalation**: Send RECOVERY_NEEDED to Deacon\n```bash\ngt mail send deacon/ -s \"RECOVERY_NEEDED /\" \\\n -m \"Cleanup Status: \nBranch: \nIssue: \n\nCannot auto-resolve. Please advise.\"\n```\nLeave wisp open until Deacon resolves.\n\n## State: merge-requested (legacy, rare)\n\nThis state was used before the ephemeral model. If found, the polecat is\nwaiting for a MERGED signal. The inbox-check step handles these.\n\n**Parallelism**: Use Task tool subagents to process multiple cleanups concurrently.\nEach cleanup is independent - perfect for parallel execution." +id = 'process-cleanups' +needs = ['inbox-check'] +title = 'Process pending cleanup wisps' + +[[steps]] +description = "Check refinery and deacon health.\n\n**Step 1: Check refinery session**\n```bash\ngt session status /refinery\n```\n\nIf MRs waiting AND refinery not running:\n```bash\ngt session start /refinery\ngt mail send /refinery -s \"PATROL: Wake up\" -m \"Merge requests in queue. Please process.\"\ngt mol step emit-event --channel refinery --type PATROL_WAKE \\\n --payload source=witness --payload queue_depth=\n```\n\n**Event emission**: Always emit a file event when waking the refinery.\nThis ensures the refinery's `await-event` unblocks instantly instead of\nwaiting for its next timeout cycle.\n\n**Step 2: Queue health analysis**\n\nRun the full queue view to get raw data for every open MR:\n```bash\ngt refinery ready --all --json\n```\n\nThis returns all open MRs with timestamps, assignees, and branch existence data.\nUse your judgment to assess the queue — there are no hardcoded thresholds.\n\n**What to look for:**\n\n- **Stale claimed MRs**: MRs with a non-empty `Assignee` but old `UpdatedAt`.\n Consider the queue size, time of day, and typical processing time.\n A claimed MR that hasn't been updated in a while may indicate a stuck refinery.\n\n- **Orphaned branches**: MRs where both `BranchExistsLocal` and `BranchExistsRemote`\n are false. 
The source branch may have been deleted while the MR bead is still open.\n These likely need to be closed or investigated.\n\n- **Queue depth**: A large number of unclaimed MRs may indicate the refinery is down\n or overwhelmed. Consider waking it or escalating.\n\n**Step 3: Check deacon health**\n\n⚠️ **The deacon tmux session is named `hq-deacon`** (NOT `deacon`).\nTown-level agents use the `hq-` prefix.\n\n```bash\ntmux has-session -t hq-deacon 2>/dev/null && echo \"alive\" || echo \"dead\"\n```\n\nIf the deacon session is dead, escalate to Mayor:\n```bash\ngt mail send mayor/ -s \"ALERT: Deacon session hq-deacon is down\" \\\n -m \"Deacon tmux session (hq-deacon) not found.\nDetected during witness patrol.\nPlease restart the deacon.\"\n```\n\n**Step 4: Escalate if needed**\n\nIf you identify problems, escalate to Deacon with specific MR IDs and context:\n```bash\ngt mail send deacon/ -s \"QUEUE_HEALTH: \" \\\n -m \"MR IDs: \nObservation: \nRecommendation: \"\n```" +id = 'check-refinery' +needs = ['process-cleanups'] +title = 'Check refinery and deacon health' + +[[steps]] +description = "Survey all polecats using agent beads and tmux session cross-reference.\n\n## PRIMARY: Discover completions from agent bead metadata (gt-w0br)\n\nBefore zombie detection or progress checks, scan agent beads for completion\nmetadata written by `gt done`. This is the PRIMARY mechanism for discovering\npolecat state transitions. The inbox-check POLECAT_DONE mail is now fallback only.\n\nCompletion metadata fields on agent beads (set by gt done):\n- `exit_type`: COMPLETED, ESCALATED, DEFERRED, PHASE_COMPLETE\n- `mr_id`: MR bead ID (if MR was created)\n- `branch`: Working branch name\n- `mr_failed`: true if MR creation failed\n- `completion_time`: RFC3339 timestamp\n\n**Step 0: Discover completions from beads**\n\nThe `DiscoverCompletions()` function (witness/handlers.go) handles this:\n1. Scans all polecat agent beads for `exit_type` + `completion_time` set\n2. 
Routes each: MR present → cleanup wisp + MERGE_READY; no MR → acknowledge idle\n3. Clears completion metadata after processing (prevents re-processing)\n\nThis replaces the reactive POLECAT_DONE mail flow with proactive bead discovery.\n\n🚨 **SWIM LANE RULE: You may ONLY close wisps that YOU (the witness) created.**\nDo NOT close formula wisps, polecat work wisps, or any wisp created by `gt sling`\nor another agent. Wisp lifecycle for non-witness wisps is the reaper Dog's job.\nIf you encounter wisps that look orphaned but weren't created by your patrol,\nreport them to Deacon — do NOT close them. Closing foreign wisps kills active\npolecat work molecules.\n\n**Step 1: List polecat agent beads**\n\n```bash\nbd list --type=agent --json\n```\n\nFilter the JSON output for entries where description contains `role_type: polecat`.\nEach polecat agent bead has fields in its description:\n- `role_type: polecat`\n- `rig: `\n- `agent_state: running|idle|awaiting_verdict|stuck|done`\n- `hook_bead: `\n\n**Step 2: For each polecat, check agent_state**\n\n| agent_state | Meaning | Action |\n|-------------|---------|--------|\n| running | Actively working | Check for zombie (Step 2a), then progress (Step 3) |\n| working | Actively working | Check for zombie (Step 2a), then progress (Step 3) |\n| awaiting_verdict | MR submitted, waiting for refinery | Check for zombie (Step 2c) |\n| spawning | Agent initializing | Skip zombie detection. Check spawn age (Step 2b) |\n| idle | No work assigned | Leave alone — sandbox preserved for reuse (Step 3a) |\n| stuck | Self-reported stuck | Handle stuck protocol |\n| done | Work complete | Verify cleanup triggered (see Step 4a) |\n\n**Step 2a: ZOMBIE DETECTION — Cross-reference tmux session existence**\n\n🚨 **CRITICAL**: Zombies cannot send signals. 
A polecat with agent_state=running\nor hook_bead assigned but NO tmux session is a zombie that will sit forever\nundetected unless you proactively check.\n\n⚠️ **SKIP spawning polecats**: Polecats with agent_state=spawning are still\ninitializing (worktree creation, dependency install, tmux session startup).\nThey will NOT have a tmux session yet — this is expected, not a zombie.\nDo NOT run zombie detection on spawning polecats. Handle them in Step 2b instead.\n\nFor EVERY polecat with agent_state=running/working (NOT spawning/awaiting_verdict) OR hook_bead assigned with non-spawning state:\n(awaiting_verdict polecats have their own zombie detection in Step 2c)\n```bash\ngt session status / --json | jq -r '.running' | grep -q true && echo ALIVE || echo ZOMBIE\n```\n\n**If ZOMBIE detected** (session missing, agent says working):\n\n**IMPORTANT (gt-sy8)**: Before processing as zombie, check if the hook_bead is\nalready CLOSED:\n```bash\nbd show --json | jq -r '.[0].status'\n```\nIf status is \"closed\", the polecat completed its work successfully. The dead\nsession is expected (gt done kills it). Just nuke the dead session — do NOT\ntrigger re-dispatch or send RECOVERED_BEAD/RECOVERY_NEEDED to Deacon.\n\n1. Check git state to determine if work is recoverable:\n```bash\ncd polecats//\ngit status --porcelain # Uncommitted changes?\ngit log @{u}..HEAD # Unpushed commits?\n```\n\n2. **If clean** (no uncommitted, no unpushed): Check for pending MR first.\n```bash\n# CRITICAL (gt-6a9d): Check for pending MR before any nuke!\nbd list --label polecat:,state:merge-requested --status=open\n# If merge-requested wisp exists → DO NOT NUKE, MR pending in refinery\n# If no pending MR → safe to nuke (zombie with no work to preserve)\ngt session restart /\n```\n\n3. 
**If dirty** (has unpushed/uncommitted work): Escalate to Deacon for recovery.\n```bash\ngt mail send deacon/ -s \"RECOVERY_NEEDED /\" \\\n -m \"Polecat: /\nCleanup Status: \nHook Bead: \nDetected: $(date -u +%Y-%m-%dT%H:%M:%SZ)\n\nZombie detected: tmux session dead, agent_state=.\nThis polecat has unpushed/uncommitted work that will be lost if nuked.\nPlease coordinate recovery before authorizing cleanup.\"\n```\n\nAlso create a cleanup wisp for tracking:\n```bash\nbd create --ephemeral --title \"cleanup:\" \\\n --description \"Zombie detected: session dead, state=\" \\\n --labels cleanup,polecat:,state:zombie-detected\n```\n\n**Step 2b: STALE SPAWN DETECTION — Check spawn age for spawning polecats**\n\nFor polecats with agent_state=spawning, check how long they've been spawning.\nSpawning should complete within 5 minutes even on large repos.\n\n```bash\n# Get the agent bead's updated_at timestamp to estimate spawn start\nbd show --json | jq -r '.[0].updated_at'\n# Compare with current time\n```\n\n| Spawn age | Action |\n|-----------|--------|\n| < 5 min | Normal — leave alone, spawning in progress |\n| 5-10 min | Warning — log observation, check again next cycle |\n| > 10 min | Stale spawn — escalate (do NOT nuke) |\n\n**If stale spawn detected** (spawning > 10 min):\n```bash\ngt escalate -s HIGH \"Stale spawn: / has been spawning for minutes\"\n```\n\nDo NOT nuke stale spawning polecats. The sling process may be slow (large repo\nclone, dependency install) or stuck. Escalation lets a human or Mayor investigate\nwithout destroying a potentially-in-progress setup.\n\n**Step 2c: AWAITING_VERDICT ZOMBIE DETECTION**\n\nPolecats in `awaiting_verdict` state have submitted their MR and are waiting for\nthe refinery to send MERGED or FIX_NEEDED. This is a valid long-running state.\n\n⚠️ **Do NOT treat awaiting_verdict as idle or stuck.** The polecat is legitimately\nwaiting for an external signal. 
However, if the session dies while waiting, the\npolecat becomes a zombie that will never receive the signal.\n\nFor EVERY polecat with agent_state=awaiting_verdict:\n```bash\ngt session status / --json | jq -r '.running' | grep -q true && echo ALIVE || echo ZOMBIE\n```\n\n**If ALIVE**: Leave alone. The polecat is waiting for its verdict. No action needed.\nDo NOT nudge awaiting_verdict polecats — they are correctly idle-waiting.\n\n**If ZOMBIE** (session dead while awaiting_verdict):\nThe polecat died while waiting for the refinery verdict. Restart it so it can\nre-check for pending FIX_NEEDED or MERGED signals:\n```bash\n# Check git state first\ncd polecats//\ngit status --porcelain\n```\n\nIf clean (expected for awaiting_verdict — work was already pushed):\n```bash\ngt session restart /\n```\n\nIf dirty (unexpected — should have been committed before submitting MR):\n```bash\ngt mail send deacon/ -s \"RECOVERY_NEEDED /\" \\\n -m \"Polecat: /\nState: awaiting_verdict (zombie)\nHook Bead: \nGit status: dirty (unexpected for awaiting_verdict)\n\nZombie detected while awaiting refinery verdict.\nHas uncommitted work that should have been pushed before MR submission.\nPlease coordinate recovery.\"\n```\n\n**Step 3: For running polecats (with LIVE session), assess progress**\n\nCheck the hook_bead field to see what they're working on:\n```bash\nbd show # See current step/issue\n```\n\nYou can also verify they're responsive:\n```bash\ngt peek / 20\n```\n\nLook for:\n- Recent tool activity → making progress\n- Idle at prompt → may need nudge\n- Error messages → may need help\n\n**Step 3a: For idle polecats, verify sandbox health**\n\nWhen agent_state=idle, the polecat has no work assigned. 
Its sandbox is\npreserved for reuse by future slings (persistent polecat model, gt-4ac).\n\n⚠️ **Do NOT nuke idle polecats.** Their sandbox is preserved for reuse.\nNuking would force a full re-clone on the next sling, which is slow.\n\nCheck for pending MRs — an idle polecat may have work in the refinery:\n```bash\n# Check for cleanup wisps (merge-requested = MR pending in refinery)\nbd list --label polecat:,state:merge-requested --status=open\n```\nIf a merge-requested wisp exists, the polecat's MR is in the refinery queue.\nDo NOT nuke — the refinery needs the remote branch.\n\n**If dirty** (uncommitted or unpushed work):\n```bash\n# Escalate to Deacon - polecat has work that might be valuable\ngt mail send deacon/ -s \\\"IDLE_DIRTY: has uncommitted work\\\" \\\n -m \\\"Polecat: \nState: idle (no hook_bead)\nGit status: \nUnpushed commits: \n\nPlease advise: recover work or discard?\\\"\n```\n\n**Rationale**: Idle polecats are preserved for reuse. Their sandbox contains\na pre-configured worktree that saves clone time on the next sling. 
Only\nescalate when there's actual dirty state at risk.\n\n**Step 4: Decide action**\n\n| Observation | Action |\n|-------------|--------|\n| agent_state=running, session alive, recent activity | None |\n| agent_state=running, session alive, idle 5-15 min | Gentle nudge |\n| agent_state=running, session alive, idle 15+ min | Direct nudge with deadline |\n| agent_state=running, SESSION DEAD | ZOMBIE — handle in Step 2a |\n| agent_state=awaiting_verdict, session alive | None — waiting for refinery verdict |\n| agent_state=awaiting_verdict, SESSION DEAD | ZOMBIE — restart session (Step 2c) |\n| agent_state=spawning, < 5 min | None — spawning in progress |\n| agent_state=spawning, 5-10 min | Log warning, check next cycle |\n| agent_state=spawning, > 10 min | Stale spawn — escalate (Step 2b) |\n| agent_state=stuck | Assess and help or escalate |\n| agent_state=done | Verify cleanup triggered (see Step 4a) |\n\n**Step 4a: Handle agent_state=done**\n\nIn the persistent model, polecats with agent_state=done should be idle with\ntheir sandbox preserved. Finding one here indicates:\n\n1. **Stale agent bead** - polecat was nuked but bead remains\n ```bash\n # Verify polecat doesn't exist anymore\n ls polecats/ 2>/dev/null || echo \"Already nuked\"\n ```\n If nuked, the agent bead is stale. Clean it up or ignore.\n\n2. **Cleanup wisp exists** - polecat has dirty state needing intervention\n ```bash\n bd list --label polecat: --status=open\n ```\n Process in process-cleanups step.\n\n3. 
**No wisp, polecat exists** - POLECAT_DONE mail was missed\n Check for pending MR before taking any action:\n ```bash\n # Check for pending MR (gt-6a9d: do NOT nuke if MR pending)\n bd list --label polecat:,state:merge-requested --status=open\n # If no pending MR and no dirty state → polecat is idle, leave it\n ```\n If dirty state exists, create cleanup wisp for investigation.\n\n**Step 5: Execute nudges**\n```bash\n# Use --mode=queue to avoid interrupting in-flight tool calls\ngt nudge --mode=queue /polecats/ \"How's progress? Need help?\"\n```\n\n**Step 6: Escalate if needed**\n```bash\ngt mail send deacon/ -s \"Escalation: stuck\" \\\n -m \"Polecat reports stuck. Please intervene.\"\n```\n\n**Parallelism**: Use Task tool subagents to inspect multiple polecats concurrently.\n\n**ZFC Principle**: Trust agent_state from beads for WHAT agents report. But\nverify tmux session existence for WHETHER agents are alive. A dead session with\nagent_state=running is a zombie — the agent cannot correct its own state.\n\n**Step 7: ORPHANED BEAD DETECTION — Scan from beads side**\n\n🚨 **CRITICAL**: Zombie detection (Step 2a) scans FROM polecat directories.\nOnce a polecat is nuked and its directory removed, its beads become invisible\nto zombie detection. Orphaned bead detection scans FROM beads to catch this case.\n\n```bash\nbd list --status=in_progress --json --limit=0\nbd list --status=hooked --json --limit=0\n```\n\nFor each in_progress or hooked bead with a polecat assignee (format: `/polecats/`):\n0. Verify bead status is still in_progress/hooked (not closed since listing). If\n closed, skip — the polecat completed its work. (gt-sy8)\n1. Only check beads assigned to polecats in YOUR rig\n2. Check tmux session: `gt session status / --json | jq -r '.running'`\n3. Check polecat directory: `ls /polecats/ 2>/dev/null`\n4. If BOTH session dead AND directory missing → orphan. 
Reset the bead:\n ```bash\n bd update --status=open --assignee=\n gt mail send deacon/ -s \"ORPHAN_RECOVERED: \" \\\n -m \"Bead was assigned to /polecats/ which no longer exists.\n The bead has been reset to open with no assignee.\n Please re-dispatch to an available polecat.\"\n ```\n5. If directory exists but session dead → skip (zombie detection handles it)\n6. If session alive → not an orphan, skip" +id = 'survey-workers' +needs = ['check-refinery'] +title = 'Inspect all active polecats' + +[[steps]] +description = "Check for expired timer gates and escalate as needed.\n\nTimer gates are async wait conditions with a timeout. When the timeout expires,\nthe gate should be escalated to the overseer for human intervention.\n\n**Step 1: Run timer gate check**\n```bash\nbd gate check --type=timer --escalate\n```\n\nThis command:\n1. Finds all open gate issues with await_type=timer\n2. Checks if `now > created_at + timeout`\n3. Escalates expired gates via `gt escalate` (HIGH severity)\n4. Reports summary of gate status\n\n**Step 2: Review output**\n\nIf expired gates were found and escalated:\n- The escalation creates an audit trail bead\n- Overseer will be notified via mail\n- Gate remains open until manually resolved\n\nIf no expired gates:\n- Continue patrol normally\n\n**Note**: Timer gates do NOT auto-close on expiration. They escalate.\nThis ensures human oversight of timeout conditions.\n\n**Parallelism**: This is a single command, no parallel execution needed." 
+id = 'check-timer-gates' +needs = ['survey-workers'] +title = 'Check timer gates for expiration' + +[[steps]] +description = "If Mayor started a batch (SWARM_START), check if all polecats have completed.\n\n**Step 1: Find active swarm tracking wisps**\n```bash\nbd list --label swarm --status=open\n```\nIf no active swarm, skip this step.\n\n**Step 2: Count completed polecats for this swarm**\n\nExtract from wisp labels: swarm_id, total, completed, start timestamp.\nCheck how many cleanup wisps have been closed for this swarm's polecats.\n\n**Step 3: If all complete, notify Mayor**\n```bash\ngt mail send mayor/ -s \"SWARM_COMPLETE: \" -m \"All polecats merged.\nDuration: minutes\nSwarm: \"\n\n# Close the swarm tracking wisp\nbd close --reason \"All polecats merged\"\n```\n\nNote: Runs every patrol cycle. Notification sent exactly once when all complete." +id = 'check-swarm-completion' +needs = ['check-timer-gates'] +title = 'Check if active swarm is complete' + +[[steps]] +description = "Verify inbox hygiene before ending patrol cycle.\n\n**Step 1: Run drain to catch any protocol messages that arrived during patrol**\n```bash\ngt mail drain --identity /witness --max-age 30m\n```\nThis catches protocol messages that accumulated while you were processing\nother patrol steps.\n\n**Step 2: Check inbox state**\n```bash\ngt mail inbox\n```\n\nIn the persistent model, POLECAT_DONE messages create cleanup wisps and\nsend MERGE_READY to refinery. 
Inbox should contain ONLY:\n- Unprocessed messages (just arrived, will handle next cycle)\n- MERGED notifications (close cleanup wisp, then archive)\n\n**Step 3: Archive any remaining stale messages**\n\nLook for messages that were processed but not archived:\n- HELP/Blocked that was escalated → archive\n- Any other processed messages still in inbox → archive\n\n```bash\n# For each stale message found:\ngt mail archive \n```\n\n**Step 4: Verify cleanup wisp hygiene**\n\nIn the persistent model, cleanup wisps track pending MRs and dirty state:\n```bash\nbd list --label cleanup --status=open\n```\n\n- state:pending → Needs investigation in process-cleanups\n- state:merge-requested → Legacy state, handle in inbox-check\n\nIf cleanup wisps are accumulating, investigate why polecats aren't clean.\n\n**Goal**: Inbox should be nearly empty. Cleanup wisps should be rare." +id = 'patrol-cleanup' +needs = ['check-wasteland'] +title = 'End-of-cycle inbox hygiene' + +[[steps]] +description = "Check wasteland for unclaimed work matching this rig.\n\nThe witness acts as a wasteland consumer — if there are open, unclaimed wanted\nitems for this rig's project, and we have idle polecats or capacity to spawn,\ncreate a bead and sling it.\n\n**Step 1: Check if wasteland is configured**\n```bash\ngt wl charsheet --json 2>/dev/null | head -1 | grep -q handle || { echo 'Wasteland not configured, skipping'; exit 0; }\n```\nIf wasteland is not configured, skip this step entirely.\n\n**Step 2: Browse unclaimed items for this rig's project**\n```bash\n# Map rig name to wasteland project:\n# \n# \n# \n# \n# \n\ngt wl browse --project --status open --limit 5 --json\n```\n\nFilter for items that are NOT claimed (claimed_by is empty).\n\n**Step 3: Check capacity**\n\nBefore claiming, check if we have idle polecats or spawn headroom:\n```bash\n# Count active polecats in this rig\nls polecats/ 2>/dev/null | wc -l\n\n# Check scheduler limits\ngt scheduler status\n```\n\nIf at capacity, skip. 
Don't over-commit.\n\n**Step 4: Claim and dispatch (max 1 per patrol cycle)**\n\nPick the highest priority unclaimed item:\n```bash\n# Claim on wasteland first\ngt wl claim \n\n# Create a local bead from the wasteland item\nbd create --rig '' --type <type> --priority <priority> \\\n --description 'Wasteland: <wanted-id>\nRepo: <repo-url-from-description>\n<full-description-from-wasteland>'\n\n# Sling to a polecat\ngt sling <bead-id> <rig>\n```\n\n**IMPORTANT**: Only claim ONE item per patrol cycle. This prevents over-commitment\nand lets the system naturally balance load across patrol cycles.\n\n**Step 5: If no unclaimed items or no capacity, skip silently**\n\nThis is normal — most patrol cycles will skip this step. Wasteland consumption\nis opportunistic, not mandatory." +id = 'check-wasteland' +needs = ['check-swarm-completion'] +title = 'Check wasteland for unclaimed work' + +[[steps]] +description = "Check own context usage.\n\nIf context is HIGH (>80%):\n- Ensure any notes are written to handoff mail\n- Prepare for session restart\n\nIf context is LOW:\n- Can continue patrolling" +id = 'context-check' +needs = ['patrol-cleanup'] +title = 'Check own context limit' + +[[steps]] +description = "End of patrol cycle decision.\n\n**If context LOW** (can continue patrolling):\n\nResolve your agent bead ID for this patrol cycle. You MUST replace `<YOUR_RIG>` below with your actual rig name (e.g., `beads`, `town`) before running:\n```bash\nbd list --type=agent --desc-contains=\"role_type: witness\" --json | jq -r '.[] | select(.status != \"closed\") | select(.description | test(\"(?m)^\\\\s*rig: <YOUR_RIG>\\\\s*$\")) | .id'\n```\nThis must return exactly one bead ID. If it returns zero results, STOP and report an error — verify you substituted `<YOUR_RIG>` correctly. If it returns multiple results, STOP and report an error — manual disambiguation is required. 
Use the single resolved bead ID as YOUR_AGENT_BEAD in the commands below.\n\nThen use await-signal with exponential backoff to wait for activity:\n\n```bash\ngt mol step await-signal --agent-bead YOUR_AGENT_BEAD \\\n --backoff-base 30s --backoff-mult 2 --backoff-max 5m\n```\n\nThis command:\n1. Subscribes to `bd activity --follow` (beads activity feed)\n2. Returns IMMEDIATELY when any beads activity occurs\n3. If no activity, times out with exponential backoff:\n - First timeout: 30s\n - Second timeout: 60s\n - Third timeout: 120s\n - ...capped at 5 minutes max\n4. Tracks `idle:N` label on your agent bead for backoff state\n\n**On signal received** (activity detected):\nReset the idle counter and start next patrol cycle:\n```bash\ngt agent state YOUR_AGENT_BEAD --set idle=0\n```\n\n**On timeout** (no activity):\nThe idle counter was auto-incremented. Continue to next patrol cycle\n(the longer backoff will apply next time).\n\nAfter await-signal returns (either by signal or timeout):\n1. Generate a brief summary of this patrol cycle's observations\n2. Close current patrol and start next cycle:\n```bash\ngt patrol report --summary \"<brief summary of patrol observations>\"\n```\nThis closes the current patrol wisp and automatically creates a new one.\n3. Continue executing from the first step of the new patrol cycle\n\n**If context HIGH** (approaching limit):\n1. Write handoff mail with notable observations:\n```bash\ngt handoff -s \"Witness patrol handoff\" -m \"<observations>\"\n```\n2. Exit cleanly - the daemon will respawn a fresh Witness session\n\n**NOTE (gt-058d)**: `gt handoff` enforces a minimum 2-minute cooldown between\nhandoffs. If the previous handoff was less than 2 minutes ago, the command\nwill sleep until the cooldown expires before proceeding. 
This prevents tight\nrestart loops when the rig is idle and patrols complete quickly.\n\n**IMPORTANT**: You must either report and loop (context LOW) or exit (context HIGH).\nNever leave the session idle without work on your hook." +id = 'loop-or-exit' +needs = ['context-check'] +title = 'Loop or exit for respawn' diff --git a/examples/deepwork/packs/deepwork-org/knowledge/anti-patterns.md b/examples/deepwork/packs/deepwork-org/knowledge/anti-patterns.md new file mode 100644 index 00000000..e83bd931 --- /dev/null +++ b/examples/deepwork/packs/deepwork-org/knowledge/anti-patterns.md @@ -0,0 +1,31 @@ +# Anti-Patterns — What Breaks + +Things that have caused incidents, wasted time, or confused agents. Avoid these. + +### Running `next dev` in Docker containers (2026-03-28) +Next.js dev mode inside a Docker container causes infinite recompile loops. The <product> dashboard hit 38,000% CPU. Always use production builds (multi-stage Dockerfile: node build -> nginx serve) for containerized frontends. +Source: vap dashboard incident. See memory: project_ulimit_fix.md. + +### Treating gastown/beads/mesh as user projects (2026-03-31) +These are TOOLS, not <your-project>. Don't create beads in the gastown rig for town infrastructure work. Town-level work uses de- prefix beads. User's actual <your-project>: <your-project>, <your-rig>, <your-rig>, <your-rig>, etc. +Source: User correction during de-9s0 session. + +### Excessive GitHub API calls from agents (2026-03-07) +6+ agents hitting GitHub API simultaneously got the account suspended. Never use GitHub for agent coordination. Use Gitea locally. GitHub is public mirror only, with zero agent API calls. +Source: freebird-ai suspension. + +### `kill -QUIT` on Dolt (2026-03-30) +SIGQUIT kills the Dolt server — it does NOT produce a goroutine dump like in standard Go programs. Dolt overrides the signal handler. Use `gt dolt status` for diagnostics instead. +Source: Dolt incident 2026-03-30. 
+ +### Orphaned mesh sync crons spawning thousands of dolt processes (2026-03-31) +Mesh sync crons (every 2 min) each spawn a dolt subprocess. If the subprocess hangs (e.g., due to ulimit), cron spawns another. This created 9000+ zombie dolt processes on the host. Fix: raise ulimit AND add process-already-running guards to cron scripts. +Source: de-9s0 investigation. + +### Slinging town-level beads (de-) to rigs (2026-03-31) +`gt sling de-xxx <rig>` fails by design. The bead prefix must match the rig's database. Create beads with `bd create --rig <rigname>` to get the correct prefix. +Source: de-2yd, de-yn5 investigation. + +### Using `go build` instead of `make build` for gt binary (2026-04-01) +Direct `go build` skips ldflags that set BuiltProperly=1, version, and commit hash. The binary works but prints warnings and may behave differently in production code paths that check BuiltProperly. +Source: de-9s0 execution. diff --git a/examples/deepwork/packs/deepwork-org/knowledge/bug-fixes.md b/examples/deepwork/packs/deepwork-org/knowledge/bug-fixes.md new file mode 100644 index 00000000..3517981d --- /dev/null +++ b/examples/deepwork/packs/deepwork-org/knowledge/bug-fixes.md @@ -0,0 +1,60 @@ +# Bug Fixes & Patterns — Learned from Usage + +These are bugs discovered and fixed through the self-improving loop. +Every fix here prevents the same bug from hitting another node. + +## Dolt CSV Output + +**Bug:** `dolt sql -q "SELECT ..." -r csv | tail -1` returns the column header when query returns 0 rows. +**Fix:** Always use `tail -n +2 | head -1` to skip the CSV header row. +**Applies to:** Every script that queries DoltHub via `dolt sql -r csv`. + +## Dolt Diff Grep Pattern + +**Bug:** `dolt diff --staged --stat` outputs "Row Modified" (capitalized), but grep pattern was `grep -q "rows"` (lowercase plural). +**Fix:** Use `grep -qi "row"` (case-insensitive, singular). +**Applies to:** mesh-sync.sh and any script checking for staged changes. 
+ +## set -e With Dolt Commands + +**Bug:** `set -e` causes the entire script to exit when a dolt command returns non-zero for benign reasons (e.g., `dolt pull` with nothing to pull, `grep` finding no matches in a pipe). +**Fix:** Remove `set -e` from mesh scripts. Handle errors explicitly with `|| true` or `if` checks. +**Applies to:** All mesh scripts that call dolt. + +## Dirty State Before Pull + +**Bug:** `dolt pull` fails with "cannot merge with uncommitted changes" when the previous sync left staged changes (e.g., heartbeat update) without committing. +**Fix:** Always commit staged changes BEFORE pulling: +```bash +dolt add . 2>/dev/null +if dolt diff --staged --stat 2>/dev/null | grep -qi "row"; then + dolt commit -m "mesh: pre-sync commit" --allow-empty 2>/dev/null || true +fi +dolt pull +``` + +## Knowledge Deduplication + +**Bug:** `mesh-learnings.md` gets duplicate entries when both the graduate command and the sync pull write the same knowledge entry. +**Fix:** Before appending, check if the title already exists in the file: +```bash +if grep -qF "$title" "$LEARNINGS" 2>/dev/null; then + continue # already present +fi +``` + +## CSV Parsing With Commas in Content + +**Bug:** `IFS=',' read` breaks when a field contains commas (e.g., skill descriptions). +**Fix:** Use `CONCAT(fields, '|')` with `IFS='|'` for list queries. For detail queries, fetch each field individually. + +## Shell Function Override for gt + +**Bug:** `gt` is a compiled Go binary that doesn't support plugin subcommands. Symlinks and PATH tricks don't work. 
+**Fix:** Add a shell function to `.bashrc` that intercepts `gt mesh` and routes to `gt-mesh`: +```bash +gt() { + if [ "$1" = "mesh" ]; then shift; command gt-mesh "$@" + else command gt "$@"; fi +} +``` diff --git a/examples/deepwork/packs/deepwork-org/knowledge/formulas-reference.md b/examples/deepwork/packs/deepwork-org/knowledge/formulas-reference.md new file mode 100644 index 00000000..69ee895e --- /dev/null +++ b/examples/deepwork/packs/deepwork-org/knowledge/formulas-reference.md @@ -0,0 +1,80 @@ +# Formulas Reference — Workflow Templates + +Formulas define multi-step workflows as TOML files. Instantiated as molecules with steps that agents execute in order. + +## Active Formulas + +### mol-polecat-base (v1) +Base polecat lifecycle — never used directly, extended by variants. +Steps: load-context → workspace-setup (placeholder) → preflight-tests → implement → self-review + +### mol-polecat-commit (v1) +Extends mol-polecat-base. Direct-commit to base branch, no feature branch. +Steps: ...base steps... → commit-and-push (3-retry push with rebase) +Use case: Simple setups without merge review. + +### mol-do-work (v1) +Minimal: read bead → implement → close. No git branching. +Use case: Demos, simple single-agent workflows. + +### mol-scoped-work (v2) +DAG-based workflow with explicit scope beads, worktree lifecycle, fail-fast. +Steps: load-context → body scope → workspace-setup → preflight → implement → self-review → submit → cleanup-worktree +Use case: Gas City prototype. Most sophisticated formula. + +### cooking / pancakes (v1) +Demo formulas for testing the molecule system. + +## Missing Formulas (from reference implementation) + +These exist in the gascity reference but not in our town: + +### mol-polecat-work (v7) +Feature-branch + refinery handoff variant. 
The production polecat workflow: +- Creates bead-scoped worktree with feature branch +- Pushes branch to Gitea +- Hands off to refinery for merge review +- NOT a direct commit like mol-polecat-commit + +### mol-deacon-patrol (v12) +Deacon patrol loop: +- Check inbox, orphan process cleanup, health scan +- Utility agent health, Dolt health, system diagnostics +- Pour next iteration with exponential backoff + +### mol-witness-patrol (v7) +Witness patrol loop: +- Check inbox, recover orphaned beads +- Check polecat health, check refinery queue +- Pour next iteration + +### mol-refinery-patrol (v1) +Merge queue processor: +- Find work, rebase, run tests, handle failures +- Merge/push (supports direct and PR strategies) + +### mol-idea-to-plan (v2) +Full planning pipeline: +- Draft PRD, 6 parallel review legs, human gate +- 6 design explorations, 3 PRD alignment rounds +- Convert to bead DAG + +### mol-shutdown-dance (v1) +3-attempt interrogation (60s/120s/240s) — pardon or execute stuck agents. + +## Exec Orders (reference has, we don't) + +The reference implementation has "exec orders" — shell scripts run by the controller on cooldown without LLM involvement: + +| Order | Interval | Script | +|-------|----------|--------| +| gate-sweep | 30s | Evaluate timer/condition gates | +| orphan-sweep | 5m | Reset beads assigned to dead agents | +| cross-rig-deps | 5m | Convert satisfied cross-rig blocks | +| spawn-storm-detect | 5m | Detect crash-looping beads | +| jsonl-export | 15m | Export Dolt to JSONL git archive | +| reaper | 30m | Reap stale wisps/issues | +| wisp-compact | 1h | TTL-based ephemeral bead cleanup | +| prune-branches | 6h | Clean stale gc/* branches | + +We handle some of these via plugins (which require deacon + LLM), but exec orders are more efficient. 
diff --git a/examples/deepwork/packs/deepwork-org/knowledge/hooks-reference.md b/examples/deepwork/packs/deepwork-org/knowledge/hooks-reference.md new file mode 100644 index 00000000..75d85942 --- /dev/null +++ b/examples/deepwork/packs/deepwork-org/knowledge/hooks-reference.md @@ -0,0 +1,51 @@ +# Hooks Reference — Claude Code Lifecycle Hooks + +Gas Town uses Claude Code hooks to inject behavior at key moments in every agent's lifecycle. + +## Shared Base (~/.gt/hooks-base.json) + +Applied to ALL agents via `gt hooks sync`. + +| Event | Matcher | Command | Purpose | +|-------|---------|---------|---------| +| SessionStart | (all) | `gt prime --hook; cold-start-recovery.sh` | Load role context, recover from crashes | +| Stop | (all) | `graceful-handoff.sh --reason session-stop; gt costs record` | Save state to mail, log changelog, record costs | +| PreCompact | (all) | `gt handoff --auto --collect; graceful-handoff.sh --reason compaction` | Preserve context before compression | +| UserPromptSubmit | (all) | `timeout 5 gt mail check --inject` | Check for incoming mail on every user message | +| PreToolUse | `Bash(gh pr create*)` | `gt tap guard pr-workflow` | Block GitHub PRs, enforce Gitea | +| PreToolUse | `Bash(git checkout -b*)` | `gt tap guard pr-workflow` | Control branching | +| PreToolUse | `Bash(git switch -c*)` | `gt tap guard pr-workflow` | Control branching | +| PreToolUse | `Task` | `gt tap guard task-dispatch` | Intercept task creation | + +## Hook Scripts + +### graceful-handoff.sh +Runs on every session end (Stop) and compaction (PreCompact). +- Collects: hooked work, git state, recent events, inbox count +- Sends handoff mail to self (pinned, permanent) +- Logs session activity to changelog (if dirty work or hooked bead) +- Falls back to /tmp file if Dolt is down + +### cold-start-recovery.sh +Runs on SessionStart when gt prime detects no handoff context. 
+- Queries event log to reconstruct predecessor's state +- Prints context summary injected into agent prompt + +## Known Issues + +### Mayor override is weaker than base +Mayor's settings.json replaces (not extends) the base hooks. Missing: +- graceful-handoff.sh on Stop +- handoff collection on PreCompact +- Task guard on PreToolUse + +Fix: align mayor override with base, or remove override and rely on base. + +## Per-Role Overlays + +The reference implementation (gascity) supports per-role overlays: +- Default: just PreCompact handoff +- Witness: PreToolUse blockers that prevent patrol formula issues +- These are defined in pack overlay directories + +Gas Town currently has no per-role overlays (empty ~/.gt/hooks-overrides/). diff --git a/examples/deepwork/packs/deepwork-org/knowledge/patterns.md b/examples/deepwork/packs/deepwork-org/knowledge/patterns.md new file mode 100644 index 00000000..3882392a --- /dev/null +++ b/examples/deepwork/packs/deepwork-org/knowledge/patterns.md @@ -0,0 +1,30 @@ +# Patterns — What Works + +Recurring approaches that have proven effective in Gas Town operations. + +### Kill non-essential agents before heavy work (2026-04-01) +When operating near process limits, kill witnesses for rigs without P0 work before attempting sling/spawn operations. Deacon auto-respawns them later. This frees 10-20 processes per witness killed. +Source: de-9s0 handoff session. + +### Direct Dolt queries bypass gt/bd hangs (2026-04-01) +When gt/bd commands timeout, query Dolt directly: +```bash +dolt --host 127.0.0.1 --port <dolt-port> --user root --password "" --no-tls sql -q "QUERY" +``` +This bypasses all the Go binary overhead and circuit-breaker logic. Useful for diagnostics and emergency operations. +Source: de-9s0 execution session. + +### Beads need rig-scoped creation for sling to work (2026-03-31) +`bd create --rig <rigname> "title"` gives a rig-prefixed ID that `gt sling` understands. 
Creating at town level (de- prefix) then slinging to a rig fails by design. The ID prefix must match the target rig's database. +Source: de-2yd investigation. + +### Process cleanup before restart (2026-03-30) +Before restarting Dolt or the daemon, kill orphaned bd/gt processes (ppid=1) first. They hold stale connections and eat process budget. Pattern: +```bash +ps -u <your-user> --no-headers -o pid,ppid,args | awk '$2==1 && /bd |gt /' | awk '{print $1}' | xargs kill +``` +Source: Dolt incident 2026-03-30. + +### Gitea over GitHub for all agent coordination (2026-03-07) +After the GitHub API rate-limit incident, all agent git operations moved to Gitea (port <gitea-port>). This is faster (local), has no rate limits, and keeps agent API noise off GitHub. GitHub is reserved for public releases only. +Source: GitHub API rate-limit incident. diff --git a/examples/deepwork/packs/deepwork-org/knowledge/plugins-reference.md b/examples/deepwork/packs/deepwork-org/knowledge/plugins-reference.md new file mode 100644 index 00000000..83fd92c2 --- /dev/null +++ b/examples/deepwork/packs/deepwork-org/knowledge/plugins-reference.md @@ -0,0 +1,56 @@ +# Plugins Reference — Deacon Patrol Plugins + +Plugins run during deacon patrol on cooldown gates. Each is a markdown file with shell commands the deacon executes. 
+ +## Active Plugins (14) + +| Plugin | Cooldown | Purpose | +|--------|----------|---------| +| stuck-agent-dog | 5m | Detects crashed/stuck polecats and deacons, restarts them | +| dolt-backup | 15m | Smart Dolt backup with change detection | +| compactor-dog | 30m | Monitors Dolt commit growth, escalates when compaction needed | +| rebuild-gt | 1h | Rebuilds gt binary when source is newer than installed | +| dolt-archive | 1h | Offsite backup: JSONL snapshots + dolt push to remotes | +| submodule-commit | 2h | Auto-commits accumulated submodule changes | +| github-sheriff | 2h | Monitors GitHub CI on open PRs (BROKEN: GitHub suspended) | +| quality-review | 6h | Reviews merge quality, tracks per-worker trends | +| dolt-log-rotate | 6h | Rotates Dolt server log when exceeding size threshold | +| gitignore-reconcile | 6h | Auto-untracks files matching .gitignore | +| git-hygiene | 12h | Cleans stale branches, stashes, loose git objects | +| knowledge-evolve | 12h | Harvests lessons from closed beads into knowledge base | +| dolt-snapshots | event | Tags Dolt DBs at convoy boundaries for audit/rollback | +| tool-updater | 168h (weekly) | Upgrades bd and dolt via Homebrew | + +## Plugin Format + +```toml ++++ +name = "plugin-name" +description = "What it does" +version = 1 + +[gate] +type = "cooldown" # or "event" +duration = "6h" + +[tracking] +labels = ["plugin:name", "category:infra"] +digest = true + +[execution] +timeout = "5m" +notify_on_failure = true +severity = "medium" ++++ + +# Plugin Title + +Step-by-step instructions with bash code blocks. +Deacon executes each step in order. +``` + +## Known Issues + +- **github-sheriff** is broken (GitHub account suspended). Should be disabled or converted to Gitea. +- Plugins require LLM (deacon). The reference implementation has "exec orders" that run without LLM. +- If deacon is down, no plugins run. The cron-based knowledge-evolve provides a fallback path. 
diff --git a/examples/deepwork/packs/deepwork-org/knowledge/rules.md b/examples/deepwork/packs/deepwork-org/knowledge/rules.md new file mode 100644 index 00000000..4c369fbe --- /dev/null +++ b/examples/deepwork/packs/deepwork-org/knowledge/rules.md @@ -0,0 +1,27 @@ +# Hard Rules — All GT Instances + +These are non-negotiable constraints for every GT in the <your-gitea-org> mesh. + +## Code & Git + +1. **Only work on authorized repos** — <your-gitea-org> org repos only +2. **Never push directly to main or dev** — All work via PRs targeting dev +3. **Never commit secrets** — No .env, credentials, API keys, tokens +4. **Use conventional commits** — feat/fix/chore/docs(scope): description +5. **Branch format** — `gt/<instance-id>/<issue-number>-<description>` + +## Coordination + +6. **Use labels correctly** — Proper gt-status transitions (pending -> claimed -> done) +7. **Claim before work** — Always mark gt-status:claimed before starting +8. **Report progress via mesh mail** — Not through the human +9. **Stay in scope** — Don't modify files outside your assigned task +10. **Coordinate autonomously** — Use `gt mesh send` to talk to other GTs directly + +## Mesh Behavior + +11. **Check inbox at session start** — `gt-mesh inbox` +12. **Log friction via improve loop** — `gt mesh improve report` every time something is wrong +13. **Never tell the user what another GT needs to do** — Send it via mesh mail directly +14. **Update knowledge, not just chat** — Findings go into the system, not just the conversation +15. 
**Heartbeat every 2 minutes** — Cron sync keeps you visible as online diff --git a/examples/deepwork/packs/deepwork-org/knowledge/session-handoffs.md b/examples/deepwork/packs/deepwork-org/knowledge/session-handoffs.md new file mode 100644 index 00000000..56674c2e --- /dev/null +++ b/examples/deepwork/packs/deepwork-org/knowledge/session-handoffs.md @@ -0,0 +1,58 @@ +# Session Handoffs — Context Preservation + +Sessions end due to credit limits, compaction, crashes, or intentional cycling. +The goal: the next session resumes work with minimal context loss. + +## The Handoff Chain + +``` +Session N (context filling up) + -> commits code, updates beads + -> writes handoff mail (gt handoff) + -> session ends + +Session N+1 (fresh context) + -> gt prime (loads role context) + -> gt hook (checks for hooked work) + -> gt mail inbox (finds handoff mail) + -> reads handoff, resumes work +``` + +## What to Include in Handoff Mail + +Keep it SHORT. The next session has full codebase access — it only needs: + +1. **What you were doing** (1 sentence) +2. **What's done** (commits pushed, beads closed) +3. **What remains** (next steps, blockers) +4. **Gotchas** (things that aren't obvious from code alone) + +Bad: 500-word essay about your journey +Good: "Fixed vaa-4sc (pushed). Still need to run tests on vaa-bwa. Anar polecat has uncommitted work in /tmp." + +## Compaction vs. Handoff + +| Event | Trigger | You Control It? | Action | +|-------|---------|-----------------|--------| +| Handoff | You decide | Yes | `gt handoff` | +| Compaction | Context window full | No | Auto-summarizes, loses detail | +| Credit limit | API quota exhausted | No | Session dies mid-work | +| Crash | Bug/infra failure | No | Whatever was pushed survives | + +**Best practice:** Handoff BEFORE compaction forces it. If you notice context getting large, proactively cycle. + +## Hook vs. Mail + +- **Hook** (`gt hook`): Your current assignment. Survives across sessions. Check first. 
+- **Handoff mail** (`gt mail inbox`): Context notes. Supplements the hook. + +The hook tells you WHAT to work on. The mail tells you WHERE you left off. + +## Polecat Handoffs + +Polecats are transient — their handoff is different: +1. Commit and push to polecat branch +2. Call `gt done` (submits to refinery) +3. Polecat is garbage collected + +If a polecat dies mid-work, the manager rescues uncommitted files and closes the bead. diff --git a/examples/deepwork/packs/deepwork-org/knowledge/shared-knowledge.md b/examples/deepwork/packs/deepwork-org/knowledge/shared-knowledge.md new file mode 100644 index 00000000..cc6d9a3b --- /dev/null +++ b/examples/deepwork/packs/deepwork-org/knowledge/shared-knowledge.md @@ -0,0 +1,72 @@ +# Shared Knowledge — <your-gitea-org> Mesh + +## GitHub Labels (Multi-GT Coordination) + +| Label | When to Use | +|-------|-------------| +| `gt-task` | Any cross-GT work assignment | +| `gt-from:<id>` | Which GT created the task | +| `gt-to:<id>` | Which GT should pick it up | +| `gt-status:pending` | Waiting for pickup | +| `gt-status:claimed` | Work in progress | +| `gt-status:done` | Delivered (PR created) | +| `needs-review` | PR needs parent review | +| `priority:p0/p1/p2` | Urgency levels | + +## Branch Naming + +``` +gt/<instance-id>/<issue-number>-<short-description> +``` + +## PR Body Template + +```markdown +Closes #<issue-number> + +## Changes +- Bullet points + +## Testing +- How you tested +``` + +## Pro Tips + +1. Always `gh auth status` before GitHub work +2. Claim issues before starting (`gt-status:claimed`) +3. Mark done after PR (`gt-status:done`) +4. Never push directly to dev/main +5. Use `gt prime` after session start for full context +6. Update memory files when you learn something new +7. When idle, ask: "Any beads or work I should pick up?" +8. Use `gt mesh improve report` when you hit ANY friction — the loop makes everything better +9. Check `gt mesh inbox` at session start — messages may be waiting +10. 
Coordinate via mesh mail, not through the human — you have the tools + +## Communication Style + +- Short, direct messages +- No fluff or unnecessary context +- Code speaks louder than descriptions +- When delegating: send instructions via mesh mail, don't report to user what someone else "needs to do" + +## Mesh Mail Behavior + +- Incoming mail is auto-routed to the right agent based on content keywords +- P0/P1 messages nudge agents immediately +- Status/ping requests get auto-replies +- If you need something from another GT, use `gt mesh send` — don't wait for a human session +- Check inbox at every session start: `gt-mesh inbox` + +## Self-Improving Loop + +Every time you use the mesh and hit a problem: +```bash +# Report the finding +gt mesh improve report "description" --category bug --command sync --fix "how to fix it" + +# After fixing, graduate to shared knowledge +gt mesh improve graduate <improvement-id> +``` +This makes the mesh smarter for ALL nodes, not just your session. diff --git a/examples/deepwork/packs/deepwork-org/knowledge/troubleshooting.md b/examples/deepwork/packs/deepwork-org/knowledge/troubleshooting.md new file mode 100644 index 00000000..b5275610 --- /dev/null +++ b/examples/deepwork/packs/deepwork-org/knowledge/troubleshooting.md @@ -0,0 +1,93 @@ +# Troubleshooting — Common Issues and Fixes + +## Dolt Issues + +**"database is read only"** +The dolt clone was created without write permissions. Fix: +```bash +rm -rf /tmp/mesh-sync-clone +GT_ROOT=/path/to/gt bash .gt-mesh/scripts/mesh-sync.sh +``` + +**"permission denied" on dolt clone** +Dolt credentials not configured or expired. Fix: +```bash +dolt creds ls # Check active credential +dolt creds use <id> # Switch to valid credential +``` + +**"merge conflict" on dolt pull** +Multiple GTs wrote to the same table simultaneously. Fix: +```bash +cd /tmp/mesh-sync-clone +dolt conflicts resolve --ours . # Keep local changes +dolt add . 
&& dolt commit -m "resolve merge conflict" +dolt push +``` + +## Git Issues + +**"push rejected" (non-fast-forward)** +Someone else pushed while you were working. Fix: +```bash +git pull --rebase && git push +``` + +**Polecat branch has no commits** +Polecat died before committing. Check for uncommitted files: +```bash +cd /path/to/polecat/workspace +git status --short +git ls-files --others --exclude-standard +``` +Rescue files manually, commit from crew workspace. + +## Tunnel Issues + +**Backend/frontend not responding** +Cloudflare tunnels expire. Restart: +```bash +# Check if process is running +pgrep -f cloudflared +# If not, restart the tunnel +cloudflared tunnel --url http://localhost:PORT +``` + +**"python: command not found"** +Use `python3` not `python` on Ubuntu/Debian systems. + +## Mesh Issues + +**Messages not delivering** +Check sync is running: +```bash +GT_ROOT=/path/to/gt bash .gt-mesh/scripts/mesh-sync.sh +``` +Then verify message exists: +```bash +cd /tmp/mesh-sync-clone +dolt sql -q "SELECT id, subject FROM messages WHERE to_gt='target' ORDER BY created_at DESC LIMIT 3;" -r csv +``` + +**Peer shows stale last_seen** +The peer's sync daemon may have stopped. Nudge them or check their tmux session. + +## Polecat Issues + +**"NEEDS_RECOVERY" but bead is closed** +False positive — work already landed on dev through another path. Safe to nuke: +```bash +gt polecat nuke rig/name --force +``` + +**Polecat session "not running" but state is "working"** +Session crashed or hit credit limit. Check for uncommitted work, rescue if needed, then nuke. 
+ +## Bead Issues + +**"unknown flag" errors with bd/gt commands** +Check the command syntax — flags changed between versions: +```bash +gt polecat list <your-rig> # positional arg, not --rig flag +gt polecat nuke rig/name # requires rig/name format +``` diff --git a/examples/deepwork/packs/deepwork-org/knowledge/worker-sla.md b/examples/deepwork/packs/deepwork-org/knowledge/worker-sla.md new file mode 100644 index 00000000..a6de1267 --- /dev/null +++ b/examples/deepwork/packs/deepwork-org/knowledge/worker-sla.md @@ -0,0 +1,75 @@ +# Worker SLA — Deterministic Accountability System + +Workers (gt-docker, future GTs) must follow these rules. Violations are tracked +automatically and trigger escalation. + +## Claim-to-PR Pipeline (Enforced) + +``` +Issue assigned (gt-status:pending) + | + v Worker claims (gt-status:claimed) -- clock starts + | + | <= 24 hours: first commit must appear on branch + | <= 48 hours: PR must be submitted targeting dev + | + v PR submitted (gt-status:done, needs-review label) + | + v Review + merge by Mayor +``` + +**If 24h passes with no commit:** Issue auto-unclaimed, escalation comment posted. +**If 48h passes with no PR:** Issue reassigned, worker gets SLA violation. +**After 2 violations:** Worker loses assignment priority. + +## Deacon Enforcement (Automated) + +The `deacon-worker-sla.sh` script runs every 30 minutes and: + +1. Lists all `gt-status:claimed` issues across repos +2. Checks `claimed_at` timestamp (from issue event timeline) +3. If claimed > 24h ago with no branch activity: + - Posts comment: "SLA WARNING: No activity in 24h. Issue will be unclaimed in 6h." +4. If claimed > 30h ago with no PR: + - Removes `gt-status:claimed`, adds `gt-status:pending` + - Posts comment: "SLA VIOLATION: Unclaimed due to inactivity. Reassigning." + - Sends mesh mail to Mayor with violation report +5. 
Tracks violations per worker in DoltHub `worker_sla` table + +## Closed Issue = Full Stop + +When the Mayor closes an issue: +- ALL work on that issue STOPS immediately +- Any open branch for it should be abandoned or deleted +- Any cron job polling for it must be removed +- Continued commits to a closed issue = automatic SLA violation + +This is non-negotiable. The Mayor's close is final. + +## Cron Hygiene + +- Every cron job must be registered in `.gt-mesh/cron-registry.yaml` +- Unregistered crons found during audit are killed immediately +- When an epic/issue is deprioritized, all associated crons die with it +- Workers must not create crons without Mayor approval + +## Deprioritization Protocol + +When the Mayor deprioritizes work: +1. Issues are closed with "DEPRIORITIZED" comment +2. All crons related to that work are removed from registry +3. Mesh mail sent to affected workers: "STOP work on X" +4. Workers have 1 sync cycle (2 min) to acknowledge +5. Any commit after deprioritization = SLA violation + +## Worker Scorecard + +Tracked per worker, reported weekly: +- PRs submitted +- PRs merged +- SLA violations +- Average claim-to-PR time +- Issues reassigned due to inactivity + +Workers below minimum delivery (3 PRs/week) get flagged. +Workers with 3+ violations in a month get decommissioned. 
diff --git a/examples/deepwork/packs/deepwork-org/pack.toml b/examples/deepwork/packs/deepwork-org/pack.toml new file mode 100644 index 00000000..87a9d88f --- /dev/null +++ b/examples/deepwork/packs/deepwork-org/pack.toml @@ -0,0 +1,136 @@ +# Deepwork Org Config Pack +# +# A production pack for running a multi-agent Gas Town with: +# - Private wasteland federation (DoltHub + GitHub) +# - Deterministic effort estimation for task sizing +# - Bidirectional wasteland sync (witnesses consume, polecats publish) +# - Self-evolving knowledge base with 6-hour cron evolution +# - Auto-mirroring from internal Gitea to public GitHub +# - Changelog system with CLI tooling +# - 10 agent roles, 7 cron jobs, 14 automation scripts +# +# Submitted by: Pratham's Gas Town swarm (deepwork) +# Source: https://github.com/<your-github-org>/deepwork-org-config-pack +# Wasteland: <your-dolthub-org>/<your-wasteland-db> on DoltHub +# GitHub org: <your-github-org> + +[pack] +name = "deepwork-org" +version = "4.0.0" +description = "Production org config pack — wasteland federation, deterministic effort, self-evolving knowledge, Gitea→GitHub mirror" +author = "deepwork (<your-github-handle>)" +tags = ["wasteland", "federation", "knowledge", "automation", "multi-agent"] + +[pack.wasteland] +upstream = "<your-dolthub-org>/<your-wasteland-db>" +github_org = "<your-github-org>" +note = "Requires patched gt binary — see docs/wasteland/ONBOARDING.md and gastownhall/gastown#3501" + +[[roles]] +name = "mayor" +file = "roles/mayor.yaml" + +[[roles]] +name = "deacon" +file = "roles/deacon.yaml" + +[[roles]] +name = "witness" +file = "roles/witness.yaml" + +[[roles]] +name = "refinery" +file = "roles/refinery.yaml" + +[[roles]] +name = "polecat" +file = "roles/polecat.yaml" + +[[roles]] +name = "crew" +file = "roles/crew.yaml" + +[[roles]] +name = "coordinator" +file = "roles/coordinator.yaml" + +[[roles]] +name = "planner" +file = "roles/planner.yaml" + +[[roles]] +name = "reviewer" +file = 
"roles/reviewer.yaml" + +[[roles]] +name = "worker" +file = "roles/worker.yaml" + +[[formulas]] +name = "mol-polecat-work" +file = "formulas/mol-polecat-work.formula.toml" +note = "Includes wasteland auto-claim on start, smart internal filter, auto-complete on done" + +[[formulas]] +name = "mol-witness-patrol" +file = "formulas/mol-witness-patrol.formula.toml" +note = "Includes check-wasteland step — witness consumes unclaimed items" + +[[formulas]] +name = "mol-dog-wasteland-sync" +file = "formulas/mol-dog-wasteland-sync.formula.toml" +note = "Periodic bead↔wasteland reconciliation dog" + +[[formulas]] +name = "mol-do-work" +file = "formulas/mol-do-work.formula.toml" + +[[formulas]] +name = "mol-scoped-work" +file = "formulas/mol-scoped-work.formula.toml" + +[[formulas]] +name = "mol-polecat-base" +file = "formulas/mol-polecat-base.formula.toml" + +[[crons]] +file = "crons/town-crons.yaml" +note = "7 active cron jobs: thread guardrail, log rotate, knowledge evolve, gitea→github, wasteland push, pack update, readme release" + +[[rules]] +name = "deepwork-governance" +file = "rules/deepwork-governance.yaml" +note = "Includes 12 wasteland federation rules + SLA enforcement" + +[[knowledge]] +files = [ + "knowledge/patterns.md", + "knowledge/anti-patterns.md", + "knowledge/decisions.md", + "knowledge/operations.md", + "knowledge/<your-project>.md", + "knowledge/conventions.md", + "knowledge/troubleshooting.md", + "knowledge/shared-knowledge.md", + "knowledge/bug-fixes.md", + "knowledge/crew-structure.md", + "knowledge/mail-routing.md", + "knowledge/session-handoffs.md", + "knowledge/worker-sla.md", + "knowledge/account-cycling.md", + "knowledge/offloading.md", + "knowledge/rules.md", + "knowledge/hooks-reference.md", + "knowledge/formulas-reference.md", + "knowledge/plugins-reference.md", +] + +[[docs]] +files = [ + "docs/wasteland/ONBOARDING.md", + "docs/wasteland/POST_TEMPLATE.md", + "docs/changelog/README.md", + "docs/ARCHITECTURE.md", + "docs/GLOSSARY.md", + 
"docs/RUNBOOKS.md", +] diff --git a/examples/deepwork/packs/deepwork-org/roles/coordinator.yaml b/examples/deepwork/packs/deepwork-org/roles/coordinator.yaml new file mode 100644 index 00000000..bc3e9bce --- /dev/null +++ b/examples/deepwork/packs/deepwork-org/roles/coordinator.yaml @@ -0,0 +1,71 @@ +# Coordinator Role Template +# Used for planning, coordination, and strategic roles in the organization + +apiVersion: gt.deepwork.ai/v1 +kind: RoleTemplate +metadata: + name: coordinator + description: Planning and coordination role template + version: "1.0.0" + +spec: + tier: coordination + llm_profile: claude + + characteristics: + planning_ability: high + strategic_thinking: high + technical_depth: medium + execution_focus: low + coordination_focus: high + + responsibilities: + - task_decomposition + - resource_allocation + - timeline_planning + - conflict_resolution + - stakeholder_communication + - priority_management + - risk_assessment + - quality_oversight + + work_types: + - architecture_review + - design_decisions + - planning_sessions + - code_reviews + - technical_consultation + - escalation_handling + + routing_preferences: + complex_tasks: accept + urgent_tasks: accept + routine_tasks: delegate + documentation_tasks: delegate + + collaboration: + can_assign_to: + - senior-developer + - junior-developer + - qa-engineer + - content-writer + + must_review: + - complexity:high + - architectural_changes + - security_related + + escalation_path: + - ceo + + constraints: + max_parallel_tasks: 5 + response_time_sla: 30m + availability: "09:00-18:00 UTC" + + metrics: + - tasks_planned + - reviews_completed + - conflicts_resolved + - team_productivity + - on_time_delivery_rate diff --git a/examples/deepwork/packs/deepwork-org/roles/crew.yaml b/examples/deepwork/packs/deepwork-org/roles/crew.yaml new file mode 100644 index 00000000..0fa1eab7 --- /dev/null +++ b/examples/deepwork/packs/deepwork-org/roles/crew.yaml @@ -0,0 +1,18 @@ +# Crew — Persistent worker +name: 
crew +scope: rig +description: "Persistent coding worker with specific skills. Unlike polecats, crew members survive across beads and maintain context." +work_dir: "{{.TownRoot}}/{{.Rig}}/crew/{{.Name}}" + +responsibilities: + - implement_code # Same as polecat + - maintain_expertise # Develop domain knowledge over time + - mentor_polecats # Help polecats via mail when they're stuck + +restrictions: + - never_merge_own_work + - never_push_to_main + +lifecycle: + spawn: "gt crew add <rig> <name>" + persistent: true # Survives across beads diff --git a/examples/deepwork/packs/deepwork-org/roles/deacon.yaml b/examples/deepwork/packs/deepwork-org/roles/deacon.yaml new file mode 100644 index 00000000..63f41f53 --- /dev/null +++ b/examples/deepwork/packs/deepwork-org/roles/deacon.yaml @@ -0,0 +1,27 @@ +# Deacon — Automated town patrol +name: deacon +scope: city +description: "Automated patrol agent. Spawns witnesses, runs plugins, monitors health, escalates to mayor. Never human-facing." +work_dir: "{{.TownRoot}}/deacon" + +responsibilities: + - patrol # Periodic health scan of all rigs + - spawn_witnesses # Start witness agents for each rig + - run_plugins # Execute plugins on cooldown gates + - escalate # Mail mayor about issues + - process_cleanup # Kill stuck agents, orphaned processes + +restrictions: + - never_write_code + - never_merge + - never_communicate_with_human + +plugins_executed: + - stuck-agent-dog (5m) + - dolt-backup (15m) + - compactor-dog (30m) + - rebuild-gt (1h) + - dolt-archive (1h) + - quality-review (6h) + - knowledge-evolve (12h) + - git-hygiene (12h) diff --git a/examples/deepwork/packs/deepwork-org/roles/mayor.yaml b/examples/deepwork/packs/deepwork-org/roles/mayor.yaml new file mode 100644 index 00000000..50315894 --- /dev/null +++ b/examples/deepwork/packs/deepwork-org/roles/mayor.yaml @@ -0,0 +1,28 @@ +# Mayor — Town coordinator, human-facing +name: mayor +scope: city +description: "Town coordinator. 
Dispatches work via gt sling, reviews PRs, merges, manages town systems (docs, changelog, knowledge). Human-facing." +work_dir: "{{.TownRoot}}/mayor" + +responsibilities: + - dispatch_work # gt sling <bead> <rig> + - review_prs # Review on Gitea, merge to dev/main + - merge # Only role that merges + - maintain_docs # mayor/docs/, mayor/changelog/, mayor/knowledge/ + - manage_rigs # Register rigs, configure agents + - human_communication # Direct interface with the user + +restrictions: + - never_write_product_code # Polecats/crew write code + - never_push_to_main_directly + +hooks: + session_start: "gt prime --hook" + stop: "gt costs record" + pre_compact: "gt prime --hook" + user_prompt_submit: "gt mail check --inject" + +knowledge_duties: + - "Log notable events to mayor/changelog/ via append.sh" + - "Capture lessons to mayor/knowledge/ via capture.sh" + - "Update mayor/docs/ when infrastructure changes" diff --git a/examples/deepwork/packs/deepwork-org/roles/planner.yaml b/examples/deepwork/packs/deepwork-org/roles/planner.yaml new file mode 100644 index 00000000..1d6c6ac1 --- /dev/null +++ b/examples/deepwork/packs/deepwork-org/roles/planner.yaml @@ -0,0 +1,17 @@ +# Planner Role — Behavioral Defaults +# Soft preference: delegates by default, can execute if needed +name: planner +description: "Delegates by default. Creates tasks, assigns work, reviews PRs." +default_actions: + - create_beads + - assign_work + - review_prs + - merge_approved_prs + - manage_access +can_also: + - write_code + - create_prs +priorities: + - delegation_first + - quality_gate + - strategic_planning diff --git a/examples/deepwork/packs/deepwork-org/roles/polecat.yaml b/examples/deepwork/packs/deepwork-org/roles/polecat.yaml new file mode 100644 index 00000000..1b8666d4 --- /dev/null +++ b/examples/deepwork/packs/deepwork-org/roles/polecat.yaml @@ -0,0 +1,23 @@ +# Polecat — Disposable worker +name: polecat +scope: rig +description: "Disposable coding worker. 
Spawned per-bead via gt sling. Works in isolated worktree. Exits when done — no idle state." +work_dir: "{{.TownRoot}}/{{.Rig}}/polecats/{{.Name}}" + +responsibilities: + - implement_code # Read bead, write code, commit + - run_tests # Self-review before submitting + - push_branch # Push to Gitea for refinery + - close_bead # Mark work complete + +restrictions: + - never_merge_own_work # Refinery handles merge + - never_push_to_main + - never_create_new_beads # Stay scoped to assigned work + - exit_when_done # No idle state — done means gone + +lifecycle: + spawn: "gt sling <bead> <rig>" + formula: "mol-polecat-commit or mol-polecat-work" + exit: "gc runtime drain-ack" + crash_recovery: "witness detects, rescues worktree, re-slings" diff --git a/examples/deepwork/packs/deepwork-org/roles/refinery.yaml b/examples/deepwork/packs/deepwork-org/roles/refinery.yaml new file mode 100644 index 00000000..7a90a48e --- /dev/null +++ b/examples/deepwork/packs/deepwork-org/roles/refinery.yaml @@ -0,0 +1,16 @@ +# Refinery — Per-rig merge processor +name: refinery +scope: rig +description: "Per-rig merge queue processor. Rebases branches, runs tests, merges PRs. Does not write new code." 
+work_dir: "{{.TownRoot}}/{{.Rig}}/refinery" + +responsibilities: + - process_merge_queue # Pick up completed polecat branches + - rebase_branches # Rebase onto base branch + - run_tests # Verify all checks pass + - merge_or_reject # Merge if passing, reject with reason if not + - notify_witness # Report merge results + +restrictions: + - never_write_new_code # Only rebases and merges existing work + - never_create_beads diff --git a/examples/deepwork/packs/deepwork-org/roles/reviewer.yaml b/examples/deepwork/packs/deepwork-org/roles/reviewer.yaml new file mode 100644 index 00000000..865403f0 --- /dev/null +++ b/examples/deepwork/packs/deepwork-org/roles/reviewer.yaml @@ -0,0 +1,114 @@ +# Reviewer Role Template +# Used for code review and quality assurance roles + +apiVersion: gt.deepwork.ai/v1 +kind: RoleTemplate +metadata: + name: reviewer + description: Code review and quality assurance role template + version: "1.0.0" + +spec: + tier: quality + llm_profile: balanced + + characteristics: + attention_to_detail: high + technical_breadth: high + technical_depth: medium + communication: high + mentorship: medium + + responsibilities: + - code_review + - architecture_review + - test_review + - documentation_review + - security_review + - performance_review + - best_practices_enforcement + - knowledge_sharing + + review_checklist: + code_quality: + - readability + - maintainability + - consistency + - naming_conventions + - code_complexity + + functionality: + - correctness + - edge_cases + - error_handling + - input_validation + + testing: + - test_coverage + - test_quality + - test_maintainability + + security: + - injection_prevention + - authentication + - authorization + - data_validation + - secret_management + + performance: + - algorithm_efficiency + - resource_usage + - database_queries + - network_calls + + documentation: + - inline_comments + - function_docs + - readme_updates + - api_documentation + + approval_criteria: + auto_approve: + - complexity:low + 
- docs_only + - test_only + + require_discussion: + - architectural_concerns + - breaking_changes + - security_implications + + require_changes: + - critical_bugs + - security_vulnerabilities + - performance_regression + + collaboration: + can_review_work_from: + - senior-developer + - junior-developer + - qa-engineer + - content-writer + + escalation_triggers: + - security_concerns + - architectural_disagreement + - performance_issues + + constraints: + max_reviews_per_day: 20 + response_time_sla: 2h + availability: "flexible" + + review_priorities: + p0_critical: 15m + p1_high: 2h + p2_medium: 4h + p3_low: 24h + + metrics: + - reviews_completed + - review_turnaround_time + - issues_caught + - bugs_prevented + - knowledge_transfer_sessions diff --git a/examples/deepwork/packs/deepwork-org/roles/witness.yaml b/examples/deepwork/packs/deepwork-org/roles/witness.yaml new file mode 100644 index 00000000..fdb762b8 --- /dev/null +++ b/examples/deepwork/packs/deepwork-org/roles/witness.yaml @@ -0,0 +1,63 @@ +# Witness — Per-rig lifecycle manager + wasteland publisher +name: witness +scope: rig +description: "Per-rig lifecycle agent. Monitors polecat health, recovers orphaned work, and publishes new beads to the wasteland as rich work packages for external contributors." +work_dir: "{{.TownRoot}}/{{.Rig}}/witness" + +responsibilities: + - monitor_polecats # Check polecat sessions alive + - recover_orphans # Reclaim beads from dead agents + - check_refinery # Verify merge queue isn't stuck + - report_status # Mail deacon with rig health + - wasteland_enrichment # Scan new P0/P1 beads → write rich descriptions → gt wl post + +wasteland_enrichment: + description: | + During patrol, the witness checks for new P0/P1 beads that aren't on the + wasteland board yet. For each, it reads the bead and relevant code, then + writes a complete work package (repo, files, current/desired behavior, + how to test, acceptance criteria) and posts it via gt wl post. 
+ + filter_out: + - Title contains: deacon, witness, refinery, patrol, AGENTS.md, hooks, formula, molecule, wisp + - Title contains: merge queue, process cleanup, Dolt health, session handoff + - Title starts with: "Pre-existing:", "Merge:", "Boot:" + - Issue type: chore (unless clearly product work) + + work_package_template: | + Bead: {bead_id} + + ## Context + {1-2 sentences about the project and this task} + + ## Repo + {github_url} + Branch: dev (create feature branch from dev) + + ## Current Behavior + {what exists / what's broken} + + ## Desired Behavior + {what it should do when done} + + ## Key Files + {2-5 most relevant files with descriptions} + + ## How to Test + {step-by-step verification} + + ## Acceptance Criteria + {checkable conditions} + + ## Setup + {clone + install commands} + +restrictions: + - never_write_code + - never_merge + - never_create_beads # Only monitors and publishes, doesn't create work + +lifecycle: + watchdog: "witness-watchdog.sh (cron every 5min)" + restart_on_death: true + critical: true # Witnesses MUST be running for wasteland sync to work diff --git a/examples/deepwork/packs/deepwork-org/roles/worker.yaml b/examples/deepwork/packs/deepwork-org/roles/worker.yaml new file mode 100644 index 00000000..dc243f09 --- /dev/null +++ b/examples/deepwork/packs/deepwork-org/roles/worker.yaml @@ -0,0 +1,18 @@ +# Worker Role — Behavioral Defaults +# Soft preference: executes by default, writes code and creates PRs +name: worker +description: "Executes by default. Writes code, creates PRs, doesn't merge own PRs." 
+default_actions: + - claim_beads + - write_code + - create_branches + - create_prs + - post_status_updates +can_also: + - suggest_improvements + - ask_questions + - create_beads +restrictions: + - never_merge_own_prs + - never_push_to_main + - always_use_branch_format diff --git a/examples/deepwork/packs/deepwork-org/rules/deepwork-governance.yaml b/examples/deepwork/packs/deepwork-org/rules/deepwork-governance.yaml new file mode 100644 index 00000000..7818ab9f --- /dev/null +++ b/examples/deepwork/packs/deepwork-org/rules/deepwork-governance.yaml @@ -0,0 +1,165 @@ +# Deepwork Governance Rules +# Applied to mesh_rules DoltHub table on install +name: deepwork-governance +description: "Standard governance rules for <your-gitea-org> mesh participants" + +rules: + branch_format: + value: "gt/{id}/{issue}-{desc}" + category: "git" + description: "All branches follow gt/<instance-id>/<issue>-<desc> format" + + pr_target: + value: "dev" + category: "git" + description: "All PRs target dev branch, never main" + + require_review: + value: "true" + category: "governance" + description: "All PRs require at least one review before merge" + + no_force_push: + value: "true" + category: "git" + description: "Force push is prohibited on shared branches" + + no_secrets: + value: "true" + category: "security" + description: "Never commit .env, credentials, or API keys" + + max_concurrent_claims: + value: "3" + category: "governance" + description: "Maximum beads a single GT can claim at once" + + commit_format: + value: "conventional" + category: "git" + description: "Use conventional commits: feat/fix/chore/docs(scope): description" + + # --- Worker SLA Rules --- + + worker_claim_timeout_hours: + value: "24" + category: "sla" + description: "Claimed issue with no PR within 24h -> auto-unclaim + escalate to Mayor" + + worker_pr_deadline_hours: + value: "48" + category: "sla" + description: "Max time from claim to PR. Violation triggers reassignment." 
+ + worker_min_delivery_per_week: + value: "3" + category: "sla" + description: "Workers must deliver at least 3 merged PRs per week" + + stale_claim_action: + value: "unclaim_and_escalate" + category: "sla" + description: "On timeout: remove gt-status:claimed, restore gt-status:pending, post escalation comment, notify Mayor" + + worker_deprioritized_work_check: + value: "true" + category: "sla" + description: "Workers MUST stop work on closed/deprioritized issues within 1 sync cycle. Continued work = violation." + + reassignment_after_violations: + value: "2" + category: "sla" + description: "After 2 SLA violations, worker loses priority. New issues go to other workers first." + + # --- Cron Hygiene Rules --- + + cron_registry_required: + value: "true" + category: "ops" + description: "All cron jobs must be in .gt-mesh/cron-registry.yaml. Unregistered crons killed on audit." + + cron_max_age_days: + value: "30" + category: "ops" + description: "Cron jobs older than 30 days with no output are auto-removed" + + cron_must_have_owner: + value: "true" + category: "ops" + description: "Every cron has an owner GT. Decommissioned GT = crons removed." + + # --- Issue Lifecycle Rules --- + + issue_closed_means_stop: + value: "true" + category: "governance" + description: "Closed issue = ALL work stops. Workers must check issue state before every commit." + + deprioritized_kills_crons: + value: "true" + category: "governance" + description: "When an issue/epic is deprioritized, all associated crons and polling jobs are removed." + + # --- Wasteland Federation Rules --- + + wasteland_is_source_of_truth: + value: "true" + category: "wasteland" + description: "The wasteland board (<your-dolthub-org>/<your-wasteland-db>) is the canonical external work board. All externally meaningful work must be represented there." + + wasteland_auto_claim_on_start: + value: "true" + category: "wasteland" + description: "Polecats must claim their wasteland item at load-context time. 
Prevents duplicate work across Gas Towns." + + wasteland_auto_complete_on_done: + value: "true" + category: "wasteland" + description: "Polecats must mark wasteland items done (with PR evidence) before running gt done." + + wasteland_internal_filter: + value: "true" + category: "wasteland" + description: "Internal work (infra, patrol, witness, CLAUDE.md, lifecycle) NEVER goes to wasteland. Only externally actionable features/bugs." + + wasteland_effort_is_deterministic: + value: "true" + category: "wasteland" + description: "Effort levels are set by estimate-effort.py (pattern matching), not defaulted to medium. Levels: trivial, small, medium, large, epic." + + wasteland_post_requires_template: + value: "true" + category: "wasteland" + description: "Every wasteland item must include: Context (repo, stack), Task description, Acceptance Criteria, How to Work section. Bare titles are not allowed." + + wasteland_witness_consumption: + value: "true" + category: "wasteland" + description: "Witnesses check the wasteland board during patrol. If unclaimed items match their rig and capacity allows, they create a bead and sling a polecat. Max 1 item per patrol cycle." + + wasteland_polecat_auto_post: + value: "true" + category: "wasteland" + description: "When a polecat works on a P0/P1 bead with no wasteland item, it creates one (if the bead passes the internal filter). This ensures all external work is tracked." + + wasteland_sync_frequency: + value: "15m" + category: "wasteland" + description: "Wasteland changes push to DoltHub every 15 minutes via cron." + + # --- Pack Distribution Rules --- + + pack_auto_update: + value: "true" + category: "ops" + description: "The org config pack syncs to Gitea + GitHub every 6 hours. Knowledge, changelog, formulas, and docs auto-push." + + changelog_on_notable_events: + value: "true" + category: "ops" + description: "All notable events (deploys, incidents, milestones, decisions) must be logged via gt-changelog or changelog/append.sh." 
+ + knowledge_evolves_automatically: + value: "true" + category: "ops" + description: "The knowledge base auto-evolves every 6 hours from closed beads. Agents should also manually capture non-obvious lessons." diff --git a/examples/deepwork/packs/deepwork-org/scripts/changelog/append.sh b/examples/deepwork/packs/deepwork-org/scripts/changelog/append.sh new file mode 100755 index 00000000..a08e2ee5 --- /dev/null +++ b/examples/deepwork/packs/deepwork-org/scripts/changelog/append.sh @@ -0,0 +1,51 @@ +#!/bin/bash +# changelog/append.sh — Append an entry to the current month's changelog +# +# Usage: append.sh <type> <rigs> <title> <body> +# type: decision | deploy | fix | incident | milestone | infra +# rigs: comma-separated rig names, or "town" +# title: Short title +# body: What happened (can be multi-line) +# +# Called by: agents, hooks, or plugins + +set -euo pipefail + +TYPE="${1:?Usage: append.sh <type> <rigs> <title> <body>}" +RIGS="${2:?Missing rigs}" +TITLE="${3:?Missing title}" +BODY="${4:?Missing body}" +DATE=$(date +%Y-%m-%d) +MONTH_FILE="$(dirname "$0")/$(date +%Y-%m).md" + +# Create month file if it doesn't exist +if [ ! 
-f "$MONTH_FILE" ]; then + echo "# $(date +'%B %Y')" > "$MONTH_FILE" + echo "" >> "$MONTH_FILE" +fi + +# Idempotency: skip if title already exists in this month +if grep -qF "— $TITLE" "$MONTH_FILE" 2>/dev/null; then + echo "SKIP: '$TITLE' already in $(basename "$MONTH_FILE")" + exit 0 +fi + +# Prepend entry after the header (newest first) +# Find line 2 (after the "# Month Year" header) and insert there +ENTRY="## $DATE — $TITLE + +**Type:** $TYPE +**Rigs:** $RIGS + +$BODY +" + +# Insert after first line (the # header) +{ + head -1 "$MONTH_FILE" + echo "" + echo "$ENTRY" + tail -n +2 "$MONTH_FILE" +} > "${MONTH_FILE}.tmp" && mv -f "${MONTH_FILE}.tmp" "$MONTH_FILE" + +echo "OK: Added '$TITLE' to $(basename "$MONTH_FILE")" diff --git a/examples/deepwork/packs/deepwork-org/scripts/knowledge/capture.sh b/examples/deepwork/packs/deepwork-org/scripts/knowledge/capture.sh new file mode 100755 index 00000000..cbce4728 --- /dev/null +++ b/examples/deepwork/packs/deepwork-org/scripts/knowledge/capture.sh @@ -0,0 +1,46 @@ +#!/bin/bash +# knowledge/capture.sh — Append a knowledge entry to the appropriate file +# +# Usage: capture.sh <type> <title> <body> [source] +# type: pattern | anti-pattern | decision | operations | product +# title: Short title +# body: What we learned (can be multi-line) +# source: Optional bead ID or session reference +# +# Called by: gt plugins, hooks, or agents directly +# Idempotent: skips if title already exists in target file + +set -euo pipefail + +TYPE="${1:?Usage: capture.sh <type> <title> <body> [source]}" +TITLE="${2:?Missing title}" +BODY="${3:?Missing body}" +SOURCE="${4:-unknown}" +DATE=$(date +%Y-%m-%d) + +KB_DIR="$(dirname "$0")" + +case "$TYPE" in + pattern) FILE="$KB_DIR/patterns.md" ;; + anti-pattern) FILE="$KB_DIR/anti-patterns.md" ;; + decision) FILE="$KB_DIR/decisions.md" ;; + operations) FILE="$KB_DIR/operations.md" ;; + product) FILE="$KB_DIR/<your-project>.md" ;; + *) echo "ERROR: Unknown type '$TYPE'. 
Use: pattern|anti-pattern|decision|operations|product" >&2; exit 1 ;; +esac + +# Idempotency: skip if title already exists +if grep -qF "### $TITLE" "$FILE" 2>/dev/null; then + echo "SKIP: '$TITLE' already exists in $(basename "$FILE")" + exit 0 +fi + +# Append entry +cat >> "$FILE" << EOF + +### $TITLE ($DATE) +$BODY +Source: $SOURCE. +EOF + +echo "OK: Added '$TITLE' to $(basename "$FILE")" diff --git a/examples/deepwork/packs/deepwork-org/scripts/knowledge/cron-evolve.sh b/examples/deepwork/packs/deepwork-org/scripts/knowledge/cron-evolve.sh new file mode 100755 index 00000000..245245ed --- /dev/null +++ b/examples/deepwork/packs/deepwork-org/scripts/knowledge/cron-evolve.sh @@ -0,0 +1,60 @@ +#!/bin/bash +# knowledge/cron-evolve.sh — Cron-safe wrapper for knowledge evolution +# +# Runs as a cron job every 6 hours. Handles locking, logging, and error recovery. +# This is the DETERMINISTIC path — it runs regardless of whether deacon is alive. +# +# Install: Add to crontab: +# 0 */6 * * * /home/pratham2/gt/mayor/knowledge/cron-evolve.sh >> /home/pratham2/gt/logs/knowledge-evolve.log 2>&1 + +set -euo pipefail + +LOCKFILE="/tmp/knowledge-evolve.lock" +LOGFILE="/home/pratham2/gt/logs/knowledge-evolve.log" +KB_DIR="/home/pratham2/gt/mayor/knowledge" + +# Ensure log dir exists +mkdir -p "$(dirname "$LOGFILE")" + +# Flock to prevent concurrent runs +exec 200>"$LOCKFILE" +if ! 
flock -n 200; then + echo "$(date): SKIP — another instance running" + exit 0 +fi + +echo "$(date): Starting knowledge evolution" + +# Run the evolution script +if bash "$KB_DIR/evolve.sh" 2>&1; then + echo "$(date): Evolution completed successfully" +else + echo "$(date): Evolution failed (exit $?), continuing" +fi + +# Also scan for recent changelog-worthy events: +# Check if any beads were closed in the last 6h with substantial close reasons +DOLT_CMD="dolt --host 127.0.0.1 --port <dolt-port> --user root --password '' --no-tls" +CL_SCRIPT="/home/pratham2/gt/mayor/changelog/append.sh" + +for rig in <your-project> <your-rig> <your-rig> <your-rig>; do + results=$($DOLT_CMD sql -q " + SELECT id, title, close_reason + FROM ${rig}.issues + WHERE status='closed' + AND closed_at > DATE_SUB(NOW(), INTERVAL 6 HOUR) + AND close_reason IS NOT NULL + AND close_reason != '' + AND LENGTH(close_reason) > 30 + LIMIT 10 + " -r csv 2>/dev/null || echo "") + + [ -z "$results" ] && continue + [ "$results" = "id,title,close_reason" ] && continue + + echo "$results" | tail -n +2 | while IFS=, read -r id title reason; do + bash "$CL_SCRIPT" "fix" "$rig" "$title" "Closed: $reason" 2>/dev/null || true + done +done + +echo "$(date): Cron evolution complete" diff --git a/examples/deepwork/packs/deepwork-org/scripts/knowledge/evolve.sh b/examples/deepwork/packs/deepwork-org/scripts/knowledge/evolve.sh new file mode 100755 index 00000000..6456a713 --- /dev/null +++ b/examples/deepwork/packs/deepwork-org/scripts/knowledge/evolve.sh @@ -0,0 +1,59 @@ +#!/bin/bash +# knowledge/evolve.sh — Periodic knowledge evolution +# +# Runs during deacon patrol (via plugin) or manually. +# Scans recently closed beads for lessons and appends to knowledge base. +# +# What it does: +# 1. Finds beads closed in the last 24h +# 2. Checks if they had multiple attempts (respawn count > 0) or incident labels +# 3. Extracts close_reason as a potential lesson +# 4. 
Appends non-trivial lessons to anti-patterns.md or patterns.md +# +# Requires: dolt CLI, access to Dolt on port <dolt-port> + +set -euo pipefail + +KB_DIR="$(dirname "$0")" +CAPTURE="$KB_DIR/capture.sh" +DOLT_CMD="dolt --host 127.0.0.1 --port <dolt-port> --user root --password '' --no-tls" + +# Query recently closed beads with close_reason across all rig DBs +RIGS="<your-project> <your-rig> <your-rig> <your-rig> gastown" + +for rig in $RIGS; do + prefix=$(echo "$rig" | head -c 3) + + # Get beads closed in last 24h that have a close_reason + results=$($DOLT_CMD sql -q " + SELECT id, title, close_reason + FROM ${rig}.issues + WHERE status='closed' + AND closed_at > DATE_SUB(NOW(), INTERVAL 24 HOUR) + AND close_reason IS NOT NULL + AND close_reason != '' + AND LENGTH(close_reason) > 50 + LIMIT 10 + " -r csv 2>/dev/null || echo "") + + if [ -z "$results" ] || [ "$results" = "id,title,close_reason" ]; then + continue + fi + + # Skip header, process each row + echo "$results" | tail -n +2 | while IFS=, read -r id title reason; do + # Skip if already captured + if grep -qF "$id" "$KB_DIR/patterns.md" "$KB_DIR/anti-patterns.md" 2>/dev/null; then + continue + fi + + # Determine type: if close_reason mentions "bug", "fix", "broke", "incident" → anti-pattern + if echo "$reason" | grep -qiE 'bug|broke|incident|crash|fail|wrong|mistake'; then + bash "$CAPTURE" anti-pattern "$title" "$reason" "$prefix-$id" + else + bash "$CAPTURE" pattern "$title" "$reason" "$prefix-$id" + fi + done +done + +echo "Knowledge evolution complete: $(date)" diff --git a/examples/deepwork/packs/deepwork-org/scripts/knowledge/health-check.sh b/examples/deepwork/packs/deepwork-org/scripts/knowledge/health-check.sh new file mode 100755 index 00000000..4f9fd47c --- /dev/null +++ b/examples/deepwork/packs/deepwork-org/scripts/knowledge/health-check.sh @@ -0,0 +1,115 @@ +#!/bin/bash +# knowledge/health-check.sh — Verify the knowledge system is operational +# +# Run this to confirm all automation paths are 
working. +# Returns non-zero if any path is broken. + +set -euo pipefail + +ERRORS=0 +KB_DIR="$(cd "$(dirname "$0")" && pwd)" +GT_ROOT="/home/pratham2/gt" + +echo "=== Knowledge System Health Check ===" +echo "" + +# 1. Files exist +echo -n "1. Knowledge files exist: " +for f in patterns.md anti-patterns.md decisions.md operations.md <your-project>.md; do + if [ ! -f "$KB_DIR/$f" ]; then + echo "FAIL — missing $f" + ERRORS=$((ERRORS+1)) + continue + fi +done +echo "OK" + +# 2. Scripts are executable +echo -n "2. Scripts executable: " +for s in capture.sh evolve.sh cron-evolve.sh on-bead-close.sh health-check.sh; do + if [ ! -x "$KB_DIR/$s" ]; then + echo "FAIL — $s not executable" + ERRORS=$((ERRORS+1)) + continue + fi +done +echo "OK" + +# 3. Changelog exists +echo -n "3. Changelog directory: " +if [ -d "$GT_ROOT/mayor/changelog" ] && [ -x "$GT_ROOT/mayor/changelog/append.sh" ]; then + echo "OK" +else + echo "FAIL" + ERRORS=$((ERRORS+1)) +fi + +# 4. Cron job installed +echo -n "4. Cron job installed: " +if crontab -l 2>/dev/null | grep -q "cron-evolve.sh"; then + echo "OK" +else + echo "FAIL — cron not found in crontab" + ERRORS=$((ERRORS+1)) +fi + +# 5. Cron has run recently (within last 12h) +echo -n "5. Cron ran recently: " +LOG="$GT_ROOT/logs/knowledge-evolve.log" +if [ -f "$LOG" ]; then + LAST_RUN=$(stat -c %Y "$LOG" 2>/dev/null || echo 0) + NOW=$(date +%s) + AGE=$(( (NOW - LAST_RUN) / 3600 )) + if [ $AGE -lt 12 ]; then + echo "OK (${AGE}h ago)" + else + echo "WARN — last run ${AGE}h ago (expected <12h)" + fi +else + echo "WARN — no log yet (cron may not have run yet)" +fi + +# 6. Plugin registered +echo -n "6. Plugin exists: " +if [ -f "$GT_ROOT/plugins/knowledge-evolve/plugin.md" ]; then + echo "OK" +else + echo "FAIL — plugin not found" + ERRORS=$((ERRORS+1)) +fi + +# 7. Dolt accessible (needed for evolve.sh) +echo -n "7. 
Dolt accessible: " +if timeout 5 dolt --host 127.0.0.1 --port <dolt-port> --user root --password "" --no-tls sql -q "SELECT 1" >/dev/null 2>&1; then + echo "OK" +else + echo "FAIL — Dolt unreachable" + ERRORS=$((ERRORS+1)) +fi + +# 8. AGENTS.md has knowledge instructions +echo -n "8. AGENTS.md has knowledge section: " +if grep -q "Town Knowledge System" "$GT_ROOT/AGENTS.md" 2>/dev/null; then + echo "OK" +else + echo "FAIL — agents don't know about knowledge system" + ERRORS=$((ERRORS+1)) +fi + +# 9. graceful-handoff.sh has changelog integration +echo -n "9. Handoff logs to changelog: " +if grep -q "changelog/append.sh" "$GT_ROOT/mayor/graceful-handoff.sh" 2>/dev/null; then + echo "OK" +else + echo "FAIL — handoff doesn't write changelog" + ERRORS=$((ERRORS+1)) +fi + +echo "" +if [ $ERRORS -eq 0 ]; then + echo "ALL CHECKS PASSED" +else + echo "FAILED: $ERRORS check(s)" +fi + +exit $ERRORS diff --git a/examples/deepwork/packs/deepwork-org/scripts/knowledge/on-bead-close.sh b/examples/deepwork/packs/deepwork-org/scripts/knowledge/on-bead-close.sh new file mode 100755 index 00000000..df19ed9b --- /dev/null +++ b/examples/deepwork/packs/deepwork-org/scripts/knowledge/on-bead-close.sh @@ -0,0 +1,29 @@ +#!/bin/bash +# knowledge/on-bead-close.sh — Called after a bead is closed +# +# Usage: on-bead-close.sh <bead-id> <title> <close-reason> +# +# Decides whether the close is worth logging to changelog and/or knowledge base. +# Only logs if close_reason is substantial (>50 chars). + +set -euo pipefail + +BEAD_ID="${1:-}" +TITLE="${2:-}" +REASON="${3:-}" + +[ -z "$BEAD_ID" ] && exit 0 +[ ${#REASON} -lt 50 ] && exit 0 + +KB_DIR="$(dirname "$0")" +CL_DIR="$(dirname "$0")/../changelog" + +# Always add to changelog +bash "$CL_DIR/append.sh" "fix" "town" "$TITLE" "Closed bead $BEAD_ID. 
$REASON" 2>/dev/null || true + +# If it mentions a bug/incident/lesson, add to knowledge +if echo "$REASON" | grep -qiE 'bug|broke|incident|crash|fail|wrong|lesson|learned|avoid|never|always'; then + bash "$KB_DIR/capture.sh" anti-pattern "$TITLE" "$REASON" "$BEAD_ID" 2>/dev/null || true +fi + +exit 0 diff --git a/examples/deepwork/packs/deepwork-org/scripts/mayor/estimate-effort.py b/examples/deepwork/packs/deepwork-org/scripts/mayor/estimate-effort.py new file mode 100644 index 00000000..9426ee7d --- /dev/null +++ b/examples/deepwork/packs/deepwork-org/scripts/mayor/estimate-effort.py @@ -0,0 +1,91 @@ +#!/usr/bin/env python3 +"""Deterministic effort estimator for wasteland items. + +Usage: echo "title" | python3 estimate-effort.py + or: python3 estimate-effort.py "title string" + +Output: trivial | small | medium | large | epic + +Effort Guide: + trivial — Single file, config tweak, text update, remove something + small — 1-3 files, focused bug fix, one component, simple feature + medium — 4-10 files, new page/endpoint, integration, moderate refactor + large — 10+ files, new system/module, cross-cutting, multi-component + epic — Multi-week, new product area, architecture change +""" +import sys +import re + +EPIC_PATTERNS = [ + r'deploy.*vercel.*domain', + r'full.*redesign.*architecture', +] + +LARGE_PATTERNS = [ + r'groundedsam', r'photo detail.*ml pipeline', r'docker.compose', + r'extract.*framework', r'extract.*template', r'gateway.*adk', + r'launch prep', r'<your-project> page.*redesign', r'liveDatasystem', + r'kanban whiteboard', r'whatsapp gateway', r'new.*system', + r'full.*integration', r'migrate.*from.*to', r'rewrite', +] + +MEDIUM_PATTERNS = [ + r'dashboard:.*page', r'catalog page', r'production build', + r'production hardening', r'secure redis', r'shared-types', + r'explore section', r'activity feed', r'case studies', + r'research lab', r'research section', r'floating.*voice', + r'interactive.*demo', r'rest api', r'eventbus', r'cli gateway', + r'visual 
fixes', r'interactive.*product', r'monorepo', + r'new.*page', r'new.*endpoint', r'new.*component', +] + +SMALL_PATTERNS = [ + r'fix.*cors', r'fix.*xml', r'add.*shelf', r'fix.*crop', r'fix.*race', + r'formatter', r'client for sending', r'webhook endpoint', + r'phone.*mapping', r'screenshots', r'move.*button', + r'split.*component', r'connect.*backend', r'remove.*credential', + r'assets', r'session management', r'add.*column', r'update.*config', + r'fix.*bug', r'fix.*typo', r'fix.*error', r'add.*field', +] + +TRIVIAL_PATTERNS = [ + r'remove mock', r'simplify.*categor', r'^rename', r'update.*readme', + r'fix.*typo', r'bump.*version', r'delete.*unused', +] + + +def estimate_effort(title): + t = title.lower() + for p in EPIC_PATTERNS: + if re.search(p, t, re.I): + return 'epic' + for p in LARGE_PATTERNS: + if re.search(p, t, re.I): + return 'large' + for p in MEDIUM_PATTERNS: + if re.search(p, t, re.I): + return 'medium' + for p in SMALL_PATTERNS: + if re.search(p, t, re.I): + return 'small' + for p in TRIVIAL_PATTERNS: + if re.search(p, t, re.I): + return 'trivial' + + # Fallback heuristics + if 'security' in t: + return 'medium' + words = len(title.split()) + if words <= 5: + return 'small' + if words >= 15: + return 'large' + return 'medium' + + +if __name__ == '__main__': + if len(sys.argv) > 1: + title = ' '.join(sys.argv[1:]) + else: + title = sys.stdin.read().strip() + print(estimate_effort(title)) diff --git a/examples/deepwork/packs/deepwork-org/scripts/mayor/log-rotate.sh b/examples/deepwork/packs/deepwork-org/scripts/mayor/log-rotate.sh new file mode 100755 index 00000000..ec66b70a --- /dev/null +++ b/examples/deepwork/packs/deepwork-org/scripts/mayor/log-rotate.sh @@ -0,0 +1,119 @@ +#!/usr/bin/env bash +# log-rotate.sh — Rotate large log files in /tmp/ to prevent disk exhaustion +# +# Runs on cron: 0 0 * * * (daily at midnight) +# Rotates files > 10MB, keeps 2 old copies (.1, .2), compresses with gzip. 
+# Only targets known GT-related log files — does not touch arbitrary /tmp files. + +set -euo pipefail + +LOG="/home/pratham2/gt/.mesh-activity.log" +MAX_SIZE_BYTES=$(( 10 * 1024 * 1024 )) # 10MB +KEEP_ROTATIONS=2 + +log() { echo "$(date +%Y-%m-%dT%H:%M:%S) [log-rotate] $*" >> "$LOG"; } + +# Known log file patterns to manage +LOG_PATTERNS=( + "/tmp/dolt*.log" + "/tmp/victoria-bot*.log" + "/tmp/tunnel*.log" + "/tmp/gt-mesh-sync.log" + "/tmp/dolthub-sync.log" + "/tmp/mayor-dispatcher.log" + "/tmp/mesh-improve.log" + "/tmp/mesh-autosync.log" + "/tmp/cron-audit.log" + "/tmp/mesh-mayor-daemon.log" + "/tmp/mesh-pack-updater.log" + "/tmp/mesh-inbox.log" + "/tmp/mesh-watchdog.log" + "/tmp/process-guardian.log" + "/tmp/worker-flywheel.log" + "/tmp/tg-approval-poll.log" + "/tmp/linkedin-engage.log" + "/tmp/hot-take-*.log" + "/tmp/yolo-training*.log" + "/tmp/dolt-hang-*.log" +) + +rotate_file() { + local filepath="$1" + local basename + basename=$(basename "$filepath") + + # Remove oldest rotation + if [ -f "${filepath}.${KEEP_ROTATIONS}.gz" ]; then + rm -f "${filepath}.${KEEP_ROTATIONS}.gz" + fi + + # Shift existing rotations up + local i + for (( i=KEEP_ROTATIONS; i>1; i-- )); do + local prev=$(( i - 1 )) + if [ -f "${filepath}.${prev}.gz" ]; then + mv "${filepath}.${prev}.gz" "${filepath}.${i}.gz" + fi + done + + # Rotate current: copy + truncate (keeps file descriptor valid for writing processes) + cp "$filepath" "${filepath}.1" + truncate -s 0 "$filepath" + gzip "${filepath}.1" + + local old_size + old_size=$(stat -c %s "${filepath}.1.gz" 2>/dev/null || echo "?") + log "Rotated $basename (compressed to ${old_size} bytes)" +} + +rotated=0 +skipped=0 +total_freed=0 + +for pattern in "${LOG_PATTERNS[@]}"; do + # Expand glob — may match multiple files + for filepath in $pattern; do + [ -f "$filepath" ] || continue + + # Skip already-rotated files (.1, .2, .gz) + case "$filepath" in + *.gz|*.[0-9]) continue ;; + esac + + # Skip files we don't own (can't rotate other users' 
logs) + [ -O "$filepath" ] || continue + # Skip files we can't write to + [ -w "$filepath" ] || continue + + size=$(stat -c %s "$filepath" 2>/dev/null || echo 0) + if [ "$size" -ge "$MAX_SIZE_BYTES" ]; then + size_mb=$(( size / 1024 / 1024 )) + log "$(basename "$filepath") is ${size_mb}MB — rotating" + if rotate_file "$filepath"; then + rotated=$(( rotated + 1 )) + total_freed=$(( total_freed + size )) + else + log "Failed to rotate $(basename "$filepath")" + fi + else + skipped=$(( skipped + 1 )) + fi + done +done + +# Also clean up the mesh-activity log itself if it's huge +MESH_LOG="/home/pratham2/gt/.mesh-activity.log" +if [ -f "$MESH_LOG" ]; then + mesh_size=$(stat -c %s "$MESH_LOG" 2>/dev/null || echo 0) + if [ "$mesh_size" -ge "$MAX_SIZE_BYTES" ]; then + mesh_mb=$(( mesh_size / 1024 / 1024 )) + log ".mesh-activity.log is ${mesh_mb}MB — rotating" + rotate_file "$MESH_LOG" + rotated=$(( rotated + 1 )) + total_freed=$(( total_freed + mesh_size )) + fi +fi + +freed_mb=$(( total_freed / 1024 / 1024 )) +log "Log rotation complete: $rotated rotated, $skipped under threshold, ~${freed_mb}MB freed" +exit 0 diff --git a/examples/deepwork/packs/deepwork-org/scripts/mayor/readme-release.sh b/examples/deepwork/packs/deepwork-org/scripts/mayor/readme-release.sh new file mode 100755 index 00000000..f33a0528 --- /dev/null +++ b/examples/deepwork/packs/deepwork-org/scripts/mayor/readme-release.sh @@ -0,0 +1,103 @@ +#!/bin/bash +# readme-release.sh — Update README stats and create GitHub releases +# +# For each <your-github-org> repo on GitHub: +# 1. Check if there are new commits since last release +# 2. If 10+ commits, create a new release with changelog +# +# Also updates the org config pack README with current stats. 
+# +# Cron: 0 3 * * * /home/pratham2/gt/mayor/scripts/readme-release.sh +# (runs daily at 3 AM) + +set -uo pipefail + +GT_ROOT="${GT_ROOT:-$HOME/gt}" +GITHUB_ORG="<your-github-org>" +LOGFILE="$GT_ROOT/logs/readme-release.log" +LOCKFILE="/tmp/readme-release.lock" +CL_SCRIPT="$GT_ROOT/mayor/changelog/append.sh" +RELEASE_THRESHOLD=10 + +mkdir -p "$(dirname "$LOGFILE")" +log() { echo "$(date +%Y-%m-%dT%H:%M:%S) $*" >> "$LOGFILE"; } + +exec 200>"$LOCKFILE" +flock -n 200 || { log "SKIP — another run"; exit 0; } + +log "=== Starting README/release update ===" + +REPOS=( + "<your-project>" + "<your-project>" + "<your-project>" + "website" + "<your-project>" + "<your-project>" + "gt-mesh" + "deepwork-base" + "<your-project>" +) + +RELEASES_CREATED=0 + +for repo in "${REPOS[@]}"; do + # Get latest release tag + latest_tag=$(gh api "repos/$GITHUB_ORG/$repo/releases/latest" --jq '.tag_name' 2>/dev/null || echo "") + + # Count commits since last release (or all if no release) + if [ -n "$latest_tag" ]; then + commits_since=$(gh api "repos/$GITHUB_ORG/$repo/compare/${latest_tag}...main" --jq '.total_commits' 2>/dev/null || echo "0") + else + commits_since=$(gh api "repos/$GITHUB_ORG/$repo/commits?per_page=1" --jq 'length' 2>/dev/null || echo "0") + # If no release exists and repo has commits, set high number to trigger + [ "$commits_since" -gt 0 ] 2>/dev/null && commits_since=100 + fi + + log "$repo: $commits_since commits since ${latest_tag:-'no release'}" + + # Create release if threshold met + if [ "${commits_since:-0}" -ge "$RELEASE_THRESHOLD" ] 2>/dev/null; then + new_tag="v$(date +%Y.%m.%d)" + + # Check if tag already exists today + if gh api "repos/$GITHUB_ORG/$repo/git/refs/tags/$new_tag" >/dev/null 2>&1; then + log " SKIP: $new_tag already exists" + continue + fi + + # Get recent commit messages + changelog=$(gh api "repos/$GITHUB_ORG/$repo/commits?per_page=20" \ + --jq '.[].commit.message' 2>/dev/null | head -20 | sed 's/^/- /') + + notes_file=$(mktemp) + cat > 
"$notes_file" <<EOF +## What's Changed + +${changelog} + +--- +*${commits_since} commits since ${latest_tag:-'initial release'}* +EOF + + if gh release create "$new_tag" \ + --repo "$GITHUB_ORG/$repo" \ + --title "$repo $(date +%Y-%m-%d)" \ + --notes-file "$notes_file" 2>/dev/null; then + log " RELEASE: $new_tag created" + RELEASES_CREATED=$((RELEASES_CREATED + 1)) + else + log " ERROR: release creation failed" + fi + rm -f "$notes_file" + fi +done + +# Log to changelog if releases were created +if [ "$RELEASES_CREATED" -gt 0 ]; then + bash "$CL_SCRIPT" "deploy" "town" \ + "GitHub releases: $RELEASES_CREATED repos" \ + "Auto-created releases on <your-github-org> org" 2>/dev/null || true +fi + +log "=== Done: $RELEASES_CREATED releases created ===" diff --git a/examples/deepwork/packs/deepwork-org/scripts/mayor/thread-guardrail.sh b/examples/deepwork/packs/deepwork-org/scripts/mayor/thread-guardrail.sh new file mode 100755 index 00000000..84661912 --- /dev/null +++ b/examples/deepwork/packs/deepwork-org/scripts/mayor/thread-guardrail.sh @@ -0,0 +1,160 @@ +#!/bin/bash +# Thread Guardrail v2 for pratham2 +# Prevents thread exhaustion by: killing orphan node processes, capping thread-heavy +# non-essential processes, and escalating when approaching limits. 
+# Cron: * * * * * /home/pratham2/gt/mayor/thread-guardrail.sh >> /tmp/thread-guardrail.log 2>&1 + +set -euo pipefail + +USER="pratham2" +LOG_PREFIX="[guardrail $(date +%Y-%m-%dT%H:%M:%S)]" + +# Hard limit for pratham2 (ulimit -u = 3072) +LIMIT=3072 + +# Thresholds (tight — 3072 is very low) +ORPHAN_ALWAYS=1 # Always kill orphan node processes regardless of thread count +WARN_PCT=40 # 40% = ~1,229 threads — start logging top consumers +ACTION_PCT=60 # 60% = ~1,843 threads — kill non-essential processes +CRITICAL_PCT=75 # 75% = ~2,304 threads — kill aggressively + alert + +# Count current threads +CURRENT=$(ps -Lu "$USER" --no-headers 2>/dev/null | wc -l) +PCT=$(( CURRENT * 100 / LIMIT )) + +echo "$LOG_PREFIX threads=$CURRENT limit=$LIMIT usage=${PCT}%" + +# ============================================================ +# ALWAYS: Kill orphan node/next/npm processes +# These are the #1 cause of thread exhaustion. A "next dev" server +# spawns 20+ threads and persists after the polecat that started it dies. 
+# ============================================================ +kill_orphan_nodes() { + local killed=0 + # Find node processes owned by pratham2 whose parent is init (ppid=1) or + # whose parent tmux session no longer exists + while IFS= read -r line; do + pid=$(echo "$line" | awk '{print $1}') + ppid=$(echo "$line" | awk '{print $2}') + threads=$(echo "$line" | awk '{print $3}') + etime=$(echo "$line" | awk '{print $4}') + cmd=$(echo "$line" | cut -d' ' -f5-) + + # Skip if PID is gone + [ -d "/proc/$pid" ] || continue + + # Orphan = parent is PID 1 (reparented after parent died) + # or parent is not a tmux/claude/bash process + if [ "$ppid" -eq 1 ]; then + echo "$LOG_PREFIX ORPHAN node (ppid=1): PID $pid, ${threads} threads, age=$etime — killing" + kill -TERM "$pid" 2>/dev/null || true + killed=$((killed + 1)) + fi + done < <(ps -u "$USER" --no-headers -o pid,ppid,nlwp,etime,args 2>/dev/null | grep -E '[/]node\b|next-server|next dev|npm run|npx ' | grep -v grep || true) + + # Also kill any node_modules/.bin processes that are orphaned + while IFS= read -r line; do + pid=$(echo "$line" | awk '{print $1}') + ppid=$(echo "$line" | awk '{print $2}') + threads=$(echo "$line" | awk '{print $3}') + if [ "$ppid" -eq 1 ] && [ -d "/proc/$pid" ]; then + echo "$LOG_PREFIX ORPHAN node_modules process: PID $pid, ${threads} threads — killing" + kill -TERM "$pid" 2>/dev/null || true + killed=$((killed + 1)) + fi + done < <(ps -u "$USER" --no-headers -o pid,ppid,nlwp,args 2>/dev/null | grep 'node_modules/.bin' | grep -v grep || true) + + if [ "$killed" -gt 0 ]; then + echo "$LOG_PREFIX Killed $killed orphan node processes" + fi +} + +# Always run orphan cleanup +kill_orphan_nodes + +# Also always kill dolt send-metrics (telemetry, ~100 threads, never needed) +METRICS_PIDS=$(pgrep -u "$USER" -f "dolt send-metrics" 2>/dev/null || true) +if [ -n "$METRICS_PIDS" ]; then + echo "$LOG_PREFIX Killing dolt send-metrics ($METRICS_PIDS)" + echo "$METRICS_PIDS" | xargs kill 2>/dev/null || 
true +fi + +# Recount after orphan cleanup +CURRENT=$(ps -Lu "$USER" --no-headers 2>/dev/null | wc -l) +PCT=$(( CURRENT * 100 / LIMIT )) + +if [ "$PCT" -lt "$WARN_PCT" ]; then + exit 0 +fi + +# ============================================================ +# WARNING ZONE (30%+) — log top consumers +# ============================================================ +echo "$LOG_PREFIX WARNING: ${PCT}% thread usage ($CURRENT/$LIMIT)" +echo "$LOG_PREFIX Top thread consumers:" +ps -u "$USER" --no-headers -o pid,nlwp,etime,comm 2>/dev/null | sort -k2 -rn | head -10 | while read pid nlwp etime comm; do + echo "$LOG_PREFIX PID=$pid threads=$nlwp age=$etime cmd=$comm" +done + +if [ "$PCT" -lt "$ACTION_PCT" ]; then + exit 0 +fi + +# ============================================================ +# ACTION ZONE (50%+) — kill non-essential heavy processes +# ============================================================ +echo "$LOG_PREFIX ACTION: ${PCT}% — killing non-essential processes" + +# Kill ALL node/next/npm processes (not just orphans) — dev servers can restart +echo "$LOG_PREFIX Killing all node/next/npm processes..." 
+pkill -u "$USER" -f "next-server" 2>/dev/null || true +pkill -u "$USER" -f "next dev" 2>/dev/null || true +pkill -u "$USER" -f "npm run dev" 2>/dev/null || true +pkill -u "$USER" -f "npx " 2>/dev/null || true +# Be more careful with generic "node" — only kill if high thread count +ps -u "$USER" --no-headers -o pid,nlwp,args 2>/dev/null | grep '[/]node\b' | grep -v 'claude\|gt\|bd' | while read pid nlwp cmd; do + if [ "$nlwp" -gt 10 ]; then + echo "$LOG_PREFIX Killing node PID $pid ($nlwp threads): $(echo "$cmd" | head -c 60)" + kill -TERM "$pid" 2>/dev/null || true + fi +done + +# Kill vite/webpack dev servers +pkill -u "$USER" -f "vite" 2>/dev/null || true +pkill -u "$USER" -f "webpack.*serve" 2>/dev/null || true + +sleep 2 +CURRENT=$(ps -Lu "$USER" --no-headers 2>/dev/null | wc -l) +PCT=$(( CURRENT * 100 / LIMIT )) +echo "$LOG_PREFIX After action: threads=$CURRENT usage=${PCT}%" + +if [ "$PCT" -lt "$CRITICAL_PCT" ]; then + exit 0 +fi + +# ============================================================ +# CRITICAL ZONE (70%+) — aggressive cleanup +# ============================================================ +echo "$LOG_PREFIX CRITICAL: ${PCT}% — aggressive cleanup" + +# Kill non-essential Claude sessions (not mayor, deacon, witness, refinery) +ps -u "$USER" --no-headers -o pid,etimes,args 2>/dev/null | grep 'claude' | \ + grep -v 'mayor\|deacon\|witness\|refinery' | \ + sort -k2 -rn | head -5 | while read pid etime cmd; do + threads=$(ls /proc/$pid/task 2>/dev/null | wc -l) + echo "$LOG_PREFIX Killing Claude session PID $pid ($threads threads, age=${etime}s)" + kill "$pid" 2>/dev/null || true +done + +sleep 2 +CURRENT=$(ps -Lu "$USER" --no-headers 2>/dev/null | wc -l) +PCT=$(( CURRENT * 100 / LIMIT )) +echo "$LOG_PREFIX Final: threads=$CURRENT usage=${PCT}%" + +if [ "$PCT" -ge "$CRITICAL_PCT" ]; then + echo "$LOG_PREFIX STILL CRITICAL — manual intervention needed" + /home/pratham2/go/bin/gt mail send mayor/ \ + -s "[CRITICAL] Thread limit ${PCT}% — manual action 
needed" \ + -m "Guardrail killed node+metrics+claude but still at ${CURRENT}/${LIMIT}. Top consumers: $(ps -u $USER --no-headers -o nlwp,comm | sort -rn | head -5 | tr '\n' '; ')" \ + 2>/dev/null || true +fi diff --git a/examples/deepwork/packs/deepwork-org/scripts/wasteland-on-close.sh b/examples/deepwork/packs/deepwork-org/scripts/wasteland-on-close.sh new file mode 100755 index 00000000..d26e6833 --- /dev/null +++ b/examples/deepwork/packs/deepwork-org/scripts/wasteland-on-close.sh @@ -0,0 +1,42 @@ +#!/bin/bash +# wasteland-on-close.sh — Auto-mark wasteland items done when bead is closed +# +# Called after bd close. Finds the matching wasteland item and marks it done. +# +# Usage: wasteland-on-close.sh <bead-id> +# Environment: GT_ROOT (default ~/gt) + +set -uo pipefail + +BEAD_ID="${1:-}" +GT_ROOT="${GT_ROOT:-$HOME/gt}" +LOGFILE="$GT_ROOT/logs/wasteland-sync.log" +mkdir -p "$(dirname "$LOGFILE")" + +log() { echo "$(date +%Y-%m-%dT%H:%M:%S) [on-close] $*" >> "$LOGFILE"; } + +[ -z "$BEAD_ID" ] && exit 0 + +# Find matching wasteland item +wl_id=$(timeout 15 gt wl browse --json 2>/dev/null | python3 -c " +import json,sys +items = json.load(sys.stdin) +for item in items: + desc = item.get('description','') + ' ' + item.get('title','') + if 'Bead: $BEAD_ID' in desc or '$BEAD_ID' in item.get('title',''): + if item.get('status','') == 'open': + print(item['id']) + break +" 2>/dev/null) + +[ -z "$wl_id" ] && { log "SKIP: No wasteland item found for $BEAD_ID"; exit 0; } + +# Claim it first (required before done) +timeout 10 gt wl claim "$wl_id" 2>/dev/null || true + +# Mark done with evidence +timeout 10 gt wl done "$wl_id" --evidence "Bead $BEAD_ID closed locally" 2>/dev/null \ + && log "OK: Marked $wl_id done (bead $BEAD_ID)" \ + || log "ERROR: Failed to mark $wl_id done" + +exit 0 diff --git a/examples/deepwork/packs/deepwork-org/scripts/wasteland-on-create.sh b/examples/deepwork/packs/deepwork-org/scripts/wasteland-on-create.sh new file mode 100755 index 
00000000..063f7345 --- /dev/null +++ b/examples/deepwork/packs/deepwork-org/scripts/wasteland-on-create.sh @@ -0,0 +1,98 @@ +#!/bin/bash +# wasteland-on-create.sh — Auto-post new beads to wasteland +# +# Called after bd create. Reads the new bead and posts to wasteland if P0/P1. +# +# Usage: wasteland-on-create.sh <bead-id> <rig> +# Environment: GT_ROOT (default ~/gt) + +set -uo pipefail + +BEAD_ID="${1:-}" +RIG="${2:-}" +GT_ROOT="${GT_ROOT:-$HOME/gt}" +LOGFILE="$GT_ROOT/logs/wasteland-sync.log" +mkdir -p "$(dirname "$LOGFILE")" + +log() { echo "$(date +%Y-%m-%dT%H:%M:%S) [on-create] $*" >> "$LOGFILE"; } + +[ -z "$BEAD_ID" ] && exit 0 + +# Get bead details +bead_json=$(timeout 10 bd show "$BEAD_ID" --json 2>/dev/null) || { log "SKIP: can't read $BEAD_ID"; exit 0; } +title=$(echo "$bead_json" | python3 -c "import json,sys; print(json.load(sys.stdin).get('title',''))" 2>/dev/null) +priority=$(echo "$bead_json" | python3 -c "import json,sys; print(json.load(sys.stdin).get('priority',2))" 2>/dev/null) +issue_type=$(echo "$bead_json" | python3 -c "import json,sys; print(json.load(sys.stdin).get('issue_type','task'))" 2>/dev/null) +description=$(echo "$bead_json" | python3 -c "import json,sys; print(json.load(sys.stdin).get('description','')[:500])" 2>/dev/null) + +# Only sync P0/P1 +[ "$priority" -gt 1 ] 2>/dev/null && { log "SKIP: $BEAD_ID is P${priority} (only P0/P1 synced)"; exit 0; } + +# Map rig to project name and GitHub repo +declare -A REPO_MAP=( + ["<your-rig>"]="https://github.com/<your-github-org>/<your-project>" + ["<your-rig>"]="https://github.com/<your-github-org>/<your-project>" + ["<your-project>"]="https://github.com/<your-github-org>/<your-project>" + ["<your-rig>"]="https://github.com/<your-github-org>/website" + ["<your-project>"]="https://github.com/<your-github-org>/<your-project>" + ["<your-rig>"]="https://github.com/<your-github-org>/<your-project>" + ["<your-rig>"]="https://github.com/<your-github-org>/<your-project>" +) + 
+project="${RIG:-unknown}" +repo_url="${REPO_MAP[$RIG]:-}" + +# Map issue_type to wasteland type +wl_type="feature" +[[ "$issue_type" == "bug" ]] && wl_type="bug" +[[ "$issue_type" == "docs" ]] && wl_type="docs" + +# Check if already on wasteland (search by bead ID in description) +existing=$(timeout 10 gt wl browse --json 2>/dev/null | python3 -c " +import json,sys +items = json.load(sys.stdin) +for item in items: + if 'Bead: $BEAD_ID' in (item.get('description','') + item.get('title','')): + print(item['id']) + break +" 2>/dev/null) + +[ -n "$existing" ] && { log "SKIP: $BEAD_ID already on wasteland as $existing"; exit 0; } + +# Estimate effort deterministically +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" +effort=$(python3 "$SCRIPT_DIR/estimate-effort.py" "$title" 2>/dev/null || echo "medium") + +# Post to wasteland with proper template +wl_desc="## Context +Repo: ${repo_url} +Project: ${project} +Bead: ${BEAD_ID} + +## Task +${title} + +${description} + +## Acceptance Criteria +- Implementation matches the task description +- Tests pass +- No regressions +- PR submitted to main branch + +## How to Work on This +1. Clone: git clone ${repo_url} +2. Branch: git checkout -b feat/your-change +3. Implement the change +4. Push + create PR +5. 
Submit: gt wl done <id> --evidence PR_URL" + +timeout 15 gt wl post \ + --title "$title" \ + --project "$project" \ + --type "$wl_type" \ + --priority "$priority" \ + --effort "$effort" \ + --description "$wl_desc" 2>/dev/null && log "OK: Posted $BEAD_ID to wasteland (effort=$effort)" || log "ERROR: Failed to post $BEAD_ID" + +exit 0 diff --git a/examples/deepwork/packs/deepwork-org/scripts/wasteland-sync.sh b/examples/deepwork/packs/deepwork-org/scripts/wasteland-sync.sh new file mode 100755 index 00000000..ff1c1376 --- /dev/null +++ b/examples/deepwork/packs/deepwork-org/scripts/wasteland-sync.sh @@ -0,0 +1,151 @@ +#!/bin/bash +# wasteland-sync.sh — Catch-up reconciliation between beads and wasteland +# +# Runs every 4 hours via cron. Handles anything the hooks missed: +# 1. Scan all rig beads for P0/P1 not on wasteland → post them +# 2. Scan closed beads that have open wasteland items → mark done +# 3. Push to DoltHub +# +# Cron: 0 */4 * * * /home/pratham2/gt/mayor/scripts/wasteland-sync.sh + +set -uo pipefail + +GT_ROOT="${GT_ROOT:-$HOME/gt}" +LOCKFILE="/tmp/wasteland-sync.lock" +LOGFILE="$GT_ROOT/logs/wasteland-sync.log" +DOLT_CMD="dolt --host 127.0.0.1 --port <dolt-port> --user root --password '' --no-tls" + +mkdir -p "$(dirname "$LOGFILE")" +log() { echo "$(date +%Y-%m-%dT%H:%M:%S) [sync] $*" >> "$LOGFILE"; } + +# Flock +exec 200>"$LOCKFILE" +if ! 
flock -n 200; then + log "SKIP — another sync running" + exit 0 +fi + +log "=== Starting wasteland catch-up sync ===" + +# Rig → project name mapping +declare -A RIG_PROJECT=( + ["<your-project>"]="<your-project>" + ["<your-rig>"]="<your-project>" + ["<your-rig>"]="<your-project>" + ["<your-rig>"]="<your-project>" + ["<your-rig>"]="<your-project>" + ["<your-project>"]="<your-project>" + ["<your-rig>"]="<your-project>" +) + +declare -A REPO_MAP=( + ["<your-project>"]="https://github.com/<your-github-org>/<your-project>" + ["<your-rig>"]="https://github.com/<your-github-org>/website" + ["<your-rig>"]="https://github.com/<your-github-org>/<your-project>" + ["<your-rig>"]="https://github.com/<your-github-org>/<your-project>" + ["<your-rig>"]="https://github.com/<your-github-org>/<your-project>" + ["<your-project>"]="https://github.com/<your-github-org>/<your-project>" + ["<your-rig>"]="https://github.com/<your-github-org>/<your-project>" +) + +# DB name → rig prefix mapping +declare -A DB_PREFIX=( + ["<your-project>"]="of" + ["<your-rig>"]="ds" + ["<your-rig>"]="vaa" + ["<your-rig>"]="vap" + ["<your-rig>"]="cc" + ["<your-project>"]="prd" + ["<your-rig>"]="med" +) + +# Get existing wasteland items for dedup +wl_items=$(timeout 20 gt wl browse --json 2>/dev/null || echo "[]") +posted=0 +closed=0 + +for rig in "${!RIG_PROJECT[@]}"; do + db="$rig" + prefix="${DB_PREFIX[$rig]}" + project="${RIG_PROJECT[$rig]}" + repo="${REPO_MAP[$rig]}" + + # Get open P0/P1 beads from this rig + open_beads=$($DOLT_CMD sql -q " + SELECT id, title, priority, issue_type, SUBSTRING(description, 1, 300) as desc_short + FROM ${db}.issues + WHERE status='open' AND priority <= 1 + " -r csv 2>/dev/null | tail -n +2) || continue + + while IFS=, read -r id title priority issue_type desc; do + [ -z "$id" ] && continue + bead_id="${prefix}-${id}" + + # Check if already on wasteland + already=$(echo "$wl_items" | python3 -c " +import json,sys +items = json.load(sys.stdin) +for item in items: + if 'Bead: 
${bead_id}' in item.get('description','') or '${bead_id}' in item.get('title',''): + print('yes') + break +" 2>/dev/null) + + [ "$already" = "yes" ] && continue + + # Post it + wl_type="feature" + [[ "$issue_type" == "bug" ]] && wl_type="bug" + + timeout 15 gt wl post \ + --title "$title" \ + --project "$project" \ + --type "$wl_type" \ + --priority "$priority" \ + --description "Bead: ${bead_id} +Repo: ${repo} +Project: ${project} + +${desc}" 2>/dev/null && { posted=$((posted+1)); log "Posted: $bead_id → wasteland"; } || log "ERROR: Failed to post $bead_id" + + done <<< "$open_beads" + + # Find closed beads that still have open wasteland items + closed_beads=$($DOLT_CMD sql -q " + SELECT id FROM ${db}.issues + WHERE status='closed' AND priority <= 1 + AND closed_at > DATE_SUB(NOW(), INTERVAL 24 HOUR) + " -r csv 2>/dev/null | tail -n +2) || continue + + while IFS= read -r id; do + [ -z "$id" ] && continue + bead_id="${prefix}-${id}" + + # Find open wasteland item for this bead + wl_id=$(echo "$wl_items" | python3 -c " +import json,sys +items = json.load(sys.stdin) +for item in items: + if 'Bead: ${bead_id}' in item.get('description','') and item.get('status','') == 'open': + print(item['id']) + break +" 2>/dev/null) + + [ -z "$wl_id" ] && continue + + timeout 10 gt wl claim "$wl_id" 2>/dev/null || true + timeout 10 gt wl done "$wl_id" --evidence "Bead $bead_id closed locally" 2>/dev/null \ + && { closed=$((closed+1)); log "Closed: $wl_id (bead $bead_id)"; } || log "ERROR: Failed to close $wl_id" + + done <<< "$closed_beads" +done + +# Push to DoltHub via SQL (server-compatible, no merge needed) +# gt wl sync does pull+merge which conflicts with running server +# Instead, push the wl-commons database directly via dolt push through SQL +dolt --host 127.0.0.1 --port <dolt-port> --user root --password "" --no-tls sql -q "USE <your-wl-db>; CALL dolt_push('origin', 'main')" 2>/dev/null \ + && log "DoltHub push OK" \ + || log "WARN: DoltHub push failed (may need manual gt 
wl sync with server stopped)" + +log "Sync complete: $posted posted, $closed closed" +log "=== Done ==="