diff --git a/CHANGELOG.md b/CHANGELOG.md index 67b5bf9c88..bd16e40260 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,6 +6,37 @@ Format follows [Keep a Changelog](https://keepachangelog.com/en/1.1.0/). ## [Unreleased] +### Fixed +- **Quality/balanced profiles now deliver Opus subagents** — `resolveModelInternal` previously + converted `opus` to `inherit`, causing agents to silently run on Sonnet when the parent + session used the default Sonnet 4.6 model. Opus is now passed directly to Task calls (#695) + +### Added +- **Kimi CLI support** — install GSD skills and agents to [Kimi](https://github.com/MoonshotAI/kimi-cli) via `--kimi --global` + - Skills install to XDG path `~/.config/agents/skills/gsd-/SKILL.md` and are invoked with `/skill:gsd-` + - Agents install as dual-file format: `~/.kimi/agents/gsd-.yaml` + `gsd-.md` system prompt + - Tool names translated to full module paths (`kimi_cli.tools.file:ReadFile`, `kimi_cli.tools.shell:Shell`, etc.) + - MCP tools excluded from agent tool lists (configured separately via Kimi's config) + - `${VAR}` patterns in agent bodies escaped for Kimi CLI compatibility + - Local install guard — Kimi's XDG skills path is always global; `--kimi --local` exits with a clear error + - `KIMI_CONFIG_DIR` and `KIMI_SKILLS_DIR` env overrides for custom paths +- **Adaptive model profile** — fourth model profile (`adaptive`) that auto-selects models per-plan based on complexity evaluation (#210) + - `evaluateComplexity()` scores plan metadata (files modified, task count, objective keywords, plan type, dependencies) on 0-10+ scale + - Three tiers: Simple (haiku/sonnet), Medium (sonnet/opus), Complex (opus/sonnet) + - `adaptive_settings` config: `min_model`/`max_model` clamping, `log_selections` usage logging + - `resolve-adaptive-model` CLI command with `--context` for per-plan resolution + - `init plan-phase` and `init quick` now return `model_profile` and `adaptive_settings` for workflow use + - Plan index includes `type` and 
`depends_on` for enriched complexity evaluation + - Verifier uses per-plan adaptive resolution when in adaptive mode + - Full backward compatibility — non-adaptive profiles unaffected +- `/gsd:report-bug` command for structured bug reporting with severity tracking, diagnostic log capture, and GitHub issue creation + - `gsd-tools bug list/update/resolve` CLI commands for bug management + - `gsd-tools init bugs` and `scaffold bugs` for workflow bootstrapping + - Severity inference from keywords (critical/high/medium/low) + - Automatic diagnostic capture (git state, log files, error output) + - Optional GitHub issue creation via `gh` CLI + - Bug lifecycle: reported → investigating → fixing → resolved + ## [1.22.0] - 2026-02-27 ### Added @@ -13,6 +44,14 @@ Format follows [Keep a Changelog](https://keepachangelog.com/en/1.1.0/). - Analysis paralysis guard in agents to prevent over-deliberation during planning - Exhaustive cross-check and task-level TDD patterns in agent workflows - Code-aware discuss phase with codebase scouting — `/gsd:discuss-phase` now analyzes relevant source files before asking questions +- Concurrent milestone execution: work on multiple milestones in parallel with isolated state (#291) + - Milestone-scoped directories under `.planning/milestones//` + - `ACTIVE_MILESTONE` pointer file for switching context + - `/gsd:switch-milestone` command with in-progress work warnings + - `--milestone` CLI flag for explicit milestone targeting + - Statusline shows active milestone in multi-milestone mode + - All 28 workflow files updated for milestone-aware paths + - Zero behavioral change for single-milestone projects (legacy mode) ### Fixed - Update checker clears both cache paths to prevent stale version notifications diff --git a/README.md b/README.md index 91332b8cef..02a426949e 100644 --- a/README.md +++ b/README.md @@ -2,7 +2,7 @@ # GET SHIT DONE -**A light-weight and powerful meta-prompting, context engineering and spec-driven development system for 
Claude Code, OpenCode, Gemini CLI, and Codex.** +**A light-weight and powerful meta-prompting, context engineering and spec-driven development system for Claude Code, OpenCode, Gemini CLI, Codex, and Kimi.** **Solves context rot — the quality degradation that happens as Claude fills its context window.** @@ -80,16 +80,18 @@ npx get-shit-done-cc@latest ``` The installer prompts you to choose: -1. **Runtime** — Claude Code, OpenCode, Gemini, Codex, or all +1. **Runtime** — Claude Code, OpenCode, Gemini, Codex, Kimi, or all 2. **Location** — Global (all projects) or local (current project only) Verify with: - Claude Code / Gemini: `/gsd:help` - OpenCode: `/gsd-help` - Codex: `$gsd-help` +- Kimi: `/skill:gsd-help` > [!NOTE] -> Codex installation uses skills (`skills/gsd-*/SKILL.md`) rather than custom prompts. +> Codex and Kimi installations use skills (`skills/gsd-*/SKILL.md`) rather than custom prompts. +> Kimi installs globally to `~/.config/agents/skills/` and does not support local install. ### Staying Updated @@ -117,12 +119,15 @@ npx get-shit-done-cc --gemini --global # Install to ~/.gemini/ npx get-shit-done-cc --codex --global # Install to ~/.codex/ npx get-shit-done-cc --codex --local # Install to ./.codex/ +# Kimi (skills-first, XDG global only) +npx get-shit-done-cc --kimi --global # Install to ~/.config/agents/skills/ + # All runtimes npx get-shit-done-cc --all --global # Install to all directories ``` Use `--global` (`-g`) or `--local` (`-l`) to skip the location prompt. -Use `--claude`, `--opencode`, `--gemini`, `--codex`, or `--all` to skip the runtime prompt. +Use `--claude`, `--opencode`, `--gemini`, `--codex`, `--kimi`, or `--all` to skip the runtime prompt. @@ -474,6 +479,7 @@ You're never locked in. The system adapts. 
| `/gsd:audit-milestone` | Verify milestone achieved its definition of done | | `/gsd:complete-milestone` | Archive milestone, tag release | | `/gsd:new-milestone [name]` | Start next version: questions → research → requirements → roadmap | +| `/gsd:switch-milestone ` | Switch active milestone for concurrent work | ### Navigation @@ -512,15 +518,31 @@ You're never locked in. The system adapts. | Command | What it does | |---------|--------------| | `/gsd:settings` | Configure model profile and workflow agents | -| `/gsd:set-profile ` | Switch model profile (quality/balanced/budget) | +| `/gsd:set-profile ` | Switch model profile (quality/balanced/budget/adaptive) | +| `/gsd:report-bug [desc]` | Report bug with severity tracking and GitHub issue creation | | `/gsd:add-todo [desc]` | Capture idea for later | | `/gsd:check-todos` | List pending todos | | `/gsd:debug [desc]` | Systematic debugging with persistent state | +| `/gsd:add-tests [instructions]` | Generate unit and E2E tests for completed phase | | `/gsd:quick [--full]` | Execute ad-hoc task with GSD guarantees (`--full` adds plan-checking and verification) | | `/gsd:health [--repair]` | Validate `.planning/` directory integrity, auto-repair with `--repair` | ¹ Contributed by reddit user OracleGreyBeard +### Concurrent Milestones + +Work on multiple milestones simultaneously — e.g., v2.0 features + v1.5.1 hotfix: + +``` +/gsd:new-milestone "v1.5.1 Hotfix" # Creates milestone-scoped directory +/gsd:switch-milestone v2.0-features # Switch back to feature work +/gsd:progress # See status of active milestone +``` + +Each milestone gets isolated state: `STATE.md`, `ROADMAP.md`, `REQUIREMENTS.md`, `phases/` — all scoped under `.planning/milestones//`. Switch freely without losing progress. + +When no second milestone exists, everything stays in `.planning/` as usual (zero behavioral change). 
+ --- ## Configuration @@ -629,9 +651,10 @@ This prevents Claude from reading these files entirely, regardless of what comma - Restart your runtime to reload commands/skills - Verify files exist in `~/.claude/commands/gsd/` (global) or `./.claude/commands/gsd/` (local) - For Codex, verify skills exist in `~/.codex/skills/gsd-*/SKILL.md` (global) or `./.codex/skills/gsd-*/SKILL.md` (local) +- For Kimi, verify skills exist in `~/.config/agents/skills/gsd-*/SKILL.md` **Commands not working as expected?** -- Run `/gsd:help` to verify installation +- Run `/gsd:help` (or `/skill:gsd-help` for Kimi) to verify installation - Re-run `npx get-shit-done-cc` to reinstall **Updating to the latest version?** @@ -656,6 +679,7 @@ To remove GSD completely: npx get-shit-done-cc --claude --global --uninstall npx get-shit-done-cc --opencode --global --uninstall npx get-shit-done-cc --codex --global --uninstall +npx get-shit-done-cc --kimi --global --uninstall # Local installs (current project) npx get-shit-done-cc --claude --local --uninstall diff --git a/bin/install.js b/bin/install.js index 8a265e643e..e6d15b404c 100755 --- a/bin/install.js +++ b/bin/install.js @@ -41,6 +41,7 @@ const hasOpencode = args.includes('--opencode'); const hasClaude = args.includes('--claude'); const hasGemini = args.includes('--gemini'); const hasCodex = args.includes('--codex'); +const hasKimi = args.includes('--kimi'); const hasBoth = args.includes('--both'); // Legacy flag, keeps working const hasAll = args.includes('--all'); const hasUninstall = args.includes('--uninstall') || args.includes('-u'); @@ -48,7 +49,7 @@ const hasUninstall = args.includes('--uninstall') || args.includes('-u'); // Runtime selection - can be set by flags or interactive prompt let selectedRuntimes = []; if (hasAll) { - selectedRuntimes = ['claude', 'opencode', 'gemini', 'codex']; + selectedRuntimes = ['claude', 'opencode', 'gemini', 'codex', 'kimi']; } else if (hasBoth) { selectedRuntimes = ['claude', 'opencode']; } else { @@ 
-56,6 +57,7 @@ if (hasAll) { if (hasClaude) selectedRuntimes.push('claude'); if (hasGemini) selectedRuntimes.push('gemini'); if (hasCodex) selectedRuntimes.push('codex'); + if (hasKimi) selectedRuntimes.push('kimi'); } // Helper to get directory name for a runtime (used for local/project installs) @@ -63,13 +65,14 @@ function getDirName(runtime) { if (runtime === 'opencode') return '.opencode'; if (runtime === 'gemini') return '.gemini'; if (runtime === 'codex') return '.codex'; + if (runtime === 'kimi') return '.kimi'; return '.claude'; } /** * Get the config directory path relative to home directory for a runtime * Used for templating hooks that use path.join(homeDir, '', ...) - * @param {string} runtime - 'claude', 'opencode', 'gemini', or 'codex' + * @param {string} runtime - 'claude', 'opencode', 'gemini', 'codex', or 'kimi' * @param {boolean} isGlobal - Whether this is a global install */ function getConfigDirFromHome(runtime, isGlobal) { @@ -85,6 +88,7 @@ function getConfigDirFromHome(runtime, isGlobal) { } if (runtime === 'gemini') return "'.gemini'"; if (runtime === 'codex') return "'.codex'"; + if (runtime === 'kimi') return "'.kimi'"; return "'.claude'"; } @@ -113,9 +117,24 @@ function getOpencodeGlobalDir() { return path.join(os.homedir(), '.config', 'opencode'); } +/** + * Get the Kimi global skills directory. 
+ * Kimi follows XDG Base Directory spec: skills live in ~/.config/agents/skills/ + * Priority: KIMI_SKILLS_DIR > XDG_CONFIG_HOME/agents/skills > ~/.config/agents/skills + */ +function getKimiSkillsDir() { + if (process.env.KIMI_SKILLS_DIR) { + return expandTilde(process.env.KIMI_SKILLS_DIR); + } + if (process.env.XDG_CONFIG_HOME) { + return path.join(expandTilde(process.env.XDG_CONFIG_HOME), 'agents', 'skills'); + } + return path.join(os.homedir(), '.config', 'agents', 'skills'); +} + /** * Get the global config directory for a runtime - * @param {string} runtime - 'claude', 'opencode', 'gemini', or 'codex' + * @param {string} runtime - 'claude', 'opencode', 'gemini', 'codex', or 'kimi' * @param {string|null} explicitDir - Explicit directory from --config-dir flag */ function getGlobalDir(runtime, explicitDir = null) { @@ -148,7 +167,18 @@ function getGlobalDir(runtime, explicitDir = null) { } return path.join(os.homedir(), '.codex'); } - + + if (runtime === 'kimi') { + // Kimi: --config-dir > KIMI_CONFIG_DIR > ~/.kimi + if (explicitDir) { + return expandTilde(explicitDir); + } + if (process.env.KIMI_CONFIG_DIR) { + return expandTilde(process.env.KIMI_CONFIG_DIR); + } + return path.join(os.homedir(), '.kimi'); + } + // Claude Code: --config-dir > CLAUDE_CONFIG_DIR > ~/.claude if (explicitDir) { return expandTilde(explicitDir); @@ -169,7 +199,7 @@ const banner = '\n' + '\n' + ' Get Shit Done ' + dim + 'v' + pkg.version + reset + '\n' + ' A meta-prompting, context engineering and spec-driven\n' + - ' development system for Claude Code, OpenCode, Gemini, and Codex by TÂCHES.\n'; + ' development system for Claude Code, OpenCode, Gemini, Codex, and Kimi by TÂCHES.\n'; // Parse --config-dir argument function parseConfigDirArg() { @@ -203,7 +233,7 @@ console.log(banner); // Show help if requested if (hasHelp) { - console.log(` ${yellow}Usage:${reset} npx get-shit-done-cc [options]\n\n ${yellow}Options:${reset}\n ${cyan}-g, --global${reset} Install globally (to config 
directory)\n ${cyan}-l, --local${reset} Install locally (to current directory)\n ${cyan}--claude${reset} Install for Claude Code only\n ${cyan}--opencode${reset} Install for OpenCode only\n ${cyan}--gemini${reset} Install for Gemini only\n ${cyan}--codex${reset} Install for Codex only\n ${cyan}--all${reset} Install for all runtimes\n ${cyan}-u, --uninstall${reset} Uninstall GSD (remove all GSD files)\n ${cyan}-c, --config-dir ${reset} Specify custom config directory\n ${cyan}-h, --help${reset} Show this help message\n ${cyan}--force-statusline${reset} Replace existing statusline config\n\n ${yellow}Examples:${reset}\n ${dim}# Interactive install (prompts for runtime and location)${reset}\n npx get-shit-done-cc\n\n ${dim}# Install for Claude Code globally${reset}\n npx get-shit-done-cc --claude --global\n\n ${dim}# Install for Gemini globally${reset}\n npx get-shit-done-cc --gemini --global\n\n ${dim}# Install for Codex globally${reset}\n npx get-shit-done-cc --codex --global\n\n ${dim}# Install for all runtimes globally${reset}\n npx get-shit-done-cc --all --global\n\n ${dim}# Install to custom config directory${reset}\n npx get-shit-done-cc --codex --global --config-dir ~/.codex-work\n\n ${dim}# Install to current project only${reset}\n npx get-shit-done-cc --claude --local\n\n ${dim}# Uninstall GSD from Codex globally${reset}\n npx get-shit-done-cc --codex --global --uninstall\n\n ${yellow}Notes:${reset}\n The --config-dir option is useful when you have multiple configurations.\n It takes priority over CLAUDE_CONFIG_DIR / GEMINI_CONFIG_DIR / CODEX_HOME environment variables.\n`); + console.log(` ${yellow}Usage:${reset} npx get-shit-done-cc [options]\n\n ${yellow}Options:${reset}\n ${cyan}-g, --global${reset} Install globally (to config directory)\n ${cyan}-l, --local${reset} Install locally (to current directory)\n ${cyan}--claude${reset} Install for Claude Code only\n ${cyan}--opencode${reset} Install for OpenCode only\n ${cyan}--gemini${reset} Install for 
Gemini only\n ${cyan}--codex${reset} Install for Codex only\n ${cyan}--kimi${reset} Install for Kimi only\n ${cyan}--all${reset} Install for all runtimes\n ${cyan}-u, --uninstall${reset} Uninstall GSD (remove all GSD files)\n ${cyan}-c, --config-dir ${reset} Specify custom config directory\n ${cyan}-h, --help${reset} Show this help message\n ${cyan}--force-statusline${reset} Replace existing statusline config\n\n ${yellow}Examples:${reset}\n ${dim}# Interactive install (prompts for runtime and location)${reset}\n npx get-shit-done-cc\n\n ${dim}# Install for Claude Code globally${reset}\n npx get-shit-done-cc --claude --global\n\n ${dim}# Install for Gemini globally${reset}\n npx get-shit-done-cc --gemini --global\n\n ${dim}# Install for Codex globally${reset}\n npx get-shit-done-cc --codex --global\n\n ${dim}# Install for Kimi globally${reset}\n npx get-shit-done-cc --kimi --global\n\n ${dim}# Install for all runtimes globally${reset}\n npx get-shit-done-cc --all --global\n\n ${dim}# Install to custom config directory${reset}\n npx get-shit-done-cc --codex --global --config-dir ~/.codex-work\n\n ${dim}# Install to current project only${reset}\n npx get-shit-done-cc --claude --local\n\n ${dim}# Uninstall GSD from Codex globally${reset}\n npx get-shit-done-cc --codex --global --uninstall\n\n ${yellow}Notes:${reset}\n The --config-dir option is useful when you have multiple configurations.\n It takes priority over CLAUDE_CONFIG_DIR / GEMINI_CONFIG_DIR / CODEX_HOME / KIMI_CONFIG_DIR environment variables.\n Kimi only supports global install (skills go to XDG ~/.config/agents/skills/).\n`); process.exit(0); } @@ -288,7 +318,7 @@ function getCommitAttribution(runtime) { result = settings.attribution.commit; } } else { - // Codex currently has no attribution setting equivalent + // Codex and Kimi have no attribution setting equivalent result = undefined; } @@ -364,6 +394,34 @@ const claudeToGeminiTools = { AskUserQuestion: 'ask_user', }; +// Tool name mapping from Claude 
Code to Kimi CLI +// Kimi CLI uses PascalCase tool names with full module paths +const claudeToKimiTools = { + Read: 'kimi_cli.tools.file:ReadFile', + Write: 'kimi_cli.tools.file:WriteFile', + Edit: 'kimi_cli.tools.file:StrReplaceFile', + Glob: 'kimi_cli.tools.file:Glob', + Grep: 'kimi_cli.tools.file:Grep', + ReadMediaFile: 'kimi_cli.tools.file:ReadMediaFile', + Bash: 'kimi_cli.tools.shell:Shell', + WebSearch: 'kimi_cli.tools.web:SearchWeb', + WebFetch: 'kimi_cli.tools.web:FetchURL', + TodoWrite: 'kimi_cli.tools.todo:SetTodoList', + AskUserQuestion: 'kimi_cli.tools.ask_user:AskUserQuestion', + Task: 'kimi_cli.tools.multiagent:Task', +}; + +/** + * Convert a Claude Code tool name to Kimi CLI format. + * Returns the full module path (e.g. 'kimi_cli.tools.file:ReadFile'), or null to exclude. + * MCP tools are excluded — configured separately via Kimi's config. + * @returns {string|null} + */ +function convertKimiToolName(claudeTool) { + if (claudeTool.startsWith('mcp__')) return null; + return claudeToKimiTools[claudeTool] || null; +} + /** * Convert a Claude Code tool name to OpenCode format * - Applies special mappings (AskUserQuestion -> question, etc.) @@ -406,6 +464,81 @@ function convertGeminiToolName(claudeTool) { return claudeTool.toLowerCase(); } +/** + * Convert a Claude Code command to Kimi skill format. + * Kimi skills live in ~/.config/agents/skills/gsd-/SKILL.md + * and are invoked via /skill:gsd-. + * @param {string} content - Markdown file content + * @param {string} skillName - Skill name (e.g. 
'gsd-new-project') + * @returns {string} Converted skill content + */ +function convertClaudeToKimiSkill(content, skillName) { + let converted = content; + + // Replace /gsd:command with Kimi skill invocation syntax + converted = converted.replace(/\/gsd:([a-z0-9-]+)/gi, (_, cmd) => { + return `/skill:gsd-${cmd}`; + }); + + // Replace Claude tool names in body text so documentation is accurate + converted = converted.replace(/\bRead\b(?=\s*\(|\s*["'])/g, 'ReadFile'); + converted = converted.replace(/\bWrite\b(?=\s*\(|\s*["'])/g, 'WriteFile'); + converted = converted.replace(/\bEdit\b(?=\s*\(|\s*["'])/g, 'StrReplaceFile'); + converted = converted.replace(/\bBash\b(?=\s*\(|\s*["'])/g, 'Shell'); + converted = converted.replace(/\bTodoWrite\b(?=\s*\(|\s*["'])/g, 'SetTodoList'); + converted = converted.replace(/\bWebSearch\b(?=\s*\(|\s*["'])/g, 'SearchWeb'); + converted = converted.replace(/\bWebFetch\b(?=\s*\(|\s*["'])/g, 'FetchURL'); + + const { frontmatter, body } = extractFrontmatterAndBody(converted); + + let skillFrontmatter = `---\nname: ${skillName}\n`; + if (frontmatter) { + const descMatch = frontmatter.match(/^description:\s*(.+)$/m); + if (descMatch) { + skillFrontmatter += `description: ${descMatch[1].trim()}\n`; + } + } + skillFrontmatter += `---\n`; + + return skillFrontmatter + body; +} + +/** + * Convert a Claude Code agent markdown to Kimi YAML agent format. + * Kimi agents use YAML with a separate system prompt file. 
+ * @param {string} content - Agent markdown content + * @returns {{ yaml: string, systemPrompt: string } | null} + */ +function convertClaudeToKimiAgent(content) { + const { frontmatter, body } = extractFrontmatterAndBody(content); + if (!frontmatter) return null; + + const name = extractFrontmatterField(frontmatter, 'name') || 'unknown'; + const description = extractFrontmatterField(frontmatter, 'description') || ''; + const toolsField = extractFrontmatterField(frontmatter, 'tools') || ''; + + const kimiTools = toolsField.split(',').map(t => t.trim()).filter(t => t) + .map(convertKimiToolName) + .filter(t => t !== null); + + // Escape ${VAR} patterns for Kimi CLI compatibility (same as Gemini) + const escapedBody = body.replace(/\$\{(\w+)\}/g, '$$$1'); + + const lines = ['version: 1', 'agent:', ` name: ${name}`]; + if (description) { + lines.push(` description: ${toSingleLine(description)}`); + } + if (kimiTools.length > 0) { + lines.push(' tools:'); + for (const tool of kimiTools) { + lines.push(` - "${tool}"`); + } + } + lines.push(` system_prompt_path: ./${name}.md`); + + return { yaml: lines.join('\n') + '\n', systemPrompt: escapedBody }; +} + function toSingleLine(value) { return value.replace(/\s+/g, ' ').trim(); } @@ -1058,6 +1191,47 @@ function copyCommandsAsCodexSkills(srcDir, skillsDir, prefix, pathPrefix, runtim recurse(srcDir, prefix); } +/** + * Copy commands as Kimi skills to ~/.config/agents/skills/gsd-/SKILL.md + */ +function copyCommandsAsKimiSkills(srcDir, skillsDir, prefix, pathPrefix, runtime) { + if (!fs.existsSync(srcDir)) return; + + fs.mkdirSync(skillsDir, { recursive: true }); + + // Remove previous GSD Kimi skills to avoid stale entries + for (const entry of fs.readdirSync(skillsDir, { withFileTypes: true })) { + if (entry.isDirectory() && entry.name.startsWith(`${prefix}-`)) { + fs.rmSync(path.join(skillsDir, entry.name), { recursive: true }); + } + } + + function recurse(currentSrcDir, currentPrefix) { + for (const entry of 
fs.readdirSync(currentSrcDir, { withFileTypes: true })) { + const srcPath = path.join(currentSrcDir, entry.name); + if (entry.isDirectory()) { + recurse(srcPath, `${currentPrefix}-${entry.name}`); + continue; + } + if (!entry.name.endsWith('.md')) continue; + + const skillName = `${currentPrefix}-${entry.name.replace('.md', '')}`; + const skillDir = path.join(skillsDir, skillName); + fs.mkdirSync(skillDir, { recursive: true }); + + let content = fs.readFileSync(srcPath, 'utf8'); + content = content.replace(/~\/\.claude\//g, pathPrefix); + content = content.replace(/\.\/\.claude\//g, `./${getDirName(runtime)}/`); + content = processAttribution(content, getCommitAttribution(runtime)); + content = convertClaudeToKimiSkill(content, skillName); + + fs.writeFileSync(path.join(skillDir, 'SKILL.md'), content); + } + } + + recurse(srcDir, prefix); +} + /** * Recursively copy directory, replacing paths in .md files * Deletes existing destDir first to remove orphaned files from previous versions @@ -1205,6 +1379,7 @@ function cleanupOrphanedHooks(settings) { function uninstall(isGlobal, runtime = 'claude') { const isOpencode = runtime === 'opencode'; const isCodex = runtime === 'codex'; + const isKimi = runtime === 'kimi'; const dirName = getDirName(runtime); // Get the target directory based on runtime and install type @@ -1220,11 +1395,13 @@ function uninstall(isGlobal, runtime = 'claude') { if (runtime === 'opencode') runtimeLabel = 'OpenCode'; if (runtime === 'gemini') runtimeLabel = 'Gemini'; if (runtime === 'codex') runtimeLabel = 'Codex'; + if (runtime === 'kimi') runtimeLabel = 'Kimi'; console.log(` Uninstalling GSD from ${cyan}${runtimeLabel}${reset} at ${cyan}${locationLabel}${reset}\n`); // Check if target directory exists - if (!fs.existsSync(targetDir)) { + // For Kimi, skills live outside targetDir so we still proceed to clean them up + if (!fs.existsSync(targetDir) && !isKimi) { console.log(` ${yellow}⚠${reset} Directory does not exist: ${locationLabel}`); 
console.log(` Nothing to uninstall.\n`); return; @@ -1297,6 +1474,22 @@ function uninstall(isGlobal, runtime = 'claude') { console.log(` ${green}✓${reset} Cleaned GSD sections from config.toml`); } } + } else if (isKimi) { + // Kimi: remove skills from XDG skills directory (~/.config/agents/skills/gsd-*/) + const skillsDir = getKimiSkillsDir(); + if (fs.existsSync(skillsDir)) { + let skillCount = 0; + for (const entry of fs.readdirSync(skillsDir, { withFileTypes: true })) { + if (entry.isDirectory() && entry.name.startsWith('gsd-')) { + fs.rmSync(path.join(skillsDir, entry.name), { recursive: true }); + skillCount++; + } + } + if (skillCount > 0) { + removedCount++; + console.log(` ${green}✓${reset} Removed ${skillCount} Kimi skills from ${skillsDir.replace(os.homedir(), '~')}/`); + } + } } else { // Claude Code & Gemini: remove commands/gsd/ directory const gsdCommandsDir = path.join(targetDir, 'commands', 'gsd'); @@ -1315,13 +1508,15 @@ function uninstall(isGlobal, runtime = 'claude') { console.log(` ${green}✓${reset} Removed get-shit-done/`); } - // 3. Remove GSD agents (gsd-*.md files only) + // 3. 
Remove GSD agents (gsd-*.md; also gsd-*.yaml for Kimi) const agentsDir = path.join(targetDir, 'agents'); if (fs.existsSync(agentsDir)) { const files = fs.readdirSync(agentsDir); let agentCount = 0; for (const file of files) { - if (file.startsWith('gsd-') && file.endsWith('.md')) { + const isAgentMd = file.startsWith('gsd-') && file.endsWith('.md'); + const isAgentYaml = isKimi && file.startsWith('gsd-') && file.endsWith('.yaml'); + if (isAgentMd || isAgentYaml) { fs.unlinkSync(path.join(agentsDir, file)); agentCount++; } @@ -1703,6 +1898,7 @@ function generateManifest(dir, baseDir) { function writeManifest(configDir, runtime = 'claude') { const isOpencode = runtime === 'opencode'; const isCodex = runtime === 'codex'; + const isKimi = runtime === 'kimi'; const gsdDir = path.join(configDir, 'get-shit-done'); const commandsDir = path.join(configDir, 'commands', 'gsd'); const opencodeCommandDir = path.join(configDir, 'command'); @@ -1714,7 +1910,7 @@ function writeManifest(configDir, runtime = 'claude') { for (const [rel, hash] of Object.entries(gsdHashes)) { manifest.files['get-shit-done/' + rel] = hash; } - if (!isOpencode && !isCodex && fs.existsSync(commandsDir)) { + if (!isOpencode && !isCodex && !isKimi && fs.existsSync(commandsDir)) { const cmdHashes = generateManifest(commandsDir); for (const [rel, hash] of Object.entries(cmdHashes)) { manifest.files['commands/gsd/' + rel] = hash; @@ -1741,6 +1937,9 @@ function writeManifest(configDir, runtime = 'claude') { if (file.startsWith('gsd-') && file.endsWith('.md')) { manifest.files['agents/' + file] = fileHash(path.join(agentsDir, file)); } + if (isKimi && file.startsWith('gsd-') && file.endsWith('.yaml')) { + manifest.files['agents/' + file] = fileHash(path.join(agentsDir, file)); + } } } @@ -1805,7 +2004,9 @@ function reportLocalPatches(configDir, runtime = 'claude') { ? '/gsd-reapply-patches' : runtime === 'codex' ? '$gsd-reapply-patches' - : '/gsd:reapply-patches'; + : runtime === 'kimi' + ? 
'/skill:gsd-reapply-patches' + : '/gsd:reapply-patches'; console.log(''); console.log(' ' + yellow + 'Local patches detected' + reset + ' (from v' + meta.from_version + '):'); for (const f of meta.files) { @@ -1824,6 +2025,7 @@ function install(isGlobal, runtime = 'claude') { const isOpencode = runtime === 'opencode'; const isGemini = runtime === 'gemini'; const isCodex = runtime === 'codex'; + const isKimi = runtime === 'kimi'; const dirName = getDirName(runtime); const src = path.join(__dirname, '..'); @@ -1847,6 +2049,7 @@ function install(isGlobal, runtime = 'claude') { if (isOpencode) runtimeLabel = 'OpenCode'; if (isGemini) runtimeLabel = 'Gemini'; if (isCodex) runtimeLabel = 'Codex'; + if (isKimi) runtimeLabel = 'Kimi'; console.log(` Installing for ${cyan}${runtimeLabel}${reset} to ${cyan}${locationLabel}${reset}\n`); @@ -1884,6 +2087,17 @@ function install(isGlobal, runtime = 'claude') { } else { failures.push('skills/gsd-*'); } + } else if (isKimi) { + // Kimi: skills go to XDG path (~/.config/agents/skills/), not under targetDir + const skillsDir = getKimiSkillsDir(); + const gsdSrc = path.join(src, 'commands', 'gsd'); + copyCommandsAsKimiSkills(gsdSrc, skillsDir, 'gsd', pathPrefix, runtime); + const installedSkillCount = fs.readdirSync(skillsDir).filter(d => d.startsWith('gsd-')).length; + if (installedSkillCount > 0) { + console.log(` ${green}✓${reset} Installed ${installedSkillCount} skills to ${skillsDir.replace(os.homedir(), '~')}/`); + } else { + failures.push('skills/gsd-*'); + } } else { // Claude Code & Gemini: nested structure in commands/ directory const commandsDir = path.join(targetDir, 'commands'); @@ -1936,12 +2150,24 @@ function install(isGlobal, runtime = 'claude') { // Convert frontmatter for runtime compatibility if (isOpencode) { content = convertClaudeToOpencodeFrontmatter(content); + fs.writeFileSync(path.join(agentsDest, entry.name), content); } else if (isGemini) { content = convertClaudeToGeminiAgent(content); + 
fs.writeFileSync(path.join(agentsDest, entry.name), content); } else if (isCodex) { content = convertClaudeAgentToCodexAgent(content); + fs.writeFileSync(path.join(agentsDest, entry.name), content); + } else if (isKimi) { + // Kimi: YAML agent definition + separate system prompt file + const kimiAgent = convertClaudeToKimiAgent(content); + if (kimiAgent) { + const baseName = entry.name.replace('.md', ''); + fs.writeFileSync(path.join(agentsDest, `${baseName}.yaml`), kimiAgent.yaml); + fs.writeFileSync(path.join(agentsDest, `${baseName}.md`), kimiAgent.systemPrompt); + } + } else { + fs.writeFileSync(path.join(agentsDest, entry.name), content); } - fs.writeFileSync(path.join(agentsDest, entry.name), content); } } if (verifyInstalled(agentsDest, 'agents')) { @@ -1972,7 +2198,7 @@ function install(isGlobal, runtime = 'claude') { failures.push('VERSION'); } - if (!isCodex) { + if (!isCodex && !isKimi) { // Write package.json to force CommonJS mode for GSD scripts // Prevents "require is not defined" errors when project has "type": "module" // Node.js walks up looking for package.json - this stops inheritance from project @@ -2030,6 +2256,11 @@ function install(isGlobal, runtime = 'claude') { return { settingsPath: null, settings: null, statuslineCommand: null, runtime }; } + if (isKimi) { + // Kimi has no settings.json or hooks — installation complete + return { settingsPath: null, settings: null, statuslineCommand: null, runtime }; + } + // Configure statusline and hooks in settings.json // Gemini shares same hook system as Claude Code for now const settingsPath = path.join(targetDir, 'settings.json'); @@ -2055,7 +2286,7 @@ function install(isGlobal, runtime = 'claude') { } } - // Configure SessionStart hook for update checking (skip for opencode) + // Configure SessionStart hook for update checking (skip for opencode and kimi) if (!isOpencode) { if (!settings.hooks) { settings.hooks = {}; @@ -2111,8 +2342,9 @@ function install(isGlobal, runtime = 'claude') { function 
finishInstall(settingsPath, settings, statuslineCommand, shouldInstallStatusline, runtime = 'claude', isGlobal = true) { const isOpencode = runtime === 'opencode'; const isCodex = runtime === 'codex'; + const isKimi = runtime === 'kimi'; - if (shouldInstallStatusline && !isOpencode && !isCodex) { + if (shouldInstallStatusline && !isOpencode && !isCodex && !isKimi) { settings.statusLine = { type: 'command', command: statuslineCommand @@ -2121,7 +2353,7 @@ function finishInstall(settingsPath, settings, statuslineCommand, shouldInstallS } // Write settings when runtime supports settings.json - if (!isCodex) { + if (!isCodex && !isKimi) { writeSettings(settingsPath, settings); } @@ -2134,10 +2366,12 @@ function finishInstall(settingsPath, settings, statuslineCommand, shouldInstallS if (runtime === 'opencode') program = 'OpenCode'; if (runtime === 'gemini') program = 'Gemini'; if (runtime === 'codex') program = 'Codex'; + if (runtime === 'kimi') program = 'Kimi'; let command = '/gsd:new-project'; if (runtime === 'opencode') command = '/gsd-new-project'; if (runtime === 'codex') command = '$gsd-new-project'; + if (runtime === 'kimi') command = '/skill:gsd-new-project'; console.log(` ${green}Done!${reset} Open a blank directory in ${program} and run ${cyan}${command}${reset}. 
@@ -2219,15 +2453,18 @@ function promptRuntime(callback) { ${cyan}2${reset}) OpenCode ${dim}(~/.config/opencode)${reset} - open source, free models ${cyan}3${reset}) Gemini ${dim}(~/.gemini)${reset} ${cyan}4${reset}) Codex ${dim}(~/.codex)${reset} - ${cyan}5${reset}) All + ${cyan}5${reset}) Kimi ${dim}(~/.config/agents/skills)${reset} + ${cyan}6${reset}) All `); rl.question(` Choice ${dim}[1]${reset}: `, (answer) => { answered = true; rl.close(); const choice = answer.trim() || '1'; - if (choice === '5') { - callback(['claude', 'opencode', 'gemini', 'codex']); + if (choice === '6') { + callback(['claude', 'opencode', 'gemini', 'codex', 'kimi']); + } else if (choice === '5') { + callback(['kimi']); } else if (choice === '4') { callback(['codex']); } else if (choice === '3') { @@ -2333,6 +2570,10 @@ if (process.env.GSD_TEST_MODE) { convertClaudeCommandToCodexSkill, GSD_CODEX_MARKER, CODEX_AGENT_SANDBOX, + convertClaudeToKimiSkill, + convertClaudeToKimiAgent, + convertKimiToolName, + copyCommandsAsKimiSkills, }; } else { @@ -2343,6 +2584,9 @@ if (hasGlobal && hasLocal) { } else if (explicitConfigDir && hasLocal) { console.error(` ${yellow}Cannot use --config-dir with --local${reset}`); process.exit(1); +} else if (hasKimi && hasLocal) { + console.error(` ${yellow}Kimi only supports global install (skills go to XDG ~/.config/agents/skills/)${reset}`); + process.exit(1); } else if (hasUninstall) { if (!hasGlobal && !hasLocal) { console.error(` ${yellow}--uninstall requires --global or --local${reset}`); diff --git a/commands/gsd/report-bug.md b/commands/gsd/report-bug.md new file mode 100644 index 0000000000..a3d167d723 --- /dev/null +++ b/commands/gsd/report-bug.md @@ -0,0 +1,52 @@ +--- +name: gsd:report-bug +description: Report a bug with structured format, severity tracking, and GitHub integration +argument-hint: [optional bug description] +allowed-tools: + - Read + - Write + - Bash + - Glob + - Grep + - AskUserQuestion +--- + + +Report and track a bug with 
structured format, automatic severity classification, diagnostic log capture, and optional GitHub issue creation. + +Routes to the report-bug workflow which handles: +- Directory structure creation +- Content extraction from arguments or conversation +- Severity inference from keywords +- Area inference from file paths +- Diagnostic log capture (git state, error output, log files) +- Bug file creation with frontmatter +- Git commits +- GitHub issue creation (if gh available) +- Next-action routing (investigate, plan fix, continue) + + + +@~/.claude/get-shit-done/workflows/report-bug.md + + + +Arguments: $ARGUMENTS (optional bug description) + +State is resolved in-workflow via `init bugs` and targeted reads. + + + +**Follow the report-bug workflow** from `@~/.claude/get-shit-done/workflows/report-bug.md`. + +The workflow handles all logic including: +1. Directory ensuring +2. Bug detail gathering (title, actual/expected behavior, repro steps) +3. Diagnostic log capture +4. Severity inference and confirmation +5. Area inference from file paths +6. Bug file creation with slug generation +7. Git commits +8. GitHub issue creation +9. Next-action routing + diff --git a/commands/gsd/set-profile.md b/commands/gsd/set-profile.md index ab24458d13..fdb6b1da2e 100644 --- a/commands/gsd/set-profile.md +++ b/commands/gsd/set-profile.md @@ -1,6 +1,6 @@ --- name: gsd:set-profile -description: Switch model profile for GSD agents (quality/balanced/budget) +description: Switch model profile for GSD agents (quality/balanced/budget/adaptive) argument-hint: allowed-tools: - Read @@ -12,7 +12,7 @@ allowed-tools: Switch the model profile used by GSD agents. Controls which Claude model each agent uses, balancing quality vs token spend. 
Routes to the set-profile workflow which handles: -- Argument validation (quality/balanced/budget) +- Argument validation (quality/balanced/budget/adaptive) - Config file creation if missing - Profile update in config.json - Confirmation with model table display diff --git a/commands/gsd/switch-milestone.md b/commands/gsd/switch-milestone.md new file mode 100644 index 0000000000..4ca60c4af9 --- /dev/null +++ b/commands/gsd/switch-milestone.md @@ -0,0 +1,30 @@ +--- +type: prompt +name: gsd:switch-milestone +description: Switch active milestone for concurrent work +argument-hint: +allowed-tools: + - Read + - Bash +--- + + +Switch the active milestone to work on a different one concurrently. + +Reads available milestones, warns about in-progress work on the current milestone, and updates the ACTIVE_MILESTONE pointer. + + + +**Load these files NOW (before proceeding):** + +- @~/.claude/get-shit-done/workflows/switch-milestone.md (main workflow) + + + +**User input:** +- Target milestone: {{milestone-name}} + + + +Follow switch-milestone.md workflow end-to-end. + diff --git a/docs/USER-GUIDE.md b/docs/USER-GUIDE.md index 2d02cafd45..12d0a49fd8 100644 --- a/docs/USER-GUIDE.md +++ b/docs/USER-GUIDE.md @@ -168,6 +168,7 @@ rapid prototyping phases where test infrastructure isn't the focus. | `/gsd:audit-milestone` | Verify milestone met its definition of done | Before completing milestone | | `/gsd:complete-milestone` | Archive milestone, tag release | All phases verified | | `/gsd:new-milestone [name]` | Start next version cycle | After completing a milestone | +| `/gsd:switch-milestone ` | Switch active milestone for concurrent work | When working on multiple milestones | ### Navigation @@ -197,11 +198,13 @@ rapid prototyping phases where test infrastructure isn't the focus. 
|---------|---------|-------------| | `/gsd:map-codebase` | Analyze existing codebase | Before `/gsd:new-project` on existing code | | `/gsd:quick` | Ad-hoc task with GSD guarantees | Bug fixes, small features, config changes | +| `/gsd:report-bug [desc]` | Report bug with severity tracking and GitHub issues | When you discover a bug | | `/gsd:debug [desc]` | Systematic debugging with persistent state | When something breaks | | `/gsd:add-todo [desc]` | Capture an idea for later | Think of something during a session | | `/gsd:check-todos` | List pending todos | Review captured ideas | | `/gsd:settings` | Configure workflow toggles and model profile | Change model, toggle agents | | `/gsd:set-profile ` | Quick profile switch | Change cost/quality tradeoff | +| `/gsd:add-tests [instructions]` | Generate unit and E2E tests for completed phase | After execution, before milestone completion | | `/gsd:reapply-patches` | Restore local modifications after update | After `/gsd:update` if you had local edits | --- @@ -241,7 +244,7 @@ GSD stores project settings in `.planning/config.json`. Configure during `/gsd:n |---------|---------|---------|------------------| | `mode` | `interactive`, `yolo` | `interactive` | `yolo` auto-approves decisions; `interactive` confirms at each step | | `depth` | `quick`, `standard`, `comprehensive` | `standard` | Planning thoroughness: 3-5, 5-8, or 8-12 phases | -| `model_profile` | `quality`, `balanced`, `budget` | `balanced` | Model tier for each agent (see table below) | +| `model_profile` | `quality`, `balanced`, `budget`, `adaptive` | `balanced` | Model tier for each agent (see table below). `adaptive` auto-selects per-plan based on complexity. | ### Planning Settings @@ -283,24 +286,27 @@ Disable these to speed up phases in familiar domains or when conserving tokens. 
### Model Profiles (Per-Agent Breakdown) -| Agent | `quality` | `balanced` | `budget` | -|-------|-----------|------------|----------| -| gsd-planner | Opus | Opus | Sonnet | -| gsd-roadmapper | Opus | Sonnet | Sonnet | -| gsd-executor | Opus | Sonnet | Sonnet | -| gsd-phase-researcher | Opus | Sonnet | Haiku | -| gsd-project-researcher | Opus | Sonnet | Haiku | -| gsd-research-synthesizer | Sonnet | Sonnet | Haiku | -| gsd-debugger | Opus | Sonnet | Sonnet | -| gsd-codebase-mapper | Sonnet | Haiku | Haiku | -| gsd-verifier | Sonnet | Sonnet | Haiku | -| gsd-plan-checker | Sonnet | Sonnet | Haiku | -| gsd-integration-checker | Sonnet | Sonnet | Haiku | +| Agent | `quality` | `balanced` | `budget` | `adaptive` | +|-------|-----------|------------|----------|------------| +| gsd-planner | Opus | Opus | Sonnet | Sonnet→Opus | +| gsd-roadmapper | Opus | Sonnet | Sonnet | Sonnet→Opus | +| gsd-executor | Opus | Sonnet | Sonnet | Haiku→Sonnet | +| gsd-phase-researcher | Opus | Sonnet | Haiku | Haiku→Opus | +| gsd-project-researcher | Opus | Sonnet | Haiku | Haiku→Opus | +| gsd-research-synthesizer | Sonnet | Sonnet | Haiku | Haiku→Sonnet | +| gsd-debugger | Opus | Sonnet | Sonnet | Sonnet→Opus | +| gsd-codebase-mapper | Sonnet | Haiku | Haiku | Haiku→Sonnet | +| gsd-verifier | Sonnet | Sonnet | Haiku | Haiku→Sonnet | +| gsd-plan-checker | Sonnet | Sonnet | Haiku | Haiku→Sonnet | +| gsd-integration-checker | Sonnet | Sonnet | Haiku | Haiku→Sonnet | + +*Adaptive column: range from simple→complex tier. Actual model depends on per-plan complexity scoring.* **Profile philosophy:** - **quality** -- Opus for all decision-making agents, Sonnet for read-only verification. Use when quota is available and the work is critical. - **balanced** -- Opus only for planning (where architecture decisions happen), Sonnet for everything else. The default for good reason. - **budget** -- Sonnet for anything that writes code, Haiku for research and verification. 
Use for high-volume work or less critical phases. +- **adaptive** -- Auto-selects per-plan based on complexity scoring. Simple plans get Haiku/Sonnet, complex plans get Opus. Best cost-quality tradeoff for mixed-complexity milestones. --- @@ -380,6 +386,38 @@ claude --dangerously-skip-permissions /gsd:remove-phase 7 # Descope phase 7 and renumber ``` +### Concurrent Milestones + +Work on multiple milestones simultaneously (e.g., v2.0 features + v1.5.1 hotfix): + +``` +/gsd:new-milestone "v1.5.1 Hotfix" # Creates milestone-scoped directory +/gsd:switch-milestone v2.0-features # Switch back to feature work +/gsd:progress # See status of active milestone +``` + +Each milestone gets isolated state under `.planning/milestones//`: + +``` +.planning/ +├── PROJECT.md # Global (shared) +├── MILESTONES.md # Global (shared) +├── ACTIVE_MILESTONE # Pointer: "v2.0" +├── milestones/ +│ ├── v2.0/ +│ │ ├── STATE.md +│ │ ├── ROADMAP.md +│ │ ├── REQUIREMENTS.md +│ │ ├── config.json +│ │ └── phases/ +│ └── v1.5.1-hotfix/ +│ ├── STATE.md +│ ├── ROADMAP.md +│ └── phases/ +``` + +When no second milestone exists, everything stays in `.planning/` as usual. 
+ --- ## Troubleshooting @@ -449,11 +487,8 @@ For reference, here is what GSD creates in your project: ``` .planning/ PROJECT.md # Project vision and context (always loaded) - REQUIREMENTS.md # Scoped v1/v2 requirements with IDs - ROADMAP.md # Phase breakdown with status tracking - STATE.md # Decisions, blockers, session memory - config.json # Workflow configuration - MILESTONES.md # Completed milestone archive + MILESTONES.md # Completed milestone archive (global, shared) + ACTIVE_MILESTONE # Active milestone pointer (multi-milestone mode only) research/ # Domain research from /gsd:new-project todos/ pending/ # Captured ideas awaiting work @@ -461,6 +496,12 @@ For reference, here is what GSD creates in your project: debug/ # Active debug sessions resolved/ # Archived debug sessions codebase/ # Brownfield codebase mapping (from /gsd:map-codebase) + + # Single-milestone layout (default): + REQUIREMENTS.md # Scoped v1/v2 requirements with IDs + ROADMAP.md # Phase breakdown with status tracking + STATE.md # Decisions, blockers, session memory + config.json # Workflow configuration phases/ XX-phase-name/ XX-YY-PLAN.md # Atomic execution plans @@ -468,4 +509,17 @@ For reference, here is what GSD creates in your project: CONTEXT.md # Your implementation preferences RESEARCH.md # Ecosystem research findings VERIFICATION.md # Post-execution verification results + + # Multi-milestone layout (when concurrent milestones exist): + milestones/ + v2.0/ + STATE.md + ROADMAP.md + REQUIREMENTS.md + config.json + phases/ + v1.5.1-hotfix/ + STATE.md + ROADMAP.md + phases/ ``` diff --git a/get-shit-done/bin/gsd-tools.cjs b/get-shit-done/bin/gsd-tools.cjs index fa404eb491..e9c4014360 100755 --- a/get-shit-done/bin/gsd-tools.cjs +++ b/get-shit-done/bin/gsd-tools.cjs @@ -15,6 +15,8 @@ * state get [section] Get STATE.md content or section * state patch --field val ... 
Batch update STATE.md fields * resolve-model Get model for agent based on profile + * resolve-adaptive-model Resolve model with plan complexity context + * [--context ''] * find-phase Find phase directory by number * commit [--files f1 f2] Commit planning docs * verify-summary Verify a SUMMARY.md file @@ -61,12 +63,19 @@ * Todos: * todo complete Move todo from pending to completed * + * Bugs: + * bug list [--area X] [--severity Y] List/filter bug reports + * [--status Z] + * bug update --status Update bug status + * bug resolve Mark bug as resolved + * * Scaffolding: * scaffold context --phase Create CONTEXT.md template * scaffold uat --phase Create UAT.md template * scaffold verification --phase Create VERIFICATION.md template * scaffold phase-dir --phase Create phase directory * --name + * scaffold bugs Create bugs directories * * Frontmatter CRUD: * frontmatter get [--field k] Extract frontmatter as JSON @@ -121,6 +130,7 @@ * init verify-work All context for verify-work workflow * init phase-op Generic phase operation context * init todos [area] All context for todo workflows + * init bugs All context for bug reporting workflow * init milestone-op All context for milestone operations * init map-codebase All context for map-codebase workflow * init progress All context for progress workflow @@ -129,6 +139,7 @@ const fs = require('fs'); const path = require('path'); const { error } = require('./lib/core.cjs'); +const { setMilestoneOverride } = require('./lib/paths.cjs'); const state = require('./lib/state.cjs'); const phase = require('./lib/phase.cjs'); const roadmap = require('./lib/roadmap.cjs'); @@ -165,6 +176,21 @@ async function main() { error(`Invalid --cwd: ${cwd}`); } + // Optional --milestone override for multi-milestone support + const msEqArg = args.find(arg => arg.startsWith('--milestone=')); + const msIdx = args.indexOf('--milestone'); + if (msEqArg) { + const value = msEqArg.slice('--milestone='.length).trim(); + if (!value) error('Missing value for 
--milestone'); + args.splice(args.indexOf(msEqArg), 1); + setMilestoneOverride(value); + } else if (msIdx !== -1) { + const value = args[msIdx + 1]; + if (!value || value.startsWith('--')) error('Missing value for --milestone'); + args.splice(msIdx, 2); + setMilestoneOverride(value); + } + const rawIndex = args.indexOf('--raw'); const raw = rawIndex !== -1; if (rawIndex !== -1) args.splice(rawIndex, 1); @@ -252,6 +278,16 @@ async function main() { break; } + case 'resolve-adaptive-model': { + const contextIdx = args.indexOf('--context'); + let context = null; + if (contextIdx !== -1 && args[contextIdx + 1]) { + try { context = JSON.parse(args[contextIdx + 1]); } catch { error('Invalid --context JSON'); } + } + commands.cmdResolveAdaptiveModel(cwd, args[1], context, raw); + break; + } + case 'find-phase': { phase.cmdFindPhase(cwd, args[1], raw); break; @@ -459,8 +495,16 @@ async function main() { milestoneName = nameArgs.join(' ') || null; } milestone.cmdMilestoneComplete(cwd, args[2], { name: milestoneName, archivePhases }, raw); + } else if (subcommand === 'create') { + milestone.cmdMilestoneCreate(cwd, args[2], raw); + } else if (subcommand === 'switch') { + milestone.cmdMilestoneSwitch(cwd, args[2], raw); + } else if (subcommand === 'list') { + milestone.cmdMilestoneList(cwd, raw); + } else if (subcommand === 'status') { + milestone.cmdMilestoneStatus(cwd, raw); } else { - error('Unknown milestone subcommand. Available: complete'); + error('Unknown milestone subcommand. Available: complete, create, switch, list, status'); } break; } @@ -494,6 +538,28 @@ async function main() { break; } + case 'bug': { + const subcommand = args[1]; + if (subcommand === 'list') { + const areaIdx = args.indexOf('--area'); + const severityIdx = args.indexOf('--severity'); + const statusIdx = args.indexOf('--status'); + commands.cmdBugList(cwd, { + area: areaIdx !== -1 ? args[areaIdx + 1] : null, + severity: severityIdx !== -1 ? 
args[severityIdx + 1] : null, + status: statusIdx !== -1 ? args[statusIdx + 1] : null, + }, raw); + } else if (subcommand === 'update') { + const statusIdx = args.indexOf('--status'); + commands.cmdBugUpdate(cwd, args[2], statusIdx !== -1 ? args[statusIdx + 1] : null, raw); + } else if (subcommand === 'resolve') { + commands.cmdBugResolve(cwd, args[2], raw); + } else { + error('Unknown bug subcommand. Available: list, update, resolve'); + } + break; + } + case 'scaffold': { const scaffoldType = args[1]; const phaseIndex = args.indexOf('--phase'); @@ -536,6 +602,9 @@ async function main() { case 'todos': init.cmdInitTodos(cwd, args[2], raw); break; + case 'bugs': + init.cmdInitBugs(cwd, raw); + break; case 'milestone-op': init.cmdInitMilestoneOp(cwd, raw); break; @@ -546,7 +615,7 @@ async function main() { init.cmdInitProgress(cwd, raw); break; default: - error(`Unknown init workflow: ${workflow}\nAvailable: execute-phase, plan-phase, new-project, new-milestone, quick, resume, verify-work, phase-op, todos, milestone-op, map-codebase, progress`); + error(`Unknown init workflow: ${workflow}\nAvailable: execute-phase, plan-phase, new-project, new-milestone, quick, resume, verify-work, phase-op, todos, bugs, milestone-op, map-codebase, progress`); } break; } diff --git a/get-shit-done/bin/lib/commands.cjs b/get-shit-done/bin/lib/commands.cjs index 829ba993a2..c01311a48e 100644 --- a/get-shit-done/bin/lib/commands.cjs +++ b/get-shit-done/bin/lib/commands.cjs @@ -4,8 +4,9 @@ const fs = require('fs'); const path = require('path'); const { execSync } = require('child_process'); -const { safeReadFile, loadConfig, isGitIgnored, execGit, normalizePhaseName, comparePhaseNum, getArchivedPhaseDirs, generateSlugInternal, getMilestoneInfo, resolveModelInternal, MODEL_PROFILES, output, error, findPhaseInternal } = require('./core.cjs'); +const { safeReadFile, loadConfig, isGitIgnored, execGit, normalizePhaseName, comparePhaseNum, getArchivedPhaseDirs, generateSlugInternal, 
getMilestoneInfo, resolveModelInternal, evaluateComplexity, MODEL_PROFILES, output, error, findPhaseInternal } = require('./core.cjs'); const { extractFrontmatter } = require('./frontmatter.cjs'); +const { resolvePlanningPaths } = require('./paths.cjs'); function cmdGenerateSlug(text, raw) { if (!text) { @@ -42,7 +43,8 @@ function cmdCurrentTimestamp(format, raw) { } function cmdListTodos(cwd, area, raw) { - const pendingDir = path.join(cwd, '.planning', 'todos', 'pending'); + const paths = resolvePlanningPaths(cwd); + const pendingDir = path.join(paths.abs.planningRoot, 'todos', 'pending'); let count = 0; const todos = []; @@ -68,7 +70,7 @@ function cmdListTodos(cwd, area, raw) { created: createdMatch ? createdMatch[1].trim() : 'unknown', title: titleMatch ? titleMatch[1].trim() : 'Untitled', area: todoArea, - path: path.join('.planning', 'todos', 'pending', file), + path: '.planning/todos/pending/' + file, }); } catch {} } @@ -97,7 +99,7 @@ function cmdVerifyPathExists(cwd, targetPath, raw) { } function cmdHistoryDigest(cwd, raw) { - const phasesDir = path.join(cwd, '.planning', 'phases'); + const phasesDir = resolvePlanningPaths(cwd).abs.phases; const digest = { phases: {}, decisions: [], tech_stack: new Set() }; // Collect all phase directories: archived + current @@ -213,6 +215,48 @@ function cmdResolveModel(cwd, agentType, raw) { output(result, raw, model); } +function cmdResolveAdaptiveModel(cwd, agentType, context, raw) { + if (!agentType) { + error('agent-type required'); + } + + const config = loadConfig(cwd); + const profile = config.model_profile || 'balanced'; + const model = resolveModelInternal(cwd, agentType, context); + + const result = { model, profile }; + + if (profile === 'adaptive') { + const complexity = evaluateComplexity(context || null); + result.complexity = complexity; + } + + const agentModels = MODEL_PROFILES[agentType]; + if (!agentModels) result.unknown_agent = true; + + // Usage logging + if (profile === 'adaptive') { + const 
settings = config.adaptive_settings; + if (settings?.log_selections) { + const logPath = path.join(cwd, '.planning', 'adaptive-usage.json'); + try { + let log = []; + try { log = JSON.parse(fs.readFileSync(logPath, 'utf-8')); } catch {} + log.push({ + timestamp: new Date().toISOString(), + agent: agentType, + tier: result.complexity?.tier || 'unknown', + score: result.complexity?.score ?? null, + model, + }); + fs.writeFileSync(logPath, JSON.stringify(log, null, 2)); + } catch {} + } + } + + output(result, raw, model); +} + function cmdCommit(cwd, message, files, raw, amend) { if (!message && !amend) { error('commit message required'); @@ -380,8 +424,9 @@ async function cmdWebsearch(query, options, raw) { } function cmdProgressRender(cwd, format, raw) { - const phasesDir = path.join(cwd, '.planning', 'phases'); - const roadmapPath = path.join(cwd, '.planning', 'ROADMAP.md'); + const paths = resolvePlanningPaths(cwd); + const phasesDir = paths.abs.phases; + const roadmapPath = paths.abs.roadmap; const milestone = getMilestoneInfo(cwd); const phases = []; @@ -452,8 +497,9 @@ function cmdTodoComplete(cwd, filename, raw) { error('filename required for todo complete'); } - const pendingDir = path.join(cwd, '.planning', 'todos', 'pending'); - const completedDir = path.join(cwd, '.planning', 'todos', 'completed'); + const planningRoot = resolvePlanningPaths(cwd).abs.planningRoot; + const pendingDir = path.join(planningRoot, 'todos', 'pending'); + const completedDir = path.join(planningRoot, 'todos', 'completed'); const sourcePath = path.join(pendingDir, filename); if (!fs.existsSync(sourcePath)) { @@ -474,6 +520,100 @@ function cmdTodoComplete(cwd, filename, raw) { output({ completed: true, file: filename, date: today }, raw, 'completed'); } +function cmdBugList(cwd, filters, raw) { + const planningRoot = resolvePlanningPaths(cwd).abs.planningRoot; + const bugsDir = path.join(planningRoot, 'bugs'); + + let count = 0; + const bugs = []; + + try { + const files = 
fs.readdirSync(bugsDir).filter(f => /^BUG-\d+\.md$/.test(f)); + + for (const file of files) { + try { + const content = fs.readFileSync(path.join(bugsDir, file), 'utf-8'); + const fm = extractFrontmatter(content); + + const bugSeverity = fm.severity || 'medium'; + const bugStatus = fm.status || 'reported'; + const bugArea = fm.area || 'general'; + + if (filters.area && bugArea !== filters.area) continue; + if (filters.severity && bugSeverity !== filters.severity) continue; + if (filters.status && bugStatus !== filters.status) continue; + + count++; + bugs.push({ + id: fm.id || file.replace('.md', ''), + title: fm.title || 'Untitled', + severity: bugSeverity, + status: bugStatus, + area: bugArea, + created: fm.created || 'unknown', + file, + }); + } catch {} + } + } catch {} + + const result = { count, bugs }; + output(result, raw, count.toString()); +} + +function cmdBugUpdate(cwd, bugId, newStatus, raw) { + if (!bugId) { + error('bug id required for bug update'); + } + if (!newStatus) { + error('status required for bug update'); + } + + const validStatuses = ['reported', 'investigating', 'fixing', 'resolved']; + if (!validStatuses.includes(newStatus)) { + error(`Invalid status: ${newStatus}. Valid: ${validStatuses.join(', ')}`); + } + + const planningRoot = resolvePlanningPaths(cwd).abs.planningRoot; + const bugsDir = path.join(planningRoot, 'bugs'); + const resolvedDir = path.join(planningRoot, 'bugs', 'resolved'); + + // Normalize bugId to filename + const filename = bugId.endsWith('.md') ? 
bugId : `${bugId}.md`; + const sourcePath = path.join(bugsDir, filename); + + if (!fs.existsSync(sourcePath)) { + error(`Bug not found: ${bugId}`); + } + + let content = fs.readFileSync(sourcePath, 'utf-8'); + const now = new Date().toISOString(); + + // Update status in frontmatter + content = content.replace(/^status:\s*.+$/m, `status: ${newStatus}`); + // Update updated timestamp + if (content.match(/^updated:\s*.+$/m)) { + content = content.replace(/^updated:\s*.+$/m, `updated: ${now}`); + } else { + // Insert updated after status line + content = content.replace(/^(status:\s*.+)$/m, `$1\nupdated: ${now}`); + } + + if (newStatus === 'resolved') { + fs.mkdirSync(resolvedDir, { recursive: true }); + fs.writeFileSync(path.join(resolvedDir, filename), content, 'utf-8'); + fs.unlinkSync(sourcePath); + output({ updated: true, id: bugId, status: newStatus, moved: 'resolved' }, raw, 'resolved'); + } else { + fs.writeFileSync(sourcePath, content, 'utf-8'); + output({ updated: true, id: bugId, status: newStatus }, raw, newStatus); + } +} + +function cmdBugResolve(cwd, bugId, raw) { + cmdBugUpdate(cwd, bugId, 'resolved', raw); +} + function cmdScaffold(cwd, type, options, raw) { const { phase, name } = options; const padded = phase ? 
normalizePhaseName(phase) : '00'; @@ -505,13 +645,22 @@ function cmdScaffold(cwd, type, options, raw) { content = `---\nphase: "${padded}"\nname: "${name || phaseInfo?.phase_name || 'Unnamed'}"\ncreated: ${today}\nstatus: pending\n---\n\n# Phase ${phase}: ${name || phaseInfo?.phase_name || 'Unnamed'} — Verification\n\n## Goal-Backward Verification\n\n**Phase Goal:** [From ROADMAP.md]\n\n## Checks\n\n| # | Requirement | Status | Evidence |\n|---|------------|--------|----------|\n\n## Result\n\n_Pending verification_\n`; break; } + case 'bugs': { + const planningRoot = resolvePlanningPaths(cwd).abs.planningRoot; + const bugsPath = path.join(planningRoot, 'bugs'); + const resolvedPath = path.join(planningRoot, 'bugs', 'resolved'); + fs.mkdirSync(bugsPath, { recursive: true }); + fs.mkdirSync(resolvedPath, { recursive: true }); + output({ created: true, directories: ['.planning/bugs', '.planning/bugs/resolved'] }, raw, 'created'); + return; + } case 'phase-dir': { if (!phase || !name) { error('phase and name required for phase-dir scaffold'); } const slug = generateSlugInternal(name); const dirName = `${padded}-${slug}`; - const phasesParent = path.join(cwd, '.planning', 'phases'); + const phasesParent = resolvePlanningPaths(cwd).abs.phases; fs.mkdirSync(phasesParent, { recursive: true }); const dirPath = path.join(phasesParent, dirName); fs.mkdirSync(dirPath, { recursive: true }); @@ -519,7 +668,7 @@ function cmdScaffold(cwd, type, options, raw) { return; } default: - error(`Unknown scaffold type: ${type}. Available: context, uat, verification, phase-dir`); + error(`Unknown scaffold type: ${type}. 
Available: context, uat, verification, phase-dir, bugs`); } if (fs.existsSync(filePath)) { @@ -539,10 +688,14 @@ module.exports = { cmdVerifyPathExists, cmdHistoryDigest, cmdResolveModel, + cmdResolveAdaptiveModel, cmdCommit, cmdSummaryExtract, cmdWebsearch, cmdProgressRender, cmdTodoComplete, + cmdBugList, + cmdBugUpdate, + cmdBugResolve, cmdScaffold, }; diff --git a/get-shit-done/bin/lib/config.cjs b/get-shit-done/bin/lib/config.cjs index 0d9a9260df..583bdcc6fd 100644 --- a/get-shit-done/bin/lib/config.cjs +++ b/get-shit-done/bin/lib/config.cjs @@ -5,10 +5,12 @@ const fs = require('fs'); const path = require('path'); const { output, error } = require('./core.cjs'); +const { resolvePlanningPaths } = require('./paths.cjs'); function cmdConfigEnsureSection(cwd, raw) { - const configPath = path.join(cwd, '.planning', 'config.json'); - const planningDir = path.join(cwd, '.planning'); + const paths = resolvePlanningPaths(cwd); + const configPath = paths.abs.config; + const planningDir = paths.abs.base; // Ensure .planning directory exists try { @@ -67,7 +69,7 @@ function cmdConfigEnsureSection(cwd, raw) { try { fs.writeFileSync(configPath, JSON.stringify(defaults, null, 2), 'utf-8'); - const result = { created: true, path: '.planning/config.json' }; + const result = { created: true, path: paths.rel.config }; output(result, raw, 'created'); } catch (err) { error('Failed to create config.json: ' + err.message); @@ -75,7 +77,7 @@ function cmdConfigEnsureSection(cwd, raw) { } function cmdConfigSet(cwd, keyPath, value, raw) { - const configPath = path.join(cwd, '.planning', 'config.json'); + const configPath = resolvePlanningPaths(cwd).abs.config; if (!keyPath) { error('Usage: config-set '); @@ -120,7 +122,7 @@ function cmdConfigSet(cwd, keyPath, value, raw) { } function cmdConfigGet(cwd, keyPath, raw) { - const configPath = path.join(cwd, '.planning', 'config.json'); + const configPath = resolvePlanningPaths(cwd).abs.config; if (!keyPath) { error('Usage: config-get '); 
diff --git a/get-shit-done/bin/lib/core.cjs b/get-shit-done/bin/lib/core.cjs index 6ef6ccb2a1..2d5a46287b 100644 --- a/get-shit-done/bin/lib/core.cjs +++ b/get-shit-done/bin/lib/core.cjs @@ -5,6 +5,7 @@ const fs = require('fs'); const path = require('path'); const { execSync } = require('child_process'); +const { resolvePlanningPaths } = require('./paths.cjs'); // ─── Path helpers ──────────────────────────────────────────────────────────── @@ -29,6 +30,136 @@ const MODEL_PROFILES = { 'gsd-integration-checker': { quality: 'sonnet', balanced: 'sonnet', budget: 'haiku' }, }; +// ─── Adaptive Model Tiers ─────────────────────────────────────────────────── + +const ADAPTIVE_TIERS = { + simple: { + 'gsd-planner': 'sonnet', + 'gsd-roadmapper': 'sonnet', + 'gsd-executor': 'haiku', + 'gsd-phase-researcher': 'haiku', + 'gsd-project-researcher': 'haiku', + 'gsd-research-synthesizer': 'haiku', + 'gsd-debugger': 'sonnet', + 'gsd-codebase-mapper': 'haiku', + 'gsd-verifier': 'haiku', + 'gsd-plan-checker': 'haiku', + 'gsd-integration-checker': 'haiku', + }, + medium: { + 'gsd-planner': 'opus', + 'gsd-roadmapper': 'sonnet', + 'gsd-executor': 'sonnet', + 'gsd-phase-researcher': 'sonnet', + 'gsd-project-researcher': 'sonnet', + 'gsd-research-synthesizer': 'sonnet', + 'gsd-debugger': 'sonnet', + 'gsd-codebase-mapper': 'haiku', + 'gsd-verifier': 'sonnet', + 'gsd-plan-checker': 'sonnet', + 'gsd-integration-checker': 'sonnet', + }, + complex: { + 'gsd-planner': 'opus', + 'gsd-roadmapper': 'opus', + 'gsd-executor': 'sonnet', + 'gsd-phase-researcher': 'opus', + 'gsd-project-researcher': 'opus', + 'gsd-research-synthesizer': 'sonnet', + 'gsd-debugger': 'opus', + 'gsd-codebase-mapper': 'sonnet', + 'gsd-verifier': 'sonnet', + 'gsd-plan-checker': 'sonnet', + 'gsd-integration-checker': 'sonnet', + }, +}; + +// ─── Complexity Evaluation ────────────────────────────────────────────────── + +const MODEL_RANK = { haiku: 0, sonnet: 1, opus: 2 }; +const RANK_MODEL = ['haiku', 'sonnet', 'opus']; + 
+function evaluateComplexity(context) { + if (!context) { + return { score: 5, tier: 'medium', factors: ['default (no context)'] }; + } + + let score = 0; + const factors = []; + + // Files modified: 1pt each, max 5 + const filesCount = Array.isArray(context.files_modified) ? context.files_modified.length : 0; + const filesPts = Math.min(filesCount, 5); + if (filesPts > 0) { + score += filesPts; + factors.push(`files_modified: ${filesCount} (+${filesPts})`); + } + + // Task count: 3-5 = 1pt, 6+ = 2pts + const taskCount = typeof context.task_count === 'number' ? context.task_count : 0; + if (taskCount >= 6) { + score += 2; + factors.push(`task_count: ${taskCount} (+2)`); + } else if (taskCount >= 3) { + score += 1; + factors.push(`task_count: ${taskCount} (+1)`); + } + + // Keyword scoring on objective + const obj = typeof context.objective === 'string' ? context.objective.toLowerCase() : ''; + + if (/architect|system[\s._-]design|data[\s._-]model/.test(obj)) { + score += 3; + factors.push('architecture keywords (+3)'); + } + if (/integrat|external[\s._-]api|third[\s._-]party|webhook/.test(obj)) { + score += 2; + factors.push('integration keywords (+2)'); + } + if (/cross[\s._-]cutting|multiple[\s._-]modules|refactor[\s._-]across/.test(obj)) { + score += 2; + factors.push('cross-cutting keywords (+2)'); + } + if (/new[\s._-]library|unfamiliar|prototype/.test(obj)) { + score += 3; + factors.push('novel pattern keywords (+3)'); + } + if (/refactor|restructure|migrate/.test(obj)) { + score += 1; + factors.push('refactoring keywords (+1)'); + } + + // Plan type: TDD plans are inherently more complex + if (context.plan_type === 'tdd') { + score += 2; + factors.push('tdd plan type (+2)'); + } + + // Dependencies: plans that integrate previous outputs + const depsCount = Array.isArray(context.depends_on) ? 
context.depends_on.length : 0; + if (depsCount > 0) { + score += 1; + factors.push(`depends_on: ${depsCount} deps (+1)`); + } + + // Test files in modified list + const testFileCount = Array.isArray(context.files_modified) + ? context.files_modified.filter(f => /\.(test|spec)\.[tj]sx?$|__tests__/.test(f)).length + : 0; + if (testFileCount > 0) { + score += 1; + factors.push(`test_files: ${testFileCount} (+1)`); + } + + // Determine tier + let tier; + if (score <= 3) tier = 'simple'; + else if (score <= 7) tier = 'medium'; + else tier = 'complex'; + + return { score, tier, factors }; +} + // ─── Output helpers ─────────────────────────────────────────────────────────── function output(result, raw, rawValue) { @@ -64,8 +195,9 @@ function safeReadFile(filePath) { } } -function loadConfig(cwd) { - const configPath = path.join(cwd, '.planning', 'config.json'); +function loadConfig(cwd, paths) { + const p = paths || resolvePlanningPaths(cwd); + const configPath = p.abs.config; const defaults = { model_profile: 'balanced', commit_docs: true, @@ -114,6 +246,7 @@ function loadConfig(cwd) { parallelization, brave_search: get('brave_search') ?? 
defaults.brave_search, model_overrides: parsed.model_overrides || null, + adaptive_settings: parsed.adaptive_settings || null, }; } catch { return defaults; @@ -243,18 +376,19 @@ function searchPhaseInDir(baseDir, relBase, normalized) { } } -function findPhaseInternal(cwd, phase) { +function findPhaseInternal(cwd, phase, paths) { if (!phase) return null; - const phasesDir = path.join(cwd, '.planning', 'phases'); + const p = paths || resolvePlanningPaths(cwd); + const phasesDir = p.abs.phases; const normalized = normalizePhaseName(phase); // Search current phases first - const current = searchPhaseInDir(phasesDir, '.planning/phases', normalized); + const current = searchPhaseInDir(phasesDir, p.rel.phases, normalized); if (current) return current; // Search archived milestone phases (newest first) - const milestonesDir = path.join(cwd, '.planning', 'milestones'); + const milestonesDir = p.global.abs.milestonesDir; if (!fs.existsSync(milestonesDir)) return null; try { @@ -280,8 +414,9 @@ function findPhaseInternal(cwd, phase) { return null; } -function getArchivedPhaseDirs(cwd) { - const milestonesDir = path.join(cwd, '.planning', 'milestones'); +function getArchivedPhaseDirs(cwd, paths) { + const p = paths || resolvePlanningPaths(cwd); + const milestonesDir = p.global.abs.milestonesDir; const results = []; if (!fs.existsSync(milestonesDir)) return results; @@ -317,9 +452,10 @@ function getArchivedPhaseDirs(cwd) { // ─── Roadmap & model utilities ──────────────────────────────────────────────── -function getRoadmapPhaseInternal(cwd, phaseNum) { +function getRoadmapPhaseInternal(cwd, phaseNum, paths) { if (!phaseNum) return null; - const roadmapPath = path.join(cwd, '.planning', 'ROADMAP.md'); + const p = paths || resolvePlanningPaths(cwd); + const roadmapPath = p.abs.roadmap; if (!fs.existsSync(roadmapPath)) return null; try { @@ -351,21 +487,45 @@ function getRoadmapPhaseInternal(cwd, phaseNum) { } } -function resolveModelInternal(cwd, agentType) { +function 
resolveModelInternal(cwd, agentType, context) { const config = loadConfig(cwd); // Check per-agent override first const override = config.model_overrides?.[agentType]; if (override) { - return override === 'opus' ? 'inherit' : override; + return override; } // Fall back to profile lookup const profile = config.model_profile || 'balanced'; + + // Adaptive profile: evaluate complexity and select from tier + if (profile === 'adaptive') { + const complexity = evaluateComplexity(context || null); + const tierModels = ADAPTIVE_TIERS[complexity.tier]; + let resolved = (tierModels && tierModels[agentType]) || 'sonnet'; + + // Clamp to min_model / max_model bounds + const settings = config.adaptive_settings; + if (settings) { + const resolvedRank = MODEL_RANK[resolved] ?? 1; + if (settings.min_model && MODEL_RANK[settings.min_model] !== undefined) { + const minRank = MODEL_RANK[settings.min_model]; + if (resolvedRank < minRank) resolved = RANK_MODEL[minRank]; + } + if (settings.max_model && MODEL_RANK[settings.max_model] !== undefined) { + const maxRank = MODEL_RANK[settings.max_model]; + if (resolvedRank > maxRank) resolved = RANK_MODEL[maxRank]; + } + } + + return resolved; + } + const agentModels = MODEL_PROFILES[agentType]; if (!agentModels) return 'sonnet'; const resolved = agentModels[profile] || agentModels['balanced'] || 'sonnet'; - return resolved === 'opus' ? 'inherit' : resolved; + return resolved; } // ─── Misc utilities ─────────────────────────────────────────────────────────── @@ -385,9 +545,10 @@ function generateSlugInternal(text) { return text.toLowerCase().replace(/[^a-z0-9]+/g, '-').replace(/^-+|-+$/g, ''); } -function getMilestoneInfo(cwd) { +function getMilestoneInfo(cwd, paths) { try { - const roadmap = fs.readFileSync(path.join(cwd, '.planning', 'ROADMAP.md'), 'utf-8'); + const p = paths || resolvePlanningPaths(cwd); + const roadmap = fs.readFileSync(p.abs.roadmap, 'utf-8'); // Strip
...
blocks so shipped milestones don't interfere const cleaned = roadmap.replace(/
[\s\S]*?<\/details>/gi, ''); // Extract version and name from the same ## heading for consistency @@ -411,6 +572,8 @@ function getMilestoneInfo(cwd) { module.exports = { MODEL_PROFILES, + ADAPTIVE_TIERS, + evaluateComplexity, output, error, safeReadFile, @@ -429,4 +592,5 @@ module.exports = { generateSlugInternal, getMilestoneInfo, toPosixPath, + resolvePlanningPaths, }; diff --git a/get-shit-done/bin/lib/init.cjs b/get-shit-done/bin/lib/init.cjs index 7e551a01fb..aa96a19089 100644 --- a/get-shit-done/bin/lib/init.cjs +++ b/get-shit-done/bin/lib/init.cjs @@ -6,17 +6,19 @@ const fs = require('fs'); const path = require('path'); const { execSync } = require('child_process'); const { loadConfig, resolveModelInternal, findPhaseInternal, getRoadmapPhaseInternal, pathExistsInternal, generateSlugInternal, getMilestoneInfo, normalizePhaseName, toPosixPath, output, error } = require('./core.cjs'); +const { resolvePlanningPaths } = require('./paths.cjs'); function cmdInitExecutePhase(cwd, phase, raw) { if (!phase) { error('phase required for init execute-phase'); } - const config = loadConfig(cwd); - const phaseInfo = findPhaseInternal(cwd, phase); - const milestone = getMilestoneInfo(cwd); + const paths = resolvePlanningPaths(cwd); + const config = loadConfig(cwd, paths); + const phaseInfo = findPhaseInternal(cwd, phase, paths); + const milestone = getMilestoneInfo(cwd, paths); - const roadmapPhase = getRoadmapPhaseInternal(cwd, phase); + const roadmapPhase = getRoadmapPhaseInternal(cwd, phase, paths); const reqMatch = roadmapPhase?.section?.match(/^\*\*Requirements\*\*:[^\S\n]*([^\n]*)$/m); const reqExtracted = reqMatch ? 
reqMatch[1].replace(/[\[\]]/g, '').split(',').map(s => s.trim()).filter(Boolean).join(', ') @@ -28,6 +30,10 @@ function cmdInitExecutePhase(cwd, phase, raw) { executor_model: resolveModelInternal(cwd, 'gsd-executor'), verifier_model: resolveModelInternal(cwd, 'gsd-verifier'), + // Model profile (for adaptive per-plan resolution) + model_profile: config.model_profile || 'balanced', + adaptive_settings: config.model_profile === 'adaptive' ? (config.adaptive_settings || null) : null, + // Config flags commit_docs: config.commit_docs, parallelization: config.parallelization, @@ -66,15 +72,18 @@ function cmdInitExecutePhase(cwd, phase, raw) { milestone_version: milestone.version, milestone_name: milestone.name, milestone_slug: generateSlugInternal(milestone.name), + milestone: paths.milestone, + is_multi_milestone: paths.isMultiMilestone, + planning_base: paths.rel.base, // File existence - state_exists: pathExistsInternal(cwd, '.planning/STATE.md'), - roadmap_exists: pathExistsInternal(cwd, '.planning/ROADMAP.md'), - config_exists: pathExistsInternal(cwd, '.planning/config.json'), + state_exists: pathExistsInternal(cwd, paths.rel.state), + roadmap_exists: pathExistsInternal(cwd, paths.rel.roadmap), + config_exists: pathExistsInternal(cwd, paths.rel.config), // File paths - state_path: '.planning/STATE.md', - roadmap_path: '.planning/ROADMAP.md', - config_path: '.planning/config.json', + state_path: paths.rel.state, + roadmap_path: paths.rel.roadmap, + config_path: paths.rel.config, }; output(result, raw); @@ -85,10 +94,11 @@ function cmdInitPlanPhase(cwd, phase, raw) { error('phase required for init plan-phase'); } - const config = loadConfig(cwd); - const phaseInfo = findPhaseInternal(cwd, phase); + const paths = resolvePlanningPaths(cwd); + const config = loadConfig(cwd, paths); + const phaseInfo = findPhaseInternal(cwd, phase, paths); - const roadmapPhase = getRoadmapPhaseInternal(cwd, phase); + const roadmapPhase = getRoadmapPhaseInternal(cwd, phase, paths); const 
reqMatch = roadmapPhase?.section?.match(/^\*\*Requirements\*\*:[^\S\n]*([^\n]*)$/m); const reqExtracted = reqMatch ? reqMatch[1].replace(/[\[\]]/g, '').split(',').map(s => s.trim()).filter(Boolean).join(', ') @@ -101,6 +111,10 @@ function cmdInitPlanPhase(cwd, phase, raw) { planner_model: resolveModelInternal(cwd, 'gsd-planner'), checker_model: resolveModelInternal(cwd, 'gsd-plan-checker'), + // Model profile (for adaptive per-plan resolution) + model_profile: config.model_profile || 'balanced', + adaptive_settings: config.model_profile === 'adaptive' ? (config.adaptive_settings || null) : null, + // Workflow flags research_enabled: config.research, plan_checker_enabled: config.plan_checker, @@ -124,12 +138,17 @@ function cmdInitPlanPhase(cwd, phase, raw) { // Environment planning_exists: pathExistsInternal(cwd, '.planning'), - roadmap_exists: pathExistsInternal(cwd, '.planning/ROADMAP.md'), + roadmap_exists: pathExistsInternal(cwd, paths.rel.roadmap), + + // Milestone + milestone: paths.milestone, + is_multi_milestone: paths.isMultiMilestone, + planning_base: paths.rel.base, // File paths - state_path: '.planning/STATE.md', - roadmap_path: '.planning/ROADMAP.md', - requirements_path: '.planning/REQUIREMENTS.md', + state_path: paths.rel.state, + roadmap_path: paths.rel.roadmap, + requirements_path: paths.rel.requirements, }; if (phaseInfo?.directory) { @@ -160,7 +179,8 @@ function cmdInitPlanPhase(cwd, phase, raw) { } function cmdInitNewProject(cwd, raw) { - const config = loadConfig(cwd); + const paths = resolvePlanningPaths(cwd); + const config = loadConfig(cwd, paths); // Detect Brave Search API key availability const homedir = require('os').homedir(); @@ -211,6 +231,11 @@ function cmdInitNewProject(cwd, raw) { // Enhanced search brave_search_available: hasBraveSearch, + // Milestone + milestone: paths.milestone, + is_multi_milestone: paths.isMultiMilestone, + planning_base: paths.rel.base, + // File paths project_path: '.planning/PROJECT.md', }; @@ -219,8 
+244,9 @@ function cmdInitNewProject(cwd, raw) { } function cmdInitNewMilestone(cwd, raw) { - const config = loadConfig(cwd); - const milestone = getMilestoneInfo(cwd); + const paths = resolvePlanningPaths(cwd); + const config = loadConfig(cwd, paths); + const milestone = getMilestoneInfo(cwd, paths); const result = { // Models @@ -236,22 +262,28 @@ function cmdInitNewMilestone(cwd, raw) { current_milestone: milestone.version, current_milestone_name: milestone.name, + // Milestone + milestone: paths.milestone, + is_multi_milestone: paths.isMultiMilestone, + planning_base: paths.rel.base, + // File existence project_exists: pathExistsInternal(cwd, '.planning/PROJECT.md'), - roadmap_exists: pathExistsInternal(cwd, '.planning/ROADMAP.md'), - state_exists: pathExistsInternal(cwd, '.planning/STATE.md'), + roadmap_exists: pathExistsInternal(cwd, paths.rel.roadmap), + state_exists: pathExistsInternal(cwd, paths.rel.state), // File paths project_path: '.planning/PROJECT.md', - roadmap_path: '.planning/ROADMAP.md', - state_path: '.planning/STATE.md', + roadmap_path: paths.rel.roadmap, + state_path: paths.rel.state, }; output(result, raw); } function cmdInitQuick(cwd, description, raw) { - const config = loadConfig(cwd); + const paths = resolvePlanningPaths(cwd); + const config = loadConfig(cwd, paths); const now = new Date(); const slug = description ? generateSlugInternal(description)?.substring(0, 40) : null; @@ -275,6 +307,10 @@ function cmdInitQuick(cwd, description, raw) { checker_model: resolveModelInternal(cwd, 'gsd-plan-checker'), verifier_model: resolveModelInternal(cwd, 'gsd-verifier'), + // Model profile (for adaptive per-plan resolution) + model_profile: config.model_profile || 'balanced', + adaptive_settings: config.model_profile === 'adaptive' ? (config.adaptive_settings || null) : null, + // Config commit_docs: config.commit_docs, @@ -291,8 +327,13 @@ function cmdInitQuick(cwd, description, raw) { quick_dir: '.planning/quick', task_dir: slug ? 
`.planning/quick/${nextNum}-${slug}` : null, + // Milestone + milestone: paths.milestone, + is_multi_milestone: paths.isMultiMilestone, + planning_base: paths.rel.base, + // File existence - roadmap_exists: pathExistsInternal(cwd, '.planning/ROADMAP.md'), + roadmap_exists: pathExistsInternal(cwd, paths.rel.roadmap), planning_exists: pathExistsInternal(cwd, '.planning'), }; @@ -301,7 +342,8 @@ function cmdInitQuick(cwd, description, raw) { } function cmdInitResume(cwd, raw) { - const config = loadConfig(cwd); + const paths = resolvePlanningPaths(cwd); + const config = loadConfig(cwd, paths); // Check for interrupted agent let interruptedAgentId = null; @@ -311,16 +353,21 @@ function cmdInitResume(cwd, raw) { const result = { // File existence - state_exists: pathExistsInternal(cwd, '.planning/STATE.md'), - roadmap_exists: pathExistsInternal(cwd, '.planning/ROADMAP.md'), + state_exists: pathExistsInternal(cwd, paths.rel.state), + roadmap_exists: pathExistsInternal(cwd, paths.rel.roadmap), project_exists: pathExistsInternal(cwd, '.planning/PROJECT.md'), planning_exists: pathExistsInternal(cwd, '.planning'), // File paths - state_path: '.planning/STATE.md', - roadmap_path: '.planning/ROADMAP.md', + state_path: paths.rel.state, + roadmap_path: paths.rel.roadmap, project_path: '.planning/PROJECT.md', + // Milestone + milestone: paths.milestone, + is_multi_milestone: paths.isMultiMilestone, + planning_base: paths.rel.base, + // Agent state has_interrupted_agent: !!interruptedAgentId, interrupted_agent_id: interruptedAgentId, @@ -337,8 +384,9 @@ function cmdInitVerifyWork(cwd, phase, raw) { error('phase required for init verify-work'); } - const config = loadConfig(cwd); - const phaseInfo = findPhaseInternal(cwd, phase); + const paths = resolvePlanningPaths(cwd); + const config = loadConfig(cwd, paths); + const phaseInfo = findPhaseInternal(cwd, phase, paths); const result = { // Models @@ -354,6 +402,11 @@ function cmdInitVerifyWork(cwd, phase, raw) { phase_number: 
phaseInfo?.phase_number || null, phase_name: phaseInfo?.phase_name || null, + // Milestone + milestone: paths.milestone, + is_multi_milestone: paths.isMultiMilestone, + planning_base: paths.rel.base, + // Existing artifacts has_verification: phaseInfo?.has_verification || false, }; @@ -362,12 +415,13 @@ function cmdInitVerifyWork(cwd, phase, raw) { } function cmdInitPhaseOp(cwd, phase, raw) { - const config = loadConfig(cwd); - let phaseInfo = findPhaseInternal(cwd, phase); + const paths = resolvePlanningPaths(cwd); + const config = loadConfig(cwd, paths); + let phaseInfo = findPhaseInternal(cwd, phase, paths); // Fallback to ROADMAP.md if no directory exists (e.g., Plans: TBD) if (!phaseInfo) { - const roadmapPhase = getRoadmapPhaseInternal(cwd, phase); + const roadmapPhase = getRoadmapPhaseInternal(cwd, phase, paths); if (roadmapPhase?.found) { const phaseName = roadmapPhase.phase_name; phaseInfo = { @@ -406,14 +460,19 @@ function cmdInitPhaseOp(cwd, phase, raw) { has_verification: phaseInfo?.has_verification || false, plan_count: phaseInfo?.plans?.length || 0, + // Milestone + milestone: paths.milestone, + is_multi_milestone: paths.isMultiMilestone, + planning_base: paths.rel.base, + // File existence - roadmap_exists: pathExistsInternal(cwd, '.planning/ROADMAP.md'), + roadmap_exists: pathExistsInternal(cwd, paths.rel.roadmap), planning_exists: pathExistsInternal(cwd, '.planning'), // File paths - state_path: '.planning/STATE.md', - roadmap_path: '.planning/ROADMAP.md', - requirements_path: '.planning/REQUIREMENTS.md', + state_path: paths.rel.state, + roadmap_path: paths.rel.roadmap, + requirements_path: paths.rel.requirements, }; if (phaseInfo?.directory) { @@ -443,7 +502,8 @@ function cmdInitPhaseOp(cwd, phase, raw) { } function cmdInitTodos(cwd, area, raw) { - const config = loadConfig(cwd); + const paths = resolvePlanningPaths(cwd); + const config = loadConfig(cwd, paths); const now = new Date(); // List todos (reuse existing logic) @@ -492,6 +552,11 @@ 
function cmdInitTodos(cwd, area, raw) { pending_dir: '.planning/todos/pending', completed_dir: '.planning/todos/completed', + // Milestone + milestone: paths.milestone, + is_multi_milestone: paths.isMultiMilestone, + planning_base: paths.rel.base, + // File existence planning_exists: pathExistsInternal(cwd, '.planning'), todos_dir_exists: pathExistsInternal(cwd, '.planning/todos'), @@ -501,14 +566,92 @@ function cmdInitTodos(cwd, area, raw) { output(result, raw); } +function cmdInitBugs(cwd, raw) { + const paths = resolvePlanningPaths(cwd); + const config = loadConfig(cwd, paths); + const now = new Date(); + + const bugsDir = path.join(cwd, '.planning', 'bugs'); + const resolvedDir = path.join(cwd, '.planning', 'bugs', 'resolved'); + + const bugs = []; + let maxId = 0; + + // Scan active bugs + try { + const files = fs.readdirSync(bugsDir).filter(f => /^BUG-\d+\.md$/.test(f)); + for (const file of files) { + const num = parseInt(file.match(/BUG-(\d+)\.md/)[1], 10); + if (num > maxId) maxId = num; + try { + const content = fs.readFileSync(path.join(bugsDir, file), 'utf-8'); + const titleMatch = content.match(/^title:\s*"?(.+?)"?\s*$/m); + const severityMatch = content.match(/^severity:\s*(.+)$/m); + const statusMatch = content.match(/^status:\s*(.+)$/m); + const areaMatch = content.match(/^area:\s*(.+)$/m); + const createdMatch = content.match(/^created:\s*(.+)$/m); + bugs.push({ + id: `BUG-${String(num).padStart(3, '0')}`, + title: titleMatch ? titleMatch[1].trim() : 'Untitled', + severity: severityMatch ? severityMatch[1].trim() : 'medium', + status: statusMatch ? statusMatch[1].trim() : 'reported', + area: areaMatch ? areaMatch[1].trim() : 'general', + created: createdMatch ? 
createdMatch[1].trim() : 'unknown', + }); + } catch {} + } + } catch {} + + // Scan resolved bugs for ID calculation + try { + const files = fs.readdirSync(resolvedDir).filter(f => /^BUG-\d+\.md$/.test(f)); + for (const file of files) { + const num = parseInt(file.match(/BUG-(\d+)\.md/)[1], 10); + if (num > maxId) maxId = num; + } + } catch {} + + const nextId = maxId + 1; + + const result = { + // Config + commit_docs: config.commit_docs, + + // Timestamps + date: now.toISOString().split('T')[0], + timestamp: now.toISOString(), + + // Bug inventory + bug_count: bugs.length, + bugs, + next_id: nextId, + next_id_padded: String(nextId).padStart(3, '0'), + + // Paths + bugs_dir: '.planning/bugs', + resolved_dir: '.planning/bugs/resolved', + planning_base: paths.rel.base, + + // Milestone + milestone: paths.milestone, + is_multi_milestone: paths.isMultiMilestone, + + // File existence + bugs_dir_exists: pathExistsInternal(cwd, '.planning/bugs'), + }; + + output(result, raw); +} + function cmdInitMilestoneOp(cwd, raw) { - const config = loadConfig(cwd); - const milestone = getMilestoneInfo(cwd); + const paths = resolvePlanningPaths(cwd); + const config = loadConfig(cwd, paths); + const milestone = getMilestoneInfo(cwd, paths); // Count phases let phaseCount = 0; let completedPhases = 0; - const phasesDir = path.join(cwd, '.planning', 'phases'); + const phasesDir = paths.abs.phases; try { const entries = fs.readdirSync(phasesDir, { withFileTypes: true }); const dirs = entries.filter(e => e.isDirectory()).map(e => e.name); @@ -542,6 +685,11 @@ function cmdInitMilestoneOp(cwd, raw) { milestone_name: milestone.name, milestone_slug: generateSlugInternal(milestone.name), + // Milestone + milestone: paths.milestone, + is_multi_milestone: paths.isMultiMilestone, + planning_base: paths.rel.base, + // Phase counts phase_count: phaseCount, completed_phases: completedPhases, @@ -553,17 +701,18 @@ function cmdInitMilestoneOp(cwd, raw) { // File existence project_exists: 
pathExistsInternal(cwd, '.planning/PROJECT.md'), - roadmap_exists: pathExistsInternal(cwd, '.planning/ROADMAP.md'), - state_exists: pathExistsInternal(cwd, '.planning/STATE.md'), + roadmap_exists: pathExistsInternal(cwd, paths.rel.roadmap), + state_exists: pathExistsInternal(cwd, paths.rel.state), archive_exists: pathExistsInternal(cwd, '.planning/archive'), - phases_dir_exists: pathExistsInternal(cwd, '.planning/phases'), + phases_dir_exists: pathExistsInternal(cwd, paths.rel.base + '/phases'), }; output(result, raw); } function cmdInitMapCodebase(cwd, raw) { - const config = loadConfig(cwd); + const paths = resolvePlanningPaths(cwd); + const config = loadConfig(cwd, paths); // Check for existing codebase maps const codebaseDir = path.join(cwd, '.planning', 'codebase'); @@ -588,6 +737,11 @@ function cmdInitMapCodebase(cwd, raw) { existing_maps: existingMaps, has_maps: existingMaps.length > 0, + // Milestone + milestone: paths.milestone, + is_multi_milestone: paths.isMultiMilestone, + planning_base: paths.rel.base, + // File existence planning_exists: pathExistsInternal(cwd, '.planning'), codebase_dir_exists: pathExistsInternal(cwd, '.planning/codebase'), @@ -597,11 +751,12 @@ function cmdInitMapCodebase(cwd, raw) { } function cmdInitProgress(cwd, raw) { - const config = loadConfig(cwd); - const milestone = getMilestoneInfo(cwd); + const paths = resolvePlanningPaths(cwd); + const config = loadConfig(cwd, paths); + const milestone = getMilestoneInfo(cwd, paths); // Analyze phases - const phasesDir = path.join(cwd, '.planning', 'phases'); + const phasesDir = paths.abs.phases; const phases = []; let currentPhase = null; let nextPhase = null; @@ -629,7 +784,7 @@ function cmdInitProgress(cwd, raw) { const phaseInfo = { number: phaseNumber, name: phaseName, - directory: '.planning/phases/' + dir, + directory: paths.rel.phases + '/' + dir, status, plan_count: plans.length, summary_count: summaries.length, @@ -651,7 +806,7 @@ function cmdInitProgress(cwd, raw) { // Check 
for paused work let pausedAt = null; try { - const state = fs.readFileSync(path.join(cwd, '.planning', 'STATE.md'), 'utf-8'); + const state = fs.readFileSync(paths.abs.state, 'utf-8'); const pauseMatch = state.match(/\*\*Paused At:\*\*\s*(.+)/); if (pauseMatch) pausedAt = pauseMatch[1].trim(); } catch {} @@ -667,6 +822,9 @@ function cmdInitProgress(cwd, raw) { // Milestone milestone_version: milestone.version, milestone_name: milestone.name, + milestone: paths.milestone, + is_multi_milestone: paths.isMultiMilestone, + planning_base: paths.rel.base, // Phase overview phases, @@ -682,13 +840,13 @@ function cmdInitProgress(cwd, raw) { // File existence project_exists: pathExistsInternal(cwd, '.planning/PROJECT.md'), - roadmap_exists: pathExistsInternal(cwd, '.planning/ROADMAP.md'), - state_exists: pathExistsInternal(cwd, '.planning/STATE.md'), + roadmap_exists: pathExistsInternal(cwd, paths.rel.roadmap), + state_exists: pathExistsInternal(cwd, paths.rel.state), // File paths - state_path: '.planning/STATE.md', - roadmap_path: '.planning/ROADMAP.md', + state_path: paths.rel.state, + roadmap_path: paths.rel.roadmap, project_path: '.planning/PROJECT.md', - config_path: '.planning/config.json', + config_path: paths.rel.config, }; output(result, raw); @@ -704,6 +862,7 @@ module.exports = { cmdInitVerifyWork, cmdInitPhaseOp, cmdInitTodos, + cmdInitBugs, cmdInitMilestoneOp, cmdInitMapCodebase, cmdInitProgress, diff --git a/get-shit-done/bin/lib/milestone.cjs b/get-shit-done/bin/lib/milestone.cjs index 77625376bc..982fe40c9c 100644 --- a/get-shit-done/bin/lib/milestone.cjs +++ b/get-shit-done/bin/lib/milestone.cjs @@ -7,6 +7,7 @@ const path = require('path'); const { output, error } = require('./core.cjs'); const { extractFrontmatter } = require('./frontmatter.cjs'); const { writeStateMd } = require('./state.cjs'); +const { resolvePlanningPaths } = require('./paths.cjs'); function cmdRequirementsMarkComplete(cwd, reqIdsRaw, raw) { if (!reqIdsRaw || reqIdsRaw.length === 0) { 
@@ -25,7 +26,7 @@ function cmdRequirementsMarkComplete(cwd, reqIdsRaw, raw) { error('no valid requirement IDs found'); } - const reqPath = path.join(cwd, '.planning', 'REQUIREMENTS.md'); + const reqPath = resolvePlanningPaths(cwd).abs.requirements; if (!fs.existsSync(reqPath)) { output({ updated: false, reason: 'REQUIREMENTS.md not found', ids: reqIds }, raw, 'no requirements file'); return; @@ -80,12 +81,13 @@ function cmdMilestoneComplete(cwd, version, options, raw) { error('version required for milestone complete (e.g., v1.0)'); } - const roadmapPath = path.join(cwd, '.planning', 'ROADMAP.md'); - const reqPath = path.join(cwd, '.planning', 'REQUIREMENTS.md'); - const statePath = path.join(cwd, '.planning', 'STATE.md'); - const milestonesPath = path.join(cwd, '.planning', 'MILESTONES.md'); - const archiveDir = path.join(cwd, '.planning', 'milestones'); - const phasesDir = path.join(cwd, '.planning', 'phases'); + const paths = resolvePlanningPaths(cwd); + const roadmapPath = paths.abs.roadmap; + const reqPath = paths.abs.requirements; + const statePath = paths.abs.state; + const milestonesPath = path.join(paths.abs.planningRoot, 'MILESTONES.md'); + const archiveDir = path.join(paths.abs.planningRoot, 'milestones'); + const phasesDir = paths.abs.phases; const today = new Date().toISOString().split('T')[0]; const milestoneName = options.name || version; @@ -178,7 +180,7 @@ function cmdMilestoneComplete(cwd, version, options, raw) { } // Archive audit file if exists - const auditFile = path.join(cwd, '.planning', `${version}-MILESTONE-AUDIT.md`); + const auditFile = path.join(paths.abs.planningRoot, `${version}-MILESTONE-AUDIT.md`); if (fs.existsSync(auditFile)) { fs.renameSync(auditFile, path.join(archiveDir, `${version}-MILESTONE-AUDIT.md`)); } @@ -261,7 +263,258 @@ function cmdMilestoneComplete(cwd, version, options, raw) { output(result, raw); } +function cmdMilestoneCreate(cwd, name, raw) { + if (!name) { + error('milestone name required. 
Usage: milestone create '); + } + + const today = new Date().toISOString().split('T')[0]; + const planningRoot = path.join(cwd, '.planning'); + const milestonesDir = path.join(planningRoot, 'milestones'); + const activeMilestonePath = path.join(planningRoot, 'ACTIVE_MILESTONE'); + const targetDir = path.join(milestonesDir, name); + let migratedFrom = null; + + // Check if this is the first milestone AND legacy mode exists + const legacyStateExists = fs.existsSync(path.join(planningRoot, 'STATE.md')); + let existingMilestones = []; + try { + existingMilestones = fs.readdirSync(milestonesDir, { withFileTypes: true }) + .filter(e => e.isDirectory() && fs.existsSync(path.join(milestonesDir, e.name, 'STATE.md'))) + .map(e => e.name); + } catch {} + + if (existingMilestones.length === 0 && legacyStateExists) { + // Auto-migrate existing global files to a milestone directory + // Determine a name for the current milestone from STATE.md + let currentMilestoneName = 'initial'; + try { + const stateContent = fs.readFileSync(path.join(planningRoot, 'STATE.md'), 'utf-8'); + const milestoneMatch = stateContent.match(/\*\*Milestone:\*\*\s*(.+)/); + if (milestoneMatch) { + currentMilestoneName = milestoneMatch[1].trim().toLowerCase().replace(/[^a-z0-9]+/g, '-').replace(/^-+|-+$/g, '') || 'initial'; + } + } catch {} + + const migrationDir = path.join(milestonesDir, currentMilestoneName); + fs.mkdirSync(migrationDir, { recursive: true }); + fs.mkdirSync(path.join(migrationDir, 'phases'), { recursive: true }); + + // Copy legacy files + const filesToMigrate = [ + { src: 'STATE.md', dest: 'STATE.md' }, + { src: 'ROADMAP.md', dest: 'ROADMAP.md' }, + { src: 'REQUIREMENTS.md', dest: 'REQUIREMENTS.md' }, + { src: 'config.json', dest: 'config.json' }, + ]; + for (const { src, dest } of filesToMigrate) { + const srcPath = path.join(planningRoot, src); + if (fs.existsSync(srcPath)) { + fs.copyFileSync(srcPath, path.join(migrationDir, dest)); + } + } + + // Set ACTIVE_MILESTONE to the 
migrated milestone first + fs.writeFileSync(activeMilestonePath, currentMilestoneName, 'utf-8'); + migratedFrom = currentMilestoneName; + } + + // Create the new milestone directory + fs.mkdirSync(targetDir, { recursive: true }); + fs.mkdirSync(path.join(targetDir, 'phases'), { recursive: true }); + + // STATE.md template + const stateTemplate = `# Session State + +## Position + +**Milestone:** ${name} +**Current Phase:** Not started +**Current Phase Name:** TBD +**Total Phases:** 0 +**Current Plan:** Not started +**Total Plans in Phase:** 0 +**Status:** Ready to plan +**Progress:** [░░░░░░░░░░] 0% +**Last Activity:** ${today} +**Last Activity Description:** Milestone created + +## Decisions Made +None yet. + +## Blockers +None + +## Performance Metrics +| Plan | Duration | Tasks | Files | +|------|----------|-------|-------| +None yet + +## Session +**Last Date:** ${today} +**Stopped At:** N/A +**Resume File:** None +`; + fs.writeFileSync(path.join(targetDir, 'STATE.md'), stateTemplate, 'utf-8'); + + // ROADMAP.md template + const roadmapTemplate = `# ${name} Roadmap + +> Milestone roadmap — run /gsd:new-milestone to populate + +## Progress Overview + +| Phase | Plans | Status | Completed | +|-------|-------|--------|-----------| + +## Phase Summary + +(No phases yet — run /gsd:new-milestone to create roadmap) +`; + fs.writeFileSync(path.join(targetDir, 'ROADMAP.md'), roadmapTemplate, 'utf-8'); + + // config.json — copy from current milestone or use defaults + let configContent = JSON.stringify({ commit_docs: true, research: true, verifier: true, plan_checker: true, nyquist_validation: true, parallelization: false, branching_strategy: 'none' }, null, 2); + try { + const currentPaths = resolvePlanningPaths(cwd); + if (fs.existsSync(currentPaths.abs.config)) { + configContent = fs.readFileSync(currentPaths.abs.config, 'utf-8'); + } + } catch {} + fs.writeFileSync(path.join(targetDir, 'config.json'), configContent, 'utf-8'); + + // Set ACTIVE_MILESTONE to the new 
milestone + fs.writeFileSync(activeMilestonePath, name, 'utf-8'); + + const directory = '.planning/milestones/' + name; + output({ created: true, name, directory, migrated_from: migratedFrom }, raw, `milestone "${name}" created at ${directory}`); +} + +function cmdMilestoneSwitch(cwd, name, raw) { + if (!name) { + error('milestone name required. Usage: milestone switch '); + } + + const planningRoot = path.join(cwd, '.planning'); + const milestoneDir = path.join(planningRoot, 'milestones', name); + const activeMilestonePath = path.join(planningRoot, 'ACTIVE_MILESTONE'); + + if (!fs.existsSync(milestoneDir)) { + error(`milestone "${name}" not found in .planning/milestones/`); + } + + // Check current active milestone for in-progress work + let previousMilestone = null; + let previousStatus = null; + let hasInProgress = false; + try { + previousMilestone = fs.readFileSync(activeMilestonePath, 'utf-8').trim(); + if (previousMilestone && previousMilestone !== name) { + const prevStatePath = path.join(planningRoot, 'milestones', previousMilestone, 'STATE.md'); + if (fs.existsSync(prevStatePath)) { + const content = fs.readFileSync(prevStatePath, 'utf-8'); + const statusMatch = content.match(/\*\*Status:\*\*\s*(.+)/); + if (statusMatch) { + previousStatus = statusMatch[1].trim(); + hasInProgress = /executing|planning/i.test(previousStatus); + } + } + } + } catch {} + + // Write ACTIVE_MILESTONE + fs.writeFileSync(activeMilestonePath, name, 'utf-8'); + + // Read status from target milestone's STATE.md + let status = 'unknown'; + const statePath = path.join(milestoneDir, 'STATE.md'); + if (fs.existsSync(statePath)) { + try { + const content = fs.readFileSync(statePath, 'utf-8'); + const statusMatch = content.match(/\*\*Status:\*\*\s*(.+)/); + if (statusMatch) status = statusMatch[1].trim(); + } catch {} + } + + const state_path = '.planning/milestones/' + name + '/STATE.md'; + output({ + switched: true, + name, + status, + state_path, + previous_milestone: 
previousMilestone, + previous_status: previousStatus, + has_in_progress: hasInProgress, + }, raw, `switched to milestone "${name}" (${status})`); +} + +function cmdMilestoneList(cwd, raw) { + const planningRoot = path.join(cwd, '.planning'); + const milestonesDir = path.join(planningRoot, 'milestones'); + const activeMilestonePath = path.join(planningRoot, 'ACTIVE_MILESTONE'); + + // Read active milestone + let active = null; + try { + const content = fs.readFileSync(activeMilestonePath, 'utf-8').trim(); + if (content) active = content; + } catch {} + + // List milestone directories that contain STATE.md (skip archived v*-phases dirs) + const milestones = []; + try { + const entries = fs.readdirSync(milestonesDir, { withFileTypes: true }); + for (const entry of entries) { + if (!entry.isDirectory()) continue; + // Skip archived phase directories (e.g. v1.0-phases) + if (/^v[\d.]+-phases$/.test(entry.name)) continue; + const stateFile = path.join(milestonesDir, entry.name, 'STATE.md'); + if (!fs.existsSync(stateFile)) continue; + + let status = 'unknown'; + try { + const content = fs.readFileSync(stateFile, 'utf-8'); + const statusMatch = content.match(/\*\*Status:\*\*\s*(.+)/); + if (statusMatch) status = statusMatch[1].trim(); + } catch {} + + milestones.push({ + name: entry.name, + status, + active: entry.name === active, + }); + } + } catch {} + + output({ milestones, active, count: milestones.length }, raw, `${milestones.length} milestone(s) found`); +} + +function cmdMilestoneStatus(cwd, raw) { + const planningRoot = path.join(cwd, '.planning'); + const activeMilestonePath = path.join(planningRoot, 'ACTIVE_MILESTONE'); + + let active = null; + try { + const content = fs.readFileSync(activeMilestonePath, 'utf-8').trim(); + if (content) active = content; + } catch {} + + const isMultiMilestone = !!active; + const paths = resolvePlanningPaths(cwd); + + output({ + active, + is_multi_milestone: isMultiMilestone, + state_path: paths.rel.state, + roadmap_path: 
paths.rel.roadmap, + }, raw, active ? `active milestone: ${active}` : 'legacy mode (no active milestone)'); +} + module.exports = { cmdRequirementsMarkComplete, cmdMilestoneComplete, + cmdMilestoneCreate, + cmdMilestoneSwitch, + cmdMilestoneList, + cmdMilestoneStatus, }; diff --git a/get-shit-done/bin/lib/paths.cjs b/get-shit-done/bin/lib/paths.cjs new file mode 100644 index 0000000000..081d615684 --- /dev/null +++ b/get-shit-done/bin/lib/paths.cjs @@ -0,0 +1,100 @@ +/** + * Paths — Centralized planning path resolution for milestone-scoped directories + * + * Resolution order: milestoneOverride arg > module-level override > ACTIVE_MILESTONE file > legacy fallback + * When no active milestone is detected, returns identical paths to legacy hardcoded `.planning/` paths. + */ + +const fs = require('fs'); +const path = require('path'); + +// ─── Module-level milestone override (set by CLI --milestone flag) ─────────── + +let _milestoneOverride = null; + +function setMilestoneOverride(milestone) { + _milestoneOverride = milestone || null; +} + +function getMilestoneOverride() { + return _milestoneOverride; +} + +// ─── Path resolution ───────────────────────────────────────────────────────── + +/** + * Resolve all planning paths for the given working directory. 
+ * + * @param {string} cwd - Project root directory + * @param {string|null} [milestoneOverride] - Explicit milestone name (from --milestone flag) + * @returns {object} Resolved paths object with abs, rel, global, milestone, isMultiMilestone + */ +function resolvePlanningPaths(cwd, milestoneOverride) { + const planningRoot = path.join(cwd, '.planning'); + const activeMilestonePath = path.join(planningRoot, 'ACTIVE_MILESTONE'); + + // Determine active milestone: explicit override > module override > file > null + let milestone = milestoneOverride || _milestoneOverride || null; + if (!milestone) { + try { + const content = fs.readFileSync(activeMilestonePath, 'utf-8').trim(); + if (content) milestone = content; + } catch {} + } + + const isMultiMilestone = !!milestone; + + // Determine base directory for milestone-scoped files + const absBase = isMultiMilestone + ? path.join(planningRoot, 'milestones', milestone) + : planningRoot; + + const relBase = isMultiMilestone + ? '.planning/milestones/' + milestone + : '.planning'; + + return { + abs: { + planningRoot, + base: absBase, + state: path.join(absBase, 'STATE.md'), + roadmap: path.join(absBase, 'ROADMAP.md'), + requirements: path.join(absBase, 'REQUIREMENTS.md'), + config: path.join(absBase, 'config.json'), + phases: path.join(absBase, 'phases'), + research: path.join(absBase, 'research'), + codebase: path.join(planningRoot, 'codebase'), + }, + rel: { + base: relBase, + state: relBase + '/STATE.md', + roadmap: relBase + '/ROADMAP.md', + requirements: relBase + '/REQUIREMENTS.md', + config: relBase + '/config.json', + phases: relBase + '/phases', + research: relBase + '/research', + }, + global: { + abs: { + project: path.join(planningRoot, 'PROJECT.md'), + milestones: path.join(planningRoot, 'MILESTONES.md'), + activeMilestone: activeMilestonePath, + codebase: path.join(planningRoot, 'codebase'), + milestonesDir: path.join(planningRoot, 'milestones'), + }, + rel: { + project: '.planning/PROJECT.md', + milestones: 
'.planning/MILESTONES.md', + activeMilestone: '.planning/ACTIVE_MILESTONE', + }, + }, + milestone, + isMultiMilestone, + }; +} + +module.exports = { + resolvePlanningPaths, + setMilestoneOverride, + getMilestoneOverride, +}; diff --git a/get-shit-done/bin/lib/phase.cjs b/get-shit-done/bin/lib/phase.cjs index 4e4cbff609..5fd3e61f93 100644 --- a/get-shit-done/bin/lib/phase.cjs +++ b/get-shit-done/bin/lib/phase.cjs @@ -7,9 +7,10 @@ const path = require('path'); const { escapeRegex, normalizePhaseName, comparePhaseNum, findPhaseInternal, getArchivedPhaseDirs, generateSlugInternal, output, error } = require('./core.cjs'); const { extractFrontmatter } = require('./frontmatter.cjs'); const { writeStateMd } = require('./state.cjs'); +const { resolvePlanningPaths } = require('./paths.cjs'); function cmdPhasesList(cwd, options, raw) { - const phasesDir = path.join(cwd, '.planning', 'phases'); + const phasesDir = resolvePlanningPaths(cwd).abs.phases; const { type, phase, includeArchived } = options; // If no phases directory, return empty @@ -85,7 +86,7 @@ function cmdPhasesList(cwd, options, raw) { } function cmdPhaseNextDecimal(cwd, basePhase, raw) { - const phasesDir = path.join(cwd, '.planning', 'phases'); + const phasesDir = resolvePlanningPaths(cwd).abs.phases; const normalized = normalizePhaseName(basePhase); // Check if phases directory exists @@ -154,7 +155,8 @@ function cmdFindPhase(cwd, phase, raw) { error('phase identifier required'); } - const phasesDir = path.join(cwd, '.planning', 'phases'); + const paths = resolvePlanningPaths(cwd); + const phasesDir = paths.abs.phases; const normalized = normalizePhaseName(phase); const notFound = { found: false, directory: null, phase_number: null, phase_name: null, plans: [], summaries: [] }; @@ -180,7 +182,7 @@ function cmdFindPhase(cwd, phase, raw) { const result = { found: true, - directory: path.join('.planning', 'phases', match), + directory: paths.rel.phases + '/' + match, phase_number: phaseNumber, phase_name: 
phaseName, plans, @@ -203,7 +205,7 @@ function cmdPhasePlanIndex(cwd, phase, raw) { error('phase required for phase-plan-index'); } - const phasesDir = path.join(cwd, '.planning', 'phases'); + const phasesDir = resolvePlanningPaths(cwd).abs.phases; const normalized = normalizePhaseName(phase); // Find phase directory @@ -281,6 +283,11 @@ function cmdPhasePlanIndex(cwd, phase, raw) { id: planId, wave, autonomous, + type: fm.type || 'execute', + depends_on: (() => { + const d = fm.depends_on || fm['depends-on'] || []; + return Array.isArray(d) ? d : [d]; + })(), objective: extractObjective(content) || fm.objective || null, files_modified: filesModified, task_count: taskCount, @@ -313,7 +320,8 @@ function cmdPhaseAdd(cwd, description, raw) { error('description required for phase add'); } - const roadmapPath = path.join(cwd, '.planning', 'ROADMAP.md'); + const paths = resolvePlanningPaths(cwd); + const roadmapPath = paths.abs.roadmap; if (!fs.existsSync(roadmapPath)) { error('ROADMAP.md not found'); } @@ -333,7 +341,7 @@ function cmdPhaseAdd(cwd, description, raw) { const newPhaseNum = maxPhase + 1; const paddedNum = String(newPhaseNum).padStart(2, '0'); const dirName = `${paddedNum}-${slug}`; - const dirPath = path.join(cwd, '.planning', 'phases', dirName); + const dirPath = path.join(paths.abs.phases, dirName); // Create directory with .gitkeep so git tracks empty folders fs.mkdirSync(dirPath, { recursive: true }); @@ -358,7 +366,7 @@ function cmdPhaseAdd(cwd, description, raw) { padded: paddedNum, name: description, slug, - directory: `.planning/phases/${dirName}`, + directory: `${paths.rel.phases}/${dirName}`, }; output(result, raw, paddedNum); @@ -369,7 +377,8 @@ function cmdPhaseInsert(cwd, afterPhase, description, raw) { error('after-phase and description required for phase insert'); } - const roadmapPath = path.join(cwd, '.planning', 'ROADMAP.md'); + const paths = resolvePlanningPaths(cwd); + const roadmapPath = paths.abs.roadmap; if 
(!fs.existsSync(roadmapPath)) { error('ROADMAP.md not found'); } @@ -387,7 +396,7 @@ function cmdPhaseInsert(cwd, afterPhase, description, raw) { } // Calculate next decimal using existing logic - const phasesDir = path.join(cwd, '.planning', 'phases'); + const phasesDir = paths.abs.phases; const normalizedBase = normalizePhaseName(afterPhase); let existingDecimals = []; @@ -404,7 +413,7 @@ function cmdPhaseInsert(cwd, afterPhase, description, raw) { const nextDecimal = existingDecimals.length === 0 ? 1 : Math.max(...existingDecimals) + 1; const decimalPhase = `${normalizedBase}.${nextDecimal}`; const dirName = `${decimalPhase}-${slug}`; - const dirPath = path.join(cwd, '.planning', 'phases', dirName); + const dirPath = path.join(phasesDir, dirName); // Create directory with .gitkeep so git tracks empty folders fs.mkdirSync(dirPath, { recursive: true }); @@ -439,7 +448,7 @@ function cmdPhaseInsert(cwd, afterPhase, description, raw) { after_phase: afterPhase, name: description, slug, - directory: `.planning/phases/${dirName}`, + directory: `${paths.rel.phases}/${dirName}`, }; output(result, raw, decimalPhase); @@ -450,8 +459,9 @@ function cmdPhaseRemove(cwd, targetPhase, options, raw) { error('phase number required for phase remove'); } - const roadmapPath = path.join(cwd, '.planning', 'ROADMAP.md'); - const phasesDir = path.join(cwd, '.planning', 'phases'); + const paths = resolvePlanningPaths(cwd); + const roadmapPath = paths.abs.roadmap; + const phasesDir = paths.abs.phases; const force = options.force || false; if (!fs.existsSync(roadmapPath)) { @@ -666,7 +676,7 @@ function cmdPhaseRemove(cwd, targetPhase, options, raw) { fs.writeFileSync(roadmapPath, roadmapContent, 'utf-8'); // Update STATE.md phase count - const statePath = path.join(cwd, '.planning', 'STATE.md'); + const statePath = paths.abs.state; if (fs.existsSync(statePath)) { let stateContent = fs.readFileSync(statePath, 'utf-8'); // Update "Total Phases" field @@ -703,9 +713,10 @@ function 
cmdPhaseComplete(cwd, phaseNum, raw) { error('phase number required for phase complete'); } - const roadmapPath = path.join(cwd, '.planning', 'ROADMAP.md'); - const statePath = path.join(cwd, '.planning', 'STATE.md'); - const phasesDir = path.join(cwd, '.planning', 'phases'); + const paths = resolvePlanningPaths(cwd); + const roadmapPath = paths.abs.roadmap; + const statePath = paths.abs.state; + const phasesDir = paths.abs.phases; const normalized = normalizePhaseName(phaseNum); const today = new Date().toISOString().split('T')[0]; @@ -753,7 +764,7 @@ function cmdPhaseComplete(cwd, phaseNum, raw) { fs.writeFileSync(roadmapPath, roadmapContent, 'utf-8'); // Update REQUIREMENTS.md traceability for this phase's requirements - const reqPath = path.join(cwd, '.planning', 'REQUIREMENTS.md'); + const reqPath = paths.abs.requirements; if (fs.existsSync(reqPath)) { // Extract Requirements line from roadmap for this phase const reqMatch = roadmapContent.match( diff --git a/get-shit-done/bin/lib/roadmap.cjs b/get-shit-done/bin/lib/roadmap.cjs index 9717b9aae0..fc4bf595d1 100644 --- a/get-shit-done/bin/lib/roadmap.cjs +++ b/get-shit-done/bin/lib/roadmap.cjs @@ -5,9 +5,11 @@ const fs = require('fs'); const path = require('path'); const { escapeRegex, normalizePhaseName, output, error, findPhaseInternal } = require('./core.cjs'); +const { resolvePlanningPaths } = require('./paths.cjs'); function cmdRoadmapGetPhase(cwd, phaseNum, raw) { - const roadmapPath = path.join(cwd, '.planning', 'ROADMAP.md'); + const paths = resolvePlanningPaths(cwd); + const roadmapPath = paths.abs.roadmap; if (!fs.existsSync(roadmapPath)) { output({ found: false, error: 'ROADMAP.md not found' }, raw, ''); @@ -91,7 +93,8 @@ function cmdRoadmapGetPhase(cwd, phaseNum, raw) { } function cmdRoadmapAnalyze(cwd, raw) { - const roadmapPath = path.join(cwd, '.planning', 'ROADMAP.md'); + const paths = resolvePlanningPaths(cwd); + const roadmapPath = paths.abs.roadmap; if (!fs.existsSync(roadmapPath)) { output({ 
error: 'ROADMAP.md not found', milestones: [], phases: [], current_phase: null }, raw); @@ -99,7 +102,7 @@ function cmdRoadmapAnalyze(cwd, raw) { } const content = fs.readFileSync(roadmapPath, 'utf-8'); - const phasesDir = path.join(cwd, '.planning', 'phases'); + const phasesDir = paths.abs.phases; // Extract all phase headings: ## Phase N: Name or ### Phase N: Name const phasePattern = /#{2,4}\s*Phase\s+(\d+[A-Z]?(?:\.\d+)*)\s*:\s*([^\n]+)/gi; @@ -222,7 +225,7 @@ function cmdRoadmapUpdatePlanProgress(cwd, phaseNum, raw) { error('phase number required for roadmap update-plan-progress'); } - const roadmapPath = path.join(cwd, '.planning', 'ROADMAP.md'); + const roadmapPath = resolvePlanningPaths(cwd).abs.roadmap; const phaseInfo = findPhaseInternal(cwd, phaseNum); if (!phaseInfo) { diff --git a/get-shit-done/bin/lib/state.cjs b/get-shit-done/bin/lib/state.cjs index 915f51c452..eefb48f453 100644 --- a/get-shit-done/bin/lib/state.cjs +++ b/get-shit-done/bin/lib/state.cjs @@ -5,19 +5,20 @@ const fs = require('fs'); const path = require('path'); const { loadConfig, getMilestoneInfo, output, error } = require('./core.cjs'); +const { resolvePlanningPaths } = require('./paths.cjs'); const { extractFrontmatter, reconstructFrontmatter } = require('./frontmatter.cjs'); function cmdStateLoad(cwd, raw) { - const config = loadConfig(cwd); - const planningDir = path.join(cwd, '.planning'); + const paths = resolvePlanningPaths(cwd); + const config = loadConfig(cwd, paths); let stateRaw = ''; try { - stateRaw = fs.readFileSync(path.join(planningDir, 'STATE.md'), 'utf-8'); + stateRaw = fs.readFileSync(paths.abs.state, 'utf-8'); } catch {} - const configExists = fs.existsSync(path.join(planningDir, 'config.json')); - const roadmapExists = fs.existsSync(path.join(planningDir, 'ROADMAP.md')); + const configExists = fs.existsSync(paths.abs.config); + const roadmapExists = fs.existsSync(paths.abs.roadmap); const stateExists = stateRaw.length > 0; const result = { @@ -53,7 +54,7 @@ 
function cmdStateLoad(cwd, raw) { } function cmdStateGet(cwd, section, raw) { - const statePath = path.join(cwd, '.planning', 'STATE.md'); + const statePath = resolvePlanningPaths(cwd).abs.state; try { const content = fs.readFileSync(statePath, 'utf-8'); @@ -99,7 +100,7 @@ function readTextArgOrFile(cwd, value, filePath, label) { } function cmdStatePatch(cwd, patches, raw) { - const statePath = path.join(cwd, '.planning', 'STATE.md'); + const statePath = resolvePlanningPaths(cwd).abs.state; try { let content = fs.readFileSync(statePath, 'utf-8'); const results = { updated: [], failed: [] }; @@ -131,7 +132,7 @@ function cmdStateUpdate(cwd, field, value) { error('field and value required for state update'); } - const statePath = path.join(cwd, '.planning', 'STATE.md'); + const statePath = resolvePlanningPaths(cwd).abs.state; try { let content = fs.readFileSync(statePath, 'utf-8'); const fieldEscaped = field.replace(/[.*+?^${}()|[\]\\]/g, '\\$&'); @@ -167,7 +168,7 @@ function stateReplaceField(content, fieldName, newValue) { } function cmdStateAdvancePlan(cwd, raw) { - const statePath = path.join(cwd, '.planning', 'STATE.md'); + const statePath = resolvePlanningPaths(cwd).abs.state; if (!fs.existsSync(statePath)) { output({ error: 'STATE.md not found' }, raw); return; } let content = fs.readFileSync(statePath, 'utf-8'); @@ -196,7 +197,7 @@ function cmdStateAdvancePlan(cwd, raw) { } function cmdStateRecordMetric(cwd, options, raw) { - const statePath = path.join(cwd, '.planning', 'STATE.md'); + const statePath = resolvePlanningPaths(cwd).abs.state; if (!fs.existsSync(statePath)) { output({ error: 'STATE.md not found' }, raw); return; } let content = fs.readFileSync(statePath, 'utf-8'); @@ -230,13 +231,14 @@ function cmdStateRecordMetric(cwd, options, raw) { } function cmdStateUpdateProgress(cwd, raw) { - const statePath = path.join(cwd, '.planning', 'STATE.md'); + const paths = resolvePlanningPaths(cwd); + const statePath = paths.abs.state; if 
(!fs.existsSync(statePath)) { output({ error: 'STATE.md not found' }, raw); return; } let content = fs.readFileSync(statePath, 'utf-8'); // Count summaries across all phases - const phasesDir = path.join(cwd, '.planning', 'phases'); + const phasesDir = paths.abs.phases; let totalPlans = 0; let totalSummaries = 0; @@ -267,7 +269,7 @@ function cmdStateUpdateProgress(cwd, raw) { } function cmdStateAddDecision(cwd, options, raw) { - const statePath = path.join(cwd, '.planning', 'STATE.md'); + const statePath = resolvePlanningPaths(cwd).abs.state; if (!fs.existsSync(statePath)) { output({ error: 'STATE.md not found' }, raw); return; } const { phase, summary, summary_file, rationale, rationale_file } = options; @@ -305,7 +307,7 @@ function cmdStateAddDecision(cwd, options, raw) { } function cmdStateAddBlocker(cwd, text, raw) { - const statePath = path.join(cwd, '.planning', 'STATE.md'); + const statePath = resolvePlanningPaths(cwd).abs.state; if (!fs.existsSync(statePath)) { output({ error: 'STATE.md not found' }, raw); return; } const blockerOptions = typeof text === 'object' && text !== null ? 
text : { text }; let blockerText = null; @@ -338,7 +340,7 @@ function cmdStateAddBlocker(cwd, text, raw) { } function cmdStateResolveBlocker(cwd, text, raw) { - const statePath = path.join(cwd, '.planning', 'STATE.md'); + const statePath = resolvePlanningPaths(cwd).abs.state; if (!fs.existsSync(statePath)) { output({ error: 'STATE.md not found' }, raw); return; } if (!text) { output({ error: 'text required' }, raw); return; } @@ -370,7 +372,7 @@ function cmdStateResolveBlocker(cwd, text, raw) { } function cmdStateRecordSession(cwd, options, raw) { - const statePath = path.join(cwd, '.planning', 'STATE.md'); + const statePath = resolvePlanningPaths(cwd).abs.state; if (!fs.existsSync(statePath)) { output({ error: 'STATE.md not found' }, raw); return; } let content = fs.readFileSync(statePath, 'utf-8'); @@ -405,7 +407,7 @@ function cmdStateRecordSession(cwd, options, raw) { } function cmdStateSnapshot(cwd, raw) { - const statePath = path.join(cwd, '.planning', 'STATE.md'); + const statePath = resolvePlanningPaths(cwd).abs.state; if (!fs.existsSync(statePath)) { output({ error: 'STATE.md not found' }, raw); @@ -547,7 +549,8 @@ function buildStateFrontmatter(bodyContent, cwd) { if (cwd) { try { - const phasesDir = path.join(cwd, '.planning', 'phases'); + const _p = require('./paths.cjs').resolvePlanningPaths(cwd); + const phasesDir = _p.abs.phases; if (fs.existsSync(phasesDir)) { const phaseDirs = fs.readdirSync(phasesDir, { withFileTypes: true }) .filter(e => e.isDirectory()).map(e => e.name); @@ -641,7 +644,7 @@ function writeStateMd(statePath, content, cwd) { } function cmdStateJson(cwd, raw) { - const statePath = path.join(cwd, '.planning', 'STATE.md'); + const statePath = resolvePlanningPaths(cwd).abs.state; if (!fs.existsSync(statePath)) { output({ error: 'STATE.md not found' }, raw, 'STATE.md not found'); return; diff --git a/get-shit-done/bin/lib/verify.cjs b/get-shit-done/bin/lib/verify.cjs index 2e0d5db6a5..86d4fe52fd 100644 --- 
a/get-shit-done/bin/lib/verify.cjs +++ b/get-shit-done/bin/lib/verify.cjs @@ -7,6 +7,7 @@ const path = require('path'); const { safeReadFile, normalizePhaseName, execGit, findPhaseInternal, getMilestoneInfo, output, error } = require('./core.cjs'); const { extractFrontmatter, parseMustHavesBlock } = require('./frontmatter.cjs'); const { writeStateMd } = require('./state.cjs'); +const { resolvePlanningPaths } = require('./paths.cjs'); function cmdVerifySummary(cwd, summaryPath, checkFileCount, raw) { if (!summaryPath) { @@ -395,8 +396,9 @@ function cmdVerifyKeyLinks(cwd, planFilePath, raw) { } function cmdValidateConsistency(cwd, raw) { - const roadmapPath = path.join(cwd, '.planning', 'ROADMAP.md'); - const phasesDir = path.join(cwd, '.planning', 'phases'); + const paths = resolvePlanningPaths(cwd); + const roadmapPath = paths.abs.roadmap; + const phasesDir = paths.abs.phases; const errors = []; const warnings = []; @@ -515,12 +517,13 @@ function cmdValidateConsistency(cwd, raw) { } function cmdValidateHealth(cwd, options, raw) { - const planningDir = path.join(cwd, '.planning'); + const paths = resolvePlanningPaths(cwd); + const planningDir = paths.abs.planningRoot; const projectPath = path.join(planningDir, 'PROJECT.md'); - const roadmapPath = path.join(planningDir, 'ROADMAP.md'); - const statePath = path.join(planningDir, 'STATE.md'); - const configPath = path.join(planningDir, 'config.json'); - const phasesDir = path.join(planningDir, 'phases'); + const roadmapPath = paths.abs.roadmap; + const statePath = paths.abs.state; + const configPath = paths.abs.config; + const phasesDir = paths.abs.phases; const errors = []; const warnings = []; @@ -607,7 +610,7 @@ function cmdValidateHealth(cwd, options, raw) { const raw = fs.readFileSync(configPath, 'utf-8'); const parsed = JSON.parse(raw); // Validate known fields - const validProfiles = ['quality', 'balanced', 'budget']; + const validProfiles = ['quality', 'balanced', 'budget', 'adaptive']; if (parsed.model_profile 
&& !validProfiles.includes(parsed.model_profile)) { addIssue('warning', 'W004', `config.json: invalid model_profile "${parsed.model_profile}"`, `Valid values: ${validProfiles.join(', ')}`); } diff --git a/get-shit-done/references/adaptive-model-selection.md b/get-shit-done/references/adaptive-model-selection.md new file mode 100644 index 0000000000..081654cef5 --- /dev/null +++ b/get-shit-done/references/adaptive-model-selection.md @@ -0,0 +1,117 @@ +# Adaptive Model Selection + +The `adaptive` profile auto-selects models per-plan based on complexity evaluation, achieving 35-65% cost savings while maintaining quality where it matters. + +## How It Works + +1. Plan metadata (files modified, task count, objective keywords) is evaluated +2. A complexity score (0-10+) is computed +3. The score maps to a tier: Simple, Medium, or Complex +4. Each tier defines which model each agent uses + +## Scoring Algorithm + +| Factor | Points | Signal | +|--------|--------|--------| +| Files modified | 1pt each (max 5) | `context.files_modified.length` | +| Task count | 0-2pts | 3-5 tasks = 1pt, 6+ = 2pts | +| Architecture keywords | +3pts | `architect`, `system design`, `data model` | +| Integration keywords | +2pts | `integrat*`, `external api`, `third party`, `webhook` | +| Cross-cutting keywords | +2pts | `cross cutting`, `multiple modules`, `refactor across` | +| Novel pattern keywords | +3pts | `new library`, `unfamiliar`, `prototype` | +| Refactoring keywords | +1pt | `refactor`, `restructure`, `migrate` | +| TDD plan type | +2pts | `context.plan_type === 'tdd'` | +| Dependencies | +1pt | `context.depends_on.length > 0` | +| Test files | +1pt | Files matching `.(test|spec).[tj]sx?` or `__tests__` in `files_modified` | + +## Tier Definitions + +| Tier | Score Range | Description | +|------|-------------|-------------| +| Simple | 0-3 | Small changes, few files, no architectural concerns | +| Medium | 4-7 | Moderate scope, some complexity factors | +| Complex | 8+ | Large 
scope, architectural decisions, novel patterns | + +## Tier-to-Model Mapping + +| Agent | Simple | Medium | Complex | +|-------|--------|--------|---------| +| gsd-planner | sonnet | opus | opus | +| gsd-roadmapper | sonnet | sonnet | opus | +| gsd-executor | haiku | sonnet | sonnet | +| gsd-phase-researcher | haiku | sonnet | opus | +| gsd-project-researcher | haiku | sonnet | opus | +| gsd-research-synthesizer | haiku | sonnet | sonnet | +| gsd-debugger | sonnet | sonnet | opus | +| gsd-codebase-mapper | haiku | haiku | sonnet | +| gsd-verifier | haiku | sonnet | sonnet | +| gsd-plan-checker | haiku | sonnet | sonnet | +| gsd-integration-checker | haiku | sonnet | sonnet | + +## Config Schema + +```json +{ + "model_profile": "adaptive", + "adaptive_settings": { + "min_model": "sonnet", + "max_model": "opus", + "log_selections": false + } +} +``` + +| Field | Type | Default | Description | +|-------|------|---------|-------------| +| `min_model` | string | none | Floor model — never select below this tier | +| `max_model` | string | none | Ceiling model — never select above this tier | +| `log_selections` | boolean | false | When true, appends each model selection to `.planning/adaptive-usage.json` with timestamp, agent, tier, score, and model | + +### Clamping Examples + +- `min_model: "sonnet"` — Simple plans that would get haiku are upgraded to sonnet +- `max_model: "sonnet"` — Complex plans that would get opus are capped at sonnet +- Both set — constrains to a single model tier + +## Default Behavior + +When adaptive is active but no plan context is available (e.g., during init), defaults to **medium tier** (score 5). This is a safe middle ground that avoids under-provisioning. + +## Resolution Logic + +``` +1. Check model_overrides for agent-specific override (highest priority) +2. If adaptive profile: + a. Evaluate complexity from plan context + b. Look up model from ADAPTIVE_TIERS[tier][agentType] + c. Clamp to min_model / max_model bounds + d. 
Return result ('opus', 'sonnet', or 'haiku') +3. Non-adaptive: standard profile table lookup +``` + +Per-agent overrides always take precedence over adaptive selection. + +## CLI Usage + +```bash +# Resolve with plan context +node gsd-tools.cjs resolve-adaptive-model gsd-executor \ + --context '{"files_modified":["a.js"],"task_count":1,"objective":"fix typo"}' +# Returns: haiku, simple tier + +# Complex plan +node gsd-tools.cjs resolve-adaptive-model gsd-planner \ + --context '{"files_modified":["a.js","b.js","c.js","d.js","e.js"],"task_count":8,"objective":"architect new integration with external API"}' +# Returns: opus, complex tier +``` + +## Workflow Integration + +In `execute-phase`, when `model_profile === 'adaptive'`: + +1. Load plan metadata from `phase-plan-index` +2. Call `resolve-adaptive-model` per-plan with metadata as context +3. Display complexity tier and model in wave description +4. Spawn executor with the resolved model + +Non-adaptive profiles ignore plan context entirely — full backward compatibility. diff --git a/get-shit-done/references/model-profile-resolution.md b/get-shit-done/references/model-profile-resolution.md index 4e41aab9e2..189813b290 100644 --- a/get-shit-done/references/model-profile-resolution.md +++ b/get-shit-done/references/model-profile-resolution.md @@ -20,15 +20,27 @@ Look up the agent in the table for the resolved profile. Pass the model paramete Task( prompt="...", subagent_type="gsd-planner", - model="{resolved_model}" # "inherit", "sonnet", or "haiku" + model="{resolved_model}" # "opus", "sonnet", or "haiku" ) ``` -**Note:** Opus-tier agents resolve to `"inherit"` (not `"opus"`). This causes the agent to use the parent session's model, avoiding conflicts with organization policies that may block specific opus versions. +**Note:** Opus-tier agents resolve to `"opus"`, which Claude Code's Task tool maps to the current Opus model version. 
Earlier versions used `"inherit"` to avoid org-policy conflicts, but that silently downgraded agents to Sonnet when the parent session ran on Sonnet (the default). Passing `"opus"` directly ensures quality-profile agents actually run on Opus. + +## Adaptive Profile + +When `model_profile` is `"adaptive"`, resolution is per-plan rather than per-project: + +1. Pass plan metadata (files_modified, task_count, objective) as context +2. `evaluateComplexity(context)` scores 0-10+ and maps to simple/medium/complex tier +3. Model looked up from `ADAPTIVE_TIERS[tier][agentType]` +4. Clamped to `adaptive_settings.min_model` / `max_model` if configured + +Use `resolve-adaptive-model` CLI command with `--context` for per-plan resolution. +See `references/adaptive-model-selection.md` for full algorithm. ## Usage -1. Resolve once at orchestration start +1. Resolve once at orchestration start (or per-plan if adaptive) 2. Store the profile value 3. Look up each agent's model from the table when spawning -4. Pass model parameter to each Task call (values: `"inherit"`, `"sonnet"`, `"haiku"`) +4. Pass model parameter to each Task call (values: `"opus"`, `"sonnet"`, `"haiku"`) diff --git a/get-shit-done/references/model-profiles.md b/get-shit-done/references/model-profiles.md index ad401e27cf..d09c1c57e3 100644 --- a/get-shit-done/references/model-profiles.md +++ b/get-shit-done/references/model-profiles.md @@ -4,19 +4,21 @@ Model profiles control which Claude model each GSD agent uses. 
This allows balan ## Profile Definitions -| Agent | `quality` | `balanced` | `budget` | -|-------|-----------|------------|----------| -| gsd-planner | opus | opus | sonnet | -| gsd-roadmapper | opus | sonnet | sonnet | -| gsd-executor | opus | sonnet | sonnet | -| gsd-phase-researcher | opus | sonnet | haiku | -| gsd-project-researcher | opus | sonnet | haiku | -| gsd-research-synthesizer | sonnet | sonnet | haiku | -| gsd-debugger | opus | sonnet | sonnet | -| gsd-codebase-mapper | sonnet | haiku | haiku | -| gsd-verifier | sonnet | sonnet | haiku | -| gsd-plan-checker | sonnet | sonnet | haiku | -| gsd-integration-checker | sonnet | sonnet | haiku | +| Agent | `quality` | `balanced` | `budget` | `adaptive` | +|-------|-----------|------------|----------|------------| +| gsd-planner | opus | opus | sonnet | sonnet→opus | +| gsd-roadmapper | opus | sonnet | sonnet | sonnet→opus | +| gsd-executor | opus | sonnet | sonnet | haiku→sonnet | +| gsd-phase-researcher | opus | sonnet | haiku | haiku→opus | +| gsd-project-researcher | opus | sonnet | haiku | haiku→opus | +| gsd-research-synthesizer | sonnet | sonnet | haiku | haiku→sonnet | +| gsd-debugger | opus | sonnet | sonnet | sonnet→opus | +| gsd-codebase-mapper | sonnet | haiku | haiku | haiku→sonnet | +| gsd-verifier | sonnet | sonnet | haiku | haiku→sonnet | +| gsd-plan-checker | sonnet | sonnet | haiku | haiku→sonnet | +| gsd-integration-checker | sonnet | sonnet | haiku | haiku→sonnet | + +*Adaptive column shows the range: simple tier → complex tier. Actual model depends on per-plan complexity evaluation.* ## Profile Philosophy @@ -36,6 +38,14 @@ Model profiles control which Claude model each GSD agent uses. 
This allows balan - Haiku for research and verification - Use when: conserving quota, high-volume work, less critical phases +**adaptive** - Per-plan complexity-based selection +- Evaluates plan metadata (files, tasks, objective keywords) to score complexity 0-10+ +- Simple plans (0-3): haiku for executors, sonnet for planners +- Medium plans (4-7): sonnet across the board +- Complex plans (8+): opus for planners/researchers, sonnet for executors +- Use when: mixed-complexity projects, optimizing cost without manual profile switching +- See `references/adaptive-model-selection.md` for full algorithm + ## Resolution Logic Orchestrators resolve model before spawning: @@ -43,8 +53,13 @@ Orchestrators resolve model before spawning: ``` 1. Read .planning/config.json 2. Check model_overrides for agent-specific override -3. If no override, look up agent in profile table -4. Pass model parameter to Task call +3. If adaptive profile: + a. Evaluate complexity from plan context (files, tasks, objective) + b. Map score to tier (simple/medium/complex) + c. Look up model from ADAPTIVE_TIERS[tier][agentType] + d. Clamp to adaptive_settings.min_model / max_model bounds +4. If non-adaptive: look up agent in static profile table +5. Pass model parameter to Task call ``` ## Per-Agent Overrides @@ -88,5 +103,10 @@ Verification requires goal-backward reasoning - checking if code *delivers* what **Why Haiku for gsd-codebase-mapper?** Read-only exploration and pattern extraction. No reasoning required, just structured output from file contents. -**Why `inherit` instead of passing `opus` directly?** -Claude Code's `"opus"` alias maps to a specific model version. Organizations may block older opus versions while allowing newer ones. GSD returns `"inherit"` for opus-tier agents, causing them to use whatever opus version the user has configured in their session. This avoids version conflicts and silent fallbacks to Sonnet. 
+**Why `opus` and not `inherit`?** +GSD passes `"opus"` directly to Claude Code's Task tool, which resolves it to the current +Opus model version. Earlier versions used `"inherit"` to avoid org-policy version conflicts, +but this silently downgraded agents to Sonnet when the parent session ran on Sonnet (the +default). Passing `"opus"` explicitly ensures quality-profile agents actually run on Opus. +If an org policy blocks Opus, the Task call will fail with a clear error rather than +silently running on the wrong model. diff --git a/get-shit-done/references/planning-config.md b/get-shit-done/references/planning-config.md index 946f5aaea8..58ab2621c6 100644 --- a/get-shit-done/references/planning-config.md +++ b/get-shit-done/references/planning-config.md @@ -12,6 +12,11 @@ Configuration options for `.planning/` directory behavior. "branching_strategy": "none", "phase_branch_template": "gsd/phase-{phase}-{slug}", "milestone_branch_template": "gsd/{milestone}-{slug}" +}, +"adaptive_settings": { + "min_model": "sonnet", + "max_model": "opus", + "log_selections": false } ``` @@ -22,6 +27,9 @@ Configuration options for `.planning/` directory behavior. 
| `git.branching_strategy` | `"none"` | Git branching approach: `"none"`, `"phase"`, or `"milestone"` | | `git.phase_branch_template` | `"gsd/phase-{phase}-{slug}"` | Branch template for phase strategy | | `git.milestone_branch_template` | `"gsd/{milestone}-{slug}"` | Branch template for milestone strategy | +| `adaptive_settings.min_model` | none | Floor model for adaptive profile (never select below) | +| `adaptive_settings.max_model` | none | Ceiling model for adaptive profile (never select above) | +| `adaptive_settings.log_selections` | `false` | When true, appends each model selection to `.planning/adaptive-usage.json` | diff --git a/get-shit-done/templates/bug-report.md b/get-shit-done/templates/bug-report.md new file mode 100644 index 0000000000..324c6f7e7b --- /dev/null +++ b/get-shit-done/templates/bug-report.md @@ -0,0 +1,73 @@ +# Bug Report Template + +Documents the `.planning/bugs/BUG-{NNN}.md` file format, frontmatter fields, sections, and lifecycle. + +## File Format + +Bug files live in `.planning/bugs/` (active) or `.planning/bugs/resolved/` (closed). + +### Frontmatter Fields + +```yaml +--- +id: BUG-{NNN} # Unique identifier, zero-padded to 3 digits +title: "{title}" # Short description, 3-10 words +severity: critical|high|medium|low +status: reported|investigating|fixing|resolved +area: {area} # Inferred from file paths (auth, api, ui, etc.) 
+phase: {phase or null} # Related phase number, if applicable +created: {ISO timestamp} # When the bug was reported +updated: {ISO timestamp} # Last modification time +github_issue: {URL or null} # GitHub issue URL if created +files: # Related source files + - {file paths} +--- +``` + +### Sections + +| Section | Mutability | Description | +|---------|-----------|-------------| +| `## Description` | Immutable | What the bug is | +| `## Expected Behavior` | Immutable | What should happen | +| `## Actual Behavior` | Immutable | What actually happens | +| `## Reproduction Steps` | Immutable | How to reproduce (or "unknown") | +| `## Environment` | Immutable | Branch, OS, Node version, etc. | +| `## Related Code` | Appendable | File paths and code snippets | +| `## Diagnostic Logs` | Appendable | Git state, log files, error output | + +## Severity Guide + +| Level | Keywords | Examples | +|-------|----------|----------| +| **critical** | crash, data loss, security, vulnerability, corruption, infinite loop, memory leak | App crashes on startup, user data deleted | +| **high** | broken, fails, error, exception, cannot, blocks, regression, timeout | Feature completely broken, blocking other work | +| **medium** | incorrect, wrong, unexpected, inconsistent, slow, intermittent | Wrong output, occasional failures | +| **low** | typo, alignment, color, spacing, formatting, cosmetic, minor | Visual glitches, text errors | + +Default severity is **medium** if no keywords match. 
+ +## Lifecycle States + +``` +reported -> investigating -> fixing -> resolved +``` + +- **reported**: Bug filed, not yet triaged +- **investigating**: Actively looking into root cause +- **fixing**: Root cause known, fix in progress +- **resolved**: Fix applied and verified (file moved to `bugs/resolved/`) + +## Area Inference + +Areas are inferred from related file paths: + +| Path Pattern | Area | +|-------------|------| +| `src/api`, `routes/`, `endpoints/` | api | +| `src/auth`, `auth/`, `login` | auth | +| `src/ui`, `components/`, `pages/` | ui | +| `src/db`, `prisma/`, `migrations/` | database | +| `tests/`, `__tests__/`, `*.test.*` | testing | +| `docs/`, `*.md` | docs | +| (no match) | general | diff --git a/get-shit-done/templates/planner-subagent-prompt.md b/get-shit-done/templates/planner-subagent-prompt.md index bcaa68d275..3f34235f51 100644 --- a/get-shit-done/templates/planner-subagent-prompt.md +++ b/get-shit-done/templates/planner-subagent-prompt.md @@ -13,23 +13,23 @@ Template for spawning gsd-planner agent. 
The agent contains all planning experti **Mode:** {standard | gap_closure} **Project State:** -@.planning/STATE.md +@{state_path} **Roadmap:** -@.planning/ROADMAP.md +@{roadmap_path} **Requirements (if exists):** -@.planning/REQUIREMENTS.md +@{requirements_path} **Phase Context (if exists):** -@.planning/phases/{phase_dir}/{phase_num}-CONTEXT.md +@{phase_dir}/{phase_num}-CONTEXT.md **Research (if exists):** -@.planning/phases/{phase_dir}/{phase_num}-RESEARCH.md +@{phase_dir}/{phase_num}-RESEARCH.md **Gap Closure (if --gaps mode):** -@.planning/phases/{phase_dir}/{phase_num}-VERIFICATION.md -@.planning/phases/{phase_dir}/{phase_num}-UAT.md +@{phase_dir}/{phase_num}-VERIFICATION.md +@{phase_dir}/{phase_num}-UAT.md @@ -98,8 +98,8 @@ Continue planning for Phase {phase_number}: {phase_name} -Phase directory: @.planning/phases/{phase_dir}/ -Existing plans: @.planning/phases/{phase_dir}/*-PLAN.md +Phase directory: @{phase_dir}/ +Existing plans: @{phase_dir}/*-PLAN.md diff --git a/get-shit-done/templates/state.md b/get-shit-done/templates/state.md index 3e5b503044..09cd632c01 100644 --- a/get-shit-done/templates/state.md +++ b/get-shit-done/templates/state.md @@ -60,6 +60,12 @@ Recent decisions affecting current work: None yet. +### Active Bugs + +[From .planning/bugs/ — reported via /gsd:report-bug] + +None yet. + ### Blockers/Concerns [Issues that affect future work] @@ -150,6 +156,11 @@ Updated after each plan completion. 
- Reference to .planning/todos/pending/ - Brief list if few, count if many (e.g., "5 pending todos — see /gsd:check-todos") +**Active Bugs:** Bugs reported via /gsd:report-bug +- Count of active bugs by severity +- Reference to .planning/bugs/ +- Brief list if few, count if many (e.g., "2 active bugs (1 high, 1 medium)") + **Blockers/Concerns:** From "Next Phase Readiness" sections - Issues that affect future work - Prefix with originating phase diff --git a/get-shit-done/workflows/add-phase.md b/get-shit-done/workflows/add-phase.md index 56eb418684..cc46558d05 100644 --- a/get-shit-done/workflows/add-phase.md +++ b/get-shit-done/workflows/add-phase.md @@ -32,6 +32,8 @@ Load phase operation context: INIT=$(node "$HOME/.claude/get-shit-done/bin/gsd-tools.cjs" init phase-op "0") ``` +Extract from init JSON: `state_path`, `roadmap_path`, `phase_dir`, `planning_base`. + Check `roadmap_exists` from init JSON. If false: ``` ERROR: No roadmap found (.planning/ROADMAP.md) @@ -51,7 +53,7 @@ The CLI handles: - Finding the highest existing integer phase number - Calculating next phase number (max + 1) - Generating slug from description -- Creating the phase directory (`.planning/phases/{NN}-{slug}/`) +- Creating the phase directory (`{planning_base}/phases/{NN}-{slug}/`) - Inserting the phase entry into ROADMAP.md with Goal, Depends on, and Plans sections Extract from result: `phase_number`, `padded`, `name`, `slug`, `directory`. @@ -60,7 +62,7 @@ Extract from result: `phase_number`, `padded`, `name`, `slug`, `directory`. Update STATE.md to reflect the new phase: -1. Read `.planning/STATE.md` +1. Read `{state_path}` 2. 
Under "## Accumulated Context" → "### Roadmap Evolution" add entry: ``` - Phase {N} added: {description} @@ -75,10 +77,10 @@ Present completion summary: ``` Phase {N} added to current milestone: - Description: {description} -- Directory: .planning/phases/{phase-num}-{slug}/ +- Directory: {planning_base}/phases/{phase-num}-{slug}/ - Status: Not planned yet -Roadmap updated: .planning/ROADMAP.md +Roadmap updated: {roadmap_path} --- diff --git a/get-shit-done/workflows/add-tests.md b/get-shit-done/workflows/add-tests.md index 6dfc03bd6a..fb86d2bf6f 100644 --- a/get-shit-done/workflows/add-tests.md +++ b/get-shit-done/workflows/add-tests.md @@ -36,12 +36,12 @@ Load phase operation context: INIT=$(node "$HOME/.claude/get-shit-done/bin/gsd-tools.cjs" init phase-op "${PHASE_ARG}") ``` -Extract from init JSON: `phase_dir`, `phase_number`, `phase_name`. +Extract from init JSON: `phase_dir`, `phase_number`, `phase_name`, `planning_base`. Verify the phase directory exists. If not: ``` ERROR: Phase directory not found for phase ${PHASE_ARG} -Ensure the phase exists in .planning/phases/ +Ensure the phase exists in {planning_base}/phases/ ``` Exit. diff --git a/get-shit-done/workflows/add-todo.md b/get-shit-done/workflows/add-todo.md index cd15cc8a91..4163abc20d 100644 --- a/get-shit-done/workflows/add-todo.md +++ b/get-shit-done/workflows/add-todo.md @@ -15,11 +15,11 @@ Load todo context: INIT=$(node "$HOME/.claude/get-shit-done/bin/gsd-tools.cjs" init todos) ``` -Extract from init JSON: `commit_docs`, `date`, `timestamp`, `todo_count`, `todos`, `pending_dir`, `todos_dir_exists`. +Extract from init JSON: `commit_docs`, `date`, `timestamp`, `todo_count`, `todos`, `pending_dir`, `completed_dir`, `planning_base`, `todos_dir_exists`. Ensure directories exist: ```bash -mkdir -p .planning/todos/pending .planning/todos/done +mkdir -p "${pending_dir}" "${completed_dir}" ``` Note existing areas from the todos array for consistency in infer_area step. 
@@ -62,7 +62,7 @@ Use existing area from step 2 if similar match exists. ```bash # Search for key words from title in existing todos -grep -l -i "[key words from title]" .planning/todos/pending/*.md 2>/dev/null +grep -l -i "[key words from title]" ${pending_dir}/*.md 2>/dev/null ``` If potential duplicate found: @@ -86,7 +86,7 @@ Generate slug for the title: slug=$(node "$HOME/.claude/get-shit-done/bin/gsd-tools.cjs" generate-slug "$title" --raw) ``` -Write to `.planning/todos/pending/${date}-${slug}.md`: +Write to `${pending_dir}/${date}-${slug}.md`: ```markdown --- @@ -108,7 +108,7 @@ files: -If `.planning/STATE.md` exists: +If `{planning_base}/STATE.md` exists: 1. Use `todo_count` from init context (or re-run `init todos` if count changed) 2. Update "### Pending Todos" under "## Accumulated Context" @@ -118,7 +118,7 @@ If `.planning/STATE.md` exists: Commit the todo and any updated state: ```bash -node "$HOME/.claude/get-shit-done/bin/gsd-tools.cjs" commit "docs: capture todo - [title]" --files .planning/todos/pending/[filename] .planning/STATE.md +node "$HOME/.claude/get-shit-done/bin/gsd-tools.cjs" commit "docs: capture todo - [title]" --files ${pending_dir}/[filename] {planning_base}/STATE.md ``` Tool respects `commit_docs` config and gitignore automatically. @@ -128,7 +128,7 @@ Confirm: "Committed: docs: capture todo - [title]" ``` -Todo saved: .planning/todos/pending/[filename] +Todo saved: ${pending_dir}/[filename] [title] Area: [area] diff --git a/get-shit-done/workflows/audit-milestone.md b/get-shit-done/workflows/audit-milestone.md index 7eee93975b..a2c1f4970a 100644 --- a/get-shit-done/workflows/audit-milestone.md +++ b/get-shit-done/workflows/audit-milestone.md @@ -14,7 +14,7 @@ Read all files referenced by the invoking prompt's execution_context before star INIT=$(node "$HOME/.claude/get-shit-done/bin/gsd-tools.cjs" init milestone-op) ``` -Extract from init JSON: `milestone_version`, `milestone_name`, `phase_count`, `completed_phases`, `commit_docs`. 
+Extract from init JSON: `milestone_version`, `milestone_name`, `phase_count`, `completed_phases`, `commit_docs`, `planning_base`. Resolve integration checker model: ```bash @@ -103,7 +103,7 @@ For each phase's VERIFICATION.md, extract the expanded requirements table: For each phase's SUMMARY.md, extract `requirements-completed` from YAML frontmatter: ```bash -for summary in .planning/phases/*-*/*-SUMMARY.md; do +for summary in ${planning_base}/phases/*-*/*-SUMMARY.md; do node "$HOME/.claude/get-shit-done/bin/gsd-tools.cjs" summary-extract "$summary" --fields requirements_completed | jq -r '.requirements_completed' done ``` @@ -129,7 +129,7 @@ For each REQ-ID, determine status using all three sources: ## 6. Aggregate into v{version}-MILESTONE-AUDIT.md -Create `.planning/v{version}-v{version}-MILESTONE-AUDIT.md` with: +Create `{planning_base}/v{version}-MILESTONE-AUDIT.md` with: ```yaml --- @@ -186,7 +186,7 @@ Output this markdown directly (not as a code block). Route based on status: ## ✓ Milestone {version} — Audit Passed **Score:** {N}/{M} requirements satisfied -**Report:** .planning/v{version}-MILESTONE-AUDIT.md +**Report:** {planning_base}/v{version}-MILESTONE-AUDIT.md All requirements covered. Cross-phase integration verified. E2E flows complete. @@ -209,7 +209,7 @@ All requirements covered. Cross-phase integration verified. E2E flows complete. ## ⚠ Milestone {version} — Gaps Found **Score:** {N}/{M} requirements satisfied -**Report:** .planning/v{version}-MILESTONE-AUDIT.md +**Report:** {planning_base}/v{version}-MILESTONE-AUDIT.md ### Unsatisfied Requirements @@ -240,7 +240,7 @@ All requirements covered. Cross-phase integration verified. E2E flows complete. 
─────────────────────────────────────────────────────────────── **Also available:** -- cat .planning/v{version}-MILESTONE-AUDIT.md — see full report +- cat {planning_base}/v{version}-MILESTONE-AUDIT.md — see full report - /gsd:complete-milestone {version} — proceed anyway (accept tech debt) ─────────────────────────────────────────────────────────────── @@ -252,7 +252,7 @@ All requirements covered. Cross-phase integration verified. E2E flows complete. ## ⚡ Milestone {version} — Tech Debt Review **Score:** {N}/{M} requirements satisfied -**Report:** .planning/v{version}-MILESTONE-AUDIT.md +**Report:** {planning_base}/v{version}-MILESTONE-AUDIT.md All requirements met. No critical blockers. Accumulated tech debt needs review. @@ -291,7 +291,7 @@ All requirements met. No critical blockers. Accumulated tech debt needs review. - [ ] Orphaned requirements detected (in traceability but absent from all VERIFICATIONs) - [ ] Tech debt and deferred gaps aggregated - [ ] Integration checker spawned with milestone requirement IDs -- [ ] v{version}-MILESTONE-AUDIT.md created with structured requirement gap objects +- [ ] {planning_base}/v{version}-MILESTONE-AUDIT.md created with structured requirement gap objects - [ ] FAIL gate enforced — any unsatisfied requirement forces gaps_found status - [ ] Results presented with actionable next steps diff --git a/get-shit-done/workflows/check-todos.md b/get-shit-done/workflows/check-todos.md index 43598fc719..51b23c40ca 100644 --- a/get-shit-done/workflows/check-todos.md +++ b/get-shit-done/workflows/check-todos.md @@ -15,7 +15,7 @@ Load todo context: INIT=$(node "$HOME/.claude/get-shit-done/bin/gsd-tools.cjs" init todos) ``` -Extract from init JSON: `todo_count`, `todos`, `pending_dir`. +Extract from init JSON: `todo_count`, `todos`, `pending_dir`, `completed_dir`, `planning_base`. If `todo_count` is 0: ``` @@ -92,7 +92,7 @@ If `files` field has entries, read and briefly summarize each. 
Check for roadmap (can use init progress or directly check file existence): -If `.planning/ROADMAP.md` exists: +If `{planning_base}/ROADMAP.md` exists: 1. Check if todo's area matches an upcoming phase 2. Check if todo's files overlap with a phase's scope 3. Note any match for action options @@ -125,7 +125,7 @@ Use AskUserQuestion: **Work on it now:** ```bash -mv ".planning/todos/pending/[filename]" ".planning/todos/done/" +mv "${pending_dir}/[filename]" "${completed_dir}/" ``` Update STATE.md todo count. Present problem/solution context. Begin work or ask how to proceed. @@ -153,8 +153,8 @@ Re-run `init todos` to get updated count, then update STATE.md "### Pending Todo If todo was moved to done/, commit the change: ```bash -git rm --cached .planning/todos/pending/[filename] 2>/dev/null || true -node "$HOME/.claude/get-shit-done/bin/gsd-tools.cjs" commit "docs: start work on todo - [title]" --files .planning/todos/done/[filename] .planning/STATE.md +git rm --cached ${pending_dir}/[filename] 2>/dev/null || true +node "$HOME/.claude/get-shit-done/bin/gsd-tools.cjs" commit "docs: start work on todo - [title]" --files ${completed_dir}/[filename] {planning_base}/STATE.md ``` Tool respects `commit_docs` config and gitignore automatically. diff --git a/get-shit-done/workflows/cleanup.md b/get-shit-done/workflows/cleanup.md index c1f772e774..2ed3fb0b36 100644 --- a/get-shit-done/workflows/cleanup.md +++ b/get-shit-done/workflows/cleanup.md @@ -16,6 +16,14 @@ Archive accumulated phase directories from completed milestones into `.planning/ +**Load milestone-aware paths:** + +```bash +INIT=$(node "$HOME/.claude/get-shit-done/bin/gsd-tools.cjs" init milestone-op) +``` + +Extract from init JSON: `planning_base`. + Read `.planning/MILESTONES.md` to identify completed milestones and their versions. 
```bash @@ -55,7 +63,7 @@ Extract phase numbers and names from the archived roadmap (e.g., Phase 1: Founda Check which of those phase directories still exist in `.planning/phases/`: ```bash -ls -d .planning/phases/*/ 2>/dev/null +ls -d {planning_base}/phases/*/ 2>/dev/null ``` Match phase directories to milestone membership. Only include directories that still exist in `.planning/phases/`. @@ -110,7 +118,7 @@ mkdir -p .planning/milestones/v{X.Y}-phases For each phase directory belonging to this milestone: ```bash -mv .planning/phases/{dir} .planning/milestones/v{X.Y}-phases/ +mv {planning_base}/phases/{dir} .planning/milestones/v{X.Y}-phases/ ``` Repeat for all milestones in the cleanup set. @@ -122,7 +130,7 @@ Repeat for all milestones in the cleanup set. Commit the changes: ```bash -node "$HOME/.claude/get-shit-done/bin/gsd-tools.cjs" commit "chore: archive phase directories from completed milestones" --files .planning/milestones/ .planning/phases/ +node "$HOME/.claude/get-shit-done/bin/gsd-tools.cjs" commit "chore: archive phase directories from completed milestones" --files .planning/milestones/ {planning_base}/phases/ ``` diff --git a/get-shit-done/workflows/complete-milestone.md b/get-shit-done/workflows/complete-milestone.md index d1a77b6c53..fba15243ac 100644 --- a/get-shit-done/workflows/complete-milestone.md +++ b/get-shit-done/workflows/complete-milestone.md @@ -37,6 +37,14 @@ When a milestone completes: +**Load milestone-aware paths:** + +```bash +INIT=$(node "$HOME/.claude/get-shit-done/bin/gsd-tools.cjs" init milestone-op) +``` + +Extract from init JSON: `state_path`, `roadmap_path`, `requirements_path`, `config_path`, `planning_base`. 
+ **Use `roadmap analyze` for comprehensive readiness check:** ```bash @@ -88,7 +96,7 @@ If user selects "Proceed anyway": note incomplete requirements in MILESTONES.md ```bash -cat .planning/config.json 2>/dev/null +cat {config_path} 2>/dev/null ``` @@ -153,7 +161,7 @@ Extract one-liners from SUMMARY.md files using summary-extract: ```bash # For each phase in milestone, extract one-liner -for summary in .planning/phases/*-*/*-SUMMARY.md; do +for summary in {planning_base}/phases/*-*/*-SUMMARY.md; do node "$HOME/.claude/get-shit-done/bin/gsd-tools.cjs" summary-extract "$summary" --fields one_liner | jq -r '.one_liner' done ``` @@ -186,7 +194,7 @@ Full PROJECT.md evolution review at milestone completion. Read all phase summaries: ```bash -cat .planning/phases/*-*/*-SUMMARY.md +cat {planning_base}/phases/*-*/*-SUMMARY.md ``` **Full review checklist:** @@ -389,8 +397,8 @@ AskUserQuestion(header="Archive Phases", question="Archive phase directories to If "Yes": move phase directories to the milestone archive: ```bash mkdir -p .planning/milestones/v[X.Y]-phases -# For each phase directory in .planning/phases/: -mv .planning/phases/{phase-dir} .planning/milestones/v[X.Y]-phases/ +# For each phase directory in {planning_base}/phases/: +mv {planning_base}/phases/{phase-dir} .planning/milestones/v[X.Y]-phases/ ``` Verify: `✅ Phase directories archived to .planning/milestones/v[X.Y]-phases/` @@ -432,8 +440,8 @@ After `milestone complete` has archived, reorganize ROADMAP.md with milestone gr **Then delete originals:** ```bash -rm .planning/ROADMAP.md -rm .planning/REQUIREMENTS.md +rm {roadmap_path} +rm {requirements_path} ``` @@ -676,7 +684,7 @@ git push origin v[X.Y] Commit milestone completion. 
```bash -node "$HOME/.claude/get-shit-done/bin/gsd-tools.cjs" commit "chore: complete v[X.Y] milestone" --files .planning/milestones/v[X.Y]-ROADMAP.md .planning/milestones/v[X.Y]-REQUIREMENTS.md .planning/milestones/v[X.Y]-MILESTONE-AUDIT.md .planning/MILESTONES.md .planning/PROJECT.md .planning/STATE.md +node "$HOME/.claude/get-shit-done/bin/gsd-tools.cjs" commit "chore: complete v[X.Y] milestone" --files .planning/milestones/v[X.Y]-ROADMAP.md .planning/milestones/v[X.Y]-REQUIREMENTS.md .planning/milestones/v[X.Y]-MILESTONE-AUDIT.md .planning/MILESTONES.md .planning/PROJECT.md {state_path} ``` ``` diff --git a/get-shit-done/workflows/diagnose-issues.md b/get-shit-done/workflows/diagnose-issues.md index 274b50c57c..250f6a83c3 100644 --- a/get-shit-done/workflows/diagnose-issues.md +++ b/get-shit-done/workflows/diagnose-issues.md @@ -79,7 +79,7 @@ For each gap, fill the debug-subagent-prompt template and spawn: ``` Task( - prompt=filled_debug_subagent_prompt + "\n\n\n- {phase_dir}/{phase_num}-UAT.md\n- .planning/STATE.md\n", + prompt=filled_debug_subagent_prompt + "\n\n\n- {phase_dir}/{phase_num}-UAT.md\n- {planning_base}/STATE.md\n", subagent_type="general-purpose", description="Debug: {truth_short}" ) @@ -158,7 +158,7 @@ Update status in frontmatter to "diagnosed". Commit the updated UAT.md: ```bash -node "$HOME/.claude/get-shit-done/bin/gsd-tools.cjs" commit "docs({phase_num}): add root causes from diagnosis" --files ".planning/phases/XX-name/{phase_num}-UAT.md" +node "$HOME/.claude/get-shit-done/bin/gsd-tools.cjs" commit "docs({phase_num}): add root causes from diagnosis" --files "{phase_dir}/{phase_num}-UAT.md" ``` diff --git a/get-shit-done/workflows/discovery-phase.md b/get-shit-done/workflows/discovery-phase.md index 6f71d19421..0c6c37a65b 100644 --- a/get-shit-done/workflows/discovery-phase.md +++ b/get-shit-done/workflows/discovery-phase.md @@ -116,7 +116,7 @@ For: Choosing between options, new external integration. 7. Return to plan-phase.md. 
-**Output:** `.planning/phases/XX-name/DISCOVERY.md` +**Output:** `{phase_dir}/DISCOVERY.md` @@ -169,7 +169,7 @@ For: Architectural decisions, novel problems, high-risk choices. 8. Return to plan-phase.md. -**Output:** `.planning/phases/XX-name/DISCOVERY.md` (comprehensive) +**Output:** `{phase_dir}/DISCOVERY.md` (comprehensive) @@ -203,7 +203,7 @@ Run the discovery: -Write `.planning/phases/XX-name/DISCOVERY.md`: +Write `{phase_dir}/DISCOVERY.md`: - Summary with recommendation - Key findings with sources - Code examples if applicable @@ -246,7 +246,7 @@ If "address first": Gather user input on questions, update discovery. ``` -Discovery complete: .planning/phases/XX-name/DISCOVERY.md +Discovery complete: {phase_dir}/DISCOVERY.md Recommendation: [one-liner] Confidence: [level] diff --git a/get-shit-done/workflows/discuss-phase.md b/get-shit-done/workflows/discuss-phase.md index 225dd07136..cadc90d6a3 100644 --- a/get-shit-done/workflows/discuss-phase.md +++ b/get-shit-done/workflows/discuss-phase.md @@ -116,7 +116,7 @@ Phase number from argument (required). INIT=$(node "$HOME/.claude/get-shit-done/bin/gsd-tools.cjs" init phase-op "${PHASE}") ``` -Parse JSON for: `commit_docs`, `phase_found`, `phase_dir`, `phase_number`, `phase_name`, `phase_slug`, `padded_phase`, `has_research`, `has_context`, `has_plans`, `has_verification`, `plan_count`, `roadmap_exists`, `planning_exists`. +Parse JSON for: `commit_docs`, `phase_found`, `phase_dir`, `phase_number`, `phase_name`, `phase_slug`, `padded_phase`, `has_research`, `has_context`, `has_plans`, `has_verification`, `plan_count`, `roadmap_exists`, `planning_exists`, `state_path`, `roadmap_path`, `requirements_path`, `phase_dir`, `planning_base`. **If `phase_found` is false:** ``` @@ -368,7 +368,7 @@ Use values from init: `phase_dir`, `phase_slug`, `padded_phase`. 
If `phase_dir` is null (phase exists in roadmap but no directory): ```bash -mkdir -p ".planning/phases/${padded_phase}-${phase_slug}" +mkdir -p "${planning_base}/phases/${padded_phase}-${phase_slug}" ``` **File location:** `${phase_dir}/${padded_phase}-CONTEXT.md` @@ -504,7 +504,7 @@ node "$HOME/.claude/get-shit-done/bin/gsd-tools.cjs" state record-session \ Commit STATE.md: ```bash -node "$HOME/.claude/get-shit-done/bin/gsd-tools.cjs" commit "docs(state): record phase ${PHASE} context session" --files .planning/STATE.md +node "$HOME/.claude/get-shit-done/bin/gsd-tools.cjs" commit "docs(state): record phase ${PHASE} context session" --files "${state_path}" ``` diff --git a/get-shit-done/workflows/evaluate-complexity.md b/get-shit-done/workflows/evaluate-complexity.md new file mode 100644 index 0000000000..60e9043020 --- /dev/null +++ b/get-shit-done/workflows/evaluate-complexity.md @@ -0,0 +1,78 @@ + +Reusable complexity evaluation for adaptive model selection. Other workflows consult this when they need to determine plan complexity for model resolution. 
+ + + + +## Input Context + +The evaluation function accepts a context object with: + +| Field | Type | Description | +|-------|------|-------------| +| `files_modified` | string[] | List of files the plan modifies | +| `task_count` | number | Number of tasks in the plan | +| `objective` | string | Plan objective text | +| `plan_type` | string | Plan type (e.g., `'tdd'`, `'execute'`) | +| `depends_on` | string[] | Plan IDs this plan depends on | + +## Scoring + +| Factor | Points | Condition | +|--------|--------|-----------| +| Files modified | 1pt each (max 5) | Count of files_modified | +| Task count | 1pt | 3-5 tasks | +| Task count | 2pts | 6+ tasks | +| Architecture keywords | 3pts | objective matches: architect, system design, data model | +| Integration keywords | 2pts | objective matches: integrat*, external api, third party, webhook | +| Cross-cutting keywords | 2pts | objective matches: cross cutting, multiple modules, refactor across | +| Novel pattern keywords | 3pts | objective matches: new library, unfamiliar, prototype | +| Refactoring keywords | 1pt | objective matches: refactor, restructure, migrate | +| TDD plan type | 2pts | plan_type === 'tdd' | +| Dependencies | 1pt | depends_on has 1+ entries | +| Test files | 1pt | files_modified contains test/spec files | + +## Tier Mapping + +| Score | Tier | +|-------|------| +| 0-3 | simple | +| 4-7 | medium | +| 8+ | complex | + +## Default + +When no context is provided, returns `{ score: 5, tier: 'medium' }` as a safe fallback. + + + + + +### From CLI + +```bash +node "$HOME/.claude/get-shit-done/bin/gsd-tools.cjs" resolve-adaptive-model --context '' +``` + +### From Code + +```javascript +const { evaluateComplexity } = require('./core.cjs'); +const result = evaluateComplexity({ + files_modified: ['src/api.ts', 'src/db.ts'], + task_count: 5, + objective: 'integrate external webhook API' +}); +// result: { score: 9, tier: 'complex', factors: [...] 
} +``` + +### In Workflows + +When `model_profile === 'adaptive'` in execute-phase: + +1. Read plan metadata from `phase-plan-index` +2. Build context: `{ files_modified, task_count, objective }` +3. Call `resolve-adaptive-model` to get per-plan model +4. Use resolved model for agent spawn + + diff --git a/get-shit-done/workflows/execute-phase.md b/get-shit-done/workflows/execute-phase.md index 5149594ce0..797c08641f 100644 --- a/get-shit-done/workflows/execute-phase.md +++ b/get-shit-done/workflows/execute-phase.md @@ -19,7 +19,7 @@ Load all context in one call: INIT=$(node "$HOME/.claude/get-shit-done/bin/gsd-tools.cjs" init execute-phase "${PHASE_ARG}") ``` -Parse JSON for: `executor_model`, `verifier_model`, `commit_docs`, `parallelization`, `branching_strategy`, `branch_name`, `phase_found`, `phase_dir`, `phase_number`, `phase_name`, `phase_slug`, `plans`, `incomplete_plans`, `plan_count`, `incomplete_count`, `state_exists`, `roadmap_exists`, `phase_req_ids`. +Parse JSON for: `executor_model`, `verifier_model`, `commit_docs`, `parallelization`, `branching_strategy`, `branch_name`, `phase_found`, `phase_dir`, `phase_number`, `phase_name`, `phase_slug`, `plans`, `incomplete_plans`, `plan_count`, `incomplete_count`, `state_exists`, `roadmap_exists`, `phase_req_ids`, `state_path`, `roadmap_path`, `config_path`, `planning_base`, `milestone`, `is_multi_milestone`, `model_profile`, `adaptive_settings`. **If `phase_found` is false:** Error — phase directory not found. **If `plan_count` is 0:** Error — no plans found in phase. @@ -99,10 +99,23 @@ Execute each wave in sequence. Within a wave: parallel if `PARALLELIZATION=true` Pass paths only — executors read files themselves with their fresh 200k context. This keeps orchestrator context lean (~10-15%). 
+ **Adaptive model resolution:** When `model_profile === 'adaptive'`, resolve the executor model per-plan using plan metadata from `phase-plan-index`: + + ```bash + ADAPTIVE_MODEL=$(node "$HOME/.claude/get-shit-done/bin/gsd-tools.cjs" resolve-adaptive-model gsd-executor --context '{"files_modified": [from plan index], "task_count": [from plan index], "objective": "[from plan objective]", "plan_type": "[from plan index type field]", "depends_on": [from plan index depends_on field]}' --raw) + ``` + + Display the complexity tier in the wave description: + ``` + Model: {model} ({tier} complexity, score {score}) + ``` + + For non-adaptive profiles, use `executor_model` from init directly. + ``` Task( subagent_type="gsd-executor", - model="{executor_model}", + model="{executor_model or adaptive_model}", prompt=" Execute plan {plan_number} of phase {phase_number}-{phase_name}. @@ -119,8 +132,8 @@ Execute each wave in sequence. Within a wave: parallel if `PARALLELIZATION=true` Read these files at execution start using the Read tool: - {phase_dir}/{plan_file} (Plan) - - .planning/STATE.md (State) - - .planning/config.json (Config, if exists) + - {state_path} (State) + - {config_path} (Config, if exists) - ./CLAUDE.md (Project instructions, if exists — follow project-specific guidelines and coding conventions) - .claude/skills/ or .agents/skills/ (Project skills, if either exists — list skills, read SKILL.md for each, follow relevant rules during implementation) @@ -294,6 +307,15 @@ node "$HOME/.claude/get-shit-done/bin/gsd-tools.cjs" commit "docs(phase-${PARENT Verify phase achieved its GOAL, not just completed tasks. 
+**Adaptive verifier resolution:** When `model_profile === 'adaptive'`, aggregate all plan metadata from `phase-plan-index` (total files across all plans, total tasks, phase goal as objective) and resolve the verifier model per-phase: + +```bash +# Aggregate: collect all files_modified, sum task_count, use phase goal as objective +VERIFIER_MODEL=$(node "$HOME/.claude/get-shit-done/bin/gsd-tools.cjs" resolve-adaptive-model gsd-verifier --context '{"files_modified": [all files from all plans], "task_count": [sum of all plan task_counts], "objective": "[phase goal from ROADMAP.md]"}' --raw) +``` + +For non-adaptive profiles, use `verifier_model` from init directly (unchanged). + ``` Task( prompt="Verify phase {phase_number} goal achievement. @@ -304,7 +326,7 @@ Check must_haves against actual codebase. Cross-reference requirement IDs from PLAN frontmatter against REQUIREMENTS.md — every ID MUST be accounted for. Create VERIFICATION.md.", subagent_type="gsd-verifier", - model="{verifier_model}" + model="{verifier_model or adaptive_verifier_model}" ) ``` @@ -371,7 +393,7 @@ The CLI handles: Extract from result: `next_phase`, `next_phase_name`, `is_last_phase`. 
```bash -node "$HOME/.claude/get-shit-done/bin/gsd-tools.cjs" commit "docs(phase-{X}): complete phase execution" --files .planning/ROADMAP.md .planning/STATE.md .planning/REQUIREMENTS.md {phase_dir}/*-VERIFICATION.md +node "$HOME/.claude/get-shit-done/bin/gsd-tools.cjs" commit "docs(phase-{X}): complete phase execution" --files {roadmap_path} {state_path} {planning_base}/REQUIREMENTS.md {phase_dir}/*-VERIFICATION.md ``` diff --git a/get-shit-done/workflows/execute-plan.md b/get-shit-done/workflows/execute-plan.md index 180dfcc290..a9e1080802 100644 --- a/get-shit-done/workflows/execute-plan.md +++ b/get-shit-done/workflows/execute-plan.md @@ -18,7 +18,7 @@ Load execution context (paths only to minimize orchestrator context): INIT=$(node "$HOME/.claude/get-shit-done/bin/gsd-tools.cjs" init execute-phase "${PHASE}") ``` -Extract from init JSON: `executor_model`, `commit_docs`, `phase_dir`, `phase_number`, `plans`, `summaries`, `incomplete_plans`, `state_path`, `config_path`. +Extract from init JSON: `executor_model`, `commit_docs`, `phase_dir`, `phase_number`, `plans`, `summaries`, `incomplete_plans`, `state_path`, `roadmap_path`, `config_path`, `planning_base`, `milestone`, `is_multi_milestone`. If `.planning/` missing: error. @@ -26,8 +26,8 @@ If `.planning/` missing: error. ```bash # Use plans/summaries from INIT JSON, or list files -ls .planning/phases/XX-name/*-PLAN.md 2>/dev/null | sort -ls .planning/phases/XX-name/*-SUMMARY.md 2>/dev/null | sort +ls {phase_dir}/*-PLAN.md 2>/dev/null | sort +ls {phase_dir}/*-SUMMARY.md 2>/dev/null | sort ``` Find first PLAN without matching SUMMARY. Decimal phases supported (`01.1-hotfix/`): @@ -55,7 +55,7 @@ PLAN_START_EPOCH=$(date +%s) ```bash -grep -n "type=\"checkpoint" .planning/phases/XX-name/{phase}-{plan}-PLAN.md +grep -n "type=\"checkpoint" {phase_dir}/{phase}-{plan}-PLAN.md ``` **Routing by checkpoint type:** @@ -115,7 +115,7 @@ Pattern B only (verify-only checkpoints). Skip for A/C. 
```bash -cat .planning/phases/XX-name/{phase}-{plan}-PLAN.md +cat {phase_dir}/{phase}-{plan}-PLAN.md ``` This IS the execution instructions. Follow exactly. If plan references CONTEXT.md: honor user's vision throughout. @@ -253,6 +253,9 @@ git add src/types/user.ts **4. Format:** `{type}({phase}-{plan}): {description}` with bullet points for key changes. +**Multi-milestone mode** (when `is_multi_milestone` is true from init JSON): prefix scope with milestone name: +`{type}({milestone}/{phase}-{plan}): {description}` — e.g., `feat(v2.0/08-02): create user registration endpoint` + **5. Record hash:** ```bash TASK_COMMIT=$(git rev-parse --short HEAD) @@ -309,14 +312,14 @@ fi ```bash -grep -A 50 "^user_setup:" .planning/phases/XX-name/{phase}-{plan}-PLAN.md | head -50 +grep -A 50 "^user_setup:" {phase_dir}/{phase}-{plan}-PLAN.md | head -50 ``` If user_setup exists: create `{phase}-USER-SETUP.md` using template `~/.claude/get-shit-done/templates/user-setup.md`. Per service: env vars table, account setup checklist, dashboard config, local dev notes, verification commands. Status "Incomplete". Set `USER_SETUP_CREATED=true`. If empty/missing: skip. -Create `{phase}-{plan}-SUMMARY.md` at `.planning/phases/XX-name/`. Use `~/.claude/get-shit-done/templates/summary.md`. +Create `{phase}-{plan}-SUMMARY.md` at `{phase_dir}/`. Use `~/.claude/get-shit-done/templates/summary.md`. **Frontmatter:** phase, plan, subsystem, tags | requires/provides/affects | tech-stack.added/patterns | key-files.created/modified | key-decisions | requirements-completed (**MUST** copy `requirements` array from PLAN.md frontmatter verbatim) | duration ($DURATION), completed ($PLAN_END_TIME date). @@ -397,7 +400,7 @@ Extract requirement IDs from the plan's frontmatter (e.g., `requirements: [AUTH- Task code already committed per-task. 
Commit plan metadata:
```bash
-node "$HOME/.claude/get-shit-done/bin/gsd-tools.cjs" commit "docs({phase}-{plan}): complete [plan-name] plan" --files .planning/phases/XX-name/{phase}-{plan}-SUMMARY.md .planning/STATE.md .planning/ROADMAP.md .planning/REQUIREMENTS.md
+node "$HOME/.claude/get-shit-done/bin/gsd-tools.cjs" commit "docs({milestone_prefix}{phase}-{plan}): complete [plan-name] plan" --files {phase_dir}/{phase}-{plan}-SUMMARY.md {state_path} {roadmap_path} {planning_base}/REQUIREMENTS.md
```


@@ -405,7 +408,7 @@ node "$HOME/.claude/get-shit-done/bin/gsd-tools.cjs" commit "docs({phase}-{plan}

If .planning/codebase/ doesn't exist: skip.

```bash
-FIRST_TASK=$(git log --oneline --grep="feat({phase}-{plan}):" --grep="fix({phase}-{plan}):" --grep="test({phase}-{plan}):" --reverse | head -1 | cut -d' ' -f1)
+FIRST_TASK=$(git log --oneline --grep="({phase}-{plan}):" --reverse | head -1 | cut -d' ' -f1)
 git diff --name-only ${FIRST_TASK}^..HEAD 2>/dev/null
```
@@ -420,8 +423,8 @@ node "$HOME/.claude/get-shit-done/bin/gsd-tools.cjs" commit "" --files .planning

If `USER_SETUP_CREATED=true`: display `⚠️ USER SETUP REQUIRED` with path + env/config tasks at TOP.

```bash
-ls -1 .planning/phases/[current-phase-dir]/*-PLAN.md 2>/dev/null | wc -l
-ls -1 .planning/phases/[current-phase-dir]/*-SUMMARY.md 2>/dev/null | wc -l
+ls -1 {phase_dir}/*-PLAN.md 2>/dev/null | wc -l
+ls -1 {phase_dir}/*-SUMMARY.md 2>/dev/null | wc -l
```

| Condition | Route | Action |
diff --git a/get-shit-done/workflows/help.md b/get-shit-done/workflows/help.md
index 2991aa18eb..feb3d12f6e 100644
--- a/get-shit-done/workflows/help.md
+++ b/get-shit-done/workflows/help.md
@@ -184,6 +184,15 @@ Archive completed milestone and prepare for next version.

 Usage: `/gsd:complete-milestone 1.0.0`

+**`/gsd:switch-milestone <milestone>`**
+Switch active milestone for concurrent work.
+
+- Warns if current milestone has in-progress work
+- Updates ACTIVE_MILESTONE pointer
+- Shows status of target milestone
+
+Usage: `/gsd:switch-milestone v1.5-hotfix`
+
 ### Progress Tracking

 **`/gsd:progress`**
@@ -232,6 +241,26 @@ Systematic debugging with persistent state across context resets.

 Usage: `/gsd:debug "login button doesn't work"`
 Usage: `/gsd:debug` (resume active session)

+### Bug Tracking
+
+**`/gsd:report-bug [description]`**
+Report and track a bug with structured format and severity classification.
+
+- Gathers bug details (title, actual/expected behavior, repro steps)
+- Infers severity from keywords (critical/high/medium/low)
+- Captures diagnostic logs (git state, error output, log files)
+- Creates structured bug file in `.planning/bugs/`
+- Optionally creates GitHub issue via `gh` CLI
+- Routes to next action (investigate, plan fix, continue)
+
+Usage: `/gsd:report-bug "login button crashes on click"`
+Usage: `/gsd:report-bug` (infers from conversation)
+
+Manage bugs via CLI:
+- `gsd-tools bug list [--area X] [--severity Y] [--status Z]`
+- `gsd-tools bug update <id> --status <status>`
+- `gsd-tools bug resolve <id>`
+
 ### Todo Management

 **`/gsd:add-todo [description]`**
@@ -270,6 +299,20 @@ Validate built features through conversational UAT.

 Usage: `/gsd:verify-work 3`

+### Test Generation
+
+**`/gsd:add-tests <phase> [additional instructions]`**
+Generate unit and E2E tests for a completed phase.
+
+- Reads phase SUMMARY.md, CONTEXT.md, and VERIFICATION.md
+- Classifies changed files into TDD (unit), E2E (browser), or Skip
+- Presents classification for approval before generating
+- Runs tests after generation — flags bugs but doesn't fix them
+- Commits passing tests
+
+Usage: `/gsd:add-tests 3`
+Usage: `/gsd:add-tests 3 focus on edge cases for auth`
+
 ### Milestone Auditing

 **`/gsd:audit-milestone [version]`**
@@ -298,7 +341,7 @@ Usage: `/gsd:plan-milestone-gaps`

 Configure workflow toggles and model profile interactively.
- Toggle researcher, plan checker, verifier agents -- Select model profile (quality/balanced/budget) +- Select model profile (quality/balanced/budget/adaptive) - Updates `.planning/config.json` Usage: `/gsd:settings` @@ -309,6 +352,7 @@ Quick switch model profile for GSD agents. - `quality` — Opus everywhere except verification - `balanced` — Opus for planning, Sonnet for execution (default) - `budget` — Sonnet for writing, Haiku for research/verification +- `adaptive` — Auto-selects model per-plan based on complexity Usage: `/gsd:set-profile budget` @@ -358,6 +402,8 @@ Usage: `/gsd:join-discord` ├── todos/ # Captured ideas and tasks │ ├── pending/ # Todos waiting to be worked on │ └── done/ # Completed todos +├── bugs/ # Active bug reports +│ └── resolved/ # Resolved bugs ├── debug/ # Active debug sessions │ └── resolved/ # Archived resolved issues ├── milestones/ @@ -438,6 +484,7 @@ Example config: /gsd:plan-phase 1 # Create plans for first phase /clear /gsd:execute-phase 1 # Execute all plans in phase +/gsd:add-tests 1 # Generate tests for completed phase ``` **Resuming work after a break:** @@ -471,6 +518,13 @@ Example config: /gsd:check-todos api # Filter by area ``` +**Tracking bugs:** + +``` +/gsd:report-bug "login crashes on submit" # Report a bug +/gsd:report-bug # Report from conversation context +``` + **Debugging an issue:** ``` diff --git a/get-shit-done/workflows/insert-phase.md b/get-shit-done/workflows/insert-phase.md index 8f2569c461..16e0b1cdb8 100644 --- a/get-shit-done/workflows/insert-phase.md +++ b/get-shit-done/workflows/insert-phase.md @@ -37,6 +37,8 @@ Load phase operation context: INIT=$(node "$HOME/.claude/get-shit-done/bin/gsd-tools.cjs" init phase-op "${after_phase}") ``` +Extract from init JSON: `state_path`, `roadmap_path`, `phase_dir`, `planning_base`. + Check `roadmap_exists` from init JSON. 
If false: ``` ERROR: No roadmap found (.planning/ROADMAP.md) @@ -55,7 +57,7 @@ The CLI handles: - Verifying target phase exists in ROADMAP.md - Calculating next decimal phase number (checking existing decimals on disk) - Generating slug from description -- Creating the phase directory (`.planning/phases/{N.M}-{slug}/`) +- Creating the phase directory (`{planning_base}/phases/{N.M}-{slug}/`) - Inserting the phase entry into ROADMAP.md after the target phase with (INSERTED) marker Extract from result: `phase_number`, `after_phase`, `name`, `slug`, `directory`. @@ -64,7 +66,7 @@ Extract from result: `phase_number`, `after_phase`, `name`, `slug`, `directory`. Update STATE.md to reflect the inserted phase: -1. Read `.planning/STATE.md` +1. Read `{state_path}` 2. Under "## Accumulated Context" → "### Roadmap Evolution" add entry: ``` - Phase {decimal_phase} inserted after Phase {after_phase}: {description} (URGENT) @@ -79,12 +81,12 @@ Present completion summary: ``` Phase {decimal_phase} inserted after Phase {after_phase}: - Description: {description} -- Directory: .planning/phases/{decimal-phase}-{slug}/ +- Directory: {planning_base}/phases/{decimal-phase}-{slug}/ - Status: Not planned yet - Marker: (INSERTED) - indicates urgent work -Roadmap updated: .planning/ROADMAP.md -Project state updated: .planning/STATE.md +Roadmap updated: {roadmap_path} +Project state updated: {state_path} --- diff --git a/get-shit-done/workflows/list-phase-assumptions.md b/get-shit-done/workflows/list-phase-assumptions.md index 3269d28300..fc874fde30 100644 --- a/get-shit-done/workflows/list-phase-assumptions.md +++ b/get-shit-done/workflows/list-phase-assumptions.md @@ -21,10 +21,18 @@ Example: /gsd:list-phase-assumptions 3 Exit workflow. 
**If argument provided:** +Load context to get milestone-aware paths: + +```bash +INIT=$(node "$HOME/.claude/get-shit-done/bin/gsd-tools.cjs" init phase-op "${PHASE}") +``` + +Extract from init JSON: `roadmap_path`, `state_path`, `phase_dir`, `requirements_path`, `planning_base`. + Validate phase exists in roadmap: ```bash -cat .planning/ROADMAP.md | grep -i "Phase ${PHASE}" +cat ${roadmap_path} | grep -i "Phase ${PHASE}" ``` **If phase not found:** diff --git a/get-shit-done/workflows/new-milestone.md b/get-shit-done/workflows/new-milestone.md index 252694ae4a..69252c3556 100644 --- a/get-shit-done/workflows/new-milestone.md +++ b/get-shit-done/workflows/new-milestone.md @@ -36,6 +36,18 @@ Read all files referenced by the invoking prompt's execution_context before star - Suggest next version (v1.0 → v1.1, or v2.0 for major) - Confirm with user +## 3b. Initialize Multi-Milestone (if applicable) + +If the project already has an active milestone (check `ACTIVE_MILESTONE` file or milestones/ directory): + +```bash +node "$HOME/.claude/get-shit-done/bin/gsd-tools.cjs" milestone create "${VERSION_SLUG}" +``` + +This creates the milestone directory structure and sets `ACTIVE_MILESTONE`. Subsequent path resolution will automatically scope to the new milestone directory. + +If this is the first milestone (no milestones/ directory yet), skip — legacy mode paths are fine. + ## 4. Update PROJECT.md Add/update: @@ -71,7 +83,7 @@ Keep Accumulated Context section from previous milestone. Delete MILESTONE-CONTEXT.md if exists (consumed). ```bash -node "$HOME/.claude/get-shit-done/bin/gsd-tools.cjs" commit "docs: start milestone v[X.Y] [Name]" --files .planning/PROJECT.md .planning/STATE.md +node "$HOME/.claude/get-shit-done/bin/gsd-tools.cjs" commit "docs: start milestone v[X.Y] [Name]" --files .planning/PROJECT.md {state_path} ``` ## 7. 
Load Context and Resolve Models @@ -80,7 +92,7 @@ node "$HOME/.claude/get-shit-done/bin/gsd-tools.cjs" commit "docs: start milesto INIT=$(node "$HOME/.claude/get-shit-done/bin/gsd-tools.cjs" init new-milestone) ``` -Extract from init JSON: `researcher_model`, `synthesizer_model`, `roadmapper_model`, `commit_docs`, `research_enabled`, `current_milestone`, `project_exists`, `roadmap_exists`. +Extract from init JSON: `researcher_model`, `synthesizer_model`, `roadmapper_model`, `commit_docs`, `research_enabled`, `current_milestone`, `project_exists`, `roadmap_exists`, `project_path`, `roadmap_path`, `state_path`, `planning_base`. ## 8. Research Decision @@ -110,7 +122,7 @@ node "$HOME/.claude/get-shit-done/bin/gsd-tools.cjs" config-set workflow.researc ``` ```bash -mkdir -p .planning/research +mkdir -p {planning_base}/research ``` Spawn 4 parallel gsd-project-researcher agents. Each uses this template with dimension-specific fields: @@ -137,7 +149,7 @@ Focus ONLY on what's needed for the NEW features. {GATES} -Write to: .planning/research/{FILE} +Write to: {planning_base}/research/{FILE} Use template: ~/.claude/get-shit-done/templates/research-project/{FILE} ", subagent_type="gsd-project-researcher", model="{researcher_model}", description="{DIMENSION} research") @@ -160,13 +172,13 @@ Task(prompt=" Synthesize research outputs into SUMMARY.md. -- .planning/research/STACK.md -- .planning/research/FEATURES.md -- .planning/research/ARCHITECTURE.md -- .planning/research/PITFALLS.md +- {planning_base}/research/STACK.md +- {planning_base}/research/FEATURES.md +- {planning_base}/research/ARCHITECTURE.md +- {planning_base}/research/PITFALLS.md -Write to: .planning/research/SUMMARY.md +Write to: {planning_base}/research/SUMMARY.md Use template: ~/.claude/get-shit-done/templates/research-project/SUMMARY.md Commit after writing. 
", subagent_type="gsd-research-synthesizer", model="{synthesizer_model}", description="Synthesize research") @@ -253,7 +265,7 @@ If "adjust": Return to scoping. **Commit requirements:** ```bash -node "$HOME/.claude/get-shit-done/bin/gsd-tools.cjs" commit "docs: define milestone v[X.Y] requirements" --files .planning/REQUIREMENTS.md +node "$HOME/.claude/get-shit-done/bin/gsd-tools.cjs" commit "docs: define milestone v[X.Y] requirements" --files {planning_base}/REQUIREMENTS.md ``` ## 10. Create Roadmap @@ -273,9 +285,9 @@ Task(prompt=" - .planning/PROJECT.md -- .planning/REQUIREMENTS.md -- .planning/research/SUMMARY.md (if exists) -- .planning/config.json +- {planning_base}/REQUIREMENTS.md +- {planning_base}/research/SUMMARY.md (if exists) +- {planning_base}/config.json - .planning/MILESTONES.md @@ -330,7 +342,7 @@ Success criteria: **Commit roadmap** (after approval): ```bash -node "$HOME/.claude/get-shit-done/bin/gsd-tools.cjs" commit "docs: create milestone v[X.Y] roadmap ([N] phases)" --files .planning/ROADMAP.md .planning/STATE.md .planning/REQUIREMENTS.md +node "$HOME/.claude/get-shit-done/bin/gsd-tools.cjs" commit "docs: create milestone v[X.Y] roadmap ([N] phases)" --files {roadmap_path} {state_path} {planning_base}/REQUIREMENTS.md ``` ## 11. 
Done @@ -342,12 +354,12 @@ node "$HOME/.claude/get-shit-done/bin/gsd-tools.cjs" commit "docs: create milest **Milestone v[X.Y]: [Name]** -| Artifact | Location | -|----------------|-----------------------------| -| Project | `.planning/PROJECT.md` | -| Research | `.planning/research/` | -| Requirements | `.planning/REQUIREMENTS.md` | -| Roadmap | `.planning/ROADMAP.md` | +| Artifact | Location | +|----------------|-----------------------------------| +| Project | `.planning/PROJECT.md` | +| Research | `{planning_base}/research/` | +| Requirements | `{planning_base}/REQUIREMENTS.md` | +| Roadmap | `{planning_base}/ROADMAP.md` | **[N] phases** | **[X] requirements** | Ready to build ✓ diff --git a/get-shit-done/workflows/new-project.md b/get-shit-done/workflows/new-project.md index e7c56a4a9f..01975e7c25 100644 --- a/get-shit-done/workflows/new-project.md +++ b/get-shit-done/workflows/new-project.md @@ -49,7 +49,7 @@ The document should describe what you want to build. INIT=$(node "$HOME/.claude/get-shit-done/bin/gsd-tools.cjs" init new-project) ``` -Parse JSON for: `researcher_model`, `synthesizer_model`, `roadmapper_model`, `commit_docs`, `project_exists`, `has_codebase_map`, `planning_exists`, `has_existing_code`, `has_package_file`, `is_brownfield`, `needs_codebase_map`, `has_git`, `project_path`. +Parse JSON for: `researcher_model`, `synthesizer_model`, `roadmapper_model`, `commit_docs`, `project_exists`, `has_codebase_map`, `planning_exists`, `has_existing_code`, `has_package_file`, `is_brownfield`, `needs_codebase_map`, `has_git`, `project_path`, `planning_base`. **If `project_exists` is true:** Error — project already initialized. Use `/gsd:progress`. 
@@ -158,7 +158,8 @@ AskUserQuestion([ options: [ { label: "Balanced (Recommended)", description: "Sonnet for most agents — good quality/cost ratio" }, { label: "Quality", description: "Opus for research/roadmap — higher cost, deeper analysis" }, - { label: "Budget", description: "Haiku where possible — fastest, lowest cost" } + { label: "Budget", description: "Haiku where possible — fastest, lowest cost" }, + { label: "Adaptive", description: "Auto-selects model per-plan based on complexity (35-65% savings)" } ] } ]) @@ -172,7 +173,7 @@ Create `.planning/config.json` with mode set to "yolo": "depth": "[selected]", "parallelization": true|false, "commit_docs": true|false, - "model_profile": "quality|balanced|budget", + "model_profile": "quality|balanced|budget|adaptive", "workflow": { "research": true|false, "plan_check": true|false, @@ -457,7 +458,8 @@ questions: [ options: [ { label: "Balanced (Recommended)", description: "Sonnet for most agents — good quality/cost ratio" }, { label: "Quality", description: "Opus for research/roadmap — higher cost, deeper analysis" }, - { label: "Budget", description: "Haiku where possible — fastest, lowest cost" } + { label: "Budget", description: "Haiku where possible — fastest, lowest cost" }, + { label: "Adaptive", description: "Auto-selects model per-plan based on complexity (35-65% savings)" } ] } ] @@ -471,7 +473,7 @@ Create `.planning/config.json` with all settings: "depth": "quick|standard|comprehensive", "parallelization": true|false, "commit_docs": true|false, - "model_profile": "quality|balanced|budget", + "model_profile": "quality|balanced|budget|adaptive", "workflow": { "research": true|false, "plan_check": true|false, @@ -523,7 +525,7 @@ Researching [domain] ecosystem... Create research directory: ```bash -mkdir -p .planning/research +mkdir -p {planning_base}/research ``` **Determine milestone context:** @@ -579,7 +581,7 @@ Your STACK.md feeds into roadmap creation. 
Be prescriptive: -Write to: .planning/research/STACK.md +Write to: {planning_base}/research/STACK.md Use template: ~/.claude/get-shit-done/templates/research-project/STACK.md ", subagent_type="general-purpose", model="{researcher_model}", description="Stack research") @@ -619,7 +621,7 @@ Your FEATURES.md feeds into requirements definition. Categorize clearly: -Write to: .planning/research/FEATURES.md +Write to: {planning_base}/research/FEATURES.md Use template: ~/.claude/get-shit-done/templates/research-project/FEATURES.md ", subagent_type="general-purpose", model="{researcher_model}", description="Features research") @@ -659,7 +661,7 @@ Your ARCHITECTURE.md informs phase structure in roadmap. Include: -Write to: .planning/research/ARCHITECTURE.md +Write to: {planning_base}/research/ARCHITECTURE.md Use template: ~/.claude/get-shit-done/templates/research-project/ARCHITECTURE.md ", subagent_type="general-purpose", model="{researcher_model}", description="Architecture research") @@ -699,7 +701,7 @@ Your PITFALLS.md prevents mistakes in roadmap/planning. For each pitfall: -Write to: .planning/research/PITFALLS.md +Write to: {planning_base}/research/PITFALLS.md Use template: ~/.claude/get-shit-done/templates/research-project/PITFALLS.md ", subagent_type="general-purpose", model="{researcher_model}", description="Pitfalls research") @@ -714,14 +716,14 @@ Synthesize research outputs into SUMMARY.md. -- .planning/research/STACK.md -- .planning/research/FEATURES.md -- .planning/research/ARCHITECTURE.md -- .planning/research/PITFALLS.md +- {planning_base}/research/STACK.md +- {planning_base}/research/FEATURES.md +- {planning_base}/research/ARCHITECTURE.md +- {planning_base}/research/PITFALLS.md -Write to: .planning/research/SUMMARY.md +Write to: {planning_base}/research/SUMMARY.md Use template: ~/.claude/get-shit-done/templates/research-project/SUMMARY.md Commit after writing. @@ -839,7 +841,7 @@ Cross-check requirements against Core Value from PROJECT.md. 
If gaps detected, s **Generate REQUIREMENTS.md:** -Create `.planning/REQUIREMENTS.md` with: +Create `{planning_base}/REQUIREMENTS.md` with: - v1 Requirements grouped by category (checkboxes, REQ-IDs) - v2 Requirements (deferred) - Out of Scope (explicit exclusions with reasoning) @@ -887,7 +889,7 @@ If "adjust": Return to scoping. **Commit requirements:** ```bash -node "$HOME/.claude/get-shit-done/bin/gsd-tools.cjs" commit "docs: define v1 requirements" --files .planning/REQUIREMENTS.md +node "$HOME/.claude/get-shit-done/bin/gsd-tools.cjs" commit "docs: define v1 requirements" --files {planning_base}/REQUIREMENTS.md ``` ## 8. Create Roadmap @@ -909,9 +911,9 @@ Task(prompt=" - .planning/PROJECT.md (Project context) -- .planning/REQUIREMENTS.md (v1 Requirements) -- .planning/research/SUMMARY.md (Research findings - if exists) -- .planning/config.json (Depth and mode settings) +- {planning_base}/REQUIREMENTS.md (v1 Requirements) +- {planning_base}/research/SUMMARY.md (Research findings - if exists) +- {planning_base}/config.json (Depth and mode settings) @@ -1001,7 +1003,7 @@ Use AskUserQuestion: [user's notes] - - .planning/ROADMAP.md (Current roadmap to revise) + - {planning_base}/ROADMAP.md (Current roadmap to revise) Update the roadmap based on feedback. Edit files in place. @@ -1012,12 +1014,12 @@ Use AskUserQuestion: - Present revised roadmap - Loop until user approves -**If "Review full file":** Display raw `cat .planning/ROADMAP.md`, then re-ask. +**If "Review full file":** Display raw `cat {planning_base}/ROADMAP.md`, then re-ask. **Commit roadmap (after approval or auto mode):** ```bash -node "$HOME/.claude/get-shit-done/bin/gsd-tools.cjs" commit "docs: create roadmap ([N] phases)" --files .planning/ROADMAP.md .planning/STATE.md .planning/REQUIREMENTS.md +node "$HOME/.claude/get-shit-done/bin/gsd-tools.cjs" commit "docs: create roadmap ([N] phases)" --files {planning_base}/ROADMAP.md {planning_base}/STATE.md {planning_base}/REQUIREMENTS.md ``` ## 9. 
Done diff --git a/get-shit-done/workflows/plan-milestone-gaps.md b/get-shit-done/workflows/plan-milestone-gaps.md index 373147bd28..19c0e81a5d 100644 --- a/get-shit-done/workflows/plan-milestone-gaps.md +++ b/get-shit-done/workflows/plan-milestone-gaps.md @@ -8,6 +8,14 @@ Read all files referenced by the invoking prompt's execution_context before star +## 0. Initialize + +```bash +INIT=$(node "$HOME/.claude/get-shit-done/bin/gsd-tools.cjs" init progress) +``` + +Parse JSON for: `planning_base`, `roadmap_path`, `config_path`. + ## 1. Load Audit Results ```bash @@ -135,19 +143,19 @@ Reset checked-off requirements the audit found unsatisfied: ```bash # Verify traceability table reflects gap closure assignments -grep -c "Pending" .planning/REQUIREMENTS.md +grep -c "Pending" "${planning_base}/REQUIREMENTS.md" ``` ## 8. Create Phase Directories ```bash -mkdir -p ".planning/phases/{NN}-{name}" +mkdir -p "${planning_base}/phases/{NN}-{name}" ``` ## 9. Commit Roadmap and Requirements Update ```bash -node "$HOME/.claude/get-shit-done/bin/gsd-tools.cjs" commit "docs(roadmap): add gap closure phases {N}-{M}" --files .planning/ROADMAP.md .planning/REQUIREMENTS.md +node "$HOME/.claude/get-shit-done/bin/gsd-tools.cjs" commit "docs(roadmap): add gap closure phases {N}-{M}" --files "${planning_base}/ROADMAP.md" "${planning_base}/REQUIREMENTS.md" ``` ## 10. Offer Next Steps diff --git a/get-shit-done/workflows/plan-phase.md b/get-shit-done/workflows/plan-phase.md index 7bf31efba3..9b17def810 100644 --- a/get-shit-done/workflows/plan-phase.md +++ b/get-shit-done/workflows/plan-phase.md @@ -34,7 +34,7 @@ Extract `--prd ` from $ARGUMENTS. If present, set PRD_FILE to the file **If `phase_found` is false:** Validate phase exists in ROADMAP.md. 
If valid, create the directory using `phase_slug` and `padded_phase` from init: ```bash -mkdir -p ".planning/phases/${padded_phase}-${phase_slug}" +mkdir -p "${planning_base}/phases/${padded_phase}-${phase_slug}" ``` **Existing artifacts from init:** `has_research`, `has_plans`, `plan_count`. diff --git a/get-shit-done/workflows/progress.md b/get-shit-done/workflows/progress.md index e1dcc2eb1c..466ee4abae 100644 --- a/get-shit-done/workflows/progress.md +++ b/get-shit-done/workflows/progress.md @@ -15,7 +15,7 @@ Read all files referenced by the invoking prompt's execution_context before star INIT=$(node "$HOME/.claude/get-shit-done/bin/gsd-tools.cjs" init progress) ``` -Extract from init JSON: `project_exists`, `roadmap_exists`, `state_exists`, `phases`, `current_phase`, `next_phase`, `milestone_version`, `completed_count`, `phase_count`, `paused_at`, `state_path`, `roadmap_path`, `project_path`, `config_path`. +Extract from init JSON: `project_exists`, `roadmap_exists`, `state_exists`, `phases`, `current_phase`, `next_phase`, `milestone_version`, `completed_count`, `phase_count`, `paused_at`, `state_path`, `roadmap_path`, `project_path`, `config_path`, `planning_base`. 
If `project_exists` is false (no `.planning/` directory): @@ -97,7 +97,7 @@ Present: # [Project Name] **Progress:** {PROGRESS_BAR} -**Profile:** [quality/balanced/budget] +**Profile:** [quality/balanced/budget/adaptive] ## Recent Work - [Phase X, Plan Y]: [what was accomplished - 1 line from summary-extract] @@ -119,6 +119,10 @@ CONTEXT: [✓ if has_context | - if not] ## Pending Todos - [count] pending — /gsd:check-todos to review +## Active Bugs +- [count] active — /gsd:report-bug to file, gsd-tools bug list to review +(Only show this section if count > 0) + ## Active Debug Sessions - [count] active — /gsd:debug to continue (Only show this section if count > 0) @@ -137,9 +141,9 @@ CONTEXT: [✓ if has_context | - if not] List files in the current phase directory: ```bash -ls -1 .planning/phases/[current-phase-dir]/*-PLAN.md 2>/dev/null | wc -l -ls -1 .planning/phases/[current-phase-dir]/*-SUMMARY.md 2>/dev/null | wc -l -ls -1 .planning/phases/[current-phase-dir]/*-UAT.md 2>/dev/null | wc -l +ls -1 {planning_base}/phases/[current-phase-dir]/*-PLAN.md 2>/dev/null | wc -l +ls -1 {planning_base}/phases/[current-phase-dir]/*-SUMMARY.md 2>/dev/null | wc -l +ls -1 {planning_base}/phases/[current-phase-dir]/*-UAT.md 2>/dev/null | wc -l ``` State: "This phase has {X} plans, {Y} summaries." @@ -150,7 +154,7 @@ Check for UAT.md files with status "diagnosed" (has gaps needing fixes). 
```bash # Check for diagnosed UAT with gaps -grep -l "status: diagnosed" .planning/phases/[current-phase-dir]/*-UAT.md 2>/dev/null +grep -l "status: diagnosed" {planning_base}/phases/[current-phase-dir]/*-UAT.md 2>/dev/null ``` Track: diff --git a/get-shit-done/workflows/quick.md b/get-shit-done/workflows/quick.md index e68f140437..2c84efa24c 100644 --- a/get-shit-done/workflows/quick.md +++ b/get-shit-done/workflows/quick.md @@ -46,7 +46,7 @@ If `$FULL_MODE`: INIT=$(node "$HOME/.claude/get-shit-done/bin/gsd-tools.cjs" init quick "$DESCRIPTION") ``` -Parse JSON for: `planner_model`, `executor_model`, `checker_model`, `verifier_model`, `commit_docs`, `next_num`, `slug`, `date`, `timestamp`, `quick_dir`, `task_dir`, `roadmap_exists`, `planning_exists`. +Parse JSON for: `planner_model`, `executor_model`, `checker_model`, `verifier_model`, `commit_docs`, `next_num`, `slug`, `date`, `timestamp`, `quick_dir`, `task_dir`, `planning_base`, `roadmap_exists`, `planning_exists`. **If `roadmap_exists` is false:** Error — Quick mode requires an active project with ROADMAP.md. Run `/gsd:new-project` first. @@ -67,7 +67,7 @@ mkdir -p "${task_dir}" Create the directory for this quick task: ```bash -QUICK_DIR=".planning/quick/${next_num}-${slug}" +QUICK_DIR="${quick_dir}/${next_num}-${slug}" mkdir -p "$QUICK_DIR" ``` @@ -97,7 +97,7 @@ Task( **Description:** ${DESCRIPTION} -- .planning/STATE.md (Project State) +- {planning_base}/STATE.md (Project State) - ./CLAUDE.md (if exists — follow project-specific guidelines) @@ -250,7 +250,7 @@ Execute quick task ${next_num}. 
- ${QUICK_DIR}/${next_num}-PLAN.md (Plan) -- .planning/STATE.md (Project state) +- {planning_base}/STATE.md (Project state) - ./CLAUDE.md (Project instructions, if exists) - .claude/skills/ or .agents/skills/ (Project skills, if either exists — list skills, read SKILL.md for each, follow relevant rules during implementation) @@ -388,7 +388,7 @@ Stage and commit quick task artifacts: Build file list: - `${QUICK_DIR}/${next_num}-PLAN.md` - `${QUICK_DIR}/${next_num}-SUMMARY.md` -- `.planning/STATE.md` +- `{planning_base}/STATE.md` - If `$FULL_MODE` and verification file exists: `${QUICK_DIR}/${next_num}-VERIFICATION.md` ```bash @@ -443,7 +443,7 @@ Ready for next task: /gsd:quick - [ ] `--full` flag parsed from arguments when present - [ ] Slug generated (lowercase, hyphens, max 40 chars) - [ ] Next number calculated (001, 002, 003...) -- [ ] Directory created at `.planning/quick/NNN-slug/` +- [ ] Directory created at `${quick_dir}/NNN-slug/` - [ ] `${next_num}-PLAN.md` created by planner - [ ] (--full) Plan checker validates plan, revision loop capped at 2 - [ ] `${next_num}-SUMMARY.md` created by executor diff --git a/get-shit-done/workflows/remove-phase.md b/get-shit-done/workflows/remove-phase.md index cd168cd2a1..ad2e308700 100644 --- a/get-shit-done/workflows/remove-phase.md +++ b/get-shit-done/workflows/remove-phase.md @@ -32,9 +32,9 @@ Load phase operation context: INIT=$(node "$HOME/.claude/get-shit-done/bin/gsd-tools.cjs" init phase-op "${target}") ``` -Extract: `phase_found`, `phase_dir`, `phase_number`, `commit_docs`, `roadmap_exists`. +Extract: `phase_found`, `phase_dir`, `phase_number`, `commit_docs`, `roadmap_exists`, `state_path`, `roadmap_path`, `planning_base`. -Also read STATE.md and ROADMAP.md content for parsing current position. +Also read `{state_path}` and `{roadmap_path}` content for parsing current position. 
@@ -65,7 +65,7 @@ Present removal summary and confirm: Removing Phase {target}: {Name} This will: -- Delete: .planning/phases/{target}-{slug}/ +- Delete: {planning_base}/phases/{target}-{slug}/ - Renumber all subsequent phases - Update: ROADMAP.md, STATE.md @@ -102,7 +102,7 @@ Extract from result: `removed`, `directory_deleted`, `renamed_directories`, `ren Stage and commit the removal: ```bash -node "$HOME/.claude/get-shit-done/bin/gsd-tools.cjs" commit "chore: remove phase {target} ({original-phase-name})" --files .planning/ +node "$HOME/.claude/get-shit-done/bin/gsd-tools.cjs" commit "chore: remove phase {target} ({original-phase-name})" --files {planning_base}/ ``` The commit message preserves the historical record of what was removed. @@ -115,7 +115,7 @@ Present completion summary: Phase {target} ({original-name}) removed. Changes: -- Deleted: .planning/phases/{target}-{slug}/ +- Deleted: {planning_base}/phases/{target}-{slug}/ - Renumbered: {N} directories and {M} files - Updated: ROADMAP.md, STATE.md - Committed: chore: remove phase {target} ({original-name}) diff --git a/get-shit-done/workflows/report-bug.md b/get-shit-done/workflows/report-bug.md new file mode 100644 index 0000000000..973e6c14a8 --- /dev/null +++ b/get-shit-done/workflows/report-bug.md @@ -0,0 +1,216 @@ + +Report and track a bug with structured format, severity classification, diagnostic log capture, and optional GitHub issue creation. Enables full bug lifecycle: report -> triage -> investigate -> fix -> resolve. + + + +Read all files referenced by the invoking prompt's execution_context before starting. + + + + + +Load bug context: + +```bash +INIT=$(node "$HOME/.claude/get-shit-done/bin/gsd-tools.cjs" init bugs) +``` + +Extract from init JSON: `commit_docs`, `date`, `timestamp`, `bug_count`, `bugs`, `next_id`, `next_id_padded`, `bugs_dir`, `resolved_dir`, `bugs_dir_exists`. 
+ + + +Ensure bug directories exist: + +```bash +mkdir -p .planning/bugs .planning/bugs/resolved +``` + + + +**With arguments:** Use `$ARGUMENTS` as seed description. + +**Without arguments:** Analyze recent conversation for bug symptoms, error messages, and unexpected behavior. + +Then use AskUserQuestion to gather structured details: + +1. **Title** (3-10 words describing the bug) +2. **Actual behavior** (what happened) +3. **Expected behavior** (what should happen) +4. **Reproduction steps** (step-by-step or "unknown") +5. **Related files** (optional file paths) + +If `$ARGUMENTS` provides sufficient detail, pre-fill answers and present for confirmation rather than asking from scratch. + + + +Automatically gather diagnostic context: + +```bash +# Recent commits +git log --oneline -10 + +# Working tree state +git diff --stat + +# Current branch +git branch --show-current +``` + +Scan for log files and include tails of recent ones: +- `*.log` files in project root +- `logs/` directory +- `.planning/debug/` active debug sessions + +Capture any error output from the current conversation (stack traces, error messages, failed commands). + +Bundle everything into a `## Diagnostic Logs` section. + + + +Match keywords in title + description to determine severity: + +| Severity | Keywords | +|----------|----------| +| **critical** | crash, data loss, security, vulnerability, corruption, infinite loop, memory leak | +| **high** | broken, fails, error, exception, cannot, blocks, regression, timeout | +| **medium** | incorrect, wrong, unexpected, inconsistent, slow, intermittent | +| **low** | typo, alignment, color, spacing, formatting, cosmetic, minor | + +Default: **medium** if no keywords match. + +Present inferred severity to user for confirmation via AskUserQuestion: +- header: "Severity" +- question: "Inferred severity: {severity}. Is this correct?" 
+- options: "Yes", "Critical", "High", "Medium", "Low" + + + +Infer area from file paths: + +| Path pattern | Area | +|--------------|------| +| `src/api/*`, `api/*`, `routes/*`, `endpoints/*` | `api` | +| `src/auth/*`, `auth/*`, `login` | `auth` | +| `src/components/*`, `src/ui/*`, `pages/*` | `ui` | +| `src/db/*`, `database/*`, `prisma/*`, `migrations/*` | `database` | +| `tests/*`, `__tests__/*`, `*.test.*` | `testing` | +| `docs/*`, `*.md` | `docs` | +| `.planning/*` | `planning` | +| `scripts/*`, `bin/*` | `tooling` | +| No files or unclear | `general` | + + + +Write bug report to `.planning/bugs/BUG-{next_id_padded}.md`: + +```markdown +--- +id: BUG-{next_id_padded} +title: "{title}" +severity: {severity} +status: reported +area: {area} +phase: {phase or null} +created: {timestamp} +updated: {timestamp} +github_issue: null +files: + - {file paths} +--- + +# BUG-{next_id_padded}: {title} + +## Description + +{description combining actual behavior context} + +## Expected Behavior + +{what should happen} + +## Actual Behavior + +{what actually happens} + +## Reproduction Steps + +{numbered steps or "Unknown - discovered during development"} + +## Environment + +- Branch: {current branch} +- Date: {date} + +## Related Code + +{file paths with relevant line numbers or code snippets} + +## Diagnostic Logs + +{git log, diff stat, error output, log file tails} +``` + + + +Commit the bug report: + +```bash +node "$HOME/.claude/get-shit-done/bin/gsd-tools.cjs" commit "docs: report BUG-{next_id_padded} - {title}" --files .planning/bugs/BUG-{next_id_padded}.md +``` + +Tool respects `commit_docs` config and gitignore automatically. 
+ + + +Attempt to create a GitHub issue: + +```bash +gh issue create \ + --title "BUG-{next_id_padded}: {title}" \ + --body "{bug description, repro steps, severity, diagnostic logs}" \ + --label "bug" --label "severity: {severity}" +``` + +If `gh` is not available, no remote configured, or the command fails: +- Skip gracefully +- Note in output: "GitHub issue creation skipped (gh not available or no remote)" + +If successful: +- Update bug file frontmatter: `github_issue: {issue URL}` +- Amend the commit to include the updated file + + + +Display summary: + +``` +Bug reported: .planning/bugs/BUG-{next_id_padded}.md + + BUG-{next_id_padded}: {title} + Severity: {severity} + Area: {area} + {GitHub issue URL if created} +``` + +Use AskUserQuestion: +- header: "Next step" +- question: "What would you like to do next?" +- options: + - "Investigate now" — route to `/gsd:debug BUG-{next_id_padded}: {title}` + - "Plan a fix" — route to `/gsd:add-phase` + - "Continue working" — return to previous context + - "Report another" — restart this workflow + + + + + +- [ ] Bug directories exist (.planning/bugs/ and .planning/bugs/resolved/) +- [ ] Bug file created with valid frontmatter (id, title, severity, status, area, timestamps) +- [ ] All sections populated (Description, Expected, Actual, Repro Steps, Diagnostic Logs) +- [ ] Severity inferred from keywords and confirmed by user +- [ ] Diagnostic logs captured (git state, error output) +- [ ] Bug file committed to git +- [ ] GitHub issue created if gh available (graceful skip if not) +- [ ] User routed to next action + diff --git a/get-shit-done/workflows/research-phase.md b/get-shit-done/workflows/research-phase.md index f527e8f841..6acf2a9fc9 100644 --- a/get-shit-done/workflows/research-phase.md +++ b/get-shit-done/workflows/research-phase.md @@ -25,8 +25,9 @@ If `found` is false: Error and exit. 
## Step 2: Check Existing Research +Use `directory` from `PHASE_INFO` (step 1): ```bash -ls .planning/phases/${PHASE}-*/RESEARCH.md 2>/dev/null +ls ${PHASE_INFO.directory}/RESEARCH.md 2>/dev/null ``` If exists: Offer update/view/skip options. @@ -35,7 +36,7 @@ If exists: Offer update/view/skip options. ```bash INIT=$(node "$HOME/.claude/get-shit-done/bin/gsd-tools.cjs" init phase-op "${PHASE}") -# Extract: phase_dir, padded_phase, phase_number, state_path, requirements_path, context_path +# Extract: phase_dir, padded_phase, phase_number, state_path, requirements_path, context_path, planning_base ``` ## Step 4: Spawn Researcher @@ -57,7 +58,7 @@ Phase description: {description} -Write to: .planning/phases/${PHASE}-{slug}/${PHASE}-RESEARCH.md +Write to: ${phase_dir}/${PHASE}-RESEARCH.md ", subagent_type="gsd-phase-researcher", model="{researcher_model}" diff --git a/get-shit-done/workflows/resume-project.md b/get-shit-done/workflows/resume-project.md index f71cadbfa2..3ed1b5d467 100644 --- a/get-shit-done/workflows/resume-project.md +++ b/get-shit-done/workflows/resume-project.md @@ -23,7 +23,7 @@ Load all context in one call: INIT=$(node "$HOME/.claude/get-shit-done/bin/gsd-tools.cjs" init resume) ``` -Parse JSON for: `state_exists`, `roadmap_exists`, `project_exists`, `planning_exists`, `has_interrupted_agent`, `interrupted_agent_id`, `commit_docs`. +Parse JSON for: `state_exists`, `roadmap_exists`, `project_exists`, `planning_exists`, `has_interrupted_agent`, `interrupted_agent_id`, `commit_docs`, `state_path`, `roadmap_path`, `project_path`, `planning_base`. 
**If `state_exists` is true:** Proceed to load_state **If `state_exists` is false but `roadmap_exists` or `project_exists` is true:** Offer to reconstruct STATE.md @@ -35,8 +35,8 @@ Parse JSON for: `state_exists`, `roadmap_exists`, `project_exists`, `planning_ex Read and parse STATE.md, then PROJECT.md: ```bash -cat .planning/STATE.md -cat .planning/PROJECT.md +cat "${state_path}" +cat "${project_path}" ``` **From STATE.md extract:** @@ -63,10 +63,10 @@ Look for incomplete work that needs attention: ```bash # Check for continue-here files (mid-plan resumption) -ls .planning/phases/*/.continue-here*.md 2>/dev/null +ls "${planning_base}"/phases/*/.continue-here*.md 2>/dev/null # Check for plans without summaries (incomplete execution) -for plan in .planning/phases/*/*-PLAN.md; do +for plan in "${planning_base}"/phases/*/*-PLAN.md; do summary="${plan/PLAN/SUMMARY}" [ ! -f "$summary" ] && echo "Incomplete: $plan" done 2>/dev/null @@ -196,7 +196,7 @@ What would you like to do? **Note:** When offering phase planning, check for CONTEXT.md existence first: ```bash -ls .planning/phases/XX-name/*-CONTEXT.md 2>/dev/null +ls "${planning_base}"/phases/XX-name/*-CONTEXT.md 2>/dev/null ``` If missing, suggest discuss-phase before plan. If exists, offer plan directly. 
diff --git a/get-shit-done/workflows/set-profile.md b/get-shit-done/workflows/set-profile.md index 00c2f5d3fa..fa7a98b3ea 100644 --- a/get-shit-done/workflows/set-profile.md +++ b/get-shit-done/workflows/set-profile.md @@ -12,9 +12,9 @@ Read all files referenced by the invoking prompt's execution_context before star Validate argument: ``` -if $ARGUMENTS.profile not in ["quality", "balanced", "budget"]: +if $ARGUMENTS.profile not in ["quality", "balanced", "budget", "adaptive"]: Error: Invalid profile "$ARGUMENTS.profile" - Valid profiles: quality, balanced, budget + Valid profiles: quality, balanced, budget, adaptive EXIT ``` @@ -27,7 +27,9 @@ node "$HOME/.claude/get-shit-done/bin/gsd-tools.cjs" config-ensure-section INIT=$(node "$HOME/.claude/get-shit-done/bin/gsd-tools.cjs" state load) ``` -This creates `.planning/config.json` with defaults if missing and loads current config. +This creates `{planning_base}/config.json` with defaults if missing and loads current config. + +Extract `planning_base` from init JSON (or derive from `state load` response). @@ -40,7 +42,7 @@ Update `model_profile` field: } ``` -Write updated config back to `.planning/config.json`. +Write updated config back to `{planning_base}/config.json`. 
@@ -68,6 +70,7 @@ Map profile names: - quality: use "quality" column from MODEL_PROFILES - balanced: use "balanced" column from MODEL_PROFILES - budget: use "budget" column from MODEL_PROFILES +- adaptive: show "Adaptive — model selected per-plan based on complexity (Simple→haiku/sonnet, Medium→sonnet, Complex→opus/sonnet)" diff --git a/get-shit-done/workflows/settings.md b/get-shit-done/workflows/settings.md index 9677001db0..02c3e3d44f 100644 --- a/get-shit-done/workflows/settings.md +++ b/get-shit-done/workflows/settings.md @@ -16,12 +16,14 @@ node "$HOME/.claude/get-shit-done/bin/gsd-tools.cjs" config-ensure-section INIT=$(node "$HOME/.claude/get-shit-done/bin/gsd-tools.cjs" state load) ``` -Creates `.planning/config.json` with defaults if missing and loads current config values. +Creates `{planning_base}/config.json` with defaults if missing and loads current config values. + +Extract `planning_base` from init JSON (or derive from `state load` response). ```bash -cat .planning/config.json +cat {planning_base}/config.json ``` Parse current values (default to `true` if not present): @@ -45,7 +47,8 @@ AskUserQuestion([ options: [ { label: "Quality", description: "Opus everywhere except verification (highest cost)" }, { label: "Balanced (Recommended)", description: "Opus for planning, Sonnet for execution/verification" }, - { label: "Budget", description: "Sonnet for writing, Haiku for research/verification (lowest cost)" } + { label: "Budget", description: "Sonnet for writing, Haiku for research/verification (lowest cost)" }, + { label: "Adaptive", description: "Auto-selects model per-plan based on complexity (35-65% savings)" } ] }, { @@ -113,7 +116,7 @@ Merge new settings into existing config.json: ```json { ...existing_config, - "model_profile": "quality" | "balanced" | "budget", + "model_profile": "quality" | "balanced" | "budget" | "adaptive", "workflow": { "research": true/false, "plan_check": true/false, @@ -127,7 +130,7 @@ Merge new settings into existing 
config.json: } ``` -Write updated config to `.planning/config.json`. +Write updated config to `{planning_base}/config.json`. @@ -183,7 +186,7 @@ Display: | Setting | Value | |----------------------|-------| -| Model Profile | {quality/balanced/budget} | +| Model Profile | {quality/balanced/budget/adaptive} | | Plan Researcher | {On/Off} | | Plan Checker | {On/Off} | | Execution Verifier | {On/Off} | diff --git a/get-shit-done/workflows/switch-milestone.md b/get-shit-done/workflows/switch-milestone.md new file mode 100644 index 0000000000..0cd49e3c3a --- /dev/null +++ b/get-shit-done/workflows/switch-milestone.md @@ -0,0 +1,66 @@ + +Switch the active milestone. Shows available milestones, warns about in-progress work on the current milestone, and updates the active milestone pointer. + + + + +## 0. Initialize + +```bash +INIT=$(node "$HOME/.claude/get-shit-done/bin/gsd-tools.cjs" init milestone-op) +``` + +Parse JSON for: `planning_base`. + +## 1. List Available Milestones + +```bash +node "$HOME/.claude/get-shit-done/bin/gsd-tools.cjs" milestone list +``` + +Display milestones with their status. If only one milestone exists, inform user there's nothing to switch to. + +## 2. Get Target Milestone + +If not provided as argument, ask user which milestone to switch to using AskUserQuestion. + +## 3. Switch + +```bash +RESULT=$(node "$HOME/.claude/get-shit-done/bin/gsd-tools.cjs" milestone switch "${TARGET}") +``` + +Parse JSON for: `switched`, `name`, `status`, `state_path`, `previous_milestone`, `previous_status`, `has_in_progress`. + +**If `has_in_progress` is true:** + +Present warning before confirming: +``` +## Warning: In-Progress Work + +Milestone **{previous_milestone}** has status: {previous_status} + +Switching won't lose any work — you can switch back anytime. +``` + +## 4. Confirm + +``` +## Switched to: {name} + +**Status:** {status} +**State:** {state_path} + +--- + +Run `/gsd:progress` to see where this milestone stands. 
+``` + + + + +- [ ] Available milestones shown +- [ ] In-progress warning displayed if applicable +- [ ] ACTIVE_MILESTONE updated +- [ ] User sees new milestone status + diff --git a/get-shit-done/workflows/transition.md b/get-shit-done/workflows/transition.md index 553fc193b5..c87e471337 100644 --- a/get-shit-done/workflows/transition.md +++ b/get-shit-done/workflows/transition.md @@ -22,10 +22,18 @@ Mark current phase complete and advance to next. This is the natural point where +**Load milestone-aware paths:** + +```bash +INIT=$(node "$HOME/.claude/get-shit-done/bin/gsd-tools.cjs" init transition) +``` + +Extract from init JSON: `state_path`, `roadmap_path`, `config_path`, `planning_base`. + Before transition, read project state: ```bash -cat .planning/STATE.md 2>/dev/null +cat {state_path} 2>/dev/null cat .planning/PROJECT.md 2>/dev/null ``` @@ -39,8 +47,8 @@ Note accumulated context that may need updating after transition. Check current phase has all plan summaries: ```bash -ls .planning/phases/XX-current/*-PLAN.md 2>/dev/null | sort -ls .planning/phases/XX-current/*-SUMMARY.md 2>/dev/null | sort +ls {planning_base}/phases/XX-current/*-PLAN.md 2>/dev/null | sort +ls {planning_base}/phases/XX-current/*-SUMMARY.md 2>/dev/null | sort ``` **Verification logic:** @@ -53,7 +61,7 @@ ls .planning/phases/XX-current/*-SUMMARY.md 2>/dev/null | sort ```bash -cat .planning/config.json 2>/dev/null +cat {config_path} 2>/dev/null ``` @@ -111,7 +119,7 @@ Wait for user decision. Check for lingering handoffs: ```bash -ls .planning/phases/XX-current/.continue-here*.md 2>/dev/null +ls {planning_base}/phases/XX-current/.continue-here*.md 2>/dev/null ``` If found, delete them — phase is complete, handoffs are stale. @@ -151,7 +159,7 @@ Evolve PROJECT.md to reflect learnings from completed phase. 
**Read phase summaries:** ```bash -cat .planning/phases/XX-current/*-SUMMARY.md +cat {planning_base}/phases/XX-current/*-SUMMARY.md ``` **Assess requirement changes:** @@ -361,7 +369,7 @@ Read ROADMAP.md to get the next phase's name and goal. **Check if next phase has CONTEXT.md:** ```bash -ls .planning/phases/*[X+1]*/*-CONTEXT.md 2>/dev/null +ls {planning_base}/phases/*[X+1]*/*-CONTEXT.md 2>/dev/null ``` **If next phase exists:** diff --git a/get-shit-done/workflows/verify-phase.md b/get-shit-done/workflows/verify-phase.md index 33efb834c5..ebfdadf24b 100644 --- a/get-shit-done/workflows/verify-phase.md +++ b/get-shit-done/workflows/verify-phase.md @@ -31,12 +31,12 @@ Load phase operation context: INIT=$(node "$HOME/.claude/get-shit-done/bin/gsd-tools.cjs" init phase-op "${PHASE_ARG}") ``` -Extract from init JSON: `phase_dir`, `phase_number`, `phase_name`, `has_plans`, `plan_count`. +Extract from init JSON: `phase_dir`, `phase_number`, `phase_name`, `has_plans`, `plan_count`, `state_path`, `roadmap_path`, `requirements_path`, `planning_base`. Then load phase details and list plans/summaries: ```bash node "$HOME/.claude/get-shit-done/bin/gsd-tools.cjs" roadmap get-phase "${phase_number}" -grep -E "^| ${phase_number}" .planning/REQUIREMENTS.md 2>/dev/null +grep -E "^| ${phase_number}" "${requirements_path}" 2>/dev/null ls "$phase_dir"/*-SUMMARY.md "$phase_dir"/*-PLAN.md 2>/dev/null ``` @@ -159,7 +159,7 @@ Record status and evidence for each key link. If REQUIREMENTS.md exists: ```bash -grep -E "Phase ${PHASE_NUM}" .planning/REQUIREMENTS.md 2>/dev/null +grep -E "Phase ${PHASE_NUM}" "${requirements_path}" 2>/dev/null ``` For each requirement: parse description → identify supporting truths/artifacts → status: ✓ SATISFIED / ✗ BLOCKED / ? NEEDS HUMAN. 
diff --git a/get-shit-done/workflows/verify-work.md b/get-shit-done/workflows/verify-work.md index 466a80c4ff..6d85bdd409 100644 --- a/get-shit-done/workflows/verify-work.md +++ b/get-shit-done/workflows/verify-work.md @@ -27,14 +27,14 @@ If $ARGUMENTS contains a phase number, load context: INIT=$(node "$HOME/.claude/get-shit-done/bin/gsd-tools.cjs" init verify-work "${PHASE_ARG}") ``` -Parse JSON for: `planner_model`, `checker_model`, `commit_docs`, `phase_found`, `phase_dir`, `phase_number`, `phase_name`, `has_verification`. +Parse JSON for: `planner_model`, `checker_model`, `commit_docs`, `phase_found`, `phase_dir`, `phase_number`, `phase_name`, `has_verification`, `planning_base`. **First: Check for active UAT sessions** ```bash -find .planning/phases -name "*-UAT.md" -type f 2>/dev/null | head -5 +find "${planning_base}/phases" -name "*-UAT.md" -type f 2>/dev/null | head -5 ``` **If active sessions exist AND no $ARGUMENTS provided:** @@ -292,7 +292,7 @@ Clear Current Test section: Commit the UAT file: ```bash -node "$HOME/.claude/get-shit-done/bin/gsd-tools.cjs" commit "test({phase_num}): complete UAT - {passed} passed, {issues} issues" --files ".planning/phases/XX-name/{phase_num}-UAT.md" +node "$HOME/.claude/get-shit-done/bin/gsd-tools.cjs" commit "test({phase_num}): complete UAT - {passed} passed, {issues} issues" --files "${phase_dir}/{phase_num}-UAT.md" ``` Present summary: @@ -367,8 +367,8 @@ Task( - {phase_dir}/{phase_num}-UAT.md (UAT with diagnoses) -- .planning/STATE.md (Project State) -- .planning/ROADMAP.md (Roadmap) +- {planning_base}/STATE.md (Project State) +- {planning_base}/ROADMAP.md (Roadmap) diff --git a/hooks/gsd-statusline.js b/hooks/gsd-statusline.js index 29185d6835..77718a4ad0 100755 --- a/hooks/gsd-statusline.js +++ b/hooks/gsd-statusline.js @@ -95,12 +95,20 @@ process.stdin.on('end', () => { } catch (e) {} } + // Active milestone (multi-milestone mode) + let milestone = ''; + const activeMilestonePath = path.join(dir, '.planning', 
'ACTIVE_MILESTONE'); + try { + const ms = fs.readFileSync(activeMilestonePath, 'utf-8').trim(); + if (ms) milestone = `\x1b[36m[${ms}]\x1b[0m │ `; + } catch {} + // Output const dirname = path.basename(dir); if (task) { - process.stdout.write(`${gsdUpdate}\x1b[2m${model}\x1b[0m │ \x1b[1m${task}\x1b[0m │ \x1b[2m${dirname}\x1b[0m${ctx}`); + process.stdout.write(`${gsdUpdate}\x1b[2m${model}\x1b[0m │ \x1b[1m${task}\x1b[0m │ ${milestone}\x1b[2m${dirname}\x1b[0m${ctx}`); } else { - process.stdout.write(`${gsdUpdate}\x1b[2m${model}\x1b[0m │ \x1b[2m${dirname}\x1b[0m${ctx}`); + process.stdout.write(`${gsdUpdate}\x1b[2m${model}\x1b[0m │ ${milestone}\x1b[2m${dirname}\x1b[0m${ctx}`); } } catch (e) { // Silent fail - don't break statusline on parse errors diff --git a/package.json b/package.json index b9c9676869..cdc79cc900 100644 --- a/package.json +++ b/package.json @@ -1,7 +1,7 @@ { "name": "get-shit-done-cc", "version": "1.22.0", - "description": "A meta-prompting, context engineering and spec-driven development system for Claude Code, OpenCode, Gemini and Codex by TÂCHES.", + "description": "A meta-prompting, context engineering and spec-driven development system for Claude Code, OpenCode, Gemini, Codex, and Kimi by TÂCHES.", "bin": { "get-shit-done-cc": "bin/install.js" }, @@ -23,7 +23,9 @@ "gemini", "gemini-cli", "codex", - "codex-cli" + "codex-cli", + "kimi", + "kimi-cli" ], "author": "TÂCHES", "license": "MIT", diff --git a/tests/commands.test.cjs b/tests/commands.test.cjs index e2497a800d..ba36c3cd45 100644 --- a/tests/commands.test.cjs +++ b/tests/commands.test.cjs @@ -8,6 +8,97 @@ const fs = require('fs'); const path = require('path'); const { runGsdTools, createTempProject, cleanup } = require('./helpers.cjs'); +// ─── resolve-adaptive-model CLI ───────────────────────────────────────────── + +describe('resolve-adaptive-model command', () => { + let tmpDir; + + beforeEach(() => { + tmpDir = createTempProject(); + }); + + afterEach(() => { + cleanup(tmpDir); + }); + 
+ test('returns model and complexity for adaptive profile', () => { + fs.writeFileSync( + path.join(tmpDir, '.planning', 'config.json'), + JSON.stringify({ model_profile: 'adaptive' }) + ); + const ctx = JSON.stringify({ files_modified: ['a.js'], task_count: 1, objective: 'fix typo' }); + const result = runGsdTools(['resolve-adaptive-model', 'gsd-executor', '--context', ctx], tmpDir); + assert.ok(result.success, `Command failed: ${result.error}`); + + const output = JSON.parse(result.output); + assert.strictEqual(output.model, 'haiku'); + assert.strictEqual(output.profile, 'adaptive'); + assert.ok(output.complexity); + assert.strictEqual(output.complexity.tier, 'simple'); + }); + + test('falls back to balanced behavior when not adaptive', () => { + fs.writeFileSync( + path.join(tmpDir, '.planning', 'config.json'), + JSON.stringify({ model_profile: 'balanced' }) + ); + const ctx = JSON.stringify({ files_modified: ['a.js'], task_count: 1, objective: 'fix typo' }); + const result = runGsdTools(['resolve-adaptive-model', 'gsd-executor', '--context', ctx], tmpDir); + assert.ok(result.success, `Command failed: ${result.error}`); + + const output = JSON.parse(result.output); + assert.strictEqual(output.model, 'sonnet'); + assert.strictEqual(output.profile, 'balanced'); + assert.strictEqual(output.complexity, undefined); + }); + + test('adaptive with log_selections true creates usage log', () => { + fs.writeFileSync( + path.join(tmpDir, '.planning', 'config.json'), + JSON.stringify({ model_profile: 'adaptive', adaptive_settings: { log_selections: true } }) + ); + const ctx = JSON.stringify({ files_modified: ['a.js'], task_count: 1, objective: 'fix typo' }); + const result = runGsdTools(['resolve-adaptive-model', 'gsd-executor', '--context', ctx], tmpDir); + assert.ok(result.success, `Command failed: ${result.error}`); + + const logPath = path.join(tmpDir, '.planning', 'adaptive-usage.json'); + assert.ok(fs.existsSync(logPath), 'adaptive-usage.json should exist'); + const 
log = JSON.parse(fs.readFileSync(logPath, 'utf-8')); + assert.strictEqual(log.length, 1); + assert.strictEqual(log[0].agent, 'gsd-executor'); + assert.strictEqual(log[0].tier, 'simple'); + assert.strictEqual(typeof log[0].timestamp, 'string'); + assert.strictEqual(log[0].model, 'haiku'); + }); + + test('adaptive with log_selections false does not create log', () => { + fs.writeFileSync( + path.join(tmpDir, '.planning', 'config.json'), + JSON.stringify({ model_profile: 'adaptive', adaptive_settings: { log_selections: false } }) + ); + const ctx = JSON.stringify({ files_modified: ['a.js'], task_count: 1, objective: 'fix typo' }); + const result = runGsdTools(['resolve-adaptive-model', 'gsd-executor', '--context', ctx], tmpDir); + assert.ok(result.success, `Command failed: ${result.error}`); + + const logPath = path.join(tmpDir, '.planning', 'adaptive-usage.json'); + assert.ok(!fs.existsSync(logPath), 'adaptive-usage.json should not exist'); + }); + + test('non-adaptive profile does not create log regardless of settings', () => { + fs.writeFileSync( + path.join(tmpDir, '.planning', 'config.json'), + JSON.stringify({ model_profile: 'balanced', adaptive_settings: { log_selections: true } }) + ); + const ctx = JSON.stringify({ files_modified: ['a.js'], task_count: 1, objective: 'fix typo' }); + const result = runGsdTools(['resolve-adaptive-model', 'gsd-executor', '--context', ctx], tmpDir); + assert.ok(result.success, `Command failed: ${result.error}`); + + const logPath = path.join(tmpDir, '.planning', 'adaptive-usage.json'); + assert.ok(!fs.existsSync(logPath), 'adaptive-usage.json should not exist for non-adaptive'); + }); +}); + + describe('history-digest command', () => { let tmpDir; @@ -1186,3 +1277,395 @@ describe('websearch command', () => { assert.strictEqual(output.error, 'Network timeout'); }); }); + +// ───────────────────────────────────────────────────────────────────────────── +// bug list command +// 
───────────────────────────────────────────────────────────────────────────── + +describe('bug list command', () => { + let tmpDir; + + beforeEach(() => { + tmpDir = createTempProject(); + }); + + afterEach(() => { + cleanup(tmpDir); + }); + + test('empty directory returns zero count', () => { + const result = runGsdTools('bug list', tmpDir); + assert.ok(result.success, `Command failed: ${result.error}`); + + const output = JSON.parse(result.output); + assert.strictEqual(output.count, 0, 'count should be 0'); + assert.deepStrictEqual(output.bugs, [], 'bugs should be empty'); + }); + + test('lists bugs with correct fields', () => { + const bugsDir = path.join(tmpDir, '.planning', 'bugs'); + fs.mkdirSync(bugsDir, { recursive: true }); + + fs.writeFileSync(path.join(bugsDir, 'BUG-001.md'), `--- +id: BUG-001 +title: "Login button broken" +severity: high +status: reported +area: auth +created: 2026-02-01T00:00:00.000Z +--- + +# BUG-001: Login button broken +`); + + fs.writeFileSync(path.join(bugsDir, 'BUG-002.md'), `--- +id: BUG-002 +title: "Typo in footer" +severity: low +status: investigating +area: ui +created: 2026-02-02T00:00:00.000Z +--- + +# BUG-002: Typo in footer +`); + + const result = runGsdTools('bug list', tmpDir); + assert.ok(result.success, `Command failed: ${result.error}`); + + const output = JSON.parse(result.output); + assert.strictEqual(output.count, 2, 'should have 2 bugs'); + + const bug1 = output.bugs.find(b => b.id === 'BUG-001'); + assert.ok(bug1, 'BUG-001 should be listed'); + assert.strictEqual(bug1.title, 'Login button broken'); + assert.strictEqual(bug1.severity, 'high'); + assert.strictEqual(bug1.status, 'reported'); + assert.strictEqual(bug1.area, 'auth'); + }); + + test('severity filter works', () => { + const bugsDir = path.join(tmpDir, '.planning', 'bugs'); + fs.mkdirSync(bugsDir, { recursive: true }); + + fs.writeFileSync(path.join(bugsDir, 'BUG-001.md'), `--- +id: BUG-001 +title: "Critical crash" +severity: critical +status: reported 
+area: core +created: 2026-02-01T00:00:00.000Z +--- +`); + fs.writeFileSync(path.join(bugsDir, 'BUG-002.md'), `--- +id: BUG-002 +title: "Minor typo" +severity: low +status: reported +area: ui +created: 2026-02-02T00:00:00.000Z +--- +`); + + const result = runGsdTools('bug list --severity critical', tmpDir); + assert.ok(result.success, `Command failed: ${result.error}`); + + const output = JSON.parse(result.output); + assert.strictEqual(output.count, 1, 'should have 1 critical bug'); + assert.strictEqual(output.bugs[0].severity, 'critical'); + }); + + test('status filter works', () => { + const bugsDir = path.join(tmpDir, '.planning', 'bugs'); + fs.mkdirSync(bugsDir, { recursive: true }); + + fs.writeFileSync(path.join(bugsDir, 'BUG-001.md'), `--- +id: BUG-001 +title: "Bug A" +severity: medium +status: investigating +area: api +created: 2026-02-01T00:00:00.000Z +--- +`); + fs.writeFileSync(path.join(bugsDir, 'BUG-002.md'), `--- +id: BUG-002 +title: "Bug B" +severity: medium +status: reported +area: api +created: 2026-02-02T00:00:00.000Z +--- +`); + + const result = runGsdTools('bug list --status investigating', tmpDir); + assert.ok(result.success, `Command failed: ${result.error}`); + + const output = JSON.parse(result.output); + assert.strictEqual(output.count, 1); + assert.strictEqual(output.bugs[0].status, 'investigating'); + }); + + test('area filter works', () => { + const bugsDir = path.join(tmpDir, '.planning', 'bugs'); + fs.mkdirSync(bugsDir, { recursive: true }); + + fs.writeFileSync(path.join(bugsDir, 'BUG-001.md'), `--- +id: BUG-001 +title: "Auth bug" +severity: high +status: reported +area: auth +created: 2026-02-01T00:00:00.000Z +--- +`); + fs.writeFileSync(path.join(bugsDir, 'BUG-002.md'), `--- +id: BUG-002 +title: "API bug" +severity: high +status: reported +area: api +created: 2026-02-02T00:00:00.000Z +--- +`); + + const result = runGsdTools('bug list --area auth', tmpDir); + assert.ok(result.success, `Command failed: ${result.error}`); + + const 
output = JSON.parse(result.output); + assert.strictEqual(output.count, 1); + assert.strictEqual(output.bugs[0].area, 'auth'); + }); +}); + +// ───────────────────────────────────────────────────────────────────────────── +// bug update command +// ───────────────────────────────────────────────────────────────────────────── + +describe('bug update command', () => { + let tmpDir; + + beforeEach(() => { + tmpDir = createTempProject(); + }); + + afterEach(() => { + cleanup(tmpDir); + }); + + test('updates status in frontmatter', () => { + const bugsDir = path.join(tmpDir, '.planning', 'bugs'); + fs.mkdirSync(bugsDir, { recursive: true }); + + fs.writeFileSync(path.join(bugsDir, 'BUG-001.md'), `--- +id: BUG-001 +title: "Test bug" +severity: high +status: reported +area: core +created: 2026-02-01T00:00:00.000Z +--- + +# BUG-001: Test bug +`); + + const result = runGsdTools('bug update BUG-001 --status investigating', tmpDir); + assert.ok(result.success, `Command failed: ${result.error}`); + + const output = JSON.parse(result.output); + assert.strictEqual(output.updated, true); + assert.strictEqual(output.status, 'investigating'); + + // Verify file was updated + const content = fs.readFileSync(path.join(bugsDir, 'BUG-001.md'), 'utf-8'); + assert.ok(content.includes('status: investigating'), 'status should be updated in file'); + assert.ok(content.match(/^updated:/m), 'should have updated timestamp'); + }); + + test('resolve moves to bugs/resolved/', () => { + const bugsDir = path.join(tmpDir, '.planning', 'bugs'); + fs.mkdirSync(bugsDir, { recursive: true }); + + fs.writeFileSync(path.join(bugsDir, 'BUG-001.md'), `--- +id: BUG-001 +title: "Resolved bug" +severity: medium +status: fixing +area: api +created: 2026-02-01T00:00:00.000Z +--- +`); + + const result = runGsdTools('bug resolve BUG-001', tmpDir); + assert.ok(result.success, `Command failed: ${result.error}`); + + const output = JSON.parse(result.output); + assert.strictEqual(output.updated, true); + 
assert.strictEqual(output.status, 'resolved'); + assert.strictEqual(output.moved, 'resolved'); + + // Verify moved + assert.ok( + !fs.existsSync(path.join(bugsDir, 'BUG-001.md')), + 'should be removed from bugs dir' + ); + assert.ok( + fs.existsSync(path.join(bugsDir, 'resolved', 'BUG-001.md')), + 'should be in resolved dir' + ); + }); + + test('fails for nonexistent bug', () => { + const result = runGsdTools('bug update BUG-999 --status investigating', tmpDir); + assert.ok(!result.success, 'should fail'); + assert.ok(result.error.includes('not found'), 'error mentions not found'); + }); + + test('fails for invalid status', () => { + const bugsDir = path.join(tmpDir, '.planning', 'bugs'); + fs.mkdirSync(bugsDir, { recursive: true }); + + fs.writeFileSync(path.join(bugsDir, 'BUG-001.md'), `--- +id: BUG-001 +title: "Test" +severity: low +status: reported +area: ui +created: 2026-02-01T00:00:00.000Z +--- +`); + + const result = runGsdTools('bug update BUG-001 --status invalid-status', tmpDir); + assert.ok(!result.success, 'should fail for invalid status'); + assert.ok(result.error.includes('Invalid status'), 'error mentions invalid status'); + }); +}); + +// ───────────────────────────────────────────────────────────────────────────── +// init bugs command +// ───────────────────────────────────────────────────────────────────────────── + +describe('init bugs command', () => { + let tmpDir; + + beforeEach(() => { + tmpDir = createTempProject(); + }); + + afterEach(() => { + cleanup(tmpDir); + }); + + test('returns correct schema when empty', () => { + const result = runGsdTools('init bugs', tmpDir); + assert.ok(result.success, `Command failed: ${result.error}`); + + const output = JSON.parse(result.output); + assert.strictEqual(output.bug_count, 0, 'should have 0 bugs'); + assert.deepStrictEqual(output.bugs, [], 'bugs should be empty'); + assert.strictEqual(output.next_id, 1, 'next_id should be 1'); + assert.strictEqual(output.next_id_padded, '001', 'next_id_padded 
should be 001'); + assert.ok(output.date, 'should have date'); + assert.ok(output.timestamp, 'should have timestamp'); + assert.strictEqual(output.bugs_dir, '.planning/bugs'); + assert.strictEqual(output.resolved_dir, '.planning/bugs/resolved'); + }); + + test('calculates next_id from existing bugs', () => { + const bugsDir = path.join(tmpDir, '.planning', 'bugs'); + fs.mkdirSync(bugsDir, { recursive: true }); + + fs.writeFileSync(path.join(bugsDir, 'BUG-003.md'), `--- +id: BUG-003 +title: "Test bug" +severity: high +status: reported +area: core +created: 2026-02-01T00:00:00.000Z +--- +`); + fs.writeFileSync(path.join(bugsDir, 'BUG-007.md'), `--- +id: BUG-007 +title: "Another bug" +severity: low +status: investigating +area: ui +created: 2026-02-02T00:00:00.000Z +--- +`); + + const result = runGsdTools('init bugs', tmpDir); + assert.ok(result.success, `Command failed: ${result.error}`); + + const output = JSON.parse(result.output); + assert.strictEqual(output.bug_count, 2, 'should have 2 bugs'); + assert.strictEqual(output.next_id, 8, 'next_id should be 8'); + assert.strictEqual(output.next_id_padded, '008', 'next_id_padded should be 008'); + }); + + test('accounts for resolved bugs in next_id', () => { + const bugsDir = path.join(tmpDir, '.planning', 'bugs'); + const resolvedDir = path.join(bugsDir, 'resolved'); + fs.mkdirSync(resolvedDir, { recursive: true }); + + fs.writeFileSync(path.join(bugsDir, 'BUG-001.md'), `--- +id: BUG-001 +title: "Active bug" +severity: medium +status: reported +area: api +created: 2026-02-01T00:00:00.000Z +--- +`); + fs.writeFileSync(path.join(resolvedDir, 'BUG-010.md'), `--- +id: BUG-010 +title: "Old resolved bug" +severity: low +status: resolved +area: ui +created: 2026-01-01T00:00:00.000Z +--- +`); + + const result = runGsdTools('init bugs', tmpDir); + assert.ok(result.success, `Command failed: ${result.error}`); + + const output = JSON.parse(result.output); + assert.strictEqual(output.bug_count, 1, 'should only count active bugs'); 
+ assert.strictEqual(output.next_id, 11, 'next_id should account for resolved BUG-010'); + assert.strictEqual(output.next_id_padded, '011'); + }); +}); + +// ───────────────────────────────────────────────────────────────────────────── +// scaffold bugs command +// ───────────────────────────────────────────────────────────────────────────── + +describe('scaffold bugs command', () => { + let tmpDir; + + beforeEach(() => { + tmpDir = createTempProject(); + }); + + afterEach(() => { + cleanup(tmpDir); + }); + + test('creates both directories', () => { + const result = runGsdTools('scaffold bugs', tmpDir); + assert.ok(result.success, `Command failed: ${result.error}`); + + const output = JSON.parse(result.output); + assert.strictEqual(output.created, true); + + assert.ok( + fs.existsSync(path.join(tmpDir, '.planning', 'bugs')), + 'bugs directory should be created' + ); + assert.ok( + fs.existsSync(path.join(tmpDir, '.planning', 'bugs', 'resolved')), + 'bugs/resolved directory should be created' + ); + }); +}); diff --git a/tests/core.test.cjs b/tests/core.test.cjs index d859269c26..5ea648ce14 100644 --- a/tests/core.test.cjs +++ b/tests/core.test.cjs @@ -14,7 +14,9 @@ const os = require('os'); const { loadConfig, resolveModelInternal, + evaluateComplexity, MODEL_PROFILES, + ADAPTIVE_TIERS, escapeRegex, generateSlugInternal, normalizePhaseName, @@ -119,6 +121,18 @@ describe('loadConfig', () => { const config = loadConfig(tmpDir); assert.strictEqual(config.commit_docs, false); }); + + test('reads adaptive_settings from config', () => { + writeConfig({ model_profile: 'adaptive', adaptive_settings: { min_model: 'sonnet', max_model: 'opus' } }); + const config = loadConfig(tmpDir); + assert.deepStrictEqual(config.adaptive_settings, { min_model: 'sonnet', max_model: 'opus' }); + }); + + test('returns null adaptive_settings when not present', () => { + writeConfig({ model_profile: 'balanced' }); + const config = loadConfig(tmpDir); + 
assert.strictEqual(config.adaptive_settings, null); + }); }); // ─── resolveModelInternal ────────────────────────────────────────────────────── @@ -146,7 +160,7 @@ describe('resolveModelInternal', () => { test('all known agents resolve to a valid string for each profile', () => { const knownAgents = ['gsd-planner', 'gsd-executor', 'gsd-phase-researcher', 'gsd-codebase-mapper']; const profiles = ['quality', 'balanced', 'budget']; - const validValues = ['inherit', 'sonnet', 'haiku', 'opus']; + const validValues = ['opus', 'sonnet', 'haiku']; for (const profile of profiles) { writeConfig({ model_profile: profile }); @@ -170,11 +184,11 @@ describe('resolveModelInternal', () => { assert.strictEqual(resolveModelInternal(tmpDir, 'gsd-executor'), 'haiku'); }); - test('opus override resolves to inherit', () => { + test('opus override resolves to opus', () => { writeConfig({ model_overrides: { 'gsd-executor': 'opus' }, }); - assert.strictEqual(resolveModelInternal(tmpDir, 'gsd-executor'), 'inherit'); + assert.strictEqual(resolveModelInternal(tmpDir, 'gsd-executor'), 'opus'); }); test('agents not in override fall back to profile', () => { @@ -182,8 +196,8 @@ describe('resolveModelInternal', () => { model_profile: 'quality', model_overrides: { 'gsd-executor': 'haiku' }, }); - // gsd-planner not overridden, should use quality profile -> opus -> inherit - assert.strictEqual(resolveModelInternal(tmpDir, 'gsd-planner'), 'inherit'); + // gsd-planner not overridden, should use quality profile -> opus + assert.strictEqual(resolveModelInternal(tmpDir, 'gsd-planner'), 'opus'); }); }); @@ -195,8 +209,8 @@ describe('resolveModelInternal', () => { test('defaults to balanced profile when model_profile missing', () => { writeConfig({}); - // balanced profile, gsd-planner -> opus -> inherit - assert.strictEqual(resolveModelInternal(tmpDir, 'gsd-planner'), 'inherit'); + // balanced profile, gsd-planner -> opus + assert.strictEqual(resolveModelInternal(tmpDir, 'gsd-planner'), 'opus'); }); }); 
}); @@ -653,7 +667,7 @@ describe('getRoadmapPhaseInternal', () => { assert.strictEqual(result, null); }); - test('extracts full section text', () => { + test('extracts full section text (with next phase delimiter)', () => { fs.writeFileSync( path.join(tmpDir, '.planning', 'ROADMAP.md'), '### Phase 1: Foundation\n**Goal**: Build the base\n**Requirements**: TEST-01\nSome details here\n\n### Phase 2: API\n**Goal**: REST\n' @@ -665,3 +679,295 @@ describe('getRoadmapPhaseInternal', () => { assert.ok(!result.section.includes('Phase 2: API')); }); }); + +// ─── evaluateComplexity ──────────────────────────────────────────────────────── + +describe('evaluateComplexity', () => { + test('null context returns medium tier with score 5', () => { + const result = evaluateComplexity(null); + assert.strictEqual(result.score, 5); + assert.strictEqual(result.tier, 'medium'); + assert.ok(result.factors.length > 0); + }); + + test('undefined context returns medium tier with score 5', () => { + const result = evaluateComplexity(undefined); + assert.strictEqual(result.score, 5); + assert.strictEqual(result.tier, 'medium'); + }); + + test('low complexity returns simple tier', () => { + const result = evaluateComplexity({ + files_modified: ['a.js'], + task_count: 1, + objective: 'fix typo', + }); + assert.ok(result.score <= 3, `Expected score <= 3, got ${result.score}`); + assert.strictEqual(result.tier, 'simple'); + }); + + test('moderate complexity returns medium tier', () => { + const result = evaluateComplexity({ + files_modified: ['a.js', 'b.js', 'c.js'], + task_count: 4, + objective: 'add user profile page', + }); + assert.ok(result.score >= 4 && result.score <= 7, `Expected score 4-7, got ${result.score}`); + assert.strictEqual(result.tier, 'medium'); + }); + + test('high complexity returns complex tier', () => { + const result = evaluateComplexity({ + files_modified: ['a.js', 'b.js', 'c.js', 'd.js', 'e.js'], + task_count: 8, + objective: 'architect new integration with external 
API', + }); + assert.ok(result.score >= 8, `Expected score >= 8, got ${result.score}`); + assert.strictEqual(result.tier, 'complex'); + }); + + test('detects architecture keywords', () => { + const result = evaluateComplexity({ + files_modified: [], + task_count: 1, + objective: 'architect new data model', + }); + assert.ok(result.factors.some(f => f.includes('architecture'))); + }); + + test('bare design does not trigger architecture keywords', () => { + const result = evaluateComplexity({ + files_modified: [], + task_count: 1, + objective: 'design the button', + }); + assert.ok(!result.factors.some(f => f.includes('architecture'))); + }); + + test('system-design and system_design still match architecture keywords', () => { + const r1 = evaluateComplexity({ files_modified: [], task_count: 1, objective: 'system-design the auth flow' }); + assert.ok(r1.factors.some(f => f.includes('architecture')), 'system-design should match'); + const r2 = evaluateComplexity({ files_modified: [], task_count: 1, objective: 'system_design the auth flow' }); + assert.ok(r2.factors.some(f => f.includes('architecture')), 'system_design should match'); + }); + + test('detects integration keywords', () => { + const result = evaluateComplexity({ + files_modified: [], + task_count: 1, + objective: 'integrate with external API', + }); + assert.ok(result.factors.some(f => f.includes('integration'))); + }); + + test('detects novel pattern keywords', () => { + const result = evaluateComplexity({ + files_modified: [], + task_count: 1, + objective: 'prototype new library', + }); + assert.ok(result.factors.some(f => f.includes('novel'))); + }); + + test('detects refactoring keywords', () => { + const result = evaluateComplexity({ + files_modified: [], + task_count: 1, + objective: 'refactor authentication module', + }); + assert.ok(result.factors.some(f => f.includes('refactoring'))); + }); + + test('files_modified capped at 5 points', () => { + const result = evaluateComplexity({ + 
files_modified: ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h'], + task_count: 0, + objective: '', + }); + // 8 files but capped at 5 points + assert.strictEqual(result.score, 5); + assert.ok(result.factors.some(f => f.includes('files_modified: 8 (+5)'))); + }); + + test('plan_type tdd adds 2 to score', () => { + const result = evaluateComplexity({ + files_modified: [], + task_count: 1, + objective: 'add tests', + plan_type: 'tdd', + }); + assert.ok(result.factors.some(f => f.includes('tdd plan type (+2)'))); + // base: 0 files + 0 task pts + tdd (+2) = 2 + assert.ok(result.score >= 2); + }); + + test('depends_on adds 1 to score', () => { + const result = evaluateComplexity({ + files_modified: [], + task_count: 1, + objective: 'build integration', + depends_on: ['03-01', '03-02'], + }); + assert.ok(result.factors.some(f => f.includes('depends_on: 2 deps (+1)'))); + }); + + test('test files in files_modified adds 1 to score', () => { + const result = evaluateComplexity({ + files_modified: ['src/api.ts', 'src/api.test.ts', 'src/__tests__/helper.js'], + task_count: 1, + objective: 'add api', + }); + assert.ok(result.factors.some(f => f.includes('test_files:'))); + }); + + test('all new signals combined produce expected aggregate', () => { + const result = evaluateComplexity({ + files_modified: ['src/a.ts', 'src/a.test.ts'], + task_count: 1, + objective: 'build module', + plan_type: 'tdd', + depends_on: ['03-01'], + }); + // 2 files (+2) + tdd (+2) + depends_on (+1) + test_files (+1) = 6 + assert.strictEqual(result.score, 6, `Expected score 6, got ${result.score}: ${result.factors.join(', ')}`); + assert.strictEqual(result.tier, 'medium'); + }); + + test('factor labels included in output', () => { + const result = evaluateComplexity({ + files_modified: ['a.js', 'b.js'], + task_count: 6, + objective: 'refactor across modules', + }); + assert.ok(Array.isArray(result.factors)); + assert.ok(result.factors.length > 0); + // Each factor should be a descriptive string + 
result.factors.forEach(f => assert.strictEqual(typeof f, 'string')); + }); +}); + +// ─── resolveModelInternal (adaptive profile) ─────────────────────────────────── + +describe('resolveModelInternal (adaptive profile)', () => { + let tmpDir; + + beforeEach(() => { + tmpDir = fs.mkdtempSync(path.join(os.tmpdir(), 'gsd-core-test-')); + fs.mkdirSync(path.join(tmpDir, '.planning'), { recursive: true }); + }); + + afterEach(() => { + fs.rmSync(tmpDir, { recursive: true, force: true }); + }); + + function writeConfig(obj) { + fs.writeFileSync( + path.join(tmpDir, '.planning', 'config.json'), + JSON.stringify(obj, null, 2) + ); + } + + test('no context defaults to medium tier', () => { + writeConfig({ model_profile: 'adaptive' }); + // gsd-executor in medium tier = sonnet + const result = resolveModelInternal(tmpDir, 'gsd-executor'); + assert.strictEqual(result, 'sonnet'); + // gsd-planner in medium tier = opus + const plannerResult = resolveModelInternal(tmpDir, 'gsd-planner'); + assert.strictEqual(plannerResult, 'opus'); + }); + + test('medium tier codebase-mapper resolves to haiku', () => { + writeConfig({ model_profile: 'adaptive' }); + // No context -> medium tier; codebase-mapper in medium = haiku + const result = resolveModelInternal(tmpDir, 'gsd-codebase-mapper'); + assert.strictEqual(result, 'haiku'); + }); + + test('simple context returns haiku for executor', () => { + writeConfig({ model_profile: 'adaptive' }); + const ctx = { files_modified: ['a.js'], task_count: 1, objective: 'fix typo' }; + const result = resolveModelInternal(tmpDir, 'gsd-executor', ctx); + assert.strictEqual(result, 'haiku'); + }); + + test('complex context returns opus for planner', () => { + writeConfig({ model_profile: 'adaptive' }); + const ctx = { + files_modified: ['a.js', 'b.js', 'c.js', 'd.js', 'e.js'], + task_count: 8, + objective: 'architect new integration with external API', + }; + const result = resolveModelInternal(tmpDir, 'gsd-planner', ctx); + assert.strictEqual(result, 
'opus'); + }); + + test('min_model clamping upgrades haiku to sonnet', () => { + writeConfig({ + model_profile: 'adaptive', + adaptive_settings: { min_model: 'sonnet' }, + }); + // Simple context would normally give haiku for executor + const ctx = { files_modified: ['a.js'], task_count: 1, objective: 'fix typo' }; + const result = resolveModelInternal(tmpDir, 'gsd-executor', ctx); + assert.strictEqual(result, 'sonnet'); + }); + + test('max_model clamping caps opus to sonnet', () => { + writeConfig({ + model_profile: 'adaptive', + adaptive_settings: { max_model: 'sonnet' }, + }); + // Complex context would normally give opus for planner + const ctx = { + files_modified: ['a.js', 'b.js', 'c.js', 'd.js', 'e.js'], + task_count: 8, + objective: 'architect new integration with external API', + }; + const result = resolveModelInternal(tmpDir, 'gsd-planner', ctx); + assert.strictEqual(result, 'sonnet'); + }); + + test('overrides take precedence over adaptive', () => { + writeConfig({ + model_profile: 'adaptive', + model_overrides: { 'gsd-executor': 'opus' }, + }); + const ctx = { files_modified: ['a.js'], task_count: 1, objective: 'fix typo' }; + // Override should win over adaptive + const result = resolveModelInternal(tmpDir, 'gsd-executor', ctx); + assert.strictEqual(result, 'opus'); + }); + + test('context ignored for non-adaptive profiles', () => { + writeConfig({ model_profile: 'balanced' }); + const ctx = { files_modified: ['a.js'], task_count: 1, objective: 'fix typo' }; + // balanced profile, gsd-executor -> sonnet regardless of context + const result = resolveModelInternal(tmpDir, 'gsd-executor', ctx); + assert.strictEqual(result, 'sonnet'); + }); +}); + +// ─── ADAPTIVE_TIERS constant ─────────────────────────────────────────────────── + +describe('ADAPTIVE_TIERS', () => { + test('has all three tiers', () => { + assert.ok(ADAPTIVE_TIERS.simple); + assert.ok(ADAPTIVE_TIERS.medium); + assert.ok(ADAPTIVE_TIERS.complex); + }); + + test('all known agents have 
entries in each tier', () => { + const knownAgents = Object.keys(MODEL_PROFILES); + const validModels = ['opus', 'sonnet', 'haiku']; + for (const tier of ['simple', 'medium', 'complex']) { + for (const agent of knownAgents) { + const model = ADAPTIVE_TIERS[tier][agent]; + assert.ok( + validModels.includes(model), + `ADAPTIVE_TIERS.${tier}.${agent} = ${model}, expected one of ${validModels.join(', ')}` + ); + } + } + }); +}); diff --git a/tests/init.test.cjs b/tests/init.test.cjs index 13af9efd8e..d5f5ba10e6 100644 --- a/tests/init.test.cjs +++ b/tests/init.test.cjs @@ -33,6 +33,36 @@ describe('init commands', () => { assert.strictEqual(output.config_path, '.planning/config.json'); }); + test('init execute-phase includes model_profile and adaptive_settings', () => { + const phaseDir = path.join(tmpDir, '.planning', 'phases', '03-api'); + fs.mkdirSync(phaseDir, { recursive: true }); + fs.writeFileSync(path.join(phaseDir, '03-01-PLAN.md'), '# Plan'); + fs.writeFileSync( + path.join(tmpDir, '.planning', 'config.json'), + JSON.stringify({ model_profile: 'adaptive', adaptive_settings: { min_model: 'sonnet' } }) + ); + + const result = runGsdTools('init execute-phase 03', tmpDir); + assert.ok(result.success, `Command failed: ${result.error}`); + + const output = JSON.parse(result.output); + assert.strictEqual(output.model_profile, 'adaptive'); + assert.deepStrictEqual(output.adaptive_settings, { min_model: 'sonnet' }); + }); + + test('init execute-phase returns null adaptive_settings for non-adaptive profile', () => { + const phaseDir = path.join(tmpDir, '.planning', 'phases', '03-api'); + fs.mkdirSync(phaseDir, { recursive: true }); + fs.writeFileSync(path.join(phaseDir, '03-01-PLAN.md'), '# Plan'); + + const result = runGsdTools('init execute-phase 03', tmpDir); + assert.ok(result.success, `Command failed: ${result.error}`); + + const output = JSON.parse(result.output); + assert.strictEqual(output.model_profile, 'balanced'); + 
assert.strictEqual(output.adaptive_settings, null); + }); + test('init plan-phase returns file paths', () => { const phaseDir = path.join(tmpDir, '.planning', 'phases', '03-api'); fs.mkdirSync(phaseDir, { recursive: true }); @@ -670,6 +700,62 @@ describe('cmdInitQuick', () => { const output = JSON.parse(result.output); assert.ok(output.slug.length <= 40, `Slug should be <= 40 chars, got ${output.slug.length}: "${output.slug}"`); }); + + test('init quick with adaptive config returns model_profile and adaptive_settings', () => { + fs.writeFileSync( + path.join(tmpDir, '.planning', 'config.json'), + JSON.stringify({ model_profile: 'adaptive', adaptive_settings: { min_model: 'sonnet' } }) + ); + + const result = runGsdTools('init quick "Test task"', tmpDir); + assert.ok(result.success, `Command failed: ${result.error}`); + + const output = JSON.parse(result.output); + assert.strictEqual(output.model_profile, 'adaptive'); + assert.deepStrictEqual(output.adaptive_settings, { min_model: 'sonnet' }); + }); +}); + +// ───────────────────────────────────────────────────────────────────────────── +// cmdInitPlanPhase adaptive fields (INIT-07) +// ───────────────────────────────────────────────────────────────────────────── + +describe('cmdInitPlanPhase adaptive fields', () => { + let tmpDir; + + beforeEach(() => { + tmpDir = createTempProject(); + }); + + afterEach(() => { + cleanup(tmpDir); + }); + + test('init plan-phase with adaptive config returns model_profile and adaptive_settings', () => { + fs.mkdirSync(path.join(tmpDir, '.planning', 'phases', '03-api'), { recursive: true }); + fs.writeFileSync( + path.join(tmpDir, '.planning', 'config.json'), + JSON.stringify({ model_profile: 'adaptive', adaptive_settings: { max_model: 'sonnet' } }) + ); + + const result = runGsdTools('init plan-phase 3', tmpDir); + assert.ok(result.success, `Command failed: ${result.error}`); + + const output = JSON.parse(result.output); + assert.strictEqual(output.model_profile, 'adaptive'); + 
assert.deepStrictEqual(output.adaptive_settings, { max_model: 'sonnet' }); + }); + + test('init plan-phase with non-adaptive config returns null adaptive_settings', () => { + fs.mkdirSync(path.join(tmpDir, '.planning', 'phases', '03-api'), { recursive: true }); + + const result = runGsdTools('init plan-phase 3', tmpDir); + assert.ok(result.success, `Command failed: ${result.error}`); + + const output = JSON.parse(result.output); + assert.strictEqual(output.model_profile, 'balanced'); + assert.strictEqual(output.adaptive_settings, null); + }); }); // ───────────────────────────────────────────────────────────────────────────── diff --git a/tests/kimi-config.test.cjs b/tests/kimi-config.test.cjs new file mode 100644 index 0000000000..153f4e7c6b --- /dev/null +++ b/tests/kimi-config.test.cjs @@ -0,0 +1,313 @@ +/** + * GSD Tools Tests - kimi-config.cjs + * + * Tests for Kimi CLI adapter: tool name conversion, skill conversion, + * agent YAML conversion, and integration with copyCommandsAsKimiSkills. 
+ */ + +// Enable test exports from install.js (skips main CLI logic) +process.env.GSD_TEST_MODE = '1'; + +const { test, describe, beforeEach, afterEach } = require('node:test'); +const assert = require('node:assert'); +const fs = require('fs'); +const path = require('path'); +const os = require('os'); + +const { + convertKimiToolName, + convertClaudeToKimiSkill, + convertClaudeToKimiAgent, +} = require('../bin/install.js'); + +// ─── convertKimiToolName ───────────────────────────────────────────────────── + +describe('convertKimiToolName', () => { + test('maps file tools to kimi_cli.tools.file module', () => { + assert.strictEqual(convertKimiToolName('Read'), 'kimi_cli.tools.file:ReadFile'); + assert.strictEqual(convertKimiToolName('Write'), 'kimi_cli.tools.file:WriteFile'); + assert.strictEqual(convertKimiToolName('Edit'), 'kimi_cli.tools.file:StrReplaceFile'); + assert.strictEqual(convertKimiToolName('Glob'), 'kimi_cli.tools.file:Glob'); + assert.strictEqual(convertKimiToolName('Grep'), 'kimi_cli.tools.file:Grep'); + assert.strictEqual(convertKimiToolName('ReadMediaFile'), 'kimi_cli.tools.file:ReadMediaFile'); + }); + + test('maps Bash to Shell', () => { + assert.strictEqual(convertKimiToolName('Bash'), 'kimi_cli.tools.shell:Shell'); + }); + + test('maps web tools', () => { + assert.strictEqual(convertKimiToolName('WebSearch'), 'kimi_cli.tools.web:SearchWeb'); + assert.strictEqual(convertKimiToolName('WebFetch'), 'kimi_cli.tools.web:FetchURL'); + }); + + test('maps interaction tools', () => { + assert.strictEqual(convertKimiToolName('TodoWrite'), 'kimi_cli.tools.todo:SetTodoList'); + assert.strictEqual(convertKimiToolName('AskUserQuestion'), 'kimi_cli.tools.ask_user:AskUserQuestion'); + assert.strictEqual(convertKimiToolName('Task'), 'kimi_cli.tools.multiagent:Task'); + }); + + test('returns null for MCP tools', () => { + assert.strictEqual(convertKimiToolName('mcp__context7__search'), null); + 
assert.strictEqual(convertKimiToolName('mcp__plugin_supabase_supabase__list_tables'), null); + assert.strictEqual(convertKimiToolName('mcp__'), null); + }); + + test('returns null for unknown tools', () => { + assert.strictEqual(convertKimiToolName('UnknownTool'), null); + assert.strictEqual(convertKimiToolName('Agent'), null); + assert.strictEqual(convertKimiToolName('ExitPlanMode'), null); + }); +}); + +// ─── convertClaudeToKimiSkill ──────────────────────────────────────────────── + +describe('convertClaudeToKimiSkill', () => { + test('converts slash commands to /skill: syntax (template literal regression)', () => { + const input = `--- +name: gsd-test +description: Test skill +--- + +Run /gsd:execute-phase to proceed, then /gsd:verify-work.`; + + const result = convertClaudeToKimiSkill(input, 'gsd-test'); + assert.ok(result.includes('/skill:gsd-execute-phase'), 'converts execute-phase'); + assert.ok(result.includes('/skill:gsd-verify-work'), 'converts verify-work'); + assert.ok(!result.includes('/gsd:execute-phase'), 'removes original slash command'); + // Regression: template literal must interpolate — not emit raw backtick string + assert.ok(!result.includes('`/skill:gsd-'), 'no backtick-wrapped syntax'); + assert.ok(!result.includes('${cmd}'), 'no un-interpolated ${cmd}'); + }); + + test('rewrites frontmatter to name + description only', () => { + const input = `--- +name: gsd-plan-phase +description: Create a detailed plan +tools: Read, Write, Bash +color: purple +--- + +Body content here.`; + + const result = convertClaudeToKimiSkill(input, 'gsd-plan-phase'); + assert.ok(result.startsWith('---\n'), 'starts with frontmatter delimiter'); + assert.ok(result.includes('name: gsd-plan-phase'), 'has skill name'); + assert.ok(result.includes('description: Create a detailed plan'), 'has description'); + assert.ok(!result.includes('tools:'), 'drops tools from frontmatter'); + assert.ok(!result.includes('color:'), 'drops color from frontmatter'); + }); + + 
test('preserves body content after frontmatter', () => { + const input = `--- +name: gsd-debug +description: Debug session +--- + +## Instructions + +Do the thing.`; + + const result = convertClaudeToKimiSkill(input, 'gsd-debug'); + assert.ok(result.includes('## Instructions'), 'body preserved'); + assert.ok(result.includes('Do the thing.'), 'body text preserved'); + }); + + test('converts Claude tool references in body text', () => { + const input = `--- +name: gsd-test +description: Test +--- + +Use Read("file") and Write("file") and Bash("cmd"). +Also TodoWrite("todos") and WebSearch("query") and WebFetch("url"). +Edit("file") too.`; + + const result = convertClaudeToKimiSkill(input, 'gsd-test'); + assert.ok(result.includes('ReadFile('), 'Read → ReadFile'); + assert.ok(result.includes('WriteFile('), 'Write → WriteFile'); + assert.ok(result.includes('StrReplaceFile('), 'Edit → StrReplaceFile'); + assert.ok(result.includes('Shell('), 'Bash → Shell'); + assert.ok(result.includes('SetTodoList('), 'TodoWrite → SetTodoList'); + assert.ok(result.includes('SearchWeb('), 'WebSearch → SearchWeb'); + assert.ok(result.includes('FetchURL('), 'WebFetch → FetchURL'); + }); + + test('works with content that has no frontmatter', () => { + const input = 'Simple content with /gsd:help slash command.'; + const result = convertClaudeToKimiSkill(input, 'gsd-help'); + assert.ok(result.includes('/skill:gsd-help'), 'converts slash command'); + assert.ok(result.includes('name: gsd-help'), 'adds frontmatter with skill name'); + }); + + test('preserves description when frontmatter has none', () => { + const input = `--- +name: gsd-minimal +--- + +Minimal skill content.`; + + const result = convertClaudeToKimiSkill(input, 'gsd-minimal'); + assert.ok(result.includes('name: gsd-minimal'), 'has name'); + assert.ok(!result.includes('description:'), 'no description field when absent'); + }); +}); + +// ─── convertClaudeToKimiAgent ──────────────────────────────────────────────── + 
+describe('convertClaudeToKimiAgent', () => {
+  const sampleAgent = `---
+name: gsd-executor
+description: Executes GSD plans with atomic commits
+tools: Read, Write, Edit, Bash, Grep, Glob
+color: yellow
+---
+
+<role>
+You are a GSD plan executor.
+</role>
+
+Execute the plan step by step.`;
+
+  test('returns null for content without frontmatter', () => {
+    assert.strictEqual(convertClaudeToKimiAgent('Just plain content'), null);
+    assert.strictEqual(convertClaudeToKimiAgent(''), null);
+  });
+
+  test('returns object with yaml and systemPrompt properties', () => {
+    const result = convertClaudeToKimiAgent(sampleAgent);
+    assert.ok(result !== null, 'returns non-null');
+    assert.ok(typeof result.yaml === 'string', 'has yaml string');
+    assert.ok(typeof result.systemPrompt === 'string', 'has systemPrompt string');
+  });
+
+  test('yaml starts with version: 1', () => {
+    const result = convertClaudeToKimiAgent(sampleAgent);
+    assert.ok(result.yaml.startsWith('version: 1\n'), 'yaml starts with version: 1');
+  });
+
+  test('yaml includes agent name and description', () => {
+    const result = convertClaudeToKimiAgent(sampleAgent);
+    assert.ok(result.yaml.includes('name: gsd-executor'), 'has name');
+    assert.ok(result.yaml.includes('description: Executes GSD plans with atomic commits'), 'has description');
+  });
+
+  test('yaml maps tools to kimi module paths', () => {
+    const result = convertClaudeToKimiAgent(sampleAgent);
+    assert.ok(result.yaml.includes('"kimi_cli.tools.file:ReadFile"'), 'maps Read');
+    assert.ok(result.yaml.includes('"kimi_cli.tools.file:WriteFile"'), 'maps Write');
+    assert.ok(result.yaml.includes('"kimi_cli.tools.file:StrReplaceFile"'), 'maps Edit');
+    assert.ok(result.yaml.includes('"kimi_cli.tools.shell:Shell"'), 'maps Bash');
+    assert.ok(result.yaml.includes('"kimi_cli.tools.file:Grep"'), 'maps Grep');
+    assert.ok(result.yaml.includes('"kimi_cli.tools.file:Glob"'), 'maps Glob');
+  });
+
+  test('yaml excludes MCP tools from tools list', () => {
+    const 
agentWithMcp = `---
+name: gsd-researcher
+description: Research agent
+tools: Read, Bash, mcp__context7__search, mcp__supabase__query
+---
+
+Researcher body.`;
+    const result = convertClaudeToKimiAgent(agentWithMcp);
+    assert.ok(!result.yaml.includes('mcp__'), 'excludes MCP tools');
+    assert.ok(result.yaml.includes('"kimi_cli.tools.file:ReadFile"'), 'keeps valid tools');
+  });
+
+  test('yaml sets system_prompt_path to ./<name>.md', () => {
+    const result = convertClaudeToKimiAgent(sampleAgent);
+    assert.ok(result.yaml.includes('system_prompt_path: ./gsd-executor.md'), 'has correct system_prompt_path');
+  });
+
+  test('systemPrompt contains agent body content', () => {
+    const result = convertClaudeToKimiAgent(sampleAgent);
+    assert.ok(result.systemPrompt.includes('You are a GSD plan executor.'), 'body in systemPrompt');
+    assert.ok(result.systemPrompt.includes('Execute the plan step by step.'), 'body text in systemPrompt');
+  });
+
+  test('escapes ${VAR} patterns in systemPrompt', () => {
+    const agentWithVars = `---
+name: gsd-test
+description: Test
+tools: Read
+---
+
+The value is \${SOME_VAR} and also \${OTHER}.`;
+
+    const result = convertClaudeToKimiAgent(agentWithVars);
+    assert.ok(result.systemPrompt.includes('$SOME_VAR'), 'escapes ${SOME_VAR} to $SOME_VAR');
+    assert.ok(result.systemPrompt.includes('$OTHER'), 'escapes ${OTHER} to $OTHER');
+    assert.ok(!result.systemPrompt.includes('${'), 'no remaining ${} patterns');
+  });
+
+  test('handles agent with no tools field', () => {
+    const noTools = `---
+name: gsd-minimal
+description: Minimal agent
+---
+
+Body only.`;
+    const result = convertClaudeToKimiAgent(noTools);
+    assert.ok(result !== null, 'returns non-null');
+    assert.ok(!result.yaml.includes('tools:'), 'no tools section when none provided');
+  });
+});
+
+// ─── Integration: copyCommandsAsKimiSkills ───────────────────────────────────
+
+describe('copyCommandsAsKimiSkills (integration)', () => {
+  let tmpSkillsDir;
+  const commandsSrc = path.join(__dirname, '..', 'commands');
+  
const hasCommands = fs.existsSync(commandsSrc); + + beforeEach(() => { + tmpSkillsDir = fs.mkdtempSync(path.join(os.tmpdir(), 'gsd-kimi-skills-')); + }); + + afterEach(() => { + fs.rmSync(tmpSkillsDir, { recursive: true, force: true }); + }); + + (hasCommands ? test : test.skip)('installs skills to XDG-style skills dir', () => { + const { copyCommandsAsKimiSkills } = require('../bin/install.js'); + copyCommandsAsKimiSkills(commandsSrc, tmpSkillsDir, 'gsd', 'gsd', 'kimi'); + + const skillDirs = fs.readdirSync(tmpSkillsDir); + assert.ok(skillDirs.length > 0, 'at least one skill installed'); + + // Each skill is in its own gsd-/ directory + for (const dir of skillDirs) { + assert.ok(dir.startsWith('gsd-'), `skill dir starts with gsd-: ${dir}`); + const skillFile = path.join(tmpSkillsDir, dir, 'SKILL.md'); + assert.ok(fs.existsSync(skillFile), `SKILL.md exists in ${dir}`); + } + }); + + (hasCommands ? test : test.skip)('each SKILL.md uses /skill: syntax (not /gsd:)', () => { + const { copyCommandsAsKimiSkills } = require('../bin/install.js'); + copyCommandsAsKimiSkills(commandsSrc, tmpSkillsDir, 'gsd', 'gsd', 'kimi'); + + const skillDirs = fs.readdirSync(tmpSkillsDir); + for (const dir of skillDirs) { + const skillFile = path.join(tmpSkillsDir, dir, 'SKILL.md'); + const content = fs.readFileSync(skillFile, 'utf8'); + assert.ok(!content.includes('/gsd:'), `no /gsd: in ${dir}/SKILL.md`); + } + }); + + (hasCommands ? 
test : test.skip)('SKILL.md has kimi-style frontmatter', () => { + const { copyCommandsAsKimiSkills } = require('../bin/install.js'); + copyCommandsAsKimiSkills(commandsSrc, tmpSkillsDir, 'gsd', 'gsd', 'kimi'); + + const skillDirs = fs.readdirSync(tmpSkillsDir); + const helpDir = skillDirs.find(d => d === 'gsd-help'); + if (helpDir) { + const content = fs.readFileSync(path.join(tmpSkillsDir, helpDir, 'SKILL.md'), 'utf8'); + assert.ok(content.startsWith('---\n'), 'starts with frontmatter'); + assert.ok(content.includes('name: gsd-help'), 'has skill name in frontmatter'); + assert.ok(!content.includes('tools:'), 'no tools key in frontmatter'); + assert.ok(!content.includes('color:'), 'no color key in frontmatter'); + } + }); +}); diff --git a/tests/milestone.test.cjs b/tests/milestone.test.cjs index e2694ceaf7..c49caceef6 100644 --- a/tests/milestone.test.cjs +++ b/tests/milestone.test.cjs @@ -605,6 +605,39 @@ describe('requirements mark-complete command', () => { }); }); +// ───────────────────────────────────────────────────────────────────────────── +// milestone create auto-migration +// ───────────────────────────────────────────────────────────────────────────── + +describe('milestone create auto-migration', () => { + let tmpDir; + + beforeEach(() => { + tmpDir = createTempProject(); + }); + + afterEach(() => { + cleanup(tmpDir); + }); + + test('migrates legacy STATE.md to milestone directory on first create', () => { + // Write legacy STATE.md with content + const legacyStatePath = path.join(tmpDir, '.planning', 'STATE.md'); + fs.writeFileSync(legacyStatePath, '---\nphase: 3\n---\n# State\n\n**Status:** Executing Phase 3\n'); + + // Create first milestone + const result = runGsdTools('milestone create v1.0', tmpDir); + assert.ok(result.success, `Command failed: ${result.error}`); + + const output = JSON.parse(result.output); + assert.strictEqual(output.created, true); + + // Check milestone STATE.md exists + const msStatePath = path.join(tmpDir, '.planning', 
'milestones', 'v1.0', 'STATE.md'); + assert.ok(fs.existsSync(msStatePath)); + }); +}); + // ───────────────────────────────────────────────────────────────────────────── // validate consistency command // ───────────────────────────────────────────────────────────────────────────── diff --git a/tests/paths.test.cjs b/tests/paths.test.cjs new file mode 100644 index 0000000000..8fe227f2b6 --- /dev/null +++ b/tests/paths.test.cjs @@ -0,0 +1,450 @@ +/** + * GSD Tools Tests - paths.cjs + * + * Tests for resolvePlanningPaths, setMilestoneOverride, and milestone CLI commands. + */ + +const { test, describe, beforeEach, afterEach } = require('node:test'); +const assert = require('node:assert'); +const fs = require('fs'); +const path = require('path'); +const { runGsdTools, createTempProject, cleanup } = require('./helpers.cjs'); + +const { + resolvePlanningPaths, + setMilestoneOverride, + getMilestoneOverride, +} = require('../get-shit-done/bin/lib/paths.cjs'); + +// ─── resolvePlanningPaths — legacy mode ───────────────────────────────────── + +describe('resolvePlanningPaths — legacy mode', () => { + let tmpDir; + + beforeEach(() => { + tmpDir = createTempProject(); + setMilestoneOverride(null); + }); + + afterEach(() => { + setMilestoneOverride(null); + cleanup(tmpDir); + }); + + test('returns .planning/STATE.md for rel.state when no ACTIVE_MILESTONE', () => { + const p = resolvePlanningPaths(tmpDir); + assert.strictEqual(p.rel.state, '.planning/STATE.md'); + }); + + test('isMultiMilestone is false', () => { + const p = resolvePlanningPaths(tmpDir); + assert.strictEqual(p.isMultiMilestone, false); + }); + + test('milestone is null', () => { + const p = resolvePlanningPaths(tmpDir); + assert.strictEqual(p.milestone, null); + }); + + test('abs.state ends with .planning/STATE.md', () => { + const p = resolvePlanningPaths(tmpDir); + assert.ok(p.abs.state.endsWith(path.join('.planning', 'STATE.md')), + `expected abs.state to end with .planning/STATE.md, got ${p.abs.state}`); 
+ }); + + test('global.abs.project ends with .planning/PROJECT.md', () => { + const p = resolvePlanningPaths(tmpDir); + assert.ok(p.global.abs.project.endsWith(path.join('.planning', 'PROJECT.md')), + `expected global.abs.project to end with .planning/PROJECT.md, got ${p.global.abs.project}`); + }); +}); + +// ─── resolvePlanningPaths — multi-milestone mode ──────────────────────────── + +describe('resolvePlanningPaths — multi-milestone mode', () => { + let tmpDir; + + beforeEach(() => { + tmpDir = createTempProject(); + setMilestoneOverride(null); + // Create ACTIVE_MILESTONE file with content "v2.0" + fs.writeFileSync(path.join(tmpDir, '.planning', 'ACTIVE_MILESTONE'), 'v2.0', 'utf-8'); + }); + + afterEach(() => { + setMilestoneOverride(null); + cleanup(tmpDir); + }); + + test('returns .planning/milestones/v2.0/STATE.md for rel.state', () => { + const p = resolvePlanningPaths(tmpDir); + assert.strictEqual(p.rel.state, '.planning/milestones/v2.0/STATE.md'); + }); + + test('isMultiMilestone is true', () => { + const p = resolvePlanningPaths(tmpDir); + assert.strictEqual(p.isMultiMilestone, true); + }); + + test('milestone is "v2.0"', () => { + const p = resolvePlanningPaths(tmpDir); + assert.strictEqual(p.milestone, 'v2.0'); + }); + + test('abs.phases ends with .planning/milestones/v2.0/phases', () => { + const p = resolvePlanningPaths(tmpDir); + assert.ok( + p.abs.phases.endsWith(path.join('.planning', 'milestones', 'v2.0', 'phases')), + `expected abs.phases to end with .planning/milestones/v2.0/phases, got ${p.abs.phases}` + ); + }); + + test('global.abs.project still points to .planning/ root', () => { + const p = resolvePlanningPaths(tmpDir); + assert.ok( + p.global.abs.project.endsWith(path.join('.planning', 'PROJECT.md')), + `expected global.abs.project to end with .planning/PROJECT.md, got ${p.global.abs.project}` + ); + // Should NOT contain milestones subpath + assert.ok( + !p.global.abs.project.includes('milestones'), + 'global.abs.project should not 
contain milestones' + ); + }); + + test('global.abs.milestones still points to .planning/ root', () => { + const p = resolvePlanningPaths(tmpDir); + assert.ok( + p.global.abs.milestones.endsWith(path.join('.planning', 'MILESTONES.md')), + `expected global.abs.milestones to end with .planning/MILESTONES.md, got ${p.global.abs.milestones}` + ); + }); +}); + +// ─── resolvePlanningPaths — explicit override ─────────────────────────────── + +describe('resolvePlanningPaths — explicit override', () => { + let tmpDir; + + beforeEach(() => { + tmpDir = createTempProject(); + setMilestoneOverride(null); + }); + + afterEach(() => { + setMilestoneOverride(null); + cleanup(tmpDir); + }); + + test('milestoneOverride="hotfix" returns milestone-scoped paths regardless of ACTIVE_MILESTONE', () => { + // Write a different ACTIVE_MILESTONE to prove override takes precedence + fs.writeFileSync(path.join(tmpDir, '.planning', 'ACTIVE_MILESTONE'), 'v2.0', 'utf-8'); + + const p = resolvePlanningPaths(tmpDir, 'hotfix'); + assert.strictEqual(p.rel.state, '.planning/milestones/hotfix/STATE.md'); + assert.strictEqual(p.milestone, 'hotfix'); + assert.strictEqual(p.isMultiMilestone, true); + }); + + test('milestoneOverride="hotfix" works when no ACTIVE_MILESTONE file exists', () => { + const p = resolvePlanningPaths(tmpDir, 'hotfix'); + assert.strictEqual(p.rel.state, '.planning/milestones/hotfix/STATE.md'); + assert.strictEqual(p.milestone, 'hotfix'); + assert.strictEqual(p.isMultiMilestone, true); + }); +}); + +// ─── setMilestoneOverride ─────────────────────────────────────────────────── + +describe('setMilestoneOverride', () => { + let tmpDir; + + beforeEach(() => { + tmpDir = createTempProject(); + setMilestoneOverride(null); + }); + + afterEach(() => { + setMilestoneOverride(null); + cleanup(tmpDir); + }); + + test('setting override causes resolvePlanningPaths to return milestone-scoped paths', () => { + setMilestoneOverride('v3.0'); + const p = resolvePlanningPaths(tmpDir); + 
assert.strictEqual(p.milestone, 'v3.0'); + assert.strictEqual(p.isMultiMilestone, true); + assert.strictEqual(p.rel.state, '.planning/milestones/v3.0/STATE.md'); + setMilestoneOverride(null); + }); + + test('clearing override restores legacy paths', () => { + setMilestoneOverride('v3.0'); + const p1 = resolvePlanningPaths(tmpDir); + assert.strictEqual(p1.milestone, 'v3.0'); + + setMilestoneOverride(null); + const p2 = resolvePlanningPaths(tmpDir); + assert.strictEqual(p2.milestone, null); + assert.strictEqual(p2.isMultiMilestone, false); + assert.strictEqual(p2.rel.state, '.planning/STATE.md'); + }); + + test('getMilestoneOverride reflects current value', () => { + assert.strictEqual(getMilestoneOverride(), null); + setMilestoneOverride('v3.0'); + assert.strictEqual(getMilestoneOverride(), 'v3.0'); + setMilestoneOverride(null); + assert.strictEqual(getMilestoneOverride(), null); + }); +}); + +// ─── milestone create command (via CLI) ───────────────────────────────────── + +describe('milestone create command', () => { + let tmpDir; + + beforeEach(() => { + tmpDir = createTempProject(); + }); + + afterEach(() => { + cleanup(tmpDir); + }); + + test('creates milestone directory with STATE.md, ROADMAP.md, config.json, phases/', () => { + const result = runGsdTools('milestone create v2.0', tmpDir); + assert.ok(result.success, `Command failed: ${result.error}`); + + const output = JSON.parse(result.output); + assert.strictEqual(output.created, true); + assert.strictEqual(output.name, 'v2.0'); + + const milestoneDir = path.join(tmpDir, '.planning', 'milestones', 'v2.0'); + assert.ok(fs.existsSync(path.join(milestoneDir, 'STATE.md')), 'STATE.md should exist'); + assert.ok(fs.existsSync(path.join(milestoneDir, 'ROADMAP.md')), 'ROADMAP.md should exist'); + assert.ok(fs.existsSync(path.join(milestoneDir, 'config.json')), 'config.json should exist'); + assert.ok(fs.existsSync(path.join(milestoneDir, 'phases')), 'phases/ should exist'); + }); + + test('writes ACTIVE_MILESTONE 
file', () => { + runGsdTools('milestone create v2.0', tmpDir); + + const activeMilestone = fs.readFileSync( + path.join(tmpDir, '.planning', 'ACTIVE_MILESTONE'), 'utf-8' + ).trim(); + assert.strictEqual(activeMilestone, 'v2.0'); + }); +}); + +// ─── milestone switch command ─────────────────────────────────────────────── + +describe('milestone switch command', () => { + let tmpDir; + + beforeEach(() => { + tmpDir = createTempProject(); + // Create two milestones + runGsdTools('milestone create v1.0', tmpDir); + runGsdTools('milestone create v2.0', tmpDir); + }); + + afterEach(() => { + cleanup(tmpDir); + }); + + test('switches between milestones and updates ACTIVE_MILESTONE', () => { + // After creating v2.0, it should be active + let active = fs.readFileSync( + path.join(tmpDir, '.planning', 'ACTIVE_MILESTONE'), 'utf-8' + ).trim(); + assert.strictEqual(active, 'v2.0'); + + // Switch to v1.0 + const result = runGsdTools('milestone switch v1.0', tmpDir); + assert.ok(result.success, `Command failed: ${result.error}`); + + const output = JSON.parse(result.output); + assert.strictEqual(output.switched, true); + assert.strictEqual(output.name, 'v1.0'); + + active = fs.readFileSync( + path.join(tmpDir, '.planning', 'ACTIVE_MILESTONE'), 'utf-8' + ).trim(); + assert.strictEqual(active, 'v1.0'); + }); + + test('switch back to second milestone', () => { + runGsdTools('milestone switch v1.0', tmpDir); + const result = runGsdTools('milestone switch v2.0', tmpDir); + assert.ok(result.success, `Command failed: ${result.error}`); + + const active = fs.readFileSync( + path.join(tmpDir, '.planning', 'ACTIVE_MILESTONE'), 'utf-8' + ).trim(); + assert.strictEqual(active, 'v2.0'); + }); + + test('switch warns when current milestone has in-progress work', () => { + // beforeEach already created v1.0 and v2.0; recreate to get fresh STATE.md + runGsdTools('milestone create v1.0', tmpDir); + + // Set v1.0 STATE.md to have in-progress status + const v1StatePath = path.join(tmpDir, 
'.planning', 'milestones', 'v1.0', 'STATE.md'); + let stateContent = fs.readFileSync(v1StatePath, 'utf-8'); + stateContent = stateContent.replace('**Status:** Ready to plan', '**Status:** Executing Phase 2'); + fs.writeFileSync(v1StatePath, stateContent); + + // Recreate second milestone (makes v2.0 active) + runGsdTools('milestone create v2.0', tmpDir); + + // Switch back to v1.0 first (so v1.0 is active) + runGsdTools('milestone switch v1.0', tmpDir); + + // Now set v1.0 to executing again and switch to v2.0 + stateContent = fs.readFileSync(v1StatePath, 'utf-8'); + stateContent = stateContent.replace(/\*\*Status:\*\*.*/, '**Status:** Executing Phase 3'); + fs.writeFileSync(v1StatePath, stateContent); + + const result = runGsdTools('milestone switch v2.0', tmpDir); + assert.ok(result.success, `Command failed: ${result.error}`); + + const output = JSON.parse(result.output); + assert.strictEqual(output.switched, true); + assert.strictEqual(output.has_in_progress, true); + assert.strictEqual(output.previous_milestone, 'v1.0'); + assert.ok(output.previous_status.includes('Executing')); + }); + + test('switch has no warning when current milestone is idle', () => { + // beforeEach created v1.0 then v2.0; v2.0 is active with "Ready to plan" status + // Switch to v1.0: previous=v2.0 with "Ready to plan" — not in-progress + const result = runGsdTools('milestone switch v1.0', tmpDir); + assert.ok(result.success, `Command failed: ${result.error}`); + + const output = JSON.parse(result.output); + assert.strictEqual(output.switched, true); + assert.strictEqual(output.has_in_progress, false); + assert.strictEqual(output.previous_milestone, 'v2.0'); + }); + + test('switch to same milestone has no in-progress warning', () => { + // beforeEach created v1.0 then v2.0; v2.0 is active + // Switch to v2.0 (same as current): previousMilestone === name, so no in-progress check + const result = runGsdTools('milestone switch v2.0', tmpDir); + assert.ok(result.success, `Command failed: 
${result.error}`); + + const output = JSON.parse(result.output); + assert.strictEqual(output.switched, true); + assert.strictEqual(output.has_in_progress, false); + }); +}); + +// ─── milestone list command ───────────────────────────────────────────────── + +describe('milestone list command', () => { + let tmpDir; + + beforeEach(() => { + tmpDir = createTempProject(); + runGsdTools('milestone create v1.0', tmpDir); + runGsdTools('milestone create v2.0', tmpDir); + }); + + afterEach(() => { + cleanup(tmpDir); + }); + + test('lists both milestones with correct active flag', () => { + const result = runGsdTools('milestone list', tmpDir); + assert.ok(result.success, `Command failed: ${result.error}`); + + const output = JSON.parse(result.output); + assert.strictEqual(output.count, 2, 'should have 2 milestones'); + + // Find milestones by name (note: may also include auto-migrated 'initial') + const names = output.milestones.map(m => m.name); + assert.ok(names.includes('v1.0'), 'v1.0 should be listed'); + assert.ok(names.includes('v2.0'), 'v2.0 should be listed'); + + // v2.0 was created last, so it should be active + const v2 = output.milestones.find(m => m.name === 'v2.0'); + assert.strictEqual(v2.active, true, 'v2.0 should be active'); + + const v1 = output.milestones.find(m => m.name === 'v1.0'); + assert.strictEqual(v1.active, false, 'v1.0 should not be active'); + }); +}); + +// ─── milestone status command ─────────────────────────────────────────────── + +describe('milestone status command', () => { + let tmpDir; + + beforeEach(() => { + tmpDir = createTempProject(); + }); + + afterEach(() => { + cleanup(tmpDir); + }); + + test('reports legacy mode when no active milestone', () => { + const result = runGsdTools('milestone status', tmpDir); + assert.ok(result.success, `Command failed: ${result.error}`); + + const output = JSON.parse(result.output); + assert.strictEqual(output.active, null); + assert.strictEqual(output.is_multi_milestone, false); + 
assert.strictEqual(output.state_path, '.planning/STATE.md'); + }); + + test('reports active milestone name when one is set', () => { + runGsdTools('milestone create v2.0', tmpDir); + + const result = runGsdTools('milestone status', tmpDir); + assert.ok(result.success, `Command failed: ${result.error}`); + + const output = JSON.parse(result.output); + assert.strictEqual(output.active, 'v2.0'); + assert.strictEqual(output.is_multi_milestone, true); + assert.strictEqual(output.state_path, '.planning/milestones/v2.0/STATE.md'); + }); +}); + +// ─── --milestone flag integration ─────────────────────────────────────────── + +describe('--milestone flag integration', () => { + let tmpDir; + + beforeEach(() => { + tmpDir = createTempProject(); + // Create a milestone so the directory exists + runGsdTools('milestone create v2.0', tmpDir); + }); + + afterEach(() => { + cleanup(tmpDir); + }); + + test('state load --milestone v2.0 reads from milestone-scoped directory', () => { + const result = runGsdTools('state load --milestone v2.0', tmpDir); + assert.ok(result.success, `Command failed: ${result.error}`); + + const output = JSON.parse(result.output); + // The state should be loaded (state_exists depends on whether the milestone has a STATE.md) + // Since milestone create writes STATE.md, it should exist + assert.strictEqual(output.state_exists, true, 'state should exist in milestone directory'); + }); + + test('milestone status --milestone v2.0 shows v2.0 paths', () => { + // Switch away from v2.0 first + runGsdTools('milestone create v3.0', tmpDir); + + const result = runGsdTools('milestone status --milestone v2.0', tmpDir); + assert.ok(result.success, `Command failed: ${result.error}`); + + const output = JSON.parse(result.output); + assert.strictEqual(output.state_path, '.planning/milestones/v2.0/STATE.md'); + }); +}); diff --git a/tests/phase.test.cjs b/tests/phase.test.cjs index f1888eb3c1..f564a0dfa6 100644 --- a/tests/phase.test.cjs +++ b/tests/phase.test.cjs @@ 
-356,6 +356,52 @@ objective: Manual review needed const output = JSON.parse(result.output); assert.strictEqual(output.error, 'Phase not found', 'should report phase not found'); }); + + test('plan with type and depends_on frontmatter', () => { + const phaseDir = path.join(tmpDir, '.planning', 'phases', '03-api'); + fs.mkdirSync(phaseDir, { recursive: true }); + + fs.writeFileSync( + path.join(phaseDir, '03-01-PLAN.md'), + `--- +wave: 1 +type: tdd +depends_on: ["03-01"] +--- + +## Task 1: Setup +` + ); + + const result = runGsdTools('phase-plan-index 03', tmpDir); + assert.ok(result.success, `Command failed: ${result.error}`); + + const output = JSON.parse(result.output); + assert.strictEqual(output.plans[0].type, 'tdd', 'type should be tdd'); + assert.deepStrictEqual(output.plans[0].depends_on, ['03-01'], 'depends_on should be parsed'); + }); + + test('plan without type/depends_on defaults correctly', () => { + const phaseDir = path.join(tmpDir, '.planning', 'phases', '03-api'); + fs.mkdirSync(phaseDir, { recursive: true }); + + fs.writeFileSync( + path.join(phaseDir, '03-01-PLAN.md'), + `--- +wave: 1 +--- + +## Task 1: Setup +` + ); + + const result = runGsdTools('phase-plan-index 03', tmpDir); + assert.ok(result.success, `Command failed: ${result.error}`); + + const output = JSON.parse(result.output); + assert.strictEqual(output.plans[0].type, 'execute', 'type should default to execute'); + assert.deepStrictEqual(output.plans[0].depends_on, [], 'depends_on should default to empty array'); + }); }); diff --git a/tests/verify-health.test.cjs b/tests/verify-health.test.cjs index 3bf48b68f7..f3b7755fe0 100644 --- a/tests/verify-health.test.cjs +++ b/tests/verify-health.test.cjs @@ -255,6 +255,26 @@ describe('validate health command', () => { ); }); + test('does not warn when config.json has adaptive model_profile', () => { + writeMinimalProjectMd(tmpDir); + writeMinimalRoadmap(tmpDir, ['1']); + writeMinimalStateMd(tmpDir); + fs.writeFileSync( + path.join(tmpDir, 
'.planning', 'config.json'), + JSON.stringify({ model_profile: 'adaptive' }) + ); + fs.mkdirSync(path.join(tmpDir, '.planning', 'phases', '01-a'), { recursive: true }); + + const result = runGsdTools('validate health', tmpDir); + assert.ok(result.success, `Command failed: ${result.error}`); + + const output = JSON.parse(result.output); + assert.ok( + !output.warnings.some(w => w.code === 'W004'), + `Should NOT have W004 for adaptive profile: ${JSON.stringify(output.warnings)}` + ); + }); + // ─── Check 6: Phase directory naming (NN-name format) ───────────────────── test('warns about incorrectly named phase directories', () => {