diff --git a/lib/mock-github.cjs b/lib/mock-github.cjs new file mode 100644 index 0000000..1d707db --- /dev/null +++ b/lib/mock-github.cjs @@ -0,0 +1,386 @@ +'use strict'; + +/** + * lib/mock-github.cjs — GitHub API interceptor for tests + * + * Intercepts `child_process.execSync` calls that invoke the `gh` CLI, returning + * pre-baked fixture responses instead of making real network calls. + * + * Usage: + * + * const mockGitHub = require('./lib/mock-github.cjs'); + * + * // Activate before test (optionally pass a scenario name) + * mockGitHub.activate(); // base fixtures only + * mockGitHub.activate('pr-error'); // load test/fixtures/github/pr-error/ overrides + * + * // ... run code that calls gh CLI ... + * + * const calls = mockGitHub.getCallLog(); // inspect what was called + * mockGitHub.deactivate(); // restore real execSync + * + * Scenario support: + * Scenarios live in test/fixtures/github// and override base fixtures. + * Any fixture key present in the scenario directory takes precedence over the + * corresponding base fixture. This allows targeted per-test overrides. + * + * Inline overrides (highest precedence): + * mockGitHub.setResponse('gh issue view', '{"number":999}'); + * + * Call log format: + * Each entry: { cmd, fixture, returnValue, timestamp } + * + * Safety: + * - Re-activating without deactivating first is safe (auto-deactivates). + * - Module never makes real gh CLI calls. + * - Fixture load errors throw descriptive Error messages. + */ + +const path = require('path'); +const fs = require('fs'); +const childProcess = require('child_process'); + +// --------------------------------------------------------------------------- +// Path resolution +// --------------------------------------------------------------------------- + +/** + * Resolve the fixtures base directory relative to this file. + * Works whether installed as a package or used in-repo. 
+ */ +function resolveFixturesDir() { + // Walk up from lib/ to find test/fixtures/github/ + const libDir = __dirname; + const repoRoot = path.resolve(libDir, '..'); + return path.join(repoRoot, 'test', 'fixtures', 'github'); +} + +// --------------------------------------------------------------------------- +// Fixture loading +// --------------------------------------------------------------------------- + +/** + * Load a fixture file and return its contents as a string. + * Returns the raw file content — callers receive exactly what execSync would. + * + * For fixtures whose JSON root is a string (e.g. `"https://..."`) we strip + * the outer quotes since execSync output never includes JSON string quoting. + * For fixtures whose JSON root is an object or array, we return the raw JSON. + * + * @param {string} fixtureKey - e.g. "issue-view", "pr-create" + * @param {string} baseDir - resolved fixtures base directory + * @param {string|null} scenarioDir - resolved scenario override directory (or null) + * @returns {string} fixture content as execSync would return it + * @throws {Error} if fixture file not found in either location + */ +function loadFixture(fixtureKey, baseDir, scenarioDir) { + const filename = `${fixtureKey}.json`; + + // Scenario directory takes precedence + if (scenarioDir) { + const scenarioPath = path.join(scenarioDir, filename); + if (fs.existsSync(scenarioPath)) { + return parseFixtureFile(scenarioPath, fixtureKey); + } + } + + // Base fixture + const basePath = path.join(baseDir, filename); + if (fs.existsSync(basePath)) { + return parseFixtureFile(basePath, fixtureKey); + } + + throw new Error( + `mock-github: fixture not found: "${fixtureKey}" (looked for ${filename} in ${baseDir}${scenarioDir ? ` and ${scenarioDir}` : ''})` + ); +} + +/** + * Read a fixture file and convert its content to execSync-compatible output. 
+ * + * JSON strings (root is a quoted string) are unwrapped: `"foo"` → `foo` + * JSON objects/arrays are returned as compact JSON strings. + * Empty strings (`""`) are returned as `""`. + * + * @param {string} filePath - absolute path to fixture .json file + * @param {string} fixtureKey - used in error messages + * @returns {string} + */ +function parseFixtureFile(filePath, fixtureKey) { + let raw; + try { + raw = fs.readFileSync(filePath, 'utf-8').trim(); + } catch (err) { + throw new Error(`mock-github: failed to read fixture "${fixtureKey}" at ${filePath}: ${err.message}`); + } + + let parsed; + try { + parsed = JSON.parse(raw); + } catch (err) { + throw new Error(`mock-github: fixture "${fixtureKey}" is not valid JSON (${filePath}): ${err.message}`); + } + + // Unwrap JSON string values (execSync output is never JSON-encoded strings) + if (typeof parsed === 'string') { + return parsed; + } + + // Objects and arrays: return compact JSON (callers parse with JSON.parse) + return JSON.stringify(parsed); +} + +// --------------------------------------------------------------------------- +// Command routing +// --------------------------------------------------------------------------- + +/** + * Route table: ordered list of [pattern, fixtureKey] pairs. + * First match wins. Patterns are matched against the full command string. + * + * Built-in responses (not loaded from fixtures) are also handled in routeCommand(). 
+ */ +const ROUTE_TABLE = [ + // Issue operations + [/\bgh issue view\b/, 'issue-view'], + [/\bgh issue list\b/, 'issue-list'], + [/\bgh issue comment\b/, 'issue-comment'], + [/\bgh issue edit\b/, 'issue-edit'], + + // Milestone operations (order matters: PATCH before GET) + [/\bgh api\b.*\/milestones\/\d+.*--method PATCH/, 'milestone-close'], + [/\bgh api\b.*--method POST.*\/milestones/, 'milestone-create'], + [/\bgh api\b.*\/milestones\b.*--method POST/, 'milestone-create'], + [/\bgh api repos\/.*\/milestones\/\d+/, 'milestone-view'], + + // Label operations + [/\bgh label create\b/, 'label-create'], + [/\bgh label list\b/, 'label-list'], + + // PR operations + [/\bgh pr create\b/, 'pr-create'], + [/\bgh pr view\b/, 'pr-view'], + + // Rate limit + [/\bgh api rate_limit\b/, 'rate-limit'], + + // Board / GraphQL operations (order matters: specific mutations before generic graphql) + [/\bgh api graphql\b.*updateProjectV2ItemFieldValue/, 'graphql-board-mutation'], + [/\bgh api graphql\b.*discussionCategories/, 'repo-meta'], + [/\bgh api graphql\b.*createDiscussion/, 'discussion-create'], + [/\bgh project item-add\b/, 'board-item'], +]; + +/** + * Built-in responses — returned directly without loading a fixture file. + * These cover repo identity and user queries that are near-universal. + */ +const BUILTINS = [ + [/\bgh repo view\b/, 'snipcodeit/mgw'], + [/\bgh api user\b/, '{"login":"snipcodeit"}'], + [/\bgh api\b.*\/user\b/, '{"login":"snipcodeit"}'], +]; + +/** + * Find the matching fixture key or builtin value for a command string. + * + * @param {string} cmd - the execSync command string + * @param {Map} inlineOverrides - per-command inline overrides + * @returns {{ type: 'fixture'|'builtin'|'empty', key?: string, value?: string }} + */ +function routeCommand(cmd, inlineOverrides) { + // 1. 
Inline overrides (highest precedence) — match by prefix/substring + for (const [pattern, value] of inlineOverrides) { + if (cmd.includes(pattern)) { + return { type: 'builtin', key: pattern, value }; + } + } + + // 2. Builtin responses + for (const [pattern, value] of BUILTINS) { + if (pattern.test(cmd)) { + return { type: 'builtin', key: String(pattern), value }; + } + } + + // 3. Route table → fixture key + for (const [pattern, fixtureKey] of ROUTE_TABLE) { + if (pattern.test(cmd)) { + return { type: 'fixture', key: fixtureKey }; + } + } + + // 4. Default: empty string (unknown command) + return { type: 'empty', key: null }; +} + +// --------------------------------------------------------------------------- +// State +// --------------------------------------------------------------------------- + +/** The original child_process.execSync before any mock was installed */ +let _originalExecSync = null; + +/** Whether the mock is currently active */ +let _active = false; + +/** Ordered log of intercepted calls */ +let _callLog = []; + +/** Resolved path to base fixtures directory */ +let _baseDir = null; + +/** Resolved path to scenario override directory (or null) */ +let _scenarioDir = null; + +/** Per-command inline overrides: Map (pattern string → return value) */ +let _inlineOverrides = new Map(); + +// --------------------------------------------------------------------------- +// Core API +// --------------------------------------------------------------------------- + +/** + * Activate the mock. Replaces child_process.execSync with an interceptor. + * + * Safe to call when already active — deactivates first, then re-activates. + * + * @param {string} [scenario] - Optional scenario name. If provided, fixtures from + * `test/fixtures/github//` override the base fixtures. 
+ * @throws {Error} if the fixtures base directory does not exist + */ +function activate(scenario) { + if (_active) { + deactivate(); + } + + _baseDir = resolveFixturesDir(); + + if (!fs.existsSync(_baseDir)) { + throw new Error( + `mock-github: fixtures directory not found: ${_baseDir}\n` + + 'Create test/fixtures/github/ with fixture JSON files before activating the mock.' + ); + } + + if (scenario) { + _scenarioDir = path.join(_baseDir, scenario); + if (!fs.existsSync(_scenarioDir)) { + throw new Error( + `mock-github: scenario directory not found: ${_scenarioDir}` + ); + } + } else { + _scenarioDir = null; + } + + _callLog = []; + _inlineOverrides = new Map(); + + // Store original and install interceptor + _originalExecSync = childProcess.execSync; + + childProcess.execSync = function mockExecSync(cmd, _opts) { + const route = routeCommand(cmd, _inlineOverrides); + + let returnValue; + let fixtureKey; + + if (route.type === 'builtin') { + returnValue = route.value; + fixtureKey = route.key; + } else if (route.type === 'fixture') { + returnValue = loadFixture(route.key, _baseDir, _scenarioDir); + fixtureKey = route.key; + } else { + returnValue = ''; + fixtureKey = null; + } + + _callLog.push({ + cmd, + fixture: fixtureKey, + returnValue, + timestamp: new Date().toISOString(), + }); + + return returnValue; + }; + + _active = true; +} + +/** + * Deactivate the mock. Restores the original child_process.execSync. + * Safe to call when not active (no-op). + */ +function deactivate() { + if (!_active) return; + + childProcess.execSync = _originalExecSync; + _originalExecSync = null; + _active = false; + _baseDir = null; + _scenarioDir = null; + _inlineOverrides = new Map(); + // Note: call log is preserved after deactivation — callers inspect it after the test +} + +/** + * Return the ordered array of intercepted call entries since the last activate(). 
+ * Each entry: { cmd, fixture, returnValue, timestamp } + * + * @returns {Array<{cmd: string, fixture: string|null, returnValue: string, timestamp: string}>} + */ +function getCallLog() { + return _callLog.slice(); // defensive copy +} + +/** + * Clear the call log without deactivating the mock. + * Useful for resetting between sub-scenarios in a single test. + */ +function clearCallLog() { + _callLog = []; +} + +/** + * Set an inline response override for commands matching the given pattern string. + * The pattern is matched with String.prototype.includes() against the full command. + * Inline overrides take precedence over all other routing (builtins and fixture table). + * + * Must be called after activate(). + * + * @param {string} cmdPattern - Substring to match in the command string + * @param {string} returnValue - Value to return when the pattern matches + * @throws {Error} if called before activate() + */ +function setResponse(cmdPattern, returnValue) { + if (!_active) { + throw new Error('mock-github: setResponse() called before activate(). Call activate() first.'); + } + _inlineOverrides.set(cmdPattern, returnValue); +} + +/** + * Whether the mock is currently active. + * Useful for guard assertions in test setup/teardown. 
+ * + * @returns {boolean} + */ +function isActive() { + return _active; +} + +// --------------------------------------------------------------------------- +// Exports +// --------------------------------------------------------------------------- + +module.exports = { + activate, + deactivate, + getCallLog, + clearCallLog, + setResponse, + isActive, +}; diff --git a/lib/mock-gsd-agent.cjs b/lib/mock-gsd-agent.cjs new file mode 100644 index 0000000..e537a0a --- /dev/null +++ b/lib/mock-gsd-agent.cjs @@ -0,0 +1,367 @@ +'use strict'; + +/** + * lib/mock-gsd-agent.cjs — Fake GSD agent runner for tests + * + * Intercepts Task() spawns in MGW command tests, returning configurable + * fixture outputs and recording spawn calls for assertion. + * + * Usage: + * + * const mockAgent = require('./lib/mock-gsd-agent.cjs'); + * + * // Activate before test (optionally pass a scenario name) + * mockAgent.activate(); // base fixtures only + * mockAgent.activate('planner-error'); // load test/fixtures/agents/planner-error/ overrides + * + * // ... call spawnStub() where code would call Task() ... + * const output = mockAgent.spawnStub({ + * subagent_type: 'gsd-planner', + * prompt: 'Plan phase 47...', + * model: 'inherit', + * description: 'Plan Phase 47' + * }); + * + * const calls = mockAgent.getCallLog(); // inspect what was spawned + * mockAgent.assertSpawned('gsd-planner'); // assert a specific agent type was used + * mockAgent.deactivate(); // clean up + * + * Scenario support: + * Scenarios live in test/fixtures/agents// and override base fixtures. + * Any fixture file present in the scenario directory takes precedence over the + * corresponding base fixture. This allows targeted per-test overrides. 
+ * + * Inline overrides (highest precedence): + * mockAgent.setResponse('gsd-planner', '## PLANNING COMPLETE\n...'); + * + * Call log format: + * Each entry: { subagent_type, prompt, model, description, output, timestamp } + * + * Safety: + * - Re-activating without deactivating first is safe (auto-deactivates). + * - Fixture load errors throw descriptive Error messages. + * - spawnStub() throws if called before activate(). + * - All state is module-local — safe to require multiple times. + */ + +const path = require('path'); +const fs = require('fs'); +const assert = require('assert'); + +// --------------------------------------------------------------------------- +// Path resolution +// --------------------------------------------------------------------------- + +/** + * Resolve the fixtures base directory relative to this file. + * Works whether installed as a package or used in-repo. + * + * @returns {string} Absolute path to test/fixtures/agents/ + */ +function resolveFixturesDir() { + // Walk up from lib/ to find test/fixtures/agents/ + const libDir = __dirname; + const repoRoot = path.resolve(libDir, '..'); + return path.join(repoRoot, 'test', 'fixtures', 'agents'); +} + +// --------------------------------------------------------------------------- +// Fixture loading +// --------------------------------------------------------------------------- + +/** + * Load a fixture file and return its contents as a string. + * Returns the raw fixture content — callers receive exactly what the agent "returned". + * + * For fixtures whose JSON root is a string (e.g. `"## PLANNING COMPLETE\n..."`) we + * strip the outer quotes since agent outputs are plain text, not JSON-encoded strings. + * For fixtures whose JSON root is an object or array, we return compact JSON. + * + * @param {string} agentType - e.g. 
"gsd-planner", "general-purpose" + * @param {string} baseDir - resolved fixtures base directory + * @param {string|null} scenarioDir - resolved scenario override directory (or null) + * @returns {string} fixture content as the agent would return it + * @throws {Error} if fixture file not found in either location + */ +function loadFixture(agentType, baseDir, scenarioDir) { + const filename = `${agentType}.json`; + + // Scenario directory takes precedence + if (scenarioDir) { + const scenarioPath = path.join(scenarioDir, filename); + if (fs.existsSync(scenarioPath)) { + return parseFixtureFile(scenarioPath, agentType); + } + } + + // Base fixture + const basePath = path.join(baseDir, filename); + if (fs.existsSync(basePath)) { + return parseFixtureFile(basePath, agentType); + } + + // No fixture found — return empty string (unknown agent type) + return ''; +} + +/** + * Read a fixture file and convert its content to agent-output-compatible string. + * + * JSON strings (root is a quoted string) are unwrapped: `"foo"` → `foo` + * JSON objects/arrays are returned as compact JSON strings. + * Empty strings (`""`) are returned as `""`. 
+ * + * @param {string} filePath - absolute path to fixture .json file + * @param {string} agentType - used in error messages + * @returns {string} + */ +function parseFixtureFile(filePath, agentType) { + let raw; + try { + raw = fs.readFileSync(filePath, 'utf-8').trim(); + } catch (err) { + throw new Error(`mock-gsd-agent: failed to read fixture "${agentType}" at ${filePath}: ${err.message}`); + } + + let parsed; + try { + parsed = JSON.parse(raw); + } catch (err) { + throw new Error(`mock-gsd-agent: fixture "${agentType}" is not valid JSON (${filePath}): ${err.message}`); + } + + // Unwrap JSON string values (agent outputs are plain text, not JSON-encoded strings) + if (typeof parsed === 'string') { + return parsed; + } + + // Objects and arrays: return compact JSON (callers parse with JSON.parse) + return JSON.stringify(parsed); +} + +// --------------------------------------------------------------------------- +// State +// --------------------------------------------------------------------------- + +/** Whether the mock is currently active */ +let _active = false; + +/** Ordered log of recorded spawn calls */ +let _callLog = []; + +/** Resolved path to base fixtures directory */ +let _baseDir = null; + +/** Resolved path to scenario override directory (or null) */ +let _scenarioDir = null; + +/** Per-agent-type inline overrides: Map (agentType → output string) */ +let _inlineOverrides = new Map(); + +// --------------------------------------------------------------------------- +// Core API +// --------------------------------------------------------------------------- + +/** + * Activate the mock. Sets up fixture directory resolution and resets state. + * + * Safe to call when already active — deactivates first, then re-activates. + * + * @param {string} [scenario] - Optional scenario name. If provided, fixtures from + * `test/fixtures/agents//` override the base fixtures. 
+ * @throws {Error} if the fixtures base directory does not exist + */ +function activate(scenario) { + if (_active) { + deactivate(); + } + + _baseDir = resolveFixturesDir(); + + if (!fs.existsSync(_baseDir)) { + throw new Error( + `mock-gsd-agent: fixtures directory not found: ${_baseDir}\n` + + 'Create test/fixtures/agents/ with fixture JSON files before activating the mock.' + ); + } + + if (scenario) { + _scenarioDir = path.join(_baseDir, scenario); + if (!fs.existsSync(_scenarioDir)) { + throw new Error( + `mock-gsd-agent: scenario directory not found: ${_scenarioDir}` + ); + } + } else { + _scenarioDir = null; + } + + _callLog = []; + _inlineOverrides = new Map(); + _active = true; +} + +/** + * Deactivate the mock. Clears scenario dir and inline overrides. + * Preserves the call log — callers inspect it after the test. + * Safe to call when not active (no-op). + */ +function deactivate() { + if (!_active) return; + + _active = false; + _baseDir = null; + _scenarioDir = null; + _inlineOverrides = new Map(); + // Note: _callLog is preserved after deactivation +} + +/** + * Simulate a Task() spawn. Records the call and returns fixture output. + * + * This is the test-side replacement for the Task() orchestrator primitive. + * Call it in test code wherever the production path would call Task(). + * + * Output resolution order (highest to lowest precedence): + * 1. Inline override set via setResponse(agentType, output) + * 2. Scenario fixture: test/fixtures/agents//.json + * 3. Base fixture: test/fixtures/agents/.json + * 4. Empty string (if no fixture found) + * + * @param {object} config - Spawn configuration + * @param {string} config.subagent_type - Agent type (e.g. "gsd-planner", "gsd-executor") + * @param {string} [config.prompt] - The prompt that would be sent to the agent + * @param {string} [config.model] - Model identifier (e.g. 
"inherit", "sonnet") + * @param {string} [config.description] - Human-readable spawn description + * @returns {string} The fixture output representing what the agent "returned" + * @throws {Error} if called before activate() + */ +function spawnStub(config) { + if (!_active) { + throw new Error('mock-gsd-agent: spawnStub() called before activate(). Call activate() first.'); + } + + const { + subagent_type, + prompt = '', + model = '', + description = '', + } = config || {}; + + if (!subagent_type) { + throw new Error('mock-gsd-agent: spawnStub() requires config.subagent_type'); + } + + // Output resolution: inline override > scenario fixture > base fixture > empty + let output; + if (_inlineOverrides.has(subagent_type)) { + output = _inlineOverrides.get(subagent_type); + } else { + output = loadFixture(subagent_type, _baseDir, _scenarioDir); + } + + _callLog.push({ + subagent_type, + prompt, + model, + description, + output, + timestamp: new Date().toISOString(), + }); + + return output; +} + +/** + * Return the ordered array of recorded spawn entries since the last activate(). + * Each entry: { subagent_type, prompt, model, description, output, timestamp } + * + * @returns {Array<{subagent_type: string, prompt: string, model: string, description: string, output: string, timestamp: string}>} + */ +function getCallLog() { + return _callLog.slice(); // defensive copy +} + +/** + * Clear the call log without deactivating the mock. + * Useful for resetting between sub-scenarios in a single test. + */ +function clearCallLog() { + _callLog = []; +} + +/** + * Set an inline response override for a given agent type. + * Inline overrides take precedence over all fixture loading. + * + * Must be called after activate(). + * + * @param {string} agentType - Agent type to override (e.g. 
"gsd-planner") + * @param {string} output - Output string to return when this agent type is spawned + * @throws {Error} if called before activate() + */ +function setResponse(agentType, output) { + if (!_active) { + throw new Error('mock-gsd-agent: setResponse() called before activate(). Call activate() first.'); + } + _inlineOverrides.set(agentType, output); +} + +/** + * Whether the mock is currently active. + * Useful for guard assertions in test setup/teardown. + * + * @returns {boolean} + */ +function isActive() { + return _active; +} + +/** + * Return the number of spawn calls recorded. + * + * @param {string} [agentType] - If provided, count only calls with this subagent_type. + * If omitted, return total spawn count. + * @returns {number} + */ +function getSpawnCount(agentType) { + if (agentType === undefined) { + return _callLog.length; + } + return _callLog.filter(entry => entry.subagent_type === agentType).length; +} + +/** + * Assert that a specific agent type was spawned at least once. + * Throws an AssertionError with a descriptive message if not. 
+ * + * Useful as a single-line assertion in tests: + * mockAgent.assertSpawned('gsd-planner'); + * + * @param {string} agentType - Agent type that should have been spawned + * @throws {AssertionError} if the agent type was not spawned + */ +function assertSpawned(agentType) { + const count = getSpawnCount(agentType); + assert.ok( + count > 0, + `mock-gsd-agent: assertSpawned('${agentType}') failed — no calls recorded for this agent type.\n` + + `Recorded spawns: [${_callLog.map(e => e.subagent_type).join(', ')}]` + ); +} + +// --------------------------------------------------------------------------- +// Exports +// --------------------------------------------------------------------------- + +module.exports = { + activate, + deactivate, + spawnStub, + getCallLog, + clearCallLog, + setResponse, + isActive, + getSpawnCount, + assertSpawned, +}; diff --git a/lib/state.cjs b/lib/state.cjs index 6487c6b..cffc1e9 100644 --- a/lib/state.cjs +++ b/lib/state.cjs @@ -791,6 +791,98 @@ function topologicalSort(issues, links) { return sorted; } +/** + * Detect the mgw:project STATE_CLASS from five signals. + * + * Encapsulates the classification logic from workflows/detect-state.md so that + * it is testable in JavaScript without spawning shell processes. + * + * Signals: + * P — .planning/PROJECT.md exists + * R — .planning/ROADMAP.md exists + * S — .planning/STATE.md exists + * M — .mgw/project.json exists + * G — Count of GitHub milestones (caller is responsible for fetching this) + * + * State classes: + * Fresh — no signals active + * GSD-Only — P=true, M=false, G=0 + * GSD-Mid-Exec — P=true, R=true (or S=true), M=false, G=0 + * Aligned — M=true, G>0, local milestone count consistent with G + * Diverged — M=true, G>0, local milestone count inconsistent with G + * Extend — M=true, G>0, all milestones complete (current_milestone > count) + * + * Consistency check simplification: + * The bash version calls `gh api` a second time to do a name-overlap check. 
+ * This JS version uses only count proximity (|local - G| <= 1) to keep the + * function pure and testable. Callers that need name-overlap validation should + * implement it outside this function. + * + * @param {object} [options={}] + * @param {string} [options.repoRoot] - Absolute path to repo root. Defaults to process.cwd(). + * @param {number} [options.githubMilestoneCount=0] - Count of open GitHub milestones (G signal). + * @returns {{ stateClass: string, signals: { P: boolean, R: boolean, S: boolean, M: boolean, G: number } }} + */ +function detectProjectState(options = {}) { + const repoRoot = options.repoRoot || process.cwd(); + const G = typeof options.githubMilestoneCount === 'number' ? options.githubMilestoneCount : 0; + + // Read file-system signals + const P = fs.existsSync(path.join(repoRoot, '.planning', 'PROJECT.md')); + const R = fs.existsSync(path.join(repoRoot, '.planning', 'ROADMAP.md')); + const S = fs.existsSync(path.join(repoRoot, '.planning', 'STATE.md')); + const M = fs.existsSync(path.join(repoRoot, '.mgw', 'project.json')); + + const signals = { P, R, S, M, G }; + + // --- Classification --- + if (M && G > 0) { + // Read project.json to check if all milestones are done (Extend check) + let projectData = null; + try { + const raw = fs.readFileSync(path.join(repoRoot, '.mgw', 'project.json'), 'utf-8'); + projectData = JSON.parse(raw); + } catch (_e) { + // Corrupt or missing project.json despite M=true — treat as Diverged + return { stateClass: 'Diverged', signals }; + } + + const milestones = Array.isArray(projectData.milestones) ? projectData.milestones : []; + const currentMilestone = typeof projectData.current_milestone === 'number' + ? 
projectData.current_milestone + : 1; + + // Extend: current_milestone pointer exceeds array length (all done) + const allComplete = milestones.length > 0 && currentMilestone > milestones.length; + if (allComplete) { + return { stateClass: 'Extend', signals }; + } + + // Aligned vs Diverged: count proximity check + const localCount = milestones.length; + const countDiff = Math.abs(localCount - G); + if (countDiff <= 1) { + return { stateClass: 'Aligned', signals }; + } else { + return { stateClass: 'Diverged', signals }; + } + } + + if (!M && G === 0) { + // No MGW state, no GitHub milestones — GSD signals determine class + if (P && (R || S)) { + return { stateClass: 'GSD-Mid-Exec', signals }; + } + if (P) { + return { stateClass: 'GSD-Only', signals }; + } + return { stateClass: 'Fresh', signals }; + } + + // Edge case: M=true but G=0, or M=false but G>0 → treat as Fresh + return { stateClass: 'Fresh', signals }; +} + module.exports = { getMgwDir, getActiveDir, @@ -814,4 +906,5 @@ module.exports = { parseDependencies, storeDependencies, topologicalSort, + detectProjectState, }; diff --git a/package-lock.json b/package-lock.json index 87ad986..02c7d22 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "@snipcodeit/mgw", - "version": "0.3.0", + "version": "0.5.1", "lockfileVersion": 3, "requires": true, "packages": { "": { "name": "@snipcodeit/mgw", - "version": "0.3.0", + "version": "0.5.1", "hasInstallScript": true, "license": "MIT", "dependencies": { @@ -18,7 +18,8 @@ "devDependencies": { "@eslint/js": "^10.0.1", "eslint": "^10.0.2", - "pkgroll": "^2.26.3" + "pkgroll": "^2.26.3", + "vitest": "^2.1.9" }, "engines": { "node": ">=18.0.0" @@ -1211,6 +1212,129 @@ "dev": true, "license": "MIT" }, + "node_modules/@vitest/expect": { + "version": "2.1.9", + "resolved": "https://registry.npmjs.org/@vitest/expect/-/expect-2.1.9.tgz", + "integrity": 
"sha512-UJCIkTBenHeKT1TTlKMJWy1laZewsRIzYighyYiJKZreqtdxSos/S1t+ktRMQWu2CKqaarrkeszJx1cgC5tGZw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@vitest/spy": "2.1.9", + "@vitest/utils": "2.1.9", + "chai": "^5.1.2", + "tinyrainbow": "^1.2.0" + }, + "funding": { + "url": "https://opencollective.com/vitest" + } + }, + "node_modules/@vitest/mocker": { + "version": "2.1.9", + "resolved": "https://registry.npmjs.org/@vitest/mocker/-/mocker-2.1.9.tgz", + "integrity": "sha512-tVL6uJgoUdi6icpxmdrn5YNo3g3Dxv+IHJBr0GXHaEdTcw3F+cPKnsXFhli6nO+f/6SDKPHEK1UN+k+TQv0Ehg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@vitest/spy": "2.1.9", + "estree-walker": "^3.0.3", + "magic-string": "^0.30.12" + }, + "funding": { + "url": "https://opencollective.com/vitest" + }, + "peerDependencies": { + "msw": "^2.4.9", + "vite": "^5.0.0" + }, + "peerDependenciesMeta": { + "msw": { + "optional": true + }, + "vite": { + "optional": true + } + } + }, + "node_modules/@vitest/mocker/node_modules/estree-walker": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/estree-walker/-/estree-walker-3.0.3.tgz", + "integrity": "sha512-7RUKfXgSMMkzt6ZuXmqapOurLGPPfgj6l9uRZ7lRGolvk0y2yocc35LdcxKC5PQZdn2DMqioAQ2NoWcrTKmm6g==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/estree": "^1.0.0" + } + }, + "node_modules/@vitest/pretty-format": { + "version": "2.1.9", + "resolved": "https://registry.npmjs.org/@vitest/pretty-format/-/pretty-format-2.1.9.tgz", + "integrity": "sha512-KhRIdGV2U9HOUzxfiHmY8IFHTdqtOhIzCpd8WRdJiE7D/HUcZVD0EgQCVjm+Q9gkUXWgBvMmTtZgIG48wq7sOQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "tinyrainbow": "^1.2.0" + }, + "funding": { + "url": "https://opencollective.com/vitest" + } + }, + "node_modules/@vitest/runner": { + "version": "2.1.9", + "resolved": "https://registry.npmjs.org/@vitest/runner/-/runner-2.1.9.tgz", + "integrity": 
"sha512-ZXSSqTFIrzduD63btIfEyOmNcBmQvgOVsPNPe0jYtESiXkhd8u2erDLnMxmGrDCwHCCHE7hxwRDCT3pt0esT4g==", + "dev": true, + "license": "MIT", + "dependencies": { + "@vitest/utils": "2.1.9", + "pathe": "^1.1.2" + }, + "funding": { + "url": "https://opencollective.com/vitest" + } + }, + "node_modules/@vitest/snapshot": { + "version": "2.1.9", + "resolved": "https://registry.npmjs.org/@vitest/snapshot/-/snapshot-2.1.9.tgz", + "integrity": "sha512-oBO82rEjsxLNJincVhLhaxxZdEtV0EFHMK5Kmx5sJ6H9L183dHECjiefOAdnqpIgT5eZwT04PoggUnW88vOBNQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@vitest/pretty-format": "2.1.9", + "magic-string": "^0.30.12", + "pathe": "^1.1.2" + }, + "funding": { + "url": "https://opencollective.com/vitest" + } + }, + "node_modules/@vitest/spy": { + "version": "2.1.9", + "resolved": "https://registry.npmjs.org/@vitest/spy/-/spy-2.1.9.tgz", + "integrity": "sha512-E1B35FwzXXTs9FHNK6bDszs7mtydNi5MIfUWpceJ8Xbfb1gBMscAnwLbEu+B44ed6W3XjL9/ehLPHR1fkf1KLQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "tinyspy": "^3.0.2" + }, + "funding": { + "url": "https://opencollective.com/vitest" + } + }, + "node_modules/@vitest/utils": { + "version": "2.1.9", + "resolved": "https://registry.npmjs.org/@vitest/utils/-/utils-2.1.9.tgz", + "integrity": "sha512-v0psaMSkNJ3A2NMrUEHFRzJtDPFn+/VWZ5WxImB21T9fjucJRmS7xCS3ppEnARb9y11OAzaD+P2Ps+b+BGX5iQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@vitest/pretty-format": "2.1.9", + "loupe": "^3.1.2", + "tinyrainbow": "^1.2.0" + }, + "funding": { + "url": "https://opencollective.com/vitest" + } + }, "node_modules/acorn": { "version": "8.16.0", "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.16.0.tgz", @@ -1251,6 +1375,16 @@ "url": "https://github.com/sponsors/epoberezkin" } }, + "node_modules/assertion-error": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/assertion-error/-/assertion-error-2.0.1.tgz", + "integrity": 
"sha512-Izi8RQcffqCeNVgFigKli1ssklIbpHnCYc6AknXGYoB6grJqyeby7jv12JUQgmTAnIDnbck1uxksT4dzN3PWBA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + } + }, "node_modules/astring": { "version": "1.9.0", "resolved": "https://registry.npmjs.org/astring/-/astring-1.9.0.tgz", @@ -1297,6 +1431,43 @@ "node": ">=8" } }, + "node_modules/cac": { + "version": "6.7.14", + "resolved": "https://registry.npmjs.org/cac/-/cac-6.7.14.tgz", + "integrity": "sha512-b6Ilus+c3RrdDk+JhLKUAQfzzgLEPy6wcXqS7f/xe1EETvsDP6GORG7SFuOs6cID5YkqchW/LXZbX5bc8j7ZcQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/chai": { + "version": "5.3.3", + "resolved": "https://registry.npmjs.org/chai/-/chai-5.3.3.tgz", + "integrity": "sha512-4zNhdJD/iOjSH0A05ea+Ke6MU5mmpQcbQsSOkgdaUMJ9zTlDTD/GYlwohmIE2u0gaxHYiVHEn1Fw9mZ/ktJWgw==", + "dev": true, + "license": "MIT", + "dependencies": { + "assertion-error": "^2.0.1", + "check-error": "^2.1.1", + "deep-eql": "^5.0.1", + "loupe": "^3.1.0", + "pathval": "^2.0.0" + }, + "engines": { + "node": ">=18" + } + }, + "node_modules/check-error": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/check-error/-/check-error-2.1.3.tgz", + "integrity": "sha512-PAJdDJusoxnwm1VwW07VWwUN1sl7smmC3OKggvndJFadxxDRyFJBX/ggnu/KE4kQAB7a3Dp8f/YXC1FlUprWmA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 16" + } + }, "node_modules/cjs-module-lexer": { "version": "2.2.0", "resolved": "https://registry.npmjs.org/cjs-module-lexer/-/cjs-module-lexer-2.2.0.tgz", @@ -1353,6 +1524,16 @@ } } }, + "node_modules/deep-eql": { + "version": "5.0.2", + "resolved": "https://registry.npmjs.org/deep-eql/-/deep-eql-5.0.2.tgz", + "integrity": "sha512-h5k/5U50IJJFpzfL6nO9jaaumfjO/f2NjK/oYB2Djzm4p9L+3T9qWpZqZ2hAbLPuuYq9wrU08WQyBTL5GbPk5Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, "node_modules/deep-is": { "version": "0.1.4", "resolved": 
"https://registry.npmjs.org/deep-is/-/deep-is-0.1.4.tgz", @@ -1370,6 +1551,13 @@ "node": ">=0.10.0" } }, + "node_modules/es-module-lexer": { + "version": "1.7.0", + "resolved": "https://registry.npmjs.org/es-module-lexer/-/es-module-lexer-1.7.0.tgz", + "integrity": "sha512-jEQoCwk8hyb2AZziIOLhDqpm5+2ww5uIE6lkO/6jcOCusfk6LhMHpXXfBLXTZ7Ydyt0j4VoUQv6uGNYbdW+kBA==", + "dev": true, + "license": "MIT" + }, "node_modules/esbuild": { "version": "0.26.0", "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.26.0.tgz", @@ -1597,6 +1785,16 @@ "node": ">=0.10.0" } }, + "node_modules/expect-type": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/expect-type/-/expect-type-1.3.0.tgz", + "integrity": "sha512-knvyeauYhqjOYvQ66MznSMs83wmHrCycNEN6Ao+2AeYEfxUIkuiVxdEa1qlGEPK+We3n0THiDciYSsCcgW/DoA==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": ">=12.0.0" + } + }, "node_modules/fast-deep-equal": { "version": "3.1.3", "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz", @@ -1932,6 +2130,13 @@ "url": "https://github.com/sponsors/sindresorhus" } }, + "node_modules/loupe": { + "version": "3.2.1", + "resolved": "https://registry.npmjs.org/loupe/-/loupe-3.2.1.tgz", + "integrity": "sha512-CdzqowRJCeLU72bHvWqwRBBlLcMEtIvGrlvef74kMnV2AolS9Y8xUv1I0U/MNAWMhBlKIoyuEgoJ0t/bbwHbLQ==", + "dev": true, + "license": "MIT" + }, "node_modules/magic-string": { "version": "0.30.21", "resolved": "https://registry.npmjs.org/magic-string/-/magic-string-0.30.21.tgz", @@ -2002,6 +2207,25 @@ "dev": true, "license": "MIT" }, + "node_modules/nanoid": { + "version": "3.3.11", + "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.11.tgz", + "integrity": "sha512-N8SpfPUnUp1bK+PMYW8qSWdl9U+wwNWI4QKxOYDy9JAro3WMX7p2OeVRF9v+347pnakNevPmiHhNmZ2HbFA76w==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "bin": { + "nanoid": "bin/nanoid.cjs" + }, + "engines": 
{ + "node": "^10 || ^12 || ^13.7 || ^14 || >=15.0.1" + } + }, "node_modules/natural-compare": { "version": "1.4.0", "resolved": "https://registry.npmjs.org/natural-compare/-/natural-compare-1.4.0.tgz", @@ -2099,6 +2323,30 @@ "dev": true, "license": "MIT" }, + "node_modules/pathe": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/pathe/-/pathe-1.1.2.tgz", + "integrity": "sha512-whLdWMYL2TwI08hn8/ZqAbrVemu0LNaNNJZX73O6qaIdCTfXutsLhMkjdENX0qhsQ9uIimo4/aQOmXkoon2nDQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/pathval": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/pathval/-/pathval-2.0.1.tgz", + "integrity": "sha512-//nshmD55c46FuFw26xV/xFAaB5HF9Xdap7HJBBnrKdAd6/GxDBaNA1870O79+9ueg61cZLSVc+OaFlfmObYVQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 14.16" + } + }, + "node_modules/picocolors": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.1.1.tgz", + "integrity": "sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA==", + "dev": true, + "license": "ISC" + }, "node_modules/picomatch": { "version": "4.0.3", "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.3.tgz", @@ -2150,6 +2398,35 @@ } } }, + "node_modules/postcss": { + "version": "8.5.8", + "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.5.8.tgz", + "integrity": "sha512-OW/rX8O/jXnm82Ey1k44pObPtdblfiuWnrd8X7GJ7emImCOstunGbXUpp7HdBrFQX6rJzn3sPT397Wp5aCwCHg==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/postcss" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "nanoid": "^3.3.11", + "picocolors": "^1.1.1", + "source-map-js": "^1.2.1" + }, + "engines": { + "node": "^10 || ^12 || >=14" + } + }, "node_modules/prelude-ls": { 
"version": "1.2.1", "resolved": "https://registry.npmjs.org/prelude-ls/-/prelude-ls-1.2.1.tgz", @@ -2357,6 +2634,37 @@ "node": ">=8" } }, + "node_modules/siginfo": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/siginfo/-/siginfo-2.0.0.tgz", + "integrity": "sha512-ybx0WO1/8bSBLEWXZvEd7gMW3Sn3JFlW3TvX1nREbDLRNQNaeNN8WK0meBwPdAaOI7TtRRRJn/Es1zhrrCHu7g==", + "dev": true, + "license": "ISC" + }, + "node_modules/source-map-js": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.2.1.tgz", + "integrity": "sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA==", + "dev": true, + "license": "BSD-3-Clause", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/stackback": { + "version": "0.0.2", + "resolved": "https://registry.npmjs.org/stackback/-/stackback-0.0.2.tgz", + "integrity": "sha512-1XMJE5fQo1jGH6Y/7ebnwPOBEkIEnT4QF32d5R1+VXdXveM0IBMJt8zfaxX1P3QhVwrYe+576+jkANtSS2mBbw==", + "dev": true, + "license": "MIT" + }, + "node_modules/std-env": { + "version": "3.10.0", + "resolved": "https://registry.npmjs.org/std-env/-/std-env-3.10.0.tgz", + "integrity": "sha512-5GS12FdOZNliM5mAOxFRg7Ir0pWz8MdpYm6AY6VPkGpbA7ZzmbzNcBJQ0GPvvyWgcY7QAhCgf9Uy89I03faLkg==", + "dev": true, + "license": "MIT" + }, "node_modules/supports-preserve-symlinks-flag": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/supports-preserve-symlinks-flag/-/supports-preserve-symlinks-flag-1.0.0.tgz", @@ -2370,6 +2678,50 @@ "url": "https://github.com/sponsors/ljharb" } }, + "node_modules/tinybench": { + "version": "2.9.0", + "resolved": "https://registry.npmjs.org/tinybench/-/tinybench-2.9.0.tgz", + "integrity": "sha512-0+DUvqWMValLmha6lr4kD8iAMK1HzV0/aKnCtWb9v9641TnP/MFb7Pc2bxoxQjTXAErryXVgUOfv2YqNllqGeg==", + "dev": true, + "license": "MIT" + }, + "node_modules/tinyexec": { + "version": "0.3.2", + "resolved": "https://registry.npmjs.org/tinyexec/-/tinyexec-0.3.2.tgz", + "integrity": 
"sha512-KQQR9yN7R5+OSwaK0XQoj22pwHoTlgYqmUscPYoknOoWCWfj/5/ABTMRi69FrKU5ffPVh5QcFikpWJI/P1ocHA==", + "dev": true, + "license": "MIT" + }, + "node_modules/tinypool": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/tinypool/-/tinypool-1.1.1.tgz", + "integrity": "sha512-Zba82s87IFq9A9XmjiX5uZA/ARWDrB03OHlq+Vw1fSdt0I+4/Kutwy8BP4Y/y/aORMo61FQ0vIb5j44vSo5Pkg==", + "dev": true, + "license": "MIT", + "engines": { + "node": "^18.0.0 || >=20.0.0" + } + }, + "node_modules/tinyrainbow": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/tinyrainbow/-/tinyrainbow-1.2.0.tgz", + "integrity": "sha512-weEDEq7Z5eTHPDh4xjX789+fHfF+P8boiFB+0vbWzpbnbsEr/GRaohi/uMKxg8RZMXnl1ItAi/IUHWMsjDV7kQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=14.0.0" + } + }, + "node_modules/tinyspy": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/tinyspy/-/tinyspy-3.0.2.tgz", + "integrity": "sha512-n1cw8k1k0x4pgA2+9XrOkFydTerNcJ1zWCO5Nn9scWHTD+5tp8dghT2x1uduQePZTZgd3Tupf+x9BxJjeJi77Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=14.0.0" + } + }, "node_modules/to-regex-range": { "version": "5.0.1", "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", @@ -2406,20 +2758,616 @@ "punycode": "^2.1.0" } }, - "node_modules/which": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", - "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", + "node_modules/vite": { + "version": "5.4.21", + "resolved": "https://registry.npmjs.org/vite/-/vite-5.4.21.tgz", + "integrity": "sha512-o5a9xKjbtuhY6Bi5S3+HvbRERmouabWbyUcpXXUA1u+GNUKoROi9byOJ8M0nHbHYHkYICiMlqxkg1KkYmm25Sw==", "dev": true, - "license": "ISC", + "license": "MIT", "dependencies": { - "isexe": "^2.0.0" + "esbuild": "^0.21.3", + "postcss": "^8.4.43", + "rollup": "^4.20.0" }, "bin": { - "node-which": "bin/node-which" + "vite": "bin/vite.js" }, 
"engines": { - "node": ">= 8" + "node": "^18.0.0 || >=20.0.0" + }, + "funding": { + "url": "https://github.com/vitejs/vite?sponsor=1" + }, + "optionalDependencies": { + "fsevents": "~2.3.3" + }, + "peerDependencies": { + "@types/node": "^18.0.0 || >=20.0.0", + "less": "*", + "lightningcss": "^1.21.0", + "sass": "*", + "sass-embedded": "*", + "stylus": "*", + "sugarss": "*", + "terser": "^5.4.0" + }, + "peerDependenciesMeta": { + "@types/node": { + "optional": true + }, + "less": { + "optional": true + }, + "lightningcss": { + "optional": true + }, + "sass": { + "optional": true + }, + "sass-embedded": { + "optional": true + }, + "stylus": { + "optional": true + }, + "sugarss": { + "optional": true + }, + "terser": { + "optional": true + } + } + }, + "node_modules/vite-node": { + "version": "2.1.9", + "resolved": "https://registry.npmjs.org/vite-node/-/vite-node-2.1.9.tgz", + "integrity": "sha512-AM9aQ/IPrW/6ENLQg3AGY4K1N2TGZdR5e4gu/MmmR2xR3Ll1+dib+nook92g4TV3PXVyeyxdWwtaCAiUL0hMxA==", + "dev": true, + "license": "MIT", + "dependencies": { + "cac": "^6.7.14", + "debug": "^4.3.7", + "es-module-lexer": "^1.5.4", + "pathe": "^1.1.2", + "vite": "^5.0.0" + }, + "bin": { + "vite-node": "vite-node.mjs" + }, + "engines": { + "node": "^18.0.0 || >=20.0.0" + }, + "funding": { + "url": "https://opencollective.com/vitest" + } + }, + "node_modules/vite/node_modules/@esbuild/aix-ppc64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/aix-ppc64/-/aix-ppc64-0.21.5.tgz", + "integrity": "sha512-1SDgH6ZSPTlggy1yI6+Dbkiz8xzpHJEVAlF/AM1tHPLsf5STom9rwtjE4hKAF20FfXXNTFqEYXyJNWh1GiZedQ==", + "cpu": [ + "ppc64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "aix" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/vite/node_modules/@esbuild/android-arm": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/android-arm/-/android-arm-0.21.5.tgz", + "integrity": 
"sha512-vCPvzSjpPHEi1siZdlvAlsPxXl7WbOVUBBAowWug4rJHb68Ox8KualB+1ocNvT5fjv6wpkX6o/iEpbDrf68zcg==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/vite/node_modules/@esbuild/android-arm64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/android-arm64/-/android-arm64-0.21.5.tgz", + "integrity": "sha512-c0uX9VAUBQ7dTDCjq+wdyGLowMdtR/GoC2U5IYk/7D1H1JYC0qseD7+11iMP2mRLN9RcCMRcjC4YMclCzGwS/A==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/vite/node_modules/@esbuild/android-x64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/android-x64/-/android-x64-0.21.5.tgz", + "integrity": "sha512-D7aPRUUNHRBwHxzxRvp856rjUHRFW1SdQATKXH2hqA0kAZb1hKmi02OpYRacl0TxIGz/ZmXWlbZgjwWYaCakTA==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/vite/node_modules/@esbuild/darwin-arm64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-arm64/-/darwin-arm64-0.21.5.tgz", + "integrity": "sha512-DwqXqZyuk5AiWWf3UfLiRDJ5EDd49zg6O9wclZ7kUMv2WRFr4HKjXp/5t8JZ11QbQfUS6/cRCKGwYhtNAY88kQ==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/vite/node_modules/@esbuild/darwin-x64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-x64/-/darwin-x64-0.21.5.tgz", + "integrity": "sha512-se/JjF8NlmKVG4kNIuyWMV/22ZaerB+qaSi5MdrXtd6R08kvs2qCN4C09miupktDitvh8jRFflwGFBQcxZRjbw==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=12" + } + }, + 
"node_modules/vite/node_modules/@esbuild/freebsd-arm64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/freebsd-arm64/-/freebsd-arm64-0.21.5.tgz", + "integrity": "sha512-5JcRxxRDUJLX8JXp/wcBCy3pENnCgBR9bN6JsY4OmhfUtIHe3ZW0mawA7+RDAcMLrMIZaf03NlQiX9DGyB8h4g==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/vite/node_modules/@esbuild/freebsd-x64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/freebsd-x64/-/freebsd-x64-0.21.5.tgz", + "integrity": "sha512-J95kNBj1zkbMXtHVH29bBriQygMXqoVQOQYA+ISs0/2l3T9/kj42ow2mpqerRBxDJnmkUDCaQT/dfNXWX/ZZCQ==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/vite/node_modules/@esbuild/linux-arm": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm/-/linux-arm-0.21.5.tgz", + "integrity": "sha512-bPb5AHZtbeNGjCKVZ9UGqGwo8EUu4cLq68E95A53KlxAPRmUyYv2D6F0uUI65XisGOL1hBP5mTronbgo+0bFcA==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/vite/node_modules/@esbuild/linux-arm64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm64/-/linux-arm64-0.21.5.tgz", + "integrity": "sha512-ibKvmyYzKsBeX8d8I7MH/TMfWDXBF3db4qM6sy+7re0YXya+K1cem3on9XgdT2EQGMu4hQyZhan7TeQ8XkGp4Q==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/vite/node_modules/@esbuild/linux-ia32": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/linux-ia32/-/linux-ia32-0.21.5.tgz", + "integrity": 
"sha512-YvjXDqLRqPDl2dvRODYmmhz4rPeVKYvppfGYKSNGdyZkA01046pLWyRKKI3ax8fbJoK5QbxblURkwK/MWY18Tg==", + "cpu": [ + "ia32" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/vite/node_modules/@esbuild/linux-loong64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/linux-loong64/-/linux-loong64-0.21.5.tgz", + "integrity": "sha512-uHf1BmMG8qEvzdrzAqg2SIG/02+4/DHB6a9Kbya0XDvwDEKCoC8ZRWI5JJvNdUjtciBGFQ5PuBlpEOXQj+JQSg==", + "cpu": [ + "loong64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/vite/node_modules/@esbuild/linux-mips64el": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/linux-mips64el/-/linux-mips64el-0.21.5.tgz", + "integrity": "sha512-IajOmO+KJK23bj52dFSNCMsz1QP1DqM6cwLUv3W1QwyxkyIWecfafnI555fvSGqEKwjMXVLokcV5ygHW5b3Jbg==", + "cpu": [ + "mips64el" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/vite/node_modules/@esbuild/linux-ppc64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/linux-ppc64/-/linux-ppc64-0.21.5.tgz", + "integrity": "sha512-1hHV/Z4OEfMwpLO8rp7CvlhBDnjsC3CttJXIhBi+5Aj5r+MBvy4egg7wCbe//hSsT+RvDAG7s81tAvpL2XAE4w==", + "cpu": [ + "ppc64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/vite/node_modules/@esbuild/linux-riscv64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/linux-riscv64/-/linux-riscv64-0.21.5.tgz", + "integrity": "sha512-2HdXDMd9GMgTGrPWnJzP2ALSokE/0O5HhTUvWIbD3YdjME8JwvSCnNGBnTThKGEB91OZhzrJ4qIIxk/SBmyDDA==", + "cpu": [ + "riscv64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } 
+ }, + "node_modules/vite/node_modules/@esbuild/linux-s390x": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/linux-s390x/-/linux-s390x-0.21.5.tgz", + "integrity": "sha512-zus5sxzqBJD3eXxwvjN1yQkRepANgxE9lgOW2qLnmr8ikMTphkjgXu1HR01K4FJg8h1kEEDAqDcZQtbrRnB41A==", + "cpu": [ + "s390x" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/vite/node_modules/@esbuild/linux-x64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/linux-x64/-/linux-x64-0.21.5.tgz", + "integrity": "sha512-1rYdTpyv03iycF1+BhzrzQJCdOuAOtaqHTWJZCWvijKD2N5Xu0TtVC8/+1faWqcP9iBCWOmjmhoH94dH82BxPQ==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/vite/node_modules/@esbuild/netbsd-x64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/netbsd-x64/-/netbsd-x64-0.21.5.tgz", + "integrity": "sha512-Woi2MXzXjMULccIwMnLciyZH4nCIMpWQAs049KEeMvOcNADVxo0UBIQPfSmxB3CWKedngg7sWZdLvLczpe0tLg==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "netbsd" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/vite/node_modules/@esbuild/openbsd-x64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/openbsd-x64/-/openbsd-x64-0.21.5.tgz", + "integrity": "sha512-HLNNw99xsvx12lFBUwoT8EVCsSvRNDVxNpjZ7bPn947b8gJPzeHWyNVhFsaerc0n3TsbOINvRP2byTZ5LKezow==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "openbsd" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/vite/node_modules/@esbuild/sunos-x64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/sunos-x64/-/sunos-x64-0.21.5.tgz", + "integrity": 
"sha512-6+gjmFpfy0BHU5Tpptkuh8+uw3mnrvgs+dSPQXQOv3ekbordwnzTVEb4qnIvQcYXq6gzkyTnoZ9dZG+D4garKg==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "sunos" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/vite/node_modules/@esbuild/win32-arm64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/win32-arm64/-/win32-arm64-0.21.5.tgz", + "integrity": "sha512-Z0gOTd75VvXqyq7nsl93zwahcTROgqvuAcYDUr+vOv8uHhNSKROyU961kgtCD1e95IqPKSQKH7tBTslnS3tA8A==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/vite/node_modules/@esbuild/win32-ia32": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/win32-ia32/-/win32-ia32-0.21.5.tgz", + "integrity": "sha512-SWXFF1CL2RVNMaVs+BBClwtfZSvDgtL//G/smwAc5oVK/UPu2Gu9tIaRgFmYFFKrmg3SyAjSrElf0TiJ1v8fYA==", + "cpu": [ + "ia32" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/vite/node_modules/@esbuild/win32-x64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/win32-x64/-/win32-x64-0.21.5.tgz", + "integrity": "sha512-tQd/1efJuzPC6rCFwEvLtci/xNFcTZknmXs98FYDfGE4wP9ClFV98nyKrzJKVPMhdDnjzLhdUyMX4PsQAPjwIw==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/vite/node_modules/esbuild": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.21.5.tgz", + "integrity": "sha512-mg3OPMV4hXywwpoDxu3Qda5xCKQi+vCTZq8S9J/EpkhB2HzKXq4SNFZE3+NK93JYxc8VMSep+lOUSC/RVKaBqw==", + "dev": true, + "hasInstallScript": true, + "license": "MIT", + "bin": { + "esbuild": "bin/esbuild" + }, + "engines": { + "node": ">=12" + }, + "optionalDependencies": { + "@esbuild/aix-ppc64": "0.21.5", + 
"@esbuild/android-arm": "0.21.5", + "@esbuild/android-arm64": "0.21.5", + "@esbuild/android-x64": "0.21.5", + "@esbuild/darwin-arm64": "0.21.5", + "@esbuild/darwin-x64": "0.21.5", + "@esbuild/freebsd-arm64": "0.21.5", + "@esbuild/freebsd-x64": "0.21.5", + "@esbuild/linux-arm": "0.21.5", + "@esbuild/linux-arm64": "0.21.5", + "@esbuild/linux-ia32": "0.21.5", + "@esbuild/linux-loong64": "0.21.5", + "@esbuild/linux-mips64el": "0.21.5", + "@esbuild/linux-ppc64": "0.21.5", + "@esbuild/linux-riscv64": "0.21.5", + "@esbuild/linux-s390x": "0.21.5", + "@esbuild/linux-x64": "0.21.5", + "@esbuild/netbsd-x64": "0.21.5", + "@esbuild/openbsd-x64": "0.21.5", + "@esbuild/sunos-x64": "0.21.5", + "@esbuild/win32-arm64": "0.21.5", + "@esbuild/win32-ia32": "0.21.5", + "@esbuild/win32-x64": "0.21.5" + } + }, + "node_modules/vitest": { + "version": "2.1.9", + "resolved": "https://registry.npmjs.org/vitest/-/vitest-2.1.9.tgz", + "integrity": "sha512-MSmPM9REYqDGBI8439mA4mWhV5sKmDlBKWIYbA3lRb2PTHACE0mgKwA8yQ2xq9vxDTuk4iPrECBAEW2aoFXY0Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "@vitest/expect": "2.1.9", + "@vitest/mocker": "2.1.9", + "@vitest/pretty-format": "^2.1.9", + "@vitest/runner": "2.1.9", + "@vitest/snapshot": "2.1.9", + "@vitest/spy": "2.1.9", + "@vitest/utils": "2.1.9", + "chai": "^5.1.2", + "debug": "^4.3.7", + "expect-type": "^1.1.0", + "magic-string": "^0.30.12", + "pathe": "^1.1.2", + "std-env": "^3.8.0", + "tinybench": "^2.9.0", + "tinyexec": "^0.3.1", + "tinypool": "^1.0.1", + "tinyrainbow": "^1.2.0", + "vite": "^5.0.0", + "vite-node": "2.1.9", + "why-is-node-running": "^2.3.0" + }, + "bin": { + "vitest": "vitest.mjs" + }, + "engines": { + "node": "^18.0.0 || >=20.0.0" + }, + "funding": { + "url": "https://opencollective.com/vitest" + }, + "peerDependencies": { + "@edge-runtime/vm": "*", + "@types/node": "^18.0.0 || >=20.0.0", + "@vitest/browser": "2.1.9", + "@vitest/ui": "2.1.9", + "happy-dom": "*", + "jsdom": "*" + }, + "peerDependenciesMeta": { + 
"@edge-runtime/vm": { + "optional": true + }, + "@types/node": { + "optional": true + }, + "@vitest/browser": { + "optional": true + }, + "@vitest/ui": { + "optional": true + }, + "happy-dom": { + "optional": true + }, + "jsdom": { + "optional": true + } + } + }, + "node_modules/which": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", + "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", + "dev": true, + "license": "ISC", + "dependencies": { + "isexe": "^2.0.0" + }, + "bin": { + "node-which": "bin/node-which" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/why-is-node-running": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/why-is-node-running/-/why-is-node-running-2.3.0.tgz", + "integrity": "sha512-hUrmaWBdVDcxvYqnyh09zunKzROWjbZTiNy8dBEjkS7ehEDQibXJ7XvlmtbwuTclUiIyN+CyXQD4Vmko8fNm8w==", + "dev": true, + "license": "MIT", + "dependencies": { + "siginfo": "^2.0.0", + "stackback": "0.0.2" + }, + "bin": { + "why-is-node-running": "cli.js" + }, + "engines": { + "node": ">=8" } }, "node_modules/word-wrap": { diff --git a/package.json b/package.json index 7ff044a..26a50e0 100644 --- a/package.json +++ b/package.json @@ -16,7 +16,9 @@ "scripts": { "build": "pkgroll --clean-dist --src .", "dev": "pkgroll --watch --src .", - "test": "node --test test/*.test.cjs", + "test": "vitest run", + "test:watch": "vitest", + "test:node": "node --test test/*.test.cjs", "lint": "eslint lib/ bin/ test/", "prepublishOnly": "npm run build", "completions": "node bin/generate-completions.cjs", @@ -31,7 +33,8 @@ "devDependencies": { "@eslint/js": "^10.0.1", "eslint": "^10.0.2", - "pkgroll": "^2.26.3" + "pkgroll": "^2.26.3", + "vitest": "^2.1.9" }, "engines": { "node": ">=18.0.0" diff --git a/test/checkpoint.test.js b/test/checkpoint.test.js new file mode 100644 index 0000000..f004919 --- /dev/null +++ b/test/checkpoint.test.js @@ -0,0 +1,821 @@ +/** + * 
test/checkpoint.test.js — Unit tests for checkpoint read/write and resume + * detection functions in lib/state.cjs. + * + * Covers: + * - updateCheckpoint() merge semantics (step_progress shallow merge, + * artifacts/step_history append-only, resume full-replace) + * - detectCheckpoint() returning null for triage-only checkpoints and + * non-null for checkpoints at plan/execute/verify/pr steps + * - resumeFromCheckpoint() mapping resume.action to resumeStage for all + * documented action values, plus unknown/null default + * - clearCheckpoint() resetting checkpoint to null + * - Forward-compat round-trip: unknown fields in checkpoint are preserved + * + * Isolation strategy: + * - fs.mkdtempSync() creates a real tmp dir per describe block + * - process.cwd() is overridden so getMgwDir() stays sandboxed + * - require.cache is cleared before each require of state.cjs + * - afterEach removes .mgw/ and restores process.cwd() + * - Tmp dirs removed in afterAll via fs.rmSync + * + * This file uses the same isolation pattern as test/state.test.cjs and + * test/validate-and-load.test.js but imports via vitest (ESM) format. + */ + +import { describe, it, expect, beforeEach, afterEach, afterAll } from 'vitest'; +import { createRequire } from 'module'; +import { fileURLToPath } from 'url'; +import path from 'path'; +import fs from 'fs'; +import os from 'os'; + +const _require = createRequire(import.meta.url); +const __dirname = path.dirname(fileURLToPath(import.meta.url)); +const REPO_ROOT = path.resolve(__dirname, '..'); +const STATE_MODULE = path.join(REPO_ROOT, 'lib', 'state.cjs'); + +// --------------------------------------------------------------------------- +// Helpers +// --------------------------------------------------------------------------- + +/** + * Clear state module cache and re-require fresh. + * Required so that process.cwd() overrides take effect on path resolution. 
+ */ +function loadState() { + delete _require.cache[STATE_MODULE]; + return _require(STATE_MODULE); +} + +/** + * Override process.cwd to return tmpDir. + * Returns a restore function — call it in afterEach. + */ +function overrideCwd(tmpDir) { + const original = process.cwd.bind(process); + process.cwd = () => tmpDir; + return () => { + process.cwd = original; + }; +} + +/** + * Remove .mgw/ inside tmpDir if it exists. + */ +function cleanMgw(tmpDir) { + const mgwDir = path.join(tmpDir, '.mgw'); + if (fs.existsSync(mgwDir)) { + fs.rmSync(mgwDir, { recursive: true, force: true }); + } +} + +/** + * Write a minimal issue state file into .mgw/active/. + * Creates directories as needed. + * + * @param {string} tmpDir - Tmp directory root (process.cwd() override target) + * @param {number} issueNumber - Issue number used to name the file + * @param {object} overrides - Fields to merge onto the base state + * @returns {{ filePath: string, state: object }} Written file path and state object + */ +function writeIssueState(tmpDir, issueNumber, overrides = {}) { + const activeDir = path.join(tmpDir, '.mgw', 'active'); + fs.mkdirSync(activeDir, { recursive: true }); + + const base = { + issue_number: issueNumber, + slug: `test-issue-${issueNumber}`, + title: `Test issue ${issueNumber}`, + pipeline_stage: 'triaged', + gsd_route: 'plan-phase', + checkpoint: null, + }; + const state = Object.assign({}, base, overrides); + const fileName = `${issueNumber}-test-issue-${issueNumber}.json`; + const filePath = path.join(activeDir, fileName); + fs.writeFileSync(filePath, JSON.stringify(state, null, 2), 'utf-8'); + return { filePath, state }; +} + +/** + * Read and parse the issue state file from .mgw/active/. 
+ */ +function readIssueState(tmpDir, issueNumber) { + const activeDir = path.join(tmpDir, '.mgw', 'active'); + const entries = fs.readdirSync(activeDir); + const match = entries.find(f => f.startsWith(`${issueNumber}-`) && f.endsWith('.json')); + if (!match) return null; + return JSON.parse(fs.readFileSync(path.join(activeDir, match), 'utf-8')); +} + +// --------------------------------------------------------------------------- +// Group 1: updateCheckpoint() — merge semantics +// --------------------------------------------------------------------------- + +describe('updateCheckpoint() — merge semantics', () => { + let tmpDir; + let restoreCwd; + + beforeEach(() => { + tmpDir = fs.mkdtempSync(path.join(os.tmpdir(), 'mgw-cp-test-g1-')); + restoreCwd = overrideCwd(tmpDir); + }); + + afterEach(() => { + restoreCwd(); + cleanMgw(tmpDir); + delete _require.cache[STATE_MODULE]; + }); + + afterAll(() => { + if (fs.existsSync(tmpDir)) { + fs.rmSync(tmpDir, { recursive: true, force: true }); + } + }); + + it('initializes checkpoint from null when none exists', () => { + writeIssueState(tmpDir, 1, { checkpoint: null }); + const { updateCheckpoint } = loadState(); + + const result = updateCheckpoint(1, { pipeline_step: 'plan' }); + + expect(result.updated).toBe(true); + expect(result.checkpoint).toBeTruthy(); + expect(result.checkpoint.pipeline_step).toBe('plan'); + expect(result.checkpoint.schema_version).toBe(1); + expect(result.checkpoint.artifacts).toEqual([]); + expect(result.checkpoint.step_history).toEqual([]); + + const persisted = readIssueState(tmpDir, 1); + expect(persisted.checkpoint).toBeTruthy(); + expect(persisted.checkpoint.pipeline_step).toBe('plan'); + }); + + it('overwrites pipeline_step on subsequent calls', () => { + writeIssueState(tmpDir, 2, { checkpoint: null }); + const { updateCheckpoint } = loadState(); + + updateCheckpoint(2, { pipeline_step: 'plan' }); + updateCheckpoint(2, { pipeline_step: 'execute' }); + + const persisted = 
readIssueState(tmpDir, 2); + expect(persisted.checkpoint.pipeline_step).toBe('execute'); + }); + + it('shallow-merges step_progress — existing keys preserved, new keys added', () => { + writeIssueState(tmpDir, 3, { checkpoint: null }); + const { updateCheckpoint } = loadState(); + + // First write: sets plan_path and plan_checked + updateCheckpoint(3, { + pipeline_step: 'plan', + step_progress: { plan_path: '/some/plan.md', plan_checked: false }, + }); + + // Second write: only updates plan_checked — plan_path must be preserved + updateCheckpoint(3, { + step_progress: { plan_checked: true }, + }); + + const persisted = readIssueState(tmpDir, 3); + expect(persisted.checkpoint.step_progress.plan_path).toBe('/some/plan.md'); + expect(persisted.checkpoint.step_progress.plan_checked).toBe(true); + }); + + it('appends artifacts — never replaces existing entries', () => { + writeIssueState(tmpDir, 4, { checkpoint: null }); + const { updateCheckpoint } = loadState(); + + const artifact1 = { path: 'plan.md', type: 'plan', created_at: '2026-03-06T10:00:00Z' }; + const artifact2 = { path: 'summary.md', type: 'summary', created_at: '2026-03-06T11:00:00Z' }; + + updateCheckpoint(4, { artifacts: [artifact1] }); + updateCheckpoint(4, { artifacts: [artifact2] }); + + const persisted = readIssueState(tmpDir, 4); + expect(persisted.checkpoint.artifacts).toHaveLength(2); + expect(persisted.checkpoint.artifacts[0].path).toBe('plan.md'); + expect(persisted.checkpoint.artifacts[1].path).toBe('summary.md'); + }); + + it('appends step_history — never replaces existing entries', () => { + writeIssueState(tmpDir, 5, { checkpoint: null }); + const { updateCheckpoint } = loadState(); + + const entry1 = { step: 'plan', completed_at: '2026-03-06T10:00:00Z', agent_type: 'gsd-planner' }; + const entry2 = { step: 'execute', completed_at: '2026-03-06T11:00:00Z', agent_type: 'gsd-executor' }; + + updateCheckpoint(5, { step_history: [entry1] }); + updateCheckpoint(5, { step_history: [entry2] }); + + 
const persisted = readIssueState(tmpDir, 5); + expect(persisted.checkpoint.step_history).toHaveLength(2); + expect(persisted.checkpoint.step_history[0].step).toBe('plan'); + expect(persisted.checkpoint.step_history[1].step).toBe('execute'); + }); + + it('fully replaces resume on each call (resume.context is opaque)', () => { + writeIssueState(tmpDir, 6, { checkpoint: null }); + const { updateCheckpoint } = loadState(); + + updateCheckpoint(6, { resume: { action: 'spawn-executor', context: { quick_dir: '/a' } } }); + updateCheckpoint(6, { resume: { action: 'spawn-verifier', context: { quick_dir: '/b', plan_num: 2 } } }); + + const persisted = readIssueState(tmpDir, 6); + expect(persisted.checkpoint.resume.action).toBe('spawn-verifier'); + expect(persisted.checkpoint.resume.context.quick_dir).toBe('/b'); + expect(persisted.checkpoint.resume.context.plan_num).toBe(2); + // Old context from first call must not persist + expect(Object.keys(persisted.checkpoint.resume.context)).toHaveLength(2); + }); + + it('updates last_agent_output on each call', () => { + writeIssueState(tmpDir, 7, { checkpoint: null }); + const { updateCheckpoint } = loadState(); + + updateCheckpoint(7, { last_agent_output: '/first/output.md' }); + updateCheckpoint(7, { last_agent_output: '/second/output.md' }); + + const persisted = readIssueState(tmpDir, 7); + expect(persisted.checkpoint.last_agent_output).toBe('/second/output.md'); + }); + + it('always updates updated_at timestamp', () => { + writeIssueState(tmpDir, 8, { checkpoint: null }); + const { updateCheckpoint } = loadState(); + + const before = new Date().toISOString(); + const result = updateCheckpoint(8, { pipeline_step: 'plan' }); + + expect(result.checkpoint.updated_at).toBeDefined(); + expect(result.checkpoint.updated_at >= before).toBe(true); + }); + + it('throws when no state file exists for the issue number', () => { + // Do not create a state file for issue 9 + const activeDir = path.join(tmpDir, '.mgw', 'active'); + 
fs.mkdirSync(activeDir, { recursive: true }); + const { updateCheckpoint } = loadState(); + + expect(() => updateCheckpoint(9, { pipeline_step: 'plan' })).toThrow(/No state file found/); + }); +}); + +// --------------------------------------------------------------------------- +// Group 2: detectCheckpoint() — null-return semantics +// --------------------------------------------------------------------------- + +describe('detectCheckpoint() — null-return semantics', () => { + let tmpDir; + let restoreCwd; + + beforeEach(() => { + tmpDir = fs.mkdtempSync(path.join(os.tmpdir(), 'mgw-cp-test-g2-')); + restoreCwd = overrideCwd(tmpDir); + }); + + afterEach(() => { + restoreCwd(); + cleanMgw(tmpDir); + delete _require.cache[STATE_MODULE]; + }); + + afterAll(() => { + if (fs.existsSync(tmpDir)) { + fs.rmSync(tmpDir, { recursive: true, force: true }); + } + }); + + it('returns null when no state file exists for the issue number', () => { + const activeDir = path.join(tmpDir, '.mgw', 'active'); + fs.mkdirSync(activeDir, { recursive: true }); + const { detectCheckpoint } = loadState(); + + expect(detectCheckpoint(100)).toBeNull(); + }); + + it('returns null when checkpoint field is null', () => { + writeIssueState(tmpDir, 101, { checkpoint: null }); + const { detectCheckpoint } = loadState(); + + expect(detectCheckpoint(101)).toBeNull(); + }); + + it('returns null when pipeline_step is "triage" (index 0 — not resumable)', () => { + writeIssueState(tmpDir, 102, { + checkpoint: { + schema_version: 1, + pipeline_step: 'triage', + step_progress: { comment_check_done: true }, + last_agent_output: null, + artifacts: [], + resume: { action: 'begin-execution', context: {} }, + started_at: '2026-03-06T10:00:00Z', + updated_at: '2026-03-06T10:01:00Z', + step_history: [], + }, + }); + const { detectCheckpoint } = loadState(); + + expect(detectCheckpoint(102)).toBeNull(); + }); + + it('returns checkpoint data when pipeline_step is "plan" (index 1)', () => { + writeIssueState(tmpDir, 
103, { + checkpoint: { + schema_version: 1, + pipeline_step: 'plan', + step_progress: { plan_path: '/plan.md', plan_checked: false }, + last_agent_output: '/plan.md', + artifacts: [{ path: '/plan.md', type: 'plan', created_at: '2026-03-06T10:00:00Z' }], + resume: { action: 'run-plan-checker', context: { quick_dir: '/q' } }, + started_at: '2026-03-06T10:00:00Z', + updated_at: '2026-03-06T10:05:00Z', + step_history: [], + }, + }); + const { detectCheckpoint } = loadState(); + + const cp = detectCheckpoint(103); + expect(cp).not.toBeNull(); + expect(cp.pipeline_step).toBe('plan'); + expect(cp.step_progress.plan_path).toBe('/plan.md'); + expect(cp.artifacts).toHaveLength(1); + expect(cp.resume.action).toBe('run-plan-checker'); + }); + + it('returns checkpoint data when pipeline_step is "execute"', () => { + writeIssueState(tmpDir, 104, { + checkpoint: { + schema_version: 1, + pipeline_step: 'execute', + step_progress: { gsd_phase: 1, tasks_completed: 2, tasks_total: 5 }, + last_agent_output: null, + artifacts: [], + resume: { action: 'continue-execution', context: { phase_number: 1 } }, + started_at: '2026-03-06T10:00:00Z', + updated_at: '2026-03-06T10:10:00Z', + step_history: [], + }, + }); + const { detectCheckpoint } = loadState(); + + const cp = detectCheckpoint(104); + expect(cp).not.toBeNull(); + expect(cp.pipeline_step).toBe('execute'); + }); + + it('returns checkpoint data when pipeline_step is "verify"', () => { + writeIssueState(tmpDir, 105, { + checkpoint: { + schema_version: 1, + pipeline_step: 'verify', + step_progress: { verification_path: '/verify.md', must_haves_checked: true }, + last_agent_output: '/verify.md', + artifacts: [], + resume: { action: 'create-pr', context: {} }, + started_at: '2026-03-06T10:00:00Z', + updated_at: '2026-03-06T10:20:00Z', + step_history: [], + }, + }); + const { detectCheckpoint } = loadState(); + + const cp = detectCheckpoint(105); + expect(cp).not.toBeNull(); + expect(cp.pipeline_step).toBe('verify'); + }); + + 
it('returns checkpoint data when pipeline_step is "pr"', () => { + writeIssueState(tmpDir, 106, { + checkpoint: { + schema_version: 1, + pipeline_step: 'pr', + step_progress: { branch_pushed: true, pr_number: 42, pr_url: 'https://github.com/r/p/pulls/42' }, + last_agent_output: 'https://github.com/r/p/pulls/42', + artifacts: [], + resume: { action: 'cleanup', context: { pr_number: 42 } }, + started_at: '2026-03-06T10:00:00Z', + updated_at: '2026-03-06T10:30:00Z', + step_history: [], + }, + }); + const { detectCheckpoint } = loadState(); + + const cp = detectCheckpoint(106); + expect(cp).not.toBeNull(); + expect(cp.pipeline_step).toBe('pr'); + }); +}); + +// --------------------------------------------------------------------------- +// Group 3: resumeFromCheckpoint() — action → stage mapping +// --------------------------------------------------------------------------- + +describe('resumeFromCheckpoint() — action to resumeStage mapping', () => { + let tmpDir; + let restoreCwd; + + beforeEach(() => { + tmpDir = fs.mkdtempSync(path.join(os.tmpdir(), 'mgw-cp-test-g3-')); + restoreCwd = overrideCwd(tmpDir); + }); + + afterEach(() => { + restoreCwd(); + cleanMgw(tmpDir); + delete _require.cache[STATE_MODULE]; + }); + + afterAll(() => { + if (fs.existsSync(tmpDir)) { + fs.rmSync(tmpDir, { recursive: true, force: true }); + } + }); + + /** + * Helper: write an issue state with a plan-step checkpoint and a given resume action. 
+ */ + function writeCheckpointWithAction(issueNumber, action, extraHistory = []) { + writeIssueState(tmpDir, issueNumber, { + checkpoint: { + schema_version: 1, + pipeline_step: 'plan', + step_progress: {}, + last_agent_output: null, + artifacts: [], + resume: { action, context: {} }, + started_at: '2026-03-06T10:00:00Z', + updated_at: '2026-03-06T10:05:00Z', + step_history: extraHistory, + }, + }); + } + + it('returns null when no resumable checkpoint exists (triage-only)', () => { + writeIssueState(tmpDir, 200, { + checkpoint: { + schema_version: 1, + pipeline_step: 'triage', + step_progress: {}, + last_agent_output: null, + artifacts: [], + resume: { action: 'begin-execution', context: {} }, + started_at: '2026-03-06T10:00:00Z', + updated_at: '2026-03-06T10:00:00Z', + step_history: [], + }, + }); + const { resumeFromCheckpoint } = loadState(); + + expect(resumeFromCheckpoint(200)).toBeNull(); + }); + + it('maps "run-plan-checker" → resumeStage "planning"', () => { + writeCheckpointWithAction(201, 'run-plan-checker'); + const { resumeFromCheckpoint } = loadState(); + + const result = resumeFromCheckpoint(201); + expect(result).not.toBeNull(); + expect(result.resumeStage).toBe('planning'); + expect(result.resumeAction).toBe('run-plan-checker'); + }); + + it('maps "spawn-executor" → resumeStage "executing"', () => { + writeCheckpointWithAction(202, 'spawn-executor'); + const { resumeFromCheckpoint } = loadState(); + + const result = resumeFromCheckpoint(202); + expect(result.resumeStage).toBe('executing'); + expect(result.resumeAction).toBe('spawn-executor'); + }); + + it('maps "continue-execution" → resumeStage "executing"', () => { + writeCheckpointWithAction(203, 'continue-execution'); + const { resumeFromCheckpoint } = loadState(); + + const result = resumeFromCheckpoint(203); + expect(result.resumeStage).toBe('executing'); + expect(result.resumeAction).toBe('continue-execution'); + }); + + it('maps "spawn-verifier" → resumeStage "verifying"', () => { + 
writeCheckpointWithAction(204, 'spawn-verifier'); + const { resumeFromCheckpoint } = loadState(); + + const result = resumeFromCheckpoint(204); + expect(result.resumeStage).toBe('verifying'); + expect(result.resumeAction).toBe('spawn-verifier'); + }); + + it('maps "create-pr" → resumeStage "pr-pending"', () => { + writeCheckpointWithAction(205, 'create-pr'); + const { resumeFromCheckpoint } = loadState(); + + const result = resumeFromCheckpoint(205); + expect(result.resumeStage).toBe('pr-pending'); + expect(result.resumeAction).toBe('create-pr'); + }); + + it('maps "begin-execution" → resumeStage "planning"', () => { + writeCheckpointWithAction(206, 'begin-execution'); + const { resumeFromCheckpoint } = loadState(); + + const result = resumeFromCheckpoint(206); + expect(result.resumeStage).toBe('planning'); + expect(result.resumeAction).toBe('begin-execution'); + }); + + it('maps null action → resumeStage "planning" (safe default), resumeAction "unknown"', () => { + writeCheckpointWithAction(207, null); + const { resumeFromCheckpoint } = loadState(); + + const result = resumeFromCheckpoint(207); + expect(result.resumeStage).toBe('planning'); + expect(result.resumeAction).toBe('unknown'); + }); + + it('maps unrecognized action → resumeStage "planning" (safe default)', () => { + writeCheckpointWithAction(208, 'future-unknown-action'); + const { resumeFromCheckpoint } = loadState(); + + const result = resumeFromCheckpoint(208); + expect(result.resumeStage).toBe('planning'); + expect(result.resumeAction).toBe('future-unknown-action'); + }); + + it('derives completedSteps from step_history entries', () => { + const history = [ + { step: 'plan', completed_at: '2026-03-06T10:00:00Z', agent_type: 'gsd-planner' }, + { step: 'execute', completed_at: '2026-03-06T10:30:00Z', agent_type: 'gsd-executor' }, + ]; + writeCheckpointWithAction(209, 'spawn-verifier', history); + const { resumeFromCheckpoint } = loadState(); + + const result = resumeFromCheckpoint(209); + 
expect(result.completedSteps).toEqual(['plan', 'execute']); + }); + + it('returns empty completedSteps when step_history is empty', () => { + writeCheckpointWithAction(210, 'run-plan-checker', []); + const { resumeFromCheckpoint } = loadState(); + + const result = resumeFromCheckpoint(210); + expect(result.completedSteps).toEqual([]); + }); + + it('returns checkpoint data nested under result.checkpoint', () => { + writeCheckpointWithAction(211, 'spawn-executor'); + const { resumeFromCheckpoint } = loadState(); + + const result = resumeFromCheckpoint(211); + expect(result.checkpoint).toBeDefined(); + expect(result.checkpoint.pipeline_step).toBe('plan'); + expect(result.checkpoint.resume.action).toBe('spawn-executor'); + }); +}); + +// --------------------------------------------------------------------------- +// Group 4: clearCheckpoint() — reset behavior +// --------------------------------------------------------------------------- + +describe('clearCheckpoint() — reset to null', () => { + let tmpDir; + let restoreCwd; + + beforeEach(() => { + tmpDir = fs.mkdtempSync(path.join(os.tmpdir(), 'mgw-cp-test-g4-')); + restoreCwd = overrideCwd(tmpDir); + }); + + afterEach(() => { + restoreCwd(); + cleanMgw(tmpDir); + delete _require.cache[STATE_MODULE]; + }); + + afterAll(() => { + if (fs.existsSync(tmpDir)) { + fs.rmSync(tmpDir, { recursive: true, force: true }); + } + }); + + it('sets checkpoint to null and returns { cleared: true } when checkpoint was non-null', () => { + writeIssueState(tmpDir, 300, { + checkpoint: { + schema_version: 1, + pipeline_step: 'plan', + step_progress: {}, + last_agent_output: null, + artifacts: [], + resume: { action: 'spawn-executor', context: {} }, + started_at: '2026-03-06T10:00:00Z', + updated_at: '2026-03-06T10:05:00Z', + step_history: [], + }, + }); + const { clearCheckpoint } = loadState(); + + const result = clearCheckpoint(300); + + expect(result).toEqual({ cleared: true }); + + const persisted = readIssueState(tmpDir, 300); + 
expect(persisted.checkpoint).toBeNull(); + }); + + it('returns { cleared: false } when checkpoint was already null', () => { + writeIssueState(tmpDir, 301, { checkpoint: null }); + const { clearCheckpoint } = loadState(); + + const result = clearCheckpoint(301); + + expect(result).toEqual({ cleared: false }); + + const persisted = readIssueState(tmpDir, 301); + expect(persisted.checkpoint).toBeNull(); + }); + + it('preserves other fields in the state file (pipeline_stage, triage, etc.)', () => { + writeIssueState(tmpDir, 302, { + pipeline_stage: 'executing', + gsd_route: 'plan-phase', + checkpoint: { + schema_version: 1, + pipeline_step: 'execute', + step_progress: {}, + last_agent_output: null, + artifacts: [], + resume: { action: null, context: {} }, + started_at: '2026-03-06T10:00:00Z', + updated_at: '2026-03-06T10:05:00Z', + step_history: [], + }, + }); + const { clearCheckpoint } = loadState(); + + clearCheckpoint(302); + + const persisted = readIssueState(tmpDir, 302); + expect(persisted.pipeline_stage).toBe('executing'); + expect(persisted.gsd_route).toBe('plan-phase'); + expect(persisted.checkpoint).toBeNull(); + }); + + it('writes atomically (uses atomicWriteJson — no .tmp file left behind)', () => { + writeIssueState(tmpDir, 303, { + checkpoint: { + schema_version: 1, + pipeline_step: 'plan', + step_progress: {}, + last_agent_output: null, + artifacts: [], + resume: { action: null, context: {} }, + started_at: '2026-03-06T10:00:00Z', + updated_at: '2026-03-06T10:05:00Z', + step_history: [], + }, + }); + const { clearCheckpoint } = loadState(); + + clearCheckpoint(303); + + const activeDir = path.join(tmpDir, '.mgw', 'active'); + const entries = fs.readdirSync(activeDir); + const tmpFiles = entries.filter(f => f.endsWith('.tmp')); + expect(tmpFiles).toHaveLength(0); + }); + + it('throws when no state file found for the issue number', () => { + const activeDir = path.join(tmpDir, '.mgw', 'active'); + fs.mkdirSync(activeDir, { recursive: true }); + const { 
clearCheckpoint } = loadState(); + + expect(() => clearCheckpoint(999)).toThrow(/No state file found/); + }); +}); + +// --------------------------------------------------------------------------- +// Group 5: Forward-compatibility — unknown fields preserved on round-trip +// --------------------------------------------------------------------------- + +describe('Forward-compatibility — unknown fields preserved on round-trip', () => { + let tmpDir; + let restoreCwd; + + beforeEach(() => { + tmpDir = fs.mkdtempSync(path.join(os.tmpdir(), 'mgw-cp-test-g5-')); + restoreCwd = overrideCwd(tmpDir); + }); + + afterEach(() => { + restoreCwd(); + cleanMgw(tmpDir); + delete _require.cache[STATE_MODULE]; + }); + + afterAll(() => { + if (fs.existsSync(tmpDir)) { + fs.rmSync(tmpDir, { recursive: true, force: true }); + } + }); + + it('preserves unknown top-level checkpoint fields on updateCheckpoint round-trip', () => { + // Simulate a checkpoint written by a future version with an extra field + writeIssueState(tmpDir, 400, { + checkpoint: { + schema_version: 1, + pipeline_step: 'plan', + step_progress: { plan_path: '/plan.md' }, + last_agent_output: '/plan.md', + artifacts: [], + resume: { action: 'spawn-executor', context: {} }, + started_at: '2026-03-06T10:00:00Z', + updated_at: '2026-03-06T10:05:00Z', + step_history: [], + // Future field that current consumers do not know about + future_field: 'preserve-me', + another_future_field: { nested: true }, + }, + }); + + const { updateCheckpoint } = loadState(); + + // Perform a read-modify-write (update step_progress) + updateCheckpoint(400, { + step_progress: { plan_checked: true }, + }); + + const persisted = readIssueState(tmpDir, 400); + + // Known fields work correctly + expect(persisted.checkpoint.step_progress.plan_path).toBe('/plan.md'); + expect(persisted.checkpoint.step_progress.plan_checked).toBe(true); + + // Unknown fields must be preserved + expect(persisted.checkpoint.future_field).toBe('preserve-me'); + 
expect(persisted.checkpoint.another_future_field).toEqual({ nested: true }); + }); + + it('preserves unknown step_progress keys on shallow merge', () => { + writeIssueState(tmpDir, 401, { + checkpoint: { + schema_version: 1, + pipeline_step: 'execute', + step_progress: { + gsd_phase: 1, + tasks_completed: 2, + tasks_total: 5, + // Key from a future pipeline version + future_progress_key: 'do-not-lose-me', + }, + last_agent_output: null, + artifacts: [], + resume: { action: 'continue-execution', context: {} }, + started_at: '2026-03-06T10:00:00Z', + updated_at: '2026-03-06T10:10:00Z', + step_history: [], + }, + }); + + const { updateCheckpoint } = loadState(); + + // Update tasks_completed only — future_progress_key must survive + updateCheckpoint(401, { + step_progress: { tasks_completed: 3 }, + }); + + const persisted = readIssueState(tmpDir, 401); + expect(persisted.checkpoint.step_progress.gsd_phase).toBe(1); + expect(persisted.checkpoint.step_progress.tasks_completed).toBe(3); + expect(persisted.checkpoint.step_progress.tasks_total).toBe(5); + expect(persisted.checkpoint.step_progress.future_progress_key).toBe('do-not-lose-me'); + }); + + it('detectCheckpoint returns unknown step_progress keys intact', () => { + writeIssueState(tmpDir, 402, { + checkpoint: { + schema_version: 1, + pipeline_step: 'plan', + step_progress: { + plan_path: '/plan.md', + unknown_future_key: 42, + }, + last_agent_output: null, + artifacts: [], + resume: { action: 'run-plan-checker', context: {} }, + started_at: '2026-03-06T10:00:00Z', + updated_at: '2026-03-06T10:05:00Z', + step_history: [], + }, + }); + + const { detectCheckpoint } = loadState(); + const cp = detectCheckpoint(402); + + expect(cp).not.toBeNull(); + expect(cp.step_progress.plan_path).toBe('/plan.md'); + expect(cp.step_progress.unknown_future_key).toBe(42); + }); +}); diff --git a/test/fixtures/agents/general-purpose.json b/test/fixtures/agents/general-purpose.json new file mode 100644 index 0000000..81086b4 --- 
/dev/null +++ b/test/fixtures/agents/general-purpose.json @@ -0,0 +1 @@ +{"classification":"informational","reasoning":"Standard status update","new_requirements":[],"blocking_reason":null} diff --git a/test/fixtures/agents/gsd-executor.json b/test/fixtures/agents/gsd-executor.json new file mode 100644 index 0000000..e086646 --- /dev/null +++ b/test/fixtures/agents/gsd-executor.json @@ -0,0 +1 @@ +"## EXECUTION COMPLETE\n\nAll tasks completed successfully.\n\n- Files modified as specified\n- No errors encountered\n- Commits staged" diff --git a/test/fixtures/agents/gsd-plan-checker.json b/test/fixtures/agents/gsd-plan-checker.json new file mode 100644 index 0000000..236715a --- /dev/null +++ b/test/fixtures/agents/gsd-plan-checker.json @@ -0,0 +1 @@ +"## VERIFICATION PASSED\n\nPlan quality checks passed:\n- Frontmatter valid\n- Tasks specific and actionable\n- Dependencies correctly identified\n- must_haves present" diff --git a/test/fixtures/agents/gsd-planner.json b/test/fixtures/agents/gsd-planner.json new file mode 100644 index 0000000..5611620 --- /dev/null +++ b/test/fixtures/agents/gsd-planner.json @@ -0,0 +1 @@ +"## PLANNING COMPLETE\n\nPlan created successfully.\n\n- 1 plan in 1 wave\n- Tasks: actionable and specific\n- Frontmatter: valid\n- Verification criteria: included" diff --git a/test/fixtures/agents/gsd-verifier.json b/test/fixtures/agents/gsd-verifier.json new file mode 100644 index 0000000..d403b6e --- /dev/null +++ b/test/fixtures/agents/gsd-verifier.json @@ -0,0 +1 @@ +"## VERIFICATION PASSED\n\nAll must_haves confirmed:\n- Implementation matches plan\n- Files exist at specified paths\n- No regressions detected" diff --git a/test/fixtures/github/board-item.json b/test/fixtures/github/board-item.json new file mode 100644 index 0000000..cefad09 --- /dev/null +++ b/test/fixtures/github/board-item.json @@ -0,0 +1 @@ +"PVT_kwDOABC123" diff --git a/test/fixtures/github/discussion-create.json b/test/fixtures/github/discussion-create.json new file mode 
100644 index 0000000..b20a069 --- /dev/null +++ b/test/fixtures/github/discussion-create.json @@ -0,0 +1,3 @@ +{ + "url": "https://github.com/snipcodeit/mgw/discussions/99" +} diff --git a/test/fixtures/github/graphql-board-mutation.json b/test/fixtures/github/graphql-board-mutation.json new file mode 100644 index 0000000..7616a6b --- /dev/null +++ b/test/fixtures/github/graphql-board-mutation.json @@ -0,0 +1,9 @@ +{ + "data": { + "updateProjectV2ItemFieldValue": { + "projectV2Item": { + "id": "PVTI_kwDOABC123" + } + } + } +} diff --git a/test/fixtures/github/issue-comment.json b/test/fixtures/github/issue-comment.json new file mode 100644 index 0000000..e16c76d --- /dev/null +++ b/test/fixtures/github/issue-comment.json @@ -0,0 +1 @@ +"" diff --git a/test/fixtures/github/issue-edit.json b/test/fixtures/github/issue-edit.json new file mode 100644 index 0000000..fd710fa --- /dev/null +++ b/test/fixtures/github/issue-edit.json @@ -0,0 +1 @@ +"https://github.com/snipcodeit/mgw/issues/42" diff --git a/test/fixtures/github/issue-list.json b/test/fixtures/github/issue-list.json new file mode 100644 index 0000000..a35c131 --- /dev/null +++ b/test/fixtures/github/issue-list.json @@ -0,0 +1,26 @@ +[ + { + "number": 1, + "title": "First issue", + "state": "OPEN", + "labels": [{ "name": "bug" }], + "milestone": { "title": "v1.0", "number": 1 }, + "assignees": [{ "login": "hat" }], + "createdAt": "2026-01-01T00:00:00Z", + "url": "https://github.com/snipcodeit/mgw/issues/1", + "body": "First issue body", + "comments": [] + }, + { + "number": 2, + "title": "Second issue", + "state": "OPEN", + "labels": [], + "milestone": null, + "assignees": [], + "createdAt": "2026-01-02T00:00:00Z", + "url": "https://github.com/snipcodeit/mgw/issues/2", + "body": "Second issue body", + "comments": [] + } +] diff --git a/test/fixtures/github/issue-view.json b/test/fixtures/github/issue-view.json new file mode 100644 index 0000000..8c395a7 --- /dev/null +++ b/test/fixtures/github/issue-view.json 
@@ -0,0 +1,13 @@ +{ + "number": 42, + "title": "Fix everything", + "state": "OPEN", + "labels": [{ "name": "bug" }], + "milestone": { "title": "v1.0", "number": 1 }, + "assignees": [{ "login": "hat" }], + "body": "Body text", + "url": "https://github.com/snipcodeit/mgw/issues/42", + "comments": [], + "createdAt": "2026-01-01T00:00:00Z", + "updatedAt": "2026-03-01T00:00:00Z" +} diff --git a/test/fixtures/github/label-create.json b/test/fixtures/github/label-create.json new file mode 100644 index 0000000..e16c76d --- /dev/null +++ b/test/fixtures/github/label-create.json @@ -0,0 +1 @@ +"" diff --git a/test/fixtures/github/label-list.json b/test/fixtures/github/label-list.json new file mode 100644 index 0000000..29a42dc --- /dev/null +++ b/test/fixtures/github/label-list.json @@ -0,0 +1,5 @@ +[ + { "name": "bug", "color": "d73a4a", "description": "Something isn't working" }, + { "name": "mgw:triaged", "color": "0e8a16", "description": "Issue triaged and ready for pipeline" }, + { "name": "mgw:in-progress", "color": "1d76db", "description": "Pipeline actively executing" } +] diff --git a/test/fixtures/github/milestone-close.json b/test/fixtures/github/milestone-close.json new file mode 100644 index 0000000..daeb03e --- /dev/null +++ b/test/fixtures/github/milestone-close.json @@ -0,0 +1,11 @@ +{ + "number": 3, + "title": "v1.0", + "state": "closed", + "open_issues": 0, + "closed_issues": 7, + "description": "First milestone", + "html_url": "https://github.com/snipcodeit/mgw/milestone/3", + "created_at": "2026-01-01T00:00:00Z", + "updated_at": "2026-03-06T00:00:00Z" +} diff --git a/test/fixtures/github/milestone-create.json b/test/fixtures/github/milestone-create.json new file mode 100644 index 0000000..7b79653 --- /dev/null +++ b/test/fixtures/github/milestone-create.json @@ -0,0 +1,12 @@ +{ + "number": 4, + "title": "v2.0", + "state": "open", + "open_issues": 0, + "closed_issues": 0, + "description": "Second milestone", + "html_url": 
"https://github.com/snipcodeit/mgw/milestone/4", + "id": 98765432, + "created_at": "2026-03-06T00:00:00Z", + "updated_at": "2026-03-06T00:00:00Z" +} diff --git a/test/fixtures/github/milestone-view.json b/test/fixtures/github/milestone-view.json new file mode 100644 index 0000000..7e75491 --- /dev/null +++ b/test/fixtures/github/milestone-view.json @@ -0,0 +1,11 @@ +{ + "number": 3, + "title": "v1.0", + "state": "open", + "open_issues": 2, + "closed_issues": 5, + "description": "First milestone", + "html_url": "https://github.com/snipcodeit/mgw/milestone/3", + "created_at": "2026-01-01T00:00:00Z", + "updated_at": "2026-03-01T00:00:00Z" +} diff --git a/test/fixtures/github/pr-create.json b/test/fixtures/github/pr-create.json new file mode 100644 index 0000000..12199d8 --- /dev/null +++ b/test/fixtures/github/pr-create.json @@ -0,0 +1 @@ +"https://github.com/snipcodeit/mgw/pull/99" diff --git a/test/fixtures/github/pr-view.json b/test/fixtures/github/pr-view.json new file mode 100644 index 0000000..2526983 --- /dev/null +++ b/test/fixtures/github/pr-view.json @@ -0,0 +1,10 @@ +{ + "number": 99, + "title": "feat: add mock github layer", + "state": "OPEN", + "mergedAt": null, + "url": "https://github.com/snipcodeit/mgw/pull/99", + "headRefName": "issue/247-mock-github", + "baseRefName": "main", + "body": "Implements mock GitHub API layer" +} diff --git a/test/fixtures/github/rate-limit-low.json b/test/fixtures/github/rate-limit-low.json new file mode 100644 index 0000000..646318a --- /dev/null +++ b/test/fixtures/github/rate-limit-low.json @@ -0,0 +1,22 @@ +{ + "resources": { + "core": { + "remaining": 50, + "limit": 5000, + "reset": 1700000000, + "used": 4950 + }, + "search": { + "remaining": 5, + "limit": 30, + "reset": 1700000060, + "used": 25 + } + }, + "rate": { + "remaining": 50, + "limit": 5000, + "reset": 1700000000, + "used": 4950 + } +} diff --git a/test/fixtures/github/rate-limit.json b/test/fixtures/github/rate-limit.json new file mode 100644 index 
0000000..61b0521 --- /dev/null +++ b/test/fixtures/github/rate-limit.json @@ -0,0 +1,22 @@ +{ + "resources": { + "core": { + "remaining": 4999, + "limit": 5000, + "reset": 1700000000, + "used": 1 + }, + "search": { + "remaining": 29, + "limit": 30, + "reset": 1700000060, + "used": 1 + } + }, + "rate": { + "remaining": 4999, + "limit": 5000, + "reset": 1700000000, + "used": 1 + } +} diff --git a/test/fixtures/github/repo-meta.json b/test/fixtures/github/repo-meta.json new file mode 100644 index 0000000..1d42641 --- /dev/null +++ b/test/fixtures/github/repo-meta.json @@ -0,0 +1,10 @@ +{ + "id": "R_kgDOABC", + "discussionCategories": { + "nodes": [ + { "id": "DIC_kwDOABC", "name": "Announcements" }, + { "id": "DIC_kwDOXYZ", "name": "General" }, + { "id": "DIC_kwDOQ&A", "name": "Q&A" } + ] + } +} diff --git a/test/fixtures/project-state/aligned.json b/test/fixtures/project-state/aligned.json new file mode 100644 index 0000000..12a1267 --- /dev/null +++ b/test/fixtures/project-state/aligned.json @@ -0,0 +1,8 @@ +{ + "milestones": [ + { "name": "v1 — Core Features", "issues": [] }, + { "name": "v2 — Polish", "issues": [] } + ], + "current_milestone": 1, + "active_gsd_milestone": "v1" +} diff --git a/test/fixtures/project-state/diverged.json b/test/fixtures/project-state/diverged.json new file mode 100644 index 0000000..a35e005 --- /dev/null +++ b/test/fixtures/project-state/diverged.json @@ -0,0 +1,11 @@ +{ + "milestones": [ + { "name": "v1 — Core", "issues": [] }, + { "name": "v2 — Features", "issues": [] }, + { "name": "v3 — Polish", "issues": [] }, + { "name": "v4 — Hardening", "issues": [] }, + { "name": "v5 — Launch", "issues": [] } + ], + "current_milestone": 1, + "active_gsd_milestone": "v1" +} diff --git a/test/fixtures/project-state/extend.json b/test/fixtures/project-state/extend.json new file mode 100644 index 0000000..48f49e1 --- /dev/null +++ b/test/fixtures/project-state/extend.json @@ -0,0 +1,8 @@ +{ + "milestones": [ + { "name": "v1 — Core Features", 
"issues": [] }, + { "name": "v2 — Polish", "issues": [] } + ], + "current_milestone": 3, + "active_gsd_milestone": null +} diff --git a/test/fixtures/project-state/milestone-execution.json b/test/fixtures/project-state/milestone-execution.json new file mode 100644 index 0000000..7980ccb --- /dev/null +++ b/test/fixtures/project-state/milestone-execution.json @@ -0,0 +1,79 @@ +{ + "project": { + "name": "test-project", + "repo": "snipcodeit/mgw", + "coauthor": "Test User " + }, + "current_milestone": 1, + "active_gsd_milestone": "v1.0", + "milestones": [ + { + "name": "v1 — Test Milestone", + "github_number": 10, + "gsd_milestone_id": "v1.0", + "gsd_state": "active", + "roadmap_archived_at": null, + "issues": [ + { + "github_number": 101, + "title": "Set up base infrastructure", + "phase_number": 1, + "phase_name": "Base Setup", + "gsd_route": "plan-phase", + "pipeline_stage": "new", + "depends_on_slugs": [], + "board_item_id": null + }, + { + "github_number": 102, + "title": "Add core logic layer", + "phase_number": 2, + "phase_name": "Core Logic", + "gsd_route": "plan-phase", + "pipeline_stage": "new", + "depends_on_slugs": ["set-up-base-infrastructure"], + "board_item_id": null + }, + { + "github_number": 103, + "title": "Implement API endpoints", + "phase_number": 3, + "phase_name": "API Layer", + "gsd_route": "plan-phase", + "pipeline_stage": "new", + "depends_on_slugs": ["add-core-logic-layer"], + "board_item_id": null + }, + { + "github_number": 104, + "title": "Add documentation", + "phase_number": 4, + "phase_name": "Documentation", + "gsd_route": "gsd:quick", + "pipeline_stage": "new", + "depends_on_slugs": [], + "board_item_id": null + } + ] + }, + { + "name": "v2 — Next Milestone", + "github_number": 11, + "gsd_milestone_id": null, + "gsd_state": "planned", + "roadmap_archived_at": null, + "issues": [ + { + "github_number": 201, + "title": "Build UI layer", + "phase_number": 1, + "phase_name": "UI", + "gsd_route": "plan-phase", + "pipeline_stage": "new", 
+ "depends_on_slugs": [], + "board_item_id": null + } + ] + } + ] +} diff --git a/test/loadFixture.js b/test/loadFixture.js new file mode 100644 index 0000000..8c3eefc --- /dev/null +++ b/test/loadFixture.js @@ -0,0 +1,65 @@ +/** + * test/loadFixture.js — Fixture loader helper + * + * Loads JSON fixture files from test/fixtures/. + * + * Usage: + * + * import { loadFixture } from './loadFixture.js'; + * + * const issueData = loadFixture('github/issue-view'); + * const plannerOutput = loadFixture('agents/gsd-planner'); + * + * Fixture file resolution: + * loadFixture('github/issue-view') → test/fixtures/github/issue-view.json + * loadFixture('agents/gsd-planner') → test/fixtures/agents/gsd-planner.json + * loadFixture('my-fixture') → test/fixtures/my-fixture.json + * + * @param {string} name - Fixture name, optionally prefixed with subdirectory. + * Forward slashes are used as path separators (platform-independent). + * @returns {unknown} Parsed JSON content of the fixture file. + * @throws {Error} if the fixture file is not found. + * @throws {Error} if the fixture file contains invalid JSON. + */ + +import { fileURLToPath } from 'url'; +import path from 'path'; +import fs from 'fs'; + +const __dirname = path.dirname(fileURLToPath(import.meta.url)); + +/** + * Absolute path to the test/fixtures/ directory. + * All fixture names are resolved relative to this directory. + */ +export const FIXTURES_DIR = path.resolve(__dirname, 'fixtures'); + +/** + * Load a fixture by name and return its parsed JSON content. + * + * @param {string} name - Fixture identifier, e.g. 'github/issue-view' or 'agents/gsd-planner' + * @returns {unknown} Parsed JSON value — may be an object, array, string, number, or boolean. 
+ */ +export function loadFixture(name) { + // Normalize forward slashes to platform path separator + const normalizedName = name.split('/').join(path.sep); + const fixturePath = path.resolve(FIXTURES_DIR, `${normalizedName}.json`); + + if (!fs.existsSync(fixturePath)) { + throw new Error( + `loadFixture: fixture not found: "${name}"\n` + + ` Looked for: ${fixturePath}\n` + + ` Fixtures directory: ${FIXTURES_DIR}` + ); + } + + const raw = fs.readFileSync(fixturePath, 'utf-8').trim(); + + try { + return JSON.parse(raw); + } catch (err) { + throw new Error( + `loadFixture: fixture "${name}" is not valid JSON (${fixturePath}): ${err.message}` + ); + } +} diff --git a/test/milestone-execution.test.js b/test/milestone-execution.test.js new file mode 100644 index 0000000..545621e --- /dev/null +++ b/test/milestone-execution.test.js @@ -0,0 +1,992 @@ +/** + * test/milestone-execution.test.js — Scenario tests for mgw:milestone execution loop + * + * Tests the orchestration logic described in commands/milestone.md: + * - Dependency-ordered execution (topological sort via lib/state.cjs) + * - Failed-issue recovery: Retry (resetRetryState), Skip (blocked dependents), Abort + * - Rate limit guard (REMAINING < ESTIMATED_CALLS → cap MAX_ISSUES) + * - Next-milestone GSD linkage check (linked vs unlinked gsd_milestone_id) + * + * Isolation strategy: + * - lib/state.cjs is loaded fresh (cache evicted) per describe block + * - lib/retry.cjs is loaded fresh per describe block + * - mock-github intercepts gh CLI calls; mock-gsd-agent records agent spawns + * - fs.mkdtempSync() creates a real tmp dir; process.cwd() override sandboxes it + * - Fixtures in test/fixtures/project-state/milestone-execution.json seed project.json + * - test/fixtures/github/rate-limit-low.json provides a constrained rate limit scenario + * - afterAll() removes tmp dirs + * + * No live GitHub tokens or Claude API calls are used. 
+ */ + +import { describe, it, expect, beforeEach, afterEach, beforeAll, afterAll } from 'vitest'; +import { createRequire } from 'module'; +import { fileURLToPath } from 'url'; +import path from 'path'; +import fs from 'fs'; +import os from 'os'; + +const _require = createRequire(import.meta.url); +const __dirname = path.dirname(fileURLToPath(import.meta.url)); +const REPO_ROOT = path.resolve(__dirname, '..'); + +const STATE_MODULE = path.join(REPO_ROOT, 'lib', 'state.cjs'); +const RETRY_MODULE = path.join(REPO_ROOT, 'lib', 'retry.cjs'); +const MOCK_GITHUB_MODULE = path.join(REPO_ROOT, 'lib', 'mock-github.cjs'); +const MOCK_AGENT_MODULE = path.join(REPO_ROOT, 'lib', 'mock-gsd-agent.cjs'); + +const FIXTURE_DIR = path.join(__dirname, 'fixtures', 'project-state'); +const MILESTONE_FIXTURE = path.join(FIXTURE_DIR, 'milestone-execution.json'); + +// --------------------------------------------------------------------------- +// Helpers +// --------------------------------------------------------------------------- + +/** Reload lib/state.cjs fresh (evict cache). */ +function loadState() { + delete _require.cache[STATE_MODULE]; + return _require(STATE_MODULE); +} + +/** Reload lib/retry.cjs fresh (evict cache). */ +function loadRetry() { + delete _require.cache[RETRY_MODULE]; + return _require(RETRY_MODULE); +} + +/** Override process.cwd to return tmpDir. Returns restore function. */ +function overrideCwd(tmpDir) { + const original = process.cwd.bind(process); + process.cwd = () => tmpDir; + return () => { process.cwd = original; }; +} + +/** Create .mgw/active/ and .mgw/project.json inside tmpDir. 
*/ +function seedMgwDir(tmpDir, projectFixture) { + const mgwDir = path.join(tmpDir, '.mgw'); + const activeDir = path.join(mgwDir, 'active'); + fs.mkdirSync(activeDir, { recursive: true }); + fs.mkdirSync(path.join(mgwDir, 'completed'), { recursive: true }); + fs.writeFileSync( + path.join(mgwDir, 'project.json'), + JSON.stringify(projectFixture, null, 2) + ); + return { mgwDir, activeDir }; +} + +/** Write a minimal active issue state file into tmpDir/.mgw/active/. */ +function writeIssueState(tmpDir, issueNum, slug, overrides = {}) { + const activeDir = path.join(tmpDir, '.mgw', 'active'); + fs.mkdirSync(activeDir, { recursive: true }); + const state = Object.assign( + { + issue_number: issueNum, + slug, + title: `Test issue ${issueNum}`, + pipeline_stage: 'new', + gsd_route: 'plan-phase', + retry_count: 0, + dead_letter: false, + last_failure_class: null, + checkpoint: null, + }, + overrides + ); + const filePath = path.join(activeDir, `${issueNum}-${slug}.json`); + fs.writeFileSync(filePath, JSON.stringify(state, null, 2)); + return filePath; +} + +/** Read active issue state from tmpDir/.mgw/active/. */ +function readIssueState(tmpDir, issueNum) { + const activeDir = path.join(tmpDir, '.mgw', 'active'); + const entries = fs.readdirSync(activeDir); + const match = entries.find(f => f.startsWith(`${issueNum}-`) && f.endsWith('.json')); + if (!match) throw new Error(`No state file for #${issueNum} in ${activeDir}`); + return JSON.parse(fs.readFileSync(path.join(activeDir, match), 'utf-8')); +} + +/** Load the milestone-execution fixture. */ +function loadMilestoneFixture() { + return JSON.parse(fs.readFileSync(MILESTONE_FIXTURE, 'utf-8')); +} + +/** Remove a directory tree if it exists. 
*/ +function removeTmpDir(dir) { + if (fs.existsSync(dir)) { + fs.rmSync(dir, { recursive: true, force: true }); + } +} + +// --------------------------------------------------------------------------- +// Scenario 1: Dependency-ordered execution (topological sort) +// --------------------------------------------------------------------------- + +describe('dependency-order: topological sort respects blocked-by links', () => { + let tmpDir; + let restoreCwd; + let mockGitHub; + let mockAgent; + + beforeAll(() => { + tmpDir = fs.mkdtempSync(path.join(os.tmpdir(), 'mgw-milestone-dep-test-')); + }); + + beforeEach(() => { + restoreCwd = overrideCwd(tmpDir); + mockGitHub = _require(MOCK_GITHUB_MODULE); + mockAgent = _require(MOCK_AGENT_MODULE); + mockGitHub.activate(); + mockAgent.activate(); + }); + + afterEach(() => { + mockGitHub.deactivate(); + mockAgent.deactivate(); + restoreCwd(); + delete _require.cache[STATE_MODULE]; + }); + + afterAll(() => { + removeTmpDir(tmpDir); + }); + + it('sorts four issues respecting a linear dependency chain', () => { + const state = loadState(); + + // Issues: 101 ← 102 ← 103 (dependency chain), 104 standalone + const issues = [ + { number: 101, title: 'Set up base infrastructure' }, + { number: 102, title: 'Add core logic layer' }, + { number: 103, title: 'Implement API endpoints' }, + { number: 104, title: 'Add documentation' }, + ]; + + // 102 blocked by 101, 103 blocked by 102 + const links = [ + { a: 'issue:#102', b: 'issue:#101', type: 'blocked-by' }, + { a: 'issue:#103', b: 'issue:#102', type: 'blocked-by' }, + ]; + + const sorted = state.topologicalSort(issues, links); + const nums = sorted.map(i => i.number); + + // 101 must come before 102 + expect(nums.indexOf(101)).toBeLessThan(nums.indexOf(102)); + // 102 must come before 103 + expect(nums.indexOf(102)).toBeLessThan(nums.indexOf(103)); + // All 4 issues appear exactly once + expect(nums).toHaveLength(4); + expect(new Set(nums).size).toBe(4); + }); + + it('returns all 
issues when there are no dependency links', () => { + const state = loadState(); + + const issues = [ + { number: 101, title: 'Issue A' }, + { number: 102, title: 'Issue B' }, + { number: 103, title: 'Issue C' }, + ]; + + const sorted = state.topologicalSort(issues, []); + expect(sorted).toHaveLength(3); + // Order is preserved (original order) when no dependencies + expect(sorted.map(i => i.number)).toEqual([101, 102, 103]); + }); + + it('sorts standalone issues before their dependents', () => { + const state = loadState(); + + // 104 has no deps; should appear before 102 and 103 OR at least before its own dependents + const issues = [ + { number: 101, title: 'Set up base infrastructure' }, + { number: 102, title: 'Add core logic layer' }, + { number: 103, title: 'Implement API endpoints' }, + { number: 104, title: 'Add documentation' }, + ]; + + // Only 102 → 101, 103 → 102 chain. 104 is independent. + const links = [ + { a: 'issue:#102', b: 'issue:#101', type: 'blocked-by' }, + { a: 'issue:#103', b: 'issue:#102', type: 'blocked-by' }, + ]; + + const sorted = state.topologicalSort(issues, links); + const nums = sorted.map(i => i.number); + + // Core invariant: dependency order respected + expect(nums.indexOf(101)).toBeLessThan(nums.indexOf(102)); + expect(nums.indexOf(102)).toBeLessThan(nums.indexOf(103)); + // 104 has no deps and no dependents — it appears somewhere in the result + expect(nums).toContain(104); + }); + + it('handles a single issue with no links', () => { + const state = loadState(); + + const issues = [{ number: 55, title: 'Solo issue' }]; + const sorted = state.topologicalSort(issues, []); + + expect(sorted).toHaveLength(1); + expect(sorted[0].number).toBe(55); + }); + + it('ignores non-blocked-by link types during sort', () => { + const state = loadState(); + + const issues = [ + { number: 101, title: 'Issue A' }, + { number: 102, title: 'Issue B' }, + ]; + + // 'related' links should not affect sort order + const links = [ + { a: 'issue:#102', 
b: 'issue:#101', type: 'related' }, + ]; + + const sorted = state.topologicalSort(issues, links); + // Without blocked-by constraints, order follows original + expect(sorted.map(i => i.number)).toEqual([101, 102]); + }); +}); + +// --------------------------------------------------------------------------- +// Scenario 2: Failed issue → Retry (resetRetryState, re-run) +// --------------------------------------------------------------------------- + +describe('failed-issue-retry: resetRetryState clears retry fields for re-run', () => { + let tmpDir; + let restoreCwd; + + beforeAll(() => { + tmpDir = fs.mkdtempSync(path.join(os.tmpdir(), 'mgw-milestone-retry-test-')); + }); + + beforeEach(() => { + restoreCwd = overrideCwd(tmpDir); + delete _require.cache[STATE_MODULE]; + delete _require.cache[RETRY_MODULE]; + }); + + afterEach(() => { + restoreCwd(); + delete _require.cache[STATE_MODULE]; + delete _require.cache[RETRY_MODULE]; + }); + + afterAll(() => { + removeTmpDir(tmpDir); + }); + + it('resetRetryState clears retry_count, dead_letter, and last_failure_class', () => { + const retry = loadRetry(); + + const failedState = { + issue_number: 102, + pipeline_stage: 'failed', + retry_count: 2, + dead_letter: false, + last_failure_class: 'transient', + }; + + const reset = retry.resetRetryState(failedState); + + expect(reset.retry_count).toBe(0); + expect(reset.dead_letter).toBe(false); + expect(reset.last_failure_class).toBeNull(); + // pipeline_stage is NOT changed by resetRetryState — caller sets it separately + expect(reset.pipeline_stage).toBe('failed'); + }); + + it('resetRetryState clears dead_letter=true (dead-lettered issues become retriable)', () => { + const retry = loadRetry(); + + const deadLettered = { + issue_number: 102, + pipeline_stage: 'failed', + retry_count: 3, + dead_letter: true, + last_failure_class: 'permanent', + }; + + const reset = retry.resetRetryState(deadLettered); + + expect(reset.dead_letter).toBe(false); + 
expect(reset.retry_count).toBe(0); + expect(reset.last_failure_class).toBeNull(); + }); + + it('canRetry returns true after resetRetryState', () => { + const retry = loadRetry(); + + const exhausted = { + issue_number: 102, + retry_count: 3, + dead_letter: false, + last_failure_class: 'transient', + }; + + // Before reset: not retryable (retry_count at MAX_RETRIES) + expect(retry.canRetry(exhausted)).toBe(false); + + // After reset: retryable + const reset = retry.resetRetryState(exhausted); + expect(retry.canRetry(reset)).toBe(true); + }); + + it('resetRetryState is immutable — does not modify the original state object', () => { + const retry = loadRetry(); + + const original = { + issue_number: 102, + retry_count: 2, + dead_letter: true, + last_failure_class: 'transient', + }; + const originalCopy = Object.assign({}, original); + + const reset = retry.resetRetryState(original); + + // Original unchanged + expect(original).toEqual(originalCopy); + // Reset is a new object + expect(reset).not.toBe(original); + }); + + it('canRetry is false when retry_count equals MAX_RETRIES (3)', () => { + const retry = loadRetry(); + + const atLimit = { retry_count: 3, dead_letter: false }; + expect(retry.canRetry(atLimit)).toBe(false); + }); + + it('canRetry is true when retry_count is below MAX_RETRIES', () => { + const retry = loadRetry(); + + expect(retry.canRetry({ retry_count: 0, dead_letter: false })).toBe(true); + expect(retry.canRetry({ retry_count: 1, dead_letter: false })).toBe(true); + expect(retry.canRetry({ retry_count: 2, dead_letter: false })).toBe(true); + }); + + it('pipeline_stage should be set to triaged after retry — resetRetryState + caller sets stage', () => { + const retry = loadRetry(); + + const failedState = { + issue_number: 102, + pipeline_stage: 'failed', + retry_count: 1, + dead_letter: false, + last_failure_class: 'transient', + }; + + // resetRetryState does NOT change pipeline_stage — the milestone loop does + const reset = 
retry.resetRetryState(failedState); + const readyForRetry = Object.assign({}, reset, { pipeline_stage: 'triaged' }); + + expect(readyForRetry.pipeline_stage).toBe('triaged'); + expect(readyForRetry.retry_count).toBe(0); + expect(retry.canRetry(readyForRetry)).toBe(true); + }); +}); + +// --------------------------------------------------------------------------- +// Scenario 3: Failed issue → Skip (marks blocked, continues loop) +// --------------------------------------------------------------------------- + +describe('failed-issue-skip: dependents blocked when blocker in FAILED_ISSUES', () => { + let tmpDir; + let restoreCwd; + + beforeAll(() => { + tmpDir = fs.mkdtempSync(path.join(os.tmpdir(), 'mgw-milestone-skip-test-')); + }); + + beforeEach(() => { + restoreCwd = overrideCwd(tmpDir); + delete _require.cache[STATE_MODULE]; + }); + + afterEach(() => { + restoreCwd(); + delete _require.cache[STATE_MODULE]; + }); + + afterAll(() => { + removeTmpDir(tmpDir); + }); + + it('issue with a failed blocker in FAILED_ISSUES should be skipped', () => { + // Simulate the milestone execute_loop blocking logic: + // IS_BLOCKED = true when any dependency appears in FAILED_ISSUES + + const FAILED_ISSUES = [101]; // issue 101 failed + + // Issue 102 depends on 101 (slug-based dependency) + const issueData = { + github_number: 102, + title: 'Add core logic layer', + depends_on_slugs: ['set-up-base-infrastructure'], + }; + + // Slug for issue 101 + const issue101Slug = 'set-up-base-infrastructure'; + const issue101Number = 101; + + // Simulate the blocking check: IS_BLOCKED when any failed issue's slug + // matches a dependency slug of the current issue + const issueMap = new Map([ + [101, { slug: issue101Slug, github_number: 101 }], + ]); + + let isBlocked = false; + for (const failedNum of FAILED_ISSUES) { + const failedIssue = issueMap.get(failedNum); + if (!failedIssue) continue; + if (issueData.depends_on_slugs.includes(failedIssue.slug)) { + isBlocked = true; + break; + } + 
} + + expect(isBlocked).toBe(true); + }); + + it('issue with no failed blockers should NOT be blocked', () => { + const FAILED_ISSUES = [101]; // issue 101 failed + + // Issue 104 has no deps — should run regardless + const issueData = { + github_number: 104, + title: 'Add documentation', + depends_on_slugs: [], + }; + + const issueMap = new Map([ + [101, { slug: 'set-up-base-infrastructure', github_number: 101 }], + ]); + + let isBlocked = false; + for (const failedNum of FAILED_ISSUES) { + const failedIssue = issueMap.get(failedNum); + if (!failedIssue) continue; + if (issueData.depends_on_slugs.includes(failedIssue.slug)) { + isBlocked = true; + break; + } + } + + expect(isBlocked).toBe(false); + }); + + it('blocked issues are excluded from completed count and added to BLOCKED_ISSUES', () => { + const COMPLETED_ISSUES = []; + const BLOCKED_ISSUES = []; + const FAILED_ISSUES = [101]; + const SKIPPED_ISSUES = []; + + // Simulate loop behavior: issue 102 is blocked by failed 101 + const issueMap = new Map([ + [101, { slug: 'set-up-base-infrastructure', github_number: 101 }], + ]); + + const issue102 = { + github_number: 102, + title: 'Add core logic layer', + depends_on_slugs: ['set-up-base-infrastructure'], + }; + + // Check blocking + let isBlocked = false; + for (const failedNum of FAILED_ISSUES) { + const fi = issueMap.get(failedNum); + if (fi && issue102.depends_on_slugs.includes(fi.slug)) { + isBlocked = true; + break; + } + } + + if (isBlocked) { + BLOCKED_ISSUES.push(issue102.github_number); + // continue — do NOT add to completed + } else { + COMPLETED_ISSUES.push(issue102.github_number); + } + + expect(BLOCKED_ISSUES).toContain(102); + expect(COMPLETED_ISSUES).not.toContain(102); + expect(SKIPPED_ISSUES).not.toContain(102); + }); + + it('skip does not affect issues that are not dependents of the failed issue', () => { + const FAILED_ISSUES = [102]; // only 102 failed + const BLOCKED_ISSUES = []; + + const issueMap = new Map([ + [102, { slug: 
'add-core-logic-layer', github_number: 102 }], + ]); + + // Issue 104 has no deps on 102 + const issue104 = { + github_number: 104, + title: 'Add documentation', + depends_on_slugs: [], + }; + + let isBlocked = false; + for (const failedNum of FAILED_ISSUES) { + const fi = issueMap.get(failedNum); + if (fi && issue104.depends_on_slugs.includes(fi.slug)) { + isBlocked = true; + break; + } + } + + if (isBlocked) BLOCKED_ISSUES.push(104); + + expect(BLOCKED_ISSUES).not.toContain(104); + }); +}); + +// --------------------------------------------------------------------------- +// Scenario 4: Failed issue → Abort (stops loop) +// --------------------------------------------------------------------------- + +describe('failed-issue-abort: abort choice stops the execution loop', () => { + it('abort stops loop — no subsequent issues executed', () => { + // Simulate the INTERACTIVE abort path from the execute_loop. + // When user chooses Abort, the loop breaks. We model this as a + // function returning whether to continue after each issue result. 
+ + function handleIssueResult(issueNumber, pipelineStage, userChoice, abortFlag) { + if (userChoice === 'Abort') { + return { shouldContinue: false, aborted: true }; + } + return { shouldContinue: true, aborted: false }; + } + + // Issue 101 completes but user chooses Abort + const result = handleIssueResult(101, 'done', 'Abort', false); + expect(result.shouldContinue).toBe(false); + expect(result.aborted).toBe(true); + }); + + it('abort after first issue means remaining issues are not run', () => { + const issues = [101, 102, 103, 104]; + const executed = []; + let aborted = false; + + for (const issueNum of issues) { + if (aborted) break; + + executed.push(issueNum); + + // Simulate: user aborts after issue 101 + if (issueNum === 101) { + aborted = true; + break; + } + } + + expect(executed).toEqual([101]); + expect(executed).not.toContain(102); + expect(executed).not.toContain(103); + expect(executed).not.toContain(104); + }); + + it('abort is distinguishable from skip (skip continues, abort stops)', () => { + // Skip: moves to next issue (continue in loop) + // Abort: stops the entire loop (break) + + const issues = [101, 102, 103]; + const executedWithSkip = []; + const executedWithAbort = []; + + // Skip scenario: skip 102, continue to 103 + for (const issueNum of issues) { + if (issueNum === 102) { + // continue (skip) + continue; + } + executedWithSkip.push(issueNum); + } + + // Abort scenario: stop after 101 + let aborted = false; + for (const issueNum of issues) { + if (aborted) break; + executedWithAbort.push(issueNum); + if (issueNum === 101) { + aborted = true; + break; + } + } + + // Skip: 101 and 103 run, 102 skipped + expect(executedWithSkip).toEqual([101, 103]); + expect(executedWithSkip).not.toContain(102); + + // Abort: only 101 runs + expect(executedWithAbort).toEqual([101]); + expect(executedWithAbort).not.toContain(102); + expect(executedWithAbort).not.toContain(103); + }); + + it('FAILED_ISSUES receives failed issue before abort check', () 
=> { + // In the execute_loop, after an issue fails, it is pushed to FAILED_ISSUES + // regardless of whether the user then chooses Abort. + const FAILED_ISSUES = []; + const COMPLETED_ISSUES = []; + + // Issue 101 fails + const issue101Result = { prNumber: null }; // no PR created + if (!issue101Result.prNumber) { + FAILED_ISSUES.push(101); + } else { + COMPLETED_ISSUES.push(101); + } + + // User chooses Abort — loop breaks, but FAILED_ISSUES already has 101 + const aborted = true; + + expect(FAILED_ISSUES).toContain(101); + expect(COMPLETED_ISSUES).not.toContain(101); + expect(aborted).toBe(true); + }); +}); + +// --------------------------------------------------------------------------- +// Scenario 5: Rate limit guard (REMAINING < ESTIMATED_CALLS → cap MAX_ISSUES) +// --------------------------------------------------------------------------- + +describe('rate-limit-guard: caps MAX_ISSUES when REMAINING < ESTIMATED_CALLS', () => { + let mockGitHub; + + beforeEach(() => { + mockGitHub = _require(MOCK_GITHUB_MODULE); + mockGitHub.activate(); + }); + + afterEach(() => { + mockGitHub.deactivate(); + }); + + it('calculates MAX_ISSUES as REMAINING / 25 when rate limit is constrained', () => { + // From milestone.md: ESTIMATED_CALLS = UNFINISHED_COUNT * 25, SAFE_ISSUES = REMAINING / 25 + const REMAINING = 50; + const UNFINISHED_COUNT = 4; + const CALLS_PER_ISSUE = 25; + + const ESTIMATED_CALLS = UNFINISHED_COUNT * CALLS_PER_ISSUE; // 100 + const SAFE_ISSUES = Math.floor(REMAINING / CALLS_PER_ISSUE); // 2 + const MAX_ISSUES = REMAINING < ESTIMATED_CALLS ? 
SAFE_ISSUES : UNFINISHED_COUNT; + + expect(ESTIMATED_CALLS).toBe(100); + expect(SAFE_ISSUES).toBe(2); + expect(MAX_ISSUES).toBe(2); + expect(MAX_ISSUES).toBeLessThan(UNFINISHED_COUNT); + }); + + it('does NOT cap MAX_ISSUES when REMAINING >= ESTIMATED_CALLS', () => { + const REMAINING = 4999; + const UNFINISHED_COUNT = 4; + const CALLS_PER_ISSUE = 25; + + const ESTIMATED_CALLS = UNFINISHED_COUNT * CALLS_PER_ISSUE; // 100 + const SAFE_ISSUES = Math.floor(REMAINING / CALLS_PER_ISSUE); // 199 + const MAX_ISSUES = REMAINING < ESTIMATED_CALLS ? SAFE_ISSUES : UNFINISHED_COUNT; + + expect(MAX_ISSUES).toBe(UNFINISHED_COUNT); // No cap + expect(MAX_ISSUES).toBe(4); + }); + + it('loop breaks when ISSUES_RUN reaches MAX_ISSUES', () => { + const MAX_ISSUES = 2; + const issues = [101, 102, 103, 104]; + const executed = []; + let ISSUES_RUN = 0; + + for (const issueNum of issues) { + if (ISSUES_RUN >= MAX_ISSUES) { + // Rate limit cap reached — stop + break; + } + executed.push(issueNum); + ISSUES_RUN++; + } + + expect(executed).toHaveLength(MAX_ISSUES); + expect(executed).toEqual([101, 102]); + expect(executed).not.toContain(103); + expect(executed).not.toContain(104); + }); + + it('mock-github returns low rate limit from rate-limit-low fixture', () => { + // Override mock to return the low rate limit fixture + mockGitHub.setResponse('gh api rate_limit', JSON.stringify({ + resources: { + core: { remaining: 50, limit: 5000, reset: 1700000000, used: 4950 }, + }, + rate: { remaining: 50, limit: 5000, reset: 1700000000, used: 4950 }, + })); + + // Simulate the rate limit check from milestone.md + // In real code: RATE_JSON=$(gh api rate_limit --jq '.resources.core') + // Here we verify the mock intercepts the call correctly + + const callLog = mockGitHub.getCallLog(); + // No calls made yet — just verifying override was set + expect(callLog).toHaveLength(0); + + // Verify the low limit logic + const REMAINING = 50; + const UNFINISHED_COUNT = 4; + const ESTIMATED_CALLS = 
UNFINISHED_COUNT * 25; + const rateLimitTriggered = REMAINING < ESTIMATED_CALLS; + + expect(rateLimitTriggered).toBe(true); + }); + + it('SAFE_ISSUES is 0 when REMAINING < 25 (one issue cost)', () => { + const REMAINING = 10; + const SAFE_ISSUES = Math.floor(REMAINING / 25); // 0 + + expect(SAFE_ISSUES).toBe(0); + + // With MAX_ISSUES = 0, loop never executes + const issues = [101, 102, 103]; + const executed = []; + let ISSUES_RUN = 0; + for (const issueNum of issues) { + if (ISSUES_RUN >= SAFE_ISSUES) break; // breaks immediately + executed.push(issueNum); + ISSUES_RUN++; + } + + expect(executed).toHaveLength(0); + }); + + it('rate limit check is bypassed when gh api call fails (undefined REMAINING)', () => { + // If gh api rate_limit fails, RATE_JSON is empty — MAX_ISSUES = UNFINISHED_COUNT + const UNFINISHED_COUNT = 4; + const RATE_JSON = ''; // simulates failed gh api call + + // From milestone.md: if RATE_JSON is empty, skip check and proceed without cap + const MAX_ISSUES = RATE_JSON ? Math.floor(50 / 25) : UNFINISHED_COUNT; + + expect(MAX_ISSUES).toBe(UNFINISHED_COUNT); + }); +}); + +// --------------------------------------------------------------------------- +// Scenario 6: Next-milestone GSD linkage check +// --------------------------------------------------------------------------- + +describe('next-milestone-gsd-linkage: linked vs unlinked gsd_milestone_id', () => { + let tmpDir; + let restoreCwd; + + beforeAll(() => { + tmpDir = fs.mkdtempSync(path.join(os.tmpdir(), 'mgw-milestone-linkage-test-')); + }); + + beforeEach(() => { + restoreCwd = overrideCwd(tmpDir); + delete _require.cache[STATE_MODULE]; + }); + + afterEach(() => { + restoreCwd(); + delete _require.cache[STATE_MODULE]; + }); + + afterAll(() => { + removeTmpDir(tmpDir); + }); + + it('next milestone with gsd_milestone_id set → linked check reports linked', () => { + // Simulate the NEXT_MILESTONE_CHECK logic from milestone.md post_loop step. 
+ // linked:: when gsd_milestone_id is set + const nextMilestone = { + name: 'v2 — Next Milestone', + gsd_milestone_id: 'v2.0', + }; + + const gsdId = nextMilestone.gsd_milestone_id; + const name = nextMilestone.name; + + const checkResult = gsdId ? `linked:${name}:${gsdId}` : `unlinked:${name}`; + + expect(checkResult).toMatch(/^linked:/); + expect(checkResult).toContain('v2.0'); + expect(checkResult).toContain(name); + }); + + it('next milestone with null gsd_milestone_id → unlinked check reports unlinked', () => { + const nextMilestone = { + name: 'v2 — Next Milestone', + gsd_milestone_id: null, + }; + + const gsdId = nextMilestone.gsd_milestone_id; + const name = nextMilestone.name; + + const checkResult = gsdId ? `linked:${name}:${gsdId}` : `unlinked:${name}`; + + expect(checkResult).toMatch(/^unlinked:/); + expect(checkResult).toContain(name); + expect(checkResult).not.toContain('v2.0'); + }); + + it('no next milestone → reports none (all milestones complete)', () => { + // From milestone.md: if activeIdx >= milestones.length, output "none" + const milestones = [ + { name: 'v1', gsd_milestone_id: 'v1.0', gsd_state: 'completed' }, + ]; + // After completing last milestone, active pointer moves past end + const activeIdx = 1; // past end of array + + const nextMilestone = milestones[activeIdx] || null; + + const checkResult = nextMilestone + ? (nextMilestone.gsd_milestone_id + ? 
`linked:${nextMilestone.name}:${nextMilestone.gsd_milestone_id}` + : `unlinked:${nextMilestone.name}`) + : 'none'; + + expect(checkResult).toBe('none'); + }); + + it('reads next milestone correctly from milestone-execution fixture', () => { + const fixture = loadMilestoneFixture(); + + // milestone[0] is v1 (active, gsd_milestone_id = 'v1.0') + // milestone[1] is v2 (planned, gsd_milestone_id = null) + + const currentIdx = 0; // completing milestone 0 + const nextMilestone = fixture.milestones[currentIdx + 1]; + + expect(nextMilestone).toBeDefined(); + expect(nextMilestone.name).toBe('v2 — Next Milestone'); + expect(nextMilestone.gsd_milestone_id).toBeNull(); + + const checkResult = nextMilestone.gsd_milestone_id + ? `linked:${nextMilestone.name}:${nextMilestone.gsd_milestone_id}` + : `unlinked:${nextMilestone.name}`; + + expect(checkResult).toMatch(/^unlinked:/); + }); + + it('active milestone pointer advances correctly after milestone completion', () => { + const state = loadState(); + const fixture = loadMilestoneFixture(); + + // Seed project.json + const { mgwDir } = seedMgwDir(tmpDir, fixture); + + // Verify resolveActiveMilestoneIndex returns 0 (v1.0 is active) + delete _require.cache[STATE_MODULE]; + const freshState = loadState(); + const projectState = freshState.loadProjectState(); + + expect(projectState).not.toBeNull(); + const activeIdx = freshState.resolveActiveMilestoneIndex(projectState); + expect(activeIdx).toBe(0); + + // After completing milestone 0, active pointer should advance to 1 + const nextMilestone = projectState.milestones[activeIdx + 1]; + expect(nextMilestone).toBeDefined(); + expect(nextMilestone.gsd_milestone_id).toBeNull(); // unlinked + }); + + it('linked milestone with ROADMAP.md match is fully ready', () => { + // Simulate the full linked path check: + // linked:: → verify ROADMAP.md contains gsdId + const nextMilestone = { + name: 'v3 — Third Milestone', + gsd_milestone_id: 'v3.0', + }; + + // Simulate ROADMAP.md containing the 
GSD milestone ID + const roadmapContent = '# Roadmap\n\n## v3.0 — Third Milestone\n\nPhases...'; + const roadmapValid = roadmapContent.includes(nextMilestone.gsd_milestone_id); + + const checkResult = nextMilestone.gsd_milestone_id + ? `linked:${nextMilestone.name}:${nextMilestone.gsd_milestone_id}` + : `unlinked:${nextMilestone.name}`; + + expect(checkResult).toMatch(/^linked:/); + expect(roadmapValid).toBe(true); + }); +}); + +// --------------------------------------------------------------------------- +// Integration: project.json fixture integrity check +// --------------------------------------------------------------------------- + +describe('fixture integrity: milestone-execution.json is well-formed', () => { + it('fixture file exists and is valid JSON', () => { + expect(() => loadMilestoneFixture()).not.toThrow(); + const fixture = loadMilestoneFixture(); + expect(fixture).toBeDefined(); + expect(typeof fixture).toBe('object'); + }); + + it('fixture has two milestones', () => { + const fixture = loadMilestoneFixture(); + expect(Array.isArray(fixture.milestones)).toBe(true); + expect(fixture.milestones).toHaveLength(2); + }); + + it('first milestone has 4 issues with dependency relationships', () => { + const fixture = loadMilestoneFixture(); + const m1 = fixture.milestones[0]; + expect(m1.issues).toHaveLength(4); + + const issue101 = m1.issues.find(i => i.github_number === 101); + const issue102 = m1.issues.find(i => i.github_number === 102); + const issue103 = m1.issues.find(i => i.github_number === 103); + const issue104 = m1.issues.find(i => i.github_number === 104); + + expect(issue101).toBeDefined(); + expect(issue102).toBeDefined(); + expect(issue103).toBeDefined(); + expect(issue104).toBeDefined(); + + // Dependency chain: 102 depends on 101, 103 depends on 102 + expect(issue102.depends_on_slugs).toContain('set-up-base-infrastructure'); + expect(issue103.depends_on_slugs).toContain('add-core-logic-layer'); + // 101 and 104 have no dependencies + 
expect(issue101.depends_on_slugs).toHaveLength(0); + expect(issue104.depends_on_slugs).toHaveLength(0); + }); + + it('first milestone has gsd_milestone_id set (linked)', () => { + const fixture = loadMilestoneFixture(); + expect(fixture.milestones[0].gsd_milestone_id).toBe('v1.0'); + }); + + it('second milestone has null gsd_milestone_id (unlinked)', () => { + const fixture = loadMilestoneFixture(); + expect(fixture.milestones[1].gsd_milestone_id).toBeNull(); + }); + + it('active_gsd_milestone matches first milestone gsd_milestone_id', () => { + const fixture = loadMilestoneFixture(); + expect(fixture.active_gsd_milestone).toBe(fixture.milestones[0].gsd_milestone_id); + }); + + it('mock-gsd-agent and mock-github activate without errors', () => { + const mockGitHub = _require(MOCK_GITHUB_MODULE); + const mockAgent = _require(MOCK_AGENT_MODULE); + + expect(() => mockGitHub.activate()).not.toThrow(); + expect(() => mockAgent.activate()).not.toThrow(); + + expect(mockGitHub.getCallLog()).toHaveLength(0); + expect(mockAgent.getCallLog()).toHaveLength(0); + + mockGitHub.deactivate(); + mockAgent.deactivate(); + }); +}); diff --git a/test/pipeline-transitions.test.js b/test/pipeline-transitions.test.js new file mode 100644 index 0000000..766e59f --- /dev/null +++ b/test/pipeline-transitions.test.js @@ -0,0 +1,671 @@ +/** + * test/pipeline-transitions.test.js — Pipeline stage transition tests for mgw:run + * + * Simulates a full mgw:run cycle (triage → plan → execute → verify → pr-created → done) + * using mock agents. Asserts: + * - pipeline_stage transitions via lib/pipeline.cjs + * - checkpoint pipeline_step progression via lib/state.cjs + * - onTransition hooks fire at each stage change + * - mock GitHub call log captures expected commands + * - mock GSD agent spawns are recorded + * + * Tests: + * 1. happy-path — new → triaged → planning → executing → verifying → pr-created → done + * 2. failure-mode: agent returns no output → failed + * 3. 
failure-mode: blocking comment detected → blocked + * 4. checkpoint: pipeline_step progression throughout happy path + * + * Isolation strategy: + * - lib/pipeline.cjs is loaded fresh per describe block (cache evicted) + * - lib/state.cjs uses a tmp dir with process.cwd() override + * - mock-github and mock-gsd-agent are activated/deactivated in beforeEach/afterEach + * - clearHooks() called in beforeEach to prevent hook accumulation + * - Tmp dirs removed in afterAll + * + * No live GitHub tokens or Claude API calls are used. + */ + +import { describe, it, expect, beforeEach, afterEach, afterAll } from 'vitest'; +import { createRequire } from 'module'; +import { fileURLToPath } from 'url'; +import path from 'path'; +import fs from 'fs'; +import os from 'os'; + +const _require = createRequire(import.meta.url); +const __dirname = path.dirname(fileURLToPath(import.meta.url)); +const REPO_ROOT = path.resolve(__dirname, '..'); + +const PIPELINE_MODULE = path.join(REPO_ROOT, 'lib', 'pipeline.cjs'); +const STATE_MODULE = path.join(REPO_ROOT, 'lib', 'state.cjs'); +const MOCK_GITHUB_MODULE = path.join(REPO_ROOT, 'lib', 'mock-github.cjs'); +const MOCK_AGENT_MODULE = path.join(REPO_ROOT, 'lib', 'mock-gsd-agent.cjs'); + +// --------------------------------------------------------------------------- +// Helpers +// --------------------------------------------------------------------------- + +/** Reload lib/pipeline.cjs fresh (evict module cache). */ +function loadPipeline() { + delete _require.cache[PIPELINE_MODULE]; + return _require(PIPELINE_MODULE); +} + +/** Reload lib/state.cjs fresh (evict module cache). */ +function loadState() { + delete _require.cache[STATE_MODULE]; + return _require(STATE_MODULE); +} + +/** Override process.cwd to return tmpDir. Returns restore function. 
*/ +function overrideCwd(tmpDir) { + const original = process.cwd.bind(process); + process.cwd = () => tmpDir; + return () => { process.cwd = original; }; +} + +/** Remove .mgw/ inside tmpDir if it exists. */ +function cleanMgw(tmpDir) { + const mgwDir = path.join(tmpDir, '.mgw'); + if (fs.existsSync(mgwDir)) { + fs.rmSync(mgwDir, { recursive: true, force: true }); + } +} + +/** + * Write a minimal issue state file into tmpDir/.mgw/active/. + * Returns the file path. + */ +function writeIssueState(tmpDir, issueNum, overrides = {}) { + const activeDir = path.join(tmpDir, '.mgw', 'active'); + fs.mkdirSync(activeDir, { recursive: true }); + const state = Object.assign( + { + issue_number: issueNum, + slug: `test-issue-${issueNum}`, + title: `Test issue ${issueNum}`, + pipeline_stage: 'new', + gsd_route: 'plan-phase', + checkpoint: null, + }, + overrides + ); + const filePath = path.join(activeDir, `${issueNum}-test-issue-${issueNum}.json`); + fs.writeFileSync(filePath, JSON.stringify(state, null, 2)); + return filePath; +} + +/** Read a persisted issue state back from tmpDir/.mgw/active/. */ +function readIssueState(tmpDir, issueNum) { + const activeDir = path.join(tmpDir, '.mgw', 'active'); + const entries = fs.readdirSync(activeDir); + const match = entries.find( + f => f.startsWith(`${issueNum}-`) && f.endsWith('.json') + ); + if (!match) throw new Error(`No state file for issue #${issueNum} in ${activeDir}`); + return JSON.parse(fs.readFileSync(path.join(activeDir, match), 'utf-8')); +} + +/** + * Build a minimal issue state object suitable for transitionStage(). + * Does NOT write to disk — used for pure in-memory state machine tests. 
+ */ +function makeIssueState(overrides = {}) { + return Object.assign( + { + issue_number: 252, + slug: 'test-issue-252', + title: 'Write pipeline stage transition tests', + pipeline_stage: 'new', + gsd_route: 'plan-phase', + checkpoint: null, + }, + overrides + ); +} + +// --------------------------------------------------------------------------- +// Suite 1: happy-path — new → triaged → planning → executing → verifying → pr-created → done +// --------------------------------------------------------------------------- + +describe('happy-path: full mgw:run cycle', () => { + let pipeline; + let transitionLog; + + beforeEach(() => { + pipeline = loadPipeline(); + transitionLog = []; + // Register a hook to capture all transitions + pipeline.onTransition((from, to, ctx) => { + transitionLog.push({ from, to, ctx }); + }); + }); + + afterEach(() => { + pipeline.clearHooks(); + }); + + it('transitions through all stages from new to done', () => { + const { transitionStage, STAGES, VALID_TRANSITIONS } = pipeline; + let state = makeIssueState(); + + // new → triaged + state = transitionStage(state, STAGES.TRIAGED); + expect(state.pipeline_stage).toBe('triaged'); + expect(state.previous_stage).toBe('new'); + + // triaged → planning + state = transitionStage(state, STAGES.PLANNING); + expect(state.pipeline_stage).toBe('planning'); + expect(state.previous_stage).toBe('triaged'); + + // planning → executing + state = transitionStage(state, STAGES.EXECUTING); + expect(state.pipeline_stage).toBe('executing'); + expect(state.previous_stage).toBe('planning'); + + // executing → verifying + state = transitionStage(state, STAGES.VERIFYING); + expect(state.pipeline_stage).toBe('verifying'); + expect(state.previous_stage).toBe('executing'); + + // verifying → pr-created + state = transitionStage(state, STAGES.PR_CREATED); + expect(state.pipeline_stage).toBe('pr-created'); + expect(state.previous_stage).toBe('verifying'); + + // pr-created → done + state = transitionStage(state, 
STAGES.DONE); + expect(state.pipeline_stage).toBe('done'); + expect(state.previous_stage).toBe('pr-created'); + + // done is terminal — no forward transitions + expect(VALID_TRANSITIONS[STAGES.DONE]).toEqual([]); + expect(() => transitionStage(state, STAGES.FAILED)).toThrow(); + }); + + it('fires onTransition hooks at each stage change', () => { + const { transitionStage, STAGES } = pipeline; + let state = makeIssueState(); + + state = transitionStage(state, STAGES.TRIAGED); + state = transitionStage(state, STAGES.PLANNING); + state = transitionStage(state, STAGES.EXECUTING); + state = transitionStage(state, STAGES.VERIFYING); + state = transitionStage(state, STAGES.PR_CREATED); + state = transitionStage(state, STAGES.DONE); + + expect(transitionLog).toHaveLength(6); + expect(transitionLog[0]).toMatchObject({ from: 'new', to: 'triaged' }); + expect(transitionLog[1]).toMatchObject({ from: 'triaged', to: 'planning' }); + expect(transitionLog[2]).toMatchObject({ from: 'planning', to: 'executing' }); + expect(transitionLog[3]).toMatchObject({ from: 'executing', to: 'verifying' }); + expect(transitionLog[4]).toMatchObject({ from: 'verifying', to: 'pr-created' }); + expect(transitionLog[5]).toMatchObject({ from: 'pr-created', to: 'done' }); + }); + + it('clearHooks() prevents hook from firing after clear', () => { + const { transitionStage, STAGES, clearHooks } = pipeline; + const firedAfterClear = []; + pipeline.onTransition((from, to) => firedAfterClear.push({ from, to })); + + let state = makeIssueState(); + state = transitionStage(state, STAGES.TRIAGED); + clearHooks(); + + // This transition should NOT fire hooks + state = transitionStage(state, STAGES.PLANNING); + + // transitionLog (registered in beforeEach) captured the first transition + // firedAfterClear captured before clearHooks, so it has one entry + expect(firedAfterClear).toHaveLength(1); + expect(firedAfterClear[0]).toMatchObject({ from: 'new', to: 'triaged' }); + }); + + it('self-transition throws', () 
=> { + const { transitionStage, STAGES } = pipeline; + const state = makeIssueState({ pipeline_stage: 'planning' }); + expect(() => transitionStage(state, STAGES.PLANNING)).toThrow(/self-transition/); + }); + + it('invalid transition throws with descriptive message', () => { + const { transitionStage, STAGES } = pipeline; + const state = makeIssueState({ pipeline_stage: 'new' }); + // new → done is not a valid transition + expect(() => transitionStage(state, STAGES.DONE)).toThrow(/Invalid transition/); + }); + + it('isValidTransition covers all happy-path edges', () => { + const { isValidTransition, STAGES } = pipeline; + const happyPath = [ + [STAGES.NEW, STAGES.TRIAGED], + [STAGES.TRIAGED, STAGES.PLANNING], + [STAGES.PLANNING, STAGES.EXECUTING], + [STAGES.EXECUTING, STAGES.VERIFYING], + [STAGES.VERIFYING, STAGES.PR_CREATED], + [STAGES.PR_CREATED, STAGES.DONE], + ]; + for (const [from, to] of happyPath) { + expect(isValidTransition(from, to)).toBe(true); + } + }); +}); + +// --------------------------------------------------------------------------- +// Suite 2: failure-mode — agent returns no output → failed +// --------------------------------------------------------------------------- + +describe('failure-mode: agent returns no output', () => { + let pipeline; + let mockAgent; + + beforeEach(() => { + pipeline = loadPipeline(); + pipeline.clearHooks(); + mockAgent = _require(MOCK_AGENT_MODULE); + mockAgent.activate(); + // Override gsd-planner to return empty string (simulates silent agent failure) + mockAgent.setResponse('gsd-planner', ''); + }); + + afterEach(() => { + pipeline.clearHooks(); + mockAgent.deactivate(); + }); + + it('transitions to failed when planner returns no output', () => { + const { transitionStage, STAGES, isValidTransition } = pipeline; + let state = makeIssueState(); + + // Simulate pipeline progress up to executing + state = transitionStage(state, STAGES.TRIAGED); + state = transitionStage(state, STAGES.PLANNING); + + // Simulate agent 
spawn — returns empty output + const output = mockAgent.spawnStub({ + subagent_type: 'gsd-planner', + prompt: 'Create PLAN.md for issue #252', + description: 'Planner for issue 252', + }); + + // Agent failure detection: empty output means execution failed + expect(output).toBe(''); + + // On empty output, pipeline transitions to failed + state = transitionStage(state, STAGES.FAILED); + expect(state.pipeline_stage).toBe('failed'); + }); + + it('assertSpawned passes for gsd-planner', () => { + mockAgent.spawnStub({ + subagent_type: 'gsd-planner', + prompt: 'Plan issue 252', + description: 'Test planner spawn', + }); + // Should not throw + expect(() => mockAgent.assertSpawned('gsd-planner')).not.toThrow(); + }); + + it('assertSpawned throws when agent was not spawned', () => { + // gsd-executor was never called + expect(() => mockAgent.assertSpawned('gsd-executor')).toThrow(); + }); + + it('failed stage can recover to triaged or planning', () => { + const { isValidTransition, STAGES } = pipeline; + expect(isValidTransition(STAGES.FAILED, STAGES.TRIAGED)).toBe(true); + expect(isValidTransition(STAGES.FAILED, STAGES.PLANNING)).toBe(true); + expect(isValidTransition(STAGES.FAILED, STAGES.EXECUTING)).toBe(true); + // Cannot skip directly to done from failed + expect(isValidTransition(STAGES.FAILED, STAGES.DONE)).toBe(false); + // Cannot skip to pr-created from failed + expect(isValidTransition(STAGES.FAILED, STAGES.PR_CREATED)).toBe(false); + }); + + it('call log records the failed spawn attempt', () => { + mockAgent.spawnStub({ + subagent_type: 'gsd-planner', + prompt: 'Plan issue 252', + description: 'Planner', + }); + + const log = mockAgent.getCallLog(); + expect(log).toHaveLength(1); + expect(log[0].subagent_type).toBe('gsd-planner'); + expect(log[0].output).toBe(''); + }); + + it('getSpawnCount returns correct count per agent type', () => { + mockAgent.spawnStub({ subagent_type: 'gsd-planner', prompt: '', description: '' }); + mockAgent.spawnStub({ subagent_type: 
'gsd-planner', prompt: '', description: '' }); + expect(mockAgent.getSpawnCount('gsd-planner')).toBe(2); + expect(mockAgent.getSpawnCount('gsd-executor')).toBe(0); + expect(mockAgent.getSpawnCount()).toBe(2); // total + }); + + it('any-stage can transition to failed', () => { + const { isValidTransition, STAGES } = pipeline; + const nonTerminal = [ + STAGES.NEW, STAGES.TRIAGED, STAGES.NEEDS_INFO, STAGES.DISCUSSING, + STAGES.APPROVED, STAGES.PLANNING, STAGES.DIAGNOSING, + STAGES.EXECUTING, STAGES.VERIFYING, STAGES.PR_CREATED, + ]; + for (const stage of nonTerminal) { + expect(isValidTransition(stage, STAGES.FAILED)).toBe(true); + } + }); +}); + +// --------------------------------------------------------------------------- +// Suite 3: failure-mode — blocking comment detected → blocked +// --------------------------------------------------------------------------- + +describe('failure-mode: blocking comment detected', () => { + let pipeline; + let mockGitHub; + + beforeEach(() => { + pipeline = loadPipeline(); + pipeline.clearHooks(); + mockGitHub = _require(MOCK_GITHUB_MODULE); + mockGitHub.activate(); + // Override gh issue view to return a blocking comment + mockGitHub.setResponse( + 'gh issue view', + JSON.stringify({ + number: 252, + title: 'Write pipeline stage transition tests', + comments: [ + { + author: { login: 'stakeholder' }, + body: 'Hold off, do not work on this yet. Blocked by design review.', + createdAt: '2026-03-06T10:00:00Z', + }, + ], + }) + ); + }); + + afterEach(() => { + pipeline.clearHooks(); + mockGitHub.deactivate(); + }); + + it('transitions to blocked when blocking comment is detected', () => { + const { transitionStage, STAGES } = pipeline; + let state = makeIssueState({ pipeline_stage: 'triaged' }); + + // Simulate comment classification: 'Hold off' → blocking + const commentBody = 'Hold off, do not work on this yet. 
Blocked by design review.'; + const isBlocking = /hold off|do not work|blocked|wait/i.test(commentBody); + expect(isBlocking).toBe(true); + + // Pipeline transitions to blocked on blocking comment + state = transitionStage(state, STAGES.BLOCKED); + expect(state.pipeline_stage).toBe('blocked'); + }); + + it('mock GitHub call log includes gh issue view', () => { + const { execSync } = _require('child_process'); + // Trigger the mock by calling execSync (mock-github intercepts child_process.execSync) + const result = execSync('gh issue view 252 --json comments'); + const parsed = JSON.parse(result); + expect(parsed.number).toBe(252); + expect(parsed.comments).toHaveLength(1); + expect(parsed.comments[0].body).toMatch(/Hold off/); + + const log = mockGitHub.getCallLog(); + expect(log.length).toBeGreaterThan(0); + const issueViewCall = log.find(entry => entry.cmd.includes('gh issue view')); + expect(issueViewCall).toBeDefined(); + }); + + it('blocked stage can recover to triaged or planning', () => { + const { isValidTransition, STAGES } = pipeline; + expect(isValidTransition(STAGES.BLOCKED, STAGES.TRIAGED)).toBe(true); + expect(isValidTransition(STAGES.BLOCKED, STAGES.PLANNING)).toBe(true); + expect(isValidTransition(STAGES.BLOCKED, STAGES.EXECUTING)).toBe(true); + // Cannot skip to done from blocked + expect(isValidTransition(STAGES.BLOCKED, STAGES.DONE)).toBe(false); + // Cannot skip to pr-created from blocked + expect(isValidTransition(STAGES.BLOCKED, STAGES.PR_CREATED)).toBe(false); + }); + + it('any non-terminal stage can transition to blocked', () => { + const { isValidTransition, STAGES } = pipeline; + const nonTerminal = [ + STAGES.NEW, STAGES.TRIAGED, STAGES.NEEDS_INFO, STAGES.DISCUSSING, + STAGES.APPROVED, STAGES.PLANNING, STAGES.DIAGNOSING, + STAGES.EXECUTING, STAGES.VERIFYING, STAGES.PR_CREATED, + ]; + for (const stage of nonTerminal) { + expect(isValidTransition(stage, STAGES.BLOCKED)).toBe(true); + } + }); + + it('clearCallLog resets the log without 
deactivating mock', () => { + expect(mockGitHub.isActive()).toBe(true); + const { execSync } = _require('child_process'); + execSync('gh issue view 252 --json comments'); + expect(mockGitHub.getCallLog().length).toBeGreaterThan(0); + + mockGitHub.clearCallLog(); + expect(mockGitHub.getCallLog()).toHaveLength(0); + expect(mockGitHub.isActive()).toBe(true); // still active after clear + }); + + it('deactivate restores real execSync (isActive becomes false)', () => { + expect(mockGitHub.isActive()).toBe(true); + mockGitHub.deactivate(); + expect(mockGitHub.isActive()).toBe(false); + // Re-activate for afterEach cleanup + mockGitHub.activate(); + }); +}); + +// --------------------------------------------------------------------------- +// Suite 4: checkpoint — pipeline_step progression throughout happy path +// --------------------------------------------------------------------------- + +describe('checkpoint: pipeline_step progression', () => { + let tmpDir; + let restoreCwd; + let stateLib; + + beforeEach(() => { + tmpDir = fs.mkdtempSync(path.join(os.tmpdir(), 'mgw-pipeline-test-')); + restoreCwd = overrideCwd(tmpDir); + stateLib = loadState(); + writeIssueState(tmpDir, 300); + }); + + afterEach(() => { + restoreCwd(); + cleanMgw(tmpDir); + delete _require.cache[STATE_MODULE]; + }); + + afterAll(() => { + // afterEach only clears .mgw/ inside tmpDir (it never removes tmpDir itself), and tmpDir is reassigned per test — so remove the last tmpDir here; earlier per-test dirs are left to OS tmp cleanup + if (fs.existsSync(tmpDir)) { + fs.rmSync(tmpDir, { recursive: true, force: true }); + } + }); + + it('CHECKPOINT_STEP_ORDER matches documented progression', () => { + const { CHECKPOINT_STEP_ORDER } = stateLib; + expect(CHECKPOINT_STEP_ORDER).toEqual(['triage', 'plan', 'execute', 'verify', 'pr']); + }); + + it('checkpoint is null before first updateCheckpoint call', () => { + const state = readIssueState(tmpDir, 300); + expect(state.checkpoint).toBeNull(); + }); + + it('initializes checkpoint on first updateCheckpoint call', () => { + const { updateCheckpoint } = stateLib; + 
updateCheckpoint(300, { + pipeline_step: 'triage', + step_progress: { route_selected: 'plan-phase', comment_check_done: true }, + resume: { action: 'begin-execution', context: { gsd_route: 'plan-phase' } }, + }); + + const state = readIssueState(tmpDir, 300); + expect(state.checkpoint).not.toBeNull(); + expect(state.checkpoint.schema_version).toBe(1); + expect(state.checkpoint.pipeline_step).toBe('triage'); + expect(state.checkpoint.step_progress.route_selected).toBe('plan-phase'); + expect(state.checkpoint.step_progress.comment_check_done).toBe(true); + }); + + it('advances pipeline_step through all 5 steps', () => { + const { updateCheckpoint } = stateLib; + + // triage + updateCheckpoint(300, { + pipeline_step: 'triage', + step_progress: { route_selected: 'plan-phase' }, + resume: { action: 'begin-execution', context: {} }, + }); + let state = readIssueState(tmpDir, 300); + expect(state.checkpoint.pipeline_step).toBe('triage'); + + // plan + updateCheckpoint(300, { + pipeline_step: 'plan', + step_progress: { plan_path: '/plan.md', plan_checked: false }, + artifacts: [{ path: '/plan.md', type: 'plan', created_at: new Date().toISOString() }], + resume: { action: 'spawn-executor', context: { quick_dir: '/q', plan_num: '11' } }, + }); + state = readIssueState(tmpDir, 300); + expect(state.checkpoint.pipeline_step).toBe('plan'); + expect(state.checkpoint.step_progress.plan_path).toBe('/plan.md'); + expect(state.checkpoint.artifacts).toHaveLength(1); + expect(state.checkpoint.artifacts[0].type).toBe('plan'); + + // execute + updateCheckpoint(300, { + pipeline_step: 'execute', + step_progress: { gsd_phase: 1, tasks_completed: 0, tasks_total: 1 }, + resume: { action: 'spawn-verifier', context: { quick_dir: '/q', plan_num: '11' } }, + }); + state = readIssueState(tmpDir, 300); + expect(state.checkpoint.pipeline_step).toBe('execute'); + expect(state.checkpoint.step_progress.gsd_phase).toBe(1); + // plan artifact still present (append-only) + 
expect(state.checkpoint.artifacts).toHaveLength(1); + + // verify + updateCheckpoint(300, { + pipeline_step: 'verify', + step_progress: { + verification_path: '/verify.md', + must_haves_checked: true, + artifact_check_done: true, + }, + artifacts: [{ path: '/verify.md', type: 'verification', created_at: new Date().toISOString() }], + resume: { action: 'create-pr', context: { quick_dir: '/q', plan_num: '11' } }, + }); + state = readIssueState(tmpDir, 300); + expect(state.checkpoint.pipeline_step).toBe('verify'); + expect(state.checkpoint.step_progress.verification_path).toBe('/verify.md'); + expect(state.checkpoint.artifacts).toHaveLength(2); // plan + verification + + // pr + updateCheckpoint(300, { + pipeline_step: 'pr', + step_progress: { branch_pushed: true, pr_number: 99, pr_url: 'https://github.com/test/repo/pull/99' }, + step_history: [{ step: 'pr', completed_at: new Date().toISOString(), agent_type: 'general-purpose', output_path: 'https://github.com/test/repo/pull/99' }], + resume: { action: 'cleanup', context: { pr_number: 99 } }, + }); + state = readIssueState(tmpDir, 300); + expect(state.checkpoint.pipeline_step).toBe('pr'); + expect(state.checkpoint.step_progress.pr_number).toBe(99); + expect(state.checkpoint.step_progress.branch_pushed).toBe(true); + expect(state.checkpoint.step_history).toHaveLength(1); + expect(state.checkpoint.step_history[0].step).toBe('pr'); + }); + + it('detectCheckpoint returns null for triage-only checkpoint', () => { + const { updateCheckpoint, detectCheckpoint } = stateLib; + updateCheckpoint(300, { + pipeline_step: 'triage', + step_progress: { route_selected: 'plan-phase' }, + }); + const cp = detectCheckpoint(300); + // triage-only is not resumable (index 0 in CHECKPOINT_STEP_ORDER) + expect(cp).toBeNull(); + }); + + it('detectCheckpoint returns data for post-triage checkpoints', () => { + const { updateCheckpoint, detectCheckpoint } = stateLib; + updateCheckpoint(300, { + pipeline_step: 'plan', + step_progress: { 
plan_path: '/plan.md' }, + resume: { action: 'spawn-executor', context: {} }, + }); + const cp = detectCheckpoint(300); + expect(cp).not.toBeNull(); + expect(cp.pipeline_step).toBe('plan'); + expect(cp.step_progress.plan_path).toBe('/plan.md'); + expect(cp.resume.action).toBe('spawn-executor'); + }); + + it('artifacts array is append-only across multiple updateCheckpoint calls', () => { + const { updateCheckpoint } = stateLib; + const t = new Date().toISOString(); + + updateCheckpoint(300, { + pipeline_step: 'plan', + artifacts: [{ path: '/plan.md', type: 'plan', created_at: t }], + }); + updateCheckpoint(300, { + pipeline_step: 'execute', + artifacts: [{ path: '/summary.md', type: 'summary', created_at: t }], + }); + updateCheckpoint(300, { + pipeline_step: 'verify', + artifacts: [{ path: '/verify.md', type: 'verification', created_at: t }], + }); + + const state = readIssueState(tmpDir, 300); + expect(state.checkpoint.artifacts).toHaveLength(3); + expect(state.checkpoint.artifacts.map(a => a.type)).toEqual(['plan', 'summary', 'verification']); + }); + + it('step_history is append-only across multiple updateCheckpoint calls', () => { + const { updateCheckpoint } = stateLib; + const t = new Date().toISOString(); + + updateCheckpoint(300, { + pipeline_step: 'plan', + step_history: [{ step: 'plan', completed_at: t, agent_type: 'gsd-planner', output_path: '/plan.md' }], + }); + updateCheckpoint(300, { + pipeline_step: 'execute', + step_history: [{ step: 'execute', completed_at: t, agent_type: 'gsd-executor', output_path: '/summary.md' }], + }); + + const state = readIssueState(tmpDir, 300); + expect(state.checkpoint.step_history).toHaveLength(2); + expect(state.checkpoint.step_history[0].step).toBe('plan'); + expect(state.checkpoint.step_history[1].step).toBe('execute'); + }); + + it('updated_at is advanced on every checkpoint write', async () => { + const { updateCheckpoint } = stateLib; + + updateCheckpoint(300, { pipeline_step: 'plan' }); + const first = 
readIssueState(tmpDir, 300); + const firstTs = first.checkpoint.updated_at; + + // Small delay to ensure clock advances + await new Promise(resolve => setTimeout(resolve, 5)); + + updateCheckpoint(300, { pipeline_step: 'execute' }); + const second = readIssueState(tmpDir, 300); + const secondTs = second.checkpoint.updated_at; + + expect(new Date(secondTs).getTime()).toBeGreaterThanOrEqual(new Date(firstTs).getTime()); + }); +}); diff --git a/test/project-state-detection.test.js b/test/project-state-detection.test.js new file mode 100644 index 0000000..4a0f4aa --- /dev/null +++ b/test/project-state-detection.test.js @@ -0,0 +1,396 @@ +/** + * test/project-state-detection.test.js — Scenario tests for mgw:project state detection + * + * Tests all six STATE_CLASS paths of the detectProjectState() function in lib/state.cjs. + * The function encapsulates the five-signal classification logic from workflows/detect-state.md. + * + * Five signals: + * P — .planning/PROJECT.md exists + * R — .planning/ROADMAP.md exists + * S — .planning/STATE.md exists + * M — .mgw/project.json exists + * G — Count of GitHub milestones (passed as githubMilestoneCount, no live API call) + * + * Six STATE_CLASS values: + * Fresh — no GSD state, no MGW state, no GitHub milestones + * GSD-Only — PROJECT.md present, no ROADMAP, no MGW, G=0 + * GSD-Mid-Exec — PROJECT.md + ROADMAP (or STATE.md) present, no MGW, G=0 + * Aligned — MGW project.json + GitHub milestones present, counts consistent + * Diverged — MGW project.json + GitHub milestones present, counts inconsistent + * Extend — MGW project.json present, all milestones complete (current_milestone > count) + * + * Isolation strategy: + * - fs.mkdtempSync() creates a real tmp dir per describe block + * - Fixtures pre-seed .mgw/ and .planning/ directories in the tmp dir + * - detectProjectState() takes { repoRoot, githubMilestoneCount } — no live calls + * - afterAll() removes tmp dirs + * - No live GitHub tokens or Claude API calls used + * + * 
Dependencies: + * - lib/state.cjs:detectProjectState — extracted from workflows/detect-state.md + * - test/fixtures/project-state/{aligned,diverged,extend}.json — pre-baked project.json content + */ + +import { describe, it, expect, afterAll, beforeAll } from 'vitest'; +import { createRequire } from 'module'; +import { fileURLToPath } from 'url'; +import path from 'path'; +import fs from 'fs'; +import os from 'os'; + +const _require = createRequire(import.meta.url); +const __dirname = path.dirname(fileURLToPath(import.meta.url)); +const REPO_ROOT = path.resolve(__dirname, '..'); + +const STATE_MODULE = path.join(REPO_ROOT, 'lib', 'state.cjs'); +const FIXTURES_DIR = path.join(__dirname, 'fixtures', 'project-state'); + +// --------------------------------------------------------------------------- +// Helpers +// --------------------------------------------------------------------------- + +/** + * Reload lib/state.cjs fresh (evict module cache so process.cwd override + * takes effect on each reload if needed). + */ +function loadState() { + delete _require.cache[STATE_MODULE]; + return _require(STATE_MODULE); +} + +/** + * Create a temp directory. Returns tmpDir path. + */ +function makeTmpDir() { + return fs.mkdtempSync(path.join(os.tmpdir(), 'mgw-state-test-')); +} + +/** + * Remove a temp directory. + */ +function removeTmpDir(tmpDir) { + if (fs.existsSync(tmpDir)) { + fs.rmSync(tmpDir, { recursive: true, force: true }); + } +} + +/** + * Seed a file into tmpDir, creating parent directories as needed. + * + * @param {string} tmpDir - Base temp directory + * @param {string} relPath - Relative path within tmpDir (e.g. '.planning/PROJECT.md') + * @param {string} [content=''] - File content + */ +function seedFile(tmpDir, relPath, content = '') { + const fullPath = path.join(tmpDir, relPath); + fs.mkdirSync(path.dirname(fullPath), { recursive: true }); + fs.writeFileSync(fullPath, content, 'utf-8'); +} + +/** + * Seed .mgw/project.json from a fixture file. 
+ * + * @param {string} tmpDir - Base temp directory + * @param {string} fixtureName - Name of fixture in test/fixtures/project-state/ + */ +function seedProjectJson(tmpDir, fixtureName) { + const fixturePath = path.join(FIXTURES_DIR, `${fixtureName}.json`); + if (!fs.existsSync(fixturePath)) { + throw new Error(`Fixture not found: ${fixturePath}`); + } + seedFile(tmpDir, '.mgw/project.json', fs.readFileSync(fixturePath, 'utf-8')); +} + +// --------------------------------------------------------------------------- +// Test suite +// --------------------------------------------------------------------------- + +describe('detectProjectState', () => { + // --------------------------------------------------------------------------- + // STATE_CLASS: Fresh + // Signals: P=false, R=false, S=false, M=false, G=0 + // No files on disk, no GitHub milestones. + // --------------------------------------------------------------------------- + describe('Fresh', () => { + let tmpDir; + + beforeAll(() => { + tmpDir = makeTmpDir(); + // No files seeded — completely empty directory + }); + + afterAll(() => { + removeTmpDir(tmpDir); + }); + + it('returns Fresh when no planning files and no project.json exist and G=0', () => { + const { detectProjectState } = loadState(); + const result = detectProjectState({ repoRoot: tmpDir, githubMilestoneCount: 0 }); + + expect(result.stateClass).toBe('Fresh'); + }); + + it('reports all signals as false/0', () => { + const { detectProjectState } = loadState(); + const { signals } = detectProjectState({ repoRoot: tmpDir, githubMilestoneCount: 0 }); + + expect(signals.P).toBe(false); + expect(signals.R).toBe(false); + expect(signals.S).toBe(false); + expect(signals.M).toBe(false); + expect(signals.G).toBe(0); + }); + }); + + // --------------------------------------------------------------------------- + // STATE_CLASS: GSD-Only + // Signals: P=true, R=false, S=false, M=false, G=0 + // .planning/PROJECT.md exists — project is scoped but no roadmap 
yet. + // --------------------------------------------------------------------------- + describe('GSD-Only', () => { + let tmpDir; + + beforeAll(() => { + tmpDir = makeTmpDir(); + // Seed only PROJECT.md — no ROADMAP.md, no STATE.md, no project.json + seedFile(tmpDir, '.planning/PROJECT.md', '# Test Project\n'); + }); + + afterAll(() => { + removeTmpDir(tmpDir); + }); + + it('returns GSD-Only when only PROJECT.md exists and G=0', () => { + const { detectProjectState } = loadState(); + const result = detectProjectState({ repoRoot: tmpDir, githubMilestoneCount: 0 }); + + expect(result.stateClass).toBe('GSD-Only'); + }); + + it('reports P=true, M=false, G=0', () => { + const { detectProjectState } = loadState(); + const { signals } = detectProjectState({ repoRoot: tmpDir, githubMilestoneCount: 0 }); + + expect(signals.P).toBe(true); + expect(signals.R).toBe(false); + expect(signals.S).toBe(false); + expect(signals.M).toBe(false); + expect(signals.G).toBe(0); + }); + }); + + // --------------------------------------------------------------------------- + // STATE_CLASS: GSD-Mid-Exec + // Signals: P=true, R=true, S=true, M=false, G=0 + // Full GSD state exists (.planning/PROJECT.md + ROADMAP.md + STATE.md), + // but MGW has not yet been initialized. 
+ // --------------------------------------------------------------------------- + describe('GSD-Mid-Exec', () => { + let tmpDir; + + beforeAll(() => { + tmpDir = makeTmpDir(); + // Seed PROJECT.md, ROADMAP.md, and STATE.md — no project.json + seedFile(tmpDir, '.planning/PROJECT.md', '# Test Project\n'); + seedFile(tmpDir, '.planning/ROADMAP.md', '# Roadmap\n\n## v1 — Core\n'); + seedFile(tmpDir, '.planning/STATE.md', '# State\nphase: 1\n'); + }); + + afterAll(() => { + removeTmpDir(tmpDir); + }); + + it('returns GSD-Mid-Exec when PROJECT.md + ROADMAP.md + STATE.md exist and G=0', () => { + const { detectProjectState } = loadState(); + const result = detectProjectState({ repoRoot: tmpDir, githubMilestoneCount: 0 }); + + expect(result.stateClass).toBe('GSD-Mid-Exec'); + }); + + it('reports P=true, R=true, S=true, M=false, G=0', () => { + const { detectProjectState } = loadState(); + const { signals } = detectProjectState({ repoRoot: tmpDir, githubMilestoneCount: 0 }); + + expect(signals.P).toBe(true); + expect(signals.R).toBe(true); + expect(signals.S).toBe(true); + expect(signals.M).toBe(false); + expect(signals.G).toBe(0); + }); + + it('also returns GSD-Mid-Exec when only PROJECT.md + ROADMAP.md exist (no STATE.md)', () => { + // Edge case: R=true is sufficient for GSD-Mid-Exec per detect-state.md logic + const midExecDir = makeTmpDir(); + seedFile(midExecDir, '.planning/PROJECT.md', '# Test Project\n'); + seedFile(midExecDir, '.planning/ROADMAP.md', '# Roadmap\n'); + // No STATE.md + + const { detectProjectState } = loadState(); + const result = detectProjectState({ repoRoot: midExecDir, githubMilestoneCount: 0 }); + + expect(result.stateClass).toBe('GSD-Mid-Exec'); + + removeTmpDir(midExecDir); + }); + }); + + // --------------------------------------------------------------------------- + // STATE_CLASS: Aligned + // Signals: M=true, G>0, local milestone count consistent with G (|local-G| <= 1) + // Both MGW project.json and GitHub milestones exist and counts 
match. + // --------------------------------------------------------------------------- + describe('Aligned', () => { + let tmpDir; + + beforeAll(() => { + tmpDir = makeTmpDir(); + // Seed project.json with 2 milestones, current_milestone=1 (not all done) + seedProjectJson(tmpDir, 'aligned'); + // Also seed planning files (P=true) to match realistic Aligned scenario + seedFile(tmpDir, '.planning/PROJECT.md', '# Test Project\n'); + }); + + afterAll(() => { + removeTmpDir(tmpDir); + }); + + it('returns Aligned when project.json has 2 milestones and G=2 (exact match)', () => { + const { detectProjectState } = loadState(); + const result = detectProjectState({ repoRoot: tmpDir, githubMilestoneCount: 2 }); + + expect(result.stateClass).toBe('Aligned'); + }); + + it('returns Aligned when G is off by 1 (|local-G| <= 1 tolerance)', () => { + // project.json has 2 milestones, G=1 — off by 1 is still Aligned + const { detectProjectState } = loadState(); + const result = detectProjectState({ repoRoot: tmpDir, githubMilestoneCount: 1 }); + + expect(result.stateClass).toBe('Aligned'); + }); + + it('reports M=true and G>0', () => { + const { detectProjectState } = loadState(); + const { signals } = detectProjectState({ repoRoot: tmpDir, githubMilestoneCount: 2 }); + + expect(signals.M).toBe(true); + expect(signals.G).toBeGreaterThan(0); + }); + }); + + // --------------------------------------------------------------------------- + // STATE_CLASS: Diverged + // Signals: M=true, G>0, local milestone count inconsistent with G (|local-G| > 1) + // MGW project.json and GitHub milestones exist but counts diverge significantly. 
+ // --------------------------------------------------------------------------- + describe('Diverged', () => { + let tmpDir; + + beforeAll(() => { + tmpDir = makeTmpDir(); + // Seed project.json with 5 milestones — G will be set to 2 (diff = 3 > 1) + seedProjectJson(tmpDir, 'diverged'); + }); + + afterAll(() => { + removeTmpDir(tmpDir); + }); + + it('returns Diverged when local has 5 milestones but G=2 (diff > 1)', () => { + const { detectProjectState } = loadState(); + const result = detectProjectState({ repoRoot: tmpDir, githubMilestoneCount: 2 }); + + expect(result.stateClass).toBe('Diverged'); + }); + + it('returns Diverged when local has 5 milestones but G=0 is not applicable (M=true, G>0 path)', () => { + // G=8: 5 local vs 8 GitHub — diff=3 > 1 → Diverged + const { detectProjectState } = loadState(); + const result = detectProjectState({ repoRoot: tmpDir, githubMilestoneCount: 8 }); + + expect(result.stateClass).toBe('Diverged'); + }); + + it('reports M=true and G>0', () => { + const { detectProjectState } = loadState(); + const { signals } = detectProjectState({ repoRoot: tmpDir, githubMilestoneCount: 2 }); + + expect(signals.M).toBe(true); + expect(signals.G).toBeGreaterThan(0); + }); + }); + + // --------------------------------------------------------------------------- + // STATE_CLASS: Extend + // Signals: M=true, G>0, current_milestone > milestones.length (all done) + // All milestones in project.json are complete — project is ready to extend. 
+ // --------------------------------------------------------------------------- + describe('Extend', () => { + let tmpDir; + + beforeAll(() => { + tmpDir = makeTmpDir(); + // Seed project.json with 2 milestones, current_milestone=3 (all done) + seedProjectJson(tmpDir, 'extend'); + }); + + afterAll(() => { + removeTmpDir(tmpDir); + }); + + it('returns Extend when current_milestone (3) > milestones.length (2)', () => { + const { detectProjectState } = loadState(); + const result = detectProjectState({ repoRoot: tmpDir, githubMilestoneCount: 2 }); + + expect(result.stateClass).toBe('Extend'); + }); + + it('returns Extend regardless of G value (Extend check runs before consistency check)', () => { + // Even if G is very different (e.g. G=10), Extend is detected first + const { detectProjectState } = loadState(); + const result = detectProjectState({ repoRoot: tmpDir, githubMilestoneCount: 10 }); + + expect(result.stateClass).toBe('Extend'); + }); + + it('reports M=true and G>0', () => { + const { detectProjectState } = loadState(); + const { signals } = detectProjectState({ repoRoot: tmpDir, githubMilestoneCount: 2 }); + + expect(signals.M).toBe(true); + expect(signals.G).toBeGreaterThan(0); + }); + }); + + // --------------------------------------------------------------------------- + // Return shape contract + // --------------------------------------------------------------------------- + describe('return shape', () => { + it('always returns { stateClass: string, signals: { P, R, S, M, G } }', () => { + const { detectProjectState } = loadState(); + const result = detectProjectState({ repoRoot: '/tmp', githubMilestoneCount: 0 }); + + expect(result).toHaveProperty('stateClass'); + expect(typeof result.stateClass).toBe('string'); + expect(result).toHaveProperty('signals'); + expect(typeof result.signals.P).toBe('boolean'); + expect(typeof result.signals.R).toBe('boolean'); + expect(typeof result.signals.S).toBe('boolean'); + expect(typeof 
result.signals.M).toBe('boolean'); + expect(typeof result.signals.G).toBe('number'); + }); + + it('defaults repoRoot to process.cwd() when not provided', () => { + const { detectProjectState } = loadState(); + // Called without repoRoot — should not throw + expect(() => detectProjectState({ githubMilestoneCount: 0 })).not.toThrow(); + }); + + it('defaults githubMilestoneCount to 0 when not provided', () => { + const { detectProjectState } = loadState(); + const result = detectProjectState({ repoRoot: '/tmp' }); + + expect(result.signals.G).toBe(0); + }); + }); +}); diff --git a/test/setup.js b/test/setup.js new file mode 100644 index 0000000..3034cb5 --- /dev/null +++ b/test/setup.js @@ -0,0 +1,88 @@ +/** + * test/setup.js — Vitest global setup + * + * Auto-activates mock-github and mock-gsd-agent before each test, and + * deactivates them after. Both mocks are conditionally required — the + * setup works correctly even when lib/mock-github.cjs or + * lib/mock-gsd-agent.cjs are not yet present (e.g., when PRs #258 and + * #259 are not yet merged to main). + * + * To use mocks in a vitest test file: + * + * import { mockGitHub, mockGsdAgent } from './setup.js'; + * + * test('my test', () => { + * // mocks are already active (activated in beforeEach) + * mockGitHub.setResponse('gh issue view', '{"number":999}'); + * // ... 
+ * }); + * + * To use a scenario: + * + * import { mockGitHub } from './setup.js'; + * + * beforeEach(() => { + * // Override the global beforeEach activation with a scenario + * mockGitHub.deactivate(); + * mockGitHub.activate('pr-error'); + * }); + */ + +import { beforeEach, afterEach } from 'vitest'; +import { createRequire } from 'module'; +import { fileURLToPath } from 'url'; +import path from 'path'; + +const require = createRequire(import.meta.url); +const __dirname = path.dirname(fileURLToPath(import.meta.url)); +const repoRoot = path.resolve(__dirname, '..'); + +// --------------------------------------------------------------------------- +// Conditional mock loading +// --------------------------------------------------------------------------- + +// Conditionally load mock-github — gracefully skip if not present +// (lib/mock-github.cjs lands via PR #258) +let mockGitHub = null; +try { + mockGitHub = require(path.join(repoRoot, 'lib', 'mock-github.cjs')); +} catch (_e) { + // mock-github.cjs not available — tests run without GitHub API interception +} + +// Conditionally load mock-gsd-agent — gracefully skip if not present +// (lib/mock-gsd-agent.cjs lands via PR #259) +let mockGsdAgent = null; +try { + mockGsdAgent = require(path.join(repoRoot, 'lib', 'mock-gsd-agent.cjs')); +} catch (_e) { + // mock-gsd-agent.cjs not available — tests run without agent spawn interception +} + +// --------------------------------------------------------------------------- +// Auto-activate hooks +// --------------------------------------------------------------------------- + +beforeEach(() => { + if (mockGitHub && typeof mockGitHub.activate === 'function') { + mockGitHub.activate(); + } + if (mockGsdAgent && typeof mockGsdAgent.activate === 'function') { + mockGsdAgent.activate(); + } +}); + +afterEach(() => { + if (mockGitHub && typeof mockGitHub.deactivate === 'function') { + mockGitHub.deactivate(); + } + if (mockGsdAgent && typeof mockGsdAgent.deactivate === 
'function') { + mockGsdAgent.deactivate(); + } +}); + +// --------------------------------------------------------------------------- +// Exports — available for test files that need direct mock access +// --------------------------------------------------------------------------- + +export { mockGitHub, mockGsdAgent }; diff --git a/vitest.config.js b/vitest.config.js new file mode 100644 index 0000000..0cbcbb9 --- /dev/null +++ b/vitest.config.js @@ -0,0 +1,12 @@ +import { defineConfig } from 'vitest/config'; + +export default defineConfig({ + test: { + environment: 'node', + setupFiles: ['./test/setup.js'], + // Target .test.js and .spec.js files for vitest + // Exclude .test.cjs files — those use node:test (run via npm run test:node) + include: ['test/**/*.{test,spec}.{js,mjs}'], + exclude: ['test/**/*.test.cjs'], + }, +});