diff --git a/CHANGELOG.md b/CHANGELOG.md index 0473caa469..355631d5c6 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,45 @@ +## 2.7.16 - Memory System & Custom Agents + +### ✨ New Features + +- **Memory system improvements** — Enhanced Graphiti integration with episode types, embedding dimension validation, and session-scoped context retrieval +- **Custom agents integration** — Full YAML frontmatter parsing with tool/MCP server overrides, thinking level control, and category-based agent catalog + +### 🐛 Bug Fixes + +- **Critical: UnboundLocalError in client.py** — Fixed `global_mcp_added` being referenced before definition in security settings dict; pre-load global MCP servers and update settings file after population +- **TOCTOU race condition** — Eliminated filesystem race in MCP plugin cache reader by replacing `fs.access()` + `fs.readFile()` with a single read call +- **Test environment isolation** — Fixed 14 failing tests caused by `CLAUDE_CONFIG_DIR` env var leaking into auth/client test fixtures +- **NaN propagation** — Guarded `parseIssueId`/`parsePRId` against `NaN` from malformed composite IDs +- **Accessibility** — Added `aria-label` to Lock/Globe repo visibility icons in CustomerReposModal +- **i18n** — Added private/public repository label keys for en + fr locales +- **Config path resolution** — `fast_mode.py` now respects `CLAUDE_CONFIG_DIR` for settings file location +- **Code quality** — Removed unused variables (`filePath`, `_mock_driver`), added explanatory comments to empty except clauses + +## 2.7.15 - Global MCP Integration & Code Quality + +### ✨ New Features + +- **Global MCP server integration** — Auto-Claude now automatically loads all MCP servers from `~/.claude.json`, making tools like Keycloak, Redis, Kubernetes, Grafana, and others available in agent sessions. Auto-Claude's built-in MCPs take priority over global ones to avoid conflicts. 
+ +### 🛠️ Improvements + +- **Custom agents** — Use `sanitize_thinking_level()` for consistent thinking level validation, include root-level agents in catalog, extract category name helper for readability +- **Route detection** — Improved .NET HTTP attribute regex handling for optional parameters and method modifiers +- **Project analysis** — Fixed single-solution early return that could misclassify monorepos with sibling services +- **Port detection** — Handle `--port=` equals form in commandLineArgs parsing +- **Graphiti memory** — Non-negative TTL validation, safe env parsing for MAX_RESULTS, proper exception logging instead of silent swallowing +- **Frontend async safety** — Added cancellation guards to prevent stale async responses from corrupting UI state across customer/PR/issue hooks +- **Accessibility** — Added aria-labels to icon-only buttons, fixed invisible keyboard focus targets on drag handles +- **i18n** — Proper singular/plural handling for repo counts (en + fr), progress message keys for localization + +### 🐛 Bug Fixes + +- **Security** — Path traversal validation on custom agent IDs, CRLF normalization for Windows frontmatter files, Windows command-line length protection for agent prompts +- **Type safety** — Guard `phaseCustomAgents` dict type before `.get()`, validate IPC parameters, normalize keychain token checks +- **Frontend** — Fix division by zero in progress bars, guard stale PR detail rendering, clamp negative timestamps, handle project registration failures, keep GitHub setup modal open on save failure +- **Claude MCP handlers** — Convert sync filesystem operations to async, validate header values for type safety + ## 2.7.6 - Stability & Feature Enhancements ### ✨ New Features diff --git a/apps/backend/__init__.py b/apps/backend/__init__.py index b544f95fe0..fbb9a74108 100644 --- a/apps/backend/__init__.py +++ b/apps/backend/__init__.py @@ -19,5 +19,5 @@ See README.md for full documentation. 
""" -__version__ = "2.7.6" +__version__ = "2.7.16" __author__ = "Auto Claude Team" diff --git a/apps/backend/agents/coder.py b/apps/backend/agents/coder.py index de44991a8c..8d73560522 100644 --- a/apps/backend/agents/coder.py +++ b/apps/backend/agents/coder.py @@ -83,6 +83,7 @@ sanitize_error_message, ) from .memory_manager import debug_memory_system_status, get_graphiti_context +from .custom_agents import build_agents_catalog_prompt from .session import post_session_processing, run_agent_session from .utils import ( find_phase_for_subtask, @@ -982,6 +983,9 @@ def _reset_concurrency_state() -> None: f"[Coder] [Fast Mode] {'ENABLED' if fast_mode else 'disabled'} for phase={current_phase}" ) + # Build catalog of available specialist agents (loaded once, cached) + agents_catalog = build_agents_catalog_prompt() + if first_run: # Create client for planning phase client = create_client( @@ -991,6 +995,7 @@ def _reset_concurrency_state() -> None: agent_type="planner", betas=phase_betas, fast_mode=fast_mode, + agents_catalog_prompt=agents_catalog, **thinking_kwargs, ) prompt = generate_planner_prompt(spec_dir, project_dir) @@ -1139,6 +1144,7 @@ def _reset_concurrency_state() -> None: agent_type="coder", betas=phase_betas, fast_mode=fast_mode, + agents_catalog_prompt=agents_catalog, **thinking_kwargs, ) diff --git a/apps/backend/agents/custom_agents.py b/apps/backend/agents/custom_agents.py new file mode 100644 index 0000000000..59b997d742 --- /dev/null +++ b/apps/backend/agents/custom_agents.py @@ -0,0 +1,364 @@ +""" +Custom Agents Integration +========================= + +Parses custom agent .md files from ~/.claude/agents/ and provides +their configuration (system prompt, optional tool/MCP overrides) +for use in the Auto-Claude build pipeline. 
+ +Custom agent files are markdown files that may include YAML frontmatter +for tool and MCP server configuration: + + --- + tools: [Read, Write, Edit, Bash, Glob, Grep] + mcp_servers: [context7, graphiti] + thinking: high + --- + You are a frontend specialist... + +If no frontmatter is provided, only the system prompt (markdown body) +is used, and tool/MCP configuration comes from the base AGENT_CONFIGS. +""" + +import logging +import os +import re +from dataclasses import dataclass, field +from pathlib import Path + +from phase_config import sanitize_thinking_level + +logger = logging.getLogger(__name__) + + +@dataclass +class CustomAgentConfig: + """Parsed configuration from a custom agent .md file.""" + + agent_id: str + system_prompt: str + tools: list[str] | None = None # Override base tools (None = use defaults) + mcp_servers: list[str] | None = None # Override MCP servers (None = use defaults) + thinking: str | None = None # Override thinking level (None = use default) + raw_frontmatter: dict = field(default_factory=dict) + + +def get_agents_dir() -> Path: + """Get the custom agents directory (~/.claude/agents/).""" + config_dir = os.environ.get("CLAUDE_CONFIG_DIR") or os.path.join( + os.path.expanduser("~"), ".claude" + ) + return Path(config_dir) / "agents" + + +def parse_agent_file(file_path: Path) -> CustomAgentConfig | None: + """ + Parse a custom agent .md file. + + Extracts optional YAML frontmatter (between --- delimiters) and the + markdown body as the system prompt. 
+ + Args: + file_path: Path to the .md agent file + + Returns: + CustomAgentConfig if file is valid, None if file doesn't exist or is invalid + """ + if not file_path.exists() or not file_path.is_file(): + logger.warning(f"Custom agent file not found: {file_path}") + return None + + try: + content = file_path.read_text(encoding="utf-8") + except Exception as e: + logger.warning(f"Failed to read custom agent file {file_path}: {e}") + return None + + agent_id = file_path.stem # filename without .md + frontmatter = {} + + # Normalize CRLF to LF so frontmatter regex works on Windows files + normalized = content.replace("\r\n", "\n") + body = normalized + + # Extract YAML frontmatter if present + fm_match = re.match(r"^---\s*\n(.*?)\n---\s*\n(.*)", normalized, re.DOTALL) + if fm_match: + fm_text = fm_match.group(1) + body = fm_match.group(2).strip() + + # Simple YAML-like parsing (avoid heavy yaml dependency) + frontmatter = _parse_simple_yaml(fm_text) + + system_prompt = body.strip() + if not system_prompt: + logger.warning(f"Custom agent file has no content: {file_path}") + return None + + # Extract optional overrides from frontmatter + tools = _parse_string_list(frontmatter.get("tools")) + mcp_servers = _parse_string_list(frontmatter.get("mcp_servers")) + thinking = frontmatter.get("thinking") + + if thinking: + sanitized = sanitize_thinking_level(str(thinking)) + if sanitized != str(thinking): + logger.warning( + f"Thinking level '{thinking}' in {file_path} was sanitized to '{sanitized}'" + ) + thinking = sanitized + + return CustomAgentConfig( + agent_id=agent_id, + system_prompt=system_prompt, + tools=tools, + mcp_servers=mcp_servers, + thinking=thinking, + raw_frontmatter=frontmatter, + ) + + +def load_custom_agent(agent_id: str) -> CustomAgentConfig | None: + """ + Load a custom agent by ID. + + Searches through category directories in ~/.claude/agents/ for + a matching agent file. 
+ + Args: + agent_id: Agent ID (filename without .md extension) + + Returns: + CustomAgentConfig if found, None otherwise + """ + # Validate agent_id to prevent path traversal + if not re.fullmatch(r"[A-Za-z0-9._-]+", agent_id): + return None + + agents_dir = get_agents_dir() + if not agents_dir.exists(): + return None + + # Search in all category directories + for category_dir in sorted(agents_dir.iterdir()): + if not category_dir.is_dir(): + continue + agent_file = category_dir / f"{agent_id}.md" + if agent_file.exists(): + return parse_agent_file(agent_file) + + # Also check root agents dir (no category) + root_file = agents_dir / f"{agent_id}.md" + if root_file.exists(): + return parse_agent_file(root_file) + + logger.debug(f"Custom agent '{agent_id}' not found in {agents_dir}") + return None + + +def load_all_agents() -> list[CustomAgentConfig]: + """Load all custom agents from all categories and root level in ~/.claude/agents/.""" + agents_dir = get_agents_dir() + if not agents_dir.exists(): + return [] + + agents = [] + + # Load agents from category subdirectories + for category_dir in sorted(agents_dir.iterdir()): + if not category_dir.is_dir(): + continue + for agent_file in sorted(category_dir.glob("*.md")): + if agent_file.name == "README.md": + continue + agent = parse_agent_file(agent_file) + if agent: + agents.append(agent) + + # Also load root-level agent files (no category) + for agent_file in sorted(agents_dir.glob("*.md")): + if agent_file.name == "README.md": + continue + agent = parse_agent_file(agent_file) + if agent: + agents.append(agent) + + return agents + + +def _format_category_name(dir_name: str) -> str: + """Format a directory name into a human-readable category name. + + Strips a leading numeric prefix (e.g. '01-backend' -> 'Backend') and + replaces hyphens with spaces, then title-cases the result. + + Args: + dir_name: Raw directory name (e.g. '02-frontend-tools') + + Returns: + Formatted category name (e.g. 
'Frontend Tools') + """ + if "-" in dir_name: + return dir_name.split("-", 1)[-1].replace("-", " ").title() + return dir_name + + +def build_agents_catalog_prompt() -> str | None: + """ + Build a concise catalog of all available custom agents for system prompt injection. + + Returns a formatted string listing all agents by category with their descriptions, + or None if no agents are available. + """ + agents_dir = get_agents_dir() + if not agents_dir.exists(): + return None + + categories: list[tuple[str, list[tuple[str, str]]]] = [] + + for category_dir in sorted(agents_dir.iterdir()): + if not category_dir.is_dir(): + continue + category_name = _format_category_name(category_dir.name) + + agent_entries = [] + for agent_file in sorted(category_dir.glob("*.md")): + if agent_file.name == "README.md": + continue + agent = parse_agent_file(agent_file) + if agent: + # Get description from frontmatter, or first line of prompt + description = agent.raw_frontmatter.get("description", "") + if not description: + # Use first sentence of system prompt as fallback + first_line = agent.system_prompt.split("\n")[0].strip() + description = first_line[:120] + elif len(description) > 150: + description = description[:147] + "..." + agent_entries.append((agent.agent_id, description)) + + if agent_entries: + categories.append((category_name, agent_entries)) + + # Also include root-level agent files (no category) + root_entries: list[tuple[str, str]] = [] + for agent_file in sorted(agents_dir.glob("*.md")): + if agent_file.name == "README.md": + continue + agent = parse_agent_file(agent_file) + if agent: + description = agent.raw_frontmatter.get("description", "") + if not description: + first_line = agent.system_prompt.split("\n")[0].strip() + description = first_line[:120] + elif len(description) > 150: + description = description[:147] + "..." 
+ root_entries.append((agent.agent_id, description)) + if root_entries: + categories.append(("General", root_entries)) + + if not categories: + return None + + total = sum(len(entries) for _, entries in categories) + lines = [ + f"# Available Specialist Agents ({total} agents)", + "", + "You have access to the following specialist agents organized by category.", + "Use them when the task requires specialized expertise — spawn them as subagents", + "via the Agent tool with the appropriate subagent_type.", + "", + ] + + for category_name, entries in categories: + lines.append(f"## {category_name}") + for agent_id, desc in entries: + lines.append(f"- **{agent_id}**: {desc}") + lines.append("") + + return "\n".join(lines) + + +def _parse_simple_yaml(text: str) -> dict: + """ + Parse simple YAML-like frontmatter (key: value pairs). + + Uses ``yaml.safe_load`` when available for full YAML support (including + block-list syntax). Falls back to a manual parser that handles: + - key: value (strings) + - key: [item1, item2] (inline lists) + - key: (empty value) + - key:\\n - item1\\n - item2 (YAML block lists) + """ + # Prefer yaml.safe_load for robust parsing + try: + import yaml + + parsed = yaml.safe_load(text) + if isinstance(parsed, dict): + return parsed + except Exception: + # yaml.safe_load may fail or return non-dict; fall through to manual parser + pass + + # Fallback: manual parser with block-list support + result: dict = {} + lines = text.split("\n") + i = 0 + while i < len(lines): + line = lines[i] + stripped = line.strip() + if not stripped or stripped.startswith("#"): + i += 1 + continue + if ":" not in stripped: + i += 1 + continue + key, _, value = stripped.partition(":") + key = key.strip() + value = value.strip() + + # Parse inline list: [item1, item2] + if value.startswith("[") and value.endswith("]"): + items = value[1:-1].split(",") + result[key] = [item.strip().strip("\"'") for item in items if item.strip()] + elif value: + # Strip quotes + result[key] 
= value.strip("\"'") + else: + # Empty value — check if next lines are block-list items (- item) + block_items: list[str] = [] + j = i + 1 + while j < len(lines): + next_line = lines[j] + next_stripped = next_line.strip() + if not next_stripped or next_stripped.startswith("#"): + j += 1 + continue + if next_stripped.startswith("- "): + block_items.append(next_stripped[2:].strip().strip("\"'")) + j += 1 + else: + break + if block_items: + result[key] = block_items + i = j + continue + else: + result[key] = "" + + i += 1 + + return result + + +def _parse_string_list(value: object) -> list[str] | None: + """Parse a value as a list of strings, or None if empty/invalid.""" + if value is None: + return None + if isinstance(value, list): + cleaned = [str(v).strip() for v in value if v] + return cleaned if cleaned else None + if isinstance(value, str) and value: + return [v.strip() for v in value.split(",") if v.strip()] + return None diff --git a/apps/backend/agents/planner.py b/apps/backend/agents/planner.py index 6875c14df8..2e7caa5911 100644 --- a/apps/backend/agents/planner.py +++ b/apps/backend/agents/planner.py @@ -32,6 +32,7 @@ print_status, ) +from .custom_agents import build_agents_catalog_prompt from .session import run_agent_session logger = logging.getLogger(__name__) @@ -107,6 +108,10 @@ async def run_followup_planner( logger.info( f"[Planner] [Fast Mode] {'ENABLED' if fast_mode else 'disabled'} for follow-up planning" ) + + # Build catalog of available specialist agents + agents_catalog = build_agents_catalog_prompt() + client = create_client( project_dir, spec_dir, @@ -114,6 +119,7 @@ async def run_followup_planner( agent_type="planner", betas=planning_betas, fast_mode=fast_mode, + agents_catalog_prompt=agents_catalog, **thinking_kwargs, ) diff --git a/apps/backend/analysis/analyzers/base.py b/apps/backend/analysis/analyzers/base.py index 0a7dd4c2fe..ffcb1f3558 100644 --- a/apps/backend/analysis/analyzers/base.py +++ 
b/apps/backend/analysis/analyzers/base.py @@ -68,6 +68,9 @@ "core", "shared", "common", + "docs", + "documentation", + "microservices", } # Files that indicate a service root @@ -83,8 +86,44 @@ "build.gradle", "Makefile", "Dockerfile", + # Documentation tools + "mkdocs.yml", + "mkdocs.yaml", + "docusaurus.config.js", + "docusaurus.config.ts", + "conf.py", + "book.toml", } +# Glob patterns that indicate a service root (for files with variable names) +SERVICE_ROOT_GLOBS = [ + "*.csproj", + "*.fsproj", + "*.sln", +] + +# Deeper glob patterns for projects that nest manifests in subdirectories (e.g. .NET repos with src/) +SERVICE_ROOT_DEEP_GLOBS = [ + "src/**/*.csproj", + "src/**/*.fsproj", + "src/**/*.cs", # Fallback: .cs source files without .csproj (incomplete repos) +] + + +def has_service_root(dir_path: Path) -> bool: + """Check if a directory has service root indicators (exact files or glob patterns).""" + if any((dir_path / f).exists() for f in SERVICE_ROOT_FILES): + return True + if any( + next(dir_path.glob(pattern), None) is not None for pattern in SERVICE_ROOT_GLOBS + ): + return True + # Check deeper patterns for .NET repos that keep .csproj in src/ subdirectories + return any( + next(dir_path.glob(pattern), None) is not None + for pattern in SERVICE_ROOT_DEEP_GLOBS + ) + class BaseAnalyzer: """Base class with common utilities for all analyzers.""" diff --git a/apps/backend/analysis/analyzers/context/env_detector.py b/apps/backend/analysis/analyzers/context/env_detector.py index 534cdfb789..b671dc1c1d 100644 --- a/apps/backend/analysis/analyzers/context/env_detector.py +++ b/apps/backend/analysis/analyzers/context/env_detector.py @@ -6,7 +6,9 @@ - .env files and variants - .env.example files - docker-compose.yml +- .NET: appsettings.json, appsettings.{Environment}.json - Source code (os.getenv, process.env) +- C#: IConfiguration, Environment.GetEnvironmentVariable """ from __future__ import annotations @@ -36,10 +38,19 @@ def detect(self) -> None: 
required_vars = set() optional_vars = set() - # Parse various sources + # Parse concrete config sources first so their values take precedence. + # Code references (placeholders) run last and only fill in missing keys. self._parse_env_files(env_vars) self._parse_env_example(env_vars, required_vars) self._parse_docker_compose(env_vars) + + # .NET appsettings.json + self._parse_appsettings(env_vars) + + # .NET launchSettings.json (Properties/launchSettings.json) + self._parse_launch_settings(env_vars) + + # Code references last — only adds keys not already discovered above self._parse_code_references(env_vars, optional_vars) # Mark required vs optional @@ -175,6 +186,8 @@ def _parse_code_references( "index.ts", "config.js", "config.ts", + "Program.cs", + "Startup.cs", ] for entry_file in entry_files: @@ -194,7 +207,14 @@ def _parse_code_references( r"process\.env\.([A-Z_][A-Z0-9_]*)", ] - for pattern in python_patterns + js_patterns: + # C#: IConfiguration / Environment.GetEnvironmentVariable + csharp_patterns = [ + r'GetEnvironmentVariable\(["\']([A-Z_][A-Z0-9_]*)["\']', + r'configuration\[["\']([A-Za-z_:][A-Za-z0-9_:]*)["\']', + r'Configuration\[["\']([A-Za-z_:][A-Za-z0-9_:]*)["\']', + ] + + for pattern in python_patterns + js_patterns + csharp_patterns: matches = re.findall(pattern, content) for var_name in matches: if var_name not in env_vars: @@ -207,6 +227,124 @@ def _parse_code_references( "required": False, } + def _parse_appsettings(self, env_vars: dict[str, Any]) -> None: + """Parse .NET appsettings.json configuration files. + + For .NET solutions, scans entry point sub-project directories. 
+ """ + base_files = [ + "appsettings.json", + "appsettings.Development.json", + "appsettings.Production.json", + "appsettings.Staging.json", + ] + + # Build list of directories to scan + scan_dirs = [""] # Root directory + solution = self.analysis.get("dotnet_solution") + if solution: + for ep in solution.get("entry_points", []): + scan_dirs.append(ep["path"]) + + for scan_dir in scan_dirs: + for base_file in base_files: + settings_file = f"{scan_dir}/{base_file}" if scan_dir else base_file + content = self._read_json(settings_file) + if not content: + continue + + source = settings_file if scan_dir else base_file + self._flatten_appsettings(content, "", source, env_vars) + + def _flatten_appsettings( + self, obj: dict, prefix: str, source: str, env_vars: dict[str, Any] + ) -> None: + """Recursively flatten appsettings JSON into colon-separated keys.""" + # Skip certain top-level keys that are just noise + skip_sections = {"$schema", "iisSettings", "profiles"} + + for key, value in obj.items(): + if key in skip_sections: + continue + + full_key = f"{prefix}:{key}" if prefix else key + + if isinstance(value, dict): + self._flatten_appsettings(value, full_key, source, env_vars) + elif isinstance(value, list): + # Skip arrays (usually complex config) + continue + else: + # Leaf value + str_value = str(value) if value is not None else "" + is_sensitive = self._is_sensitive_key(full_key) + var_type = self._infer_env_var_type(str_value) + + # Connection strings are URLs + if "connectionstring" in full_key.lower(): + var_type = "url" + is_sensitive = True + + if full_key not in env_vars: + env_vars[full_key] = { + "value": "" if is_sensitive else str_value, + "source": source, + "type": var_type, + "sensitive": is_sensitive, + } + + def _parse_launch_settings(self, env_vars: dict[str, Any]) -> None: + """Parse .NET Properties/launchSettings.json for environment variables. + + For .NET solutions, scans entry point sub-project directories. 
+ """ + launch_paths = [ + "Properties/launchSettings.json", + "properties/launchSettings.json", + ] + + # For .NET solutions, also scan entry point directories + solution = self.analysis.get("dotnet_solution") + if solution: + for ep in solution.get("entry_points", []): + launch_paths.append(f"{ep['path']}/Properties/launchSettings.json") + launch_paths.append(f"{ep['path']}/properties/launchSettings.json") + + for launch_path in launch_paths: + data = self._read_json(launch_path) + if not data: + continue + + profiles = data.get("profiles", {}) + + # Prefer "http" profile, then first available + for profile_key in ["http", *profiles.keys()]: + profile = profiles.get(profile_key) + if not profile: + continue + + env_variables = profile.get("environmentVariables", {}) + if not env_variables: + continue + + for key, value in env_variables.items(): + if key in env_vars: + continue + + str_value = str(value) if value is not None else "" + is_sensitive = self._is_sensitive_key(key) + var_type = self._infer_env_var_type(str_value) + + env_vars[key] = { + "value": "" if is_sensitive else str_value, + "source": f"launchSettings:{profile_key}", + "type": var_type, + "sensitive": is_sensitive, + } + + # Only use the first profile per launchSettings file + break + @staticmethod def _is_sensitive_key(key: str) -> bool: """Determine if an environment variable key contains sensitive data.""" diff --git a/apps/backend/analysis/analyzers/database_detector.py b/apps/backend/analysis/analyzers/database_detector.py index 21b534796b..8479da2e40 100644 --- a/apps/backend/analysis/analyzers/database_detector.py +++ b/apps/backend/analysis/analyzers/database_detector.py @@ -5,6 +5,7 @@ Detects database models and schemas across different ORMs: - Python: SQLAlchemy, Django ORM - JavaScript/TypeScript: Prisma, TypeORM, Drizzle, Mongoose +- C#/.NET: Entity Framework Core """ from __future__ import annotations @@ -43,6 +44,9 @@ def detect_all_models(self) -> dict: # Mongoose models 
models.update(self._detect_mongoose_models()) + # C#/.NET Entity Framework Core + models.update(self._detect_ef_core_models()) + return models def _detect_sqlalchemy_models(self) -> dict: @@ -314,3 +318,230 @@ def _detect_mongoose_models(self) -> dict: } return models + + def _detect_ef_core_models(self) -> dict: + """Detect Entity Framework Core models.""" + models = {} + + # Directories to exclude from scanning + excluded_dirs = {"bin", "obj", "node_modules", ".git", "TestResults"} + + cs_files = [ + f + for f in self.path.glob("**/*.cs") + if not any(part in excluded_dirs for part in f.parts) + ] + + # Step 1: Find DbContext files and extract DbSet declarations + dbset_map = {} # Maps entity type name -> DbSet property name (used as table name) + dbcontext_pattern = re.compile( + r"class\s+\w+\s*:\s*(?:Identity)?DbContext(?:<[^>]+>)?" + ) + dbset_pattern = re.compile(r"DbSet<(\w+)>\s+(\w+)") + + for file_path in cs_files: + try: + content = file_path.read_text(encoding="utf-8") + except (OSError, UnicodeDecodeError): + continue + + if not dbcontext_pattern.search(content): + continue + + for dbset_match in dbset_pattern.finditer(content): + entity_type = dbset_match.group(1) + property_name = dbset_match.group(2) + dbset_map[entity_type] = property_name + + # Step 2: Collect entity names from IEntityTypeConfiguration + config_pattern = re.compile(r"IEntityTypeConfiguration<(\w+)>") + configured_entities = set() + + for file_path in cs_files: + try: + content = file_path.read_text(encoding="utf-8") + except (OSError, UnicodeDecodeError): + continue + + for config_match in config_pattern.finditer(content): + configured_entities.add(config_match.group(1)) + + # Build the full set of known entity names + known_entities = set(dbset_map.keys()) | configured_entities + + # Step 3: Scan .cs files for entity classes and extract properties + table_attr_pattern = re.compile(r'\[Table\(["\'](\w+)["\']\)\]') + class_pattern = re.compile(r"class\s+(\w+)") + property_pattern = 
re.compile( + r"public\s+(virtual\s+)?" + r"([\w<>,?\[\]\s]+?)\s+" + r"(\w+)\s*\{\s*get;\s*set;\s*\}" + ) + key_attr_pattern = re.compile(r"\[Key\]") + required_attr_pattern = re.compile(r"\[Required\]") + maxlength_attr_pattern = re.compile(r"\[(?:MaxLength|StringLength)\((\d+)\)\]") + column_attr_pattern = re.compile(r'\[Column\(["\'](\w+)["\']\)\]') + + # Navigation type prefixes to skip + navigation_prefixes = ( + "ICollection<", + "IList<", + "IEnumerable<", + "List<", + "Collection<", + "HashSet<", + ) + + # Known C# value/primitive types + csharp_types = { + "int", + "long", + "string", + "bool", + "DateTime", + "DateTimeOffset", + "Guid", + "decimal", + "double", + "float", + "byte[]", + "short", + "byte", + } + + for file_path in cs_files: + try: + content = file_path.read_text(encoding="utf-8") + except (OSError, UnicodeDecodeError): + continue + + lines = content.split("\n") + + # Check for [Table] attribute at file level to find entity classes + file_table_attrs = {} + for i, line in enumerate(lines): + table_match = table_attr_pattern.search(line) + if table_match: + # The [Table] attribute applies to the next class definition + for j in range(i + 1, min(i + 5, len(lines))): + class_match = class_pattern.search(lines[j]) + if class_match: + file_table_attrs[class_match.group(1)] = table_match.group( + 1 + ) + break + + # Find all classes in this file + class_matches = list(class_pattern.finditer(content)) + for idx, class_match in enumerate(class_matches): + class_name = class_match.group(1) + + # Only process classes that are known entities or have [Table] attr + is_known_entity = class_name in known_entities + has_table_attr = class_name in file_table_attrs + if not is_known_entity and not has_table_attr: + continue + + # Determine the region of this class body + class_start = class_match.end() + if idx + 1 < len(class_matches): + class_end = class_matches[idx + 1].start() + else: + class_end = len(content) + + # Limit scanning to a reasonable size + 
class_body = content[class_start : min(class_start + 5000, class_end)] + class_lines = class_body.split("\n") + + # Extract properties + fields = {} + for line_idx, line in enumerate(class_lines): + prop_match = property_pattern.search(line) + if not prop_match: + continue + + is_virtual = prop_match.group(1) is not None + raw_type = prop_match.group(2).strip() + prop_name = prop_match.group(3) + + # Skip virtual navigation properties + if is_virtual: + continue + + # Skip collection/navigation types + if any( + raw_type.startswith(prefix) for prefix in navigation_prefixes + ): + continue + + # Determine if nullable (type ends with ?) + is_nullable = raw_type.endswith("?") + clean_type = raw_type.rstrip("?") + + if not clean_type: + continue + + # Only include properties with recognized types or common patterns + if clean_type not in csharp_types and ( + not clean_type or not clean_type[0].isupper() + ): + continue + + # Look at preceding lines for attributes + is_primary_key = False + is_required = False + max_length = None + column_name = None + + # Check up to 4 lines above for attributes + attr_start = max(0, line_idx - 4) + attr_lines = "\n".join(class_lines[attr_start:line_idx]) + + if key_attr_pattern.search(attr_lines): + is_primary_key = True + if required_attr_pattern.search(attr_lines): + is_required = True + maxlen_match = maxlength_attr_pattern.search(attr_lines) + if maxlen_match: + max_length = int(maxlen_match.group(1)) + col_match = column_attr_pattern.search(attr_lines) + if col_match: + column_name = col_match.group(1) + + # Convention: property named "Id" or "Id" is primary key + if prop_name == "Id" or prop_name == f"{class_name}Id": + is_primary_key = True + + field_info = { + "type": clean_type, + "primary_key": is_primary_key, + "unique": is_primary_key, + "nullable": is_nullable and not is_required, + } + if max_length is not None: + field_info["max_length"] = max_length + if column_name is not None: + field_info["column"] = column_name + + 
fields[prop_name] = field_info + + if not fields: + continue + + # Determine table name + if has_table_attr: + table_name = file_table_attrs[class_name] + elif class_name in dbset_map: + table_name = dbset_map[class_name] + else: + # Simple pluralization: append 's' + table_name = class_name + "s" + + models[class_name] = { + "table": table_name, + "fields": fields, + "file": str(file_path.relative_to(self.path)), + "orm": "Entity Framework", + } + + return models diff --git a/apps/backend/analysis/analyzers/framework_analyzer.py b/apps/backend/analysis/analyzers/framework_analyzer.py index 2586f8873f..0f5e87b9e4 100644 --- a/apps/backend/analysis/analyzers/framework_analyzer.py +++ b/apps/backend/analysis/analyzers/framework_analyzer.py @@ -3,7 +3,7 @@ ========================= Detects programming languages, frameworks, and related technologies across different ecosystems. -Supports Python, Node.js/TypeScript, Go, Rust, and Ruby frameworks. +Supports Python, Node.js/TypeScript, Go, Rust, Ruby, C#/.NET, and documentation tools. 
""" from __future__ import annotations @@ -91,6 +91,43 @@ def detect_language_and_framework(self) -> None: content = self._read_file("Gemfile") self._detect_ruby_framework(content) + # C#/.NET detection (check root and src/ subdirectories) + elif ( + any(self.path.glob("*.csproj")) + or any(self.path.glob("*.sln")) + or any(self.path.glob("src/**/*.csproj")) + ): + self.analysis["language"] = "C#" + self.analysis["package_manager"] = "NuGet" + self._detect_dotnet_framework() + + # Documentation tool detection (standalone, no language-specific manifest) + elif self._exists("mkdocs.yml") or self._exists("mkdocs.yaml"): + self.analysis["language"] = "Python" + self.analysis["framework"] = "MkDocs" + self.analysis["type"] = "documentation" + self.analysis["package_manager"] = "pip" + self._detect_mkdocs_details() + elif self._exists("book.toml"): + self.analysis["language"] = "Rust" + self.analysis["framework"] = "mdBook" + self.analysis["type"] = "documentation" + self.analysis["package_manager"] = "cargo" + elif self._exists("conf.py"): + content = self._read_file("conf.py") + if "sphinx" in content.lower(): + self.analysis["language"] = "Python" + self.analysis["framework"] = "Sphinx" + self.analysis["type"] = "documentation" + self.analysis["package_manager"] = "pip" + + # Fallback: detect C# by .cs source files even without .csproj/.sln + # (handles repos where project files are missing or not yet committed) + elif any(self.path.glob("src/**/*.cs")): + self.analysis["language"] = "C#" + self.analysis["package_manager"] = "NuGet" + self._detect_dotnet_framework() + def _detect_python_framework(self, content: str) -> None: """Detect Python framework.""" from .port_detector import PortDetector @@ -106,12 +143,12 @@ def _detect_python_framework(self, content: str) -> None: "litestar": {"name": "Litestar", "type": "backend", "port": 8000}, } + port_detector = PortDetector(self.path, self.analysis) + for key, info in frameworks.items(): if key in content_lower: 
self.analysis["framework"] = info["name"] self.analysis["type"] = info["type"] - # Try to detect actual port, fall back to default - port_detector = PortDetector(self.path, self.analysis) detected_port = port_detector.detect_port_from_sources(info["port"]) self.analysis["default_port"] = detected_port break @@ -140,6 +177,89 @@ def _detect_node_framework(self, pkg: dict) -> None: deps = {**pkg.get("dependencies", {}), **pkg.get("devDependencies", {})} deps_lower = {k.lower(): k for k in deps.keys()} + main_deps = pkg.get("dependencies", {}) + main_deps_lower = {k.lower(): k for k in main_deps.keys()} + + # Documentation frameworks (check before generic frontend) + doc_frameworks = { + "@docusaurus/core": { + "name": "Docusaurus", + "type": "documentation", + "port": 3000, + }, + "vitepress": {"name": "VitePress", "type": "documentation", "port": 5173}, + "vuepress": {"name": "VuePress", "type": "documentation", "port": 8080}, + "@vuepress/core": { + "name": "VuePress", + "type": "documentation", + "port": 8080, + }, + "nextra": {"name": "Nextra", "type": "documentation", "port": 3000}, + } + + # Storybook packages are typically devDependencies and should only be treated + # as the primary framework when present in dependencies (not devDependencies). + # Apps with Storybook in devDependencies alongside React/Angular/Vue should be + # classified as frontend, not documentation. 
+ storybook_packages = { + "storybook": {"name": "Storybook", "type": "documentation", "port": 6006}, + "@storybook/react": { + "name": "Storybook", + "type": "documentation", + "port": 6006, + }, + "@storybook/angular": { + "name": "Storybook", + "type": "documentation", + "port": 6006, + }, + "@storybook/vue3": { + "name": "Storybook", + "type": "documentation", + "port": 6006, + }, + } + + port_detector = PortDetector(self.path, self.analysis) + + for key, info in doc_frameworks.items(): + if key in deps_lower: + self.analysis["framework"] = info["name"] + self.analysis["type"] = info["type"] + detected_port = port_detector.detect_port_from_sources(info["port"]) + self.analysis["default_port"] = detected_port + return # Documentation framework found, skip other detection + + # Only classify Storybook as primary framework if it's in main dependencies + for key, info in storybook_packages.items(): + if key in main_deps_lower: + self.analysis["framework"] = info["name"] + self.analysis["type"] = info["type"] + detected_port = port_detector.detect_port_from_sources(info["port"]) + self.analysis["default_port"] = detected_port + return # Storybook is the primary framework + + # Microfrontend detection (adds metadata, then falls through to frontend detection) + mfe_indicators = { + "@module-federation/enhanced": "Module Federation", + "@module-federation/runtime": "Module Federation", + "@angular-architects/module-federation": "Module Federation", + "@angular-architects/native-federation": "Native Federation", + "single-spa": "single-spa", + "qiankun": "Qiankun", + } + for key, mfe_name in mfe_indicators.items(): + if key in deps_lower: + self.analysis["microfrontend"] = mfe_name + break + # Also check webpack config for ModuleFederationPlugin + if not self.analysis.get("microfrontend"): + for config_file in ["webpack.config.js", "webpack.config.ts"]: + if self._exists(config_file): + wpc = self._read_file(config_file) + if "ModuleFederationPlugin" in wpc or 
"moduleFederation" in wpc: + self.analysis["microfrontend"] = "Module Federation" + break # Frontend frameworks frontend_frameworks = { @@ -256,11 +376,12 @@ def _detect_go_framework(self, content: str) -> None: "go-chi/chi": {"name": "Chi", "port": 8080}, } + port_detector = PortDetector(self.path, self.analysis) + for key, info in frameworks.items(): if key in content: self.analysis["framework"] = info["name"] self.analysis["type"] = "backend" - port_detector = PortDetector(self.path, self.analysis) detected_port = port_detector.detect_port_from_sources(info["port"]) self.analysis["default_port"] = detected_port break @@ -269,6 +390,8 @@ def _detect_rust_framework(self, content: str) -> None: """Detect Rust framework.""" from .port_detector import PortDetector + port_detector = PortDetector(self.path, self.analysis) + frameworks = { "actix-web": {"name": "Actix Web", "port": 8080}, "axum": {"name": "Axum", "port": 3000}, @@ -279,7 +402,6 @@ def _detect_rust_framework(self, content: str) -> None: if key in content: self.analysis["framework"] = info["name"] self.analysis["type"] = "backend" - port_detector = PortDetector(self.path, self.analysis) detected_port = port_detector.detect_port_from_sources(info["port"]) self.analysis["default_port"] = detected_port break @@ -329,8 +451,8 @@ def _detect_swift_framework(self) -> None: if line.startswith("import "): module = line.replace("import ", "").split()[0] imports.add(module) - except Exception: - continue + except (OSError, UnicodeDecodeError): + continue # Silently skip unreadable Swift files # Detect UI framework if "SwiftUI" in imports: @@ -367,8 +489,8 @@ def _detect_swift_framework(self) -> None: dependencies = self._detect_spm_dependencies() if dependencies: self.analysis["spm_dependencies"] = dependencies - except Exception: - # Silently fail if Swift detection has issues + except (OSError, UnicodeDecodeError, ValueError): + # Silently fail if Swift detection has issues (file I/O, path resolution) pass def 
_detect_spm_dependencies(self) -> list[str]: @@ -402,8 +524,8 @@ def _detect_spm_dependencies(self) -> list[str]: name = url.rstrip("/").split("/")[-1].replace(".git", "") if name and name not in dependencies: dependencies.append(name) - except Exception: - continue + except (OSError, UnicodeDecodeError): + continue # Silently skip unreadable .pbxproj files return dependencies @@ -416,3 +538,469 @@ def _detect_node_package_manager(self) -> str: elif self._exists("bun.lockb") or self._exists("bun.lock"): return "bun" return "npm" + + def _detect_dotnet_framework(self) -> None: + """Detect .NET framework from .csproj/.sln files. + + Handles two cases: + 1. .NET Solution (Clean Architecture): .sln + src/ with sub-projects + → Treated as a SINGLE service with aggregated analysis + 2. Single .csproj project → Standard single-project detection + """ + + from .port_detector import PortDetector + + # ---- .NET Solution detection ---- + # Parse the .sln file directly to discover all projects in the solution. + # A solution with 2+ projects is treated as a single service (not a monorepo). 
+ sln_files = list(self.path.glob("*.sln")) + + if sln_files: + best_solution = None + best_size = -1 + for sln_path in sorted(sln_files): + candidate = self._map_dotnet_solution(sln_path) + if candidate: + size = len(candidate.get("entry_points", [])) + len( + candidate.get("libraries", []) + ) + if size > best_size: + best_solution = candidate + best_size = size + if best_solution and ( + best_solution["entry_points"] or best_solution["libraries"] + ): + self.analysis["dotnet_solution"] = best_solution + self._apply_dotnet_solution_framework(best_solution) + return + + # ---- Single .csproj detection ---- + csproj_files = list(self.path.glob("*.csproj")) + if not csproj_files: + csproj_files = list(self.path.glob("**/*.csproj"))[:5] + + if not csproj_files: + return + + all_packages: set[str] = set() + sdk_types: list[str] = [] + has_azure_functions_version = False + + for csproj in csproj_files[:3]: + info = self._parse_csproj_info(csproj) + all_packages.update(info.get("packages", set())) + if info.get("sdk"): + sdk_types.append(info["sdk"]) + if info.get("is_azure_functions"): + has_azure_functions_version = True + + # Pick the most specific SDK type: prefer specialized SDKs over the generic one. + # E.g., Microsoft.NET.Sdk.Web is more specific than Microsoft.NET.Sdk. 
+ sdk_type = "" + sdk_priority = [ + "Microsoft.NET.Sdk.BlazorWebAssembly", + "Microsoft.NET.Sdk.Web", + "Microsoft.NET.Sdk.Worker", + "Microsoft.NET.Sdk.WindowsDesktop", + "Microsoft.NET.Sdk.Maui", + ] + for preferred in sdk_priority: + if preferred in sdk_types: + sdk_type = preferred + break + if not sdk_type and sdk_types: + sdk_type = sdk_types[0] + + port_detector = PortDetector(self.path, self.analysis) + + # Azure Functions (check before ASP.NET Core — AF projects may reference AspNetCore) + is_azure_functions = has_azure_functions_version or any( + "microsoft.azure.functions.worker" in p for p in all_packages + ) + if is_azure_functions: + self.analysis["framework"] = "Azure Functions" + self.analysis["type"] = "worker" + self.analysis["default_port"] = port_detector.detect_port_from_sources(7071) + if any("durabletask" in p for p in all_packages): + self.analysis["durable_functions"] = True + + # ASP.NET Core Web API + elif sdk_type == "Microsoft.NET.Sdk.Web" or any( + p.startswith("microsoft.aspnetcore") for p in all_packages + ): + if ( + any("blazor" in p for p in all_packages) + or sdk_type == "Microsoft.NET.Sdk.BlazorWebAssembly" + ): + self.analysis["framework"] = "Blazor" + self.analysis["type"] = "frontend" + self.analysis["default_port"] = port_detector.detect_port_from_sources( + 5000 + ) + else: + self.analysis["framework"] = "ASP.NET Core" + self.analysis["type"] = "backend" + self.analysis["default_port"] = port_detector.detect_port_from_sources( + 5000 + ) + + if any("microsoft.aspnetcore.openapi" in p for p in all_packages) or any( + "swashbuckle" in p for p in all_packages + ): + self.analysis["api_docs"] = "Swagger/OpenAPI" + + # WPF + elif sdk_type == "Microsoft.NET.Sdk.WindowsDesktop" or any( + "wpf" in p for p in all_packages + ): + self.analysis["framework"] = "WPF" + self.analysis["type"] = "desktop" + + # MAUI + elif ( + any("microsoft.maui" in p for p in all_packages) + or sdk_type == "Microsoft.NET.Sdk.Maui" + ): + 
self.analysis["framework"] = "MAUI" + self.analysis["type"] = "mobile" + + # Worker Service + elif sdk_type == "Microsoft.NET.Sdk.Worker" or any( + "microsoft.extensions.hosting" in p for p in all_packages + ): + self.analysis["framework"] = ".NET Worker" + self.analysis["type"] = "worker" + + # gRPC + elif any("grpc" in p for p in all_packages): + self.analysis["framework"] = "gRPC .NET" + self.analysis["type"] = "backend" + self.analysis["default_port"] = port_detector.detect_port_from_sources(5000) + + # Generic .NET (library or console) + else: + self.analysis["framework"] = ".NET" + if not self.analysis.get("type"): + self.analysis["type"] = "backend" + + self._apply_dotnet_metadata(all_packages) + + # .sln GUIDs + _SLN_FOLDER_GUID = "2150E333-8FDC-42A3-9474-1A3956D46DE8" + + def _map_dotnet_solution(self, sln_path: Path) -> dict | None: + """Map a .NET solution by parsing the .sln file. + + The .sln file lists every project with its name, relative path to .csproj, + and a type GUID. Solution Folders (virtual grouping) are skipped. 
+ Each real project's .csproj is then parsed to classify it as: + - API entry point (Microsoft.NET.Sdk.Web) + - Worker entry point (Azure Functions / Microsoft.NET.Sdk.Worker) + - Test project (path contains 'tests' or 'test') + - Library (everything else, classified by name) + """ + import re + + try: + sln_content = sln_path.read_text(encoding="utf-8", errors="ignore") + except (OSError, UnicodeDecodeError): + return None + + # Parse Project entries from .sln + # Format: Project("{TYPE_GUID}") = "Name", "path\to\project.csproj", "{PROJ_GUID}" + project_pattern = re.compile( + r'Project\("\{([^}]+)\}"\)\s*=\s*"([^"]+)"\s*,\s*"([^"]+)"\s*,\s*"\{[^}]+\}"' + ) + + entry_points: list[dict] = [] + libraries: list[dict] = [] + test_projects: list[dict] = [] + all_packages: set[str] = set() + projects: dict[str, dict] = {} + dependency_graph: dict[str, list[str]] = {} + + for match in project_pattern.finditer(sln_content): + type_guid = match.group(1).upper() + project_name = match.group(2) + project_rel_path = match.group(3).replace( + "\\", "/" + ) # Normalize to Unix paths + + # Skip Solution Folders (virtual grouping, no actual project) + if type_guid == self._SLN_FOLDER_GUID: + continue + + # Resolve absolute path to .csproj + csproj_path = self.path / project_rel_path + if not csproj_path.exists(): + continue + + # Get the project directory path relative to solution root. + # Skip projects that resolve outside the solution directory + # (e.g., via ".." segments in the .sln reference). 
+ try: + project_dir = str(csproj_path.parent.relative_to(self.path)) + except ValueError: + continue + + # Parse .csproj to determine type + info = self._parse_csproj_info(csproj_path) + packages = info.pop("packages", set()) + project_references = info.get("project_references", []) + all_packages.update(packages) + + sdk = info.get("sdk", "") + is_test = "test" in project_dir.lower() + + entry = { + "name": project_name, + "path": project_dir, + } + + is_entry_point = False + + if is_test: + role = "test" + test_projects.append(entry) + elif sdk == "Microsoft.NET.Sdk.Web": + role = "api" + is_entry_point = True + entry["type"] = "api" + entry_points.append(entry) + elif info.get("is_azure_functions"): + role = "worker" + is_entry_point = True + entry["type"] = "worker" + entry["framework"] = "Azure Functions" + entry_points.append(entry) + elif sdk == "Microsoft.NET.Sdk.Worker": + role = "worker" + is_entry_point = True + entry["type"] = "worker" + entry["framework"] = ".NET Worker" + entry_points.append(entry) + elif info.get("output_type", "").lower() == "exe": + role = "tool" + is_entry_point = True + entry["type"] = "tool" + entry_points.append(entry) + else: + # Library — classify by name + name_lower = project_name.lower() + if "gateway" in name_lower: + role = "data_access" + elif name_lower == "application": + role = "business_logic" + elif name_lower == "contracts": + role = "contracts" + elif "infrastructure" in name_lower: + role = "infrastructure" + elif name_lower == "client": + role = "client" + else: + role = "library" + entry["role"] = role + libraries.append(entry) + + # Per-project detail record + projects[project_name] = { + "path": project_dir, + "sdk": sdk or None, + "target_framework": info.get("target_framework"), + "output_type": info.get("output_type"), + "role": role, + "is_entry_point": is_entry_point, + "container_support": info.get("container_support", False), + "packages": sorted(packages), + "project_references": project_references, 
+ }
+ dependency_graph[project_name] = project_references
+
+ if not entry_points and not libraries:
+ return None
+
+ return {
+ "solution_file": sln_path.name,
+ "entry_points": entry_points,
+ "libraries": libraries,
+ "test_projects": test_projects,
+ "all_packages": sorted(all_packages),
+ "projects": projects,
+ "dependency_graph": dependency_graph,
+ }
+
+ def _parse_csproj_info(self, csproj_path: Path) -> dict:
+ """Parse a .csproj file and extract SDK type, packages, project references, and key properties."""
+ import re
+ import xml.etree.ElementTree as ET
+ from pathlib import PureWindowsPath
+
+ info: dict = {"packages": set(), "project_references": []}
+
+ try:
+ content = csproj_path.read_text(encoding="utf-8", errors="ignore")
+
+ sdk_match = re.search(r'Sdk="([^"]+)"', content)
+ if sdk_match:
+ info["sdk"] = sdk_match.group(1)
+
+ output_match = re.search(r"<OutputType>([^<]+)</OutputType>", content)
+ if output_match:
+ info["output_type"] = output_match.group(1)
+
+ tf_match = re.search(r"<TargetFramework>([^<]+)</TargetFramework>", content)
+ if tf_match:
+ info["target_framework"] = tf_match.group(1)
+
+ if "<AzureFunctionsVersion" in content:
+ info["is_azure_functions"] = True
+
+ if "<EnableSdkContainerSupport" in content:
+ info["container_support"] = True
+
+ # Parse XML tree once for both PackageReference and ProjectReference
+ try:
+ tree = ET.fromstring(content)
+ ns = ""
+ ns_match = re.search(r"\{([^}]+)\}", tree.tag)
+ if ns_match:
+ ns = ns_match.group(1)
+
+ # PackageReference
+ found_packages = list(tree.iter("PackageReference"))
+ if not found_packages and ns:
+ found_packages = list(tree.iter(f"{{{ns}}}PackageReference"))
+ for pkg_ref in found_packages:
+ include = pkg_ref.get("Include", "")
+ if include:
+ info["packages"].add(include.lower())
+ if not found_packages:
+ refs = re.findall(r'<PackageReference[^>]*Include="([^"]+)"', content)
+ for ref in refs:
+ info["packages"].add(ref.lower())
+
+ # ProjectReference — dependency names derived from referenced
+ # .csproj paths (Windows-style separators in the Include attribute)
+ found_refs = list(tree.iter("ProjectReference"))
+ if not found_refs and ns:
+ found_refs = list(tree.iter(f"{{{ns}}}ProjectReference"))
+ for proj_ref in found_refs:
+ include = proj_ref.get("Include", "")
+ if include:
+ info["project_references"].append(PureWindowsPath(include).stem)
+ except ET.ParseError:
+ # Malformed XML — fall back to regex-only package extraction
+ refs = re.findall(r'<PackageReference[^>]*Include="([^"]+)"', content)
+ for ref in refs:
+ info["packages"].add(ref.lower())
+ except (OSError, UnicodeDecodeError):
+ pass # Unreadable .csproj — return whatever was parsed so far
+
+ return info
+
+ def _apply_dotnet_solution_framework(self, solution: dict) -> None:
+ """Set framework properties based on .NET solution structure."""
+ from .port_detector import PortDetector
+
+ entry_points = solution["entry_points"]
+ all_packages = set(solution.get("all_packages", []))
+
+ # Determine primary 
framework from entry points + has_api = any(ep.get("type") == "api" for ep in entry_points) + has_worker = any(ep.get("type") == "worker" for ep in entry_points) + + if has_api: + self.analysis["framework"] = "ASP.NET Core" + self.analysis["type"] = "backend" + elif has_worker: + ep = next(ep for ep in entry_points if ep["type"] == "worker") + self.analysis["framework"] = ep.get("framework", ".NET Worker") + self.analysis["type"] = "worker" + else: + self.analysis["framework"] = ".NET" + self.analysis["type"] = "library" + + # Record all components (API + Workers) + if has_api and has_worker: + self.analysis["components"] = [ + { + "name": ep["name"], + "type": ep["type"], + "path": ep["path"], + "framework": ep.get("framework", self.analysis["framework"]), + } + for ep in entry_points + ] + + # Port detection — try API entry point first + port_detector = PortDetector(self.path, self.analysis) + self.analysis["default_port"] = port_detector.detect_port_from_sources(5000) + + # API docs + if any("microsoft.aspnetcore.openapi" in p for p in all_packages) or any( + "swashbuckle" in p for p in all_packages + ): + self.analysis["api_docs"] = "Swagger/OpenAPI" + + self._apply_dotnet_metadata(all_packages) + + def _apply_dotnet_metadata(self, all_packages: set[str]) -> None: + """Apply common .NET metadata (ORM, messaging, testing) from packages.""" + # ORM detection + if any( + "entityframeworkcore" in p or "entityframework" in p for p in all_packages + ): + self.analysis["orm"] = "Entity Framework" + elif any("dapper" in p for p in all_packages): + self.analysis["orm"] = "Dapper" + elif any("npgsql" in p for p in all_packages): + self.analysis["orm"] = "Npgsql" + + # Task queue / messaging + if any("masstransit" in p for p in all_packages): + self.analysis["task_queue"] = "MassTransit" + elif any("hangfire" in p for p in all_packages): + self.analysis["task_queue"] = "Hangfire" + elif any("rabbitmq" in p for p in all_packages): + self.analysis["task_queue"] = "RabbitMQ" 
+ + # Testing + if any("xunit" in p for p in all_packages): + self.analysis["testing"] = "xUnit" + elif any("nunit" in p for p in all_packages): + self.analysis["testing"] = "NUnit" + elif any("mstest" in p for p in all_packages): + self.analysis["testing"] = "MSTest" + + # Durable functions + if any("durabletask" in p for p in all_packages): + self.analysis["durable_functions"] = True + + def _detect_mkdocs_details(self) -> None: + """Detect MkDocs configuration details.""" + from .port_detector import PortDetector + + config_file = "mkdocs.yml" if self._exists("mkdocs.yml") else "mkdocs.yaml" + content = self._read_file(config_file) + content_lower = content.lower() + + if "material" in content_lower: + self.analysis["framework"] = "MkDocs Material" + port_detector = PortDetector(self.path, self.analysis) + self.analysis["default_port"] = port_detector.detect_port_from_sources(8000) diff --git a/apps/backend/analysis/analyzers/port_detector.py b/apps/backend/analysis/analyzers/port_detector.py index 7e533b43b3..8fc95ba8df 100644 --- a/apps/backend/analysis/analyzers/port_detector.py +++ b/apps/backend/analysis/analyzers/port_detector.py @@ -28,12 +28,13 @@ def detect_port_from_sources(self, default_port: int) -> int: Checks in order of priority: 1. Entry point files (app.py, main.py, etc.) for uvicorn.run(), app.run(), etc. - 2. Environment files (.env, .env.local, .env.development) - 3. Docker Compose port mappings - 4. Configuration files (config.py, settings.py, etc.) - 5. Package.json scripts (for Node.js) - 6. Makefile/shell scripts - 7. Falls back to default_port if nothing found + 2. .NET launchSettings.json (Properties/launchSettings.json) + 3. Environment files (.env, .env.local, .env.development) + 4. Docker Compose port mappings + 5. Configuration files (config.py, settings.py, etc.) + 6. Package.json scripts (for Node.js) + 7. Makefile/shell scripts + 8. 
Falls back to default_port if nothing found Args: default_port: The framework's conventional default port @@ -46,28 +47,33 @@ def detect_port_from_sources(self, default_port: int) -> int: if port: return port - # 2. Check environment files + # 2. Check .NET launchSettings.json (Properties/launchSettings.json) + port = self._detect_port_in_launch_settings() + if port: + return port + + # 3. Check environment files port = self._detect_port_in_env_files() if port: return port - # 3. Check Docker Compose + # 4. Check Docker Compose port = self._detect_port_in_docker_compose() if port: return port - # 4. Check configuration files + # 5. Check configuration files port = self._detect_port_in_config_files() if port: return port - # 5. Check package.json scripts (for Node.js) + # 6. Check package.json scripts (for Node.js) if self.analysis.get("language") in ["JavaScript", "TypeScript"]: port = self._detect_port_in_package_scripts() if port: return port - # 6. Check Makefile/shell scripts + # 7. Check Makefile/shell scripts port = self._detect_port_in_scripts() if port: return port @@ -309,6 +315,73 @@ def _detect_port_in_package_scripts(self) -> int | None: return None + def _detect_port_in_launch_settings(self) -> int | None: + """Detect port from .NET Properties/launchSettings.json. + + Checks for applicationUrl (Web API) and commandLineArgs --port (Azure Functions). + For .NET solutions, scans entry point sub-projects (Api first, then Workers). 
+ """ + launch_paths = [ + "Properties/launchSettings.json", + "properties/launchSettings.json", + ] + + # For .NET solutions, prioritize API entry points over workers + solution = self.analysis.get("dotnet_solution") + if solution: + api_paths = [] + worker_paths = [] + for ep in solution.get("entry_points", []): + upper = f"{ep['path']}/Properties/launchSettings.json" + lower = f"{ep['path']}/properties/launchSettings.json" + if ep.get("type") == "api": + api_paths.extend([upper, lower]) + else: + worker_paths.extend([upper, lower]) + launch_paths = api_paths + worker_paths + launch_paths + + for launch_path in launch_paths: + data = self._read_json(launch_path) + if not data: + continue + + profiles = data.get("profiles", {}) + + # Prefer "http" profile, then first profile with applicationUrl + for profile_key in ["http", *profiles.keys()]: + profile = profiles.get(profile_key) + if not profile: + continue + + # 1. Check applicationUrl (e.g., "http://localhost:5233") + app_url = profile.get("applicationUrl", "") + if app_url: + # May contain multiple URLs separated by ";" + for url in app_url.split(";"): + url = url.strip() + match = re.search(r":(\d+)/?$", url) + if match: + try: + port = int(match.group(1)) + if 1000 <= port <= 65535: + return port + except ValueError: + continue + + # 2. 
Check commandLineArgs (e.g., "--port 7173" or "--port=7173") + cmd_args = profile.get("commandLineArgs", "") + if cmd_args: + match = re.search(r"--port[=\s]+(\d+)", cmd_args) + if match: + try: + port = int(match.group(1)) + if 1000 <= port <= 65535: + return port + except ValueError: + pass # Expected for non-numeric port values in args + + return None + def _detect_port_in_scripts(self) -> int | None: """Detect port in Makefile or shell scripts.""" script_files = ["Makefile", "start.sh", "run.sh", "dev.sh"] diff --git a/apps/backend/analysis/analyzers/project_analyzer_module.py b/apps/backend/analysis/analyzers/project_analyzer_module.py index b7380dbb49..2d90e5af31 100644 --- a/apps/backend/analysis/analyzers/project_analyzer_module.py +++ b/apps/backend/analysis/analyzers/project_analyzer_module.py @@ -10,7 +10,7 @@ from pathlib import Path from typing import Any -from .base import SERVICE_INDICATORS, SERVICE_ROOT_FILES, SKIP_DIRS +from .base import SERVICE_INDICATORS, SKIP_DIRS, has_service_root from .service_analyzer import ServiceAnalyzer @@ -62,7 +62,20 @@ def _detect_project_type(self) -> None: self.index["project_type"] = "monorepo" return - # Check for multiple service directories + # .NET solution structure: .sln at root with src/ containing sub-projects + # This is Clean Architecture / DDD — one solution = one service, NOT a monorepo. + # The framework_analyzer handles mapping the internal structure (Api, Worker, + # libraries) into a single aggregated service via analysis["dotnet_solution"]. + # Only skip monorepo detection when there's exactly ONE .sln; multiple .sln + # files may indicate a multi-solution monorepo. 
+ sln_files = list(self.project_dir.glob("*.sln")) + has_single_sln_with_src = False + if len(sln_files) == 1: + src_dir = self.project_dir / "src" + if src_dir.exists() and src_dir.is_dir(): + has_single_sln_with_src = True + + # Check for multiple service directories at root level service_dirs_found = 0 for item in self.project_dir.iterdir(): if not item.is_dir(): @@ -71,12 +84,16 @@ def _detect_project_type(self) -> None: continue # Check if this directory has service root files - if any((item / f).exists() for f in SERVICE_ROOT_FILES): + if has_service_root(item): service_dirs_found += 1 - # If we have 2+ directories with service root files, it's likely a monorepo + # If we have 2+ directories with service root files, it's likely a monorepo. + # A single .sln with src/ is only treated as single-project when there are + # no additional sibling service directories outside src/. if service_dirs_found >= 2: self.index["project_type"] = "monorepo" + elif has_single_sln_with_src and service_dirs_found == 0: + pass # Single .NET solution, not a monorepo def _find_and_analyze_services(self) -> None: """Find all services and analyze each.""" @@ -89,6 +106,7 @@ def _find_and_analyze_services(self) -> None: self.project_dir / "packages", self.project_dir / "apps", self.project_dir / "services", + self.project_dir / "src", # .NET Clean Architecture / DDD ] for location in service_locations: @@ -104,10 +122,10 @@ def _find_and_analyze_services(self) -> None: continue # Check if this looks like a service - has_root_file = any((item / f).exists() for f in SERVICE_ROOT_FILES) + has_service_marker = has_service_root(item) is_service_name = item.name.lower() in SERVICE_INDICATORS - if has_root_file or ( + if has_service_marker or ( location == self.project_dir and is_service_name ): analyzer = ServiceAnalyzer(item, item.name) @@ -118,10 +136,16 @@ def _find_and_analyze_services(self) -> None: services[item.name] = service_info else: # Single project - analyze root - analyzer = 
ServiceAnalyzer(self.project_dir, "main") + # For .NET solutions, use the .sln name as service name + sln_files = list(self.project_dir.glob("*.sln")) + if sln_files: + service_name = sln_files[0].stem # e.g. "Smart.Management.Sales" + else: + service_name = self.project_dir.name + analyzer = ServiceAnalyzer(self.project_dir, service_name) service_info = analyzer.analyze() if service_info.get("language"): - services["main"] = service_info + services[service_name] = service_info self.index["services"] = services diff --git a/apps/backend/analysis/analyzers/route_detector.py b/apps/backend/analysis/analyzers/route_detector.py index 0ff51e74ff..d9ddce1fca 100644 --- a/apps/backend/analysis/analyzers/route_detector.py +++ b/apps/backend/analysis/analyzers/route_detector.py @@ -7,6 +7,8 @@ - Node.js: Express, Next.js - Go: Gin, Echo, Chi, Fiber - Rust: Axum, Actix +- C#/.NET: ASP.NET Core (Controllers, Minimal APIs) +- TypeScript: Angular """ from __future__ import annotations @@ -21,7 +23,15 @@ class RouteDetector(BaseAnalyzer): """Detects API routes across multiple web frameworks.""" # Directories to exclude from route detection - EXCLUDED_DIRS = {"node_modules", ".venv", "venv", "__pycache__", ".git"} + EXCLUDED_DIRS = { + "node_modules", + ".venv", + "venv", + "__pycache__", + ".git", + "bin", + "obj", + } def __init__(self, path: Path): super().__init__(path) @@ -55,6 +65,12 @@ def detect_all_routes(self) -> list[dict]: # Rust Axum/Actix routes.extend(self._detect_rust_routes()) + # C#/.NET ASP.NET Core + routes.extend(self._detect_aspnet_routes()) + + # Angular + routes.extend(self._detect_angular_routes()) + return routes def _detect_fastapi_routes(self) -> list[dict]: @@ -416,3 +432,410 @@ def _detect_rust_routes(self) -> list[dict]: ) return routes + + def _detect_aspnet_routes(self) -> list[dict]: + """Detect ASP.NET Core routes (Controllers and Minimal APIs).""" + routes = [] + cs_files = [ + f for f in self.path.glob("**/*.cs") if 
self._should_include_file(f) + ] + + for file_path in cs_files: + try: + content = file_path.read_text(encoding="utf-8") + except (OSError, UnicodeDecodeError): + continue + + # --- Controller-based routes --- + routes.extend(self._detect_aspnet_controller_routes(file_path, content)) + + # --- Minimal API routes --- + routes.extend(self._detect_aspnet_minimal_api_routes(file_path, content)) + + return routes + + def _normalize_aspnet_path(self, path: str) -> str: + """Normalize ASP.NET route path: convert {param} and {param:type} to :param format.""" + # Convert {param:constraint} to :param (e.g., {id:int} -> :id) + path = re.sub(r"\{(\w+):[^}]+\}", r":\1", path) + # Convert {param} to :param (e.g., {id} -> :id) + path = re.sub(r"\{(\w+)\}", r":\1", path) + # Ensure leading slash + if path and not path.startswith("/"): + path = "/" + path + # Collapse duplicate slashes (e.g., //api/users -> /api/users) + path = re.sub(r"//+", "/", path) + return path + + def _detect_aspnet_controller_routes( + self, file_path: Path, content: str + ) -> list[dict]: + """Detect routes from ASP.NET Core controller classes.""" + routes = [] + + # Check if this file contains [ApiController] or inherits from Controller/ControllerBase + is_controller = bool( + re.search(r"\[ApiController\]|:\s*(?:Controller|ControllerBase)\b", content) + ) + if not is_controller: + return routes + + # Extract class name + class_match = re.search(r"class\s+(\w+)", content) + if not class_match: + return routes + class_name = class_match.group(1) + + # Derive controller name (strip "Controller" suffix, lowercase) + controller_name = class_name + if controller_name.endswith("Controller"): + controller_name = controller_name[: -len("Controller")] + controller_name = controller_name.lower() + + # Find class-level [Route("...")] attribute + class_route_prefix = "" + # Look for [Route("...")] in the ~500 chars before the class declaration + class_decl_pos = class_match.start() + pre_class_section = 
content[max(0, class_decl_pos - 500) : class_decl_pos] + # Find the last [Route("...")] before the class (closest to class declaration) + route_attr_matches = list( + re.finditer(r'\[Route\(["\']([^"\']+)["\']\)\]', pre_class_section) + ) + if route_attr_matches: + class_route_prefix = route_attr_matches[-1].group(1) + + # Replace [controller] placeholder with actual controller name + class_route_prefix = class_route_prefix.replace("[controller]", controller_name) + + # Detect class-level [Authorize] + class_authorize = bool(re.search(r"\[Authorize", pre_class_section)) + + # Find all [Http*] attributed methods + # Captures: [HttpGet], [HttpGet("route")], [HttpGet("route", Name = "...")] + # Also handles unquoted route templates like [HttpGet({id})] + http_method_pattern = re.compile( + r'\[(Http(?:Get|Post|Put|Delete|Patch))(?:\((?:\s*["\']([^"\']*)["\'][^)]*)?\))?\]', + re.MULTILINE, + ) + + for match in http_method_pattern.finditer(content): + attr_name = match.group(1) # e.g., "HttpGet" + method_route = match.group(2) or "" # e.g., "path" or "" + + # Map attribute to HTTP method + method_map = { + "HttpGet": "GET", + "HttpPost": "POST", + "HttpPut": "PUT", + "HttpDelete": "DELETE", + "HttpPatch": "PATCH", + } + http_method = method_map.get(attr_name, "GET") + + # Build full path from class prefix + method route + full_path = class_route_prefix + if method_route: + if full_path and not full_path.endswith("/"): + full_path += "/" + full_path += method_route + + # Replace [action] with method name (find the method after the attribute) + method_name_match = re.search( + r"(?:public|private|protected|internal)\s+(?:(?:async|static|virtual|override|abstract|sealed)\s+)*\S+\s+(\w+)\s*\(", + content[match.end() : match.end() + 300], + ) + if method_name_match: + method_name = method_name_match.group(1).lower() + full_path = full_path.replace("[action]", method_name) + else: + # Remove unresolved [action] placeholder + full_path = full_path.replace("[action]", "") + + # 
Normalize path params + full_path = self._normalize_aspnet_path(full_path) + + # Check for method-level [Authorize] + # Look backwards from [Http*] to the previous method end (}) or class opening + # and forward to the method signature opening ( + attr_before_start = max(0, match.start() - 300) + pre_section = content[attr_before_start : match.start()] + # Find the last } or { before this attribute to bound the search + last_brace = max(pre_section.rfind("}"), pre_section.rfind("{")) + if last_brace >= 0: + pre_section = pre_section[last_brace + 1 :] + + # Also check attributes between [Http*] and the method signature + post_section = content[match.end() : match.end() + 200] + # Stop at the method body opening brace + brace_pos = post_section.find("{") + if brace_pos >= 0: + post_section = post_section[:brace_pos] + + method_authorize = bool( + re.search(r"\[Authorize", pre_section) + or re.search(r"\[Authorize", post_section) + ) + + requires_auth = class_authorize or method_authorize + + routes.append( + { + "path": full_path, + "methods": [http_method], + "file": str(file_path.relative_to(self.path)), + "framework": "ASP.NET Core", + "requires_auth": requires_auth, + } + ) + + return routes + + def _detect_aspnet_minimal_api_routes( + self, file_path: Path, content: str + ) -> list[dict]: + """Detect routes from ASP.NET Core Minimal APIs. + + Supports both direct mapping (app.MapGet) and group-based mapping + (var root = app.MapGroup("/prefix"); root.MapGet("/path", ...)). 
+ """ + routes = [] + + # Step 1: Detect MapGroup base paths and their auth status + # Pattern: var root = app.MapGroup("/api/v1/orders")...RequireAuthorization() + group_prefixes: dict[str, tuple[str, bool]] = {} # var_name -> (prefix, auth) + group_pattern = re.compile( + r'(?:var|_)\s+(\w+)\s*=\s*\w+\s*\.\s*MapGroup\s*\(\s*["\']([^"\']+)["\']', + re.MULTILINE, + ) + for match in group_pattern.finditer(content): + var_name = match.group(1) + prefix = match.group(2).rstrip("/") + # Check if the group chain includes RequireAuthorization + stmt_end = content.find(";", match.end()) + if stmt_end == -1: + stmt_end = min(len(content), match.end() + 500) + group_stmt = content[match.start() : stmt_end] + group_auth = ".RequireAuthorization(" in group_stmt + group_prefixes[var_name] = (prefix, group_auth) + + # Step 2: Detect individual route mappings + # Match any variable calling .MapGet/Post/Put/Delete/Patch + minimal_api_pattern = re.compile( + r'(\w+)\s*\.\s*Map(Get|Post|Put|Delete|Patch)\s*\(\s*["\']([^"\']*)["\']', + re.MULTILINE, + ) + + for match in minimal_api_pattern.finditer(content): + var_name = match.group(1) + http_method = match.group(2).upper() + path = match.group(3) + + # Resolve group prefix if the variable references a known group + prefix = "" + group_auth = False + if var_name in group_prefixes: + prefix, group_auth = group_prefixes[var_name] + + # Build full path from group prefix + route path + if path and path != "/": + full_path = prefix + "/" + path.lstrip("/") + elif path == "/": + full_path = prefix + "/" + else: + full_path = prefix if prefix else "/" + + # Normalize path params ({id} -> :id) + full_path = self._normalize_aspnet_path(full_path) + + # Check for .RequireAuthorization() in the same statement + stmt_end = content.find(";", match.end()) + if stmt_end == -1: + stmt_end = min(len(content), match.end() + 300) + route_stmt = content[match.start() : stmt_end] + + requires_auth = group_auth or ".RequireAuthorization(" in route_stmt + 
+ routes.append( + { + "path": full_path, + "methods": [http_method], + "file": str(file_path.relative_to(self.path)), + "framework": "ASP.NET Core", + "requires_auth": requires_auth, + } + ) + + return routes + + def _detect_angular_routes(self) -> list[dict]: + """Detect Angular routes from route configuration files.""" + routes = [] + ts_files = [ + f + for f in self.path.glob("**/*.ts") + if self._should_include_file(f) + and not any(part in {"dist", "build"} for part in f.parts) + ] + + for file_path in ts_files: + try: + content = file_path.read_text(encoding="utf-8") + except (OSError, UnicodeDecodeError): + continue + + # Check if this file contains Angular route definitions + has_routes = bool( + re.search( + r"(?:Routes|Route\[\])\s*=|RouterModule\.for(?:Root|Child)\s*\(", + content, + ) + ) + if not has_routes: + continue + + # Extract route objects from the content + routes.extend(self._extract_angular_routes(file_path, content, prefix="")) + + return routes + + def _extract_angular_routes( + self, file_path: Path, content: str, prefix: str + ) -> list[dict]: + """Extract route definitions from Angular route configuration. + + To avoid double-counting nested child routes, this method tracks which + character ranges have been consumed as children blocks. The top-level + finditer only processes route objects whose opening brace falls outside + any already-consumed children range. + """ + routes = [] + + # Ranges (start, end) of children blocks that have been extracted and + # will be recursed into separately. Any `{ path: ...` whose opening + # brace falls inside one of these ranges is skipped at this level. + consumed_ranges: list[tuple[int, int]] = [] + + # Find route objects by locating `{ path: '...'` and then tracking brace depth + # to find the matching closing brace. This handles nested objects like + # `data: { title: 'Home' }` that would break a simple [^}]* regex. 
+ path_pattern = re.compile(r"\{\s*path\s*:\s*['\"]([^'\"]*)['\"]") + + for match in path_pattern.finditer(content): + # Skip this match if its opening brace falls inside a children + # block that was already extracted for a parent route. + if any(start <= match.start() < end for start, end in consumed_ranges): + continue + + path_segment = match.group(1) + + # Find the matching closing brace by counting brace depth + brace_start = match.start() + depth = 0 + pos = brace_start + while pos < len(content): + if content[pos] == "{": + depth += 1 + elif content[pos] == "}": + depth -= 1 + if depth == 0: + break + pos += 1 + + route_body = content[brace_start : pos + 1] + + # Build full path + if path_segment: + full_path = f"{prefix}/{path_segment}" if prefix else f"/{path_segment}" + else: + full_path = prefix if prefix else "/" + + # Normalize double slashes + full_path = re.sub(r"//+", "/", full_path) + + # Convert Angular path params (:id is already the right format) + # Ensure leading slash + if full_path and not full_path.startswith("/"): + full_path = "/" + full_path + + # Extract and isolate the children block (if any) BEFORE checking + # has_target so that child route objects are not re-matched at this level. + children_content = None + children_match = re.search(r"children\s*:\s*\[", route_body) + if children_match: + # Compute absolute position in content + abs_bracket_start = brace_start + children_match.end() + # Find the matching closing bracket + bracket_depth = 1 + bracket_pos = abs_bracket_start + while bracket_pos < len(content) and bracket_depth > 0: + if content[bracket_pos] == "[": + bracket_depth += 1 + elif content[bracket_pos] == "]": + bracket_depth -= 1 + bracket_pos += 1 + children_content = content[abs_bracket_start : bracket_pos - 1] + # Mark this range as consumed so nested `{ path: ...` matches + # inside it are skipped by the outer finditer loop. 
+ consumed_ranges.append((abs_bracket_start, bracket_pos)) + + # Determine if route has a component/loadChildren/loadComponent + # Use the route body with children stripped to avoid false positives + # from child route component declarations. + body_for_check = route_body + if children_match: + # Remove the children block from the body for target detection + children_rel_start = children_match.start() + children_rel_end = ( + children_match.end() + - children_match.start() + + (len(children_content) if children_content else 0) + + 1 + ) # +1 for closing ] + body_for_check = ( + route_body[:children_rel_start] + + route_body[children_rel_start + children_rel_end :] + ) + + has_target = bool( + re.search( + r"(?:component|loadChildren|loadComponent)\s*:", + body_for_check, + ) + ) + + # Check for canActivate (auth guard) + requires_auth = "canActivate" in route_body + + has_children = children_content is not None + + if has_target and not has_children: + routes.append( + { + "path": full_path, + "methods": ["GET"], # Frontend routes are GET + "file": str(file_path.relative_to(self.path)), + "framework": "Angular", + "requires_auth": requires_auth, + } + ) + + # If there are children, recurse ONLY into the extracted children block + if has_children and children_content: + # Parent route with children may also have its own component + if has_target: + routes.append( + { + "path": full_path, + "methods": ["GET"], + "file": str(file_path.relative_to(self.path)), + "framework": "Angular", + "requires_auth": requires_auth, + } + ) + + child_routes = self._extract_angular_routes( + file_path, children_content, prefix=full_path + ) + routes.extend(child_routes) + + return routes diff --git a/apps/backend/analysis/analyzers/service_analyzer.py b/apps/backend/analysis/analyzers/service_analyzer.py index d8f35171a6..bca10dbba0 100644 --- a/apps/backend/analysis/analyzers/service_analyzer.py +++ b/apps/backend/analysis/analyzers/service_analyzer.py @@ -155,6 +155,20 @@ def 
_find_key_directories(self) -> None: def _find_entry_points(self) -> None: """Find main entry point files.""" + # For .NET solutions, entry points are in sub-project directories + solution = self.analysis.get("dotnet_solution") + if solution: + entry_points = [] + for ep in solution.get("entry_points", []): + program_cs = f"{ep['path']}/Program.cs" + if self._exists(program_cs): + entry_points.append(program_cs) + if entry_points: + self.analysis["entry_point"] = entry_points[0] + if len(entry_points) > 1: + self.analysis["entry_points"] = entry_points + return + entry_patterns = [ "main.py", "app.py", @@ -183,6 +197,8 @@ def _find_entry_points(self) -> None: "cmd/main.go", "src/main.rs", "src/lib.rs", + # .NET single project + "Program.cs", ] for pattern in entry_patterns: diff --git a/apps/backend/core/client.py b/apps/backend/core/client.py index a21e395920..0fc0de972c 100644 --- a/apps/backend/core/client.py +++ b/apps/backend/core/client.py @@ -392,6 +392,82 @@ def _validate_custom_mcp_server(server: dict) -> bool: return True +def _load_global_mcp_servers() -> dict[str, dict]: + """Load MCP servers from ~/.claude.json (user's global Claude Code config). + + Respects CLAUDE_CONFIG_DIR env var for custom config locations (e.g. + multi-profile setups). Falls back to ~/.claude.json. + + Returns a dict mapping server_id -> server_config for all non-disabled + MCP servers defined in the user's global config. These are the same + servers available in interactive Claude Code sessions. + """ + home = Path.home() + + # Respect CLAUDE_CONFIG_DIR for custom config locations (multi-profile support). + # .claude.json lives in the PARENT of the config dir (e.g. ~/.claude.json is the + # parent of ~/.claude/). For custom config dirs, check the parent first. 
+ config_dir = os.environ.get("CLAUDE_CONFIG_DIR") + candidates = [] + if config_dir: + config_parent = Path(config_dir).parent + candidates.append(config_parent / ".claude.json") + candidates.append(home / ".claude.json") + + claude_json_path: Path | None = None + for candidate in candidates: + if candidate.exists(): + claude_json_path = candidate + break + + if not claude_json_path: + return {} + + try: + with open(claude_json_path, encoding="utf-8") as f: + data = json.load(f) + except (json.JSONDecodeError, OSError) as exc: + logger.warning("Failed to read %s: %s", claude_json_path, exc) + return {} + + raw_servers = data.get("mcpServers") + if not isinstance(raw_servers, dict): + return {} + + servers: dict[str, dict] = {} + for server_id, config in raw_servers.items(): + if not isinstance(config, dict): + continue + # Skip disabled servers + if config.get("disabled", False): + continue + + # Build a clean config dict compatible with the SDK + clean: dict[str, Any] = {} + if "type" in config and config["type"] in ("http", "sse"): + clean["type"] = config["type"] + if "url" in config: + clean["url"] = config["url"] + if "command" in config: + clean["command"] = config["command"] + if "args" in config and isinstance(config["args"], list): + clean["args"] = config["args"] + if "env" in config and isinstance(config["env"], dict): + clean["env"] = config["env"] + if "headers" in config and isinstance(config["headers"], dict): + clean["headers"] = config["headers"] + + # Must have a usable transport + has_command = bool(clean.get("command")) + has_url = bool(clean.get("url")) + if not has_command and not has_url: + continue + + servers[server_id] = clean + + return servers + + def load_project_mcp_config(project_dir: Path) -> dict: """ Load MCP configuration from project's .auto-claude/.env file. 
@@ -536,6 +612,7 @@ def create_client( betas: list[str] | None = None, effort_level: str | None = None, fast_mode: bool = False, + agents_catalog_prompt: str | None = None, ) -> ClaudeSDKClient: """ Create a Claude Agent SDK client with multi-layered security. @@ -571,6 +648,10 @@ def create_client( the "user" setting source so the CLI reads fastMode from ~/.claude/settings.json. Requires extra usage enabled on Claude subscription; falls back to standard speed automatically. + agents_catalog_prompt: Optional catalog of available specialist agents + (~/.claude/agents/). When provided, this is appended to + the base system prompt so the agent knows what specialist + agents are available for delegation. Returns: Configured ClaudeSDKClient @@ -714,6 +795,11 @@ def create_client( ) break + # Pre-load global MCP server IDs so we can grant permissions in security_settings. + # The actual mcp_servers dict is populated later; here we only need the IDs. + _global_mcp_servers_preload = _load_global_mcp_servers() + global_mcp_added: list[str] = [] # Populated after mcp_servers is built + security_settings = { "sandbox": {"enabled": True, "autoAllowBashIfSandboxed": True}, "permissions": { @@ -763,6 +849,9 @@ def create_client( else [] ), *[f"{tool}(*)" for tool in browser_tools_permissions], + # Allow all tools from global MCP servers (loaded from ~/.claude.json) + # Uses wildcard pattern to permit any tool from each global server + *[f"mcp__{sid}__*(*)" for sid in global_mcp_added], ], }, } @@ -888,6 +977,27 @@ def create_client( server_config["headers"] = custom["headers"] mcp_servers[server_id] = server_config + # ========== Merge global MCP servers from ~/.claude.json ========== + # Load all user-configured MCPs and add any that Auto-Claude doesn't + # already define. Auto-Claude's hardcoded/project MCPs take priority. + # Re-use the pre-loaded global servers (loaded before security_settings). 
+ global_mcp_servers = _global_mcp_servers_preload + for server_id, server_config in global_mcp_servers.items(): + if server_id not in mcp_servers: + mcp_servers[server_id] = server_config + global_mcp_added.append(server_id) + # Allow all tools from this global MCP server + allowed_tools_list.append(f"mcp__{server_id}__*") + + if global_mcp_added: + # Update security_settings with permissions for global MCP tools and re-write + security_settings["permissions"]["allow"].extend( + [f"mcp__{sid}__*(*)" for sid in global_mcp_added] + ) + with open(settings_file, "w", encoding="utf-8") as f: + json.dump(security_settings, f, indent=2) + print(f" - Global MCPs (from ~/.claude.json): {', '.join(sorted(global_mcp_added))}") + # Build system prompt base_prompt = ( f"You are an expert full-stack developer building production-quality software. " @@ -900,6 +1010,18 @@ def create_client( f"and build-progress.txt updates." ) + # Include specialist agents catalog if provided + if agents_catalog_prompt: + # Protect against Windows command-line length limits for large catalogs + MAX_AGENT_PROMPT_LEN = 8000 + if len(agents_catalog_prompt) > MAX_AGENT_PROMPT_LEN: + agents_catalog_prompt = agents_catalog_prompt[:MAX_AGENT_PROMPT_LEN] + "\n\n[Truncated due to length]" + base_prompt = ( + f"{base_prompt}\n\n" + f"{agents_catalog_prompt}" + ) + print(f" - Specialist agents catalog: {len(agents_catalog_prompt)} chars injected") + # Include CLAUDE.md if enabled and present if should_use_claude_md(): claude_md_content = load_claude_md(project_dir) diff --git a/apps/backend/core/fast_mode.py b/apps/backend/core/fast_mode.py index cb5bd5733d..815f47c65c 100644 --- a/apps/backend/core/fast_mode.py +++ b/apps/backend/core/fast_mode.py @@ -8,6 +8,7 @@ import json import logging +import os from pathlib import Path from core.file_utils import write_json_atomic @@ -23,7 +24,8 @@ def _write_fast_mode_setting(enabled: bool) -> None: Uses write_json_atomic from core.file_utils to prevent corruption 
when multiple concurrent task processes modify the file simultaneously. """ - settings_file = Path.home() / ".claude" / "settings.json" + config_dir = os.environ.get("CLAUDE_CONFIG_DIR") + settings_file = Path(config_dir) / "settings.json" if config_dir else Path.home() / ".claude" / "settings.json" try: settings: dict = {} if settings_file.exists(): diff --git a/apps/backend/integrations/graphiti/config.py b/apps/backend/integrations/graphiti/config.py index b8078e678c..04564e2625 100644 --- a/apps/backend/integrations/graphiti/config.py +++ b/apps/backend/integrations/graphiti/config.py @@ -53,9 +53,16 @@ - qwen3-embedding:0.6b (1024), :4b (2560), :8b (4096) - Qwen3 series - nomic-embed-text (768), mxbai-embed-large (1024), bge-large (1024) OLLAMA_EMBEDDING_DIM: Override dimension (optional if using known model) + + # Search + GRAPHITI_MAX_RESULTS: Max context results per query (default: 10) + + # Lifecycle + GRAPHITI_EPISODE_TTL_DAYS: Auto-cleanup episodes older than N days (default: 0 = disabled) """ import json +import logging import os from dataclasses import dataclass, field from datetime import datetime @@ -63,6 +70,8 @@ from pathlib import Path from typing import Optional +logger = logging.getLogger(__name__) + # Default configuration values DEFAULT_DATABASE = "auto_claude_memory" DEFAULT_DB_PATH = "~/.auto-claude/memories" @@ -155,6 +164,9 @@ class GraphitiConfig: ollama_embedding_model: str = "" ollama_embedding_dim: int = 0 # Required for Ollama embeddings + # Lifecycle settings + episode_ttl_days: int = 0 # 0 = disabled (no expiration) + @classmethod def from_env(cls) -> "GraphitiConfig": """Create config from environment variables.""" @@ -227,6 +239,18 @@ def from_env(cls) -> "GraphitiConfig": except ValueError: ollama_embedding_dim = 0 + # Lifecycle settings + try: + episode_ttl_days = int(os.environ.get("GRAPHITI_EPISODE_TTL_DAYS", "0")) + except ValueError: + episode_ttl_days = 0 + if episode_ttl_days < 0: + logger.warning( + 
"GRAPHITI_EPISODE_TTL_DAYS=%d is negative; clamping to 0 (disabled)", + episode_ttl_days, + ) + episode_ttl_days = 0 + return cls( enabled=enabled, llm_provider=llm_provider, @@ -255,6 +279,7 @@ def from_env(cls) -> "GraphitiConfig": ollama_llm_model=ollama_llm_model, ollama_embedding_model=ollama_embedding_model, ollama_embedding_dim=ollama_embedding_dim, + episode_ttl_days=episode_ttl_days, ) def is_valid(self) -> bool: diff --git a/apps/backend/integrations/graphiti/memory.py b/apps/backend/integrations/graphiti/memory.py index 571ca15e88..c0c5e0d0a8 100644 --- a/apps/backend/integrations/graphiti/memory.py +++ b/apps/backend/integrations/graphiti/memory.py @@ -30,6 +30,7 @@ # Re-export from modular system (queries_pkg) from .queries_pkg.graphiti import GraphitiMemory from .queries_pkg.schema import ( + ALL_EPISODE_TYPES, EPISODE_TYPE_CODEBASE_DISCOVERY, EPISODE_TYPE_GOTCHA, EPISODE_TYPE_HISTORICAL_CONTEXT, @@ -192,4 +193,5 @@ async def test_provider_configuration() -> dict: "EPISODE_TYPE_TASK_OUTCOME", "EPISODE_TYPE_QA_RESULT", "EPISODE_TYPE_HISTORICAL_CONTEXT", + "ALL_EPISODE_TYPES", ] diff --git a/apps/backend/integrations/graphiti/queries_pkg/graphiti.py b/apps/backend/integrations/graphiti/queries_pkg/graphiti.py index ef1043584e..21bd2ebc31 100644 --- a/apps/backend/integrations/graphiti/queries_pkg/graphiti.py +++ b/apps/backend/integrations/graphiti/queries_pkg/graphiti.py @@ -192,6 +192,20 @@ async def initialize(self) -> bool: f"Graphiti initialized for group: {self.group_id} " f"(mode: {self.group_id_mode}, providers: {self.config.get_provider_summary()})" ) + + # Run TTL cleanup if configured (non-blocking) + if self.config.episode_ttl_days > 0: + try: + removed = await self._queries.cleanup_expired_episodes( + self.config.episode_ttl_days + ) + if removed > 0: + logger.info( + f"TTL cleanup: removed {removed} expired episodes" + ) + except Exception as e: + logger.debug(f"TTL cleanup skipped: {e}") + return True except Exception as e: @@ -382,6 
+396,7 @@ async def get_relevant_context( query: str, num_results: int = MAX_CONTEXT_RESULTS, include_project_context: bool = True, + episode_types: list[str] | None = None, ) -> list[dict]: """Search for relevant context based on a query.""" if not await self._ensure_initialized(): @@ -389,7 +404,7 @@ async def get_relevant_context( try: return await self._search.get_relevant_context( - query, num_results, include_project_context + query, num_results, include_project_context, episode_types=episode_types ) except Exception as e: logger.warning(f"Failed to get relevant context: {e}") diff --git a/apps/backend/integrations/graphiti/queries_pkg/queries.py b/apps/backend/integrations/graphiti/queries_pkg/queries.py index cf67cf6b18..c91d47723e 100644 --- a/apps/backend/integrations/graphiti/queries_pkg/queries.py +++ b/apps/backend/integrations/graphiti/queries_pkg/queries.py @@ -1,12 +1,12 @@ """ Graph query operations for Graphiti memory. -Handles episode storage, retrieval, and filtering operations. +Handles episode storage, retrieval, filtering, and lifecycle operations. """ import json import logging -from datetime import datetime, timezone +from datetime import datetime, timedelta, timezone from core.sentry import capture_exception @@ -521,3 +521,72 @@ async def add_structured_insights(self, insights: dict) -> bool: content_summary=", ".join(insight_types) if insight_types else "empty", ) return False + + async def cleanup_expired_episodes(self, ttl_days: int) -> int: + """ + Remove episodes older than the specified TTL. + + Uses the graphiti_core EpisodicNode API to retrieve all episodes for + the current group, then removes those whose created_at timestamp + exceeds the TTL cutoff. Removal uses Graphiti.remove_episode() which + also cleans up orphaned edges and entity nodes. + + Args: + ttl_days: Number of days after which episodes expire. + Must be > 0; returns 0 immediately otherwise. 
+ + Returns: + Number of episodes removed + """ + if ttl_days <= 0: + return 0 + + try: + from graphiti_core.nodes import EpisodicNode + + cutoff = datetime.now(timezone.utc) - timedelta(days=ttl_days) + + # Retrieve all episodes for this group via the graph database directly + episodes = await EpisodicNode.get_by_group_ids( + self.client.graphiti.driver, + group_ids=[self.group_id], + ) + + # Filter episodes older than cutoff + expired_uuids = [] + for episode in episodes: + episode_time = episode.created_at + # Ensure timezone-aware comparison + if episode_time.tzinfo is None: + episode_time = episode_time.replace(tzinfo=timezone.utc) + if episode_time < cutoff: + expired_uuids.append(episode.uuid) + + if not expired_uuids: + return 0 + + # Remove each expired episode (cleans up edges and orphaned nodes) + removed = 0 + for uuid in expired_uuids: + try: + await self.client.graphiti.remove_episode(uuid) + removed += 1 + except Exception: + logger.debug(f"Could not remove expired episode {uuid}") + + if removed > 0: + logger.info( + f"Memory cleanup: removed {removed} episodes older than " + f"{ttl_days} days (group: {self.group_id})" + ) + return removed + + except Exception as e: + logger.warning(f"Episode TTL cleanup failed: {e}") + capture_exception( + e, + operation="cleanup_expired_episodes", + group_id=self.group_id, + ttl_days=ttl_days, + ) + return 0 diff --git a/apps/backend/integrations/graphiti/queries_pkg/schema.py b/apps/backend/integrations/graphiti/queries_pkg/schema.py index d4ae7083b2..9929ae5381 100644 --- a/apps/backend/integrations/graphiti/queries_pkg/schema.py +++ b/apps/backend/integrations/graphiti/queries_pkg/schema.py @@ -4,6 +4,8 @@ Defines episode types and data structures used across the memory system. 
""" +import os + # Episode type constants EPISODE_TYPE_SESSION_INSIGHT = "session_insight" EPISODE_TYPE_CODEBASE_DISCOVERY = "codebase_discovery" @@ -13,8 +15,21 @@ EPISODE_TYPE_QA_RESULT = "qa_result" EPISODE_TYPE_HISTORICAL_CONTEXT = "historical_context" -# Maximum results to return for context queries (avoid overwhelming agent context) -MAX_CONTEXT_RESULTS = 10 +ALL_EPISODE_TYPES = [ + EPISODE_TYPE_SESSION_INSIGHT, + EPISODE_TYPE_CODEBASE_DISCOVERY, + EPISODE_TYPE_PATTERN, + EPISODE_TYPE_GOTCHA, + EPISODE_TYPE_TASK_OUTCOME, + EPISODE_TYPE_QA_RESULT, + EPISODE_TYPE_HISTORICAL_CONTEXT, +] + +# Maximum results to return for context queries (configurable via env var) +try: + MAX_CONTEXT_RESULTS = int(os.getenv("GRAPHITI_MAX_RESULTS", "10")) +except ValueError: + MAX_CONTEXT_RESULTS = 10 # Retry configuration MAX_RETRIES = 2 diff --git a/apps/backend/integrations/graphiti/queries_pkg/search.py b/apps/backend/integrations/graphiti/queries_pkg/search.py index ea0366cbf5..07c5228472 100644 --- a/apps/backend/integrations/graphiti/queries_pkg/search.py +++ b/apps/backend/integrations/graphiti/queries_pkg/search.py @@ -22,6 +22,9 @@ logger = logging.getLogger(__name__) +# Sentinel value for scores when embedding was not computed (distinct from 0.0) +SCORE_NOT_COMPUTED: float = -1.0 + class GraphitiSearch: """ @@ -30,6 +33,8 @@ class GraphitiSearch: Provides methods for finding relevant knowledge from the graph. 
""" + _dimension_validated = False + def __init__( self, client, @@ -54,12 +59,32 @@ def __init__( self.group_id_mode = group_id_mode self.project_dir = project_dir + def _validate_embedding_dimension(self) -> None: + """Log a warning if embedding dimension may be mismatched (once per session).""" + if GraphitiSearch._dimension_validated: + return + GraphitiSearch._dimension_validated = True + + try: + from graphiti_config import GraphitiConfig + + config = GraphitiConfig.from_env() + expected_dim = config.get_embedding_dimension() + if expected_dim > 0: + logger.info( + f"Embedding dimension: {expected_dim} " + f"(provider: {config.embedder_provider}, model: {config.ollama_embedding_model or 'default'})" + ) + except Exception as exc: + logger.debug("Skipping embedding dimension validation: %s", exc) + async def get_relevant_context( self, query: str, num_results: int = MAX_CONTEXT_RESULTS, include_project_context: bool = True, min_score: float = 0.0, + episode_types: list[str] | None = None, ) -> list[dict]: """ Search for relevant context based on a query. 
@@ -73,6 +98,8 @@ async def get_relevant_context( Returns: List of relevant context items with content, score, and type """ + self._validate_embedding_dimension() + try: # Determine which group IDs to search group_ids = [self.group_id] @@ -102,9 +129,9 @@ async def get_relevant_context( or str(result) ) - # Normalize score to float, treating None as 0.0 + # Normalize score: None means embedding wasn't computed raw_score = getattr(result, "score", None) - score = raw_score if raw_score is not None else 0.0 + score = raw_score if raw_score is not None else SCORE_NOT_COMPUTED context_items.append( { @@ -114,12 +141,21 @@ async def get_relevant_context( } ) - # Filter by minimum score if specified + # Filter by episode types if specified + if episode_types: + context_items = [ + item + for item in context_items + if item.get("type", "unknown") in episode_types + ] + + # Filter by minimum score if specified (exclude unscored results) if min_score > 0: context_items = [ item for item in context_items - if (item.get("score", 0.0)) >= min_score + if item.get("score", SCORE_NOT_COMPUTED) != SCORE_NOT_COMPUTED + and item.get("score", 0.0) >= min_score ] logger.info( @@ -233,7 +269,7 @@ async def get_similar_task_outcomes( continue if data.get("type") == EPISODE_TYPE_TASK_OUTCOME: raw_score = getattr(result, "score", None) - score = raw_score if raw_score is not None else 0.0 + score = raw_score if raw_score is not None else SCORE_NOT_COMPUTED outcomes.append( { "task_id": data.get("task_id"), @@ -294,9 +330,9 @@ async def get_patterns_and_gotchas( result, "fact", None ) raw_score = getattr(result, "score", None) - score = raw_score if raw_score is not None else 0.0 + score = raw_score if raw_score is not None else SCORE_NOT_COMPUTED - if score < min_score: + if score == SCORE_NOT_COMPUTED or score < min_score: continue if content and EPISODE_TYPE_PATTERN in str(content): @@ -331,9 +367,9 @@ async def get_patterns_and_gotchas( result, "fact", None ) raw_score = 
getattr(result, "score", None) - score = raw_score if raw_score is not None else 0.0 + score = raw_score if raw_score is not None else SCORE_NOT_COMPUTED - if score < min_score: + if score == SCORE_NOT_COMPUTED or score < min_score: continue if content and EPISODE_TYPE_GOTCHA in str(content): diff --git a/apps/backend/integrations/graphiti/tests/test_episode_types.py b/apps/backend/integrations/graphiti/tests/test_episode_types.py new file mode 100644 index 0000000000..0f0d5954e2 --- /dev/null +++ b/apps/backend/integrations/graphiti/tests/test_episode_types.py @@ -0,0 +1,615 @@ +""" +Tests for HISTORICAL_CONTEXT and QA_RESULT episode types. + +Tests cover: +- Storing a HISTORICAL_CONTEXT episode and verifying it is retrievable +- Storing a QA_RESULT episode and verifying it is retrievable +- Verifying episode type field is correctly set +- Verifying content structure is preserved +- Episode content round-trip integrity +""" + +import json +from datetime import datetime, timezone +from unittest.mock import AsyncMock, MagicMock + +import pytest +from integrations.graphiti.queries_pkg.schema import ( + EPISODE_TYPE_HISTORICAL_CONTEXT, + EPISODE_TYPE_QA_RESULT, +) + +# ============================================================================= +# Mock External Dependencies +# ============================================================================= + + +@pytest.fixture(autouse=True) +def mock_graphiti_core_nodes(): + """Auto-mock graphiti_core for all tests.""" + import sys + + # Save pre-existing module entries so we can restore them in teardown + _module_keys = ["graphiti_core", "graphiti_core.nodes"] + _saved = {k: sys.modules[k] for k in _module_keys if k in sys.modules} + + # Patch graphiti_core at module level before import + mock_graphiti_core = MagicMock() + mock_nodes = MagicMock() + mock_episode_type = MagicMock() + mock_episode_type.text = "text" + mock_nodes.EpisodeType = mock_episode_type + mock_graphiti_core.nodes = mock_nodes + + 
sys.modules["graphiti_core"] = mock_graphiti_core + sys.modules["graphiti_core.nodes"] = mock_nodes + + try: + yield mock_episode_type + finally: + for k in _module_keys: + if k in _saved: + sys.modules[k] = _saved[k] + else: + sys.modules.pop(k, None) + + +# ============================================================================= +# Client Fixtures +# ============================================================================= + + +@pytest.fixture +def mock_client(): + """Create a mock GraphitiClient with episode capture.""" + client = MagicMock() + client.graphiti = MagicMock() + client.graphiti.add_episode = AsyncMock() + return client + + +@pytest.fixture +def stored_episodes(): + """In-memory episode store for verifying round-trip storage.""" + return [] + + +@pytest.fixture +def mock_client_with_store(stored_episodes): + """Create a mock GraphitiClient that captures stored episodes. + + This fixture simulates the store-and-retrieve pattern by capturing + episode data passed to add_episode into an in-memory list, then + making it available for retrieval assertions. + + Returns: + MagicMock: Mock client with add_episode that captures episodes. 
+ """ + client = MagicMock() + client.graphiti = MagicMock() + + async def capture_episode(**kwargs): + stored_episodes.append(kwargs) + + client.graphiti.add_episode = AsyncMock(side_effect=capture_episode) + return client + + +@pytest.fixture +def queries(mock_client): + """Create a GraphitiQueries instance.""" + from integrations.graphiti.queries_pkg.queries import GraphitiQueries + + return GraphitiQueries( + client=mock_client, + group_id="test_group", + spec_context_id="test_spec", + ) + + +# ============================================================================= +# HISTORICAL_CONTEXT Episode Type Tests +# ============================================================================= + + +class TestHistoricalContextEpisode: + """Tests for HISTORICAL_CONTEXT episode type storage and retrieval.""" + + def test_historical_context_type_value(self): + """Test EPISODE_TYPE_HISTORICAL_CONTEXT has expected string value.""" + assert EPISODE_TYPE_HISTORICAL_CONTEXT == "historical_context" + assert isinstance(EPISODE_TYPE_HISTORICAL_CONTEXT, str) + + @pytest.mark.asyncio + async def test_store_historical_context_episode(self, mock_client_with_store, stored_episodes): + """Test storing a HISTORICAL_CONTEXT episode and verify it is retrievable.""" + from graphiti_core.nodes import EpisodeType + + historical_content = { + "type": EPISODE_TYPE_HISTORICAL_CONTEXT, + "spec_id": "test_spec", + "timestamp": datetime.now(timezone.utc).isoformat(), + "context": "Previous implementation used SQLAlchemy ORM with PostgreSQL", + "relevance": "Migration from PostgreSQL to embedded graph database", + "source": "session_003_migration", + } + + await mock_client_with_store.graphiti.add_episode( + name="historical_context_migration_notes", + episode_body=json.dumps(historical_content), + source=EpisodeType.text, + source_description="Historical context about database migration", + reference_time=datetime.now(timezone.utc), + group_id="test_group", + ) + + # Verify episode was stored 
+ assert len(stored_episodes) == 1 + + # Verify episode is retrievable with correct content + stored = stored_episodes[0] + retrieved_body = json.loads(stored["episode_body"]) + + assert retrieved_body["type"] == EPISODE_TYPE_HISTORICAL_CONTEXT + assert retrieved_body["context"] == "Previous implementation used SQLAlchemy ORM with PostgreSQL" + assert retrieved_body["relevance"] == "Migration from PostgreSQL to embedded graph database" + assert retrieved_body["source"] == "session_003_migration" + + @pytest.mark.asyncio + async def test_historical_context_episode_type_field_is_correct( + self, mock_client_with_store, stored_episodes + ): + """Test that the episode type field is correctly set to historical_context.""" + from graphiti_core.nodes import EpisodeType + + historical_content = { + "type": EPISODE_TYPE_HISTORICAL_CONTEXT, + "spec_id": "test_spec", + "timestamp": "2026-03-05T00:00:00Z", + "context": "Minimal historical context", + } + + await mock_client_with_store.graphiti.add_episode( + name="historical_context_type_check", + episode_body=json.dumps(historical_content), + source=EpisodeType.text, + source_description="Type field verification", + reference_time=datetime.now(timezone.utc), + group_id="test_group", + ) + + assert len(stored_episodes) == 1 + retrieved_body = json.loads(stored_episodes[0]["episode_body"]) + assert retrieved_body["type"] == "historical_context" + assert retrieved_body["type"] == EPISODE_TYPE_HISTORICAL_CONTEXT + + @pytest.mark.asyncio + async def test_historical_context_content_structure_preserved( + self, mock_client_with_store, stored_episodes + ): + """Test that complex content structure is preserved in HISTORICAL_CONTEXT episodes.""" + from graphiti_core.nodes import EpisodeType + + complex_content = { + "type": EPISODE_TYPE_HISTORICAL_CONTEXT, + "spec_id": "spec_042_refactor", + "timestamp": "2026-03-05T12:00:00Z", + "context": "The authentication system was originally built with basic JWT", + "previous_decisions": [ + 
"Chose JWT over session-based auth for statelessness", + "Used RS256 algorithm for token signing", + ], + "relevant_files": { + "auth/jwt.py": "Token generation and validation", + "middleware/auth.py": "Request authentication middleware", + }, + "lessons_learned": "Token rotation is critical for long-lived sessions", + "tags": ["authentication", "security", "jwt"], + } + + await mock_client_with_store.graphiti.add_episode( + name="historical_context_auth_system", + episode_body=json.dumps(complex_content), + source=EpisodeType.text, + source_description="Historical context for authentication refactor", + reference_time=datetime.now(timezone.utc), + group_id="spec_042_group", + ) + + assert len(stored_episodes) == 1 + retrieved_body = json.loads(stored_episodes[0]["episode_body"]) + + # Verify all nested structures are preserved + assert retrieved_body["type"] == EPISODE_TYPE_HISTORICAL_CONTEXT + assert retrieved_body["spec_id"] == "spec_042_refactor" + assert len(retrieved_body["previous_decisions"]) == 2 + assert "Chose JWT over session-based auth" in retrieved_body["previous_decisions"][0] + assert retrieved_body["relevant_files"]["auth/jwt.py"] == "Token generation and validation" + assert retrieved_body["lessons_learned"] == "Token rotation is critical for long-lived sessions" + assert "security" in retrieved_body["tags"] + + @pytest.mark.asyncio + async def test_historical_context_episode_name_and_metadata( + self, mock_client_with_store, stored_episodes + ): + """Test that episode-level metadata (name, group_id, source_description) is preserved.""" + from graphiti_core.nodes import EpisodeType + + content = { + "type": EPISODE_TYPE_HISTORICAL_CONTEXT, + "context": "Test metadata preservation", + } + + await mock_client_with_store.graphiti.add_episode( + name="historical_context_metadata_test", + episode_body=json.dumps(content), + source=EpisodeType.text, + source_description="Metadata test for historical context", + reference_time=datetime(2026, 3, 5, 
tzinfo=timezone.utc), + group_id="metadata_test_group", + ) + + stored = stored_episodes[0] + assert stored["name"] == "historical_context_metadata_test" + assert stored["group_id"] == "metadata_test_group" + assert stored["source_description"] == "Metadata test for historical context" + + @pytest.mark.asyncio + async def test_multiple_historical_context_episodes( + self, mock_client_with_store, stored_episodes + ): + """Test storing multiple HISTORICAL_CONTEXT episodes.""" + from graphiti_core.nodes import EpisodeType + + for i in range(3): + content = { + "type": EPISODE_TYPE_HISTORICAL_CONTEXT, + "spec_id": f"spec_{i:03d}", + "context": f"Historical context entry {i}", + } + + await mock_client_with_store.graphiti.add_episode( + name=f"historical_context_{i}", + episode_body=json.dumps(content), + source=EpisodeType.text, + source_description=f"Historical context {i}", + reference_time=datetime.now(timezone.utc), + group_id="test_group", + ) + + assert len(stored_episodes) == 3 + + # Verify each episode has the correct type and unique content + for i, stored in enumerate(stored_episodes): + body = json.loads(stored["episode_body"]) + assert body["type"] == EPISODE_TYPE_HISTORICAL_CONTEXT + assert body["spec_id"] == f"spec_{i:03d}" + assert body["context"] == f"Historical context entry {i}" + + +# ============================================================================= +# QA_RESULT Episode Type Tests +# ============================================================================= + + +class TestQaResultEpisode: + """Tests for QA_RESULT episode type storage and retrieval.""" + + def test_qa_result_type_value(self): + """Test EPISODE_TYPE_QA_RESULT has expected string value.""" + assert EPISODE_TYPE_QA_RESULT == "qa_result" + assert isinstance(EPISODE_TYPE_QA_RESULT, str) + + @pytest.mark.asyncio + async def test_store_qa_result_episode(self, mock_client_with_store, stored_episodes): + """Test storing a QA_RESULT episode and verify it is retrievable.""" + from 
graphiti_core.nodes import EpisodeType + + qa_content = { + "type": EPISODE_TYPE_QA_RESULT, + "spec_id": "test_spec", + "timestamp": datetime.now(timezone.utc).isoformat(), + "task_id": "subtask-3-implement-auth", + "qa_passed": True, + "issues_found": [], + "test_coverage": 92.5, + "reviewer_notes": "All acceptance criteria met. Code quality is good.", + } + + await mock_client_with_store.graphiti.add_episode( + name="qa_result_subtask_3", + episode_body=json.dumps(qa_content), + source=EpisodeType.text, + source_description="QA review result for subtask-3", + reference_time=datetime.now(timezone.utc), + group_id="test_group", + ) + + # Verify episode was stored + assert len(stored_episodes) == 1 + + # Verify episode is retrievable with correct content + stored = stored_episodes[0] + retrieved_body = json.loads(stored["episode_body"]) + + assert retrieved_body["type"] == EPISODE_TYPE_QA_RESULT + assert retrieved_body["task_id"] == "subtask-3-implement-auth" + assert retrieved_body["qa_passed"] is True + assert retrieved_body["test_coverage"] == 92.5 + assert retrieved_body["reviewer_notes"] == "All acceptance criteria met. Code quality is good." 
+ + @pytest.mark.asyncio + async def test_qa_result_episode_type_field_is_correct( + self, mock_client_with_store, stored_episodes + ): + """Test that the episode type field is correctly set to qa_result.""" + from graphiti_core.nodes import EpisodeType + + qa_content = { + "type": EPISODE_TYPE_QA_RESULT, + "spec_id": "test_spec", + "timestamp": "2026-03-05T00:00:00Z", + "task_id": "type-check-task", + "qa_passed": False, + } + + await mock_client_with_store.graphiti.add_episode( + name="qa_result_type_check", + episode_body=json.dumps(qa_content), + source=EpisodeType.text, + source_description="Type field verification", + reference_time=datetime.now(timezone.utc), + group_id="test_group", + ) + + assert len(stored_episodes) == 1 + retrieved_body = json.loads(stored_episodes[0]["episode_body"]) + assert retrieved_body["type"] == "qa_result" + assert retrieved_body["type"] == EPISODE_TYPE_QA_RESULT + + @pytest.mark.asyncio + async def test_qa_result_content_structure_preserved( + self, mock_client_with_store, stored_episodes + ): + """Test that complex content structure is preserved in QA_RESULT episodes.""" + from graphiti_core.nodes import EpisodeType + + complex_qa_content = { + "type": EPISODE_TYPE_QA_RESULT, + "spec_id": "spec_015_api_endpoints", + "timestamp": "2026-03-05T15:30:00Z", + "task_id": "subtask-7-api-validation", + "qa_passed": False, + "issues_found": [ + { + "severity": "high", + "description": "Missing input validation on POST /users", + "file": "api/routes/users.py", + "line": 42, + }, + { + "severity": "medium", + "description": "No rate limiting on auth endpoints", + "file": "api/routes/auth.py", + "line": 15, + }, + ], + "test_coverage": 78.3, + "reviewer_notes": "Two issues must be resolved before merge", + "fix_attempts": 2, + "criteria_results": { + "functionality": True, + "code_quality": True, + "test_coverage": False, + "security": False, + }, + } + + await mock_client_with_store.graphiti.add_episode( + name="qa_result_api_validation", 
+ episode_body=json.dumps(complex_qa_content), + source=EpisodeType.text, + source_description="QA result with issues for API validation", + reference_time=datetime.now(timezone.utc), + group_id="spec_015_group", + ) + + assert len(stored_episodes) == 1 + retrieved_body = json.loads(stored_episodes[0]["episode_body"]) + + # Verify all nested structures are preserved + assert retrieved_body["type"] == EPISODE_TYPE_QA_RESULT + assert retrieved_body["spec_id"] == "spec_015_api_endpoints" + assert retrieved_body["qa_passed"] is False + assert len(retrieved_body["issues_found"]) == 2 + assert retrieved_body["issues_found"][0]["severity"] == "high" + assert retrieved_body["issues_found"][0]["line"] == 42 + assert retrieved_body["issues_found"][1]["file"] == "api/routes/auth.py" + assert retrieved_body["test_coverage"] == 78.3 + assert retrieved_body["fix_attempts"] == 2 + assert retrieved_body["criteria_results"]["functionality"] is True + assert retrieved_body["criteria_results"]["security"] is False + + @pytest.mark.asyncio + async def test_qa_result_episode_name_and_metadata( + self, mock_client_with_store, stored_episodes + ): + """Test that episode-level metadata (name, group_id, source_description) is preserved.""" + from graphiti_core.nodes import EpisodeType + + content = { + "type": EPISODE_TYPE_QA_RESULT, + "task_id": "metadata-test-task", + "qa_passed": True, + } + + await mock_client_with_store.graphiti.add_episode( + name="qa_result_metadata_test", + episode_body=json.dumps(content), + source=EpisodeType.text, + source_description="Metadata test for QA result", + reference_time=datetime(2026, 3, 5, tzinfo=timezone.utc), + group_id="metadata_test_group", + ) + + stored = stored_episodes[0] + assert stored["name"] == "qa_result_metadata_test" + assert stored["group_id"] == "metadata_test_group" + assert stored["source_description"] == "Metadata test for QA result" + + @pytest.mark.asyncio + async def test_multiple_qa_result_episodes( + self, 
mock_client_with_store, stored_episodes + ): + """Test storing multiple QA_RESULT episodes.""" + from graphiti_core.nodes import EpisodeType + + for i in range(3): + content = { + "type": EPISODE_TYPE_QA_RESULT, + "spec_id": f"spec_{i:03d}", + "task_id": f"task-{i}", + "qa_passed": i % 2 == 0, # Alternate pass/fail + "issues_found": [] if i % 2 == 0 else [{"description": f"Issue in task {i}"}], + } + + await mock_client_with_store.graphiti.add_episode( + name=f"qa_result_{i}", + episode_body=json.dumps(content), + source=EpisodeType.text, + source_description=f"QA result {i}", + reference_time=datetime.now(timezone.utc), + group_id="test_group", + ) + + assert len(stored_episodes) == 3 + + # Verify each episode has the correct type and unique content + for i, stored in enumerate(stored_episodes): + body = json.loads(stored["episode_body"]) + assert body["type"] == EPISODE_TYPE_QA_RESULT + assert body["spec_id"] == f"spec_{i:03d}" + assert body["task_id"] == f"task-{i}" + assert body["qa_passed"] == (i % 2 == 0) + + @pytest.mark.asyncio + async def test_qa_result_with_empty_issues(self, mock_client_with_store, stored_episodes): + """Test QA_RESULT episode with empty issues list (passed review).""" + from graphiti_core.nodes import EpisodeType + + content = { + "type": EPISODE_TYPE_QA_RESULT, + "task_id": "clean-task", + "qa_passed": True, + "issues_found": [], + "reviewer_notes": "", + } + + await mock_client_with_store.graphiti.add_episode( + name="qa_result_clean", + episode_body=json.dumps(content), + source=EpisodeType.text, + source_description="Clean QA pass", + reference_time=datetime.now(timezone.utc), + group_id="test_group", + ) + + retrieved_body = json.loads(stored_episodes[0]["episode_body"]) + assert retrieved_body["issues_found"] == [] + assert retrieved_body["qa_passed"] is True + + +# ============================================================================= +# Cross-Type Tests +# 
============================================================================= + + +class TestEpisodeTypeCrossValidation: + """Tests that verify HISTORICAL_CONTEXT and QA_RESULT behave consistently with other types.""" + + def test_historical_context_and_qa_result_are_distinct(self): + """Test that HISTORICAL_CONTEXT and QA_RESULT are distinct episode types.""" + assert EPISODE_TYPE_HISTORICAL_CONTEXT != EPISODE_TYPE_QA_RESULT + + def test_episode_types_are_lowercase_snake_case(self): + """Test that episode type values follow the lowercase_snake_case convention.""" + assert EPISODE_TYPE_HISTORICAL_CONTEXT == EPISODE_TYPE_HISTORICAL_CONTEXT.lower() + assert EPISODE_TYPE_QA_RESULT == EPISODE_TYPE_QA_RESULT.lower() + assert "_" in EPISODE_TYPE_HISTORICAL_CONTEXT + assert "_" in EPISODE_TYPE_QA_RESULT + + @pytest.mark.asyncio + async def test_mixed_episode_types_in_same_group( + self, mock_client_with_store, stored_episodes + ): + """Test storing both HISTORICAL_CONTEXT and QA_RESULT in the same group.""" + from graphiti_core.nodes import EpisodeType + + historical = { + "type": EPISODE_TYPE_HISTORICAL_CONTEXT, + "context": "Previous auth implementation details", + } + + qa = { + "type": EPISODE_TYPE_QA_RESULT, + "task_id": "auth-task", + "qa_passed": True, + } + + await mock_client_with_store.graphiti.add_episode( + name="historical_entry", + episode_body=json.dumps(historical), + source=EpisodeType.text, + source_description="Historical context", + reference_time=datetime.now(timezone.utc), + group_id="shared_group", + ) + + await mock_client_with_store.graphiti.add_episode( + name="qa_entry", + episode_body=json.dumps(qa), + source=EpisodeType.text, + source_description="QA result", + reference_time=datetime.now(timezone.utc), + group_id="shared_group", + ) + + assert len(stored_episodes) == 2 + + # Verify types are distinct + types = [json.loads(ep["episode_body"])["type"] for ep in stored_episodes] + assert EPISODE_TYPE_HISTORICAL_CONTEXT in types + assert 
EPISODE_TYPE_QA_RESULT in types + assert types[0] != types[1] + + @pytest.mark.asyncio + async def test_episode_body_is_valid_json_roundtrip( + self, mock_client_with_store, stored_episodes + ): + """Test that episode_body survives JSON serialization/deserialization.""" + from graphiti_core.nodes import EpisodeType + + original_content = { + "type": EPISODE_TYPE_QA_RESULT, + "task_id": "roundtrip-test", + "qa_passed": True, + "nested": {"key": "value", "list": [1, 2, 3]}, + "unicode": "Test with special chars: \u00e9\u00e0\u00fc", + } + + serialized = json.dumps(original_content) + + await mock_client_with_store.graphiti.add_episode( + name="roundtrip_test", + episode_body=serialized, + source=EpisodeType.text, + source_description="Round-trip test", + reference_time=datetime.now(timezone.utc), + group_id="test_group", + ) + + # Deserialize from stored episode + deserialized = json.loads(stored_episodes[0]["episode_body"]) + + assert deserialized == original_content + assert deserialized["unicode"] == "Test with special chars: \u00e9\u00e0\u00fc" diff --git a/apps/backend/integrations/graphiti/tests/test_integration_graphiti.py b/apps/backend/integrations/graphiti/tests/test_integration_graphiti.py new file mode 100644 index 0000000000..13d08e15d9 --- /dev/null +++ b/apps/backend/integrations/graphiti/tests/test_integration_graphiti.py @@ -0,0 +1,517 @@ +""" +Unit-level tests for Graphiti memory system with mocked dependencies. + +NOTE: Despite the @pytest.mark.integration marker and the filename, these are +**unit tests** that mock all external dependencies (graphiti_core, database +connections). The autouse ``mock_graphiti_core_modules`` fixture replaces +graphiti_core modules in sys.modules so no real graph database is contacted. +To add true integration tests that exercise a live LadybugDB/kuzu instance, +create a separate test module without the autouse mock fixture. + +Converted from run_graphiti_memory_test.py to proper pytest format. 
+ +Original script: integrations/graphiti/run_graphiti_memory_test.py + +Usage: + # Run only integration tests: + pytest tests/test_integration_graphiti.py -v -m integration + + # Skip integration tests (default in CI): + pytest tests/ -v -m "not integration" +""" + +import json +import os +import sys +from unittest.mock import AsyncMock, MagicMock + +import pytest + +# ============================================================================= +# Markers +# ============================================================================= + +pytestmark = [ + pytest.mark.integration, +] + +# ============================================================================= +# Mock External Dependencies +# ============================================================================= + + +@pytest.fixture(autouse=True) +def mock_graphiti_core_modules(): + """Auto-mock graphiti_core and related modules for all tests. + + This prevents actual graph database connections during tests. + """ + # Save pre-existing module entries so we can restore them in teardown + _module_keys = [ + "graphiti_core", + "graphiti_core.nodes", + "graphiti_core.driver", + "graphiti_core.driver.kuzu_driver", + ] + _saved = {k: sys.modules[k] for k in _module_keys if k in sys.modules} + + mock_graphiti_core = MagicMock() + mock_nodes = MagicMock() + mock_episode_type = MagicMock() + mock_episode_type.text = "text" + mock_nodes.EpisodeType = mock_episode_type + mock_graphiti_core.nodes = mock_nodes + + # Mock the Graphiti class + mock_graphiti_class = MagicMock() + mock_graphiti_instance = MagicMock() + mock_graphiti_instance.add_episode = AsyncMock() + mock_graphiti_instance.search = AsyncMock(return_value=[]) + mock_graphiti_instance.build_indices_and_constraints = AsyncMock() + mock_graphiti_instance.close = AsyncMock() + mock_graphiti_class.return_value = mock_graphiti_instance + mock_graphiti_core.Graphiti = mock_graphiti_class + + # Mock driver + mock_driver = MagicMock() # noqa: F841 - assigned to 
set up mock chain below + mock_driver_module = MagicMock() + mock_driver_module.KuzuDriver = MagicMock() + mock_graphiti_core.driver = MagicMock() + mock_graphiti_core.driver.kuzu_driver = mock_driver_module + + sys.modules["graphiti_core"] = mock_graphiti_core + sys.modules["graphiti_core.nodes"] = mock_nodes + sys.modules["graphiti_core.driver"] = mock_graphiti_core.driver + sys.modules["graphiti_core.driver.kuzu_driver"] = mock_driver_module + + try: + yield { + "graphiti_core": mock_graphiti_core, + "episode_type": mock_episode_type, + "graphiti_instance": mock_graphiti_instance, + } + finally: + for k in _module_keys: + if k in _saved: + sys.modules[k] = _saved[k] + else: + sys.modules.pop(k, None) + + +@pytest.fixture +def mock_env_for_graphiti(tmp_path): + """Set environment variables for Graphiti integration testing. + + Yields: + dict: Dictionary of environment variables that were set. + """ + test_db_path = str(tmp_path / "test_graphiti.db") + + env_vars = { + "GRAPHITI_ENABLED": "true", + "GRAPHITI_LLM_PROVIDER": "openai", + "GRAPHITI_EMBEDDER_PROVIDER": "openai", + "GRAPHITI_DATABASE": "test_memory", + "GRAPHITI_DB_PATH": test_db_path, + "OPENAI_MODEL": "gpt-5-mini", + "OPENAI_EMBEDDING_MODEL": "text-embedding-3-small", + "OPENAI_API_KEY": "sk-test-key-for-integration-testing", + } + + original = {k: os.environ.get(k) for k in env_vars} + + for key, value in env_vars.items(): + os.environ[key] = value + + yield env_vars + + for key, original_value in original.items(): + if original_value is None: + os.environ.pop(key, None) + else: + os.environ[key] = original_value + + +# ============================================================================= +# Client Fixtures +# ============================================================================= + + +@pytest.fixture +def mock_client(): + """Create a mock GraphitiClient with common methods.""" + client = MagicMock() + client.graphiti = MagicMock() + client.graphiti.add_episode = AsyncMock() + 
client.graphiti.search = AsyncMock(return_value=[]) + client.graphiti.build_indices_and_constraints = AsyncMock() + client.graphiti.close = AsyncMock() + client.is_initialized = True + client.initialize = AsyncMock(return_value=True) + client.close = AsyncMock() + return client + + +@pytest.fixture +def queries(mock_client): + """Create a GraphitiQueries instance for testing.""" + from integrations.graphiti.queries_pkg.queries import GraphitiQueries + + return GraphitiQueries( + client=mock_client, + group_id="integration_test_group", + spec_context_id="integration_test_spec", + ) + + +# ============================================================================= +# Test: LadybugDB Connection (from test_ladybugdb_connection) +# ============================================================================= + + +class TestLadybugDBConnection: + """Tests for LadybugDB connection verification. + + Converted from test_ladybugdb_connection() in run_graphiti_memory_test.py. + """ + + def test_ladybug_monkeypatch_import_path(self): + """Test that the LadybugDB monkeypatch import path exists. + + Verifies the import mechanism is available, without requiring + the actual real_ladybug package. + """ + from integrations.graphiti.queries_pkg.client import _apply_ladybug_monkeypatch + + assert callable(_apply_ladybug_monkeypatch) + + def test_database_path_construction(self, tmp_path): + """Test that database path is correctly constructed.""" + db_path = tmp_path / "test_db" + database = "test_memory" + full_path = db_path / database + + # Parent should be creatable + full_path.parent.mkdir(parents=True, exist_ok=True) + assert full_path.parent.exists() + + +# ============================================================================= +# Test: Episode Save (from test_save_episode) +# ============================================================================= + + +class TestEpisodeSave: + """Tests for episode save operations. 
+ + Converted from test_save_episode() in run_graphiti_memory_test.py. + """ + + @pytest.mark.asyncio + async def test_save_session_insight_episode(self, queries): + """Test saving a session insight episode.""" + insights = { + "subtasks_completed": ["test-subtask-1"], + "discoveries": { + "files_understood": {"test.py": "Test file"}, + "patterns_found": ["Pattern: LadybugDB works!"], + "gotchas_encountered": [], + }, + "what_worked": ["Using embedded database"], + "what_failed": [], + "recommendations_for_next_session": ["Continue testing"], + } + + result = await queries.add_session_insight(session_num=1, insights=insights) + + assert result is True + queries.client.graphiti.add_episode.assert_called_once() + + call_args = queries.client.graphiti.add_episode.call_args + episode_body = json.loads(call_args[1]["episode_body"]) + assert episode_body["type"] == "session_insight" + assert episode_body["session_number"] == 1 + assert "subtasks_completed" in episode_body + assert "test-subtask-1" in episode_body["subtasks_completed"] + + @pytest.mark.asyncio + async def test_save_pattern_episode(self, queries): + """Test saving a code pattern episode.""" + pattern = "LadybugDB pattern: Embedded graph database works without Docker" + + result = await queries.add_pattern(pattern) + + assert result is True + queries.client.graphiti.add_episode.assert_called_once() + + call_args = queries.client.graphiti.add_episode.call_args + episode_body = json.loads(call_args[1]["episode_body"]) + assert episode_body["type"] == "pattern" + assert episode_body["pattern"] == pattern + + @pytest.mark.asyncio + async def test_save_gotcha_episode(self, queries): + """Test saving a gotcha episode.""" + gotcha = "Always close database connections in finally blocks" + + result = await queries.add_gotcha(gotcha) + + assert result is True + queries.client.graphiti.add_episode.assert_called_once() + + call_args = queries.client.graphiti.add_episode.call_args + episode_body = 
json.loads(call_args[1]["episode_body"]) + assert episode_body["type"] == "gotcha" + assert episode_body["gotcha"] == gotcha + + @pytest.mark.asyncio + async def test_save_codebase_discoveries_episode(self, queries): + """Test saving codebase discoveries episode.""" + discoveries = { + "src/main.py": "Entry point for the application", + "src/config.py": "Configuration module with env var loading", + } + + result = await queries.add_codebase_discoveries(discoveries) + + assert result is True + queries.client.graphiti.add_episode.assert_called_once() + + call_args = queries.client.graphiti.add_episode.call_args + episode_body = json.loads(call_args[1]["episode_body"]) + assert episode_body["type"] == "codebase_discovery" + assert episode_body["files"] == discoveries + + @pytest.mark.asyncio + async def test_save_task_outcome_episode(self, queries): + """Test saving a task outcome episode.""" + result = await queries.add_task_outcome( + task_id="integration-task-1", + success=True, + outcome="Integration test completed successfully", + metadata={"duration_seconds": 45}, + ) + + assert result is True + queries.client.graphiti.add_episode.assert_called_once() + + call_args = queries.client.graphiti.add_episode.call_args + episode_body = json.loads(call_args[1]["episode_body"]) + assert episode_body["type"] == "task_outcome" + assert episode_body["task_id"] == "integration-task-1" + assert episode_body["success"] is True + assert episode_body["duration_seconds"] == 45 + + +# ============================================================================= +# Test: Episode Save Error Handling +# ============================================================================= + + +class TestEpisodeSaveErrorHandling: + """Tests for episode save error handling. + + Verifies graceful failure when database operations fail. 
+ """ + + @pytest.mark.asyncio + async def test_session_insight_handles_database_error(self, queries): + """Test that session insight save handles database errors gracefully.""" + queries.client.graphiti.add_episode.side_effect = Exception( + "Database connection lost" + ) + + result = await queries.add_session_insight(session_num=1, insights={}) + + assert result is False + + @pytest.mark.asyncio + async def test_pattern_handles_database_error(self, queries): + """Test that pattern save handles database errors gracefully.""" + queries.client.graphiti.add_episode.side_effect = Exception( + "Database connection lost" + ) + + result = await queries.add_pattern("test pattern") + + assert result is False + + @pytest.mark.asyncio + async def test_gotcha_handles_database_error(self, queries): + """Test that gotcha save handles database errors gracefully.""" + queries.client.graphiti.add_episode.side_effect = Exception( + "Database connection lost" + ) + + result = await queries.add_gotcha("test gotcha") + + assert result is False + + @pytest.mark.asyncio + async def test_codebase_discoveries_handles_database_error(self, queries): + """Test that codebase discoveries save handles database errors gracefully.""" + queries.client.graphiti.add_episode.side_effect = Exception( + "Database connection lost" + ) + + result = await queries.add_codebase_discoveries({"file.py": "desc"}) + + assert result is False + + @pytest.mark.asyncio + async def test_task_outcome_handles_database_error(self, queries): + """Test that task outcome save handles database errors gracefully.""" + queries.client.graphiti.add_episode.side_effect = Exception( + "Database connection lost" + ) + + result = await queries.add_task_outcome("task-1", True, "outcome") + + assert result is False + + +# ============================================================================= +# Test: GraphitiMemory Class (from test_graphiti_memory_class) +# 
============================================================================= + + +class TestGraphitiMemoryClass: + """Tests for the GraphitiMemory wrapper class. + + Converted from test_graphiti_memory_class() in run_graphiti_memory_test.py. + """ + + def test_graphiti_memory_import(self): + """Test that GraphitiMemory can be imported.""" + from integrations.graphiti.queries_pkg.graphiti import GraphitiMemory + + assert GraphitiMemory is not None + + def test_graphiti_memory_facade_import(self): + """Test that GraphitiMemory can be imported from memory facade.""" + from integrations.graphiti.memory import GraphitiMemory + + assert GraphitiMemory is not None + + def test_graphiti_memory_initialization(self, tmp_path, mock_env_for_graphiti): + """Test GraphitiMemory initialization with test config.""" + from integrations.graphiti.queries_pkg.graphiti import GraphitiMemory + + spec_dir = tmp_path / "test_spec" + project_dir = tmp_path / "test_project" + spec_dir.mkdir() + project_dir.mkdir() + + memory = GraphitiMemory(spec_dir, project_dir) + + assert memory.spec_dir == spec_dir + assert memory.project_dir == project_dir + assert memory.group_id == spec_dir.name + + def test_graphiti_memory_group_id_spec_mode(self, tmp_path, mock_env_for_graphiti): + """Test group ID generation in spec mode.""" + from integrations.graphiti.queries_pkg.graphiti import GraphitiMemory + from integrations.graphiti.queries_pkg.schema import GroupIdMode + + spec_dir = tmp_path / "spec_001_auth" + project_dir = tmp_path / "my_project" + spec_dir.mkdir() + project_dir.mkdir() + + memory = GraphitiMemory(spec_dir, project_dir, group_id_mode=GroupIdMode.SPEC) + + assert memory.group_id == "spec_001_auth" + + def test_graphiti_memory_group_id_project_mode(self, tmp_path, mock_env_for_graphiti): + """Test group ID generation in project mode.""" + from integrations.graphiti.queries_pkg.graphiti import GraphitiMemory + from integrations.graphiti.queries_pkg.schema import GroupIdMode + + spec_dir 
= tmp_path / "spec_001_auth" + project_dir = tmp_path / "my_project" + spec_dir.mkdir() + project_dir.mkdir() + + memory = GraphitiMemory(spec_dir, project_dir, group_id_mode=GroupIdMode.PROJECT) + + assert memory.group_id.startswith("project_my_project_") + + def test_graphiti_memory_status_summary(self, tmp_path, mock_env_for_graphiti): + """Test get_status_summary returns expected structure.""" + from integrations.graphiti.queries_pkg.graphiti import GraphitiMemory + + spec_dir = tmp_path / "test_spec" + project_dir = tmp_path / "test_project" + spec_dir.mkdir() + project_dir.mkdir() + + memory = GraphitiMemory(spec_dir, project_dir) + status = memory.get_status_summary() + + assert "enabled" in status + assert "initialized" in status + assert "database" in status + assert "group_id" in status + assert "group_id_mode" in status + assert "episode_count" in status + assert "last_session" in status + assert "errors" in status + + +# ============================================================================= +# Test: Episode Content Integrity +# ============================================================================= + + +class TestEpisodeContentIntegrity: + """Tests for episode content integrity across serialization. + + Validates that episode data survives JSON round-trip serialization, + which is the core mechanism used by the memory system. 
+ """ + + @pytest.mark.asyncio + async def test_episode_content_json_roundtrip(self, queries): + """Test that episode content survives JSON serialization.""" + insights = { + "subtasks_completed": ["task-1", "task-2", "task-3"], + "discoveries": { + "files_understood": { + "src/auth.py": "Authentication module", + "src/db.py": "Database connection pool", + }, + }, + "what_worked": ["Dependency injection pattern"], + "what_failed": ["Initial connection pooling approach"], + "nested_data": {"level1": {"level2": {"level3": "deep value"}}}, + } + + result = await queries.add_session_insight(session_num=5, insights=insights) + + assert result is True + + call_args = queries.client.graphiti.add_episode.call_args + episode_body = json.loads(call_args[1]["episode_body"]) + + # Verify nested structures are preserved + assert episode_body["discoveries"]["files_understood"]["src/auth.py"] == "Authentication module" + assert episode_body["nested_data"]["level1"]["level2"]["level3"] == "deep value" + assert len(episode_body["subtasks_completed"]) == 3 + + @pytest.mark.asyncio + async def test_episode_group_id_is_set(self, queries): + """Test that group_id is correctly set on stored episodes.""" + result = await queries.add_pattern("test pattern") + + assert result is True + call_args = queries.client.graphiti.add_episode.call_args + assert call_args[1]["group_id"] == "integration_test_group" + + @pytest.mark.asyncio + async def test_episode_source_type_is_text(self, queries, mock_graphiti_core_modules): + """Test that episode source type is set to text.""" + result = await queries.add_pattern("test pattern") + + assert result is True + call_args = queries.client.graphiti.add_episode.call_args + assert call_args[1]["source"] == mock_graphiti_core_modules["episode_type"].text diff --git a/apps/backend/integrations/graphiti/tests/test_integration_ollama.py b/apps/backend/integrations/graphiti/tests/test_integration_ollama.py new file mode 100644 index 0000000000..f5650b2ca4 --- 
/dev/null +++ b/apps/backend/integrations/graphiti/tests/test_integration_ollama.py @@ -0,0 +1,566 @@ +""" +Integration tests for Ollama embedding memory system. + +Converted from run_ollama_embedding_test.py to proper pytest format. + +These tests are marked with @pytest.mark.integration for grouping with other +Ollama tests; all dependencies here are mocked (no live server required). + +Original script: integrations/graphiti/run_ollama_embedding_test.py + +Usage: + # Run only integration tests: + pytest tests/test_integration_ollama.py -v -m integration + + # Skip integration tests (default in CI): + pytest tests/ -v -m "not integration" +""" + +import json +import os +import shutil +import sys +from unittest.mock import AsyncMock, MagicMock + +import pytest + +# ============================================================================= +# Markers +# ============================================================================= + +# NOTE: Despite the `integration` marker, these are unit-level tests with fully +# mocked dependencies (no live Ollama server required). The marker is retained +# for grouping with other Ollama-related tests that *do* need a running server. 
+pytestmark = [ + pytest.mark.integration, +] + +# ============================================================================= +# Mock External Dependencies +# ============================================================================= + + +@pytest.fixture(autouse=True) +def mock_graphiti_core_modules(): + """Auto-mock graphiti_core and related modules for all tests.""" + # Save pre-existing module entries so we can restore them in teardown + _module_keys = [ + "graphiti_core", + "graphiti_core.nodes", + "graphiti_core.driver", + "graphiti_core.driver.kuzu_driver", + ] + _saved = {k: sys.modules[k] for k in _module_keys if k in sys.modules} + + mock_graphiti_core = MagicMock() + mock_nodes = MagicMock() + mock_episode_type = MagicMock() + mock_episode_type.text = "text" + mock_nodes.EpisodeType = mock_episode_type + mock_graphiti_core.nodes = mock_nodes + + mock_graphiti_class = MagicMock() + mock_graphiti_instance = MagicMock() + mock_graphiti_instance.add_episode = AsyncMock() + mock_graphiti_instance.search = AsyncMock(return_value=[]) + mock_graphiti_instance.build_indices_and_constraints = AsyncMock() + mock_graphiti_instance.close = AsyncMock() + mock_graphiti_class.return_value = mock_graphiti_instance + mock_graphiti_core.Graphiti = mock_graphiti_class + + mock_driver_module = MagicMock() + mock_driver_module.KuzuDriver = MagicMock() + mock_graphiti_core.driver = MagicMock() + mock_graphiti_core.driver.kuzu_driver = mock_driver_module + + sys.modules["graphiti_core"] = mock_graphiti_core + sys.modules["graphiti_core.nodes"] = mock_nodes + sys.modules["graphiti_core.driver"] = mock_graphiti_core.driver + sys.modules["graphiti_core.driver.kuzu_driver"] = mock_driver_module + + try: + yield { + "graphiti_core": mock_graphiti_core, + "episode_type": mock_episode_type, + "graphiti_instance": mock_graphiti_instance, + } + finally: + for k in _module_keys: + if k in _saved: + sys.modules[k] = _saved[k] + else: + sys.modules.pop(k, None) + + +# 
============================================================================= +# Environment Fixtures +# ============================================================================= + + +@pytest.fixture +def ollama_env_vars(tmp_path): + """Set environment variables for Ollama integration testing. + + Yields: + dict: Dictionary of environment variables that were set. + """ + test_db_path = str(tmp_path / "ollama_test_graphiti.db") + + env_vars = { + "GRAPHITI_ENABLED": "true", + "GRAPHITI_LLM_PROVIDER": "ollama", + "GRAPHITI_EMBEDDER_PROVIDER": "ollama", + "GRAPHITI_DATABASE": "test_ollama_memory", + "GRAPHITI_DB_PATH": test_db_path, + "OLLAMA_LLM_MODEL": "deepseek-r1:7b", + "OLLAMA_EMBEDDING_MODEL": "embeddinggemma", + "OLLAMA_EMBEDDING_DIM": "768", + "OLLAMA_BASE_URL": "http://localhost:11434", + "OPENAI_API_KEY": "sk-dummy-for-reranker", + } + + original = {k: os.environ.get(k) for k in env_vars} + + for key, value in env_vars.items(): + os.environ[key] = value + + yield env_vars + + for key, original_value in original.items(): + if original_value is None: + os.environ.pop(key, None) + else: + os.environ[key] = original_value + + +@pytest.fixture +def mock_client(): + """Create a mock GraphitiClient.""" + client = MagicMock() + client.graphiti = MagicMock() + client.graphiti.add_episode = AsyncMock() + client.graphiti.search = AsyncMock(return_value=[]) + client.is_initialized = True + client.initialize = AsyncMock(return_value=True) + client.close = AsyncMock() + return client + + +@pytest.fixture +def queries(mock_client): + """Create a GraphitiQueries instance for Ollama testing.""" + from integrations.graphiti.queries_pkg.queries import GraphitiQueries + + return GraphitiQueries( + client=mock_client, + group_id="ollama_test_group", + spec_context_id="ollama_test_spec", + ) + + +# ============================================================================= +# Test: Ollama Configuration (from test_ollama_embeddings) +# 
============================================================================= + + +class TestOllamaConfiguration: + """Tests for Ollama provider configuration. + + Converted from the configuration check section of run_ollama_embedding_test.py. + """ + + def test_ollama_config_from_env(self, ollama_env_vars): + """Test that GraphitiConfig correctly reads Ollama env vars.""" + from integrations.graphiti.config import GraphitiConfig + + config = GraphitiConfig.from_env() + + assert config.enabled is True + assert config.llm_provider == "ollama" + assert config.embedder_provider == "ollama" + assert config.ollama_llm_model == "deepseek-r1:7b" + assert config.ollama_embedding_model == "embeddinggemma" + + def test_ollama_embedding_dimension_config(self, ollama_env_vars): + """Test that embedding dimension is correctly configured.""" + from integrations.graphiti.config import GraphitiConfig + + config = GraphitiConfig.from_env() + + assert config.ollama_embedding_dim == 768 + + def test_ollama_base_url_config(self, ollama_env_vars): + """Test that Ollama base URL is correctly configured.""" + from integrations.graphiti.config import GraphitiConfig + + config = GraphitiConfig.from_env() + + assert config.ollama_base_url == "http://localhost:11434" + + def test_ollama_config_is_valid(self, ollama_env_vars): + """Test that Ollama configuration passes validation.""" + from integrations.graphiti.config import GraphitiConfig + + config = GraphitiConfig.from_env() + + assert config.is_valid() is True + assert len(config.get_validation_errors()) == 0 + + +# ============================================================================= +# Test: Embedding Generation (from test_ollama_embeddings) +# ============================================================================= + + +class TestEmbeddingGeneration: + """Tests for embedding generation patterns. + + Tests the embedding workflow without requiring a live Ollama server. 
+ Converted from test_ollama_embeddings() in run_ollama_embedding_test.py. + """ + + def test_cosine_similarity_identical_vectors(self): + """Test cosine similarity returns 1.0 for identical vectors.""" + + def cosine_similarity(a, b): + dot_product = sum(x * y for x, y in zip(a, b)) + norm_a = sum(x * x for x in a) ** 0.5 + norm_b = sum(x * x for x in b) ** 0.5 + return dot_product / (norm_a * norm_b) if norm_a and norm_b else 0 + + vec = [0.1, 0.2, 0.3, 0.4, 0.5] + similarity = cosine_similarity(vec, vec) + + assert abs(similarity - 1.0) < 1e-10 + + def test_cosine_similarity_orthogonal_vectors(self): + """Test cosine similarity returns 0.0 for orthogonal vectors.""" + + def cosine_similarity(a, b): + dot_product = sum(x * y for x, y in zip(a, b)) + norm_a = sum(x * x for x in a) ** 0.5 + norm_b = sum(x * x for x in b) ** 0.5 + return dot_product / (norm_a * norm_b) if norm_a and norm_b else 0 + + vec_a = [1.0, 0.0] + vec_b = [0.0, 1.0] + similarity = cosine_similarity(vec_a, vec_b) + + assert abs(similarity) < 1e-10 + + def test_cosine_similarity_zero_vector(self): + """Test cosine similarity handles zero vectors.""" + + def cosine_similarity(a, b): + dot_product = sum(x * y for x, y in zip(a, b)) + norm_a = sum(x * x for x in a) ** 0.5 + norm_b = sum(x * x for x in b) ** 0.5 + return dot_product / (norm_a * norm_b) if norm_a and norm_b else 0 + + vec_a = [0.0, 0.0, 0.0] + vec_b = [1.0, 2.0, 3.0] + similarity = cosine_similarity(vec_a, vec_b) + + assert similarity == 0 + + def test_embedding_dimension_validation(self): + """Test that embedding dimension validation works correctly. + + Converted from the dimension check in test_ollama_embeddings(). 
+ """ + expected_dim = 768 + test_embedding = [0.1] * expected_dim + + assert len(test_embedding) == expected_dim + + # Wrong dimension should be detectable + wrong_embedding = [0.1] * 384 + assert len(wrong_embedding) != expected_dim + + def test_embedding_is_numeric_list(self): + """Test that embeddings are lists of numeric values.""" + test_embedding = [0.1] * 768 + + assert isinstance(test_embedding, list) + assert all(isinstance(v, (int, float)) for v in test_embedding) + assert len(test_embedding) > 0 + + +# ============================================================================= +# Test: Memory Creation with Ollama (from test_memory_creation) +# ============================================================================= + + +class TestMemoryCreationWithOllama: + """Tests for memory creation operations with Ollama embeddings. + + Converted from test_memory_creation() in run_ollama_embedding_test.py. + """ + + @pytest.mark.asyncio + async def test_save_session_insights_with_ollama_config(self, queries): + """Test saving session insights (simulating Ollama embedding flow).""" + session_insights = { + "subtasks_completed": ["implement-oauth-login", "add-jwt-validation"], + "discoveries": { + "files_understood": { + "auth/oauth.py": "OAuth 2.0 flow implementation with Google/GitHub", + "auth/jwt.py": "JWT token generation and validation utilities", + }, + "patterns_found": [ + "Pattern: Use refresh tokens for long-lived sessions", + "Pattern: Store tokens in httpOnly cookies for security", + ], + "gotchas_encountered": [ + "Gotcha: Always validate JWT signature on server side", + "Gotcha: OAuth state parameter prevents CSRF attacks", + ], + }, + "what_worked": [ + "Using PyJWT for token handling", + "Separating OAuth providers into individual modules", + ], + "what_failed": [], + "recommendations_for_next_session": [ + "Consider adding refresh token rotation", + "Add rate limiting to auth endpoints", + ], + } + + result = await queries.add_session_insight( 
+ session_num=1, insights=session_insights + ) + + assert result is True + queries.client.graphiti.add_episode.assert_called_once() + + call_args = queries.client.graphiti.add_episode.call_args + episode_body = json.loads(call_args[1]["episode_body"]) + assert episode_body["type"] == "session_insight" + assert "implement-oauth-login" in episode_body["subtasks_completed"] + assert "auth/oauth.py" in episode_body["discoveries"]["files_understood"] + + @pytest.mark.asyncio + async def test_save_multiple_patterns(self, queries): + """Test saving multiple code patterns sequentially. + + Converted from the pattern save loop in test_memory_creation(). + """ + patterns = [ + "OAuth implementation uses authorization code flow for web apps", + "JWT tokens include user ID, roles, and expiration in payload", + "Token refresh happens automatically when access token expires", + ] + + for pattern in patterns: + result = await queries.add_pattern(pattern) + assert result is True + + assert queries.client.graphiti.add_episode.call_count == 3 + + # Verify each pattern was stored with correct content + for i, call in enumerate(queries.client.graphiti.add_episode.call_args_list): + episode_body = json.loads(call[1]["episode_body"]) + assert episode_body["type"] == "pattern" + assert episode_body["pattern"] == patterns[i] + + @pytest.mark.asyncio + async def test_save_multiple_gotchas(self, queries): + """Test saving multiple gotchas sequentially. + + Converted from the gotcha save loop in test_memory_creation(). 
+ """ + gotchas = [ + "Never store config values in frontend code or files checked into git", + "API redirect URIs must exactly match the registered URIs", + "Cache expiration times should be short for performance (15 min default)", + ] + + for gotcha in gotchas: + result = await queries.add_gotcha(gotcha) + assert result is True + + assert queries.client.graphiti.add_episode.call_count == 3 + + for i, call in enumerate(queries.client.graphiti.add_episode.call_args_list): + episode_body = json.loads(call[1]["episode_body"]) + assert episode_body["type"] == "gotcha" + assert episode_body["gotcha"] == gotchas[i] + + @pytest.mark.asyncio + async def test_save_codebase_discoveries(self, queries): + """Test saving codebase discoveries. + + Converted from step 5 in test_memory_creation(). + """ + discoveries = { + "api/routes/users.py": "User management API endpoints (list, create, update)", + "middleware/logging.py": "Request logging middleware for all routes", + "models/user.py": "User model with profile data and role management", + "services/notifications.py": "Notification service integrations (email, SMS, push)", + } + + result = await queries.add_codebase_discoveries(discoveries) + + assert result is True + queries.client.graphiti.add_episode.assert_called_once() + + call_args = queries.client.graphiti.add_episode.call_args + episode_body = json.loads(call_args[1]["episode_body"]) + assert episode_body["type"] == "codebase_discovery" + assert len(episode_body["files"]) == 4 + assert episode_body["files"]["api/routes/users.py"] == ( + "User management API endpoints (list, create, update)" + ) + + +# ============================================================================= +# Test: Full Create-Store-Retrieve Cycle (from test_full_cycle) +# ============================================================================= + + +class TestFullCycle: + """Tests for the complete memory lifecycle. + + Converted from test_full_cycle() in run_ollama_embedding_test.py. 
+ Tests the create -> store -> verify cycle without requiring live services. + """ + + @pytest.mark.asyncio + async def test_pattern_store_and_verify(self, queries): + """Test storing a unique pattern and verifying its content.""" + unique_pattern = "Use dependency injection for database connections" + + result = await queries.add_pattern(unique_pattern) + + assert result is True + + call_args = queries.client.graphiti.add_episode.call_args + episode_body = json.loads(call_args[1]["episode_body"]) + + assert episode_body["type"] == "pattern" + assert episode_body["pattern"] == unique_pattern + assert episode_body["spec_id"] == "ollama_test_spec" + assert "timestamp" in episode_body + + @pytest.mark.asyncio + async def test_gotcha_store_and_verify(self, queries): + """Test storing a unique gotcha and verifying its content.""" + unique_gotcha = "Always close database connections in finally blocks" + + result = await queries.add_gotcha(unique_gotcha) + + assert result is True + + call_args = queries.client.graphiti.add_episode.call_args + episode_body = json.loads(call_args[1]["episode_body"]) + + assert episode_body["type"] == "gotcha" + assert episode_body["gotcha"] == unique_gotcha + assert episode_body["spec_id"] == "ollama_test_spec" + + @pytest.mark.asyncio + async def test_task_outcome_store_and_verify(self, queries): + """Test storing a task outcome and verifying its content.""" + result = await queries.add_task_outcome( + task_id="cycle-test-task", + success=True, + outcome="Full cycle test completed", + metadata={"test_type": "integration"}, + ) + + assert result is True + + call_args = queries.client.graphiti.add_episode.call_args + episode_body = json.loads(call_args[1]["episode_body"]) + + assert episode_body["type"] == "task_outcome" + assert episode_body["task_id"] == "cycle-test-task" + assert episode_body["success"] is True + assert episode_body["outcome"] == "Full cycle test completed" + assert episode_body["test_type"] == "integration" + + 
@pytest.mark.asyncio + async def test_structured_insights_store_and_verify(self, queries): + """Test storing structured insights and verifying content integrity.""" + insights = { + "subtask_id": "cycle-subtask", + "file_insights": [ + { + "path": "src/db.py", + "purpose": "Database connection pool", + "changes_made": "Added connection retry logic", + "patterns_used": ["retry pattern", "connection pooling"], + "gotchas": ["pool exhaustion under load"], + } + ], + "patterns_discovered": [ + { + "pattern": "Connection pooling with retry", + "applies_to": "Database access layer", + "example": "src/db.py:create_pool()", + } + ], + "gotchas_discovered": [ + { + "gotcha": "Pool exhaustion under concurrent load", + "trigger": "More than 100 concurrent requests", + "solution": "Increase pool size or add queue", + } + ], + } + + result = await queries.add_structured_insights(insights) + + assert result is True + # Should have 3 calls: 1 file insight + 1 pattern + 1 gotcha + assert queries.client.graphiti.add_episode.call_count == 3 + + +# ============================================================================= +# Test: Temp Directory Cleanup +# ============================================================================= + + +class TestTempDirectoryCleanup: + """Tests for temporary directory cleanup. + + Verifies that test database directories are properly cleaned up, + matching the cleanup behavior in the original script. 
+ """ + + def test_temp_directory_creation_and_cleanup(self, tmp_path): + """Test that temporary test directories can be created and cleaned up.""" + test_db_path = tmp_path / "ollama_memory_test" + test_db_path.mkdir(parents=True, exist_ok=True) + + spec_dir = test_db_path / "test_spec" + project_dir = test_db_path / "test_project" + spec_dir.mkdir() + project_dir.mkdir() + + assert test_db_path.exists() + assert spec_dir.exists() + assert project_dir.exists() + + # Simulate cleanup (as done in run_ollama_embedding_test.py) + shutil.rmtree(test_db_path, ignore_errors=True) + + assert not test_db_path.exists() + + def test_nested_temp_directories(self, tmp_path): + """Test creation and cleanup of nested directory structures.""" + test_db_path = tmp_path / "test_db" + graphiti_db = test_db_path / "graphiti_db" + graphiti_db.mkdir(parents=True) + + # Create some test files + (graphiti_db / "test.db").touch() + (graphiti_db / "lock.lck").touch() + + assert (graphiti_db / "test.db").exists() + + # Cleanup + shutil.rmtree(test_db_path, ignore_errors=True) + + assert not test_db_path.exists() diff --git a/apps/backend/integrations/graphiti/tests/test_queries.py b/apps/backend/integrations/graphiti/tests/test_queries.py index 9f8b2f6727..9061ff9220 100644 --- a/apps/backend/integrations/graphiti/tests/test_queries.py +++ b/apps/backend/integrations/graphiti/tests/test_queries.py @@ -9,10 +9,11 @@ - add_gotcha() - add_task_outcome() - add_structured_insights() +- cleanup_expired_episodes() """ import json -from datetime import datetime +from datetime import datetime, timedelta, timezone from unittest.mock import AsyncMock, MagicMock, patch import pytest @@ -781,3 +782,164 @@ async def test_add_structured_insights_outcome_with_all_fields(self, queries): assert episode_body["why_failed"] is None assert episode_body["alternatives_tried"] == ["Alt1", "Alt2"] assert episode_body["changed_files"] == ["file1.py", "file2.py"] + + +# 
============================================================================= +# Episode TTL Cleanup Tests +# ============================================================================= + + +def _make_episode_node(uuid: str, created_at: datetime, group_id: str = "test_group"): + """Create a mock EpisodicNode for testing.""" + node = MagicMock() + node.uuid = uuid + node.created_at = created_at + node.group_id = group_id + return node + + +class TestCleanupExpiredEpisodes: + """Test cleanup_expired_episodes method.""" + + @pytest.mark.asyncio + async def test_cleanup_disabled_when_ttl_zero(self, queries): + """Test that TTL of 0 returns 0 without querying.""" + result = await queries.cleanup_expired_episodes(0) + assert result == 0 + + @pytest.mark.asyncio + async def test_cleanup_disabled_when_ttl_negative(self, queries): + """Test that negative TTL returns 0 without querying.""" + result = await queries.cleanup_expired_episodes(-5) + assert result == 0 + + @pytest.mark.asyncio + async def test_cleanup_removes_old_episodes(self, queries, mock_graphiti_core_nodes): + """Test that episodes older than TTL are removed.""" + import sys + + now = datetime.now(timezone.utc) + old_episode = _make_episode_node( + "old-uuid", now - timedelta(days=40) + ) + recent_episode = _make_episode_node( + "recent-uuid", now - timedelta(days=5) + ) + + # Mock EpisodicNode.get_by_group_ids + mock_episodic_node = MagicMock() + mock_episodic_node.get_by_group_ids = AsyncMock( + return_value=[old_episode, recent_episode] + ) + sys.modules["graphiti_core.nodes"].EpisodicNode = mock_episodic_node + + # Mock remove_episode + queries.client.graphiti.remove_episode = AsyncMock() + queries.client.graphiti.driver = MagicMock() + + result = await queries.cleanup_expired_episodes(30) + + assert result == 1 + queries.client.graphiti.remove_episode.assert_called_once_with("old-uuid") + + @pytest.mark.asyncio + async def test_cleanup_no_expired_episodes(self, queries, mock_graphiti_core_nodes): + 
"""Test cleanup when no episodes are expired.""" + import sys + + now = datetime.now(timezone.utc) + recent = _make_episode_node("recent-uuid", now - timedelta(days=1)) + + mock_episodic_node = MagicMock() + mock_episodic_node.get_by_group_ids = AsyncMock(return_value=[recent]) + sys.modules["graphiti_core.nodes"].EpisodicNode = mock_episodic_node + + queries.client.graphiti.remove_episode = AsyncMock() + queries.client.graphiti.driver = MagicMock() + + result = await queries.cleanup_expired_episodes(30) + + assert result == 0 + queries.client.graphiti.remove_episode.assert_not_called() + + @pytest.mark.asyncio + async def test_cleanup_handles_naive_datetime(self, queries, mock_graphiti_core_nodes): + """Test that episodes with naive datetimes are handled correctly.""" + import sys + + # Created_at without tzinfo (naive datetime) + naive_old = datetime(2020, 1, 1) + old_episode = _make_episode_node("old-uuid", naive_old) + + mock_episodic_node = MagicMock() + mock_episodic_node.get_by_group_ids = AsyncMock( + return_value=[old_episode] + ) + sys.modules["graphiti_core.nodes"].EpisodicNode = mock_episodic_node + + queries.client.graphiti.remove_episode = AsyncMock() + queries.client.graphiti.driver = MagicMock() + + result = await queries.cleanup_expired_episodes(30) + + assert result == 1 + queries.client.graphiti.remove_episode.assert_called_once_with("old-uuid") + + @pytest.mark.asyncio + async def test_cleanup_continues_on_single_remove_failure( + self, queries, mock_graphiti_core_nodes + ): + """Test that cleanup continues when one episode removal fails.""" + import sys + + now = datetime.now(timezone.utc) + old1 = _make_episode_node("old-1", now - timedelta(days=60)) + old2 = _make_episode_node("old-2", now - timedelta(days=60)) + + mock_episodic_node = MagicMock() + mock_episodic_node.get_by_group_ids = AsyncMock( + return_value=[old1, old2] + ) + sys.modules["graphiti_core.nodes"].EpisodicNode = mock_episodic_node + + # First removal fails, second succeeds + 
queries.client.graphiti.remove_episode = AsyncMock( + side_effect=[Exception("remove failed"), None] + ) + queries.client.graphiti.driver = MagicMock() + + result = await queries.cleanup_expired_episodes(30) + + assert result == 1 # Only the second one succeeded + + @pytest.mark.asyncio + async def test_cleanup_handles_query_exception(self, queries, mock_graphiti_core_nodes): + """Test that cleanup handles exceptions from get_by_group_ids.""" + import sys + + mock_episodic_node = MagicMock() + mock_episodic_node.get_by_group_ids = AsyncMock( + side_effect=Exception("Database error") + ) + sys.modules["graphiti_core.nodes"].EpisodicNode = mock_episodic_node + + queries.client.graphiti.driver = MagicMock() + + result = await queries.cleanup_expired_episodes(30) + + assert result == 0 + + @pytest.mark.asyncio + async def test_cleanup_empty_group(self, queries, mock_graphiti_core_nodes): + """Test cleanup when no episodes exist in the group.""" + import sys + + mock_episodic_node = MagicMock() + mock_episodic_node.get_by_group_ids = AsyncMock(return_value=[]) + sys.modules["graphiti_core.nodes"].EpisodicNode = mock_episodic_node + + queries.client.graphiti.driver = MagicMock() + + result = await queries.cleanup_expired_episodes(30) + + assert result == 0 diff --git a/apps/backend/phase_config.py b/apps/backend/phase_config.py index ed7542b5d8..b2ec219a50 100644 --- a/apps/backend/phase_config.py +++ b/apps/backend/phase_config.py @@ -20,7 +20,8 @@ "opus": "claude-opus-4-6", "opus-1m": "claude-opus-4-6", "opus-4.5": "claude-opus-4-5-20251101", - "sonnet": "claude-sonnet-4-5-20250929", + "sonnet": "claude-sonnet-4-6", + "sonnet-4.5": "claude-sonnet-4-5-20250929", "haiku": "claude-haiku-4-5-20251001", } @@ -95,12 +96,22 @@ class PhaseThinkingConfig(TypedDict, total=False): qa: str +class PhaseCustomAgentsConfig(TypedDict, total=False): + """Per-phase custom agent IDs from ~/.claude/agents/""" + + spec: str + planning: str + coding: str + qa: str + + class 
TaskMetadataConfig(TypedDict, total=False): """Structure of model-related fields in task_metadata.json""" isAutoProfile: bool phaseModels: PhaseModelConfig phaseThinking: PhaseThinkingConfig + phaseCustomAgents: PhaseCustomAgentsConfig model: str thinkingLevel: str fastMode: bool @@ -495,6 +506,36 @@ def get_fast_mode(spec_dir: Path) -> bool: return False +def get_phase_custom_agent( + spec_dir: Path, + phase: Phase, +) -> str | None: + """ + Get the custom agent ID for a specific execution phase. + + Reads the phaseCustomAgents field from task_metadata.json. + + Args: + spec_dir: Path to the spec directory + phase: Execution phase (spec, planning, coding, qa) + + Returns: + Custom agent ID if configured, None otherwise + """ + metadata = load_task_metadata(spec_dir) + if not metadata: + return None + + phase_agents = metadata.get("phaseCustomAgents") + if not phase_agents or not isinstance(phase_agents, dict): + return None + + agent_id = phase_agents.get(phase) + if agent_id and isinstance(agent_id, str) and agent_id.strip(): + return agent_id.strip() + return None + + def get_spec_phase_thinking_budget(phase_name: str) -> int: """ Get the thinking budget for a specific spec runner phase. 
diff --git a/apps/backend/qa/loop.py b/apps/backend/qa/loop.py index 9bf7f5d776..fc092d673e 100644 --- a/apps/backend/qa/loop.py +++ b/apps/backend/qa/loop.py @@ -21,6 +21,7 @@ linear_qa_rejected, linear_qa_started, ) +from agents.custom_agents import build_agents_catalog_prompt from phase_config import ( get_fast_mode, get_phase_client_thinking_kwargs, @@ -141,6 +142,9 @@ async def run_qa_validation_loop( {"iteration": 1, "maxIterations": MAX_QA_ITERATIONS}, ) + # Build catalog of available specialist agents + agents_catalog = build_agents_catalog_prompt() + fast_mode = get_fast_mode(spec_dir) debug( "qa_loop", @@ -185,6 +189,7 @@ async def run_qa_validation_loop( agent_type="qa_fixer", betas=qa_betas, fast_mode=fast_mode, + agents_catalog_prompt=agents_catalog, **fixer_thinking_kwargs, ) @@ -302,6 +307,7 @@ async def run_qa_validation_loop( agent_type="qa_reviewer", betas=qa_betas, fast_mode=fast_mode, + agents_catalog_prompt=agents_catalog, **qa_thinking_kwargs, ) @@ -502,6 +508,7 @@ async def run_qa_validation_loop( agent_type="qa_fixer", betas=fixer_betas, fast_mode=fast_mode, + agents_catalog_prompt=agents_catalog, **fixer_thinking_kwargs, ) diff --git a/apps/frontend/package.json b/apps/frontend/package.json index 1cf515ed93..d19be92f12 100644 --- a/apps/frontend/package.json +++ b/apps/frontend/package.json @@ -1,6 +1,6 @@ { "name": "auto-claude-ui", - "version": "2.7.6", + "version": "2.7.16", "type": "module", "description": "Desktop UI for Auto Claude autonomous coding framework", "homepage": "https://github.com/AndyMik90/Auto-Claude", diff --git a/apps/frontend/src/__tests__/e2e/smoke.test.ts b/apps/frontend/src/__tests__/e2e/smoke.test.ts index bdee6480a9..c956c96766 100644 --- a/apps/frontend/src/__tests__/e2e/smoke.test.ts +++ b/apps/frontend/src/__tests__/e2e/smoke.test.ts @@ -182,7 +182,7 @@ describe('E2E Smoke Tests', () => { const addProject = electronAPI['addProject'] as (path: string) => Promise; const addResult = await 
addProject(TEST_PROJECT_PATH); - expect(mockIpcRenderer.invoke).toHaveBeenCalledWith('project:add', TEST_PROJECT_PATH); + expect(mockIpcRenderer.invoke).toHaveBeenCalledWith('project:add', TEST_PROJECT_PATH, undefined); expect(addResult).toMatchObject({ success: true, data: expect.objectContaining({ diff --git a/apps/frontend/src/__tests__/integration/ipc-bridge.test.ts b/apps/frontend/src/__tests__/integration/ipc-bridge.test.ts index 123b76fa9f..119978327d 100644 --- a/apps/frontend/src/__tests__/integration/ipc-bridge.test.ts +++ b/apps/frontend/src/__tests__/integration/ipc-bridge.test.ts @@ -63,7 +63,7 @@ describe('IPC Bridge Integration', () => { const addProject = electronAPI['addProject'] as (path: string) => Promise; await addProject('/test/path'); - expect(mockIpcRenderer.invoke).toHaveBeenCalledWith('project:add', '/test/path'); + expect(mockIpcRenderer.invoke).toHaveBeenCalledWith('project:add', '/test/path', undefined); }); it('should have removeProject method', async () => { diff --git a/apps/frontend/src/main/claude-code-settings/reader.ts b/apps/frontend/src/main/claude-code-settings/reader.ts index c8141a8dbe..c9b76f8193 100644 --- a/apps/frontend/src/main/claude-code-settings/reader.ts +++ b/apps/frontend/src/main/claude-code-settings/reader.ts @@ -16,7 +16,7 @@ import { existsSync, readFileSync } from 'fs'; import { homedir } from 'os'; import path from 'path'; import { isWindows, isMacOS } from '../platform'; -import type { ClaudeCodeSettings, ClaudeCodeSettingsHierarchy } from './types'; +import type { ClaudeCodeSettings, ClaudeCodeSettingsHierarchy, ClaudeCodeMcpServerConfig } from './types'; import { mergeClaudeCodeSettings } from './merger'; import { debugLog, debugError } from '../../shared/utils/debug-logger'; @@ -91,6 +91,127 @@ function sanitizePermissions(permissions: unknown): ClaudeCodeSettings['permissi return hasValidFields ? result : undefined; } +/** + * Validate and sanitize the mcpServers field to ensure it's a Record. 
+ * Returns undefined if the field is invalid or empty after sanitization. + */ +function sanitizeMcpServers(mcpServers: unknown): Record | undefined { + if (!isPlainObject(mcpServers)) { + return undefined; + } + + const sanitized: Record = {}; + let hasValidEntries = false; + + for (const [key, value] of Object.entries(mcpServers)) { + if (!isPlainObject(value)) { + debugLog(`${LOG_PREFIX} Skipping invalid mcpServers entry (not an object):`, key); + continue; + } + + const serverConfig: ClaudeCodeMcpServerConfig = {}; + let hasFields = false; + + // Validate optional string fields + if ('type' in value && typeof value.type === 'string' && (value.type === 'http' || value.type === 'sse')) { + serverConfig.type = value.type; + hasFields = true; + } + if ('command' in value && typeof value.command === 'string') { + serverConfig.command = value.command; + hasFields = true; + } + if ('url' in value && typeof value.url === 'string') { + serverConfig.url = value.url; + hasFields = true; + } + + // Validate args (string array) + if ('args' in value && Array.isArray(value.args)) { + const validArgs = (value.args as unknown[]).filter((a): a is string => typeof a === 'string'); + if (validArgs.length > 0) { + serverConfig.args = validArgs; + hasFields = true; + } + } + + // Validate headers (Record) + if ('headers' in value && isPlainObject(value.headers)) { + const headers: Record = {}; + let hasHeaders = false; + for (const [hk, hv] of Object.entries(value.headers)) { + if (typeof hv === 'string') { + headers[hk] = hv; + hasHeaders = true; + } + } + if (hasHeaders) { + serverConfig.headers = headers; + hasFields = true; + } + } + + // Validate env (Record) + if ('env' in value && isPlainObject(value.env)) { + const sanitizedEnvResult = sanitizeEnv(value.env); + if (sanitizedEnvResult) { + serverConfig.env = sanitizedEnvResult; + hasFields = true; + } + } + + // Validate oauth (opaque object, just check it's an object) + if ('oauth' in value && isPlainObject(value.oauth)) { 
+ serverConfig.oauth = value.oauth as Record; + hasFields = true; + } + + // Ensure the server has a usable transport (command-based or HTTP/SSE) + const hasCommandTransport = + typeof serverConfig.command === 'string' && serverConfig.command.trim().length > 0; + const hasHttpTransport = + (serverConfig.type === 'http' || serverConfig.type === 'sse') && + typeof serverConfig.url === 'string' && + serverConfig.url.trim().length > 0; + const isUsableServer = hasCommandTransport || hasHttpTransport; + + if (hasFields && isUsableServer) { + sanitized[key] = serverConfig; + hasValidEntries = true; + } else if (!hasFields) { + debugLog(`${LOG_PREFIX} Skipping mcpServers entry with no valid fields:`, key); + } else { + debugLog(`${LOG_PREFIX} Skipping unusable mcpServers entry (no command or url transport):`, key); + } + } + + return hasValidEntries ? sanitized : undefined; +} + +/** + * Validate and sanitize the enabledPlugins field to ensure it's a Record. + * Returns undefined if the field is invalid or empty after sanitization. + */ +function sanitizeEnabledPlugins(enabledPlugins: unknown): Record | undefined { + if (!isPlainObject(enabledPlugins)) { + return undefined; + } + + const sanitized: Record = {}; + let hasValidEntries = false; + + for (const [key, value] of Object.entries(enabledPlugins)) { + if (typeof value === 'boolean') { + sanitized[key] = value; + hasValidEntries = true; + } else { + debugLog(`${LOG_PREFIX} Skipping invalid enabledPlugins entry:`, { key, value: typeof value }); + } + } + + return hasValidEntries ? sanitized : undefined; +} + /** * Validate and sanitize a parsed JSON object to ensure it has the expected structure for ClaudeCodeSettings. * Invalid fields are removed, valid fields are kept. 
@@ -147,6 +268,28 @@ function isValidSettings(obj: unknown): obj is ClaudeCodeSettings { } } + // Validate and sanitize mcpServers field + if ('mcpServers' in obj) { + const sanitizedMcpServers = sanitizeMcpServers(obj.mcpServers); + if (sanitizedMcpServers) { + sanitized.mcpServers = sanitizedMcpServers; + hasValidFields = true; + } else { + debugError(`${LOG_PREFIX} Invalid or empty mcpServers field, skipping`); + } + } + + // Validate and sanitize enabledPlugins field + if ('enabledPlugins' in obj) { + const sanitizedEnabledPlugins = sanitizeEnabledPlugins(obj.enabledPlugins); + if (sanitizedEnabledPlugins) { + sanitized.enabledPlugins = sanitizedEnabledPlugins; + hasValidFields = true; + } else { + debugError(`${LOG_PREFIX} Invalid or empty enabledPlugins field, skipping`); + } + } + // If we have at least one valid field, mutate the original object to contain only sanitized fields if (hasValidFields) { // Clear the original object and copy sanitized fields @@ -195,7 +338,7 @@ function readJsonFile(filePath: string): ClaudeCodeSettings | undefined { * 2. CLAUDE_CONFIG_DIR environment variable * 3. Default: ~/.claude */ -function getUserConfigDir(): string { +export function getUserConfigDir(): string { // Try to get configDir from the active Claude profile. // We use a lazy import to avoid circular dependencies and to handle // the case where ClaudeProfileManager hasn't been initialized yet. diff --git a/apps/frontend/src/main/claude-code-settings/types.ts b/apps/frontend/src/main/claude-code-settings/types.ts index 2f73b1e37e..c6c97e10ce 100644 --- a/apps/frontend/src/main/claude-code-settings/types.ts +++ b/apps/frontend/src/main/claude-code-settings/types.ts @@ -22,6 +22,27 @@ export interface ClaudeCodePermissions { additionalDirectories?: string[]; } +/** + * MCP server config as defined in Claude Code settings. + * Supports both stdio (command-based) and HTTP/SSE server types. 
+ */ +export interface ClaudeCodeMcpServerConfig { + /** Server type - command (stdio) or http/sse */ + type?: 'http' | 'sse'; + /** Command to run (for stdio servers) */ + command?: string; + /** Command arguments */ + args?: string[]; + /** URL for HTTP/SSE servers */ + url?: string; + /** HTTP headers */ + headers?: Record; + /** OAuth config (for some servers like Slack) */ + oauth?: Record; + /** Environment variables passed to the server process */ + env?: Record; +} + /** * A single level of Claude Code settings, as read from one settings file. * All fields are optional since any given file may only set a subset. @@ -34,6 +55,10 @@ export interface ClaudeCodeSettings { alwaysThinkingEnabled?: boolean; /** Environment variables to inject into agent processes */ env?: Record; + /** MCP server configurations (inline) */ + mcpServers?: Record; + /** Enabled plugins from marketplace (key: "pluginId@marketplace", value: enabled) */ + enabledPlugins?: Record; } /** diff --git a/apps/frontend/src/main/ipc-handlers/claude-agents-handlers.ts b/apps/frontend/src/main/ipc-handlers/claude-agents-handlers.ts new file mode 100644 index 0000000000..380e85e6b7 --- /dev/null +++ b/apps/frontend/src/main/ipc-handlers/claude-agents-handlers.ts @@ -0,0 +1,125 @@ +/** + * Claude Agents Handlers + * + * IPC handlers for reading Claude Code custom agent definitions + * from ~/.claude/agents/ directory structure. 
+ */ + +import { ipcMain } from 'electron'; +import { existsSync, readdirSync } from 'fs'; +import path from 'path'; +import { IPC_CHANNELS } from '../../shared/constants/ipc'; +import type { IPCResult } from '../../shared/types'; +import type { ClaudeAgentsInfo, ClaudeAgentCategory, ClaudeCustomAgent } from '../../shared/types/integrations'; +import { getUserConfigDir } from '../claude-code-settings/reader'; +import { debugLog } from '../../shared/utils/debug-logger'; + +const LOG_PREFIX = '[ClaudeAgents]'; + +/** + * Convert a category directory name to a human-readable name. + * Removes the number prefix (e.g. "01-") and capitalizes words. + */ +function toCategoryName(dirName: string): string { + // Remove number prefix (e.g. "01-" from "01-core-development") + const withoutPrefix = dirName.replace(/^\d+-/, ''); + return withoutPrefix + .replace(/[-_]/g, ' ') + .replace(/\b\w/g, (c) => c.toUpperCase()); +} + +/** + * Convert an agent filename to a human-readable name. + * Removes the .md extension, capitalizes words, replaces hyphens with spaces. + */ +function toAgentName(fileName: string): string { + // Remove .md extension + const withoutExt = fileName.replace(/\.md$/, ''); + return withoutExt + .replace(/[-_]/g, ' ') + .replace(/\b\w/g, (c) => c.toUpperCase()); +} + +/** + * Get the agents directory path (~/.claude/agents/). + * Respects CLAUDE_CONFIG_DIR environment variable. + */ +function getAgentsDir(): string { + return path.join(getUserConfigDir(), 'agents'); +} + +/** + * Register Claude Agents IPC handlers. 
+ */ +export function registerClaudeAgentsHandlers(): void { + ipcMain.handle(IPC_CHANNELS.CLAUDE_AGENTS_GET, async (): Promise> => { + try { + const agentsDir = getAgentsDir(); + + if (!existsSync(agentsDir)) { + debugLog(`${LOG_PREFIX} Agents directory not found:`, agentsDir); + return { success: true, data: { categories: [], totalAgents: 0 } }; + } + + const categories: ClaudeAgentCategory[] = []; + let totalAgents = 0; + + const entries = readdirSync(agentsDir, { withFileTypes: true }); + + for (const entry of entries) { + if (!entry.isDirectory()) continue; + const entryPath = path.join(agentsDir, entry.name); + + const agents: ClaudeCustomAgent[] = []; + + try { + const files = readdirSync(entryPath); + for (const file of files) { + if (!file.endsWith('.md') || file.toLowerCase() === 'readme.md') continue; + + const agentId = file.replace(/\.md$/, ''); + + // Use relative path (categoryDir/file) instead of absolute filePath + // to avoid exposing full filesystem paths to the renderer process + const relativePath = path.join(entry.name, file); + + agents.push({ + agentId, + agentName: toAgentName(file), + categoryDir: entry.name, + categoryName: toCategoryName(entry.name), + filePath: relativePath, + }); + } + } catch { + debugLog(`${LOG_PREFIX} Failed to read category directory:`, entryPath); + continue; + } + + if (agents.length > 0) { + // Sort agents by name within category + agents.sort((a, b) => a.agentName.localeCompare(b.agentName)); + + categories.push({ + categoryDir: entry.name, + categoryName: toCategoryName(entry.name), + agents, + }); + totalAgents += agents.length; + } + } + + // Sort categories by directory name (already numbered) + categories.sort((a, b) => a.categoryDir.localeCompare(b.categoryDir)); + + debugLog(`${LOG_PREFIX} Found ${totalAgents} agent(s) in ${categories.length} categories`); + return { success: true, data: { categories, totalAgents } }; + } catch (error) { + debugLog(`${LOG_PREFIX} Error reading agents:`, error); + return 
{ + success: false, + error: error instanceof Error ? error.message : 'Failed to read custom agents', + }; + } + }); +} diff --git a/apps/frontend/src/main/ipc-handlers/claude-mcp-handlers.ts b/apps/frontend/src/main/ipc-handlers/claude-mcp-handlers.ts new file mode 100644 index 0000000000..24c849710a --- /dev/null +++ b/apps/frontend/src/main/ipc-handlers/claude-mcp-handlers.ts @@ -0,0 +1,365 @@ +/** + * Claude MCP Handlers + * + * IPC handlers for reading Claude Code's global MCP configuration. + * Resolves both inline mcpServers from settings.json and enabled plugins + * from the marketplace plugin cache. + */ + +import { ipcMain } from 'electron'; +import fs from 'fs/promises'; +import { homedir } from 'os'; +import path from 'path'; +import { IPC_CHANNELS } from '../../shared/constants/ipc'; +import type { IPCResult } from '../../shared/types'; +import type { GlobalMcpInfo, GlobalMcpServerEntry } from '../../shared/types/integrations'; +import { readUserGlobalSettings, getUserConfigDir } from '../claude-code-settings/reader'; +import { debugLog } from '../../shared/utils/debug-logger'; + +const LOG_PREFIX = '[ClaudeMCP]'; + +/** + * Convert a serverId (e.g. "context7", "github-mcp") to a human-readable name. + * Capitalizes words and replaces hyphens/underscores with spaces. + */ +function toServerName(serverId: string): string { + return serverId + .replace(/[-_]/g, ' ') + .replace(/\b\w/g, (c) => c.toUpperCase()); +} + +/** + * Find the most recently modified subdirectory within a directory. + * Plugin caches store configs in hash-named subdirectories; we want the latest one. 
+ */ +async function findLatestSubdir(dirPath: string): Promise { + try { + await fs.access(dirPath); + } catch { + return undefined; + } + + try { + const entries = await fs.readdir(dirPath); + let latestDir: string | undefined; + let latestMtime = 0; + + for (const entry of entries) { + const entryPath = path.join(dirPath, entry); + try { + const stat = await fs.stat(entryPath); + if (stat.isDirectory() && stat.mtimeMs > latestMtime) { + latestMtime = stat.mtimeMs; + latestDir = entryPath; + } + } catch { + // Skip entries we can't stat + } + } + + return latestDir; + } catch { + return undefined; + } +} + +/** + * Resolve a single enabled plugin to its MCP server entries. + * Reads the .mcp.json from the plugin cache directory. + * + * @param pluginKey - Plugin key in format "pluginId@marketplace" + * @param claudeDir - Path to ~/.claude directory + * @returns Array of resolved server entries (a plugin .mcp.json can define multiple servers) + */ +async function resolvePluginServers(pluginKey: string, claudeDir: string): Promise { + const atIndex = pluginKey.lastIndexOf('@'); + if (atIndex <= 0) { + debugLog(`${LOG_PREFIX} Invalid plugin key format (missing @):`, pluginKey); + return []; + } + + const pluginId = pluginKey.substring(0, atIndex); + const marketplace = pluginKey.substring(atIndex + 1); + + // Plugin cache path: ~/.claude/plugins/cache/{marketplace}/{pluginId}/ + const pluginCacheDir = path.join(claudeDir, 'plugins', 'cache', marketplace, pluginId); + + try { + await fs.access(pluginCacheDir); + } catch { + debugLog(`${LOG_PREFIX} Plugin cache directory not found:`, pluginCacheDir); + return []; + } + + // Find the most recently modified hash subdirectory + const latestHashDir = await findLatestSubdir(pluginCacheDir); + if (!latestHashDir) { + debugLog(`${LOG_PREFIX} No hash subdirectory found in plugin cache:`, pluginCacheDir); + return []; + } + + // Read .mcp.json from the hash directory (read directly to avoid TOCTOU race) + const mcpJsonPath = 
path.join(latestHashDir, '.mcp.json'); + try { + const content = await fs.readFile(mcpJsonPath, 'utf-8'); + const parsed = JSON.parse(content); + + if (typeof parsed !== 'object' || parsed === null || Array.isArray(parsed)) { + debugLog(`${LOG_PREFIX} Invalid .mcp.json structure:`, mcpJsonPath); + return []; + } + + const entries: GlobalMcpServerEntry[] = []; + + // Each key in the .mcp.json is a server ID with its config + for (const [serverId, serverConfig] of Object.entries(parsed)) { + if (typeof serverConfig !== 'object' || serverConfig === null) { + debugLog(`${LOG_PREFIX} Skipping invalid server config in .mcp.json:`, { pluginKey, serverId }); + continue; + } + + const config = serverConfig as Record; + const entry: GlobalMcpServerEntry = { + pluginKey, + serverId, + serverName: toServerName(serverId), + config: { + ...(typeof config.type === 'string' && (config.type === 'http' || config.type === 'sse') + ? { type: config.type as 'http' | 'sse' } + : {}), + ...(typeof config.command === 'string' ? { command: config.command } : {}), + ...(Array.isArray(config.args) ? { args: config.args.filter((a): a is string => typeof a === 'string') } : {}), + ...(typeof config.url === 'string' ? { url: config.url } : {}), + ...(typeof config.headers === 'object' && config.headers !== null && !Array.isArray(config.headers) + ? { headers: Object.fromEntries( + Object.entries(config.headers as Record) + .filter(([, v]) => typeof v === 'string') + ) as Record } + : {}), + ...(typeof config.env === 'object' && config.env !== null && !Array.isArray(config.env) + ? 
{ env: Object.fromEntries( + Object.entries(config.env as Record) + .filter(([, v]) => typeof v === 'string') + ) as Record } + : {}), + }, + source: 'plugin', + }; + + entries.push(entry); + } + + debugLog(`${LOG_PREFIX} Resolved ${entries.length} server(s) from plugin:`, pluginKey); + return entries; + } catch (error: unknown) { + // ENOENT means file doesn't exist — not an error worth logging at detail level + if (error instanceof Error && 'code' in error && (error as NodeJS.ErrnoException).code === 'ENOENT') { + debugLog(`${LOG_PREFIX} .mcp.json not found in plugin cache:`, mcpJsonPath); + } else { + debugLog(`${LOG_PREFIX} Failed to parse .mcp.json:`, mcpJsonPath, error); + } + return []; + } +} + +/** + * Convert inline mcpServers config entries to GlobalMcpServerEntry array. + * Performs runtime type validation since the input may come from untrusted JSON. + * + * @param mcpServers - MCP server configurations keyed by server ID (runtime-validated) + * @param source - Where this config was sourced from ('settings' for settings.json, 'claude-json' for ~/.claude.json) + */ +function resolveInlineServers( + mcpServers: Record, + source: 'settings' | 'claude-json' = 'settings' +): GlobalMcpServerEntry[] { + const entries: GlobalMcpServerEntry[] = []; + + for (const [serverId, rawConfig] of Object.entries(mcpServers)) { + if (typeof rawConfig !== 'object' || rawConfig === null || Array.isArray(rawConfig)) { + debugLog(`${LOG_PREFIX} Skipping invalid mcpServers entry (not an object):`, serverId); + continue; + } + + const config = rawConfig as Record; + + // Skip disabled servers + if (config.disabled === true) { + debugLog(`${LOG_PREFIX} Skipping disabled mcpServers entry:`, serverId); + continue; + } + + const entry: GlobalMcpServerEntry = { + serverId, + serverName: toServerName(serverId), + config: { + ...(typeof config.type === 'string' && (config.type === 'http' || config.type === 'sse') + ? 
{ type: config.type as 'http' | 'sse' } + : {}), + ...(typeof config.command === 'string' ? { command: config.command } : {}), + ...(Array.isArray(config.args) + ? { args: config.args.filter((a: unknown): a is string => typeof a === 'string') } + : {}), + ...(typeof config.url === 'string' ? { url: config.url } : {}), + ...(typeof config.headers === 'object' && config.headers !== null && !Array.isArray(config.headers) + ? { headers: Object.fromEntries( + Object.entries(config.headers as Record) + .filter(([, v]) => typeof v === 'string') + ) as Record } + : {}), + ...(typeof config.env === 'object' && config.env !== null && !Array.isArray(config.env) + ? { env: Object.fromEntries( + Object.entries(config.env as Record) + .filter(([, v]) => typeof v === 'string') + ) as Record } + : {}), + }, + source, + }; + + // Ensure the server has a usable transport (command-based or HTTP/SSE) + const hasCommandTransport = + typeof entry.config.command === 'string' && entry.config.command.trim().length > 0; + const hasHttpTransport = + (entry.config.type === 'http' || entry.config.type === 'sse') && + typeof entry.config.url === 'string' && + entry.config.url.trim().length > 0; + + if (!hasCommandTransport && !hasHttpTransport) { + debugLog(`${LOG_PREFIX} Skipping unusable mcpServers entry (no command or url transport):`, serverId); + continue; + } + + entries.push(entry); + } + + return entries; +} + +/** + * Get the Claude home directory (~/.claude). + * Delegates to getUserConfigDir() from the settings reader for consistency + * with profile-aware config resolution (active profile → CLAUDE_CONFIG_DIR → ~/.claude). + */ +function getClaudeHomeDir(): string { + return getUserConfigDir(); +} + +/** + * Read MCP servers from ~/.claude.json (the main Claude Code configuration file). + * This file contains a top-level `mcpServers` key with the same structure as + * ClaudeCodeMcpServerConfig entries. 
+ * + * @returns Array of GlobalMcpServerEntry with source 'claude-json', or empty array on failure. + */ +async function readClaudeJsonMcpServers(): Promise { + // .claude.json lives in the home directory (or CLAUDE_CONFIG_DIR parent). + // Use getUserConfigDir() for profile-aware resolution, then look for + // .claude.json in the parent of that config dir. + const configDir = getUserConfigDir(); + const configParent = path.dirname(configDir); + const homeDir = homedir(); + + // Build candidate list: config dir parent first, then home as fallback + const candidates = [path.join(configParent, '.claude.json')]; + if (configParent !== homeDir) { + candidates.push(path.join(homeDir, '.claude.json')); + } + + let claudeJsonPath: string | undefined; + for (const candidate of candidates) { + try { + await fs.access(candidate); + claudeJsonPath = candidate; + break; + } catch { + // Not found, try next + } + } + if (!claudeJsonPath) { + debugLog(`${LOG_PREFIX} .claude.json not found in expected locations`); + return []; + } + + try { + const content = await fs.readFile(claudeJsonPath, 'utf-8'); + const parsed = JSON.parse(content); + + if (typeof parsed !== 'object' || parsed === null || Array.isArray(parsed)) { + debugLog(`${LOG_PREFIX} Invalid ~/.claude.json structure (expected object)`); + return []; + } + + const mcpServers = parsed.mcpServers; + if (!mcpServers || typeof mcpServers !== 'object' || Array.isArray(mcpServers)) { + debugLog(`${LOG_PREFIX} No valid mcpServers found in ~/.claude.json`); + return []; + } + + const entries = resolveInlineServers( + mcpServers as Record, + 'claude-json' + ); + + debugLog(`${LOG_PREFIX} Resolved ${entries.length} server(s) from ~/.claude.json`); + return entries; + } catch (error) { + debugLog(`${LOG_PREFIX} Failed to read/parse ~/.claude.json:`, claudeJsonPath, error); + return []; + } +} + +/** + * Register Claude MCP IPC handlers. 
+ */ +export function registerClaudeMcpHandlers(): void { + ipcMain.handle(IPC_CHANNELS.CLAUDE_MCP_GET_GLOBAL, async (): Promise> => { + try { + debugLog(`${LOG_PREFIX} Reading global MCP configuration`); + + const settings = readUserGlobalSettings(); + const claudeDir = getClaudeHomeDir(); + + const result: GlobalMcpInfo = { + pluginServers: [], + inlineServers: [], + claudeJsonServers: [], + }; + + // Resolve enabled plugins + if (settings?.enabledPlugins) { + for (const [pluginKey, enabled] of Object.entries(settings.enabledPlugins)) { + if (!enabled) { + continue; + } + + const servers = await resolvePluginServers(pluginKey, claudeDir); + result.pluginServers.push(...servers); + } + } + + // Resolve inline mcpServers from settings.json + if (settings?.mcpServers) { + result.inlineServers = resolveInlineServers(settings.mcpServers); + } + + // Read ~/.claude.json mcpServers + result.claudeJsonServers = await readClaudeJsonMcpServers(); + + debugLog( + `${LOG_PREFIX} Resolved global MCPs:`, + `${result.pluginServers.length} plugin server(s),`, + `${result.inlineServers.length} inline server(s),`, + `${result.claudeJsonServers.length} claude.json server(s)` + ); + + return { success: true, data: result }; + } catch (error) { + debugLog(`${LOG_PREFIX} Error reading global MCP configuration:`, error); + return { + success: false, + error: error instanceof Error ? 
error.message : 'Failed to read global MCP configuration', + }; + } + }); +} diff --git a/apps/frontend/src/main/ipc-handlers/context/project-context-handlers.ts b/apps/frontend/src/main/ipc-handlers/context/project-context-handlers.ts index 49134a6dc3..6cda8d8961 100644 --- a/apps/frontend/src/main/ipc-handlers/context/project-context-handlers.ts +++ b/apps/frontend/src/main/ipc-handlers/context/project-context-handlers.ts @@ -1,11 +1,13 @@ import { ipcMain } from 'electron'; import type { BrowserWindow } from 'electron'; import path from 'path'; -import { existsSync, readFileSync } from 'fs'; +import { existsSync, readFileSync, mkdirSync } from 'fs'; +import { writeFileAtomicSync } from '../../utils/atomic-file'; import { spawn } from 'child_process'; import { IPC_CHANNELS, getSpecsDir, AUTO_BUILD_PATHS } from '../../../shared/constants'; import type { IPCResult, + Project, ProjectContextData, ProjectIndex, MemoryEpisode @@ -21,6 +23,26 @@ import { loadFileBasedMemories } from './memory-data-handlers'; import { parsePythonCommand } from '../../python-detector'; import { getConfiguredPythonPath } from '../../python-env-manager'; import { getAugmentedEnv } from '../../env-utils'; +import { debugLog } from '../../../shared/utils/debug-logger'; + +function isChildPath(parentPath: string, candidatePath: string): boolean { + const rel = path.relative(parentPath, candidatePath); + return rel !== '' && !rel.startsWith('..') && !path.isAbsolute(rel); +} + +/** + * Generate a unique key for a child project index entry. + * Uses path.basename(child.path) and appends a numeric suffix (-2, -3, ...) if the key already exists. 
+ */ +function uniqueChildKey(childPath: string, existingKeys: Record): string { + const base = path.basename(childPath); + if (!(base in existingKeys)) return base; + let suffix = 2; + while (`${base}-${suffix}` in existingKeys) { + suffix++; + } + return `${base}-${suffix}`; +} /** * Load project index from file @@ -39,6 +61,123 @@ function loadProjectIndex(projectPath: string): ProjectIndex | null { } } +/** + * Run analyzer.py on a single project to generate its project_index.json. + * Reuses the same spawn logic as the CONTEXT_REFRESH_INDEX handler. + */ +async function refreshChildIndex( + childProject: Project, + autoBuildSource: string +): Promise { + const analyzerPath = path.join(autoBuildSource, 'analyzer.py'); + const indexOutputPath = path.join(childProject.path, AUTO_BUILD_PATHS.PROJECT_INDEX); + + const pythonCmd = getConfiguredPythonPath(); + const [pythonCommand, pythonBaseArgs] = parsePythonCommand(pythonCmd); + + try { + await new Promise((resolve, reject) => { + let stdout = ''; + let stderr = ''; + + const ANALYZER_TIMEOUT_MS = 120_000; // 2 minutes + + const proc = spawn(pythonCommand, [ + ...pythonBaseArgs, + analyzerPath, + '--project-dir', childProject.path, + '--output', indexOutputPath + ], { + cwd: childProject.path, + env: { + ...getAugmentedEnv(), + PYTHONIOENCODING: 'utf-8', + PYTHONUTF8: '1' + } + }); + + const timeout = setTimeout(() => { + debugLog(`[project-context] Child analyzer (${childProject.name}) timed out after ${ANALYZER_TIMEOUT_MS}ms, killing process`); + proc.kill('SIGTERM'); + reject(new Error(`Analyzer timed out after ${ANALYZER_TIMEOUT_MS / 1000}s`)); + }, ANALYZER_TIMEOUT_MS); + + proc.stdout?.on('data', (data) => { + stdout += data.toString('utf-8'); + }); + + proc.stderr?.on('data', (data) => { + stderr += data.toString('utf-8'); + }); + + proc.on('close', (code: number) => { + clearTimeout(timeout); + if (code === 0) { + debugLog(`[project-context] Child analyzer (${childProject.name}) stdout:`, stdout); + 
resolve(); + } else { + debugLog(`[project-context] Child analyzer (${childProject.name}) failed with code`, code); + debugLog(`[project-context] Child analyzer (${childProject.name}) stderr:`, stderr); + reject(new Error(`Analyzer exited with code ${code}: ${stderr || stdout}`)); + } + }); + + proc.on('error', (err) => { + clearTimeout(timeout); + debugLog(`[project-context] Child analyzer (${childProject.name}) spawn error:`, err); + reject(err); + }); + }); + + return loadProjectIndex(childProject.path); + } catch (error) { + debugLog(`[project-context] Failed to index child ${childProject.name}:`, error); + return null; + } +} + +/** + * Aggregate project indexes from child repos into a single customer-level index. + * Services are prefixed with the repo name to avoid key collisions. + */ +function aggregateChildIndexes( + customerPath: string, + childIndexes: Record +): ProjectIndex { + const mergedServices: Record = {}; + const mergedInfrastructure: ProjectIndex['infrastructure'] = {}; + const mergedConventions: ProjectIndex['conventions'] = {}; + + for (const [repoName, index] of Object.entries(childIndexes)) { + // Merge services with repo-name prefix to avoid collisions + if (index.services) { + for (const [serviceName, serviceInfo] of Object.entries(index.services)) { + const key = `${repoName}/${serviceName}`; + mergedServices[key] = serviceInfo; + } + } + + // Merge infrastructure (last-write-wins for overlapping keys) + if (index.infrastructure) { + Object.assign(mergedInfrastructure, index.infrastructure); + } + + // Merge conventions (last-write-wins for overlapping keys) + if (index.conventions) { + Object.assign(mergedConventions, index.conventions); + } + } + + return { + project_root: customerPath, + project_type: 'customer', + services: mergedServices, + infrastructure: mergedInfrastructure, + conventions: mergedConventions, + child_repos: childIndexes + }; +} + /** * Load recent memories from LadybugDB with file-based fallback */ @@ -63,7 
+202,7 @@ async function loadRecentMemories( recentMemories = graphMemories; } } catch (error) { - console.warn('Failed to load memories from LadybugDB, falling back to file-based:', error); + debugLog('Failed to load memories from LadybugDB, falling back to file-based:', error); } } @@ -81,8 +220,16 @@ async function loadRecentMemories( * Register project context handlers */ export function registerProjectContextHandlers( - _getMainWindow: () => BrowserWindow | null + getMainWindow: () => BrowserWindow | null ): void { + /** Send progress event to renderer */ + function sendIndexProgress(message: string, current?: number, total?: number, projectId?: string) { + const win = getMainWindow(); + if (win && !win.isDestroyed()) { + win.webContents.send(IPC_CHANNELS.CONTEXT_INDEX_PROGRESS, { message, current, total, projectId }); + } + } + // Get full project context ipcMain.handle( IPC_CHANNELS.CONTEXT_GET, @@ -93,8 +240,31 @@ export function registerProjectContextHandlers( } try { - // Load project index - const projectIndex = loadProjectIndex(project.path); + // Load project index — for customer projects, load the aggregated index + let projectIndex: ProjectIndex | null; + if (project.type === 'customer') { + projectIndex = loadProjectIndex(project.path); + // If no aggregated index exists yet, try to build one from existing child indexes + if (!projectIndex || projectIndex.project_type !== 'customer') { + const allProjects = projectStore.getProjects(); + const childProjects = allProjects.filter( + (p) => p.id !== project.id && isChildPath(project.path, p.path) + ); + const childIndexes: Record = {}; + for (const child of childProjects) { + const childIndex = loadProjectIndex(child.path); + if (childIndex) { + const key = uniqueChildKey(child.path, childIndexes); + childIndexes[key] = childIndex; + } + } + if (Object.keys(childIndexes).length > 0) { + projectIndex = aggregateChildIndexes(project.path, childIndexes); + } + } + } else { + projectIndex = 
loadProjectIndex(project.path); + } // Load graphiti state from most recent spec const memoryState = loadGraphitiStateFromSpecs(project.path, project.autoBuildPath); @@ -137,7 +307,7 @@ export function registerProjectContextHandlers( // Refresh project index ipcMain.handle( IPC_CHANNELS.CONTEXT_REFRESH_INDEX, - async (_, projectId: string): Promise> => { + async (_, projectId: string, force?: boolean): Promise> => { const project = projectStore.getProject(projectId); if (!project) { return { success: false, error: 'Project not found' }; @@ -154,13 +324,87 @@ export function registerProjectContextHandlers( }; } + // Customer projects: aggregate indexes from child repos + if (project.type === 'customer') { + const allProjects = projectStore.getProjects(); + const childProjects = allProjects.filter( + (p) => p.id !== project.id && isChildPath(project.path, p.path) + ); + + if (childProjects.length === 0) { + return { + success: false, + error: 'No child repositories found for this customer project' + }; + } + + const total = childProjects.length; + debugLog(`[project-context] Customer project: indexing ${total} child repos (force=${!!force})`); + sendIndexProgress('progress.discovering_repos', 0, total); + + const childIndexes: Record = {}; + const errors: string[] = []; + + for (let i = 0; i < childProjects.length; i++) { + const child = childProjects[i]; + sendIndexProgress('progress.analyzing_repo', i + 1, total); + + // Check if child already has an index (skip if force=true) + let childIndex = force ? 
null : loadProjectIndex(child.path); + + // If no index exists (or force), run analyzer on the child repo + if (!childIndex) { + debugLog(`[project-context] Running analyzer for child: ${child.name}`); + childIndex = await refreshChildIndex(child, autoBuildSource); + } + + if (childIndex) { + const key = uniqueChildKey(child.path, childIndexes); + childIndexes[key] = childIndex; + } else { + errors.push(child.name); + } + } + + sendIndexProgress('progress.aggregating_results', total, total); + + if (Object.keys(childIndexes).length === 0) { + sendIndexProgress(''); + return { + success: false, + error: `Failed to index any child repos. Failed: ${errors.join(', ')}` + }; + } + + // Aggregate all child indexes + const aggregatedIndex = aggregateChildIndexes(project.path, childIndexes); + + // Save aggregated index to customer's .auto-claude/project_index.json + const indexOutputPath = path.join(project.path, AUTO_BUILD_PATHS.PROJECT_INDEX); + const indexDir = path.dirname(indexOutputPath); + if (!existsSync(indexDir)) { + mkdirSync(indexDir, { recursive: true }); + } + writeFileAtomicSync(indexOutputPath, JSON.stringify(aggregatedIndex, null, 2), 'utf-8'); + + if (errors.length > 0) { + debugLog(`[project-context] Some child repos failed to index: ${errors.join(', ')}`); + } + + sendIndexProgress(''); + return { success: true, data: aggregatedIndex }; + } + + // Regular project: run analyzer directly + sendIndexProgress('progress.analyzing_structure'); + const analyzerPath = path.join(autoBuildSource, 'analyzer.py'); const indexOutputPath = path.join(project.path, AUTO_BUILD_PATHS.PROJECT_INDEX); // Get configured Python path (venv if ready, otherwise bundled/system) // This ensures we use the venv Python which has dependencies installed const pythonCmd = getConfiguredPythonPath(); - console.log('[project-context] Using Python:', pythonCmd); + debugLog('[project-context] Using Python:', pythonCmd); const [pythonCommand, pythonBaseArgs] = 
parsePythonCommand(pythonCmd); @@ -168,6 +412,7 @@ export function registerProjectContextHandlers( await new Promise((resolve, reject) => { let stdout = ''; let stderr = ''; + const ANALYZER_TIMEOUT_MS = 120_000; // 2 minutes const proc = spawn(pythonCommand, [ ...pythonBaseArgs, @@ -183,6 +428,12 @@ export function registerProjectContextHandlers( } }); + const timeout = setTimeout(() => { + debugLog(`[project-context] Analyzer timed out after ${ANALYZER_TIMEOUT_MS}ms, killing process`); + proc.kill('SIGTERM'); + reject(new Error(`Analyzer timed out after ${ANALYZER_TIMEOUT_MS / 1000}s`)); + }, ANALYZER_TIMEOUT_MS); + proc.stdout?.on('data', (data) => { stdout += data.toString('utf-8'); }); @@ -192,23 +443,27 @@ export function registerProjectContextHandlers( }); proc.on('close', (code: number) => { + clearTimeout(timeout); if (code === 0) { - console.log('[project-context] Analyzer stdout:', stdout); + debugLog('[project-context] Analyzer stdout:', stdout); resolve(); } else { - console.error('[project-context] Analyzer failed with code', code); - console.error('[project-context] Analyzer stderr:', stderr); - console.error('[project-context] Analyzer stdout:', stdout); + debugLog('[project-context] Analyzer failed with code', code); + debugLog('[project-context] Analyzer stderr:', stderr); + debugLog('[project-context] Analyzer stdout:', stdout); reject(new Error(`Analyzer exited with code ${code}: ${stderr || stdout}`)); } }); proc.on('error', (err) => { - console.error('[project-context] Analyzer spawn error:', err); + clearTimeout(timeout); + debugLog('[project-context] Analyzer spawn error:', err); reject(err); }); }); + sendIndexProgress(''); + // Read the new index const projectIndex = loadProjectIndex(project.path); if (projectIndex) { @@ -217,6 +472,7 @@ export function registerProjectContextHandlers( return { success: false, error: 'Failed to generate project index' }; } catch (error) { + sendIndexProgress(''); return { success: false, error: error 
instanceof Error ? error.message : 'Failed to refresh project index' diff --git a/apps/frontend/src/main/ipc-handlers/github/customer-github-handlers.ts b/apps/frontend/src/main/ipc-handlers/github/customer-github-handlers.ts new file mode 100644 index 0000000000..93529b5742 --- /dev/null +++ b/apps/frontend/src/main/ipc-handlers/github/customer-github-handlers.ts @@ -0,0 +1,432 @@ +/** + * Multi-repo GitHub Issues handlers for Customer projects. + * + * A Customer project aggregates issues from multiple child repositories. + * The GitHub token comes from the customer's own .env while each child + * repository supplies its own GITHUB_REPO value. + */ + +import { ipcMain } from 'electron'; +import { existsSync, readFileSync } from 'fs'; +import { execFile } from 'child_process'; +import { promisify } from 'util'; +import path from 'path'; +import { IPC_CHANNELS } from '../../../shared/constants'; +import type { IPCResult, GitHubIssue, MultiRepoGitHubStatus, MultiRepoIssuesResult, MultiRepoPRsResult } from '../../../shared/types'; +import { projectStore } from '../../project-store'; +import { getGitHubConfig, githubFetch, normalizeRepoReference } from './utils'; +import { getToolPath } from '../../cli-tool-manager'; +import type { GitHubAPIIssue } from './types'; +import { transformIssue } from './issue-handlers'; +import { parseEnvFile } from '../utils'; +import { debugLog } from '../../../shared/utils/debug-logger'; + +const execFileAsync = promisify(execFile); + +/** Cross-platform child path check using path.relative */ +function isChildPath(parentPath: string, candidatePath: string): boolean { + const rel = path.relative(parentPath, candidatePath); + return rel !== '' && !rel.startsWith('..') && !path.isAbsolute(rel); +} + +// ──────────────────────────────────────────────────────────────────────────── +// Shared helper +// ──────────────────────────────────────────────────────────────────────────── + +interface CustomerRepo { + projectId: string; + 
repoFullName: string; +} + +interface CustomerGitHubConfig { + token: string; + repos: CustomerRepo[]; +} + +/** + * Resolve the GitHub token and child-repo list for a Customer project. + * + * Token resolution order: + * 1. GITHUB_TOKEN from the customer's .env + * 2. Fallback to `getGitHubConfig(customer)?.token` (which also tries `gh` CLI) + * + * Each child repo's GITHUB_REPO is read from its own .env via `getGitHubConfig`. + */ +async function getCustomerGitHubConfig(customerId: string): Promise { + const customer = projectStore.getProject(customerId); + if (!customer) { + debugLog('[Customer GitHub] Customer project not found:', customerId); + return null; + } + + if (customer.type !== 'customer') { + debugLog('[Customer GitHub] Project is not a customer:', customerId); + return null; + } + + // 1. Resolve token from customer's .env + let token: string | undefined; + + if (customer.autoBuildPath) { + const envPath = path.join(customer.path, customer.autoBuildPath, '.env'); + if (existsSync(envPath)) { + try { + const content = readFileSync(envPath, 'utf-8'); + const vars = parseEnvFile(content); + token = vars['GITHUB_TOKEN']; + } catch { + // ignore read errors, fall through to fallback + } + } + } + + // Fallback: try getGitHubConfig which also checks gh CLI + if (!token) { + const fallbackConfig = getGitHubConfig(customer); + token = fallbackConfig?.token; + } + + if (!token) { + debugLog('[Customer GitHub] No GitHub token found for customer:', customerId); + return null; + } + + // 2. 
Discover child repos + const allProjects = projectStore.getProjects(); + const childProjects = allProjects.filter( + (p) => p.id !== customer.id && isChildPath(customer.path, p.path) + ); + + const repos: CustomerRepo[] = []; + + for (const child of childProjects) { + // Try .env first (if child has autoBuildPath and GITHUB_REPO configured) + const childConfig = getGitHubConfig(child); + if (childConfig?.repo) { + const normalized = normalizeRepoReference(childConfig.repo); + if (normalized) { + repos.push({ projectId: child.id, repoFullName: normalized }); + continue; + } + } + + // Fallback: detect from git remote origin (cloned repos have this) + try { + const { stdout } = await execFileAsync(getToolPath('git'), ['remote', 'get-url', 'origin'], { + encoding: 'utf-8', + cwd: child.path, + timeout: 5000, + }); + const remoteUrl = stdout.trim(); + + const match = remoteUrl.match(/github\.com[/:]([^/]+\/[^/]+?)(?:\.git)?$/); + if (match) { + const repoFullName = match[1]; + debugLog('[Customer GitHub] Detected repo from git remote:', repoFullName, 'for', child.path); + repos.push({ projectId: child.id, repoFullName }); + } + } catch { + debugLog('[Customer GitHub] Could not detect git remote for child:', child.path); + } + } + + debugLog('[Customer GitHub] Resolved config:', { + customerId, + hasToken: !!token, + repoCount: repos.length, + }); + + return { token, repos }; +} + +// ──────────────────────────────────────────────────────────────────────────── +// Handler 1: Check multi-repo connection +// ──────────────────────────────────────────────────────────────────────────── + +function registerCheckMultiRepoConnection(): void { + ipcMain.handle( + IPC_CHANNELS.GITHUB_CHECK_MULTI_REPO_CONNECTION, + async (_, customerId: string): Promise> => { + debugLog('[Customer GitHub] checkMultiRepoConnection called', { customerId }); + + const config = await getCustomerGitHubConfig(customerId); + if (!config) { + return { + success: true, + data: { + connected: false, + 
repos: [], + error: 'No GitHub token configured for this customer', + }, + }; + } + + return { + success: true, + data: { + connected: true, + repos: config.repos, + }, + }; + } + ); +} + +// ──────────────────────────────────────────────────────────────────────────── +// Handler 2: Get issues across all child repos +// ──────────────────────────────────────────────────────────────────────────── + +function registerGetMultiRepoIssues(): void { + ipcMain.handle( + IPC_CHANNELS.GITHUB_GET_MULTI_REPO_ISSUES, + async ( + _, + customerId: string, + state: 'open' | 'closed' | 'all' = 'open', + page: number = 1 + ): Promise> => { + // Validate IPC query parameters + const validStates = ['open', 'closed', 'all'] as const; + if (!validStates.includes(state)) { + return { success: false, error: `Invalid state parameter: ${String(state)}. Must be one of: ${validStates.join(', ')}` }; + } + if (typeof page !== 'number' || !Number.isFinite(page) || page < 1) { + page = 1; + } + + debugLog('[Customer GitHub] getMultiRepoIssues called', { customerId, state, page }); + + const config = await getCustomerGitHubConfig(customerId); + if (!config) { + return { success: false, error: 'No GitHub configuration found for this customer' }; + } + + if (config.repos.length === 0) { + return { + success: true, + data: { issues: [], repos: [], hasMore: false }, + }; + } + + try { + const allRepoNames = config.repos.map((r) => r.repoFullName); + + // Fetch issues from all repos in parallel + const settledResults = await Promise.allSettled( + config.repos.map(async (repo) => { + const endpoint = `/repos/${repo.repoFullName}/issues?state=${state}&per_page=50&sort=updated&page=${page}`; + const data = await githubFetch(config.token, endpoint); + return { repoFullName: repo.repoFullName, data }; + }) + ); + + const allIssues: GitHubIssue[] = []; + const perPage = 50; + let anyRepoHasMore = false; + + for (const result of settledResults) { + if (result.status === 'fulfilled') { + const { 
repoFullName, data } = result.value; + if (Array.isArray(data)) { + if (data.length === perPage) { + anyRepoHasMore = true; + } + const issuesOnly = (data as GitHubAPIIssue[]).filter( + (item) => !item.pull_request + ); + const transformed = issuesOnly.map((issue) => + transformIssue(issue, repoFullName) + ); + allIssues.push(...transformed); + } + } else { + debugLog('[Customer GitHub] Failed to fetch from repo:', result.reason); + } + } + + // Sort by updatedAt descending + allIssues.sort( + (a, b) => new Date(b.updatedAt).getTime() - new Date(a.updatedAt).getTime() + ); + + debugLog('[Customer GitHub] Returning', allIssues.length, 'issues from', allRepoNames.length, 'repos'); + + return { + success: true, + data: { + issues: allIssues, + repos: allRepoNames, + hasMore: anyRepoHasMore, + }, + }; + } catch (error) { + debugLog('[Customer GitHub] Error fetching multi-repo issues:', error); + return { + success: false, + error: error instanceof Error ? error.message : 'Failed to fetch multi-repo issues', + }; + } + } + ); +} + +// ──────────────────────────────────────────────────────────────────────────── +// Handler 3: Get single issue detail from a specific repo +// ──────────────────────────────────────────────────────────────────────────── + +function registerGetMultiRepoIssueDetail(): void { + ipcMain.handle( + IPC_CHANNELS.GITHUB_GET_MULTI_REPO_ISSUE_DETAIL, + async ( + _, + customerId: string, + repoFullName: string, + issueNumber: number + ): Promise> => { + // Validate issueNumber + if (typeof issueNumber !== 'number' || !Number.isFinite(issueNumber) || issueNumber < 1) { + return { success: false, error: `Invalid issue number: ${String(issueNumber)}` }; + } + + debugLog('[Customer GitHub] getMultiRepoIssueDetail called', { + customerId, + repoFullName, + issueNumber, + }); + + const config = await getCustomerGitHubConfig(customerId); + if (!config) { + return { success: false, error: 'No GitHub configuration found for this customer' }; + } + + // Validate 
that the requested repo belongs to this customer's configured repos + const isValidRepo = config.repos.some(r => r.repoFullName === repoFullName); + if (!isValidRepo) { + return { success: false, error: `Repository ${repoFullName} is not configured for this customer` }; + } + + try { + const issue = (await githubFetch( + config.token, + `/repos/${repoFullName}/issues/${issueNumber}` + )) as GitHubAPIIssue; + + const result = transformIssue(issue, repoFullName); + + return { success: true, data: result }; + } catch (error) { + debugLog('[Customer GitHub] Error fetching issue detail:', error); + return { + success: false, + error: error instanceof Error ? error.message : 'Failed to fetch issue detail', + }; + } + } + ); +} + +// ──────────────────────────────────────────────────────────────────────────── +// Handler 4: Get PRs across all child repos +// ──────────────────────────────────────────────────────────────────────────── + +function registerGetMultiRepoPRs(): void { + ipcMain.handle( + IPC_CHANNELS.GITHUB_GET_MULTI_REPO_PRS, + async (_, customerId: string): Promise> => { + debugLog('[Customer GitHub] getMultiRepoPRs called', { customerId }); + + const config = await getCustomerGitHubConfig(customerId); + if (!config) { + return { success: false, error: 'No GitHub configuration found for this customer' }; + } + + if (config.repos.length === 0) { + return { + success: true, + data: { prs: [], repos: [] }, + }; + } + + try { + const allRepoNames = config.repos.map((r) => r.repoFullName); + + // Fetch open PRs from all repos in parallel + const settledResults = await Promise.allSettled( + config.repos.map(async (repo) => { + const endpoint = `/repos/${repo.repoFullName}/pulls?state=open&sort=updated&direction=desc&per_page=50`; + const data = await githubFetch(config.token, endpoint); + return { repoFullName: repo.repoFullName, data }; + }) + ); + + const allPRs: MultiRepoPRsResult['prs'] = []; + + for (const result of settledResults) { + if (result.status === 
'fulfilled') { + const { repoFullName, data } = result.value; + if (Array.isArray(data)) { + // TODO: Add a typed interface (e.g. GitHubAPIPullRequest) for the GitHub PR API response shape + // biome-ignore lint/suspicious/noExplicitAny: GitHub REST API response shape + const transformed = (data as any[]).map((pr) => ({ + number: pr.number, + title: pr.title, + body: pr.body || '', + state: pr.state.toLowerCase(), + author: { login: pr.user.login }, + headRefName: pr.head.ref, + baseRefName: pr.base.ref, + additions: pr.additions ?? 0, + deletions: pr.deletions ?? 0, + changedFiles: pr.changed_files ?? 0, + // biome-ignore lint/suspicious/noExplicitAny: GitHub REST API assignee shape + assignees: (pr.assignees || []).map((a: any) => ({ login: a.login })), + createdAt: pr.created_at, + updatedAt: pr.updated_at, + htmlUrl: pr.html_url, + repoFullName, + })); + allPRs.push(...transformed); + } + } else { + debugLog('[Customer GitHub] Failed to fetch PRs from repo:', result.reason); + } + } + + // Sort by updatedAt descending + allPRs.sort( + (a, b) => new Date(b.updatedAt).getTime() - new Date(a.updatedAt).getTime() + ); + + debugLog('[Customer GitHub] Returning', allPRs.length, 'PRs from', allRepoNames.length, 'repos'); + + return { + success: true, + data: { + prs: allPRs, + repos: allRepoNames, + }, + }; + } catch (error) { + debugLog('[Customer GitHub] Error fetching multi-repo PRs:', error); + return { + success: false, + error: error instanceof Error ? 
error.message : 'Failed to fetch multi-repo PRs', + }; + } + } + ); +} + +// ──────────────────────────────────────────────────────────────────────────── +// Public registration +// ──────────────────────────────────────────────────────────────────────────── + +/** + * Register all Customer multi-repo GitHub IPC handlers + */ +export function registerCustomerGitHubHandlers(): void { + registerCheckMultiRepoConnection(); + registerGetMultiRepoIssues(); + registerGetMultiRepoIssueDetail(); + registerGetMultiRepoPRs(); +} diff --git a/apps/frontend/src/main/ipc-handlers/github/index.ts b/apps/frontend/src/main/ipc-handlers/github/index.ts index 02616cda01..a0b327efbd 100644 --- a/apps/frontend/src/main/ipc-handlers/github/index.ts +++ b/apps/frontend/src/main/ipc-handlers/github/index.ts @@ -25,6 +25,7 @@ import { registerGithubOAuthHandlers } from './oauth-handlers'; import { registerAutoFixHandlers } from './autofix-handlers'; import { registerPRHandlers } from './pr-handlers'; import { registerTriageHandlers } from './triage-handlers'; +import { registerCustomerGitHubHandlers } from './customer-github-handlers'; /** * Register all GitHub-related IPC handlers @@ -42,6 +43,7 @@ export function registerGithubHandlers( registerAutoFixHandlers(agentManager, getMainWindow); registerPRHandlers(getMainWindow); registerTriageHandlers(getMainWindow); + registerCustomerGitHubHandlers(); } // Re-export utilities for potential external use diff --git a/apps/frontend/src/main/ipc-handlers/github/issue-handlers.ts b/apps/frontend/src/main/ipc-handlers/github/issue-handlers.ts index a3be6a1fb3..6b414321a3 100644 --- a/apps/frontend/src/main/ipc-handlers/github/issue-handlers.ts +++ b/apps/frontend/src/main/ipc-handlers/github/issue-handlers.ts @@ -19,7 +19,7 @@ const MAX_PAGES_FETCH_ALL = 30; // Max API pages to fetch in fetchAll mode /** * Transform GitHub API issue to application format */ -function transformIssue(issue: GitHubAPIIssue, repoFullName: string): GitHubIssue { 
+export function transformIssue(issue: GitHubAPIIssue, repoFullName: string): GitHubIssue { return { id: issue.id, number: issue.number, diff --git a/apps/frontend/src/main/ipc-handlers/github/oauth-handlers.ts b/apps/frontend/src/main/ipc-handlers/github/oauth-handlers.ts index 37adb6bb2d..52d8930af3 100644 --- a/apps/frontend/src/main/ipc-handlers/github/oauth-handlers.ts +++ b/apps/frontend/src/main/ipc-handlers/github/oauth-handlers.ts @@ -5,6 +5,8 @@ import { ipcMain, shell, BrowserWindow } from 'electron'; import { execSync, execFileSync, execFile, spawn } from 'child_process'; +import { existsSync } from 'fs'; +import path from 'path'; import { promisify } from 'util'; import { IPC_CHANNELS } from '../../../shared/constants'; import type { IPCResult } from '../../../shared/types'; @@ -553,8 +555,9 @@ export function registerListUserRepos(): void { // Use gh repo list to get user's repositories // Format: owner/repo, description, visibility debugLog('Running: gh repo list --limit 100 --json nameWithOwner,description,isPrivate'); - const output = execSync( - 'gh repo list --limit 100 --json nameWithOwner,description,isPrivate', + const output = execFileSync( + getToolPath('gh'), + ['repo', 'list', '--limit', '100', '--json', 'nameWithOwner,description,isPrivate'], { encoding: 'utf-8', stdio: 'pipe', @@ -660,7 +663,7 @@ export function registerGetGitHubBranches(): void { const apiEndpoint = `repos/${repo}/branches`; debugLog(`Running: gh api ${apiEndpoint} --paginate --jq '.[].name'`); const output = execFileSync( - 'gh', + getToolPath('gh'), ['api', apiEndpoint, '--paginate', '--jq', '.[].name'], { encoding: 'utf-8', @@ -740,7 +743,7 @@ export function registerCreateGitHubRepo(): void { args.push('--push'); debugLog('Running: gh', args); - const output = execFileSync('gh', args, { + const output = execFileSync(getToolPath('gh'), args, { encoding: 'utf-8', cwd: options.projectPath, stdio: 'pipe', @@ -814,7 +817,7 @@ export function registerAddGitRemote(): void 
{ // Add the remote debugLog('Adding remote origin:', remoteUrl); - execFileSync('git', ['remote', 'add', 'origin', remoteUrl], { + execFileSync(getToolPath('git'), ['remote', 'add', 'origin', remoteUrl], { cwd: projectPath, encoding: 'utf-8', stdio: 'pipe' @@ -887,6 +890,64 @@ export function registerListGitHubOrgs(): void { ); } +/** + * Clone a GitHub repository into a target directory + */ +export function registerCloneGitHubRepo(): void { + ipcMain.handle( + IPC_CHANNELS.GITHUB_CLONE_REPO, + async ( + _event: Electron.IpcMainInvokeEvent, + repoFullName: string, + targetDir: string + ): Promise> => { + debugLog('cloneGitHubRepo handler called', { repoFullName, targetDir }); + try { + // Validate repo format before any operations + if (!isValidGitHubRepo(repoFullName)) { + return { + success: false, + error: 'Invalid repository format. Expected: owner/repo' + }; + } + + // Extract repo name from fullName (owner/repo -> repo) + const repoName = repoFullName.split('/').pop() || repoFullName; + const clonePath = path.join(targetDir, repoName); + + // Check if directory already exists + if (existsSync(clonePath)) { + return { + success: false, + error: `Directory already exists: ${clonePath}` + }; + } + + // Clone using gh CLI (uses authenticated session) + debugLog(`Running: gh repo clone ${repoFullName} ${clonePath}`); + execFileSync(getToolPath('gh'), ['repo', 'clone', repoFullName, clonePath], { + encoding: 'utf-8', + stdio: 'pipe', + env: getAugmentedEnv(), + timeout: 120000 // 2 minute timeout for large repos + }); + + debugLog('Clone successful:', clonePath); + return { + success: true, + data: { path: clonePath, name: repoName } + }; + } catch (error) { + debugLog('Failed to clone repo:', error instanceof Error ? error.message : error); + return { + success: false, + error: error instanceof Error ? 
error.message : 'Failed to clone repository' + }; + } + } + ); +} + /** * Register all GitHub OAuth handlers */ @@ -903,5 +964,6 @@ export function registerGithubOAuthHandlers(): void { registerCreateGitHubRepo(); registerAddGitRemote(); registerListGitHubOrgs(); + registerCloneGitHubRepo(); debugLog('GitHub OAuth handlers registered'); } diff --git a/apps/frontend/src/main/ipc-handlers/index.ts b/apps/frontend/src/main/ipc-handlers/index.ts index fdd7c5b728..e2ddc00d5d 100644 --- a/apps/frontend/src/main/ipc-handlers/index.ts +++ b/apps/frontend/src/main/ipc-handlers/index.ts @@ -31,6 +31,8 @@ import { registerAppUpdateHandlers } from './app-update-handlers'; import { registerDebugHandlers } from './debug-handlers'; import { registerClaudeCodeHandlers } from './claude-code-handlers'; import { registerMcpHandlers } from './mcp-handlers'; +import { registerClaudeMcpHandlers } from './claude-mcp-handlers'; +import { registerClaudeAgentsHandlers } from './claude-agents-handlers'; import { registerProfileHandlers } from './profile-handlers'; import { registerScreenshotHandlers } from './screenshot-handlers'; import { registerTerminalWorktreeIpcHandlers } from './terminal'; @@ -120,6 +122,12 @@ export function setupIpcHandlers( // MCP server health check handlers registerMcpHandlers(); + // Claude Code global MCP configuration handlers + registerClaudeMcpHandlers(); + + // Claude Code custom agents handlers + registerClaudeAgentsHandlers(); + // API Profile handlers (custom Anthropic-compatible endpoints) registerProfileHandlers(); @@ -152,6 +160,8 @@ export { registerDebugHandlers, registerClaudeCodeHandlers, registerMcpHandlers, + registerClaudeMcpHandlers, + registerClaudeAgentsHandlers, registerProfileHandlers, registerScreenshotHandlers }; diff --git a/apps/frontend/src/main/ipc-handlers/mcp-handlers.ts b/apps/frontend/src/main/ipc-handlers/mcp-handlers.ts index 2a9d82420e..b5d64dcb14 100644 --- a/apps/frontend/src/main/ipc-handlers/mcp-handlers.ts +++ 
b/apps/frontend/src/main/ipc-handlers/mcp-handlers.ts @@ -254,6 +254,77 @@ async function checkCommandHealth(server: CustomMcpServer, startTime: number): P }); } +/** + * Health check for global MCPs from Claude Code config. + * These servers come from a trusted source (~/.claude.json, ~/.claude/settings.json) + * so we skip the command allowlist validation. For command-based servers, we check + * if the command exists in PATH. For HTTP servers, we probe the URL. + */ +async function checkGlobalMcpHealth(server: CustomMcpServer): Promise { + const startTime = Date.now(); + + if (server.type === 'http') { + // HTTP servers: reuse existing check (no allowlist involved) + return checkHttpHealth(server, startTime); + } + + // Command-based servers: just verify the command exists (no allowlist filter) + if (!server.command) { + return { + serverId: server.id, + status: 'unhealthy', + message: 'No command configured', + checkedAt: new Date().toISOString(), + }; + } + + return new Promise((resolve) => { + const whichCmd = isWindows() ? 
getWhereExePath() : 'which'; + const proc = spawn(whichCmd, [server.command!], { + timeout: 5000, + windowsHide: true, + }); + + let found = false; + + proc.stdout.on('data', () => { + found = true; + }); + + proc.on('close', (code) => { + const responseTime = Date.now() - startTime; + if (code === 0 || found) { + resolve({ + serverId: server.id, + status: 'healthy', + message: `Available — '${server.command}' found (starts on demand)`, + responseTime, + checkedAt: new Date().toISOString(), + }); + } else { + resolve({ + serverId: server.id, + status: 'unhealthy', + message: `Command '${server.command}' not found in PATH`, + responseTime, + checkedAt: new Date().toISOString(), + }); + } + }); + + proc.on('error', (error: Error) => { + const responseTime = Date.now() - startTime; + resolve({ + serverId: server.id, + status: 'unhealthy', + message: `Failed to check: ${error.message}`, + responseTime, + checkedAt: new Date().toISOString(), + }); + }); + }); +} + /** * Full MCP connection test - actually connects to the server and tries to list tools. * This is more thorough but slower than the health check. @@ -566,6 +637,20 @@ export function registerMcpHandlers(): void { } }); + // Health check for global MCPs (from Claude Code config — trusted source, skip allowlist) + ipcMain.handle(IPC_CHANNELS.MCP_CHECK_GLOBAL_HEALTH, async (_event, server: CustomMcpServer) => { + try { + const result = await checkGlobalMcpHealth(server); + return { success: true, data: result }; + } catch (error) { + appLog.error('Global MCP health check error:', error); + return { + success: false, + error: error instanceof Error ? 
error.message : 'Health check failed', + }; + } + }); + // Full connection test ipcMain.handle(IPC_CHANNELS.MCP_TEST_CONNECTION, async (_event, server: CustomMcpServer) => { try { diff --git a/apps/frontend/src/main/ipc-handlers/memory-handlers.ts b/apps/frontend/src/main/ipc-handlers/memory-handlers.ts index 05741373c0..aa4a0dbb02 100644 --- a/apps/frontend/src/main/ipc-handlers/memory-handlers.ts +++ b/apps/frontend/src/main/ipc-handlers/memory-handlers.ts @@ -33,6 +33,84 @@ import { parsePythonCommand } from '../python-detector'; import { getConfiguredPythonPath, pythonEnvManager } from '../python-env-manager'; import { openTerminalWithCommand } from './claude-code-handlers'; +/** + * Known Ollama embedding model dimensions. + * Single source of truth for the frontend — mirrors the backend's + * KNOWN_EMBEDDING_MODELS in ollama_model_detector.py and + * KNOWN_OLLAMA_EMBEDDING_MODELS in ollama_embedder.py. + */ +const KNOWN_OLLAMA_EMBEDDING_DIMS: Record = { + 'embeddinggemma': 768, + 'embeddinggemma:300m': 768, + 'qwen3-embedding': 1024, + 'qwen3-embedding:0.6b': 1024, + 'qwen3-embedding:4b': 2560, + 'qwen3-embedding:8b': 4096, + 'nomic-embed-text': 768, + 'nomic-embed-text:latest': 768, + 'mxbai-embed-large': 1024, + 'mxbai-embed-large:latest': 1024, + 'bge-large': 1024, + 'bge-large:latest': 1024, + 'bge-large-en': 1024, + 'bge-base-en': 768, + 'bge-small-en': 384, + 'bge-m3': 1024, + 'bge-m3:latest': 1024, + 'all-minilm': 384, + 'all-minilm:latest': 384, + 'snowflake-arctic-embed': 1024, + 'jina-embeddings-v2-base-en': 768, + 'e5-small': 384, + 'e5-base': 768, + 'e5-large': 1024, + 'paraphrase-multilingual': 768, +}; + +/** + * Look up the embedding dimension for an Ollama model. + * Tries exact match, base-name match, prefix match, then heuristic fallback. 
+ */ +function lookupEmbeddingDim(modelName: string): { dim: number; source: 'known' | 'fallback' } | null { + const nameLower = modelName.toLowerCase(); + + // Exact match + if (nameLower in KNOWN_OLLAMA_EMBEDDING_DIMS) { + return { dim: KNOWN_OLLAMA_EMBEDDING_DIMS[nameLower], source: 'known' }; + } + + // Base name match (strip :tag) + const baseName = nameLower.split(':')[0]; + if (baseName in KNOWN_OLLAMA_EMBEDDING_DIMS) { + return { dim: KNOWN_OLLAMA_EMBEDDING_DIMS[baseName], source: 'known' }; + } + + // Prefix match + for (const [key, dim] of Object.entries(KNOWN_OLLAMA_EMBEDDING_DIMS)) { + if (nameLower.startsWith(key)) { + return { dim, source: 'known' }; + } + } + + // Heuristic fallback based on name patterns. + // WARNING: These are guesses and may be incorrect for unknown models. + // The 'fallback' source flag allows callers to surface this uncertainty. + if (nameLower.includes('large')) { + console.warn(`[OllamaEmbedding] Using heuristic dimension guess (1024) for unknown model: ${modelName}`); + return { dim: 1024, source: 'fallback' }; + } + if (nameLower.includes('base')) { + console.warn(`[OllamaEmbedding] Using heuristic dimension guess (768) for unknown model: ${modelName}`); + return { dim: 768, source: 'fallback' }; + } + if (nameLower.includes('small') || nameLower.includes('mini')) { + console.warn(`[OllamaEmbedding] Using heuristic dimension guess (384) for unknown model: ${modelName}`); + return { dim: 384, source: 'fallback' }; + } + + return null; +} + /** * Ollama Service Status * Contains information about Ollama service availability and configuration @@ -863,4 +941,45 @@ export function registerMemoryHandlers(): void { } } ); + + // ============================================ + // Ollama Embedding Dimension Lookup + // ============================================ + + /** + * Get the embedding dimension for an Ollama model. 
+ * Single source of truth — the renderer calls this instead of + * maintaining its own hardcoded dimension map. + */ + ipcMain.handle( + IPC_CHANNELS.OLLAMA_GET_EMBEDDING_DIM, + async ( + _, + modelName: string + ): Promise> => { + try { + if (!modelName || typeof modelName !== 'string') { + return { success: false, error: 'Model name is required' }; + } + + const result = lookupEmbeddingDim(modelName); + if (result) { + return { + success: true, + data: { model: modelName, dim: result.dim, source: result.source }, + }; + } + + return { + success: false, + error: `Unknown embedding model: ${modelName}. Dimension could not be determined.`, + }; + } catch (error) { + return { + success: false, + error: error instanceof Error ? error.message : 'Failed to get embedding dimension', + }; + } + } + ); } diff --git a/apps/frontend/src/main/ipc-handlers/project-handlers.ts b/apps/frontend/src/main/ipc-handlers/project-handlers.ts index 20c5403bd4..ab3a954852 100644 --- a/apps/frontend/src/main/ipc-handlers/project-handlers.ts +++ b/apps/frontend/src/main/ipc-handlers/project-handlers.ts @@ -1,5 +1,6 @@ import { ipcMain } from 'electron'; -import { existsSync } from 'fs'; +import { existsSync, mkdirSync } from 'fs'; +import path from 'path'; import { execFileSync } from 'child_process'; import { IPC_CHANNELS } from '../../shared/constants'; import type { @@ -27,6 +28,7 @@ import { insightsService } from '../insights-service'; import { titleGenerator } from '../title-generator'; import type { BrowserWindow } from 'electron'; import { getEffectiveSourcePath } from '../updater/path-resolver'; +import { debugLog } from '../../shared/utils/debug-logger'; // ============================================ // Git Helper Functions @@ -299,14 +301,19 @@ export function registerProjectHandlers( ipcMain.handle( IPC_CHANNELS.PROJECT_ADD, - async (_, projectPath: string): Promise> => { + async (_, projectPath: string, type?: 'project' | 'customer'): Promise> => { try { + // Validate type 
parameter against allowed values + if (type !== undefined && type !== 'project' && type !== 'customer') { + return { success: false, error: 'Invalid project type' }; + } + // Validate path exists if (!existsSync(projectPath)) { return { success: false, error: 'Directory does not exist' }; } - const project = projectStore.addProject(projectPath); + const project = projectStore.addProject(projectPath, undefined, type); return { success: true, data: project }; } catch (error) { return { @@ -364,7 +371,7 @@ export function registerProjectHandlers( IPC_CHANNELS.TAB_STATE_GET, async (): Promise> => { const tabState = projectStore.getTabState(); - console.log('[IPC] TAB_STATE_GET returning:', tabState); + debugLog('[IPC] TAB_STATE_GET returning:', tabState); return { success: true, data: tabState }; } ); @@ -375,7 +382,7 @@ export function registerProjectHandlers( _, tabState: { openProjectIds: string[]; activeProjectId: string | null; tabOrder: string[] } ): Promise => { - console.log('[IPC] TAB_STATE_SAVE called with:', tabState); + debugLog('[IPC] TAB_STATE_SAVE called with:', tabState); projectStore.saveTabState(tabState); return { success: true }; } @@ -494,6 +501,45 @@ export function registerProjectHandlers( } ); + // Initialize customer project — creates .auto-claude/ without requiring git + ipcMain.handle( + IPC_CHANNELS.PROJECT_INIT_CUSTOMER, + async (_, projectId: string): Promise> => { + try { + const project = projectStore.getProject(projectId); + if (!project) { + return { success: false, error: 'Project not found' }; + } + + if (project.type !== 'customer') { + return { success: false, error: 'Project is not a customer project' }; + } + + // Validate that the project root directory still exists before creating subdirectory. + // This prevents silently recreating deleted/moved project directories. 
+ if (!existsSync(project.path)) { + return { success: false, error: `Project directory does not exist: ${project.path}` }; + } + + const dotAutoClaude = path.join(project.path, '.auto-claude'); + + if (!existsSync(dotAutoClaude)) { + mkdirSync(dotAutoClaude, { recursive: true }); + } + + projectStore.updateAutoBuildPath(projectId, '.auto-claude'); + // Ensure customer type is persisted (safety net for projects created before type persistence) + projectStore.updateProjectType(projectId, 'customer'); + return { success: true, data: { success: true } }; + } catch (error) { + return { + success: false, + error: error instanceof Error ? error.message : 'Unknown error' + }; + } + } + ); + // PROJECT_CHECK_VERSION now just checks if project is initialized // Version tracking for .auto-claude is removed since it only contains data ipcMain.handle( diff --git a/apps/frontend/src/main/ipc-handlers/settings-handlers.ts b/apps/frontend/src/main/ipc-handlers/settings-handlers.ts index 697711049a..c585c5c795 100644 --- a/apps/frontend/src/main/ipc-handlers/settings-handlers.ts +++ b/apps/frontend/src/main/ipc-handlers/settings-handlers.ts @@ -22,6 +22,8 @@ import { setUpdateChannel, setUpdateChannelWithDowngradeCheck } from '../app-upd import { getSettingsPath, readSettingsFile } from '../settings-utils'; import { configureTools, getToolPath, getToolInfo, isPathFromWrongPlatform, preWarmToolCache } from '../cli-tool-manager'; import { parseEnvFile } from './utils'; +import { getClaudeProfileManager } from '../claude-profile-manager'; +import { getCredentialsFromKeychain } from '../claude-profile/credential-utils'; const settingsPath = getSettingsPath(); @@ -668,13 +670,14 @@ export function registerSettingsHandlers( const globalSettings = { ...DEFAULT_APP_SETTINGS, ...savedSettings }; if (!sourcePath) { - // Even without source path, check global token + // Even without source path, check global token or keychain const globalToken = globalSettings.globalClaudeOAuthToken; + const 
hasToken = isNonBlankToken(globalToken) || hasKeychainClaudeToken(); return { success: true, data: { - hasClaudeToken: !!globalToken && globalToken.length > 0, - claudeOAuthToken: globalToken, + hasClaudeToken: hasToken, + claudeOAuthToken: isNonBlankToken(globalToken) ? globalToken : undefined, envExists: false } }; @@ -689,15 +692,20 @@ export function registerSettingsHandlers( const content = readFileSync(envPath, 'utf-8'); const vars = parseEnvFile(content); claudeOAuthToken = vars['CLAUDE_CODE_OAUTH_TOKEN']; - hasClaudeToken = !!claudeOAuthToken && claudeOAuthToken.length > 0; + hasClaudeToken = isNonBlankToken(claudeOAuthToken); } // Fallback to global settings if no token in source .env - if (!hasClaudeToken && globalSettings.globalClaudeOAuthToken) { + if (!hasClaudeToken && isNonBlankToken(globalSettings.globalClaudeOAuthToken)) { claudeOAuthToken = globalSettings.globalClaudeOAuthToken; hasClaudeToken = true; } + // Fallback to keychain if no token found in .env or global settings + if (!hasClaudeToken) { + hasClaudeToken = hasKeychainClaudeToken(); + } + return { success: true, data: { @@ -775,6 +783,33 @@ export function registerSettingsHandlers( } ); + /** + * Check whether a non-blank token string is present. + * Rejects undefined, null, and whitespace-only strings. + */ + function isNonBlankToken(token: string | undefined | null): boolean { + return typeof token === 'string' && token.trim().length > 0; + } + + /** + * Check whether a valid Claude token exists in the OS keychain. + * Tries the profile manager first, then falls back to reading the keychain directly. 
+ */ + function hasKeychainClaudeToken(): boolean { + try { + const profileManager = getClaudeProfileManager(); + if (profileManager.hasValidAuth()) return true; + } catch { + // profile manager may not be initialized + } + try { + const creds = getCredentialsFromKeychain(); + return typeof creds.token === 'string' && creds.token.trim().length > 0; + } catch { + return false; + } + } + ipcMain.handle( IPC_CHANNELS.AUTOBUILD_SOURCE_ENV_CHECK_TOKEN, async (): Promise> => { @@ -787,7 +822,7 @@ export function registerSettingsHandlers( // Check global token first as it's the primary method const globalToken = globalSettings.globalClaudeOAuthToken; - const hasGlobalToken = !!globalToken && globalToken.length > 0; + const hasGlobalToken = isNonBlankToken(globalToken); if (!sourcePath) { // In production, no source path is acceptable if global token exists @@ -800,6 +835,17 @@ export function registerSettingsHandlers( } }; } + + if (hasKeychainClaudeToken()) { + return { + success: true, + data: { + hasToken: true, + sourcePath: isProduction ? 
app.getPath('userData') : undefined + } + }; + } + return { success: true, data: { @@ -817,11 +863,11 @@ export function registerSettingsHandlers( const content = readFileSync(envPath, 'utf-8'); const vars = parseEnvFile(content); const token = vars['CLAUDE_CODE_OAUTH_TOKEN']; - hasEnvToken = !!token && token.length > 0; + hasEnvToken = isNonBlankToken(token); } - // Token exists if either source .env has it OR global settings has it - const hasToken = hasEnvToken || hasGlobalToken; + // Token exists if source .env, global settings, OR Keychain has it + const hasToken = hasEnvToken || hasGlobalToken || hasKeychainClaudeToken(); return { success: true, diff --git a/apps/frontend/src/main/project-store.ts b/apps/frontend/src/main/project-store.ts index cca93eeeb0..7e32d9f62f 100644 --- a/apps/frontend/src/main/project-store.ts +++ b/apps/frontend/src/main/project-store.ts @@ -68,6 +68,10 @@ export class ProjectStore { createdAt: new Date(p.createdAt), updatedAt: new Date(p.updatedAt) })); + // Migration: auto-detect customer projects created before type persistence + if (this.migrateCustomerTypes(data.projects)) { + writeFileAtomicSync(this.storePath, JSON.stringify(data, null, 2)); + } return data; } catch { return { projects: [], settings: {} }; @@ -76,6 +80,42 @@ export class ProjectStore { return { projects: [], settings: {} }; } + /** + * Migration: detect customer projects that were created before type persistence. + * A customer project is identified by having child projects nested inside its path + * and no .git directory (customer folders are plain directories, not git repos). + * Returns true if any projects were migrated. 
+ */ + private migrateCustomerTypes(projects: Project[]): boolean { + let changed = false; + const allPaths = projects.map(p => p.path); + + for (const project of projects) { + if (project.type) continue; // Already has type, skip + + // Check if this project has children (other projects nested inside its path) + // Use path.relative() for cross-platform compatibility (avoids hardcoded '/' separator) + const hasChildren = allPaths.some(otherPath => { + if (otherPath === project.path) return false; + const rel = path.relative(project.path, otherPath); + return rel.length > 0 && !rel.startsWith('..') && !path.isAbsolute(rel); + }); + + if (hasChildren) { + // A project with children and no git is a customer folder + const gitPath = path.join(project.path, '.git'); + if (!existsSync(gitPath)) { + project.type = 'customer'; + project.updatedAt = new Date(); + changed = true; + console.warn(`[ProjectStore] Migration: Marked "${project.name}" as customer (has child projects, no git)`); + } + } + } + + return changed; + } + /** * Save store to disk */ @@ -86,7 +126,7 @@ export class ProjectStore { /** * Add a new project */ - addProject(projectPath: string, name?: string): Project { + addProject(projectPath: string, name?: string, type?: 'project' | 'customer'): Project { // CRITICAL: Normalize to absolute path for dev mode compatibility // This prevents path resolution issues after app restart const absolutePath = ensureAbsolutePath(projectPath); @@ -118,7 +158,8 @@ export class ProjectStore { autoBuildPath, settings: { ...DEFAULT_PROJECT_SETTINGS }, createdAt: new Date(), - updatedAt: new Date() + updatedAt: new Date(), + ...(type && { type }) }; this.data.projects.push(project); @@ -140,6 +181,19 @@ export class ProjectStore { return project; } + /** + * Update project type (e.g., 'customer') + */ + updateProjectType(projectId: string, type: 'project' | 'customer'): Project | undefined { + const project = this.data.projects.find((p) => p.id === projectId); + if 
(project) { + project.type = type; + project.updatedAt = new Date(); + this.save(); + } + return project; + } + /** * Remove a project */ diff --git a/apps/frontend/src/preload/api/modules/github-api.ts b/apps/frontend/src/preload/api/modules/github-api.ts index c2115eb110..6c3b1a1196 100644 --- a/apps/frontend/src/preload/api/modules/github-api.ts +++ b/apps/frontend/src/preload/api/modules/github-api.ts @@ -10,7 +10,10 @@ import type { VersionSuggestion, PaginatedIssuesResult, PRStatusUpdate, - PollingMetadata + PollingMetadata, + MultiRepoGitHubStatus, + MultiRepoIssuesResult, + MultiRepoPRsResult } from '../../../shared/types'; import { createIpcListener, invokeIpc, sendIpc, IpcListenerCleanup } from './ipc-utils'; @@ -166,6 +169,21 @@ export interface GitHubAPI { getGitHubIssue: (projectId: string, issueNumber: number) => Promise>; getIssueComments: (projectId: string, issueNumber: number) => Promise>; checkGitHubConnection: (projectId: string) => Promise>; + + // Customer multi-repo operations + checkMultiRepoConnection: (customerId: string) => Promise>; + getMultiRepoIssues: ( + customerId: string, + state?: 'open' | 'closed' | 'all', + page?: number + ) => Promise>; + getMultiRepoIssueDetail: ( + customerId: string, + repoFullName: string, + issueNumber: number + ) => Promise>; + getMultiRepoPRs: (customerId: string) => Promise>; + investigateGitHubIssue: (projectId: string, issueNumber: number, selectedCommentIds?: number[]) => void; importGitHubIssues: (projectId: string, issueNumbers: number[]) => Promise>; createGitHubRelease: ( @@ -185,6 +203,7 @@ export interface GitHubAPI { getGitHubToken: () => Promise>; getGitHubUser: () => Promise>; listGitHubUserRepos: () => Promise }>>; + cloneGitHubRepo: (repoFullName: string, targetDir: string) => Promise>; // OAuth event listener - receives device code immediately when extracted onGitHubAuthDeviceCode: ( @@ -554,6 +573,27 @@ export const createGitHubAPI = (): GitHubAPI => ({ checkGitHubConnection: (projectId: 
string): Promise> => invokeIpc(IPC_CHANNELS.GITHUB_CHECK_CONNECTION, projectId), + // Customer multi-repo operations + checkMultiRepoConnection: (customerId: string): Promise> => + invokeIpc(IPC_CHANNELS.GITHUB_CHECK_MULTI_REPO_CONNECTION, customerId), + + getMultiRepoIssues: ( + customerId: string, + state?: 'open' | 'closed' | 'all', + page?: number + ): Promise> => + invokeIpc(IPC_CHANNELS.GITHUB_GET_MULTI_REPO_ISSUES, customerId, state, page), + + getMultiRepoIssueDetail: ( + customerId: string, + repoFullName: string, + issueNumber: number + ): Promise> => + invokeIpc(IPC_CHANNELS.GITHUB_GET_MULTI_REPO_ISSUE_DETAIL, customerId, repoFullName, issueNumber), + + getMultiRepoPRs: (customerId: string) => + invokeIpc(IPC_CHANNELS.GITHUB_GET_MULTI_REPO_PRS, customerId), + investigateGitHubIssue: (projectId: string, issueNumber: number, selectedCommentIds?: number[]): void => sendIpc(IPC_CHANNELS.GITHUB_INVESTIGATE_ISSUE, projectId, issueNumber, selectedCommentIds), @@ -590,6 +630,9 @@ export const createGitHubAPI = (): GitHubAPI => ({ listGitHubUserRepos: (): Promise }>> => invokeIpc(IPC_CHANNELS.GITHUB_LIST_USER_REPOS), + cloneGitHubRepo: (repoFullName: string, targetDir: string): Promise> => + invokeIpc(IPC_CHANNELS.GITHUB_CLONE_REPO, repoFullName, targetDir), + // OAuth event listener - receives device code immediately when extracted (during auth process) onGitHubAuthDeviceCode: ( callback: (data: { deviceCode: string; authUrl: string; browserOpened: boolean }) => void diff --git a/apps/frontend/src/preload/api/modules/mcp-api.ts b/apps/frontend/src/preload/api/modules/mcp-api.ts index 91a951b7e9..f8561957c0 100644 --- a/apps/frontend/src/preload/api/modules/mcp-api.ts +++ b/apps/frontend/src/preload/api/modules/mcp-api.ts @@ -1,19 +1,27 @@ /** * MCP Server API * - * Exposes MCP health check and connection test functionality to the renderer. + * Exposes MCP health check, connection test, and global MCP configuration + * functionality to the renderer. 
*/ import { ipcRenderer } from 'electron'; import { IPC_CHANNELS } from '../../../shared/constants/ipc'; import type { IPCResult } from '../../../shared/types/common'; import type { CustomMcpServer, McpHealthCheckResult, McpTestConnectionResult } from '../../../shared/types/project'; +import type { GlobalMcpInfo, ClaudeAgentsInfo } from '../../../shared/types/integrations'; export interface McpAPI { /** Quick health check for a custom MCP server */ checkMcpHealth: (server: CustomMcpServer) => Promise>; + /** Health check for global MCPs (trusted source, no command allowlist) */ + checkGlobalMcpHealth: (server: CustomMcpServer) => Promise>; /** Full MCP connection test */ testMcpConnection: (server: CustomMcpServer) => Promise>; + /** Get all global MCP servers from Claude Code settings (plugins + inline) */ + getGlobalMcps: () => Promise>; + /** Get all custom agents from ~/.claude/agents/ */ + getClaudeAgents: () => Promise>; } export function createMcpAPI(): McpAPI { @@ -21,7 +29,16 @@ export function createMcpAPI(): McpAPI { checkMcpHealth: (server: CustomMcpServer) => ipcRenderer.invoke(IPC_CHANNELS.MCP_CHECK_HEALTH, server), + checkGlobalMcpHealth: (server: CustomMcpServer) => + ipcRenderer.invoke(IPC_CHANNELS.MCP_CHECK_GLOBAL_HEALTH, server), + testMcpConnection: (server: CustomMcpServer) => ipcRenderer.invoke(IPC_CHANNELS.MCP_TEST_CONNECTION, server), + + getGlobalMcps: () => + ipcRenderer.invoke(IPC_CHANNELS.CLAUDE_MCP_GET_GLOBAL), + + getClaudeAgents: () => + ipcRenderer.invoke(IPC_CHANNELS.CLAUDE_AGENTS_GET), }; } diff --git a/apps/frontend/src/preload/api/project-api.ts b/apps/frontend/src/preload/api/project-api.ts index b37face307..4f38d513d7 100644 --- a/apps/frontend/src/preload/api/project-api.ts +++ b/apps/frontend/src/preload/api/project-api.ts @@ -25,7 +25,7 @@ export interface TabState { export interface ProjectAPI { // Project Management - addProject: (projectPath: string) => Promise>; + addProject: (projectPath: string, type?: 'project' | 
'customer') => Promise>; removeProject: (projectId: string) => Promise; getProjects: () => Promise>; updateProjectSettings: ( @@ -33,6 +33,7 @@ export interface ProjectAPI { settings: Partial ) => Promise; initializeProject: (projectId: string) => Promise>; + initializeCustomerProject: (projectId: string) => Promise>; checkProjectVersion: (projectId: string) => Promise>; // Tab State (persisted in main process for reliability) @@ -45,7 +46,8 @@ export interface ProjectAPI { // Context Operations getProjectContext: (projectId: string) => Promise>; - refreshProjectIndex: (projectId: string) => Promise>; + refreshProjectIndex: (projectId: string, force?: boolean) => Promise>; + onIndexProgress: (callback: (data: { message: string; current?: number; total?: number }) => void) => () => void; getMemoryStatus: (projectId: string) => Promise>; searchMemories: (projectId: string, query: string) => Promise>; getRecentMemories: (projectId: string, limit?: number) => Promise>; @@ -147,12 +149,19 @@ export interface ProjectAPI { status: 'completed' | 'failed'; output: string[]; }>>; + + // Ollama Embedding Dimension Lookup (single source of truth) + getOllamaEmbeddingDim: (modelName: string) => Promise>; } export const createProjectAPI = (): ProjectAPI => ({ // Project Management - addProject: (projectPath: string): Promise> => - ipcRenderer.invoke(IPC_CHANNELS.PROJECT_ADD, projectPath), + addProject: (projectPath: string, type?: 'project' | 'customer'): Promise> => + ipcRenderer.invoke(IPC_CHANNELS.PROJECT_ADD, projectPath, type), removeProject: (projectId: string): Promise => ipcRenderer.invoke(IPC_CHANNELS.PROJECT_REMOVE, projectId), @@ -169,6 +178,9 @@ export const createProjectAPI = (): ProjectAPI => ({ initializeProject: (projectId: string): Promise> => ipcRenderer.invoke(IPC_CHANNELS.PROJECT_INITIALIZE, projectId), + initializeCustomerProject: (projectId: string): Promise> => + ipcRenderer.invoke(IPC_CHANNELS.PROJECT_INIT_CUSTOMER, projectId), + checkProjectVersion: 
(projectId: string): Promise> => ipcRenderer.invoke(IPC_CHANNELS.PROJECT_CHECK_VERSION, projectId), @@ -190,8 +202,14 @@ export const createProjectAPI = (): ProjectAPI => ({ getProjectContext: (projectId: string) => ipcRenderer.invoke(IPC_CHANNELS.CONTEXT_GET, projectId), - refreshProjectIndex: (projectId: string) => - ipcRenderer.invoke(IPC_CHANNELS.CONTEXT_REFRESH_INDEX, projectId), + refreshProjectIndex: (projectId: string, force?: boolean) => + ipcRenderer.invoke(IPC_CHANNELS.CONTEXT_REFRESH_INDEX, projectId, force), + + onIndexProgress: (callback: (data: { message: string; current?: number; total?: number }) => void) => { + const handler = (_event: unknown, data: { message: string; current?: number; total?: number }) => callback(data); + ipcRenderer.on(IPC_CHANNELS.CONTEXT_INDEX_PROGRESS, handler); + return () => { ipcRenderer.removeListener(IPC_CHANNELS.CONTEXT_INDEX_PROGRESS, handler); }; + }, getMemoryStatus: (projectId: string) => ipcRenderer.invoke(IPC_CHANNELS.CONTEXT_MEMORY_STATUS, projectId), @@ -313,5 +331,9 @@ export const createProjectAPI = (): ProjectAPI => ({ ipcRenderer.invoke(IPC_CHANNELS.OLLAMA_LIST_EMBEDDING_MODELS, baseUrl), pullOllamaModel: (modelName: string, baseUrl?: string) => - ipcRenderer.invoke(IPC_CHANNELS.OLLAMA_PULL_MODEL, modelName, baseUrl) + ipcRenderer.invoke(IPC_CHANNELS.OLLAMA_PULL_MODEL, modelName, baseUrl), + + // Ollama Embedding Dimension Lookup (single source of truth) + getOllamaEmbeddingDim: (modelName: string) => + ipcRenderer.invoke(IPC_CHANNELS.OLLAMA_GET_EMBEDDING_DIM, modelName), }); diff --git a/apps/frontend/src/renderer/App.tsx b/apps/frontend/src/renderer/App.tsx index 3e8eddcdef..501d17969a 100644 --- a/apps/frontend/src/renderer/App.tsx +++ b/apps/frontend/src/renderer/App.tsx @@ -55,6 +55,7 @@ import { OnboardingWizard } from './components/onboarding'; import { AppUpdateNotification } from './components/AppUpdateNotification'; import { ProactiveSwapListener } from './components/ProactiveSwapListener'; 
import { GitHubSetupModal } from './components/GitHubSetupModal'; +import { CustomerReposModal } from './components/CustomerReposModal'; import { useProjectStore, loadProjects, addProject, initializeProject, removeProject } from './stores/project-store'; import { useTaskStore, loadTasks } from './stores/task-store'; import { useSettingsStore, loadSettings, loadProfiles, saveSettings } from './stores/settings-store'; @@ -159,6 +160,10 @@ export function App() { const [showGitHubSetup, setShowGitHubSetup] = useState(false); const [gitHubSetupProject, setGitHubSetupProject] = useState(null); + // Customer repos modal state (shown after customer GitHub auth) + const [showCustomerRepos, setShowCustomerRepos] = useState(false); + const [customerReposProject, setCustomerReposProject] = useState(null); + // Remove project confirmation state const [showRemoveProjectDialog, setShowRemoveProjectDialog] = useState(false); const [removeProjectError, setRemoveProjectError] = useState(null); @@ -381,6 +386,9 @@ export function App() { // (project update with autoBuildPath may not have propagated yet) if (initSuccess) return; + // Customer folders handle initialization automatically — skip the dialog + if (selectedProject?.type === 'customer') return; + if (selectedProject && !selectedProject.autoBuildPath && skippedInitProjectId !== selectedProject.id) { // Project exists but isn't initialized - show init dialog setPendingProject(selectedProject); @@ -775,27 +783,41 @@ export function App() { // - Claude token: for Claude AI access (run.py, roadmap, etc.) 
// The user needs to separately authenticate with Claude using 'claude setup-token' - // Update project env config with GitHub settings - await window.electronAPI.updateProjectEnv(gitHubSetupProject.id, { - githubEnabled: true, - githubToken: settings.githubToken, // GitHub token for repo access - githubRepo: settings.githubRepo, - githubAuthMethod: settings.githubAuthMethod // Track how user authenticated - }); - - // Update project settings with mainBranch - await window.electronAPI.updateProjectSettings(gitHubSetupProject.id, { - mainBranch: settings.mainBranch - }); + if (gitHubSetupProject.type === 'customer') { + // Customer flow: only save the GitHub token (no repo/branch needed) + await window.electronAPI.updateProjectEnv(gitHubSetupProject.id, { + githubEnabled: true, + githubToken: settings.githubToken, + githubAuthMethod: settings.githubAuthMethod + }); + } else { + // Regular project flow: save token + repo + branch + await window.electronAPI.updateProjectEnv(gitHubSetupProject.id, { + githubEnabled: true, + githubToken: settings.githubToken, + githubRepo: settings.githubRepo, + githubAuthMethod: settings.githubAuthMethod + }); + + await window.electronAPI.updateProjectSettings(gitHubSetupProject.id, { + mainBranch: settings.mainBranch + }); + } // Refresh projects to get updated data await loadProjects(); + + // For customers, open the repos modal to clone repositories + if (gitHubSetupProject.type === 'customer') { + setCustomerReposProject(gitHubSetupProject); + setShowCustomerRepos(true); + } + + setShowGitHubSetup(false); + setGitHubSetupProject(null); } catch (error) { console.error('Failed to save GitHub settings:', error); } - - setShowGitHubSetup(false); - setGitHubSetupProject(null); }; const handleGitHubSetupSkip = () => { @@ -835,6 +857,10 @@ export function App() { onNewTaskClick={() => setIsNewTaskDialogOpen(true)} activeView={activeView} onViewChange={setActiveView} + onCustomerAdded={(project) => { + setGitHubSetupProject(project); + 
setShowGitHubSetup(true); + }} /> {/* Main content */} @@ -1104,6 +1130,18 @@ export function App() { /> )} + {/* Customer Repos Modal - clone GitHub repos into customer folder */} + {customerReposProject && ( + { + setShowCustomerRepos(open); + if (!open) setCustomerReposProject(null); + }} + customer={customerReposProject} + /> + )} + {/* Remove Project Confirmation Dialog */} { if (!open) handleCancelRemoveProject(); diff --git a/apps/frontend/src/renderer/components/AddCustomerModal.tsx b/apps/frontend/src/renderer/components/AddCustomerModal.tsx new file mode 100644 index 0000000000..1dc2aa34a5 --- /dev/null +++ b/apps/frontend/src/renderer/components/AddCustomerModal.tsx @@ -0,0 +1,287 @@ +import { useState, useEffect } from 'react'; +import { useTranslation } from 'react-i18next'; +import { FolderOpen, FolderPlus, ChevronRight, ArrowLeft, RefreshCw } from 'lucide-react'; +import { + Dialog, + DialogContent, + DialogDescription, + DialogHeader, + DialogTitle +} from './ui/dialog'; +import { Button } from './ui/button'; +import { cn } from '../lib/utils'; +import { useProjectStore } from '../stores/project-store'; +import type { Project } from '../../shared/types'; + +interface AddCustomerModalProps { + open: boolean; + onOpenChange: (open: boolean) => void; + onCustomerAdded?: (project: Project) => void; +} + +type Step = 'choose' | 'create'; + +export function AddCustomerModal({ open, onOpenChange, onCustomerAdded }: AddCustomerModalProps) { + const { t } = useTranslation('dialogs'); + const [error, setError] = useState(null); + const [step, setStep] = useState('choose'); + const [customerName, setCustomerName] = useState(''); + const [location, setLocation] = useState(''); + const [isCreating, setIsCreating] = useState(false); + const [isPicking, setIsPicking] = useState(false); + + useEffect(() => { + if (open) { + setError(null); + setStep('choose'); + setCustomerName(''); + setLocation(''); + setIsCreating(false); + setIsPicking(false); + } + }, [open]); 
+ + const registerAndInitCustomer = async (path: string) => { + // Pass type: 'customer' through IPC so it's persisted to disk (projects.json) + const result = await window.electronAPI.addProject(path, 'customer'); + if (!result.success || !result.data) { + setError(result.error || t('addCustomer.failedToOpen')); + return; + } + + const store = useProjectStore.getState(); + const project = result.data; + + // Add with type already set, then select — Sidebar will see type: 'customer' and skip git check + store.addProject(project); + store.selectProject(project.id); + store.openProjectTab(project.id); + + // Create .auto-claude/ and persist autoBuildPath via dedicated customer IPC. + // We can't use initializeProject because it requires git (customers don't have git). + if (!project.autoBuildPath) { + try { + const initResult = await window.electronAPI.initializeCustomerProject(project.id); + if (initResult.success) { + store.updateProject(project.id, { autoBuildPath: '.auto-claude' }); + } + } catch (e) { + // Non-fatal — user can configure later + console.debug('[AddCustomerModal] Failed to initialize customer project:', e); + } + } + + // Read updated project from fresh store state (avoids stale Zustand snapshot) + const updatedProject = useProjectStore.getState().projects.find(p => p.id === project.id) || project; + onCustomerAdded?.(updatedProject); + onOpenChange(false); + }; + + const handleOpenExisting = async () => { + setIsPicking(true); + try { + const path = await window.electronAPI.selectDirectory(); + if (path) { + await registerAndInitCustomer(path); + } + } catch (err) { + setError(err instanceof Error ? 
err.message : t('addCustomer.failedToOpen')); + } finally { + setIsPicking(false); + } + }; + + const handleBrowseLocation = async () => { + setIsPicking(true); + try { + const path = await window.electronAPI.selectDirectory(); + if (path) { + setLocation(path); + } + } catch { + // User cancelled + } finally { + setIsPicking(false); + } + }; + + const handleCreateFolder = async () => { + if (!customerName.trim()) { + setError(t('addCustomer.nameRequired')); + return; + } + if (!location) { + setError(t('addCustomer.locationRequired')); + return; + } + + setIsCreating(true); + setError(null); + + try { + const result = await window.electronAPI.createProjectFolder( + location, + customerName.trim(), + false // No git init for customer folders + ); + if (!result.success || !result.data) { + setError(result.error || t('addCustomer.failedToCreate')); + return; + } + await registerAndInitCustomer(result.data.path); + } catch (err) { + setError(err instanceof Error ? err.message : t('addCustomer.failedToCreate')); + } finally { + setIsCreating(false); + } + }; + + const sep = window.navigator.platform.startsWith('Win') ? '\\' : '/'; + const folderPreview = customerName.trim() && location + ? `${location}${sep}${customerName.trim()}` + : null; + + return ( + + + + {t('addCustomer.title')} + + {step === 'choose' + ? t('addCustomer.description') + : t('addCustomer.createNewSubtitle')} + + + + {step === 'choose' && ( +
+ {/* Create New Folder */} + + + {/* Open Existing Folder */} + +
+ )} + + {step === 'create' && ( +
+ {/* Customer Name */} +
+ + { + setCustomerName(e.target.value); + setError(null); + }} + placeholder={t('addCustomer.customerNamePlaceholder')} + className={cn( + 'w-full rounded-md border border-input bg-background px-3 py-2 text-sm', + 'placeholder:text-muted-foreground focus:outline-none focus:ring-2 focus:ring-ring' + )} + autoFocus + /> +
+ + {/* Location */} +
+ + {t('addCustomer.location')} + +
+
+ {location || t('addCustomer.locationPlaceholder')} +
+ +
+
+ + {/* Folder preview */} + {folderPreview && ( +
+ {t('addCustomer.willCreate')} {folderPreview} +
+ )} + + {/* Actions */} +
+ + +
+
+ )} + + {error && ( +
+ {error} +
+ )} +
+
+ ); +} diff --git a/apps/frontend/src/renderer/components/AgentTools.tsx b/apps/frontend/src/renderer/components/AgentTools.tsx index e32573a119..42bfcba85f 100644 --- a/apps/frontend/src/renderer/components/AgentTools.tsx +++ b/apps/frontend/src/renderer/components/AgentTools.tsx @@ -32,7 +32,9 @@ import { Terminal, Loader2, RefreshCw, - Lock + Lock, + ExternalLink, + Activity } from 'lucide-react'; import { useState, useMemo, useEffect, useCallback } from 'react'; import { ScrollArea } from './ui/scroll-area'; @@ -45,21 +47,23 @@ import { DialogHeader, DialogTitle } from './ui/dialog'; -import { useSettingsStore } from '../stores/settings-store'; -import { useProjectStore } from '../stores/project-store'; -import type { ProjectEnvConfig, AgentMcpOverride, CustomMcpServer, McpHealthCheckResult, } from '../../shared/types'; +import { useSettingsStore, saveSettings } from '@/stores/settings-store'; +import { cn } from '@/lib/utils'; +import { useProjectStore } from '@/stores/project-store'; +import type { ProjectEnvConfig, AgentMcpOverride, CustomMcpServer, McpHealthCheckResult } from '@shared/types'; +import type { GlobalMcpInfo, GlobalMcpServerEntry } from '@shared/types/integrations'; import { CustomMcpDialog } from './CustomMcpDialog'; import { useTranslation } from 'react-i18next'; import { AVAILABLE_MODELS, THINKING_LEVELS, -} from '../../shared/constants/models'; +} from '@shared/constants/models'; import { useResolvedAgentSettings, resolveAgentSettings as resolveAgentModelConfig, type AgentSettingsSource, -} from '../hooks'; -import type { ModelTypeShort, ThinkingLevel } from '../../shared/types/settings'; +} from '@/hooks'; +import type { ModelTypeShort, ThinkingLevel, GlobalMcpPhaseConfig } from '@shared/types/settings'; // Agent configuration data - mirrors AGENT_CONFIGS from backend // Model and thinking are now dynamically read from user settings @@ -662,6 +666,12 @@ export function AgentTools() { const [serverHealthStatus, setServerHealthStatus] = 
useState>({}); const [testingServers, setTestingServers] = useState>(new Set()); + // Global Claude Code MCP state + const [globalMcps, setGlobalMcps] = useState(null); + const [isLoadingGlobalMcps, setIsLoadingGlobalMcps] = useState(false); + const [globalMcpHealth, setGlobalMcpHealth] = useState>({}); + const [isCheckingGlobalHealth, setIsCheckingGlobalHealth] = useState(false); + // Load project env config when project changes useEffect(() => { if (selectedProjectId && selectedProject?.autoBuildPath) { @@ -685,6 +695,104 @@ export function AgentTools() { } }, [selectedProjectId, selectedProject?.autoBuildPath]); + // Load global Claude Code MCPs on mount + const loadGlobalMcps = useCallback(async () => { + setIsLoadingGlobalMcps(true); + try { + const result = await window.electronAPI.getGlobalMcps(); + if (result.success && result.data) { + setGlobalMcps(result.data); + } + } catch { + // Non-critical — global MCPs are informational only + } finally { + setIsLoadingGlobalMcps(false); + } + }, []); + + useEffect(() => { + loadGlobalMcps(); + }, [loadGlobalMcps]); + + // Combine all global MCP servers for display + const allGlobalServers = useMemo((): GlobalMcpServerEntry[] => { + if (!globalMcps) return []; + return [...globalMcps.claudeJsonServers, ...globalMcps.pluginServers, ...globalMcps.inlineServers]; + }, [globalMcps]); + + // Check health of all global MCP servers + const checkGlobalMcpHealth = useCallback(async () => { + if (!allGlobalServers.length) return; + setIsCheckingGlobalHealth(true); + + const results: Record = {}; + + // Check all servers in parallel + await Promise.all( + allGlobalServers.map(async (server) => { + try { + // Convert GlobalMcpServerEntry to CustomMcpServer format for health check + const customServer: CustomMcpServer = { + id: server.serverId, + name: server.serverName, + type: server.config.command ? 
'command' : 'http', + command: server.config.command, + args: server.config.args, + url: server.config.url, + headers: server.config.headers, + env: server.config.env, + }; + const result = await window.electronAPI.checkGlobalMcpHealth(customServer); + if (result.success && result.data) { + results[server.serverId] = result.data; + } + } catch { + results[server.serverId] = { + serverId: server.serverId, + status: 'unknown', + message: t('mcp.globalMcps.healthCheckFailed'), + checkedAt: new Date().toISOString(), + }; + } + }) + ); + + setGlobalMcpHealth(results); + setIsCheckingGlobalHealth(false); + }, [allGlobalServers, t]); + + // Auto-check health when global MCPs are loaded + useEffect(() => { + if (allGlobalServers.length > 0) { + checkGlobalMcpHealth(); + } + }, [checkGlobalMcpHealth]); + + // Settings access for global MCP phase assignments + const globalMcpPhases = settings.globalMcpPhases || {}; + + // Toggle a global MCP server's assignment to a pipeline phase + const handleToggleGlobalMcpPhase = async (serverId: string, phase: keyof GlobalMcpPhaseConfig) => { + const current = { ...globalMcpPhases }; + const phaseServers = current[phase] || []; + + if (phaseServers.includes(serverId)) { + current[phase] = phaseServers.filter(id => id !== serverId); + } else { + current[phase] = [...phaseServers, serverId]; + } + + // Clean up empty arrays + for (const key of Object.keys(current) as Array) { + if (current[key]?.length === 0) { + delete current[key]; + } + } + + const hasAny = Object.values(current).some(v => v && v.length > 0); + await saveSettings({ globalMcpPhases: hasAny ? 
current : undefined }); + }; + // Update MCP server toggle const updateMcpServer = useCallback(async ( key: keyof NonNullable, @@ -916,13 +1024,13 @@ export function AgentTools() { [server.id]: { serverId: server.id, status: 'unknown', - message: 'Health check failed', + message: t('mcp.globalMcps.healthCheckFailed'), checkedAt: new Date().toISOString(), } })); } } - }, [envConfig?.customMcpServers]); + }, [envConfig?.customMcpServers, t]); // Check health when custom servers change useEffect(() => { @@ -1315,6 +1423,159 @@ export function AgentTools() { )} + {/* Claude Code Global MCPs Section - always show container so refresh controls remain visible */} + {( +
+
+
+ +

+ {t('settings:mcp.globalMcps.title')} +

+ + {t('settings:mcp.globalMcps.badge')} + +
+
+ + +
+
+

+ {t('settings:mcp.globalMcps.description')} +

+
+ {allGlobalServers.length === 0 && ( +

+ {t('settings:mcp.globalMcps.noServers', 'No global MCP servers found.')} +

+ )} + {allGlobalServers.map((server) => { + const serverType = server.config.command + ? t('settings:mcp.globalMcps.serverType.command') + : server.config.type === 'sse' + ? t('settings:mcp.globalMcps.serverType.sse') + : t('settings:mcp.globalMcps.serverType.http'); + const ServerIcon = server.config.command ? Terminal : Globe; + const detail = server.config.command + ? `${server.config.command} ${server.config.args?.join(' ') || ''}` + : server.config.url || ''; + const health = globalMcpHealth[server.serverId]; + const statusColor = health?.status === 'healthy' + ? 'bg-green-500' + : health?.status === 'unhealthy' + ? 'bg-red-500' + : health?.status === 'needs_auth' + ? 'bg-yellow-500' + : 'bg-gray-400'; + + const PIPELINE_PHASES: Array<{ key: keyof GlobalMcpPhaseConfig; label: string }> = [ + { key: 'spec', label: t('settings:mcp.globalMcps.phases.spec') }, + { key: 'build', label: t('settings:mcp.globalMcps.phases.build') }, + { key: 'qa', label: t('settings:mcp.globalMcps.phases.qa') }, + { key: 'utility', label: t('settings:mcp.globalMcps.phases.utility') }, + { key: 'ideation', label: t('settings:mcp.globalMcps.phases.ideation') }, + ]; + + return ( +
+
+
+
+ + +
+
+
+ {server.serverName} + + {serverType} + + + {server.source === 'plugin' + ? t('settings:mcp.globalMcps.source.plugin') + : server.source === 'claude-json' + ? t('settings:mcp.globalMcps.source.claudeJson') + : t('settings:mcp.globalMcps.source.settings')} + + {health?.responseTime && ( + + {health.responseTime}ms + + )} +
+

+ {detail} +

+
+
+
+ {/* Phase assignment chips */} +
+ + {t('settings:mcp.globalMcps.useIn')} + + {PIPELINE_PHASES.map(({ key, label }) => { + const isActive = (globalMcpPhases[key] || []).includes(server.serverId); + return ( + + ); + })} +
+
+ ); + })} +
+
+ )} + {/* Agent Categories */} {Object.entries(CATEGORIES).map(([categoryId, category]) => { const agents = agentsByCategory[categoryId] || []; diff --git a/apps/frontend/src/renderer/components/CustomerReposModal.tsx b/apps/frontend/src/renderer/components/CustomerReposModal.tsx new file mode 100644 index 0000000000..37bf6afd39 --- /dev/null +++ b/apps/frontend/src/renderer/components/CustomerReposModal.tsx @@ -0,0 +1,252 @@ +import { useState, useEffect, useCallback } from 'react'; +import { useTranslation } from 'react-i18next'; +import { Github, Download, CheckCircle2, Loader2, Lock, Globe, Search, X, FolderGit2 } from 'lucide-react'; +import { + Dialog, + DialogContent, + DialogDescription, + DialogHeader, + DialogTitle +} from './ui/dialog'; +import { Button } from './ui/button'; +import { cn } from '../lib/utils'; +import { useProjectStore } from '../stores/project-store'; +import type { Project } from '../../shared/types'; + +interface CustomerReposModalProps { + open: boolean; + onOpenChange: (open: boolean) => void; + customer: Project; +} + +interface RepoItem { + fullName: string; + description: string | null; + isPrivate: boolean; +} + +type CloneStatus = 'idle' | 'cloning' | 'done' | 'error'; + +export function CustomerReposModal({ open, onOpenChange, customer }: CustomerReposModalProps) { + const { t } = useTranslation('dialogs'); + const [repos, setRepos] = useState([]); + const [isLoading, setIsLoading] = useState(false); + const [error, setError] = useState(null); + const [search, setSearch] = useState(''); + const [cloneStatuses, setCloneStatuses] = useState>({}); + const [cloneErrors, setCloneErrors] = useState>({}); + + const loadRepos = useCallback(async () => { + setIsLoading(true); + setError(null); + try { + const result = await window.electronAPI.listGitHubUserRepos(); + if (result.success && result.data) { + setRepos(result.data.repos); + } else { + setError(result.error || t('customerRepos.failedToLoad')); + } + } catch (err) { + 
setError(err instanceof Error ? err.message : t('customerRepos.failedToLoad')); + } finally { + setIsLoading(false); + } + }, [t]); + + useEffect(() => { + if (open) { + setSearch(''); + setCloneStatuses({}); + setCloneErrors({}); + loadRepos(); + } + }, [open, loadRepos]); + + const handleClone = async (repo: RepoItem) => { + setCloneStatuses(prev => ({ ...prev, [repo.fullName]: 'cloning' })); + setCloneErrors(prev => { + const next = { ...prev }; + delete next[repo.fullName]; + return next; + }); + + try { + const result = await window.electronAPI.cloneGitHubRepo(repo.fullName, customer.path); + if (!result.success || !result.data) { + setCloneStatuses(prev => ({ ...prev, [repo.fullName]: 'error' })); + setCloneErrors(prev => ({ ...prev, [repo.fullName]: result.error || t('customerRepos.cloneFailed') })); + return; + } + + // Register the cloned repo as a project + const addResult = await window.electronAPI.addProject(result.data.path); + if (addResult.success && addResult.data) { + const store = useProjectStore.getState(); + store.addProject(addResult.data); + setCloneStatuses(prev => ({ ...prev, [repo.fullName]: 'done' })); + } else { + setCloneStatuses(prev => ({ ...prev, [repo.fullName]: 'error' })); + setCloneErrors(prev => ({ + ...prev, + [repo.fullName]: addResult.error || t('customerRepos.cloneFailed') + })); + } + } catch (err) { + setCloneStatuses(prev => ({ ...prev, [repo.fullName]: 'error' })); + setCloneErrors(prev => ({ + ...prev, + [repo.fullName]: err instanceof Error ? 
err.message : t('customerRepos.cloneFailed') + })); + } + }; + + const filteredRepos = repos.filter(repo => + repo.fullName.toLowerCase().includes(search.toLowerCase()) || + (repo.description && repo.description.toLowerCase().includes(search.toLowerCase())) + ); + + const clonedCount = Object.values(cloneStatuses).filter(s => s === 'done').length; + + return ( + + + + + + {t('customerRepos.title')} + + + {t('customerRepos.description', { name: customer.name })} + + + + {/* Search */} +
+ + setSearch(e.target.value)} + placeholder={t('customerRepos.searchPlaceholder')} + className={cn( + 'w-full rounded-md border border-input bg-background pl-9 pr-9 py-2 text-sm', + 'placeholder:text-muted-foreground focus:outline-none focus:ring-2 focus:ring-ring' + )} + /> + {search && ( + + )} +
+ + {/* Repo list */} +
+ {isLoading && ( +
+ + {t('customerRepos.loading')} +
+ )} + + {error && ( +
+ {error} +
+ )} + + {!isLoading && !error && filteredRepos.length === 0 && ( +
+ {search ? t('customerRepos.noResults') : t('customerRepos.noRepos')} +
+ )} + + {filteredRepos.map((repo) => { + const status = cloneStatuses[repo.fullName] || 'idle'; + const cloneError = cloneErrors[repo.fullName]; + + return ( +
+ +
+
+ {repo.fullName} + {repo.isPrivate ? ( + + ) : ( + + )} +
+ {repo.description && ( +

+ {repo.description} +

+ )} + {cloneError && ( +

{cloneError}

+ )} +
+ +
+ {status === 'idle' && ( + + )} + {status === 'cloning' && ( + + )} + {status === 'done' && ( + + + {t('customerRepos.cloned')} + + )} + {status === 'error' && ( + + )} +
+
+ ); + })} +
+ + {/* Footer */} +
+ + {clonedCount > 0 && t('customerRepos.clonedCount', { count: clonedCount })} + + +
+
+
+ ); +} diff --git a/apps/frontend/src/renderer/components/EnvConfigModal.tsx b/apps/frontend/src/renderer/components/EnvConfigModal.tsx index e22112a920..ed6984c07c 100644 --- a/apps/frontend/src/renderer/components/EnvConfigModal.tsx +++ b/apps/frontend/src/renderer/components/EnvConfigModal.tsx @@ -13,6 +13,7 @@ import { ChevronDown, ChevronRight } from 'lucide-react'; +import { useTranslation } from 'react-i18next'; import { useSettingsStore } from '../stores/settings-store'; import { Dialog, @@ -33,6 +34,14 @@ import { import { cn } from '../lib/utils'; import type { ClaudeProfile } from '../../shared/types'; +/** + * Unified predicate for determining whether a Claude profile has valid credentials. + * Used both for filtering the profile list and for the "use profile" action. + */ +function isAuthenticatedProfile(profile: { oauthToken?: string; isDefault: boolean; configDir?: string }): boolean { + return !!profile.oauthToken || !!(profile.isDefault && profile.configDir); +} + interface EnvConfigModalProps { open: boolean; onOpenChange: (open: boolean) => void; @@ -50,6 +59,7 @@ export function EnvConfigModal({ description = 'A Claude Code OAuth token is required to use AI features like Ideation and Roadmap generation.', projectId }: EnvConfigModalProps) { + const { t } = useTranslation('dialogs'); const [token, setToken] = useState(''); const [showToken, setShowToken] = useState(false); const [showManualEntry, setShowManualEntry] = useState(false); @@ -64,6 +74,7 @@ export function EnvConfigModal({ id: string; name: string; oauthToken?: string; + configDir?: string; email?: string; isDefault: boolean; }>>([]); @@ -103,7 +114,7 @@ export function EnvConfigModal({ // Handle Claude profiles if (profilesResult.success && profilesResult.data) { const authenticatedProfiles = profilesResult.data.profiles.filter( - (p: ClaudeProfile) => p.oauthToken || (p.isDefault && p.configDir) + (p: ClaudeProfile) => isAuthenticatedProfile(p) ); 
setClaudeProfiles(authenticatedProfiles); @@ -153,33 +164,48 @@ export function EnvConfigModal({ setError(null); try { - // Get the selected profile's token const profile = claudeProfiles.find(p => p.id === selectedProfileId); - if (!profile?.oauthToken) { - setError('Selected profile does not have a valid token'); + if (!profile) { + setError(t('envConfig.profileNotFound')); setIsSaving(false); return; } - // Save the token to auto-claude .env - const result = await window.electronAPI.updateSourceEnv({ - claudeOAuthToken: profile.oauthToken - }); + if (!isAuthenticatedProfile(profile)) { + setError(t('envConfig.invalidProfileCredentials')); + setIsSaving(false); + return; + } - if (result.success) { + // Try to use profile's oauthToken if available (legacy path) + if (profile.oauthToken) { + const result = await window.electronAPI.updateSourceEnv({ + claudeOAuthToken: profile.oauthToken + }); + + if (result.success) { + setSuccess(true); + setHasExistingToken(true); + setTimeout(() => { + onConfigured?.(); + onOpenChange(false); + }, 1500); + } else { + setError(result.error || t('envConfig.failedToSaveToken')); + } + } else { + // Profile uses Keychain-based auth (modern path) + // The profile is authenticated via OS Keychain, no need to copy token to .env + // The main process will resolve credentials from the active profile's Keychain setSuccess(true); setHasExistingToken(true); - - // Notify parent setTimeout(() => { onConfigured?.(); onOpenChange(false); }, 1500); - } else { - setError(result.error || 'Failed to save token'); } } catch (err) { - setError(err instanceof Error ? err.message : 'Unknown error'); + setError(err instanceof Error ? 
err.message : t('envConfig.failedToUseProfile')); } finally { setIsSaving(false); } diff --git a/apps/frontend/src/renderer/components/GitHubIssues.tsx b/apps/frontend/src/renderer/components/GitHubIssues.tsx index 410b60d485..015532462d 100644 --- a/apps/frontend/src/renderer/components/GitHubIssues.tsx +++ b/apps/frontend/src/renderer/components/GitHubIssues.tsx @@ -1,4 +1,5 @@ import { useState, useCallback, useMemo, useEffect } from "react"; +import { useTranslation } from "react-i18next"; import { useProjectStore } from "../stores/project-store"; import { useTaskStore } from "../stores/task-store"; import { @@ -7,6 +8,7 @@ import { useIssueFiltering, useAutoFix, } from "./github-issues/hooks"; +import { useMultiRepoGitHubIssues } from "./github-issues/hooks/useMultiRepoGitHubIssues"; import { useAnalyzePreview } from "./github-issues/hooks/useAnalyzePreview"; import { NotConnectedState, @@ -17,26 +19,39 @@ import { InvestigationDialog, BatchReviewWizard, } from "./github-issues/components"; +import { RepoFilterDropdown } from "./github-issues/components/RepoFilterDropdown"; import { GitHubSetupModal } from "./GitHubSetupModal"; import type { GitHubIssue } from "../../shared/types"; import type { GitHubIssuesProps } from "./github-issues/types"; export function GitHubIssues({ onOpenSettings, onNavigateToTask }: GitHubIssuesProps) { + const { t } = useTranslation("common"); const projects = useProjectStore((state) => state.projects); const selectedProjectId = useProjectStore((state) => state.selectedProjectId); const selectedProject = projects.find((p) => p.id === selectedProjectId); const tasks = useTaskStore((state) => state.tasks); + const isCustomer = selectedProject?.type === 'customer'; + + // Single-repo hook (active when NOT a customer) + const singleRepo = useGitHubIssues(isCustomer ? undefined : selectedProject?.id); + + // Multi-repo hook (active when IS a customer) + const multiRepo = useMultiRepoGitHubIssues(isCustomer ? 
selectedProject?.id : undefined); + + // Select the active hook's data. + // Multi-repo uses composite string IDs (selectedIssueId: `repo#number`), + // while single-repo uses plain numbers (selectedIssueNumber). + // We unify them under selectedIssueId (string | number | null) for IssueList. + const activeHook = isCustomer ? multiRepo : singleRepo; const { syncStatus, isLoading, isLoadingMore, error, - selectedIssueNumber, selectedIssue, filterState, hasMore, - selectIssue, getFilteredIssues, getOpenIssuesCount, handleRefresh, @@ -44,14 +59,38 @@ export function GitHubIssues({ onOpenSettings, onNavigateToTask }: GitHubIssuesP handleLoadMore, handleSearchStart, handleSearchClear, - } = useGitHubIssues(selectedProject?.id); + } = activeHook; + + // Unified selection ID: composite string in multi-repo, plain number in single-repo + const selectedIssueId: string | number | null = isCustomer + ? multiRepo.selectedIssueId + : singleRepo.selectedIssueNumber; + + // Unified selection callback: multi-repo expects string, single-repo expects number. + // Wrapped in a single function to avoid TypeScript union narrowing issues. + const selectIssue = useCallback((id: string | number | null) => { + if (isCustomer) { + multiRepo.selectIssue(typeof id === 'string' ? id : null); + } else { + singleRepo.selectIssue(typeof id === 'number' ? id : null); + } + }, [isCustomer, multiRepo.selectIssue, singleRepo.selectIssue]); + + // Resolve child project ID from selected issue's repoFullName (for multi-repo) + const resolvedChildProjectId = useMemo(() => { + if (!isCustomer || !selectedIssue || !multiRepo.syncStatus?.repos) return undefined; + const match = multiRepo.syncStatus.repos.find(r => r.repoFullName === selectedIssue.repoFullName); + return match?.projectId; + }, [isCustomer, selectedIssue, multiRepo.syncStatus?.repos]); + + const effectiveProjectId = isCustomer ? 
resolvedChildProjectId : selectedProject?.id; const { investigationStatus, lastInvestigationResult, startInvestigation, resetInvestigationStatus, - } = useGitHubInvestigation(selectedProject?.id); + } = useGitHubInvestigation(effectiveProjectId); const { searchQuery, setSearchQuery, filteredIssues, isSearchActive } = useIssueFiltering( getFilteredIssues(), @@ -68,9 +107,9 @@ export function GitHubIssues({ onOpenSettings, onNavigateToTask }: GitHubIssuesP batchProgress, toggleAutoFix, checkForNewIssues, - } = useAutoFix(selectedProject?.id); + } = useAutoFix(effectiveProjectId); - // Analyze & Group Issues (proactive workflow) + // Analyze & Group Issues (proactive workflow) - disabled for customer multi-repo const { isWizardOpen, isAnalyzing, @@ -82,7 +121,7 @@ export function GitHubIssues({ onOpenSettings, onNavigateToTask }: GitHubIssuesP closeWizard, startAnalysis, approveBatches, - } = useAnalyzePreview({ projectId: selectedProject?.id || "" }); + } = useAnalyzePreview({ projectId: isCustomer ? "" : (selectedProject?.id || "") }); const [showInvestigateDialog, setShowInvestigateDialog] = useState(false); const [selectedIssueForInvestigation, setSelectedIssueForInvestigation] = @@ -96,12 +135,22 @@ export function GitHubIssues({ onOpenSettings, onNavigateToTask }: GitHubIssuesP } }, [analysisError]); - // Build a map of GitHub issue numbers to task IDs for quick lookup + // Build a map of GitHub issue identifiers to task IDs for quick lookup. + // Uses the GitHub URL (which includes repo) as key when available to avoid + // ambiguity in customer multi-repo mode where issue numbers can overlap. + // Falls back to "#number" when URL is not available. const issueToTaskMap = useMemo(() => { - const map = new Map(); + const map = new Map(); for (const task of tasks) { if (task.metadata?.githubIssueNumber) { - map.set(task.metadata.githubIssueNumber, task.specId || task.id); + // Extract repo from githubUrl if available (e.g. 
"https://github.com/owner/repo/issues/123") + let repo = ''; + if (task.metadata.githubUrl) { + const match = task.metadata.githubUrl.match(/github\.com\/([^/]+\/[^/]+)\//); + if (match) repo = match[1]; + } + const key = repo ? `${repo}#${task.metadata.githubIssueNumber}` : `#${task.metadata.githubIssueNumber}`; + map.set(key, task.specId || task.id); } } return map; @@ -135,6 +184,15 @@ export function GitHubIssues({ onOpenSettings, onNavigateToTask }: GitHubIssuesP resetInvestigationStatus(); }, [resetInvestigationStatus]); + // Derive header repo name + const headerRepoName = isCustomer + ? (multiRepo.repos.length > 0 + ? (multiRepo.selectedRepo === 'all' + ? t('issues.reposCount', { count: multiRepo.repos.length }) + : multiRepo.selectedRepo) + : '') + : (singleRepo.syncStatus?.repoFullName ?? ""); + // Not connected state if (!syncStatus?.connected) { return ; @@ -144,7 +202,7 @@ export function GitHubIssues({ onOpenSettings, onNavigateToTask }: GitHubIssuesP
{/* Header */} + {/* Repo filter dropdown for multi-repo mode */} + {isCustomer && multiRepo.repos.length > 1 && ( +
+ +
+ )} + {/* Content */}
{/* Issue List */}
@@ -190,14 +260,20 @@ export function GitHubIssues({ onOpenSettings, onNavigateToTask }: GitHubIssuesP ? lastInvestigationResult : null } - linkedTaskId={issueToTaskMap.get(selectedIssue.number)} + linkedTaskId={ + issueToTaskMap.get( + selectedIssue.repoFullName + ? `${selectedIssue.repoFullName}#${selectedIssue.number}` + : `#${selectedIssue.number}` + ) + } onViewTask={onNavigateToTask} - projectId={selectedProject?.id} + projectId={effectiveProjectId} autoFixConfig={autoFixConfig} autoFixQueueItem={getAutoFixQueueItem(selectedIssue.number)} /> ) : ( - + )}
@@ -210,25 +286,27 @@ export function GitHubIssues({ onOpenSettings, onNavigateToTask }: GitHubIssuesP investigationStatus={investigationStatus} onStartInvestigation={handleStartInvestigation} onClose={handleCloseDialog} - projectId={selectedProject?.id} + projectId={effectiveProjectId} /> - {/* Batch Review Wizard (Proactive workflow) */} - + {/* Batch Review Wizard (Proactive workflow) - not available in multi-repo mode */} + {!isCustomer && ( + + )} {/* GitHub Setup Modal - shown when GitHub module is not configured */} - {selectedProject && ( + {selectedProject && !isCustomer && ( { setGithubToken(token); + // For customers, we only need the GitHub token — skip repo/branch/claude steps + if (project.type === 'customer') { + onComplete({ + githubToken: token, + githubRepo: '', + mainBranch: '', + githubAuthMethod: 'oauth' + }); + return; + } + // Check if Claude is already authenticated before showing auth step try { const profilesResult = await window.electronAPI.getClaudeProfiles(); diff --git a/apps/frontend/src/renderer/components/Insights.tsx b/apps/frontend/src/renderer/components/Insights.tsx index b7133ef8af..488d65397a 100644 --- a/apps/frontend/src/renderer/components/Insights.tsx +++ b/apps/frontend/src/renderer/components/Insights.tsx @@ -564,7 +564,7 @@ export function Insights({ projectId }: InsightsProps) { onDragOver={handleDragOver} onDragLeave={handleDragLeave} onDrop={handleDrop} - placeholder="Ask about your codebase..." 
+ placeholder={t('insights.placeholder')} className={cn( 'min-h-[80px] resize-none', isDragOver && 'border-primary ring-2 ring-primary/20' diff --git a/apps/frontend/src/renderer/components/Sidebar.tsx b/apps/frontend/src/renderer/components/Sidebar.tsx index 0efe1c0749..055afa4256 100644 --- a/apps/frontend/src/renderer/components/Sidebar.tsx +++ b/apps/frontend/src/renderer/components/Sidebar.tsx @@ -27,6 +27,13 @@ import { import { Button } from './ui/button'; import { ScrollArea } from './ui/scroll-area'; import { Separator } from './ui/separator'; +import { + Select, + SelectContent, + SelectItem, + SelectTrigger, + SelectValue +} from './ui/select'; import { Tooltip, TooltipContent, @@ -45,7 +52,7 @@ import { cn } from '../lib/utils'; import { useProjectStore, removeProject, - initializeProject + initializeProject, } from '../stores/project-store'; import { useSettingsStore, saveSettings } from '../stores/settings-store'; import { @@ -54,6 +61,7 @@ import { clearProjectEnvConfig } from '../stores/project-env-store'; import { AddProjectModal } from './AddProjectModal'; +// AddCustomerModal is used in ProjectSelector, not Sidebar import { GitSetupModal } from './GitSetupModal'; import { RateLimitIndicator } from './RateLimitIndicator'; import { ClaudeCodeStatusBadge } from './ClaudeCodeStatusBadge'; @@ -67,6 +75,7 @@ interface SidebarProps { onNewTaskClick: () => void; activeView?: SidebarView; onViewChange?: (view: SidebarView) => void; + onCustomerAdded?: (project: Project) => void; } interface NavItem { @@ -105,11 +114,13 @@ export function Sidebar({ onSettingsClick, onNewTaskClick, activeView = 'kanban', - onViewChange + onViewChange, + onCustomerAdded }: SidebarProps) { const { t } = useTranslation(['navigation', 'dialogs', 'common']); const projects = useProjectStore((state) => state.projects); const selectedProjectId = useProjectStore((state) => state.selectedProjectId); + const selectProject = useProjectStore((state) => state.selectProject); const 
settings = useSettingsStore((state) => state.settings); const [showAddProjectModal, setShowAddProjectModal] = useState(false); @@ -121,6 +132,34 @@ export function Sidebar({ const selectedProject = projects.find((p) => p.id === selectedProjectId); + // Normalize path separators for cross-platform customer/child comparisons. + // startsWith(p.path + '/') breaks on Windows where paths use backslashes. + const normalizePath = (value: string) => value.replace(/\\/g, '/').replace(/\/+$/, ''); + + // Determine customer context: the parent customer for the selected project + // - If selected is a customer → that customer + // - If selected is a child of a customer → the parent customer + // - Otherwise → null (regular project, no repo dropdown) + const customerContext = useMemo(() => { + if (!selectedProject) return null; + if (selectedProject.type === 'customer') return selectedProject; + // Check if selected project is inside a customer's folder + const normalizedSelected = normalizePath(selectedProject.path); + const parentCustomer = projects.find( + p => p.type === 'customer' && normalizedSelected.startsWith(normalizePath(p.path) + '/') + ); + return parentCustomer ?? null; + }, [selectedProject, projects]); + + // Child repos belonging to the current customer context + const customerChildRepos = useMemo(() => { + if (!customerContext) return []; + const normalizedCustomer = normalizePath(customerContext.path); + return projects.filter( + p => p.id !== customerContext.id && normalizePath(p.path).startsWith(normalizedCustomer + '/') + ); + }, [customerContext, projects]); + // Sidebar collapsed state from settings const isCollapsed = settings.sidebarCollapsed ?? 
false; @@ -135,20 +174,23 @@ export function Sidebar({ // Track the last loaded project ID to avoid redundant loads const lastLoadedProjectIdRef = useRef(null); - // Compute visible nav items based on GitHub/GitLab enabled state from store + // When inside a Customer context (either the customer itself or a child repo selected + // via dropdown), always show GitHub nav — multi-repo aggregation uses the customer's token. + const inCustomerContext = !!customerContext; + + // Compute visible nav items — show GitHub OR GitLab based on what's configured const visibleNavItems = useMemo(() => { const items = [...baseNavItems]; - - if (githubEnabled) { + const effectiveGithubEnabled = githubEnabled || inCustomerContext; + if (effectiveGithubEnabled && !gitlabEnabled) { items.push(...githubNavItems); - } - - if (gitlabEnabled) { + } else if (gitlabEnabled && !effectiveGithubEnabled) { items.push(...gitlabNavItems); + } else if (effectiveGithubEnabled && gitlabEnabled) { + items.push(...githubNavItems, ...gitlabNavItems); } - return items; - }, [githubEnabled, gitlabEnabled]); + }, [githubEnabled, gitlabEnabled, inCustomerContext]); // Load envConfig when project changes to ensure store is populated useEffect(() => { @@ -212,16 +254,25 @@ export function Sidebar({ return () => window.removeEventListener('keydown', handleKeyDown); }, [selectedProjectId, onViewChange, visibleNavItems]); - // Check git status when project changes + // Track which project IDs had git modal dismissed to avoid re-showing + const gitModalDismissedRef = useRef>(new Set()); + + // Check git status when project changes (skip for customer-type projects) useEffect(() => { const checkGit = async () => { if (selectedProject) { + // Customer folders don't require git + if (selectedProject.type === 'customer') { + setGitStatus(null); + return; + } try { const result = await window.electronAPI.checkGitStatus(selectedProject.path); if (result.success && result.data) { setGitStatus(result.data); // Show git 
setup modal if project is not a git repo or has no commits - if (!result.data.isGitRepo || !result.data.hasCommits) { + // but only if user hasn't already dismissed it for this project + if ((!result.data.isGitRepo || !result.data.hasCommits) && !gitModalDismissedRef.current.has(selectedProject.id)) { setShowGitSetupModal(true); } } @@ -233,7 +284,7 @@ export function Sidebar({ } }; checkGit(); - }, [selectedProject]); + }, [selectedProjectId]); const handleProjectAdded = (project: Project, needsInit: boolean) => { if (needsInit) { @@ -399,6 +450,30 @@ export function Sidebar({ {t('sections.project')} )} + + {/* Repo Selector Dropdown — only visible in customer context */} + {customerContext && customerChildRepos.length > 0 && !isCollapsed && ( +
+ +
+ )} + @@ -572,7 +647,13 @@ export function Sidebar({ {/* Git Setup Modal */} { + setShowGitSetupModal(open); + // When user closes the modal, remember not to show it again for this project + if (!open && selectedProjectId) { + gitModalDismissedRef.current.add(selectedProjectId); + } + }} project={selectedProject || null} gitStatus={gitStatus} onGitInitialized={handleGitInitialized} diff --git a/apps/frontend/src/renderer/components/SortableProjectTab.tsx b/apps/frontend/src/renderer/components/SortableProjectTab.tsx index d57cf1292c..7eb12e8754 100644 --- a/apps/frontend/src/renderer/components/SortableProjectTab.tsx +++ b/apps/frontend/src/renderer/components/SortableProjectTab.tsx @@ -87,14 +87,19 @@ export function SortableProjectTab({ )} onClick={onSelect} > - {/* Drag handle - visible on hover, hidden on mobile */} + {/* Drag handle - visible on hover, fluorescent green when active */}
diff --git a/apps/frontend/src/renderer/components/context/Context.tsx b/apps/frontend/src/renderer/components/context/Context.tsx index c6812fefe4..4205b66d90 100644 --- a/apps/frontend/src/renderer/components/context/Context.tsx +++ b/apps/frontend/src/renderer/components/context/Context.tsx @@ -2,7 +2,7 @@ import { useState } from 'react'; import { FolderTree, Brain } from 'lucide-react'; import { Tabs, TabsContent, TabsList, TabsTrigger } from '../ui/tabs'; import { useContextStore } from '../../stores/context-store'; -import { useProjectContext, useRefreshIndex, useMemorySearch } from './hooks'; +import { useProjectContext, useRefreshIndex, useMemorySearch, useIndexProgress } from './hooks'; import { ProjectIndexTab } from './ProjectIndexTab'; import { MemoriesTab } from './MemoriesTab'; import type { ContextProps } from './types'; @@ -12,6 +12,9 @@ export function Context({ projectId }: ContextProps) { projectIndex, indexLoading, indexError, + indexProgress, + indexProgressCurrent, + indexProgressTotal, memoryStatus, memoryState, recentMemories, @@ -26,6 +29,7 @@ export function Context({ projectId }: ContextProps) { useProjectContext(projectId); const handleRefreshIndex = useRefreshIndex(projectId); const handleSearch = useMemorySearch(projectId); + useIndexProgress(); return (
@@ -49,6 +53,9 @@ export function Context({ projectId }: ContextProps) { projectIndex={projectIndex} indexLoading={indexLoading} indexError={indexError} + indexProgress={indexProgress} + indexProgressCurrent={indexProgressCurrent} + indexProgressTotal={indexProgressTotal} onRefresh={handleRefreshIndex} /> diff --git a/apps/frontend/src/renderer/components/context/MemoriesTab.tsx b/apps/frontend/src/renderer/components/context/MemoriesTab.tsx index 736a01b065..919bc2b329 100644 --- a/apps/frontend/src/renderer/components/context/MemoriesTab.tsx +++ b/apps/frontend/src/renderer/components/context/MemoriesTab.tsx @@ -1,4 +1,5 @@ import { useState, useMemo } from 'react'; +import { useTranslation } from 'react-i18next'; import { RefreshCw, Database, @@ -77,6 +78,7 @@ export function MemoriesTab({ searchLoading, onSearch }: MemoriesTabProps) { + const { t } = useTranslation('common'); const [localSearchQuery, setLocalSearchQuery] = useState(''); const [activeFilter, setActiveFilter] = useState('all'); @@ -199,7 +201,7 @@ export function MemoriesTab({
setLocalSearchQuery(e.target.value)} onKeyDown={handleSearchKeyDown} diff --git a/apps/frontend/src/renderer/components/context/MemoryCard.tsx b/apps/frontend/src/renderer/components/context/MemoryCard.tsx index 46260083df..466f11abc4 100644 --- a/apps/frontend/src/renderer/components/context/MemoryCard.tsx +++ b/apps/frontend/src/renderer/components/context/MemoryCard.tsx @@ -1,4 +1,5 @@ import { useState, useMemo } from 'react'; +import { useTranslation } from 'react-i18next'; import { Clock, CheckCircle2, @@ -107,6 +108,7 @@ function isPRReviewMemory(memory: MemoryEpisode): boolean { } export function MemoryCard({ memory }: MemoryCardProps) { + const { t } = useTranslation('common'); const [expanded, setExpanded] = useState(false); const parsed = useMemo(() => parseMemoryContent(memory.content), [memory.content]); @@ -186,12 +188,12 @@ export function MemoryCard({ memory }: MemoryCardProps) { {expanded ? ( <> - Collapse + {t('memory.collapse')} ) : ( <> - Expand + {t('memory.expand')} )} @@ -204,7 +206,7 @@ export function MemoryCard({ memory }: MemoryCardProps) { {/* What Worked */} {parsed.what_worked && parsed.what_worked.length > 0 && (
- +
    {parsed.what_worked.map((item, idx) => ( {item} @@ -216,7 +218,7 @@ export function MemoryCard({ memory }: MemoryCardProps) { {/* What Failed */} {parsed.what_failed && parsed.what_failed.length > 0 && (
    - +
      {parsed.what_failed.map((item, idx) => ( {item} @@ -230,7 +232,7 @@ export function MemoryCard({ memory }: MemoryCardProps) {

      @@ -256,7 +258,7 @@ export function MemoryCard({ memory }: MemoryCardProps) {

        @@ -273,7 +275,7 @@ export function MemoryCard({ memory }: MemoryCardProps) { {/* Patterns Discovered */} {parsed.discoveries?.patterns_discovered && parsed.discoveries.patterns_discovered.length > 0 && (
        - +
        {parsed.discoveries.patterns_discovered.map((pattern, idx) => { const text = typeof pattern === 'string' @@ -292,7 +294,7 @@ export function MemoryCard({ memory }: MemoryCardProps) { {/* Gotchas */} {parsed.discoveries?.gotchas_discovered && parsed.discoveries.gotchas_discovered.length > 0 && (
        - +
          {parsed.discoveries.gotchas_discovered.map((gotcha, idx) => { const text = typeof gotcha === 'string' @@ -309,7 +311,7 @@ export function MemoryCard({ memory }: MemoryCardProps) { {/* Changed Files */} {parsed.discoveries?.changed_files && parsed.discoveries.changed_files.length > 0 && (
          - +
          {parsed.discoveries.changed_files.map((file, idx) => ( @@ -323,7 +325,7 @@ export function MemoryCard({ memory }: MemoryCardProps) { {/* File Insights */} {parsed.discoveries?.file_insights && parsed.discoveries.file_insights.length > 0 && (
          - +
          {parsed.discoveries.file_insights.map((insight, idx) => (
          @@ -347,7 +349,7 @@ export function MemoryCard({ memory }: MemoryCardProps) { {/* Subtasks Completed */} {parsed.subtasks_completed && parsed.subtasks_completed.length > 0 && (
          - +
          {parsed.subtasks_completed.map((task, idx) => ( diff --git a/apps/frontend/src/renderer/components/context/ProjectIndexTab.tsx b/apps/frontend/src/renderer/components/context/ProjectIndexTab.tsx index 6c5190c204..48526e428f 100644 --- a/apps/frontend/src/renderer/components/context/ProjectIndexTab.tsx +++ b/apps/frontend/src/renderer/components/context/ProjectIndexTab.tsx @@ -1,4 +1,5 @@ -import { RefreshCw, AlertCircle, FolderTree } from 'lucide-react'; +import { RefreshCw, AlertCircle, FolderTree, RotateCcw, Loader2 } from 'lucide-react'; +import { useTranslation } from 'react-i18next'; import { Button } from '../ui/button'; import { Card, CardContent, CardHeader, CardTitle } from '../ui/card'; import { Badge } from '../ui/badge'; @@ -13,40 +14,70 @@ interface ProjectIndexTabProps { projectIndex: ProjectIndex | null; indexLoading: boolean; indexError: string | null; - onRefresh: () => void; + indexProgress: string | null; + indexProgressCurrent: number | null; + indexProgressTotal: number | null; + onRefresh: (force?: boolean) => void; } export function ProjectIndexTab({ projectIndex, indexLoading, indexError, + indexProgress, + indexProgressCurrent, + indexProgressTotal, onRefresh }: ProjectIndexTabProps) { + const { t } = useTranslation('context'); + const isCustomer = projectIndex?.project_type === 'customer'; + const childRepoCount = projectIndex?.child_repos ? Object.keys(projectIndex.child_repos).length : 0; + return (
          - {/* Header with refresh */} + {/* Header with refresh / re-analyze buttons */}
          -

          Project Structure

          +

          {t('projectIndex.title')}

          - AI-discovered knowledge about your codebase + {t('projectIndex.subtitle')}

          - - - - - Re-analyze project structure - +
          + {/* Re-analyze button (forces re-run of analyzer on all repos) */} + {projectIndex && ( + + + + + {t('projectIndex.reanalyzeTooltip')} + + )} + {/* Refresh button */} + + + + + {t('projectIndex.analyzeTooltip')} + +
          {/* Error state */} @@ -54,16 +85,61 @@ export function ProjectIndexTab({
          -

          Failed to load project index

          +

          {t('projectIndex.errorTitle')}

          {indexError}

          )} - {/* Loading state */} + {/* Loading state with progress */} {indexLoading && !projectIndex && ( -
          - +
          + + {indexProgress ? ( +
          +

          {indexProgress}

          + {indexProgressTotal != null && indexProgressCurrent != null ? ( +
          +
          + {t('projectIndex.repoProgress', { current: indexProgressCurrent, total: indexProgressTotal })} + {indexProgressTotal > 0 ? Math.round((indexProgressCurrent / indexProgressTotal) * 100) : 0}% +
          +
          +
          0 ? (indexProgressCurrent / indexProgressTotal) * 100 : 0}%` }} + /> +
          +
          + ) : null} +
          + ) : ( +

          {t('projectIndex.analyzing')}

          + )} +
          + )} + + {/* Inline progress when refreshing existing data */} + {indexLoading && projectIndex && indexProgress && ( +
          + +
          +

          {indexProgress}

          + {indexProgressTotal != null && indexProgressCurrent != null ? ( +
          +
          + {t('projectIndex.repoProgress', { current: indexProgressCurrent, total: indexProgressTotal })} + {indexProgressTotal > 0 ? Math.round((indexProgressCurrent / indexProgressTotal) * 100) : 0}% +
          +
          +
          0 ? (indexProgressCurrent / indexProgressTotal) * 100 : 0}%` }} + /> +
          +
          + ) : null} +
          )} @@ -71,13 +147,13 @@ export function ProjectIndexTab({ {!indexLoading && !projectIndex && !indexError && (
          -

          No Project Index Found

          +

          {t('projectIndex.noIndexTitle')}

          - Click the Refresh button to analyze your project structure and create an index. + {t('projectIndex.noIndexDescription')}

          -
          )} @@ -88,7 +164,7 @@ export function ProjectIndexTab({ {/* Project Overview */} - Overview + {t('projectIndex.overview')}
          @@ -97,8 +173,12 @@ export function ProjectIndexTab({ {Object.keys(projectIndex.services).length > 0 && ( - {Object.keys(projectIndex.services).length} service - {Object.keys(projectIndex.services).length !== 1 ? 's' : ''} + {t('projectIndex.serviceCount', { count: Object.keys(projectIndex.services).length })} + + )} + {isCustomer && childRepoCount > 0 && ( + + {t('projectIndex.repoCount', { count: childRepoCount })} )}
          @@ -108,11 +188,55 @@ export function ProjectIndexTab({
          + {/* Child Repos (customer only) */} + {isCustomer && projectIndex.child_repos && Object.keys(projectIndex.child_repos).length > 0 && ( +
          +

          + {t('projectIndex.repositories')} +

          +
          + {Object.entries(projectIndex.child_repos).map(([repoName, repoIndex]) => { + const serviceCount = Object.keys(repoIndex.services).length; + const mainService = Object.values(repoIndex.services)[0]; + return ( + + +
          +
          +

          {repoName}

          + {mainService && ( +

          + {mainService.language} + {mainService.framework ? ` / ${mainService.framework}` : ''} +

          + )} +
          +
          + {mainService?.type && ( + + {mainService.type} + + )} + {serviceCount > 1 && ( + + {t('projectIndex.svcCount', { count: serviceCount })} + + )} +
          +
          +
          +
          + ); + })} +
          +
          + )} + {/* Services */} {Object.keys(projectIndex.services).length > 0 && (

          - Services + {t('projectIndex.services')}

          {Object.entries(projectIndex.services).map(([name, service]) => ( @@ -126,24 +250,24 @@ export function ProjectIndexTab({ {Object.keys(projectIndex.infrastructure).length > 0 && (

          - Infrastructure + {t('projectIndex.infrastructure')}

          {projectIndex.infrastructure.docker_compose && ( - + )} {projectIndex.infrastructure.ci && ( - + )} {projectIndex.infrastructure.deployment && ( - + )} {projectIndex.infrastructure.docker_services && projectIndex.infrastructure.docker_services.length > 0 && (
          - Docker Services + {t('projectIndex.dockerServices')}
          {projectIndex.infrastructure.docker_services.map((svc) => ( @@ -163,25 +287,25 @@ export function ProjectIndexTab({ {Object.keys(projectIndex.conventions).length > 0 && (

          - Conventions + {t('projectIndex.conventions')}

          {projectIndex.conventions.python_linting && ( - + )} {projectIndex.conventions.js_linting && ( - + )} {projectIndex.conventions.formatting && ( - + )} {projectIndex.conventions.git_hooks && ( - + )} {projectIndex.conventions.typescript && ( - + )}
          diff --git a/apps/frontend/src/renderer/components/context/constants.ts b/apps/frontend/src/renderer/components/context/constants.ts index 3905d06965..b3fa7f5186 100644 --- a/apps/frontend/src/renderer/components/context/constants.ts +++ b/apps/frontend/src/renderer/components/context/constants.ts @@ -14,7 +14,8 @@ import { GitPullRequest, Bug, Sparkles, - Target + Target, + BookOpen } from 'lucide-react'; // Service type icon mapping @@ -27,6 +28,7 @@ export const serviceTypeIcons: Record = { proxy: GitBranch, mobile: Smartphone, desktop: Monitor, + documentation: BookOpen, unknown: FileCode }; @@ -40,6 +42,7 @@ export const serviceTypeColors: Record = { proxy: 'bg-cyan-500/10 text-cyan-400 border-cyan-500/30', mobile: 'bg-orange-500/10 text-orange-400 border-orange-500/30', desktop: 'bg-indigo-500/10 text-indigo-400 border-indigo-500/30', + documentation: 'bg-emerald-500/10 text-emerald-400 border-emerald-500/30', unknown: 'bg-muted text-muted-foreground border-muted' }; diff --git a/apps/frontend/src/renderer/components/context/hooks.ts b/apps/frontend/src/renderer/components/context/hooks.ts index c5102858b2..04683b33e0 100644 --- a/apps/frontend/src/renderer/components/context/hooks.ts +++ b/apps/frontend/src/renderer/components/context/hooks.ts @@ -1,8 +1,9 @@ -import { useEffect } from 'react'; +import { useEffect, useCallback } from 'react'; import { loadProjectContext, refreshProjectIndex, - searchMemories + searchMemories, + useContextStore } from '../../stores/context-store'; export function useProjectContext(projectId: string) { @@ -14,9 +15,9 @@ export function useProjectContext(projectId: string) { } export function useRefreshIndex(projectId: string) { - return async () => { - await refreshProjectIndex(projectId); - }; + return useCallback(async (force?: boolean) => { + await refreshProjectIndex(projectId, force); + }, [projectId]); } export function useMemorySearch(projectId: string) { @@ -26,3 +27,17 @@ export function 
useMemorySearch(projectId: string) { } }; } + +/** + * Listen for index progress events from main process + */ +export function useIndexProgress() { + const setIndexProgress = useContextStore((s) => s.setIndexProgress); + + useEffect(() => { + const cleanup = window.electronAPI.onIndexProgress((data) => { + setIndexProgress(data.message || null, data.current, data.total); + }); + return cleanup; + }, [setIndexProgress]); +} diff --git a/apps/frontend/src/renderer/components/github-issues/components/IssueList.tsx b/apps/frontend/src/renderer/components/github-issues/components/IssueList.tsx index d6586889cb..77ea21c799 100644 --- a/apps/frontend/src/renderer/components/github-issues/components/IssueList.tsx +++ b/apps/frontend/src/renderer/components/github-issues/components/IssueList.tsx @@ -5,11 +5,12 @@ import { IssueListItem } from './IssueListItem'; import { EmptyState } from './EmptyStates'; import { GitHubErrorDisplay } from './GitHubErrorDisplay'; import type { IssueListProps } from '../types'; +import { makeIssueId } from '../hooks/useMultiRepoGitHubIssues'; import { useTranslation } from 'react-i18next'; export function IssueList({ issues, - selectedIssueNumber, + selectedIssueId, isLoading, isLoadingMore, hasMore, @@ -18,7 +19,8 @@ export function IssueList({ onInvestigate, onLoadMore, onRetry, - onOpenSettings + onOpenSettings, + showRepoBadge }: IssueListProps) { const { t } = useTranslation('common'); const loadMoreTriggerRef = useRef(null); @@ -71,21 +73,32 @@ export function IssueList({ } if (issues.length === 0) { - return ; + return ; } return (
          - {issues.map((issue) => ( - onSelectIssue(issue.number)} - onInvestigate={() => onInvestigate(issue)} - /> - ))} + {issues.map((issue) => { + // Build the composite key for this issue to compare with the selected ID. + // In single-repo mode selectedIssueId is a number, so compare against issue.number. + // In multi-repo mode selectedIssueId is a composite string `repo#number`. + const issueCompositeId = showRepoBadge + ? makeIssueId(issue.repoFullName, issue.number) + : issue.number; + const isSelected = selectedIssueId === issueCompositeId; + + return ( + onSelectIssue(issueCompositeId)} + onInvestigate={() => onInvestigate(issue)} + showRepoBadge={showRepoBadge} + /> + ); + })} {/* Load more trigger / Loading indicator */} {/* Inline error for load-more failures (visible even when onLoadMore is undefined during search) */} diff --git a/apps/frontend/src/renderer/components/github-issues/components/IssueListItem.tsx b/apps/frontend/src/renderer/components/github-issues/components/IssueListItem.tsx index 41aef5d11a..157909c077 100644 --- a/apps/frontend/src/renderer/components/github-issues/components/IssueListItem.tsx +++ b/apps/frontend/src/renderer/components/github-issues/components/IssueListItem.tsx @@ -7,7 +7,7 @@ import { } from '../../../../shared/constants'; import type { IssueListItemProps } from '../types'; -export function IssueListItem({ issue, isSelected, onClick, onInvestigate }: IssueListItemProps) { +export function IssueListItem({ issue, isSelected, onClick, onInvestigate, showRepoBadge }: IssueListItemProps) { return (
          #{issue.number} + {showRepoBadge && issue.repoFullName && ( + + {issue.repoFullName} + + )}

          {issue.title} diff --git a/apps/frontend/src/renderer/components/github-issues/components/RepoFilterDropdown.tsx b/apps/frontend/src/renderer/components/github-issues/components/RepoFilterDropdown.tsx new file mode 100644 index 0000000000..3d0fd782e7 --- /dev/null +++ b/apps/frontend/src/renderer/components/github-issues/components/RepoFilterDropdown.tsx @@ -0,0 +1,40 @@ +import { useTranslation } from 'react-i18next'; +import { GitBranch } from 'lucide-react'; +import { + Select, + SelectContent, + SelectItem, + SelectTrigger, + SelectValue +} from '../../ui/select'; + +interface RepoFilterDropdownProps { + repos: string[]; + selectedRepo: string; + onRepoChange: (repo: string) => void; +} + +export function RepoFilterDropdown({ repos, selectedRepo, onRepoChange }: RepoFilterDropdownProps) { + const { t } = useTranslation('navigation'); + + if (repos.length === 0) return null; + + return ( + + ); +} diff --git a/apps/frontend/src/renderer/components/github-issues/hooks/useMultiRepoGitHubIssues.ts b/apps/frontend/src/renderer/components/github-issues/hooks/useMultiRepoGitHubIssues.ts new file mode 100644 index 0000000000..1d44fc1421 --- /dev/null +++ b/apps/frontend/src/renderer/components/github-issues/hooks/useMultiRepoGitHubIssues.ts @@ -0,0 +1,249 @@ +import { useEffect, useCallback, useMemo, useState } from 'react'; +import { useTranslation } from 'react-i18next'; +import type { GitHubIssue, MultiRepoGitHubStatus } from '@shared/types'; +import type { FilterState } from '@/components/github-issues/types'; + +/** + * Creates a composite issue ID from repo name and issue number. + * Format: `repoFullName#number` (e.g., `org/repo#123`) + * Falls back to `#number` when repoFullName is empty (single-repo compat). + */ +export function makeIssueId(repoFullName: string | undefined, number: number): string { + return `${repoFullName || ''}#${number}`; +} + +/** + * Parses a composite issue ID back into its parts. 
+ * Handles both `repoFullName#number` and `#number` formats. + */ +export function parseIssueId(id: string): { repo: string; number: number } { + const hashIndex = id.lastIndexOf('#'); + if (hashIndex === -1) { + return { repo: '', number: Number.parseInt(id, 10) || 0 }; + } + return { + repo: id.slice(0, hashIndex), + number: Number.parseInt(id.slice(hashIndex + 1), 10) || 0, + }; +} + +interface MultiRepoState { + issues: GitHubIssue[]; + repos: string[]; + selectedRepo: string; // 'all' or repoFullName + isLoading: boolean; + error: string | null; + syncStatus: MultiRepoGitHubStatus | null; + selectedIssueId: string | null; + filterState: FilterState; +} + +export function useMultiRepoGitHubIssues(customerId: string | undefined) { + const { t } = useTranslation('common'); + const [state, setState] = useState({ + issues: [], + repos: [], + selectedRepo: 'all', + isLoading: false, + error: null, + syncStatus: null, + selectedIssueId: null, + filterState: 'open', + }); + + // Check multi-repo connection on mount/customerId change + useEffect(() => { + if (!customerId) return; + let cancelled = false; + + const checkConnection = async () => { + try { + const result = await window.electronAPI.github.checkMultiRepoConnection(customerId); + if (cancelled) return; + if (result.success && result.data) { + const data = result.data; + setState(prev => ({ + ...prev, + syncStatus: data, + repos: data.repos.map(r => r.repoFullName), + })); + } else { + setState(prev => ({ + ...prev, + syncStatus: { connected: false, repos: [], error: result.error }, + error: result.error || t('issues.multiRepo.failedToCheckConnection'), + })); + } + } catch (error) { + if (cancelled) return; + setState(prev => ({ + ...prev, + error: error instanceof Error ? 
error.message : t('issues.multiRepo.unknownError'), + })); + } + }; + + checkConnection(); + return () => { cancelled = true; }; + }, [customerId, t]); + + // Load issues when connected or filter changes + useEffect(() => { + if (!customerId || !state.syncStatus?.connected) return; + let cancelled = false; + + const loadIssues = async () => { + setState(prev => ({ ...prev, isLoading: true, error: null })); + + try { + const result = await window.electronAPI.github.getMultiRepoIssues( + customerId, + state.filterState + ); + + if (cancelled) return; + if (result.success && result.data) { + const data = result.data; + setState(prev => ({ + ...prev, + issues: data.issues, + repos: data.repos.length > 0 ? data.repos : prev.repos, + isLoading: false, + })); + } else { + setState(prev => ({ + ...prev, + error: result.error || t('issues.multiRepo.failedToLoadIssues'), + isLoading: false, + })); + } + } catch (error) { + if (cancelled) return; + setState(prev => ({ + ...prev, + error: error instanceof Error ? 
error.message : t('issues.multiRepo.unknownError'), + isLoading: false, + })); + } + }; + + loadIssues(); + return () => { cancelled = true; }; + }, [customerId, state.syncStatus?.connected, state.filterState, t]); + + const selectIssue = useCallback((issueId: string | null) => { + setState(prev => ({ ...prev, selectedIssueId: issueId })); + }, []); + + const setSelectedRepo = useCallback((repo: string) => { + setState(prev => ({ ...prev, selectedRepo: repo, selectedIssueId: null })); + }, []); + + const handleFilterChange = useCallback((filterState: FilterState) => { + setState(prev => ({ ...prev, filterState, selectedIssueId: null })); + }, []); + + const handleRefresh = useCallback(() => { + if (!customerId) return; + + const refresh = async () => { + setState(prev => ({ ...prev, isLoading: true, error: null })); + + try { + const connResult = await window.electronAPI.github.checkMultiRepoConnection(customerId); + if (connResult.success && connResult.data) { + const connData = connResult.data; + setState(prev => ({ + ...prev, + syncStatus: connData, + repos: connData.repos.map(r => r.repoFullName), + })); + } + + const result = await window.electronAPI.github.getMultiRepoIssues( + customerId, + state.filterState + ); + + if (result.success && result.data) { + const data = result.data; + setState(prev => ({ + ...prev, + issues: data.issues, + repos: data.repos.length > 0 ? data.repos : prev.repos, + isLoading: false, + })); + } else { + setState(prev => ({ + ...prev, + error: result.error || t('issues.multiRepo.failedToRefreshIssues'), + isLoading: false, + })); + } + } catch (error) { + setState(prev => ({ + ...prev, + error: error instanceof Error ? 
error.message : t('issues.multiRepo.unknownError'), + isLoading: false, + })); + } + }; + + refresh(); + }, [customerId, state.filterState, t]); + + // Get filtered issues based on selected repo + // Note: state filtering is already done by the API via the `state` parameter + const getFilteredIssues = useCallback((): GitHubIssue[] => { + const { issues, selectedRepo } = state; + + // Filter by repo + if (selectedRepo !== 'all') { + return issues.filter(issue => issue.repoFullName === selectedRepo); + } + + return issues; + }, [state]); + + const getOpenIssuesCount = useCallback((): number => { + const { issues, selectedRepo } = state; + let filtered = issues.filter(issue => issue.state === 'open'); + if (selectedRepo !== 'all') { + filtered = filtered.filter(issue => issue.repoFullName === selectedRepo); + } + return filtered.length; + }, [state]); + + const selectedIssue = useMemo(() => { + if (!state.selectedIssueId) return null; + const { repo, number } = parseIssueId(state.selectedIssueId); + return state.issues.find(i => + i.number === number && (repo === '' || i.repoFullName === repo) + ) || null; + }, [state.issues, state.selectedIssueId]); + + return { + issues: state.issues, + syncStatus: state.syncStatus, + isLoading: state.isLoading, + isLoadingMore: false, + error: state.error, + selectedIssueId: state.selectedIssueId, + selectedIssue, + filterState: state.filterState, + hasMore: false, + selectIssue, + getFilteredIssues, + getOpenIssuesCount, + handleRefresh, + handleFilterChange, + handleLoadMore: undefined, + handleSearchStart: () => { /* no-op: multi-repo fetches all issues at once */ }, + handleSearchClear: () => { /* no-op: multi-repo fetches all issues at once */ }, + // Multi-repo specific + repos: state.repos, + selectedRepo: state.selectedRepo, + setSelectedRepo, + isMultiRepo: true, + }; +} diff --git a/apps/frontend/src/renderer/components/github-issues/types/index.ts b/apps/frontend/src/renderer/components/github-issues/types/index.ts index 
89a56a0d2f..b0efcc5599 100644 --- a/apps/frontend/src/renderer/components/github-issues/types/index.ts +++ b/apps/frontend/src/renderer/components/github-issues/types/index.ts @@ -55,6 +55,7 @@ export interface IssueListItemProps { isSelected: boolean; onClick: () => void; onInvestigate: () => void; + showRepoBadge?: boolean; } export interface IssueDetailProps { @@ -109,18 +110,30 @@ export interface IssueListHeaderProps { export interface IssueListProps { issues: GitHubIssue[]; - selectedIssueNumber: number | null; + /** + * Selected issue identifier. + * - Single-repo mode: issue number (number) + * - Multi-repo mode: composite key `repoFullName#number` (string) + */ + selectedIssueId: string | number | null; isLoading: boolean; isLoadingMore?: boolean; hasMore?: boolean; error: string | null; - onSelectIssue: (issueNumber: number) => void; + /** + * Called when an issue is selected. + * - Single-repo mode: receives the issue number (number) + * - Multi-repo mode: receives a composite key `repoFullName#number` (string) + */ + onSelectIssue: (issueId: string | number) => void; onInvestigate: (issue: GitHubIssue) => void; onLoadMore?: () => void; /** Callback for retry button in error display */ onRetry?: () => void; /** Callback for settings button in error display */ onOpenSettings?: () => void; + /** Show repo badge on each issue (for multi-repo mode) */ + showRepoBadge?: boolean; } export interface EmptyStateProps { diff --git a/apps/frontend/src/renderer/components/github-prs/GitHubPRs.tsx b/apps/frontend/src/renderer/components/github-prs/GitHubPRs.tsx index 048ee59479..f55fd1a656 100644 --- a/apps/frontend/src/renderer/components/github-prs/GitHubPRs.tsx +++ b/apps/frontend/src/renderer/components/github-prs/GitHubPRs.tsx @@ -1,11 +1,17 @@ -import { useCallback, useEffect } from "react"; -import { GitPullRequest, RefreshCw, ExternalLink, Settings } from "lucide-react"; +import { useCallback, useEffect, useMemo } from "react"; +import { GitPullRequest, 
RefreshCw, ExternalLink, Settings, User, Clock, FileDiff } from "lucide-react"; import { useTranslation } from "react-i18next"; import { useProjectStore } from "../../stores/project-store"; import { useGitHubPRs, usePRFiltering } from "./hooks"; +import { useMultiRepoGitHubPRs, makePRId, parsePRId } from "./hooks/useMultiRepoGitHubPRs"; import { PRList, PRDetail, PRFilterBar } from "./components"; +import { RepoFilterDropdown } from "../github-issues/components/RepoFilterDropdown"; import { Button } from "../ui/button"; +import { Badge } from "../ui/badge"; +import { ScrollArea } from "../ui/scroll-area"; import { ResizablePanels } from "../ui/resizable-panels"; +import { cn } from "../../lib/utils"; +import type { MultiRepoPRData } from "../../../shared/types"; interface GitHubPRsProps { onOpenSettings?: () => void; @@ -19,7 +25,7 @@ function NotConnectedState({ }: { error: string | null; onOpenSettings?: () => void; - t: (key: string) => string; + t: (key: string, options?: Record) => string; }) { return (
          @@ -49,12 +55,275 @@ function EmptyState({ message }: { message: string }) { ); } +function formatRelativeDate(dateString: string, t: (key: string, options?: Record) => string): string { + const date = new Date(dateString); + const now = new Date(); + // Clamp negative diff to zero to handle future timestamps gracefully + const diffMs = Math.max(0, now.getTime() - date.getTime()); + const diffDays = Math.floor(diffMs / (1000 * 60 * 60 * 24)); + if (diffDays === 0) { + const diffHours = Math.floor(diffMs / (1000 * 60 * 60)); + if (diffHours === 0) { + const diffMins = Math.floor(diffMs / (1000 * 60)); + return t('time.minutesAgo', { count: diffMins }); + } + return t('time.hoursAgo', { count: diffHours }); + } + if (diffDays === 1) return t('time.yesterday'); + if (diffDays < 7) return t('time.daysAgo', { count: diffDays }); + if (diffDays < 30) return t('time.weeksAgo', { count: Math.floor(diffDays / 7) }); + return date.toLocaleDateString(); +} + +/** Simplified PR detail for multi-repo read-only mode */ +function MultiRepoPRDetail({ pr }: { pr: MultiRepoPRData }) { + const { t } = useTranslation("common"); + return ( + +
          +
          + +
          +

          {pr.title}

          +
          + #{pr.number} + · + {pr.repoFullName} +
          +
          +
          + +
          + + + {pr.author.login} + + + + {formatRelativeDate(pr.updatedAt, t)} + + + + +{pr.additions} + -{pr.deletions} + +
          + +
          + {pr.headRefName} + + {pr.baseRefName} +
          + + {pr.body && ( +
          +
          +              {pr.body}
          +            
          +
          + )} + + + {t("prReview.viewOnGitHub")} + + +
          +
          + ); +} + +/** Multi-repo PR list item */ +function MultiRepoPRListItem({ + pr, + isSelected, + onClick, +}: { + pr: MultiRepoPRData; + isSelected: boolean; + onClick: () => void; +}) { + const { t } = useTranslation("common"); + return ( + + ); +} + +/** Multi-repo Customer PR view */ +function MultiRepoPRView({ + multiRepo, + onOpenSettings, + t, + fullPRDetail, +}: { + multiRepo: ReturnType; + onOpenSettings?: () => void; + t: (key: string, options?: Record) => string; + fullPRDetail?: React.ReactNode; +}) { + const { prs, isLoading, error, selectedPRId, selectedPR, isConnected, selectPR, refresh, repos, selectedRepo, setSelectedRepo } = multiRepo; + + if (!isConnected) { + return ; + } + + const headerRepoName = repos.length > 0 + ? (selectedRepo === 'all' ? t('prReview.reposCount', { count: repos.length }) : selectedRepo) + : ''; + + return ( +
          + {/* Header */} +
          +
          +

          + + {t("prReview.pullRequests")} +

          + {headerRepoName && ( + {headerRepoName} + )} + + {prs.length} {t("prReview.open")} + +
          + +
          + + {/* Repo filter dropdown */} + {repos.length > 1 && ( +
          + +
          + )} + + {/* Content */} +
          + {/* PR List */} +
          + {isLoading && prs.length === 0 ? ( +
          +
          + +

          {t('prReview.loadingPRs')}

          +
          +
          + ) : error ? ( +
          +
          +

          {error}

          +
          +
          + ) : prs.length === 0 ? ( +
          +
          + +

          {t('prReview.noOpenPRs')}

          +
          +
          + ) : ( + +
          + {prs.map((pr) => { + const prCompositeId = makePRId(pr.repoFullName, pr.number); + return ( + selectPR(prCompositeId)} + /> + ); + })} +
          +
          + )} +
          + + {/* PR Detail */} +
          + {selectedPR ? ( + fullPRDetail || + ) : ( + + )} +
          +
          +
          + ); +} + export function GitHubPRs({ onOpenSettings, isActive = false }: GitHubPRsProps) { const { t } = useTranslation("common"); const projects = useProjectStore((state) => state.projects); const selectedProjectId = useProjectStore((state) => state.selectedProjectId); const selectedProject = projects.find((p) => p.id === selectedProjectId); + const isCustomer = selectedProject?.type === 'customer'; + + // Multi-repo hook (active when IS a customer) + const multiRepo = useMultiRepoGitHubPRs(isCustomer ? selectedProject?.id : undefined); + + // Resolve child project ID from selected PR's repoFullName (for multi-repo) + const resolvedChildProjectId = useMemo(() => { + if (!isCustomer || !multiRepo.selectedPR || !multiRepo.syncStatus?.repos) return undefined; + const match = multiRepo.syncStatus.repos.find(r => r.repoFullName === multiRepo.selectedPR?.repoFullName); + return match?.projectId; + }, [isCustomer, multiRepo.selectedPR, multiRepo.syncStatus?.repos]); + + // Single-repo hook: activated with resolved child project for customer mode + const singleRepo = useGitHubPRs(isCustomer ? resolvedChildProjectId : selectedProject?.id, { isActive }); + const { prs, isLoading, @@ -86,7 +355,7 @@ export function GitHubPRs({ onOpenSettings, isActive = false }: GitHubPRsProps) repoFullName, getReviewStateForPR, selectedPR, - } = useGitHubPRs(selectedProject?.id, { isActive }); + } = singleRepo; // Get newCommitsCheck for the selected PR (other values come from hook to ensure consistency) const selectedPRReviewState = selectedPRNumber ? 
getReviewStateForPR(selectedPRNumber) : null; @@ -106,18 +375,33 @@ export function GitHubPRs({ onOpenSettings, isActive = false }: GitHubPRsProps) } = usePRFiltering(prs, getReviewStateForPR); // Sync UI state when PR list updates (e.g., after auto-refresh from review completion) - // Following pattern from PRDetail.tsx for state syncing useEffect(() => { - // Ensure selected PR is still valid after list updates - // This prevents stale state if a PR was closed/merged while selected + if (isCustomer) return; if (selectedPRNumber && prs.length > 0) { const selectedStillExists = prs.some(pr => pr.number === selectedPRNumber); if (!selectedStillExists) { - // Selected PR was removed/closed, clear selection to prevent stale state selectPR(null); } } - }, [prs, selectedPRNumber, selectPR]); + }, [prs, selectedPRNumber, selectPR, isCustomer]); + + // Sync PR selection from multi-repo to single-repo for customer mode. + // The multi-repo hook stores a composite ID (repo#number), but the single-repo + // hook needs the plain PR number to load details from that specific repo. 
+ const multiRepoSelectedNumber = useMemo(() => { + if (!multiRepo.selectedPRId) return null; + return parsePRId(multiRepo.selectedPRId).number; + }, [multiRepo.selectedPRId]); + + useEffect(() => { + if (!isCustomer || !resolvedChildProjectId || !multiRepoSelectedNumber) return; + if (prs.length > 0) { + const prExists = prs.some(pr => pr.number === multiRepoSelectedNumber); + if (prExists && selectedPRNumber !== multiRepoSelectedNumber) { + selectPR(multiRepoSelectedNumber); + } + } + }, [isCustomer, resolvedChildProjectId, multiRepoSelectedNumber, prs, selectedPRNumber, selectPR]); const handleRunReview = useCallback(() => { if (selectedPRNumber) { @@ -186,16 +470,56 @@ export function GitHubPRs({ onOpenSettings, isActive = false }: GitHubPRsProps) ); const handleGetLogs = useCallback(async () => { - if (selectedProjectId && selectedPRNumber) { - return await window.electronAPI.github.getPRLogs(selectedProjectId, selectedPRNumber); + const effectiveProjectId = isCustomer ? resolvedChildProjectId : selectedProjectId; + if (effectiveProjectId && selectedPRNumber) { + return await window.electronAPI.github.getPRLogs(effectiveProjectId, selectedPRNumber); } return null; - }, [selectedProjectId, selectedPRNumber]); + }, [isCustomer, resolvedChildProjectId, selectedProjectId, selectedPRNumber]); const handleMarkReviewPosted = useCallback(async (prNumber: number) => { await markReviewPosted(prNumber); }, [markReviewPosted]); + // Customer multi-repo view + if (isCustomer) { + return ( + + ) : undefined + } + /> + ); + } + // Not connected state if (!isConnected) { return ; diff --git a/apps/frontend/src/renderer/components/github-prs/components/PRList.tsx b/apps/frontend/src/renderer/components/github-prs/components/PRList.tsx index bb64682c9e..8a66560b07 100644 --- a/apps/frontend/src/renderer/components/github-prs/components/PRList.tsx +++ b/apps/frontend/src/renderer/components/github-prs/components/PRList.tsx @@ -185,7 +185,7 @@ interface PRListProps { 
isLoadingMore?: boolean; } -function formatDate(dateString: string): string { +function formatRelativeDate(dateString: string, t: (key: string, options?: Record) => string): string { const date = new Date(dateString); const now = new Date(); const diffMs = now.getTime() - date.getTime(); @@ -195,13 +195,13 @@ function formatDate(dateString: string): string { const diffHours = Math.floor(diffMs / (1000 * 60 * 60)); if (diffHours === 0) { const diffMins = Math.floor(diffMs / (1000 * 60)); - return `${diffMins}m ago`; + return t('time.minutesAgo', { count: diffMins }); } - return `${diffHours}h ago`; + return t('time.hoursAgo', { count: diffHours }); } - if (diffDays === 1) return 'yesterday'; - if (diffDays < 7) return `${diffDays}d ago`; - if (diffDays < 30) return `${Math.floor(diffDays / 7)}w ago`; + if (diffDays === 1) return t('time.yesterday'); + if (diffDays < 7) return t('time.daysAgo', { count: diffDays }); + if (diffDays < 30) return t('time.weeksAgo', { count: Math.floor(diffDays / 7) }); return date.toLocaleDateString(); } @@ -307,7 +307,7 @@ export function PRList({ - {formatDate(pr.updatedAt)} + {formatRelativeDate(pr.updatedAt, t)} diff --git a/apps/frontend/src/renderer/components/github-prs/hooks/useMultiRepoGitHubPRs.ts b/apps/frontend/src/renderer/components/github-prs/hooks/useMultiRepoGitHubPRs.ts new file mode 100644 index 0000000000..dc43b55b52 --- /dev/null +++ b/apps/frontend/src/renderer/components/github-prs/hooks/useMultiRepoGitHubPRs.ts @@ -0,0 +1,217 @@ +import { useEffect, useCallback, useMemo, useState } from 'react'; +import { useTranslation } from 'react-i18next'; +import type { MultiRepoGitHubStatus, MultiRepoPRData } from '@shared/types'; + +/** + * Creates a composite PR ID from repo name and PR number. + * Format: `repoFullName#number` (e.g., `org/repo#123`) + * Falls back to `#number` when repoFullName is empty (single-repo compat). 
+ */ +export function makePRId(repoFullName: string | undefined, number: number): string { + return `${repoFullName || ''}#${number}`; +} + +/** + * Parses a composite PR ID back into its parts. + * Handles both `repoFullName#number` and `#number` formats. + */ +export function parsePRId(id: string): { repo: string; number: number } { + const hashIndex = id.lastIndexOf('#'); + if (hashIndex === -1) { + return { repo: '', number: Number.parseInt(id, 10) || 0 }; + } + return { + repo: id.slice(0, hashIndex), + number: Number.parseInt(id.slice(hashIndex + 1), 10) || 0, + }; +} + +interface MultiRepoPRState { + prs: MultiRepoPRData[]; + repos: string[]; + selectedRepo: string; // 'all' or repoFullName + isLoading: boolean; + error: string | null; + syncStatus: MultiRepoGitHubStatus | null; + selectedPRId: string | null; +} + +export function useMultiRepoGitHubPRs(customerId: string | undefined) { + const { t } = useTranslation('common'); + const [state, setState] = useState({ + prs: [], + repos: [], + selectedRepo: 'all', + isLoading: false, + error: null, + syncStatus: null, + selectedPRId: null, + }); + + // Check multi-repo connection on mount/customerId change + useEffect(() => { + if (!customerId) return; + let cancelled = false; + + // Reset state on customer change to prevent stale data + setState(prev => ({ ...prev, prs: [], repos: [], syncStatus: null, error: null, selectedPRId: null })); + + const checkConnection = async () => { + try { + const result = await window.electronAPI.github.checkMultiRepoConnection(customerId); + if (cancelled) return; + if (result.success && result.data) { + const data = result.data; + setState(prev => ({ + ...prev, + syncStatus: data, + repos: data.repos.map(r => r.repoFullName), + })); + } else { + setState(prev => ({ + ...prev, + syncStatus: { connected: false, repos: [], error: result.error }, + error: result.error || t('prReview.multiRepo.failedToCheckConnection'), + })); + } + } catch (error) { + if (cancelled) return; + 
setState(prev => ({ + ...prev, + syncStatus: { connected: false, repos: [], error: error instanceof Error ? error.message : t('prReview.multiRepo.unknownError') }, + error: error instanceof Error ? error.message : t('prReview.multiRepo.unknownError'), + })); + } + }; + + checkConnection(); + return () => { cancelled = true; }; + }, [customerId, t]); + + // Load PRs when connected + useEffect(() => { + if (!customerId || !state.syncStatus?.connected) return; + let cancelled = false; + + const loadPRs = async () => { + setState(prev => ({ ...prev, isLoading: true, error: null })); + + try { + const result = await window.electronAPI.github.getMultiRepoPRs(customerId); + + if (cancelled) return; + if (result.success && result.data) { + const data = result.data; + setState(prev => ({ + ...prev, + prs: data.prs, + repos: data.repos.length > 0 ? data.repos : prev.repos, + isLoading: false, + })); + } else { + setState(prev => ({ + ...prev, + error: result.error || t('prReview.multiRepo.failedToLoadPRs'), + isLoading: false, + })); + } + } catch (error) { + if (cancelled) return; + setState(prev => ({ + ...prev, + error: error instanceof Error ? 
error.message : t('prReview.multiRepo.unknownError'), + isLoading: false, + })); + } + }; + + loadPRs(); + return () => { cancelled = true; }; + }, [customerId, state.syncStatus?.connected, t]); + + const selectPR = useCallback((prId: string | null) => { + setState(prev => ({ ...prev, selectedPRId: prId })); + }, []); + + const setSelectedRepo = useCallback((repo: string) => { + setState(prev => ({ ...prev, selectedRepo: repo, selectedPRId: null })); + }, []); + + const handleRefresh = useCallback(() => { + if (!customerId) return; + + const refresh = async () => { + setState(prev => ({ ...prev, isLoading: true, error: null })); + + try { + const connResult = await window.electronAPI.github.checkMultiRepoConnection(customerId); + if (connResult.success && connResult.data) { + const connData = connResult.data; + setState(prev => ({ + ...prev, + syncStatus: connData, + repos: connData.repos.map(r => r.repoFullName), + })); + } + + const result = await window.electronAPI.github.getMultiRepoPRs(customerId); + + if (result.success && result.data) { + const data = result.data; + setState(prev => ({ + ...prev, + prs: data.prs, + repos: data.repos.length > 0 ? data.repos : prev.repos, + isLoading: false, + })); + } else { + setState(prev => ({ + ...prev, + error: result.error || t('prReview.multiRepo.failedToRefreshPRs'), + isLoading: false, + })); + } + } catch (error) { + setState(prev => ({ + ...prev, + error: error instanceof Error ? 
error.message : t('prReview.multiRepo.unknownError'), + isLoading: false, + })); + } + }; + + refresh(); + }, [customerId, t]); + + // Get filtered PRs based on selected repo + const filteredPRs = useMemo((): MultiRepoPRData[] => { + const { prs, selectedRepo } = state; + if (selectedRepo === 'all') return prs; + return prs.filter(pr => pr.repoFullName === selectedRepo); + }, [state.prs, state.selectedRepo]); + + const selectedPR = useMemo(() => { + if (!state.selectedPRId) return null; + const { repo, number } = parsePRId(state.selectedPRId); + return state.prs.find(pr => + pr.number === number && (repo === '' || pr.repoFullName === repo) + ) || null; + }, [state.prs, state.selectedPRId]); + + return { + prs: filteredPRs, + syncStatus: state.syncStatus, + isLoading: state.isLoading, + error: state.error, + selectedPRId: state.selectedPRId, + selectedPR, + isConnected: state.syncStatus?.connected ?? false, + selectPR, + refresh: handleRefresh, + // Multi-repo specific + repos: state.repos, + selectedRepo: state.selectedRepo, + setSelectedRepo, + isMultiRepo: true, + }; +} diff --git a/apps/frontend/src/renderer/components/ideation/Ideation.tsx b/apps/frontend/src/renderer/components/ideation/Ideation.tsx index ce5feaa0f0..999a89a44d 100644 --- a/apps/frontend/src/renderer/components/ideation/Ideation.tsx +++ b/apps/frontend/src/renderer/components/ideation/Ideation.tsx @@ -1,3 +1,4 @@ +import { useTranslation } from 'react-i18next'; import { TabsContent } from '../ui/tabs'; import { EnvConfigModal } from '../EnvConfigModal'; import { IDEATION_TYPE_DESCRIPTIONS } from '../../../shared/constants'; @@ -18,6 +19,7 @@ interface IdeationProps { } export function Ideation({ projectId, onGoToTask }: IdeationProps) { + const { t } = useTranslation('common'); // Get showArchived from shared context for cross-page sync const { showArchived } = useViewState(); @@ -119,8 +121,8 @@ export function Ideation({ projectId, onGoToTask }: IdeationProps) { open={showEnvConfigModal} 
onOpenChange={setShowEnvConfigModal} onConfigured={handleEnvConfigured} - title="Claude Authentication Required" - description="A Claude Code OAuth token is required to generate AI-powered feature ideas." + title={t('auth.claudeAuthRequired')} + description={t('auth.claudeAuthRequiredDescription')} projectId={projectId} /> @@ -170,7 +172,7 @@ export function Ideation({ projectId, onGoToTask }: IdeationProps) { ))} {activeIdeas.length === 0 && (
          - No ideas to display + {t('ideation.noIdeasToDisplay')}
          )}
          @@ -242,8 +244,8 @@ export function Ideation({ projectId, onGoToTask }: IdeationProps) { open={showEnvConfigModal} onOpenChange={setShowEnvConfigModal} onConfigured={handleEnvConfigured} - title="Claude Authentication Required" - description="A Claude Code OAuth token is required to generate AI-powered feature ideas." + title={t('auth.claudeAuthRequired')} + description={t('auth.claudeAuthRequiredDescription')} projectId={projectId} />

          diff --git a/apps/frontend/src/renderer/components/onboarding/OllamaModelSelector.tsx b/apps/frontend/src/renderer/components/onboarding/OllamaModelSelector.tsx index 62582c81ae..f1fd42af16 100644 --- a/apps/frontend/src/renderer/components/onboarding/OllamaModelSelector.tsx +++ b/apps/frontend/src/renderer/components/onboarding/OllamaModelSelector.tsx @@ -31,7 +31,24 @@ interface OllamaModelSelectorProps { baseUrl?: string; } +/** + * Resolve the embedding dimension for a model via the backend IPC handler. + * Falls back to the hardcoded dimension from RECOMMENDED_MODELS if the IPC call fails. + */ +async function resolveEmbeddingDim(modelName: string, fallbackDim: number): Promise { + try { + const result = await window.electronAPI.getOllamaEmbeddingDim(modelName); + if (result?.success && result?.data?.dim) { + return result.data.dim; + } + } catch { + // IPC call failed; use fallback + } + return fallbackDim; +} + // Recommended embedding models for Auto Claude Memory +// Dimensions here are fallbacks only; authoritative values come from the backend via IPC // qwen3-embedding:4b is first as the recommended default (balanced quality/speed) const RECOMMENDED_MODELS: OllamaModel[] = [ { @@ -300,14 +317,15 @@ export function OllamaModelSelector({ * @param {OllamaModel} model - The model to select or deselect * @returns {void} */ - const handleSelect = (model: OllamaModel) => { + const handleSelect = async (model: OllamaModel) => { if (!model.installed || disabled) return; // Toggle behavior: if already selected, deselect by passing empty values if (selectedModel === model.name) { onModelSelect('', 0); } else { - onModelSelect(model.name, model.dim); + const dim = await resolveEmbeddingDim(model.name, model.dim); + onModelSelect(model.name, dim); } }; diff --git a/apps/frontend/src/renderer/components/settings/AgentProfileSettings.tsx b/apps/frontend/src/renderer/components/settings/AgentProfileSettings.tsx index f2b640db0e..a2ad2bffe5 100644 --- 
a/apps/frontend/src/renderer/components/settings/AgentProfileSettings.tsx +++ b/apps/frontend/src/renderer/components/settings/AgentProfileSettings.tsx @@ -1,6 +1,6 @@ -import { useState, useMemo } from 'react'; +import { useState, useMemo, useEffect, useCallback } from 'react'; import { useTranslation } from 'react-i18next'; -import { Brain, Scale, Zap, Check, Sparkles, ChevronDown, ChevronUp, RotateCcw, Settings2 } from 'lucide-react'; +import { Brain, Scale, Zap, Check, Sparkles, ChevronDown, ChevronUp, ChevronRight, RotateCcw, Settings2, Bot } from 'lucide-react'; import { cn } from '../../lib/utils'; import { DEFAULT_AGENT_PROFILES, @@ -10,7 +10,7 @@ import { DEFAULT_PHASE_THINKING, ADAPTIVE_THINKING_MODELS, PHASE_KEYS -} from '../../../shared/constants'; +} from '@shared/constants'; import { useSettingsStore, saveSettings } from '../../stores/settings-store'; import { SettingsSection } from './SettingsSection'; import { Label } from '../ui/label'; @@ -23,7 +23,8 @@ import { SelectValue } from '../ui/select'; import { Tooltip, TooltipContent, TooltipTrigger } from '../ui/tooltip'; -import type { AgentProfile, PhaseModelConfig, PhaseThinkingConfig, ModelTypeShort, ThinkingLevel } from '../../../shared/types/settings'; +import type { AgentProfile, PhaseModelConfig, PhaseThinkingConfig, ModelTypeShort, ThinkingLevel } from '@shared/types/settings'; +import type { ClaudeAgentsInfo } from '@shared/types/integrations'; /** * Icon mapping for agent profile icons @@ -46,6 +47,25 @@ export function AgentProfileSettings() { const settings = useSettingsStore((state) => state.settings); const selectedProfileId = settings.selectedAgentProfile || 'auto'; const [showPhaseConfig, setShowPhaseConfig] = useState(true); + const [showAgentsCatalog, setShowAgentsCatalog] = useState(false); + const [expandedAgentCategories, setExpandedAgentCategories] = useState>(new Set()); + const [agentsInfo, setAgentsInfo] = useState(null); + + // Load custom agents from ~/.claude/agents/ + 
const loadAgents = useCallback(async () => { + try { + const result = await window.electronAPI.getClaudeAgents(); + if (result.success && result.data) { + setAgentsInfo(result.data); + } + } catch { + // Silently fail - custom agents are optional + } + }, []); + + useEffect(() => { + loadAgents(); + }, [loadAgents]); // Find the selected profile const selectedProfile = useMemo(() => @@ -108,7 +128,7 @@ export function AgentProfileSettings() { // Reset to the selected profile's defaults await saveSettings({ customPhaseModels: undefined, - customPhaseThinking: undefined + customPhaseThinking: undefined, }); }; @@ -334,6 +354,91 @@ export function AgentProfileSettings() { )}
          + {/* Available Specialist Agents */} + {agentsInfo && agentsInfo.totalAgents > 0 && ( +
          + + + {showAgentsCatalog && ( +
          +

          + {t('agentProfile.availableAgents.info')} +

          + {agentsInfo.categories.map((category) => { + const isExpanded = expandedAgentCategories.has(category.categoryDir); + return ( +
          + + {isExpanded && ( +
          + {category.agents.map((agent) => ( +
          + + {agent.agentName} +
          + ))} +
          + )} +
          + ); + })} +
          + )} +
          + )} +
          ); diff --git a/apps/frontend/src/renderer/components/settings/DisplaySettings.tsx b/apps/frontend/src/renderer/components/settings/DisplaySettings.tsx index 13d3d4a0ef..925288abfa 100644 --- a/apps/frontend/src/renderer/components/settings/DisplaySettings.tsx +++ b/apps/frontend/src/renderer/components/settings/DisplaySettings.tsx @@ -147,7 +147,7 @@ export function DisplaySettings({ settings, onSettingsChange }: DisplaySettingsP 'hover:bg-accent text-muted-foreground hover:text-foreground', 'focus-visible:outline-none focus-visible:ring-2 focus-visible:ring-ring' )} - title="Reset to default (100%)" + title={t('scale.resetToDefault')} > @@ -170,7 +170,7 @@ export function DisplaySettings({ settings, onSettingsChange }: DisplaySettingsP 'focus-visible:outline-none focus-visible:ring-2 focus-visible:ring-ring', 'disabled:opacity-50 disabled:cursor-not-allowed disabled:hover:bg-transparent' )} - title={`Decrease scale by ${UI_SCALE_STEP}%`} + title={t('scale.decreaseScale', { step: UI_SCALE_STEP })} > @@ -214,7 +214,7 @@ export function DisplaySettings({ settings, onSettingsChange }: DisplaySettingsP 'focus-visible:outline-none focus-visible:ring-2 focus-visible:ring-ring', 'disabled:opacity-50 disabled:cursor-not-allowed disabled:hover:bg-transparent' )} - title={`Increase scale by ${UI_SCALE_STEP}%`} + title={t('scale.increaseScale', { step: UI_SCALE_STEP })} > @@ -228,10 +228,10 @@ export function DisplaySettings({ settings, onSettingsChange }: DisplaySettingsP 'focus-visible:outline-none focus-visible:ring-2 focus-visible:ring-ring', 'disabled:opacity-50 disabled:cursor-not-allowed disabled:hover:bg-primary' )} - title="Apply scale changes" + title={t('scale.applyChanges')} > - Apply + {t('scale.apply')}
          diff --git a/apps/frontend/src/renderer/components/settings/ProjectSelector.tsx b/apps/frontend/src/renderer/components/settings/ProjectSelector.tsx index 0d83ebbce5..2bec4cb848 100644 --- a/apps/frontend/src/renderer/components/settings/ProjectSelector.tsx +++ b/apps/frontend/src/renderer/components/settings/ProjectSelector.tsx @@ -1,5 +1,6 @@ -import { useState, useCallback } from 'react'; -import { FolderOpen, Plus, Trash2 } from 'lucide-react'; +import { useState, useCallback, useMemo } from 'react'; +import { FolderOpen, Plus, Trash2, Users } from 'lucide-react'; +import { useTranslation } from 'react-i18next'; import { Select, SelectContent, @@ -10,7 +11,8 @@ import { import { Separator } from '../ui/separator'; import { useProjectStore, removeProject } from '../../stores/project-store'; import { AddProjectModal } from '../AddProjectModal'; -import type { Project } from '../../../shared/types'; +import { AddCustomerModal } from '../AddCustomerModal'; +import type { Project } from '@shared/types'; interface ProjectSelectorProps { selectedProjectId: string | null; @@ -23,14 +25,34 @@ export function ProjectSelector({ onProjectChange, onProjectAdded }: ProjectSelectorProps) { - const projects = useProjectStore((state) => state.projects); + const { t } = useTranslation('settings'); + const allProjects = useProjectStore((state) => state.projects); const [showAddModal, setShowAddModal] = useState(false); + const [showAddCustomerModal, setShowAddCustomerModal] = useState(false); const [open, setOpen] = useState(false); + // Only show top-level projects: customers + regular projects (not cloned repos inside customer folders) + const projects = useMemo(() => { + // Normalize path separators for cross-platform comparison (Windows uses backslashes) + const normalize = (p: string) => p.replace(/\\/g, '/'); + const customerPaths = allProjects + .filter(p => p.type === 'customer') + .map(c => normalize(c.path)); + return allProjects.filter(p => { + if (p.type === 
'customer') return true; + // Exclude projects whose path is inside a customer folder + const normalizedPath = normalize(p.path); + return !customerPaths.some(cp => normalizedPath.startsWith(cp + '/')); + }); + }, [allProjects]); + const handleValueChange = (value: string) => { if (value === '__add_new__') { setShowAddModal(true); setOpen(false); + } else if (value === '__add_customer__') { + setShowAddCustomerModal(true); + setOpen(false); } else { onProjectChange(value || null); setOpen(false); @@ -57,13 +79,13 @@ export function ProjectSelector({
          - +
          {projects.length === 0 ? (
          -

          No projects yet

          +

          {t('projectSelector.noProjects')}

          ) : ( projects.map((project) => ( @@ -90,7 +112,13 @@ export function ProjectSelector({
          - Add Project... + {t('projectSelector.addProject')} +
          +
          + +
          + + {t('projectSelector.addCustomer')}
          @@ -116,6 +144,15 @@ export function ProjectSelector({ onProjectAdded?.(project, needsInit); }} /> + + { + onProjectChange(project.id); + onProjectAdded?.(project, false); + }} + /> ); } diff --git a/apps/frontend/src/renderer/components/settings/ThemeSelector.tsx b/apps/frontend/src/renderer/components/settings/ThemeSelector.tsx index 504770cecf..5287273705 100644 --- a/apps/frontend/src/renderer/components/settings/ThemeSelector.tsx +++ b/apps/frontend/src/renderer/components/settings/ThemeSelector.tsx @@ -1,4 +1,5 @@ import { Check, Sun, Moon, Monitor } from 'lucide-react'; +import { useTranslation } from 'react-i18next'; import { cn } from '../../lib/utils'; import { Label } from '../ui/label'; import { COLOR_THEMES } from '../../../shared/constants'; @@ -18,6 +19,7 @@ interface ThemeSelectorProps { * require saving to take effect. */ export function ThemeSelector({ settings, onSettingsChange }: ThemeSelectorProps) { + const { t } = useTranslation('settings'); const updateStoreSettings = useSettingsStore((state) => state.updateSettings); const currentColorTheme = settings.colorTheme || 'default'; @@ -54,8 +56,8 @@ export function ThemeSelector({ settings, onSettingsChange }: ThemeSelectorProps
          {/* Mode Toggle */}
          - -

          Choose light, dark, or system preference

          + +

          {t('theme.modeDescription')}

          {(['system', 'light', 'dark'] as const).map((mode) => ( ))}
          @@ -78,8 +80,8 @@ export function ThemeSelector({ settings, onSettingsChange }: ThemeSelectorProps {/* Color Theme Grid */}
          - -

          Select a color palette for the interface

          + +

          {t('theme.colorThemeDescription')}

          {COLOR_THEMES.map((theme) => { const isSelected = currentColorTheme === theme.id; @@ -113,12 +115,12 @@ export function ThemeSelector({ settings, onSettingsChange }: ThemeSelectorProps
          diff --git a/apps/frontend/src/renderer/components/settings/integrations/GitHubIntegration.tsx b/apps/frontend/src/renderer/components/settings/integrations/GitHubIntegration.tsx index 3f079472a0..9d3e820ff0 100644 --- a/apps/frontend/src/renderer/components/settings/integrations/GitHubIntegration.tsx +++ b/apps/frontend/src/renderer/components/settings/integrations/GitHubIntegration.tsx @@ -1,6 +1,7 @@ import { useState, useEffect, useMemo } from 'react'; import { useTranslation } from 'react-i18next'; -import { Github, RefreshCw, KeyRound, Loader2, CheckCircle2, AlertCircle, User, Lock, Globe, ChevronDown, GitBranch } from 'lucide-react'; +import { Github, RefreshCw, KeyRound, Loader2, CheckCircle2, AlertCircle, User, Lock, Globe, ChevronDown, GitBranch, Download, FolderGit2 } from 'lucide-react'; +import { useProjectStore } from '../../../stores/project-store'; import { Input } from '../../ui/input'; import { Label } from '../../ui/label'; import { Switch } from '../../ui/switch'; @@ -10,6 +11,7 @@ import { Combobox } from '../../ui/combobox'; import { GitHubOAuthFlow } from '../../project-settings/GitHubOAuthFlow'; import { PasswordInput } from '../../project-settings/PasswordInput'; import { buildBranchOptions } from '../../../lib/branch-utils'; +import { cn } from '../../../lib/utils'; import type { ProjectEnvConfig, GitHubSyncStatus, ProjectSettings, GitBranchDetail } from '../../../../shared/types'; // Debug logging @@ -38,6 +40,9 @@ interface GitHubIntegrationProps { gitHubConnectionStatus: GitHubSyncStatus | null; isCheckingGitHub: boolean; projectPath?: string; // Project path for fetching git branches + projectType?: 'project' | 'customer'; // Project type for customer-specific UI + projectName?: string; // Project name for display + projectId?: string; // Project ID for store lookups // Project settings for mainBranch (used by kanban tasks and terminal worktrees) settings?: ProjectSettings; setSettings?: React.Dispatch>; @@ -55,6 +60,9 @@ 
export function GitHubIntegration({ gitHubConnectionStatus, isCheckingGitHub, projectPath, + projectType, + projectName, + projectId, settings, setSettings }: GitHubIntegrationProps) { @@ -70,6 +78,24 @@ export function GitHubIntegration({ const [isLoadingBranches, setIsLoadingBranches] = useState(false); const [branchesError, setBranchesError] = useState(null); + // Customer clone repos state + const [customerRepos, setCustomerRepos] = useState([]); + const [isLoadingCustomerRepos, setIsLoadingCustomerRepos] = useState(false); + const [customerReposError, setCustomerReposError] = useState(null); + const [cloneStatuses, setCloneStatuses] = useState>({}); + const [cloneErrors, setCloneErrors] = useState>({}); + const [customerRepoSearch, setCustomerRepoSearch] = useState(''); + + // Get child projects (repos cloned into customer folder) + const allProjects = useProjectStore((state) => state.projects); + const customerChildProjects = useMemo(() => { + if (projectType !== 'customer' || !projectPath) return []; + // Normalize path separators for cross-platform comparison (Windows uses backslashes) + const normalize = (p: string) => p.replace(/\\/g, '/'); + const normalizedCustomerPath = normalize(projectPath); + return allProjects.filter(p => p.id !== projectId && normalize(p.path).startsWith(normalizedCustomerPath + '/')); + }, [projectType, projectPath, projectId, allProjects]); + debugLog('Render - authMode:', authMode); debugLog('Render - projectPath:', projectPath); debugLog('Render - envConfig:', envConfig ? 
{ githubEnabled: envConfig.githubEnabled, hasToken: !!envConfig.githubToken, defaultBranch: envConfig.defaultBranch } : null); @@ -145,11 +171,11 @@ export function GitHubIntegration({ } } else { debugLog('fetchBranches: Failed -', result.error || 'No data returned'); - setBranchesError(result.error || 'Failed to load branches'); + setBranchesError(result.error || t('github.failedToLoadBranches')); } } catch (err) { debugLog('fetchBranches: Exception:', err); - setBranchesError(err instanceof Error ? err.message : 'Failed to load branches'); + setBranchesError(err instanceof Error ? err.message : t('github.failedToLoadBranches')); } finally { setIsLoadingBranches(false); } @@ -168,11 +194,11 @@ export function GitHubIntegration({ setRepos(result.data.repos); debugLog('Loaded repos:', result.data.repos.length); } else { - setReposError(result.error || 'Failed to load repositories'); + setReposError(result.error || t('github.failedToLoadRepositories')); } } catch (err) { debugLog('Error fetching repos:', err); - setReposError(err instanceof Error ? err.message : 'Failed to load repositories'); + setReposError(err instanceof Error ? err.message : t('github.failedToLoadRepositories')); } finally { setIsLoadingRepos(false); } @@ -221,6 +247,66 @@ export function GitHubIntegration({ updateEnvConfig({ githubRepo: repoFullName }); }; + // Customer-specific: load repos for cloning + const loadCustomerRepos = async () => { + setIsLoadingCustomerRepos(true); + setCustomerReposError(null); + try { + const result = await window.electronAPI.listGitHubUserRepos(); + if (result.success && result.data) { + setCustomerRepos(result.data.repos); + } else { + setCustomerReposError(result.error || t('github.failedToLoadRepositories')); + } + } catch (err) { + setCustomerReposError(err instanceof Error ? 
err.message : t('github.failedToLoadRepositories')); + } finally { + setIsLoadingCustomerRepos(false); + } + }; + + // Customer-specific: clone a repo into the customer folder + const handleCloneRepo = async (repo: GitHubRepo) => { + if (!projectPath) return; + setCloneStatuses(prev => ({ ...prev, [repo.fullName]: 'cloning' })); + setCloneErrors(prev => { + const next = { ...prev }; + delete next[repo.fullName]; + return next; + }); + + try { + const result = await window.electronAPI.cloneGitHubRepo(repo.fullName, projectPath); + if (!result.success || !result.data) { + setCloneStatuses(prev => ({ ...prev, [repo.fullName]: 'error' })); + setCloneErrors(prev => ({ ...prev, [repo.fullName]: result.error || t('github.cloneFailed') })); + return; + } + + // Register cloned repo as a project — only mark as done if registration succeeds + const addResult = await window.electronAPI.addProject(result.data.path); + if (addResult?.success && addResult?.data) { + const store = useProjectStore.getState(); + store.addProject(addResult.data); + setCloneStatuses(prev => ({ ...prev, [repo.fullName]: 'done' })); + } else { + setCloneStatuses(prev => ({ ...prev, [repo.fullName]: 'error' })); + setCloneErrors(prev => ({ ...prev, [repo.fullName]: addResult?.error || t('github.failedToRegisterProject') })); + } + } catch (err) { + setCloneStatuses(prev => ({ ...prev, [repo.fullName]: 'error' })); + setCloneErrors(prev => ({ + ...prev, + [repo.fullName]: err instanceof Error ? err.message : t('github.cloneFailed') + })); + } + }; + + const filteredCustomerRepos = customerRepos.filter(repo => + repo.fullName.toLowerCase().includes(customerRepoSearch.toLowerCase()) || + (repo.description?.toLowerCase().includes(customerRepoSearch.toLowerCase())) + ); + // Selected branch for Combobox value const selectedBranch = settings?.mainBranch || envConfig?.defaultBranch || ''; @@ -228,9 +314,9 @@ export function GitHubIntegration({
          - +

          - Sync issues from GitHub and create tasks automatically + {t('github.enableIssuesDescription')}

          -

          Connected via GitHub CLI

          +

          {t('github.connectedViaCLI')}

          {oauthUsername && (

          - Authenticated as {oauthUsername} + {t('github.authenticatedAs', { username: oauthUsername })}

          )}
          @@ -264,7 +350,7 @@ export function GitHubIntegration({ onClick={handleSwitchToManual} className="text-xs" > - Use Different Token + {t('github.useDifferentToken')}
          @@ -286,13 +372,13 @@ export function GitHubIntegration({ {authMode === 'oauth' && (
          - +
          - +

          - Create a token with repo scope from{' '} + {t('github.tokenInstructions')} repo {t('github.tokenScopeFrom')}{' '} - GitHub Settings + {t('github.githubSettings')}

          )} - {envConfig.githubToken && envConfig.githubRepo && ( - - )} - - {gitHubConnectionStatus?.connected && } + {/* Customer-specific: Clone Repositories */} + {projectType === 'customer' && envConfig.githubToken && ( + <> + + + {/* Already cloned repos */} + {customerChildProjects.length > 0 && ( +
          + +
          + {customerChildProjects.map((child) => ( +
          + + {child.name} + {child.path} +
          + ))} +
          +
          + )} - + {/* Clone new repos */} +
          +
          + + +
          - {/* Default Branch Selector */} - {projectPath && ( -
          -
          -
          -
          - - + {customerReposError && ( +
          + + {customerReposError}
          -

          - {t('settings:integrations.github.defaultBranch.description')} -

          -
          - + )} + + {customerRepos.length > 0 && ( + <> + {/* Search */} + setCustomerRepoSearch(e.target.value)} + placeholder={t('github.searchRepos')} + className="h-8 text-xs" + /> + + {/* Repo list */} +
          + {filteredCustomerRepos.map((repo) => { + const status = cloneStatuses[repo.fullName] || 'idle'; + const alreadyCloned = customerChildProjects.some( + p => p.name === repo.fullName.split('/').pop() + ); + const cloneError = cloneErrors[repo.fullName]; + + return ( +
          + +
          +
          + {repo.fullName} + {repo.isPrivate ? ( + + ) : ( + + )} +
          + {repo.description && ( +

          {repo.description}

          + )} + {cloneError && ( +

          {cloneError}

          + )} +
          + +
          + {alreadyCloned || status === 'done' ? ( + + + {t('github.cloned')} + + ) : status === 'cloning' ? ( + + ) : ( + + )} +
          +
          + ); + })} +
          + + )}
          + + )} - {branchesError && ( -
          - - {branchesError} -
          + {/* Regular project: repo connection + branch + auto-sync */} + {projectType !== 'customer' && ( + <> + {envConfig.githubToken && envConfig.githubRepo && ( + )} -
          - -
          + {gitHubConnectionStatus?.connected && } + + + + {/* Default Branch Selector */} + {projectPath && ( +
          +
          +
          +
          + + +
          +

          + {t('settings:integrations.github.defaultBranch.description')} +

          +
          + +
          - {selectedBranch && ( -

          - {t('settings:integrations.github.defaultBranch.selectedBranchHelp', { branch: selectedBranch })} -

          + {branchesError && ( +
          + + {branchesError} +
          + )} + +
          + +
          + + {selectedBranch && ( +

          + {t('settings:integrations.github.defaultBranch.selectedBranchHelp', { branch: selectedBranch })} +

          + )} +
          )} -
          - )} - + - updateEnvConfig({ githubAutoSync: checked })} - /> + updateEnvConfig({ githubAutoSync: checked })} + /> + + )} )}
          @@ -439,6 +667,7 @@ function RepositoryDropdown({ onRefresh, onManualEntry }: RepositoryDropdownProps) { + const { t } = useTranslation('settings'); const [isOpen, setIsOpen] = useState(false); const [filter, setFilter] = useState(''); @@ -452,7 +681,7 @@ function RepositoryDropdown({ return (
          - +
          @@ -491,7 +720,7 @@ function RepositoryDropdown({ {isLoading ? ( - Loading repositories... + {t('github.loadingRepositories')} ) : selectedRepo ? ( @@ -503,7 +732,7 @@ function RepositoryDropdown({ {selectedRepo} ) : ( - Select a repository... + {t('github.selectRepository')} )} @@ -513,7 +742,7 @@ function RepositoryDropdown({ {/* Search filter */}
          setFilter(e.target.value)} className="h-8 text-sm" @@ -525,7 +754,7 @@ function RepositoryDropdown({
          {filteredRepos.length === 0 ? (
          - {filter ? 'No matching repositories' : 'No repositories found'} + {filter ? t('github.noMatchingRepositories') : t('github.noRepositoriesFound')}
          ) : ( filteredRepos.map((repo) => ( @@ -562,7 +791,7 @@ function RepositoryDropdown({ {selectedRepo && (

          - Selected: {selectedRepo} + {t('github.selected')}: {selectedRepo}

          )}
          @@ -575,11 +804,12 @@ interface RepositoryInputProps { } function RepositoryInput({ value, onChange }: RepositoryInputProps) { + const { t } = useTranslation('settings'); return (
          - +

          - Format: owner/repo (e.g., facebook/react) + {t('github.repositoryFormat')}

          -

          Connection Status

          +

          {t('github.connectionStatus')}

          - {isChecking ? 'Checking...' : + {isChecking ? t('github.checking') : connectionStatus?.connected - ? `Connected to ${connectionStatus.repoFullName}` - : connectionStatus?.error || 'Not connected'} + ? t('github.connectedTo', { repo: connectionStatus.repoFullName }) + : connectionStatus?.error || t('github.notConnected')}

          {connectionStatus?.connected && connectionStatus.repoDescription && (

          @@ -626,14 +857,15 @@ function ConnectionStatus({ isChecking, connectionStatus }: ConnectionStatusProp } function IssuesAvailableInfo() { + const { t } = useTranslation('settings'); return (

          -

          Issues Available

          +

          {t('github.issuesAvailable')}

          - Access GitHub Issues from the sidebar to view, investigate, and create tasks from issues. + {t('github.issuesAvailableDescription')}

          @@ -647,15 +879,16 @@ interface AutoSyncToggleProps { } function AutoSyncToggle({ enabled, onToggle }: AutoSyncToggleProps) { + const { t } = useTranslation('settings'); return (
          - +

          - Automatically fetch issues when the project loads + {t('github.autoSyncDescription')}

          diff --git a/apps/frontend/src/renderer/components/settings/integrations/LinearIntegration.tsx b/apps/frontend/src/renderer/components/settings/integrations/LinearIntegration.tsx index 9179d8da3b..d083d451d0 100644 --- a/apps/frontend/src/renderer/components/settings/integrations/LinearIntegration.tsx +++ b/apps/frontend/src/renderer/components/settings/integrations/LinearIntegration.tsx @@ -1,3 +1,4 @@ +import { useTranslation } from 'react-i18next'; import { Radio, Import, Eye, EyeOff, Loader2, CheckCircle2, AlertCircle } from 'lucide-react'; import { Button } from '../../ui/button'; import { Input } from '../../ui/input'; @@ -29,15 +30,16 @@ export function LinearIntegration({ isCheckingLinear, onOpenLinearImport }: LinearIntegrationProps) { + const { t } = useTranslation('settings'); if (!envConfig) return null; return (
          - +

          - Create and update Linear issues automatically + {t('linear.enableSyncDescription')}

          - +

          - Get your API key from{' '} + {t('linear.apiKeyDescription')}{' '} - Linear Settings + {t('linear.linearSettings')}

          @@ -119,20 +121,21 @@ interface ConnectionStatusProps { } function ConnectionStatus({ isChecking, connectionStatus }: ConnectionStatusProps) { + const { t } = useTranslation('settings'); return (
          -

          Connection Status

          +

          {t('linear.connectionStatus')}

          - {isChecking ? 'Checking...' : + {isChecking ? t('linear.checking') : connectionStatus?.connected - ? `Connected${connectionStatus.teamName ? ` to ${connectionStatus.teamName}` : ''}` - : connectionStatus?.error || 'Not connected'} + ? (connectionStatus.teamName ? t('linear.connected', { team: connectionStatus.teamName }) : t('linear.connectedNoTeam')) + : connectionStatus?.error || t('linear.notConnected')}

          {connectionStatus?.connected && connectionStatus.issueCount !== undefined && (

          - {connectionStatus.issueCount}+ tasks available to import + {t('linear.tasksAvailable', { count: connectionStatus.issueCount })}

          )}
          @@ -153,14 +156,15 @@ interface ImportTasksPromptProps { } function ImportTasksPrompt({ onOpenLinearImport }: ImportTasksPromptProps) { + const { t } = useTranslation('settings'); return (
          -

          Import Existing Tasks

          +

          {t('linear.importTitle')}

          - Select which Linear issues to import into AutoBuild as tasks. + {t('linear.importDescription')}

          @@ -183,15 +187,16 @@ interface RealtimeSyncToggleProps { } function RealtimeSyncToggle({ enabled, onToggle }: RealtimeSyncToggleProps) { + const { t } = useTranslation('settings'); return (
          - +

          - Automatically import new tasks created in Linear + {t('linear.realtimeSyncDescription')}

          @@ -200,11 +205,11 @@ function RealtimeSyncToggle({ enabled, onToggle }: RealtimeSyncToggleProps) { } function RealtimeSyncWarning() { + const { t } = useTranslation('settings'); return (

          - When enabled, new Linear issues will be automatically imported into AutoBuild. - Make sure to configure your team/project filters below to control which issues are imported. + {t('linear.realtimeSyncWarning')}

          ); @@ -218,10 +223,11 @@ interface TeamProjectIdsProps { } function TeamProjectIds({ teamId, projectId, onTeamIdChange, onProjectIdChange }: TeamProjectIdsProps) { + const { t } = useTranslation('settings'); return (
          - +
          - + diff --git a/apps/frontend/src/renderer/components/task-detail/TaskProgress.tsx b/apps/frontend/src/renderer/components/task-detail/TaskProgress.tsx index b93f14bf1c..e17c55dc21 100644 --- a/apps/frontend/src/renderer/components/task-detail/TaskProgress.tsx +++ b/apps/frontend/src/renderer/components/task-detail/TaskProgress.tsx @@ -1,3 +1,4 @@ +import { useTranslation } from 'react-i18next'; import { Zap, Loader2 } from 'lucide-react'; import { Progress } from '../ui/progress'; import { cn, calculateProgress } from '../../lib/utils'; @@ -13,6 +14,7 @@ interface TaskProgressProps { } export function TaskProgress({ task, isRunning, hasActiveExecution, executionPhase, isStuck }: TaskProgressProps) { + const { t } = useTranslation('tasks'); const progress = calculateProgress(task.subtasks); return ( @@ -40,7 +42,7 @@ export function TaskProgress({ task, isRunning, hasActiveExecution, executionPha )} {task.executionProgress?.currentSubtask && (

          - Subtask: {task.executionProgress.currentSubtask} + {t('progress.currentSubtask', { subtask: task.executionProgress.currentSubtask })}

          )}
          @@ -50,15 +52,15 @@ export function TaskProgress({ task, isRunning, hasActiveExecution, executionPha {/* Progress Bar */}
          - Progress + {t('progress.title')}
          {hasActiveExecution && task.executionProgress?.message ? task.executionProgress.message : task.subtasks.length > 0 - ? `${task.subtasks.filter(c => c.status === 'completed').length}/${task.subtasks.length} subtasks completed` - : 'No subtasks yet'} + ? t('progress.subtasksCompleted', { completed: task.subtasks.filter(c => c.status === 'completed').length, total: task.subtasks.length }) + : t('progress.noSubtasksYet')}
          )} diff --git a/apps/frontend/src/renderer/lib/browser-mock.ts b/apps/frontend/src/renderer/lib/browser-mock.ts index 8f3d439ef9..4114a04ebd 100644 --- a/apps/frontend/src/renderer/lib/browser-mock.ts +++ b/apps/frontend/src/renderer/lib/browser-mock.ts @@ -177,6 +177,10 @@ const browserMockAPI: ElectronAPI = { getGitHubIssue: async () => ({ success: true, data: null as any }), getIssueComments: async () => ({ success: true, data: [] }), checkGitHubConnection: async () => ({ success: true, data: { connected: false, repoFullName: undefined, error: undefined } }), + checkMultiRepoConnection: async () => ({ success: true, data: { connected: false, repos: [] } }), + getMultiRepoIssues: async () => ({ success: true, data: { issues: [], repos: [], hasMore: false } }), + getMultiRepoIssueDetail: async () => ({ success: false, error: 'Not available in browser mode' }), + getMultiRepoPRs: async () => ({ success: true, data: { prs: [], repos: [] } }), investigateGitHubIssue: () => {}, importGitHubIssues: async () => ({ success: true, data: { success: true, imported: 0, failed: 0, issues: [] } }), createGitHubRelease: async () => ({ success: true, data: { url: '' } }), @@ -187,6 +191,7 @@ const browserMockAPI: ElectronAPI = { getGitHubToken: async () => ({ success: true, data: { token: '' } }), getGitHubUser: async () => ({ success: true, data: { username: '' } }), listGitHubUserRepos: async () => ({ success: true, data: { repos: [] } }), + cloneGitHubRepo: async () => ({ success: true, data: { path: '/mock/path', name: 'mock-repo' } }), detectGitHubRepo: async () => ({ success: true, data: '' }), getGitHubBranches: async () => ({ success: true, data: [] }), createGitHubRepo: async () => ({ success: true, data: { fullName: '', url: '' } }), @@ -347,6 +352,15 @@ const browserMockAPI: ElectronAPI = { checkedAt: new Date().toISOString() } }), + checkGlobalMcpHealth: async (server) => ({ + success: true, + data: { + serverId: server.id, + status: 'unknown' as const, + 
message: 'Health check not available in browser mode', + checkedAt: new Date().toISOString() + } + }), testMcpConnection: async (server) => ({ success: true, data: { @@ -355,6 +369,14 @@ const browserMockAPI: ElectronAPI = { message: 'Connection test not available in browser mode' } }), + getGlobalMcps: async () => ({ + success: true, + data: { pluginServers: [], inlineServers: [], claudeJsonServers: [] } + }), + getClaudeAgents: async () => ({ + success: true, + data: { categories: [], totalAgents: 0 }, + }), // Screenshot capture operations getSources: async () => ({ @@ -380,7 +402,14 @@ const browserMockAPI: ElectronAPI = { openLogsFolder: async () => ({ success: false, error: 'Not available in browser mode' }), copyDebugInfo: async () => ({ success: false, error: 'Not available in browser mode' }), getRecentErrors: async () => [], - listLogFiles: async () => [] + listLogFiles: async () => [], + onIndexProgress: () => () => {}, + + // Ollama Embedding Dimension Lookup (single source of truth) + getOllamaEmbeddingDim: async () => ({ + success: false as const, + error: 'Not available in browser mode' + }), }; /** diff --git a/apps/frontend/src/renderer/lib/mocks/integration-mock.ts b/apps/frontend/src/renderer/lib/mocks/integration-mock.ts index 3e5c0a9fa4..0b81186ccd 100644 --- a/apps/frontend/src/renderer/lib/mocks/integration-mock.ts +++ b/apps/frontend/src/renderer/lib/mocks/integration-mock.ts @@ -114,6 +114,23 @@ export const integrationMock = { } }), + checkMultiRepoConnection: async () => ({ + success: true, + data: { connected: false, repos: [] } + }), + + getMultiRepoIssues: async () => ({ + success: true, + data: { issues: [], repos: [], hasMore: false } + }), + + getMultiRepoIssueDetail: async () => ({ + success: false as const, + error: 'Not available in browser mock' + }), + + getMultiRepoPRs: async () => ({ success: true, data: { prs: [], repos: [] } }), + investigateGitHubIssue: () => { console.warn('[Browser Mock] investigateGitHubIssue called'); 
}, @@ -181,6 +198,11 @@ export const integrationMock = { } }), + cloneGitHubRepo: async () => ({ + success: true, + data: { path: '/Users/demo/customers/mock-repo', name: 'mock-repo' } + }), + detectGitHubRepo: async () => ({ success: true, data: 'user/example-repo' diff --git a/apps/frontend/src/renderer/lib/mocks/project-mock.ts b/apps/frontend/src/renderer/lib/mocks/project-mock.ts index 153600e098..dde16c1502 100644 --- a/apps/frontend/src/renderer/lib/mocks/project-mock.ts +++ b/apps/frontend/src/renderer/lib/mocks/project-mock.ts @@ -33,6 +33,11 @@ export const projectMock = { data: { success: true, version: '1.0.0', wasUpdate: false } }), + initializeCustomerProject: async () => ({ + success: true, + data: { success: true } + }), + checkProjectVersion: async () => ({ success: true, data: { diff --git a/apps/frontend/src/renderer/stores/context-store.ts b/apps/frontend/src/renderer/stores/context-store.ts index b81b6f2ab3..df9f1b5184 100644 --- a/apps/frontend/src/renderer/stores/context-store.ts +++ b/apps/frontend/src/renderer/stores/context-store.ts @@ -12,6 +12,9 @@ interface ContextState { projectIndex: ProjectIndex | null; indexLoading: boolean; indexError: string | null; + indexProgress: string | null; + indexProgressCurrent: number | null; + indexProgressTotal: number | null; // Memory Status memoryStatus: GraphitiMemoryStatus | null; @@ -32,6 +35,7 @@ interface ContextState { setProjectIndex: (index: ProjectIndex | null) => void; setIndexLoading: (loading: boolean) => void; setIndexError: (error: string | null) => void; + setIndexProgress: (message: string | null, current?: number | null, total?: number | null) => void; setMemoryStatus: (status: GraphitiMemoryStatus | null) => void; setMemoryState: (state: GraphitiMemoryState | null) => void; setMemoryLoading: (loading: boolean) => void; @@ -49,6 +53,9 @@ export const useContextStore = create((set) => ({ projectIndex: null, indexLoading: false, indexError: null, + indexProgress: null, + 
indexProgressCurrent: null, + indexProgressTotal: null, // Memory Status memoryStatus: null, @@ -69,6 +76,11 @@ export const useContextStore = create((set) => ({ setProjectIndex: (index) => set({ projectIndex: index }), setIndexLoading: (loading) => set({ indexLoading: loading }), setIndexError: (error) => set({ indexError: error }), + setIndexProgress: (message, current, total) => set({ + indexProgress: message, + indexProgressCurrent: current ?? null, + indexProgressTotal: total ?? null + }), setMemoryStatus: (status) => set({ memoryStatus: status }), setMemoryState: (state) => set({ memoryState: state }), setMemoryLoading: (loading) => set({ memoryLoading: loading }), @@ -83,6 +95,9 @@ export const useContextStore = create((set) => ({ projectIndex: null, indexLoading: false, indexError: null, + indexProgress: null, + indexProgressCurrent: null, + indexProgressTotal: null, memoryStatus: null, memoryState: null, memoryLoading: false, @@ -125,14 +140,16 @@ export async function loadProjectContext(projectId: string): Promise { /** * Refresh project index by re-running analyzer + * @param force - If true, re-runs analyzer even if index already exists (for customer child repos) */ -export async function refreshProjectIndex(projectId: string): Promise { +export async function refreshProjectIndex(projectId: string, force?: boolean): Promise { const store = useContextStore.getState(); store.setIndexLoading(true); store.setIndexError(null); + store.setIndexProgress(null); try { - const result = await window.electronAPI.refreshProjectIndex(projectId); + const result = await window.electronAPI.refreshProjectIndex(projectId, force); if (result.success && result.data) { store.setProjectIndex(result.data); } else { @@ -142,6 +159,7 @@ export async function refreshProjectIndex(projectId: string): Promise { store.setIndexError(error instanceof Error ? 
error.message : 'Unknown error'); } finally { store.setIndexLoading(false); + store.setIndexProgress(null); } } diff --git a/apps/frontend/src/shared/constants/ipc.ts b/apps/frontend/src/shared/constants/ipc.ts index 48b3e95c22..4dc1676a55 100644 --- a/apps/frontend/src/shared/constants/ipc.ts +++ b/apps/frontend/src/shared/constants/ipc.ts @@ -10,6 +10,7 @@ export const IPC_CHANNELS = { PROJECT_LIST: 'project:list', PROJECT_UPDATE_SETTINGS: 'project:updateSettings', PROJECT_INITIALIZE: 'project:initialize', + PROJECT_INIT_CUSTOMER: 'project:initCustomer', PROJECT_CHECK_VERSION: 'project:checkVersion', // Tab state operations (persisted in main process) @@ -201,6 +202,7 @@ export const IPC_CHANNELS = { // Context operations CONTEXT_GET: 'context:get', CONTEXT_REFRESH_INDEX: 'context:refreshIndex', + CONTEXT_INDEX_PROGRESS: 'context:indexProgress', CONTEXT_MEMORY_STATUS: 'context:memoryStatus', CONTEXT_SEARCH_MEMORIES: 'context:searchMemories', CONTEXT_GET_MEMORIES: 'context:getMemories', @@ -250,6 +252,12 @@ export const IPC_CHANNELS = { GITHUB_IMPORT_ISSUES: 'github:importIssues', GITHUB_CREATE_RELEASE: 'github:createRelease', + // Customer multi-repo GitHub operations + GITHUB_CHECK_MULTI_REPO_CONNECTION: 'github:checkMultiRepoConnection', + GITHUB_GET_MULTI_REPO_ISSUES: 'github:getMultiRepoIssues', + GITHUB_GET_MULTI_REPO_ISSUE_DETAIL: 'github:getMultiRepoIssueDetail', + GITHUB_GET_MULTI_REPO_PRS: 'github:getMultiRepoPRs', + // GitHub OAuth (gh CLI authentication) GITHUB_CHECK_CLI: 'github:checkCli', GITHUB_CHECK_AUTH: 'github:checkAuth', @@ -262,6 +270,7 @@ export const IPC_CHANNELS = { GITHUB_CREATE_REPO: 'github:createRepo', GITHUB_ADD_REMOTE: 'github:addRemote', GITHUB_LIST_ORGS: 'github:listOrgs', + GITHUB_CLONE_REPO: 'github:cloneRepo', // GitHub OAuth events (main -> renderer) - for streaming device code during auth GITHUB_AUTH_DEVICE_CODE: 'github:authDeviceCode', @@ -463,6 +472,7 @@ export const IPC_CHANNELS = { OLLAMA_LIST_EMBEDDING_MODELS: 
'ollama:listEmbeddingModels', OLLAMA_PULL_MODEL: 'ollama:pullModel', OLLAMA_PULL_PROGRESS: 'ollama:pullProgress', + OLLAMA_GET_EMBEDDING_DIM: 'ollama:getEmbeddingDim', // Auto Claude source environment configuration AUTOBUILD_SOURCE_ENV_GET: 'autobuild:source:env:get', @@ -567,8 +577,13 @@ export const IPC_CHANNELS = { // MCP Server health checks MCP_CHECK_HEALTH: 'mcp:checkHealth', // Quick connectivity check + MCP_CHECK_GLOBAL_HEALTH: 'mcp:checkGlobalHealth', // Health check for global MCPs (trusted source, no allowlist) MCP_TEST_CONNECTION: 'mcp:testConnection', // Full MCP protocol test + // Claude Code global MCP configuration + CLAUDE_MCP_GET_GLOBAL: 'claude-mcp:getGlobalMcps', + CLAUDE_AGENTS_GET: 'claude-agents:getAgents', + // Sentry error reporting SENTRY_STATE_CHANGED: 'sentry:state-changed', // Notify main process when setting changes GET_SENTRY_DSN: 'sentry:get-dsn', // Get DSN from main process (env var) diff --git a/apps/frontend/src/shared/constants/models.ts b/apps/frontend/src/shared/constants/models.ts index c094bbb346..d3d0134a60 100644 --- a/apps/frontend/src/shared/constants/models.ts +++ b/apps/frontend/src/shared/constants/models.ts @@ -13,7 +13,8 @@ export const AVAILABLE_MODELS = [ { value: 'opus', label: 'Claude Opus 4.6' }, { value: 'opus-1m', label: 'Claude Opus 4.6 (1M)' }, { value: 'opus-4.5', label: 'Claude Opus 4.5' }, - { value: 'sonnet', label: 'Claude Sonnet 4.5' }, + { value: 'sonnet', label: 'Claude Sonnet 4.6' }, + { value: 'sonnet-4.5', label: 'Claude Sonnet 4.5' }, { value: 'haiku', label: 'Claude Haiku 4.5' } ] as const; @@ -23,7 +24,8 @@ export const MODEL_ID_MAP: Record = { opus: 'claude-opus-4-6', 'opus-1m': 'claude-opus-4-6', 'opus-4.5': 'claude-opus-4-5-20251101', - sonnet: 'claude-sonnet-4-5-20250929', + sonnet: 'claude-sonnet-4-6', + 'sonnet-4.5': 'claude-sonnet-4-5-20250929', haiku: 'claude-haiku-4-5-20251001' } as const; diff --git a/apps/frontend/src/shared/i18n/index.ts b/apps/frontend/src/shared/i18n/index.ts 
index 095b0b1188..cc5c556678 100644 --- a/apps/frontend/src/shared/i18n/index.ts +++ b/apps/frontend/src/shared/i18n/index.ts @@ -13,6 +13,7 @@ import enGitlab from './locales/en/gitlab.json'; import enTaskReview from './locales/en/taskReview.json'; import enTerminal from './locales/en/terminal.json'; import enErrors from './locales/en/errors.json'; +import enContext from './locales/en/context.json'; // Import French translation resources import frCommon from './locales/fr/common.json'; @@ -26,6 +27,7 @@ import frGitlab from './locales/fr/gitlab.json'; import frTaskReview from './locales/fr/taskReview.json'; import frTerminal from './locales/fr/terminal.json'; import frErrors from './locales/fr/errors.json'; +import frContext from './locales/fr/context.json'; export const defaultNS = 'common'; @@ -41,7 +43,8 @@ export const resources = { gitlab: enGitlab, taskReview: enTaskReview, terminal: enTerminal, - errors: enErrors + errors: enErrors, + context: enContext }, fr: { common: frCommon, @@ -54,7 +57,8 @@ export const resources = { gitlab: frGitlab, taskReview: frTaskReview, terminal: frTerminal, - errors: frErrors + errors: frErrors, + context: frContext } } as const; @@ -65,7 +69,7 @@ i18n lng: 'en', // Default language (will be overridden by settings) fallbackLng: 'en', defaultNS, - ns: ['common', 'navigation', 'settings', 'tasks', 'welcome', 'onboarding', 'dialogs', 'gitlab', 'taskReview', 'terminal', 'errors'], + ns: ['common', 'navigation', 'settings', 'tasks', 'welcome', 'onboarding', 'dialogs', 'gitlab', 'taskReview', 'terminal', 'errors', 'context'], interpolation: { escapeValue: false // React already escapes values }, diff --git a/apps/frontend/src/shared/i18n/locales/en/common.json b/apps/frontend/src/shared/i18n/locales/en/common.json index 6a2f1d84f2..99eace28a8 100644 --- a/apps/frontend/src/shared/i18n/locales/en/common.json +++ b/apps/frontend/src/shared/i18n/locales/en/common.json @@ -25,7 +25,8 @@ "hideArchivedTasks": "Hide archived tasks", 
"closeTab": "Close tab", "closeTabAriaLabel": "Close tab (removes project from app)", - "addProjectAriaLabel": "Add project" + "addProjectAriaLabel": "Add project", + "dragHandle": "Drag to reorder {{name}}" }, "accessibility": { "deleteFeatureAriaLabel": "Delete feature", @@ -142,7 +143,9 @@ "justNow": "Just now", "minutesAgo": "{{count}}m ago", "hoursAgo": "{{count}}h ago", - "daysAgo": "{{count}}d ago" + "daysAgo": "{{count}}d ago", + "yesterday": "yesterday", + "weeksAgo": "{{count}}w ago" }, "errors": { "generic": "An error occurred", @@ -431,6 +434,13 @@ "agentActivity": "Agent Activity", "showMore": "Show {{count}} more", "hideMore": "Hide {{count}} more" + }, + "reposCount": "{{count}} repos", + "multiRepo": { + "failedToCheckConnection": "Failed to check multi-repo connection", + "failedToLoadPRs": "Failed to load PRs", + "failedToRefreshPRs": "Failed to refresh PRs", + "unknownError": "Unknown error" } }, "downloads": { @@ -446,7 +456,22 @@ "failedLabel": "Failed", "starting": "Starting..." }, + "memory": { + "whatWorked": "What Worked", + "whatFailed": "What Failed", + "approach": "Approach", + "recommendations": "Recommendations", + "patterns": "Patterns", + "gotchas": "Gotchas", + "changedFiles": "Changed Files", + "fileInsights": "File Insights", + "subtasksCompleted": "Subtasks Completed", + "expand": "Expand", + "collapse": "Collapse", + "searchPlaceholder": "Search for patterns, insights, gotchas..." 
+ }, "insights": { + "placeholder": "Ask about your codebase...", "suggestedTask": "Suggested Task", "creating": "Creating...", "taskCreated": "Task Created", @@ -492,6 +517,7 @@ } }, "ideation": { + "noIdeasToDisplay": "No ideas to display", "converting": "Converting...", "convertToTask": "Convert to Auto-Build Task", "dismissIdea": "Dismiss Idea", @@ -504,9 +530,18 @@ "conversionErrorDescription": "An error occurred while converting the idea" }, "issues": { + "noIssuesFound": "No issues found", + "selectIssueToView": "Select an issue to view details", "loadingMore": "Loading more...", "scrollForMore": "Scroll for more", - "allLoaded": "All issues loaded" + "allLoaded": "All issues loaded", + "reposCount": "{{count}} repos", + "multiRepo": { + "failedToCheckConnection": "Failed to check multi-repo connection", + "failedToLoadIssues": "Failed to load issues", + "failedToRefreshIssues": "Failed to refresh issues", + "unknownError": "Unknown error" + } }, "usage": { "dataUnavailable": "Usage data unavailable", @@ -712,6 +747,8 @@ } }, "auth": { + "claudeAuthRequired": "Claude Authentication Required", + "claudeAuthRequiredDescription": "A Claude Code OAuth token is required to generate AI-powered feature ideas.", "failure": { "title": "Authentication Required", "profileLabel": "Profile", diff --git a/apps/frontend/src/shared/i18n/locales/en/context.json b/apps/frontend/src/shared/i18n/locales/en/context.json new file mode 100644 index 0000000000..a865e893a8 --- /dev/null +++ b/apps/frontend/src/shared/i18n/locales/en/context.json @@ -0,0 +1,36 @@ +{ + "projectIndex": { + "title": "Project Structure", + "subtitle": "AI-discovered knowledge about your codebase", + "reanalyze": "Re-analyze", + "reanalyzeTooltip": "Force re-analyze all project structures from scratch", + "refresh": "Refresh", + "analyzeTooltip": "Analyze project structure", + "errorTitle": "Failed to load project index", + "analyzing": "Analyzing project structure...", + "repoProgress": "Repository 
{{current}} of {{total}}", + "noIndexTitle": "No Project Index Found", + "noIndexDescription": "Click the button below to analyze your project structure and create an index.", + "analyzeButton": "Analyze Project", + "overview": "Overview", + "serviceCount": "{{count}} service", + "serviceCount_other": "{{count}} services", + "repoCount": "{{count}} repository", + "repoCount_other": "{{count}} repositories", + "repositories": "Repositories", + "services": "Services", + "infrastructure": "Infrastructure", + "dockerCompose": "Docker Compose", + "ciCd": "CI/CD", + "deployment": "Deployment", + "dockerServices": "Docker Services", + "conventions": "Conventions", + "pythonLinting": "Python Linting", + "jsLinting": "JS Linting", + "formatting": "Formatting", + "gitHooks": "Git Hooks", + "typescript": "TypeScript", + "enabled": "Enabled", + "svcCount": "{{count}} svc" + } +} diff --git a/apps/frontend/src/shared/i18n/locales/en/dialogs.json b/apps/frontend/src/shared/i18n/locales/en/dialogs.json index 74ba84802f..d91ea68da2 100644 --- a/apps/frontend/src/shared/i18n/locales/en/dialogs.json +++ b/apps/frontend/src/shared/i18n/locales/en/dialogs.json @@ -134,6 +134,49 @@ "openExistingAriaLabel": "Open existing project folder", "createNewAriaLabel": "Create new project" }, + "addCustomer": { + "title": "Add Customer", + "description": "Create a new customer folder or select an existing one", + "createNew": "Create New Folder", + "createNewDescription": "Start fresh with a new customer folder", + "createNewSubtitle": "Set up a new customer folder", + "createNewAriaLabel": "Create new customer folder", + "openExisting": "Open Existing Folder", + "openExistingDescription": "Browse to an existing customer folder on your computer", + "openExistingAriaLabel": "Open existing customer folder", + "customerName": "Customer Name", + "customerNamePlaceholder": "e.g., Acme Corp", + "location": "Location", + "locationPlaceholder": "Select a folder...", + "browse": "Browse", + "willCreate": 
"Will create:", + "back": "Back", + "creating": "Creating...", + "createCustomer": "Create Customer", + "nameRequired": "Please enter a customer name", + "locationRequired": "Please select a location", + "failedToOpen": "Failed to open customer folder", + "failedToCreate": "Failed to create customer folder" + }, + "customerRepos": { + "title": "Clone Repositories", + "description": "Select repositories to clone into {{name}}'s folder", + "searchPlaceholder": "Search repositories...", + "loading": "Loading repositories...", + "failedToLoad": "Failed to load repositories", + "noResults": "No repositories match your search", + "noRepos": "No repositories found", + "clone": "Clone", + "cloning": "Cloning...", + "cloned": "Cloned", + "cloneFailed": "Failed to clone repository", + "retry": "Retry", + "clonedCount": "{{count}} repository cloned", + "clonedCount_other": "{{count}} repositories cloned", + "done": "Done", + "private": "Private repository", + "public": "Public repository" + }, "customModel": { "title": "Custom Model Configuration", "description": "Configure the model and thinking level for this chat session.", @@ -220,6 +263,12 @@ "skipDescription": "Generate roadmap without any competitor insights.", "cancel": "Cancel" }, + "envConfig": { + "profileNotFound": "Profile not found", + "failedToSaveToken": "Failed to save token", + "invalidProfileCredentials": "Selected profile does not have valid credentials. 
Please re-authenticate.", + "failedToUseProfile": "Failed to use profile" + }, "versionWarning": { "title": "Action Required", "subtitle": "Version 2.7.5 Update", diff --git a/apps/frontend/src/shared/i18n/locales/en/navigation.json b/apps/frontend/src/shared/i18n/locales/en/navigation.json index 6d4fb1b3d3..7937ab86d8 100644 --- a/apps/frontend/src/shared/i18n/locales/en/navigation.json +++ b/apps/frontend/src/shared/i18n/locales/en/navigation.json @@ -18,6 +18,18 @@ "worktrees": "Worktrees", "agentTools": "MCP Overview" }, + "projectSelector": { + "placeholder": "Select a project", + "selectRepo": "Select a repository", + "addProject": "Add Project", + "addCustomer": "Add Customer" + }, + "multiRepo": { + "allRepos": "All Repos", + "filterByRepo": "Filter by repository", + "repoCount_one": "{{count}} repo", + "repoCount_other": "{{count}} repos" + }, "actions": { "settings": "Settings", "help": "Help & Feedback", diff --git a/apps/frontend/src/shared/i18n/locales/en/settings.json b/apps/frontend/src/shared/i18n/locales/en/settings.json index bc7fd8fa8f..9a85e8ff45 100644 --- a/apps/frontend/src/shared/i18n/locales/en/settings.json +++ b/apps/frontend/src/shared/i18n/locales/en/settings.json @@ -184,7 +184,12 @@ "fineTuneDescription": "Adjust from 75% to 200% in 5% increments", "default": "Default", "comfortable": "Comfortable", - "large": "Large" + "large": "Large", + "resetToDefault": "Reset to default (100%)", + "decreaseScale": "Decrease scale by {{step}}%", + "increaseScale": "Increase scale by {{step}}%", + "applyChanges": "Apply scale changes", + "apply": "Apply" }, "logOrder": { "label": "Log Order", @@ -255,7 +260,9 @@ "dark": "Dark", "system": "System", "colorTheme": "Color Theme", - "colorThemeDescription": "Choose your preferred color palette" + "colorThemeDescription": "Choose your preferred color palette", + "backgroundColorTitle": "Background color", + "accentColorTitle": "Accent color" }, "devtools": { "title": "Developer Tools", @@ -342,7 +349,7 
@@ "projectSections": { "general": { "title": "General", - "description": "Auto-Build and agent config", + "description": "Configure Auto-Build, agent model, and notifications for {{name}}", "useClaudeMd": "Use CLAUDE.md", "useClaudeMdDescription": "Include CLAUDE.md instructions in agent context" }, @@ -408,6 +415,11 @@ "resetToProfileDefaults": "Reset to {{profile}} defaults", "customized": "Customized", "phaseConfigNote": "These settings will be used as defaults when creating new tasks with this profile. You can override them per-task in the task creation wizard.", + "availableAgents": { + "title": "Available Specialist Agents", + "description": "All agents are available for automatic use during builds", + "info": "These agents from ~/.claude/agents/ are automatically available during task execution. The system selects the most relevant specialist based on the task context — no manual assignment needed." + }, "adaptiveThinking": { "badge": "Adaptive", "tooltip": "Opus uses adaptive thinking — it dynamically decides how much to think within the budget cap set by the thinking level." @@ -759,6 +771,40 @@ "createAnthropicKey": "Create Anthropic API Key", "openai": "This looks like an OpenAI API. 
You'll need an API key.", "createOpenaiKey": "Create OpenAI API Key" + }, + "globalMcps": { + "title": "Claude Code MCPs (Global)", + "description": "MCP servers configured in your Claude Code (~/.claude.json and ~/.claude/settings.json)", + "noGlobalMcps": "No global MCP servers configured in Claude Code", + "source": { + "plugin": "Plugin", + "settings": "Settings", + "claudeJson": "Claude Config" + }, + "badge": "Global", + "serverType": { + "command": "Command", + "http": "HTTP", + "sse": "SSE" + }, + "refreshTooltip": "Refresh global MCP list", + "readOnly": "Read-only — configure in Claude Code CLI settings", + "checkHealth": "Check Health", + "healthCheckFailed": "Health check failed", + "statusUnknown": "Status unknown — click Check Health", + "useIn": "Use in:", + "phases": { + "spec": "Spec", + "build": "Build", + "qa": "QA", + "utility": "Utility", + "ideation": "Ideation" + } + }, + "customAgents": { + "title": "Custom Agents (Global)", + "description": "Custom agent definitions from ~/.claude/agents/", + "refreshTooltip": "Refresh agent list" } }, "terminalFonts": { @@ -901,5 +947,74 @@ "label": "PR Template Filler", "description": "AI-fills GitHub PR templates from code changes" } + }, + "projectSelector": { + "placeholder": "Select a project...", + "noProjects": "No projects yet", + "addProject": "Add Project...", + "addCustomer": "Add Customer..." 
+ }, + "linear": { + "enableSync": "Enable Linear Sync", + "enableSyncDescription": "Create and update Linear issues automatically", + "apiKey": "API Key", + "apiKeyDescription": "Get your API key from", + "linearSettings": "Linear Settings", + "connectionStatus": "Connection Status", + "checking": "Checking...", + "connected": "Connected to {{team}}", + "connectedNoTeam": "Connected", + "notConnected": "Not connected", + "tasksAvailable": "{{count}}+ tasks available to import", + "importTitle": "Import Existing Tasks", + "importDescription": "Select which Linear issues to import into AutoBuild as tasks.", + "importButton": "Import Tasks from Linear", + "realtimeSync": "Real-time Sync", + "realtimeSyncDescription": "Automatically import new tasks created in Linear", + "realtimeSyncWarning": "When enabled, new Linear issues will be automatically imported into AutoBuild. Make sure to configure your team/project filters below to control which issues are imported.", + "teamId": "Team ID (Optional)", + "projectId": "Project ID (Optional)" + }, + "github": { + "enableIssues": "Enable GitHub Issues", + "enableIssuesDescription": "Sync issues from GitHub and create tasks automatically", + "connectedViaCLI": "Connected via GitHub CLI", + "authenticatedAs": "Authenticated as {{username}}", + "useDifferentToken": "Use Different Token", + "authentication": "GitHub Authentication", + "useManualToken": "Use Manual Token", + "personalAccessToken": "Personal Access Token", + "useOAuthInstead": "Use OAuth Instead", + "tokenInstructions": "Create a token with", + "tokenScopeFrom": "scope from", + "githubSettings": "GitHub Settings", + "clonedRepositories": "Cloned Repositories", + "cloneRepositories": "Clone Repositories", + "cloned": "Cloned", + "cloning": "Cloning", + "clone": "Clone", + "refresh": "Refresh", + "loadRepos": "Load Repos", + "searchRepos": "Search repositories...", + "repository": "Repository", + "repositoryFormat": "Format: owner/repo (e.g., facebook/react)", + 
"selectRepository": "Select a repository...", + "enterManually": "Enter Manually", + "loadingRepositories": "Loading repositories...", + "noMatchingRepositories": "No matching repositories", + "noRepositoriesFound": "No repositories found", + "selected": "Selected", + "connectionStatus": "Connection Status", + "checking": "Checking...", + "connectedTo": "Connected to {{repo}}", + "notConnected": "Not connected", + "issuesAvailable": "Issues Available", + "issuesAvailableDescription": "Access GitHub Issues from the sidebar to view, investigate, and create tasks from issues.", + "autoSyncOnLoad": "Auto-Sync on Load", + "autoSyncDescription": "Automatically fetch issues when the project loads", + "failedToLoadBranches": "Failed to load branches", + "failedToLoadRepositories": "Failed to load repositories", + "cloneFailed": "Clone failed", + "failedToRegisterProject": "Failed to register cloned project" } } diff --git a/apps/frontend/src/shared/i18n/locales/en/tasks.json b/apps/frontend/src/shared/i18n/locales/en/tasks.json index ecd78827ad..67b0995558 100644 --- a/apps/frontend/src/shared/i18n/locales/en/tasks.json +++ b/apps/frontend/src/shared/i18n/locales/en/tasks.json @@ -346,6 +346,16 @@ "deletePermanently": "Delete Permanently", "deleting": "Deleting..." }, + "progress": { + "title": "Progress", + "currentSubtask": "Subtask: {{subtask}}", + "subtasksCompleted": "{{completed}}/{{total}} subtasks completed", + "noSubtasksYet": "No subtasks yet", + "phasePlanning": "Planning (0-20%)", + "phaseCoding": "Coding (20-80%)", + "phaseAIReview": "AI Review (80-95%)", + "phaseComplete": "Complete (95-100%)" + }, "referenceImages": { "title": "Reference Images (optional)", "description": "Add visual references like screenshots or designs to help the AI understand your requirements." 
diff --git a/apps/frontend/src/shared/i18n/locales/fr/common.json b/apps/frontend/src/shared/i18n/locales/fr/common.json index 113736f227..23684796e8 100644 --- a/apps/frontend/src/shared/i18n/locales/fr/common.json +++ b/apps/frontend/src/shared/i18n/locales/fr/common.json @@ -25,7 +25,8 @@ "hideArchivedTasks": "Masquer les tâches archivées", "closeTab": "Fermer l'onglet", "closeTabAriaLabel": "Fermer l'onglet (retire le projet de l'application)", - "addProjectAriaLabel": "Ajouter un projet" + "addProjectAriaLabel": "Ajouter un projet", + "dragHandle": "Glisser pour réorganiser {{name}}" }, "accessibility": { "deleteFeatureAriaLabel": "Supprimer la fonctionnalité", @@ -142,7 +143,9 @@ "justNow": "À l'instant", "minutesAgo": "Il y a {{count}} min", "hoursAgo": "Il y a {{count}}h", - "daysAgo": "Il y a {{count}}j" + "daysAgo": "Il y a {{count}}j", + "yesterday": "Hier", + "weeksAgo": "Il y a {{count}} sem" }, "errors": { "generic": "Une erreur s'est produite", @@ -431,6 +434,13 @@ "agentActivity": "Activité des agents", "showMore": "Afficher {{count}} de plus", "hideMore": "Masquer {{count}}" + }, + "reposCount": "{{count}} dépôts", + "multiRepo": { + "failedToCheckConnection": "Échec de la vérification de la connexion multi-dépôts", + "failedToLoadPRs": "Échec du chargement des PRs", + "failedToRefreshPRs": "Échec de l'actualisation des PRs", + "unknownError": "Erreur inconnue" + } }, "downloads": { @@ -446,7 +456,22 @@ "failedLabel": "Échoué", "starting": "Démarrage..." }, + "memory": { + "whatWorked": "Ce qui a fonctionné", + "whatFailed": "Ce qui a échoué", + "approach": "Approche", + "recommendations": "Recommandations", + "patterns": "Modèles", + "gotchas": "Pièges", + "changedFiles": "Fichiers modifiés", + "fileInsights": "Informations sur les fichiers", + "subtasksCompleted": "Sous-tâches terminées", + "expand": "Développer", + "collapse": "Réduire", + "searchPlaceholder": "Rechercher des modèles, informations, pièges..." 
+ }, "insights": { + "placeholder": "Posez une question sur votre codebase...", "suggestedTask": "Tâche suggérée", "creating": "Création...", "taskCreated": "Tâche créée", @@ -492,6 +517,7 @@ } }, "ideation": { + "noIdeasToDisplay": "Aucune idée à afficher", "converting": "Conversion...", "convertToTask": "Convertir en tâche Auto-Build", "dismissIdea": "Ignorer l'idée", @@ -504,9 +530,18 @@ "conversionErrorDescription": "Une erreur s'est produite lors de la conversion de l'idée" }, "issues": { + "noIssuesFound": "Aucune issue trouvée", + "selectIssueToView": "Sélectionnez une issue pour voir les détails", "loadingMore": "Chargement...", "scrollForMore": "Défiler pour plus", - "allLoaded": "Toutes les issues chargées" + "allLoaded": "Toutes les issues chargées", + "reposCount": "{{count}} dépôts", + "multiRepo": { + "failedToCheckConnection": "Échec de la vérification de la connexion multi-dépôts", + "failedToLoadIssues": "Échec du chargement des issues", + "failedToRefreshIssues": "Échec de l'actualisation des issues", + "unknownError": "Erreur inconnue" + } }, "usage": { "dataUnavailable": "Données d'utilisation non disponibles", @@ -712,6 +747,8 @@ } }, "auth": { + "claudeAuthRequired": "Authentification Claude requise", + "claudeAuthRequiredDescription": "Un token OAuth Claude Code est requis pour générer des idées de fonctionnalités.", "failure": { "title": "Authentification requise", "profileLabel": "Profil", diff --git a/apps/frontend/src/shared/i18n/locales/fr/context.json b/apps/frontend/src/shared/i18n/locales/fr/context.json new file mode 100644 index 0000000000..7073893c96 --- /dev/null +++ b/apps/frontend/src/shared/i18n/locales/fr/context.json @@ -0,0 +1,36 @@ +{ + "projectIndex": { + "title": "Structure du projet", + "subtitle": "Connaissances sur votre code découvertes par l'IA", + "reanalyze": "Ré-analyser", + "reanalyzeTooltip": "Forcer la ré-analyse de toutes les structures du projet", + "refresh": "Actualiser", + "analyzeTooltip": "Analyser la 
structure du projet", + "errorTitle": "Échec du chargement de l'index du projet", + "analyzing": "Analyse de la structure du projet...", + "repoProgress": "Dépôt {{current}} sur {{total}}", + "noIndexTitle": "Aucun index de projet trouvé", + "noIndexDescription": "Cliquez sur le bouton ci-dessous pour analyser la structure de votre projet et créer un index.", + "analyzeButton": "Analyser le projet", + "overview": "Aperçu", + "serviceCount": "{{count}} service", + "serviceCount_other": "{{count}} services", + "repoCount": "{{count}} dépôt", + "repoCount_other": "{{count}} dépôts", + "repositories": "Dépôts", + "services": "Services", + "infrastructure": "Infrastructure", + "dockerCompose": "Docker Compose", + "ciCd": "CI/CD", + "deployment": "Déploiement", + "dockerServices": "Services Docker", + "conventions": "Conventions", + "pythonLinting": "Linting Python", + "jsLinting": "Linting JS", + "formatting": "Formatage", + "gitHooks": "Hooks Git", + "typescript": "TypeScript", + "enabled": "Activé", + "svcCount": "{{count}} svc" + } +} diff --git a/apps/frontend/src/shared/i18n/locales/fr/dialogs.json b/apps/frontend/src/shared/i18n/locales/fr/dialogs.json index 87a2f6a918..db7592cd0d 100644 --- a/apps/frontend/src/shared/i18n/locales/fr/dialogs.json +++ b/apps/frontend/src/shared/i18n/locales/fr/dialogs.json @@ -134,6 +134,49 @@ "openExistingAriaLabel": "Ouvrir un dossier de projet existant", "createNewAriaLabel": "Créer un nouveau projet" }, + "addCustomer": { + "title": "Ajouter un client", + "description": "Créer un nouveau dossier client ou sélectionner un existant", + "createNew": "Créer un nouveau dossier", + "createNewDescription": "Commencer avec un nouveau dossier client", + "createNewSubtitle": "Configurer un nouveau dossier client", + "createNewAriaLabel": "Créer un nouveau dossier client", + "openExisting": "Ouvrir un dossier existant", + "openExistingDescription": "Parcourir vers un dossier client existant sur votre ordinateur", + 
"openExistingAriaLabel": "Ouvrir un dossier client existant", + "customerName": "Nom du client", + "customerNamePlaceholder": "ex. Acme Corp", + "location": "Emplacement", + "locationPlaceholder": "Sélectionner un dossier...", + "browse": "Parcourir", + "willCreate": "Va créer :", + "back": "Retour", + "creating": "Création en cours...", + "createCustomer": "Créer le client", + "nameRequired": "Veuillez entrer un nom de client", + "locationRequired": "Veuillez sélectionner un emplacement", + "failedToOpen": "Échec de l'ouverture du dossier client", + "failedToCreate": "Échec de la création du dossier client" + }, + "customerRepos": { + "title": "Cloner des dépôts", + "description": "Sélectionnez les dépôts à cloner dans le dossier de {{name}}", + "searchPlaceholder": "Rechercher des dépôts...", + "loading": "Chargement des dépôts...", + "failedToLoad": "Échec du chargement des dépôts", + "noResults": "Aucun dépôt ne correspond à votre recherche", + "noRepos": "Aucun dépôt trouvé", + "clone": "Cloner", + "cloning": "Clonage...", + "cloned": "Cloné", + "cloneFailed": "Échec du clonage du dépôt", + "retry": "Réessayer", + "clonedCount": "{{count}} dépôt cloné", + "clonedCount_other": "{{count}} dépôts clonés", + "done": "Terminé", + "private": "Dépôt privé", + "public": "Dépôt public" + }, "customModel": { "title": "Configuration du modèle personnalisé", "description": "Configurez le modèle et le niveau de réflexion pour cette session de chat.", @@ -220,6 +263,12 @@ "skipDescription": "Générer la feuille de route sans informations concurrentielles.", "cancel": "Annuler" }, + "envConfig": { + "profileNotFound": "Profil introuvable", + "failedToSaveToken": "Échec de la sauvegarde du jeton", + "invalidProfileCredentials": "Le profil sélectionné n'a pas d'identifiants valides. 
Veuillez vous ré-authentifier.", + "failedToUseProfile": "Échec de l'utilisation du profil" + }, "versionWarning": { "title": "Action requise", "subtitle": "Mise à jour version 2.7.5", diff --git a/apps/frontend/src/shared/i18n/locales/fr/navigation.json b/apps/frontend/src/shared/i18n/locales/fr/navigation.json index 2c1a1fed82..cca250d99a 100644 --- a/apps/frontend/src/shared/i18n/locales/fr/navigation.json +++ b/apps/frontend/src/shared/i18n/locales/fr/navigation.json @@ -18,6 +18,18 @@ "worktrees": "Worktrees", "agentTools": "Aperçu MCP" }, + "projectSelector": { + "placeholder": "Sélectionner un projet", + "selectRepo": "Sélectionner un dépôt", + "addProject": "Ajouter un projet", + "addCustomer": "Ajouter un client" + }, + "multiRepo": { + "allRepos": "Tous les dépôts", + "filterByRepo": "Filtrer par dépôt", + "repoCount_one": "{{count}} dépôt", + "repoCount_other": "{{count}} dépôts" + }, "actions": { "settings": "Paramètres", "help": "Aide & Feedback", diff --git a/apps/frontend/src/shared/i18n/locales/fr/settings.json b/apps/frontend/src/shared/i18n/locales/fr/settings.json index 8d506e900f..964180b435 100644 --- a/apps/frontend/src/shared/i18n/locales/fr/settings.json +++ b/apps/frontend/src/shared/i18n/locales/fr/settings.json @@ -184,7 +184,12 @@ "fineTuneDescription": "Ajustez de 75% à 200% par incréments de 5%", "default": "Par défaut", "comfortable": "Confortable", - "large": "Grand" + "large": "Grand", + "resetToDefault": "Réinitialiser par défaut (100%)", + "decreaseScale": "Diminuer l'échelle de {{step}}%", + "increaseScale": "Augmenter l'échelle de {{step}}%", + "applyChanges": "Appliquer les changements d'échelle", + "apply": "Appliquer" }, "logOrder": { "label": "Ordre des journaux", @@ -255,7 +260,9 @@ "dark": "Sombre", "system": "Système", "colorTheme": "Thème de couleur", - "colorThemeDescription": "Choisissez votre palette de couleurs préférée" + "colorThemeDescription": "Choisissez votre palette de couleurs préférée", + 
"backgroundColorTitle": "Couleur d'arrière-plan", + "accentColorTitle": "Couleur d'accentuation" }, "devtools": { "title": "Outils de développement", @@ -342,7 +349,7 @@ "projectSections": { "general": { "title": "Général", - "description": "Auto-Build et configuration de l'agent", + "description": "Configurer Auto-Build, modèle d'agent et notifications pour {{name}}", "useClaudeMd": "Utiliser CLAUDE.md", "useClaudeMdDescription": "Inclure les instructions CLAUDE.md dans le contexte de l'agent" }, @@ -408,6 +415,11 @@ "resetToProfileDefaults": "Réinitialiser aux défauts de {{profile}}", "customized": "Personnalisé", "phaseConfigNote": "Ces paramètres seront utilisés par défaut lors de la création de nouvelles tâches avec ce profil. Vous pouvez les modifier par tâche dans l'assistant de création.", + "availableAgents": { + "title": "Agents Spécialistes Disponibles", + "description": "Tous les agents sont disponibles pour utilisation automatique", + "info": "Ces agents de ~/.claude/agents/ sont automatiquement disponibles pendant l'exécution des tâches. Le système sélectionne le spécialiste le plus pertinent en fonction du contexte — aucune assignation manuelle nécessaire." + }, "adaptiveThinking": { "badge": "Adaptatif", "tooltip": "Opus utilise la réflexion adaptative — il décide dynamiquement de la profondeur de réflexion dans la limite du budget défini par le niveau de réflexion." @@ -759,6 +771,40 @@ "createAnthropicKey": "Créer une clé API Anthropic", "openai": "Ceci ressemble à une API OpenAI. 
Vous aurez besoin d'une clé API.", + "createOpenaiKey": "Créer une clé API OpenAI" + }, + "globalMcps": { + "title": "MCPs Claude Code (Global)", + "description": "Serveurs MCP configurés dans Claude Code (~/.claude.json et ~/.claude/settings.json)", + "noGlobalMcps": "Aucun serveur MCP global configuré dans Claude Code", + "source": { + "plugin": "Plugin", + "settings": "Paramètres", + "claudeJson": "Config Claude" + }, + "badge": "Global", + "serverType": { + "command": "Commande", + "http": "HTTP", + "sse": "SSE" + }, + "refreshTooltip": "Actualiser la liste des MCP globaux", + "readOnly": "Lecture seule — configurez dans les paramètres Claude Code CLI", + "checkHealth": "Vérifier", + "healthCheckFailed": "Échec de la vérification de santé", + "statusUnknown": "Statut inconnu — cliquez sur Vérifier", + "useIn": "Utiliser dans :", + "phases": { + "spec": "Spec", + "build": "Build", + "qa": "QA", + "utility": "Utilitaire", + "ideation": "Idéation" + } + }, + "customAgents": { + "title": "Agents Personnalisés (Global)", + "description": "Définitions d'agents personnalisés depuis ~/.claude/agents/", + "refreshTooltip": "Actualiser la liste des agents" } }, "terminalFonts": { @@ -901,5 +947,74 @@ "label": "Remplisseur de modèle PR", "description": "Remplit intelligemment les modèles de PR GitHub à partir des changements de code" } + }, + "projectSelector": { + "placeholder": "Sélectionner un projet...", + "noProjects": "Aucun projet", + "addProject": "Ajouter un projet...", + "addCustomer": "Ajouter un client..." 
+ }, + "linear": { + "enableSync": "Activer la synchronisation Linear", + "enableSyncDescription": "Créer et mettre à jour les issues Linear automatiquement", + "apiKey": "Clé API", + "apiKeyDescription": "Obtenez votre clé API depuis", + "linearSettings": "Paramètres Linear", + "connectionStatus": "État de la connexion", + "checking": "Vérification...", + "connected": "Connecté à {{team}}", + "connectedNoTeam": "Connecté", + "notConnected": "Non connecté", + "tasksAvailable": "{{count}}+ tâches disponibles à importer", + "importTitle": "Importer les tâches existantes", + "importDescription": "Sélectionnez les issues Linear à importer dans AutoBuild.", + "importButton": "Importer depuis Linear", + "realtimeSync": "Synchronisation en temps réel", + "realtimeSyncDescription": "Importer automatiquement les nouvelles tâches créées dans Linear", + "realtimeSyncWarning": "Lorsqu'activé, les nouvelles issues Linear seront automatiquement importées dans AutoBuild. Assurez-vous de configurer les filtres équipe/projet ci-dessous.", + "teamId": "ID d'équipe (Optionnel)", + "projectId": "ID de projet (Optionnel)" + }, + "github": { + "enableIssues": "Activer les issues GitHub", + "enableIssuesDescription": "Synchroniser les issues depuis GitHub et créer des tâches automatiquement", + "connectedViaCLI": "Connecté via GitHub CLI", + "authenticatedAs": "Authentifié en tant que {{username}}", + "useDifferentToken": "Utiliser un autre token", + "authentication": "Authentification GitHub", + "useManualToken": "Utiliser un token manuel", + "personalAccessToken": "Token d'accès personnel", + "useOAuthInstead": "Utiliser OAuth à la place", + "tokenInstructions": "Créez un token avec", + "tokenScopeFrom": "scope depuis", + "githubSettings": "Paramètres GitHub", + "clonedRepositories": "Dépôts clonés", + "cloneRepositories": "Cloner des dépôts", + "cloned": "Cloné", + "cloning": "Clonage", + "clone": "Cloner", + "refresh": "Actualiser", + "loadRepos": "Charger les dépôts", + 
"searchRepos": "Rechercher des dépôts...", + "repository": "Dépôt", + "repositoryFormat": "Format : owner/repo (ex. : facebook/react)", + "selectRepository": "Sélectionner un dépôt...", + "enterManually": "Saisir manuellement", + "loadingRepositories": "Chargement des dépôts...", + "noMatchingRepositories": "Aucun dépôt correspondant", + "noRepositoriesFound": "Aucun dépôt trouvé", + "selected": "Sélectionné", + "connectionStatus": "État de la connexion", + "checking": "Vérification...", + "connectedTo": "Connecté à {{repo}}", + "notConnected": "Non connecté", + "issuesAvailable": "Issues disponibles", + "issuesAvailableDescription": "Accédez aux issues GitHub depuis la barre latérale pour les consulter, les analyser et créer des tâches.", + "autoSyncOnLoad": "Synchronisation automatique", + "autoSyncDescription": "Synchroniser automatiquement les issues au chargement du projet", + "failedToLoadBranches": "Échec du chargement des branches", + "failedToLoadRepositories": "Échec du chargement des dépôts", + "cloneFailed": "Échec du clonage", + "failedToRegisterProject": "Échec de l'enregistrement du projet cloné" } } diff --git a/apps/frontend/src/shared/i18n/locales/fr/tasks.json b/apps/frontend/src/shared/i18n/locales/fr/tasks.json index 00af23a49e..d7325e6c96 100644 --- a/apps/frontend/src/shared/i18n/locales/fr/tasks.json +++ b/apps/frontend/src/shared/i18n/locales/fr/tasks.json @@ -346,6 +346,16 @@ "deletePermanently": "Supprimer d\u00e9finitivement", "deleting": "Suppression..." 
}, + "progress": { + "title": "Progression", + "currentSubtask": "Sous-tâche : {{subtask}}", + "subtasksCompleted": "{{completed}}/{{total}} sous-tâches terminées", + "noSubtasksYet": "Aucune sous-tâche pour le moment", + "phasePlanning": "Planification (0-20%)", + "phaseCoding": "Développement (20-80%)", + "phaseAIReview": "Revue IA (80-95%)", + "phaseComplete": "Terminé (95-100%)" + }, "referenceImages": { "title": "Images de référence (facultatif)", "description": "Ajoutez des références visuelles comme des captures d'écran ou des conceptions pour aider l'IA à comprendre vos exigences." diff --git a/apps/frontend/src/shared/types/integrations.ts b/apps/frontend/src/shared/types/integrations.ts index 741e388f33..bff5e22135 100644 --- a/apps/frontend/src/shared/types/integrations.ts +++ b/apps/frontend/src/shared/types/integrations.ts @@ -122,6 +122,47 @@ export interface GitHubSyncStatus { error?: string; } +/** + * Multi-repo GitHub connection status for Customer projects + */ +export interface MultiRepoGitHubStatus { + connected: boolean; + repos: { projectId: string; repoFullName: string }[]; + error?: string; +} + +/** + * Result type for multi-repo issue fetching + */ +export interface MultiRepoIssuesResult { + issues: GitHubIssue[]; + repos: string[]; + hasMore: boolean; +} + +export interface MultiRepoPRData { + number: number; + title: string; + body: string; + state: string; + author: { login: string }; + headRefName: string; + baseRefName: string; + additions: number; + deletions: number; + changedFiles: number; + assignees: Array<{ login: string }>; + createdAt: string; + updatedAt: string; + htmlUrl: string; + repoFullName: string; +} + +export interface MultiRepoPRsResult { + prs: MultiRepoPRData[]; + repos: string[]; +} + export interface GitHubImportResult { success: boolean; imported: number; @@ -478,3 +519,84 @@ export interface RoadmapProviderConfig { * Canny-specific status values */ export type CannyStatus = 'open' | 'under review' | 'planned' 
| 'in progress' | 'complete' | 'closed'; + +// ============================================ +// Claude Code Global MCP Types +// ============================================ + +/** + * A single MCP server entry resolved from Claude Code's global settings. + * Can originate from an enabled plugin (marketplace), an inline mcpServers definition + * in settings.json, or the top-level mcpServers in ~/.claude.json. + */ +export interface GlobalMcpServerEntry { + /** Plugin key (only for plugin-sourced servers), e.g. "context7@claude-plugins-official" */ + pluginKey?: string; + /** Server identifier from the MCP config, e.g. "context7" */ + serverId: string; + /** Human-readable name derived from serverId */ + serverName: string; + /** MCP server configuration */ + config: { + type?: 'http' | 'sse'; + command?: string; + args?: string[]; + url?: string; + headers?: Record; + env?: Record; + }; + /** Where this server config was sourced from */ + source: 'plugin' | 'settings' | 'claude-json'; +} + +/** + * Combined result of all global MCP servers from Claude Code settings. + */ +export interface GlobalMcpInfo { + /** MCP servers resolved from enabledPlugins (via plugin cache .mcp.json files) */ + pluginServers: GlobalMcpServerEntry[]; + /** MCP servers defined inline in the mcpServers field of settings.json */ + inlineServers: GlobalMcpServerEntry[]; + /** MCP servers from ~/.claude.json (main Claude Code config) */ + claudeJsonServers: GlobalMcpServerEntry[]; +} + +// ============================================ +// Claude Code Custom Agent Types +// ============================================ + +/** + * A custom agent definition from ~/.claude/agents/ + */ +export interface ClaudeCustomAgent { + /** Agent ID derived from filename (e.g. "frontend-developer") */ + agentId: string; + /** Human-readable name (e.g. "Frontend Developer") */ + agentName: string; + /** Category directory name (e.g. 
"01-core-development") */ + categoryDir: string; + /** Human-readable category name (e.g. "Core Development") */ + categoryName: string; + /** Full file path to the .md file */ + filePath: string; +} + +/** + * A category of custom agents + */ +export interface ClaudeAgentCategory { + /** Category directory name (e.g. "01-core-development") */ + categoryDir: string; + /** Human-readable name (e.g. "Core Development") */ + categoryName: string; + /** Agents in this category */ + agents: ClaudeCustomAgent[]; +} + +/** + * Combined result of all custom agents from ~/.claude/agents/ + */ +export interface ClaudeAgentsInfo { + categories: ClaudeAgentCategory[]; + totalAgents: number; +} diff --git a/apps/frontend/src/shared/types/ipc.ts b/apps/frontend/src/shared/types/ipc.ts index 532722db53..74f8dbb567 100644 --- a/apps/frontend/src/shared/types/ipc.ts +++ b/apps/frontend/src/shared/types/ipc.ts @@ -120,6 +120,8 @@ import type { LinearIssue, LinearImportResult, LinearSyncStatus, + GlobalMcpInfo, + ClaudeAgentsInfo, GitHubRepository, GitHubIssue, GitHubSyncStatus, @@ -179,11 +181,12 @@ export interface TabState { export interface ElectronAPI { // Project operations - addProject: (projectPath: string) => Promise>; + addProject: (projectPath: string, type?: 'project' | 'customer') => Promise>; removeProject: (projectId: string) => Promise; getProjects: () => Promise>; updateProjectSettings: (projectId: string, settings: Partial) => Promise; initializeProject: (projectId: string) => Promise>; + initializeCustomerProject: (projectId: string) => Promise>; checkProjectVersion: (projectId: string) => Promise>; // Tab State (persisted in main process for reliability) @@ -452,7 +455,8 @@ export interface ElectronAPI { // Context operations getProjectContext: (projectId: string) => Promise>; - refreshProjectIndex: (projectId: string) => Promise>; + refreshProjectIndex: (projectId: string, force?: boolean) => Promise>; + onIndexProgress: (callback: (data: { message: string; 
current?: number; total?: number; projectId?: string }) => void) => () => void; getMemoryStatus: (projectId: string) => Promise>; searchMemories: (projectId: string, query: string) => Promise>; getRecentMemories: (projectId: string, limit?: number) => Promise>; @@ -518,6 +522,7 @@ export interface ElectronAPI { getGitHubToken: () => Promise>; getGitHubUser: () => Promise>; listGitHubUserRepos: () => Promise }>>; + cloneGitHubRepo: (repoFullName: string, targetDir: string) => Promise>; detectGitHubRepo: (projectPath: string) => Promise>; getGitHubBranches: (repo: string, token: string) => Promise>; createGitHubRepo: ( @@ -880,6 +885,12 @@ export interface ElectronAPI { status: 'completed' | 'failed'; output: string[]; }>>; + /** Get the embedding dimension for an Ollama model (single source of truth from backend) */ + getOllamaEmbeddingDim: (modelName: string) => Promise>; // Ollama download progress listener onDownloadProgress: ( @@ -922,8 +933,15 @@ export interface ElectronAPI { // MCP Server health check operations checkMcpHealth: (server: CustomMcpServer) => Promise>; + checkGlobalMcpHealth: (server: CustomMcpServer) => Promise>; testMcpConnection: (server: CustomMcpServer) => Promise>; + // Claude Code global MCP configuration + getGlobalMcps: () => Promise>; + + // Claude Code custom agents + getClaudeAgents: () => Promise>; + // Screenshot capture operations getSources: () => Promise & { devMode?: boolean }>; capture: (options: { sourceId: string }) => Promise>; diff --git a/apps/frontend/src/shared/types/project.ts b/apps/frontend/src/shared/types/project.ts index 30bca7de2c..1d3f9bf631 100644 --- a/apps/frontend/src/shared/types/project.ts +++ b/apps/frontend/src/shared/types/project.ts @@ -10,6 +10,7 @@ export interface Project { settings: ProjectSettings; createdAt: Date; updatedAt: Date; + type?: 'project' | 'customer'; } export interface ProjectSettings { @@ -43,10 +44,12 @@ export interface NotificationSettings { export interface ProjectIndex { 
project_root: string; - project_type: 'single' | 'monorepo'; + project_type: 'single' | 'monorepo' | 'customer'; services: Record; infrastructure: InfrastructureInfo; conventions: ConventionsInfo; + /** For customer projects: indexes of each child repo keyed by repo name */ + child_repos?: Record; } export interface ServiceInfo { @@ -54,7 +57,7 @@ export interface ServiceInfo { path: string; language?: string; framework?: string; - type?: 'backend' | 'frontend' | 'worker' | 'scraper' | 'library' | 'proxy' | 'mobile' | 'desktop' | 'unknown'; + type?: 'backend' | 'frontend' | 'worker' | 'scraper' | 'library' | 'proxy' | 'mobile' | 'desktop' | 'documentation' | 'unknown'; package_manager?: string; default_port?: number; entry_point?: string; @@ -396,6 +399,8 @@ export interface CustomMcpServer { url?: string; /** HTTP headers (for type: 'http'). e.g., { "Authorization": "Bearer ..." } */ headers?: Record; + /** Environment variables to pass to the MCP server process */ + env?: Record; /** Optional description shown in UI */ description?: string; } diff --git a/apps/frontend/src/shared/types/settings.ts b/apps/frontend/src/shared/types/settings.ts index 77d3d6a32f..aeffa5866d 100644 --- a/apps/frontend/src/shared/types/settings.ts +++ b/apps/frontend/src/shared/types/settings.ts @@ -163,6 +163,27 @@ export type ThinkingLevel = 'low' | 'medium' | 'high'; // Model type shorthand export type ModelTypeShort = 'haiku' | 'sonnet' | 'opus' | 'opus-1m' | 'opus-4.5'; +// Phase-based custom agent configuration +// Each phase can optionally use a custom agent from ~/.claude/agents/ +export interface PhaseCustomAgentsConfig { + spec?: string; // Custom agent ID for spec creation + planning?: string; // Custom agent ID for planning + coding?: string; // Custom agent ID for coding + qa?: string; // Custom agent ID for QA +} + +/** + * Configuration for assigning global MCP servers to pipeline phases. 
+ * Each phase has a list of global MCP server IDs that should be available during that phase. + */ +export interface GlobalMcpPhaseConfig { + spec?: string[]; + build?: string[]; + qa?: string[]; + utility?: string[]; + ideation?: string[]; +} + // Phase-based model configuration for Auto profile // Each phase can use a different model optimized for that task type export interface PhaseModelConfig { @@ -260,6 +281,10 @@ export interface AppSettings { // Custom phase configuration for Auto profile (overrides defaults) customPhaseModels?: PhaseModelConfig; customPhaseThinking?: PhaseThinkingConfig; + // Custom agent per phase (from ~/.claude/agents/) + phaseCustomAgents?: PhaseCustomAgentsConfig; + // Global MCP servers assigned to pipeline phases + globalMcpPhases?: GlobalMcpPhaseConfig; // Feature-specific configuration (insights, ideation, roadmap) featureModels?: FeatureModelConfig; featureThinking?: FeatureThinkingConfig; diff --git a/apps/frontend/src/shared/types/task.ts b/apps/frontend/src/shared/types/task.ts index 495b707380..03063d2b35 100644 --- a/apps/frontend/src/shared/types/task.ts +++ b/apps/frontend/src/shared/types/task.ts @@ -233,6 +233,7 @@ export interface TaskMetadata { isAutoProfile?: boolean; // True when using Auto (Optimized) profile phaseModels?: PhaseModelConfig; // Per-phase model configuration phaseThinking?: PhaseThinkingConfig; // Per-phase thinking configuration + phaseCustomAgents?: import('./settings').PhaseCustomAgentsConfig; // Per-phase custom agent IDs fastMode?: boolean; // Fast Mode — faster Opus 4.6 output, higher cost per token // Git/Worktree configuration diff --git a/package-lock.json b/package-lock.json index 31ab465ad8..89fbe28ed0 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "auto-claude", - "version": "2.7.6-beta.6", + "version": "2.7.7", "lockfileVersion": 3, "requires": true, "packages": { "": { "name": "auto-claude", - "version": "2.7.6-beta.6", + "version": "2.7.7", 
"license": "AGPL-3.0", "workspaces": [ "apps/*", @@ -25,7 +25,7 @@ }, "apps/frontend": { "name": "auto-claude-ui", - "version": "2.7.6-beta.6", + "version": "2.7.7", "hasInstallScript": true, "license": "AGPL-3.0", "dependencies": { diff --git a/package.json b/package.json index 395f208fc7..0bb00ffbe1 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "auto-claude", - "version": "2.7.6", + "version": "2.7.16", "description": "Autonomous multi-agent coding framework powered by Claude AI", "license": "AGPL-3.0", "author": "Auto Claude Team", diff --git a/tests/test_auth.py b/tests/test_auth.py index 33faf03d05..80633c2ccc 100644 --- a/tests/test_auth.py +++ b/tests/test_auth.py @@ -35,10 +35,14 @@ def clear_env(self): """Clear auth environment variables before each test.""" for var in AUTH_TOKEN_ENV_VARS: os.environ.pop(var, None) + saved_config_dir = os.environ.pop("CLAUDE_CONFIG_DIR", None) yield # Cleanup after test for var in AUTH_TOKEN_ENV_VARS: os.environ.pop(var, None) + os.environ.pop("CLAUDE_CONFIG_DIR", None) + if saved_config_dir is not None: + os.environ["CLAUDE_CONFIG_DIR"] = saved_config_dir def test_claude_oauth_token_from_env(self): """Reads CLAUDE_CODE_OAUTH_TOKEN from environment.""" @@ -373,6 +377,7 @@ def clear_env(self, monkeypatch): """Clear auth environment variables and mock keychain before each test.""" for var in AUTH_TOKEN_ENV_VARS: os.environ.pop(var, None) + monkeypatch.delenv("CLAUDE_CONFIG_DIR", raising=False) # Mock keychain to return None (tests that need a token will set env var) monkeypatch.setattr("core.auth.get_token_from_keychain", lambda _config_dir=None: None) yield @@ -437,11 +442,15 @@ def clear_env(self): for var in AUTH_TOKEN_ENV_VARS: os.environ.pop(var, None) os.environ.pop("CLAUDE_CODE_OAUTH_TOKEN", None) + saved_config_dir = os.environ.pop("CLAUDE_CONFIG_DIR", None) yield # Cleanup after test for var in AUTH_TOKEN_ENV_VARS: os.environ.pop(var, None) os.environ.pop("CLAUDE_CODE_OAUTH_TOKEN", None) + 
os.environ.pop("CLAUDE_CONFIG_DIR", None) + if saved_config_dir is not None: + os.environ["CLAUDE_CONFIG_DIR"] = saved_config_dir def test_does_nothing_when_already_set(self): """Doesn't modify env var when CLAUDE_CODE_OAUTH_TOKEN is already set.""" @@ -480,10 +489,14 @@ def clear_env(self): """Clear auth environment variables before each test.""" for var in AUTH_TOKEN_ENV_VARS: os.environ.pop(var, None) + saved_config_dir = os.environ.pop("CLAUDE_CONFIG_DIR", None) yield # Cleanup after test for var in AUTH_TOKEN_ENV_VARS: os.environ.pop(var, None) + os.environ.pop("CLAUDE_CONFIG_DIR", None) + if saved_config_dir is not None: + os.environ["CLAUDE_CONFIG_DIR"] = saved_config_dir def test_source_env_var_claude_oauth(self): """Identifies CLAUDE_CODE_OAUTH_TOKEN as source.""" @@ -982,10 +995,14 @@ def clear_env(self): """Clear auth environment variables before each test.""" for var in AUTH_TOKEN_ENV_VARS: os.environ.pop(var, None) + saved_config_dir = os.environ.pop("CLAUDE_CONFIG_DIR", None) yield # Cleanup after test for var in AUTH_TOKEN_ENV_VARS: os.environ.pop(var, None) + os.environ.pop("CLAUDE_CONFIG_DIR", None) + if saved_config_dir is not None: + os.environ["CLAUDE_CONFIG_DIR"] = saved_config_dir def test_keychain_encrypted_token_decryption_attempted(self, monkeypatch): """Verify encrypted tokens from keychain trigger decryption.""" diff --git a/tests/test_client.py b/tests/test_client.py index cf6cca5cd8..f8e93a5455 100644 --- a/tests/test_client.py +++ b/tests/test_client.py @@ -27,9 +27,13 @@ def clear_auth_env(): """Clear auth environment variables before and after each test.""" for var in AUTH_TOKEN_ENV_VARS: os.environ.pop(var, None) + saved_config_dir = os.environ.pop("CLAUDE_CONFIG_DIR", None) yield for var in AUTH_TOKEN_ENV_VARS: os.environ.pop(var, None) + os.environ.pop("CLAUDE_CONFIG_DIR", None) + if saved_config_dir is not None: + os.environ["CLAUDE_CONFIG_DIR"] = saved_config_dir class TestClientTokenValidation: