From 9132b9e872694056d35206823bb590d34d9c334c Mon Sep 17 00:00:00 2001 From: AutoResearch Agent Date: Tue, 10 Mar 2026 20:00:43 +0800 Subject: [PATCH 01/24] Introduce Component-System for component optimization, add a dashboard to monitor activities --- .gitignore | 7 + README.md | 36 + component_system/PDCA-DO-CHECK-ACTION.md | 99 ++ component_system/PDCA-PLAN.md | 112 ++ component_system/components/model.py | 380 +++++ component_system/components/optimizer.py | 179 +++ component_system/components/trainer.py | 191 +++ component_system/config.py | 31 + component_system/domain/models.py | 91 ++ component_system/entrypoint.py | 18 + component_system/package.json | 13 + component_system/postcss.config.js | 6 + component_system/protocol.md | 332 ++++ component_system/repositories/state.py | 75 + component_system/run.py | 764 ++++++++++ component_system/services/workflow.py | 1355 +++++++++++++++++ component_system/tailwind.config.js | 11 + component_system/task.py | 243 +++ component_system/training/mainline.py | 82 + component_system/web/app.py | 42 + component_system/web/routes.py | 337 ++++ component_system/web/static/app.css | 137 ++ component_system/web/static/app.js | 399 +++++ .../web/static/tailwind.input.css | 27 + component_system/web/templates/base.html | 32 + component_system/web/templates/dashboard.html | 120 ++ .../web/templates/partials/action_error.html | 3 + .../web/templates/partials/daemon_status.html | 14 + .../templates/partials/dashboard_board.html | 58 + .../web/templates/partials/seed_detail.html | 326 ++++ .../partials/seed_detail_response.html | 4 + .../web/templates/seed_detail_page.html | 15 + prepare.py | 3 +- pyproject.toml | 4 + scripts/clean_history.py | 147 ++ uv.lock | 216 ++- 36 files changed, 5907 insertions(+), 2 deletions(-) create mode 100644 component_system/PDCA-DO-CHECK-ACTION.md create mode 100644 component_system/PDCA-PLAN.md create mode 100644 component_system/components/model.py create mode 100644 
component_system/components/optimizer.py create mode 100644 component_system/components/trainer.py create mode 100644 component_system/config.py create mode 100644 component_system/domain/models.py create mode 100644 component_system/entrypoint.py create mode 100644 component_system/package.json create mode 100644 component_system/postcss.config.js create mode 100644 component_system/protocol.md create mode 100644 component_system/repositories/state.py create mode 100644 component_system/run.py create mode 100644 component_system/services/workflow.py create mode 100644 component_system/tailwind.config.js create mode 100644 component_system/task.py create mode 100644 component_system/training/mainline.py create mode 100644 component_system/web/app.py create mode 100644 component_system/web/routes.py create mode 100644 component_system/web/static/app.css create mode 100644 component_system/web/static/app.js create mode 100644 component_system/web/static/tailwind.input.css create mode 100644 component_system/web/templates/base.html create mode 100644 component_system/web/templates/dashboard.html create mode 100644 component_system/web/templates/partials/action_error.html create mode 100644 component_system/web/templates/partials/daemon_status.html create mode 100644 component_system/web/templates/partials/dashboard_board.html create mode 100644 component_system/web/templates/partials/seed_detail.html create mode 100644 component_system/web/templates/partials/seed_detail_response.html create mode 100644 component_system/web/templates/seed_detail_page.html create mode 100644 scripts/clean_history.py diff --git a/.gitignore b/.gitignore index 99c30f52f..cb732b720 100644 --- a/.gitignore +++ b/.gitignore @@ -1,6 +1,7 @@ # Python-generated files __pycache__/ *.py[oc] +.pytest_cache/ build/ dist/ wheels/ @@ -21,3 +22,9 @@ dev/ # Results file results.tsv + +# Component-system runtime artifacts (logs, queue, state, worktrees under history/) +component_system/history/ 
+component_system/baseline_branches.json +component_system/baseline_metrics.json +*.log diff --git a/README.md b/README.md index 8459259ab..8de13d9cd 100644 --- a/README.md +++ b/README.md @@ -47,6 +47,42 @@ Hi have a look at program.md and let's kick off a new experiment! let's do the s The `program.md` file is essentially a super lightweight "skill". +### Component-system workflow + +The component system runs a continuous **Seed → P → DCA** loop. A resident daemon manages two workers (P and DCA) that poll a file-based queue and dispatch each stage to an external code agent (Claude Code, Codex, or OpenCode). + +**1. Start the web dashboard** (optional, but recommended for monitoring): + +```bash +uv run uvicorn component_system.web.app:app --reload +``` + +Open http://127.0.0.1:8000 — the dashboard lives at `/component-system`. Use `--host 0.0.0.0` or `--port 8080` as needed. + +**2. Start the daemon:** + +```bash +# Default: uses Claude Code +uv run component_system/run.py + +# Or choose a different agent backend +PDCA_AGENT=codex uv run component_system/run.py +PDCA_AGENT=opencode uv run component_system/run.py +``` + +**3. Bootstrap via a coding agent.** Do *not* tell the agent to execute PDCA stages manually. Instead, give it a prompt like: + +```text +Understand this project and follow component_system/protocol.md. +Do not execute PDCA stages manually in this session. +Instead, bootstrap the component system by creating an initial seed +and queuing it to component_system/queue/p/, then confirm the daemon +(uv run component_system/run.py) is running so the P and DCA workers +can process stages automatically. +``` + +Once bootstrapped, seeds flow through `queue/p/` → P worker → `queue/dca/` → DCA worker → `state/` automatically. Results and promotions are tracked in `state/` and visible in the web dashboard. 
+ ## Project structure ``` diff --git a/component_system/PDCA-DO-CHECK-ACTION.md b/component_system/PDCA-DO-CHECK-ACTION.md new file mode 100644 index 000000000..6bed2d95d --- /dev/null +++ b/component_system/PDCA-DO-CHECK-ACTION.md @@ -0,0 +1,99 @@ +# DCA - Adapt, Check, Action + +This document merges the former `PDCA-DO.md`, `PDCA-CHECK.md`, and `PDCA-ACTION.md` +into one execution guide for the merged DCA stage. + +## Responsibility +Take the generated plan from P, adapt/fix it in the seed worktree, +run the canonical training entrypoint, evaluate results against baseline, and +promote only when the signal is positive. + +## Workspace and paths +Your **current working directory is the seed worktree**. All reads and edits must stay inside this workspace. Use **only paths relative to your cwd**, and treat the copied files under `component_system/` as the canonical context inside the worktree. Do not use or request absolute paths, parent-directory paths, or files outside the workspace; the runner has already set your cwd to the correct worktree. + +## Input +- Read the task content embedded in the runner prompt. +- Read current baseline state from `component_system/baseline_branches.json` and `component_system/baseline_metrics.json`. +- Read and edit worktree-local files only. + +## Baseline measurement (seed_id __baseline__) +For **baseline measurement** tasks you must **retry until the run completes successfully** and you can report real metrics. Do not report empty metrics and stop. + +- If training fails with **CUDA out of memory (OOM)**, the default batch size is tuned for H100. Reduce `device_batch_size` in `component_system/components/trainer.py` (`TrainingSettings`: default `device_batch_size=128`). You may also need to reduce `total_batch_size` so that `total_batch_size % (device_batch_size * sequence_length) == 0` for gradient accumulation. Then rerun the entrypoint until training completes and report the resulting metrics. 
+- Only trivial execution fixes (e.g. batch size for VRAM) are allowed; do not change model architecture or training logic for baseline. +- **Commit any file changes before reporting.** If you modified files (e.g. reduced batch size), commit those changes on the baseline branch. An uncommitted worktree causes the follow-up merge into the baseline branch to fail. + +## Workflow +1. Work in the seed worktree prepared by the system (on the seed branch, one branch per seed). +2. Adapt or fix generated code until it runs cleanly. +3. Run the canonical command (allow at least **600 seconds** so the run is not killed by the execution environment; the first step can take ~150s and training runs for 300s): + - Preferred (when daemon/root `.venv` is active): `timeout 600 uv run --active component_system/entrypoint.py` + - Fallback (when no active root `.venv` is available): `timeout 600 uv run component_system/entrypoint.py` +4. If there is a simple bug or OOM, fix (e.g. reduce batch size) and rerun. For baseline measurement, keep retrying until the run succeeds. +5. Commit changes on the seed branch before reporting. +6. Print the DCA summary block for the runner; include the current commit SHA in the JSON so the runner can verify and record it. +7. Let the runner evaluate signal and handle promotion policy. + +## Output Format +Print a summary block for the runner. 
Report metrics in the JSON first; the +runner only falls back to parsing training stdout/stderr when the JSON metrics +are missing: + +```text +AUTORESEARCH_DCA_SUMMARY_BEGIN +{"checks":["entrypoint"],"notes":"what you adapted or fixed","completed_at":"YYYY-MM-DD HH:MM:SS","commit_sha":"git sha","metrics":{"val_bpb":1.239972,"training_seconds":300.1,"total_seconds":360.4,"peak_vram_mb":11967.8,"mfu_percent":2.15,"total_tokens_M":140.5,"num_steps":268,"num_params_M":11.5,"depth":4}} +AUTORESEARCH_DCA_SUMMARY_END +``` + +If you cannot provide final metrics, still print the exact same JSON shape with +`"metrics": {}`. + +The runner falls back to extracting metrics from canonical training stdout/stderr: +`val_bpb`, `training_seconds`, `total_seconds`, `peak_vram_mb`, `mfu_percent`, +`total_tokens_M`, `num_steps`, `num_params_M`, and `depth`. + +If a DCA run finishes but still reports no metrics, the system does not +immediately mark it failed. Instead, it queues a follow-up DCA recovery task +that inspects the saved stdout/stderr logs and reports the metrics in the same +JSON format. Only if that recovery task still cannot recover metrics is the run +treated as failed. + +## Check: Signal Rules + +| Condition | Signal | +|-----------|--------| +| `val_bpb` drops >= 0.001 vs baseline | `positive_signal` | +| `val_bpb` rises >= 0.001 vs baseline | `negative_signal` | +| difference < 0.001 | `neutral` | +| no historical baseline `last_val_bpb` | `positive_signal` (first recording) | +| metrics missing or training error | `error` | + +The threshold is defined in `component_system/config.py` (`PROMOTION_THRESHOLD`). + +## Action: Promotion Rules + +Only the DCA (Do-Check-Action) stage may trigger a merge into baseline. The Plan stage must never merge code; the system performs the merge automatically after a successful DCA promotion. + +The runner records the DCA `commit_sha` from your summary (or from the current branch HEAD if omitted) for traceability. 
On positive signal, the workflow merges the seed branch into the baseline. If the merge fails (e.g. conflicts), the system queues a merge-resolution DCA run. + +### Promotion flow (`positive_signal` only) +1. The system merges the seed branch into the baseline branch (you do not run merge yourself). +2. The workflow updates `baseline_metrics.json` (and `baseline_branches.json` as needed) with `last_val_bpb`, `promoted_from`, `promoted_idea`, `promoted_at`, `promoted_branch`. +3. Promotion metadata is persisted in seed/run state files. + +### Merge failure and conflict resolution +- If the merge into baseline fails (e.g. conflicts), the system queues a **new DCA run** with `merge_resolution: true`. + - **Normal seed**: In the seed worktree, run `git merge __baseline__` (merge the baseline branch into the seed), resolve conflicts, commit, then print the DCA summary so the system can retry promotion. + - **Baseline seed (__baseline__)**: The goal is to merge __baseline__ *into* the target branch (e.g. master). Run from the directory that has the target branch checked out (use `git worktree list` to find it), then `git merge __baseline__`. Do *not* run from the __baseline__ worktree and do *not* run `git merge master` there—that would merge master into __baseline__, the wrong direction. + +### Non-promotion cases +- `neutral`, `negative_signal`, or `error`: log only, no baseline merge/update. +- Failed run info remains available via queue/state logs. + +## Constraints +- Training must use `run_mainline_training` or equivalent for evaluation consistency. +- Evaluation (`val_bpb`) must not be skipped. +- Do not edit `baseline_branches.json` or `baseline_metrics.json` directly; the workflow writes them. +- Only `positive_signal` can trigger promotion. +- Keep `component_system/entrypoint.py` as the canonical runner. +- Rely on git history plus state files for traceability. 
diff --git a/component_system/PDCA-PLAN.md b/component_system/PDCA-PLAN.md new file mode 100644 index 000000000..f81b6259d --- /dev/null +++ b/component_system/PDCA-PLAN.md @@ -0,0 +1,112 @@ +# P — Seed Planning And Generation + +## Responsibility +Extract exactly one testable improvement hypothesis from the seed prompt, +generate the first implementation in a candidate worktree, and hand the result +to DCA through the runner. + +## Workspace and paths +Your **current working directory is the seed worktree**. All reads and edits must stay inside this workspace. Use only in-workspace paths from your current working directory, and do not use or request absolute paths or any paths outside the workspace; the runner has already set your cwd to the correct worktree. + +## Skill: arxiv-search + +Use the **arxiv-search** skill (`.agents/skills/arxiv-search`) to search for +relevant papers. + +If the skill is not installed or the search script is missing, do not pretend +the skill exists and do not fabricate paper references. Try to install or make +the skill available autonomously. If that still fails, continue planning from +the other input sources instead of asking the user questions. + +### Prerequisites +```bash +pip install arxiv +``` + +Install the Python package only after the skill itself is available. Installing +the package alone does not replace the missing skill. If the skill cannot be +made available, skip paper-driven search and proceed with the remaining inputs. 
+ +### Search for papers +```bash +# Search by topic in cs.LG / cs.NE categories +python .agents/skills/arxiv-search/scripts/search_arxiv.py \ + --query "optimizer adaptive learning rate" \ + --category "cs.LG" \ + --sort-by submitted_date \ + --max-results 10 + +# Search for model architecture ideas +python .agents/skills/arxiv-search/scripts/search_arxiv.py \ + --query "ti:attention AND abs:efficiency" \ + --date-from "2024-01-01" \ + --output json +``` + +### How to Extract a Hypothesis from Results +1. Read the abstract of each result +2. Identify a concrete architectural or algorithmic change (not just a concept) +3. Map it to a target component: `model`, `optimizer`, or `trainer` +4. State the **expected benefit** (e.g. faster convergence, lower val loss, fewer params) +5. Reduce the idea to one isolated improvement that can be evaluated on its own + +## Read results.tsv first (avoid idea duplication) +Before choosing a hypothesis, **read `results.tsv` in your current working directory if it exists**. The runner copies the latest result history into the seed worktree before P runs. Use it to avoid proposing ideas that were already tried or discarded; only repeat an idea if you have a clear new angle (e.g. different implementation or target component). + +## Input Sources +- **results.tsv** in cwd (when present) — read first to avoid duplicating past ideas +- arXiv papers via **arxiv-search** skill (primary) +- Clues from past run failures in `queue/done/` +- Manual seed files + +## One-Improvement Rule + +Each P run must propose and implement exactly one improvement. + +- One seed means one hypothesis. +- One seed means one causal change to evaluate. +- Do not bundle multiple ideas into the same candidate, even if they seem + complementary. +- If the prompt contains several possible improvements, choose the single best + one for this iteration and leave the others for later seeds. 
+- If an idea would require several coordinated changes, choose the smallest + coherent version that still tests the hypothesis cleanly. + +Good examples: +- change only the optimizer schedule +- add only one architectural block +- simplify only one training heuristic + +Bad examples: +- change the model width and the optimizer and the batch schedule together +- combine several paper ideas in one seed +- make "general cleanup plus a new feature" in the same candidate + +## Output Format +Print a summary block for the runner: +```text +AUTORESEARCH_P_SUMMARY_BEGIN +{"idea":"short title","target_component":"model | optimizer | trainer","description":"change details, hypothesis, expected benefit","source_refs":["arXiv:"],"commit_sha":"git sha","completed_at":"YYYY-MM-DD HH:MM:SS"} +AUTORESEARCH_P_SUMMARY_END +``` + +## Steps +1. If `results.tsv` exists in the worktree, read it first to avoid duplicating already-tried ideas. +2. Refine the seed prompt into one concrete idea +3. Reduce that idea to one isolated improvement with a clear expected benefit +4. Identify the target component (`model`, `optimizer`, or `trainer`) +5. Implement only that first version inside the candidate worktree created from `baseline` +6. Commit the candidate branch +7. Ensure the summary describes the single improvement being tested +8. Print the summary block; the runner records the commit on the seed branch. + +## Constraints +- Each seed targets exactly one component +- Each seed applies exactly one improvement +- Prefer the smallest viable implementation that can test the hypothesis +- Do not mix exploratory cleanup with the experimental change +- Do not include opportunistic refactors unless they are strictly required to make + the one improvement work +- The description must contain enough detail for DCA to continue independently +- One branch per seed: commit on the seed branch in the worktree; the runner does not merge branches. 
+- **Plan must never merge code.** Only the DCA (Do-Check-Action) stage may trigger a merge into baseline; the system performs the merge automatically after a successful DCA promotion. diff --git a/component_system/components/model.py b/component_system/components/model.py new file mode 100644 index 000000000..f74d89386 --- /dev/null +++ b/component_system/components/model.py @@ -0,0 +1,380 @@ +from __future__ import annotations + +from dataclasses import dataclass + +import torch +import torch.nn as nn +import torch.nn.functional as F +from kernels import get_kernel + +from prepare import MAX_SEQ_LEN + + +def _get_fa3(): + if torch.cuda.is_available(): + cap = torch.cuda.get_device_capability() + repo = "varunneal/flash-attention-3" if cap == (9, 0) else "kernels-community/flash-attn3" + return get_kernel(repo).flash_attn_interface + return None + +_fa3 = None + +def get_fa3(): + global _fa3 + if _fa3 is None: + _fa3 = _get_fa3() + return _fa3 + + +@dataclass +class GPTConfig: + sequence_len: int = 2048 + vocab_size: int = 32768 + n_layer: int = 12 + n_head: int = 6 + n_kv_head: int = 6 + n_embd: int = 768 + window_pattern: str = "SSSL" + + +def norm(x: torch.Tensor) -> torch.Tensor: + return F.rms_norm(x, (x.size(-1),)) + + +def has_ve(layer_idx: int, n_layer: int) -> bool: + return layer_idx % 2 == (n_layer - 1) % 2 + + +def apply_rotary_emb(x: torch.Tensor, cos: torch.Tensor, sin: torch.Tensor) -> torch.Tensor: + assert x.ndim == 4 + d = x.shape[3] // 2 + x1, x2 = x[..., :d], x[..., d:] + y1 = x1 * cos + x2 * sin + y2 = x1 * (-sin) + x2 * cos + return torch.cat([y1, y2], 3) + + +class CausalSelfAttention(nn.Module): + def __init__(self, config: GPTConfig, layer_idx: int) -> None: + super().__init__() + self.n_head = config.n_head + self.n_kv_head = config.n_kv_head + self.n_embd = config.n_embd + self.head_dim = self.n_embd // self.n_head + assert self.n_embd % self.n_head == 0 + assert self.n_kv_head <= self.n_head and self.n_head % self.n_kv_head == 0 + 
self.c_q = nn.Linear(self.n_embd, self.n_head * self.head_dim, bias=False) + self.c_k = nn.Linear(self.n_embd, self.n_kv_head * self.head_dim, bias=False) + self.c_v = nn.Linear(self.n_embd, self.n_kv_head * self.head_dim, bias=False) + self.c_proj = nn.Linear(self.n_embd, self.n_embd, bias=False) + self.ve_gate_channels = 32 + self.ve_gate = ( + nn.Linear(self.ve_gate_channels, self.n_kv_head, bias=False) + if has_ve(layer_idx, config.n_layer) + else None + ) + + def forward( + self, + x: torch.Tensor, + ve: torch.Tensor | None, + cos_sin: tuple[torch.Tensor, torch.Tensor], + window_size: tuple[int, int], + ) -> torch.Tensor: + batch_size, seq_len, _ = x.size() + q = self.c_q(x).view(batch_size, seq_len, self.n_head, self.head_dim) + k = self.c_k(x).view(batch_size, seq_len, self.n_kv_head, self.head_dim) + v = self.c_v(x).view(batch_size, seq_len, self.n_kv_head, self.head_dim) + + # Value residual (ResFormer): mix in value embedding with input-dependent gate per head + if ve is not None: + ve = ve.view(batch_size, seq_len, self.n_kv_head, self.head_dim) + gate = 2 * torch.sigmoid(self.ve_gate(x[..., : self.ve_gate_channels])) + v = v + gate.unsqueeze(-1) * ve + + cos, sin = cos_sin + q, k = apply_rotary_emb(q, cos, sin), apply_rotary_emb(k, cos, sin) + q, k = norm(q), norm(k) + + fa3 = get_fa3() + if fa3 is None: + raise RuntimeError("Flash Attention 3 is unavailable; component_system model should match train.py and requires the same kernel path.") + y = fa3.flash_attn_func(q, k, v, causal=True, window_size=window_size) + y = y.contiguous().view(batch_size, seq_len, -1) + return self.c_proj(y) + + +class MLP(nn.Module): + def __init__(self, config: GPTConfig) -> None: + super().__init__() + self.c_fc = nn.Linear(config.n_embd, 4 * config.n_embd, bias=False) + self.c_proj = nn.Linear(4 * config.n_embd, config.n_embd, bias=False) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + x = self.c_fc(x) + x = F.relu(x).square() + x = self.c_proj(x) + return x + + 
+class Block(nn.Module): + def __init__(self, config: GPTConfig, layer_idx: int) -> None: + super().__init__() + self.attn = CausalSelfAttention(config, layer_idx) + self.mlp = MLP(config) + + def forward( + self, + x: torch.Tensor, + ve: torch.Tensor | None, + cos_sin: tuple[torch.Tensor, torch.Tensor], + window_size: tuple[int, int], + ) -> torch.Tensor: + x = x + self.attn(norm(x), ve, cos_sin, window_size) + x = x + self.mlp(norm(x)) + return x + + +class GPT(nn.Module): + def __init__(self, config: GPTConfig) -> None: + super().__init__() + self.config = config + self.window_sizes = self._compute_window_sizes(config) + self.transformer = nn.ModuleDict( + { + "wte": nn.Embedding(config.vocab_size, config.n_embd), + "h": nn.ModuleList([Block(config, i) for i in range(config.n_layer)]), + } + ) + self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False) + self.resid_lambdas = nn.Parameter(torch.ones(config.n_layer)) + self.x0_lambdas = nn.Parameter(torch.zeros(config.n_layer)) + head_dim = config.n_embd // config.n_head + kv_dim = config.n_kv_head * head_dim + self.value_embeds = nn.ModuleDict( + { + str(i): nn.Embedding(config.vocab_size, kv_dim) + for i in range(config.n_layer) + if has_ve(i, config.n_layer) + } + ) + self.rotary_seq_len = config.sequence_len * 10 + cos, sin = self._precompute_rotary_embeddings(self.rotary_seq_len, head_dim) + self.register_buffer("cos", cos, persistent=False) + self.register_buffer("sin", sin, persistent=False) + + @torch.no_grad() + def init_weights(self) -> None: + torch.nn.init.normal_(self.transformer.wte.weight, mean=0.0, std=1.0) + torch.nn.init.normal_(self.lm_head.weight, mean=0.0, std=0.001) + n_embd = self.config.n_embd + scale = 3**0.5 * n_embd**-0.5 + for block in self.transformer.h: + torch.nn.init.uniform_(block.attn.c_q.weight, -scale, scale) + torch.nn.init.uniform_(block.attn.c_k.weight, -scale, scale) + torch.nn.init.uniform_(block.attn.c_v.weight, -scale, scale) + 
torch.nn.init.zeros_(block.attn.c_proj.weight) + torch.nn.init.uniform_(block.mlp.c_fc.weight, -scale, scale) + torch.nn.init.zeros_(block.mlp.c_proj.weight) + self.resid_lambdas.fill_(1.0) + self.x0_lambdas.fill_(0.1) + for ve in self.value_embeds.values(): + torch.nn.init.uniform_(ve.weight, -scale, scale) + for block in self.transformer.h: + if block.attn.ve_gate is not None: + torch.nn.init.zeros_(block.attn.ve_gate.weight) + head_dim = self.config.n_embd // self.config.n_head + cos, sin = self._precompute_rotary_embeddings(self.rotary_seq_len, head_dim) + self.cos, self.sin = cos, sin + self.transformer.wte.to(dtype=torch.bfloat16) + for ve in self.value_embeds.values(): + ve.to(dtype=torch.bfloat16) + + def _precompute_rotary_embeddings( + self, + seq_len: int, + head_dim: int, + base: int = 10000, + device: torch.device | None = None, + ) -> tuple[torch.Tensor, torch.Tensor]: + if device is None: + device = self.transformer.wte.weight.device + channel_range = torch.arange(0, head_dim, 2, dtype=torch.float32, device=device) + inv_freq = 1.0 / (base ** (channel_range / head_dim)) + t = torch.arange(seq_len, dtype=torch.float32, device=device) + freqs = torch.outer(t, inv_freq) + cos, sin = freqs.cos(), freqs.sin() + cos, sin = cos.bfloat16(), sin.bfloat16() + return cos[None, :, None, :], sin[None, :, None, :] + + def _compute_window_sizes(self, config: GPTConfig) -> list[tuple[int, int]]: + pattern = config.window_pattern.upper() + assert all(c in "SL" for c in pattern) + long_window = config.sequence_len + short_window = long_window // 2 + char_to_window = {"L": (long_window, 0), "S": (short_window, 0)} + window_sizes = [] + for layer_idx in range(config.n_layer): + char = pattern[layer_idx % len(pattern)] + window_sizes.append(char_to_window[char]) + window_sizes[-1] = (long_window, 0) + return window_sizes + + def estimate_flops(self) -> float: + nparams = sum(p.numel() for p in self.parameters()) + value_embeds_numel = sum(ve.weight.numel() for ve in 
self.value_embeds.values()) + nparams_exclude = ( + self.transformer.wte.weight.numel() + + value_embeds_numel + + self.resid_lambdas.numel() + + self.x0_lambdas.numel() + ) + n_head = self.config.n_head + head_dim = self.config.n_embd // self.config.n_head + seq_len = self.config.sequence_len + attn_flops = 0 + for window_size in self.window_sizes: + window = window_size[0] + effective_seq = seq_len if window < 0 else min(window, seq_len) + attn_flops += 12 * n_head * head_dim * effective_seq + return 6 * (nparams - nparams_exclude) + attn_flops + + def num_scaling_params(self) -> dict[str, int]: + wte = sum(p.numel() for p in self.transformer.wte.parameters()) + value_embeds = sum(p.numel() for p in self.value_embeds.parameters()) + lm_head = sum(p.numel() for p in self.lm_head.parameters()) + transformer_matrices = sum(p.numel() for p in self.transformer.h.parameters()) + scalars = self.resid_lambdas.numel() + self.x0_lambdas.numel() + total = wte + value_embeds + lm_head + transformer_matrices + scalars + return { + "wte": wte, + "value_embeds": value_embeds, + "lm_head": lm_head, + "transformer_matrices": transformer_matrices, + "scalars": scalars, + "total": total, + } + + def setup_optimizer( + self, + unembedding_lr: float = 0.004, + embedding_lr: float = 0.2, + matrix_lr: float = 0.02, + weight_decay: float = 0.0, + adam_betas: tuple[float, float] = (0.8, 0.95), + scalar_lr: float = 0.5, + ): + from component_system.components.optimizer import MuonAdamW + + model_dim = self.config.n_embd + matrix_params = list(self.transformer.h.parameters()) + value_embeds_params = list(self.value_embeds.parameters()) + embedding_params = list(self.transformer.wte.parameters()) + lm_head_params = list(self.lm_head.parameters()) + resid_params = [self.resid_lambdas] + x0_params = [self.x0_lambdas] + assert len(list(self.parameters())) == ( + len(matrix_params) + + len(embedding_params) + + len(lm_head_params) + + len(value_embeds_params) + + len(resid_params) + + 
len(x0_params) + ) + # Scale LR ∝ 1/√dmodel (tuned at 768 dim) + dmodel_lr_scale = (model_dim / 768) ** -0.5 + print(f"Scaling AdamW LRs by 1/sqrt({model_dim}/768) = {dmodel_lr_scale:.6f}") + param_groups = [ + dict(kind="adamw", params=lm_head_params, lr=unembedding_lr * dmodel_lr_scale, betas=adam_betas, eps=1e-10, weight_decay=0.0), + dict(kind="adamw", params=embedding_params, lr=embedding_lr * dmodel_lr_scale, betas=adam_betas, eps=1e-10, weight_decay=0.0), + dict(kind="adamw", params=value_embeds_params, lr=embedding_lr * dmodel_lr_scale, betas=adam_betas, eps=1e-10, weight_decay=0.0), + dict(kind="adamw", params=resid_params, lr=scalar_lr * 0.01, betas=adam_betas, eps=1e-10, weight_decay=0.0), + dict(kind="adamw", params=x0_params, lr=scalar_lr, betas=(0.96, 0.95), eps=1e-10, weight_decay=0.0), + ] + for shape in sorted({p.shape for p in matrix_params}): + group_params = [p for p in matrix_params if p.shape == shape] + param_groups.append( + dict( + kind="muon", + params=group_params, + lr=matrix_lr, + momentum=0.95, + ns_steps=5, + beta2=0.95, + weight_decay=weight_decay, + ) + ) + optimizer = MuonAdamW(param_groups) + for group in optimizer.param_groups: + group["initial_lr"] = group["lr"] + return optimizer + + def forward( + self, + idx: torch.Tensor, + targets: torch.Tensor | None = None, + reduction: str = "mean", + ) -> torch.Tensor: + _, seq_len = idx.size() + assert seq_len <= self.cos.size(1) + cos_sin = self.cos[:, :seq_len], self.sin[:, :seq_len] + x = self.transformer.wte(idx) + x = norm(x) + x0 = x + for layer_idx, block in enumerate(self.transformer.h): + x = self.resid_lambdas[layer_idx] * x + self.x0_lambdas[layer_idx] * x0 + ve = self.value_embeds[str(layer_idx)](idx) if str(layer_idx) in self.value_embeds else None + x = block(x, ve, cos_sin, self.window_sizes[layer_idx]) + x = norm(x) + logits = self.lm_head(x).float() + softcap = 15 + logits = softcap * torch.tanh(logits / softcap) + if targets is None: + return logits + return 
F.cross_entropy( + logits.view(-1, logits.size(-1)), + targets.view(-1), + ignore_index=-1, + reduction=reduction, + ) + + +def build_model_config( + depth: int, + *, + vocab_size: int, + aspect_ratio: int = 64, + head_dim: int = 128, + window_pattern: str = "SSSL", +) -> GPTConfig: + base_dim = depth * aspect_ratio + model_dim = ((base_dim + head_dim - 1) // head_dim) * head_dim + num_heads = model_dim // head_dim + return GPTConfig( + sequence_len=MAX_SEQ_LEN, + vocab_size=vocab_size, + n_layer=depth, + n_head=num_heads, + n_kv_head=num_heads, + n_embd=model_dim, + window_pattern=window_pattern, + ) + + +def create_model( + config: GPTConfig, + *, + device: torch.device | None = None, + compile_model: bool = True, +) -> tuple[GPT, dict[str, int], float]: + if device is None: + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + with torch.device("meta"): + model = GPT(config) + model.to_empty(device=device) + model.init_weights() + param_counts = model.num_scaling_params() + num_flops_per_token = model.estimate_flops() + if compile_model: + model = torch.compile(model, dynamic=False) + return model, param_counts, num_flops_per_token diff --git a/component_system/components/optimizer.py b/component_system/components/optimizer.py new file mode 100644 index 000000000..227caaea9 --- /dev/null +++ b/component_system/components/optimizer.py @@ -0,0 +1,179 @@ +from __future__ import annotations + +import torch + + +polar_express_coeffs = [ + (8.156554524902461, -22.48329292557795, 15.878769915207462), + (4.042929935166739, -2.808917465908714, 0.5000178451051316), + (3.8916678022926607, -2.772484153217685, 0.5060648178503393), + (3.285753657755655, -2.3681294933425376, 0.46449024233003106), + (2.3465413258596377, -1.7097828382687081, 0.42323551169305323), +] + + +@torch.compile(dynamic=False, fullgraph=True) +def adamw_step_fused( + p: torch.Tensor, + grad: torch.Tensor, + exp_avg: torch.Tensor, + exp_avg_sq: torch.Tensor, + step_t: torch.Tensor, + 
lr_t: torch.Tensor, + beta1_t: torch.Tensor, + beta2_t: torch.Tensor, + eps_t: torch.Tensor, + wd_t: torch.Tensor, +) -> None: + p.mul_(1 - lr_t * wd_t) + exp_avg.lerp_(grad, 1 - beta1_t) + exp_avg_sq.lerp_(grad.square(), 1 - beta2_t) + bias1 = 1 - beta1_t**step_t + bias2 = 1 - beta2_t**step_t + denom = (exp_avg_sq / bias2).sqrt() + eps_t + step_size = lr_t / bias1 + p.add_(exp_avg / denom, alpha=-step_size) + + +@torch.compile(dynamic=False, fullgraph=True) +def muon_step_fused( + stacked_grads: torch.Tensor, + stacked_params: torch.Tensor, + momentum_buffer: torch.Tensor, + second_momentum_buffer: torch.Tensor, + momentum_t: torch.Tensor, + lr_t: torch.Tensor, + wd_t: torch.Tensor, + beta2_t: torch.Tensor, + ns_steps: int, + red_dim: int, +) -> None: + momentum = momentum_t.to(stacked_grads.dtype) + momentum_buffer.lerp_(stacked_grads, 1 - momentum) + g = stacked_grads.lerp_(momentum_buffer, momentum) + x = g.bfloat16() + x = x / (x.norm(dim=(-2, -1), keepdim=True) * 1.02 + 1e-6) + if g.size(-2) > g.size(-1): + for a, b, c in polar_express_coeffs[:ns_steps]: + a_matrix = x.mT @ x + b_matrix = b * a_matrix + c * (a_matrix @ a_matrix) + x = a * x + x @ b_matrix + else: + for a, b, c in polar_express_coeffs[:ns_steps]: + a_matrix = x @ x.mT + b_matrix = b * a_matrix + c * (a_matrix @ a_matrix) + x = a * x + b_matrix @ x + g = x + beta2 = beta2_t.to(g.dtype) + v_mean = g.float().square().mean(dim=red_dim, keepdim=True) + red_dim_size = g.size(red_dim) + v_norm_sq = v_mean.sum(dim=(-2, -1), keepdim=True) * red_dim_size + v_norm = v_norm_sq.sqrt() + second_momentum_buffer.lerp_(v_mean.to(dtype=second_momentum_buffer.dtype), 1 - beta2) + step_size = second_momentum_buffer.clamp_min(1e-10).rsqrt() + scaled_sq_sum = (v_mean * red_dim_size) * step_size.float().square() + v_norm_new = scaled_sq_sum.sum(dim=(-2, -1), keepdim=True).sqrt() + final_scale = step_size * (v_norm / v_norm_new.clamp_min(1e-10)) + g = g * final_scale.to(g.dtype) + lr = lr_t.to(g.dtype) + wd = 
wd_t.to(g.dtype) + mask = (g * stacked_params) >= 0 + stacked_params.sub_(lr * g + lr * wd * stacked_params * mask) + + +class MuonAdamW(torch.optim.Optimizer): + def __init__(self, param_groups: list[dict]) -> None: + super().__init__(param_groups, defaults={}) + self._adamw_step_t = torch.tensor(0.0, dtype=torch.float32, device="cpu") + self._adamw_lr_t = torch.tensor(0.0, dtype=torch.float32, device="cpu") + self._adamw_beta1_t = torch.tensor(0.0, dtype=torch.float32, device="cpu") + self._adamw_beta2_t = torch.tensor(0.0, dtype=torch.float32, device="cpu") + self._adamw_eps_t = torch.tensor(0.0, dtype=torch.float32, device="cpu") + self._adamw_wd_t = torch.tensor(0.0, dtype=torch.float32, device="cpu") + self._muon_momentum_t = torch.tensor(0.0, dtype=torch.float32, device="cpu") + self._muon_lr_t = torch.tensor(0.0, dtype=torch.float32, device="cpu") + self._muon_wd_t = torch.tensor(0.0, dtype=torch.float32, device="cpu") + self._muon_beta2_t = torch.tensor(0.0, dtype=torch.float32, device="cpu") + + def _step_adamw(self, group: dict) -> None: + for p in group["params"]: + if p.grad is None: + continue + grad = p.grad + state = self.state[p] + if not state: + state["step"] = 0 + state["exp_avg"] = torch.zeros_like(p) + state["exp_avg_sq"] = torch.zeros_like(p) + state["step"] += 1 + self._adamw_step_t.fill_(state["step"]) + self._adamw_lr_t.fill_(group["lr"]) + self._adamw_beta1_t.fill_(group["betas"][0]) + self._adamw_beta2_t.fill_(group["betas"][1]) + self._adamw_eps_t.fill_(group["eps"]) + self._adamw_wd_t.fill_(group["weight_decay"]) + adamw_step_fused( + p, + grad, + state["exp_avg"], + state["exp_avg_sq"], + self._adamw_step_t, + self._adamw_lr_t, + self._adamw_beta1_t, + self._adamw_beta2_t, + self._adamw_eps_t, + self._adamw_wd_t, + ) + + def _step_muon(self, group: dict) -> None: + params = group["params"] + if not params: + return + first_param = params[0] + state = self.state[first_param] + num_params = len(params) + shape, device, dtype = 
first_param.shape, first_param.device, first_param.dtype + if "momentum_buffer" not in state: + state["momentum_buffer"] = torch.zeros(num_params, *shape, dtype=dtype, device=device) + if "second_momentum_buffer" not in state: + state_shape = (num_params, shape[-2], 1) if shape[-2] >= shape[-1] else (num_params, 1, shape[-1]) + state["second_momentum_buffer"] = torch.zeros(state_shape, dtype=dtype, device=device) + red_dim = -1 if shape[-2] >= shape[-1] else -2 + stacked_grads = torch.stack([p.grad for p in params]) + stacked_params = torch.stack(params) + self._muon_momentum_t.fill_(group["momentum"]) + self._muon_beta2_t.fill_(group["beta2"] if group["beta2"] is not None else 0.0) + self._muon_lr_t.fill_(group["lr"] * max(1.0, shape[-2] / shape[-1]) ** 0.5) + self._muon_wd_t.fill_(group["weight_decay"]) + muon_step_fused( + stacked_grads, + stacked_params, + state["momentum_buffer"], + state["second_momentum_buffer"], + self._muon_momentum_t, + self._muon_lr_t, + self._muon_wd_t, + self._muon_beta2_t, + group["ns_steps"], + red_dim, + ) + torch._foreach_copy_(params, list(stacked_params.unbind(0))) + + @torch.no_grad() + def step(self) -> None: + for group in self.param_groups: + if group["kind"] == "adamw": + self._step_adamw(group) + elif group["kind"] == "muon": + self._step_muon(group) + + +def create_optimizer(model: torch.nn.Module, settings: object) -> MuonAdamW: + return model.setup_optimizer( + unembedding_lr=settings.unembedding_lr, + embedding_lr=settings.embedding_lr, + matrix_lr=settings.matrix_lr, + weight_decay=settings.weight_decay, + adam_betas=settings.adam_betas, + scalar_lr=settings.scalar_lr, + ) diff --git a/component_system/components/trainer.py b/component_system/components/trainer.py new file mode 100644 index 000000000..fd300348e --- /dev/null +++ b/component_system/components/trainer.py @@ -0,0 +1,191 @@ +from __future__ import annotations + +import gc +import time +from dataclasses import dataclass +from typing import Any + +import 
torch + +from prepare import MAX_SEQ_LEN, TIME_BUDGET, evaluate_bpb, make_dataloader + + +H100_BF16_PEAK_FLOPS = 989.5e12 + + +@dataclass +class TrainingSettings: + aspect_ratio: int = 64 + head_dim: int = 128 + window_pattern: str = "SSSL" + total_batch_size: int = 2**19 + embedding_lr: float = 0.6 + unembedding_lr: float = 0.004 + matrix_lr: float = 0.04 + scalar_lr: float = 0.5 + weight_decay: float = 0.2 + adam_betas: tuple[float, float] = (0.8, 0.95) + warmup_ratio: float = 0.0 + warmdown_ratio: float = 0.5 + final_lr_frac: float = 0.0 + depth: int = 8 + device_batch_size: int = 32 # 24GB vram + seed: int = 42 + compile_model: bool = True + + +def default_training_settings() -> TrainingSettings: + return TrainingSettings() + + +def get_lr_multiplier(progress: float, settings: TrainingSettings) -> float: + if progress < settings.warmup_ratio: + return progress / settings.warmup_ratio if settings.warmup_ratio > 0 else 1.0 + if progress < 1.0 - settings.warmdown_ratio: + return 1.0 + cooldown = (1.0 - progress) / settings.warmdown_ratio + return cooldown + (1 - cooldown) * settings.final_lr_frac + + +def get_muon_momentum(step: int) -> float: + frac = min(step / 300, 1) + return (1 - frac) * 0.85 + frac * 0.95 + + +def get_weight_decay(progress: float, settings: TrainingSettings) -> float: + return settings.weight_decay * (1 - progress) + + +def run_training_session( + *, + model: torch.nn.Module, + optimizer: torch.optim.Optimizer, + tokenizer: Any, + settings: TrainingSettings, + param_counts: dict[str, int], + num_flops_per_token: float, + baseline_binding: dict[str, Any], +) -> dict[str, Any]: + t_start = time.time() + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + autocast_device = "cuda" if device.type == "cuda" else "cpu" + autocast_ctx = torch.amp.autocast(device_type=autocast_device, dtype=torch.bfloat16) + + tokens_per_fwdbwd = settings.device_batch_size * MAX_SEQ_LEN + assert settings.total_batch_size % tokens_per_fwdbwd == 0 + 
grad_accum_steps = settings.total_batch_size // tokens_per_fwdbwd + train_loader = make_dataloader(tokenizer, settings.device_batch_size, MAX_SEQ_LEN, "train") + x, y, epoch = next(train_loader) + + print(f"Vocab size: {tokenizer.get_vocab_size():,}") + print(f"Time budget: {TIME_BUDGET}s") + print(f"Gradient accumulation steps: {grad_accum_steps}") + print("Training session started") + + t_start_training = time.time() + smooth_train_loss = 0.0 + total_training_time = 0.0 + step = 0 + + while True: + if device.type == "cuda": + torch.cuda.synchronize(device=device) + t0 = time.time() + for _ in range(grad_accum_steps): + with autocast_ctx: + loss = model(x, y) + train_loss = loss.detach() + loss = loss / grad_accum_steps + loss.backward() + x, y, epoch = next(train_loader) + + progress = min(total_training_time / TIME_BUDGET, 1.0) + lrm = get_lr_multiplier(progress, settings) + muon_momentum = get_muon_momentum(step) + muon_weight_decay = get_weight_decay(progress, settings) + for group in optimizer.param_groups: + group["lr"] = group["initial_lr"] * lrm + if group["kind"] == "muon": + group["momentum"] = muon_momentum + group["weight_decay"] = muon_weight_decay + + optimizer.step() + model.zero_grad(set_to_none=True) + train_loss_f = train_loss.item() + if train_loss_f > 100: + raise RuntimeError("Training aborted because loss exceeded the fast-fail threshold.") + + torch.cuda.synchronize(device=device) + dt = time.time() - t0 + if step > 10: + total_training_time += dt + + ema_beta = 0.9 + smooth_train_loss = ema_beta * smooth_train_loss + (1 - ema_beta) * train_loss_f + debiased_smooth_loss = smooth_train_loss / (1 - ema_beta ** (step + 1)) + pct_done = 100 * progress + tok_per_sec = int(settings.total_batch_size / dt) + mfu = 100 * num_flops_per_token * settings.total_batch_size / dt / H100_BF16_PEAK_FLOPS + remaining = max(0.0, TIME_BUDGET - total_training_time) + print( + f"\rstep {step:05d} ({pct_done:.1f}%) | loss: {debiased_smooth_loss:.6f} | " + f"lrm: 
{lrm:.2f} | dt: {dt*1000:.0f}ms | tok/sec: {tok_per_sec:,} | " + f"mfu: {mfu:.1f}% | epoch: {epoch} | remaining: {remaining:.0f}s ", + end="", + flush=True, + ) + + if step == 0: + gc.collect() + gc.freeze() + gc.disable() + elif (step + 1) % 5000 == 0: + gc.collect() + + step += 1 + if step > 10 and total_training_time >= TIME_BUDGET: + break + + print() + total_tokens = step * settings.total_batch_size + model.eval() + with autocast_ctx: + val_bpb = evaluate_bpb(model, tokenizer, settings.device_batch_size) + + t_end = time.time() + peak_vram_mb = torch.cuda.max_memory_allocated() / 1024 / 1024 + steady_state_mfu = ( + 100 + * num_flops_per_token + * settings.total_batch_size + * (step - 10) + / total_training_time + / H100_BF16_PEAK_FLOPS + if total_training_time > 0 + else 0.0 + ) + num_params = param_counts["total"] + metrics = { + "val_bpb": float(val_bpb), + "training_seconds": float(total_training_time), + "total_seconds": float(t_end - t_start), + "peak_vram_mb": float(peak_vram_mb), + "mfu_percent": float(steady_state_mfu), + "total_tokens_M": float(total_tokens / 1e6), + "num_steps": int(step), + "num_params_M": float(num_params / 1e6), + "depth": int(settings.depth), + "startup_seconds": float(t_start_training - t_start), + } + + print("---") + print(f"val_bpb: {metrics['val_bpb']:.6f}") + print(f"training_seconds: {metrics['training_seconds']:.1f}") + print(f"total_seconds: {metrics['total_seconds']:.1f}") + print(f"peak_vram_mb: {metrics['peak_vram_mb']:.1f}") + print(f"mfu_percent: {metrics['mfu_percent']:.2f}") + print(f"total_tokens_M: {metrics['total_tokens_M']:.1f}") + print(f"num_steps: {metrics['num_steps']}") + print(f"num_params_M: {metrics['num_params_M']:.1f}") + print(f"depth: {metrics['depth']}") + return metrics diff --git a/component_system/config.py b/component_system/config.py new file mode 100644 index 000000000..9975ab2d2 --- /dev/null +++ b/component_system/config.py @@ -0,0 +1,31 @@ +"""Static configuration for the component 
system. No dynamic or per-run values.""" +from __future__ import annotations + +from pathlib import Path + +COMPONENT_SYSTEM_ROOT = Path(__file__).resolve().parent + +# Module import paths for training (used by mainline assembler) +MODEL_MODULE = "component_system.components.model" +OPTIMIZER_MODULE = "component_system.components.optimizer" +TRAINING_STEP_MODULE = "component_system.components.trainer" + +# Promotion threshold: improve val_bpb by at least this much to promote +PROMOTION_THRESHOLD = 0.001 + +# Worktree root relative to project (string for display/config compatibility) +WORKTREE_ROOT = "component_system/history/worktrees" + +# Default branch name suggested in UI when no branches exist (not a global baseline) +DEFAULT_BASELINE_BRANCH = "master" + + +def get_training_binding() -> dict[str, str | float]: + """Return a static dict used by training mainline/trainer (no baseline_version).""" + return { + "model_module": MODEL_MODULE, + "optimizer_module": OPTIMIZER_MODULE, + "training_step_module": TRAINING_STEP_MODULE, + "promotion_threshold": PROMOTION_THRESHOLD, + "worktree_root": WORKTREE_ROOT, + } diff --git a/component_system/domain/models.py b/component_system/domain/models.py new file mode 100644 index 000000000..f03c9a121 --- /dev/null +++ b/component_system/domain/models.py @@ -0,0 +1,91 @@ +from __future__ import annotations + +from enum import Enum +from typing import Any + +from pydantic import BaseModel, Field + + +class SeedStatus(str, Enum): + draft = "draft" + queued = "queued" + planning = "planning" + generated = "generated" + dca_queued = "dca_queued" + adapting = "adapting" + running = "running" + failed = "failed" + passed = "passed" + promoted = "promoted" + + +class StageName(str, Enum): + p = "p" + dca = "dca" + direct = "direct" + + +class RunStatus(str, Enum): + queued = "queued" + running = "running" + succeeded = "succeeded" + failed = "failed" + + +class PlanIdea(BaseModel): + title: str = "" + target_component: str = "model" + 
description: str = "" + source_refs: list[str] = Field(default_factory=list) + commit_sha: str | None = None + + +class StageRun(BaseModel): + run_id: str + seed_id: str + stage: StageName + status: RunStatus + task_id: str + created_at: float + updated_at: float + log_path: str | None = None + stderr_log_path: str | None = None + prompt_path: str | None = None + summary: dict[str, Any] = Field(default_factory=dict) + metrics: dict[str, Any] = Field(default_factory=dict) + signal: str | None = None + error: str | None = None + + +class SeedRecord(BaseModel): + seed_id: str + prompt: str + status: SeedStatus = SeedStatus.draft + created_at: float + updated_at: float + baseline_branch: str = "baseline" + worktree_path: str | None = None + latest_run_id: str | None = None + ralph_loop_enabled: bool = False + latest_signal: str | None = None + latest_metrics: dict[str, Any] = Field(default_factory=dict) + plan: PlanIdea | None = None + last_error: str | None = None + + +class DashboardColumn(BaseModel): + id: str + title: str + description: str + seeds: list[SeedRecord] + + +class DashboardViewModel(BaseModel): + setup_error: str | None = None + baseline_metrics_by_branch: dict[str, dict[str, object]] = Field(default_factory=dict) + default_baseline_branch: str = "master" + available_branches: list[str] = Field(default_factory=list) + seed_count: int + columns: list[DashboardColumn] + selected_seed: SeedRecord | None = None + daemon_status: str = "stopped" # "running" | "stopped" diff --git a/component_system/entrypoint.py b/component_system/entrypoint.py new file mode 100644 index 000000000..33fc2d426 --- /dev/null +++ b/component_system/entrypoint.py @@ -0,0 +1,18 @@ +"""Standalone entrypoint for the component_system baseline.""" +from __future__ import annotations + +import sys +from pathlib import Path + +if __package__ in {None, ""}: + sys.path.insert(0, str(Path(__file__).resolve().parent.parent)) + +from component_system.training.mainline import 
run_mainline_training + + +def main() -> None: + run_mainline_training() + + +if __name__ == "__main__": + main() diff --git a/component_system/package.json b/component_system/package.json new file mode 100644 index 000000000..5ae45136d --- /dev/null +++ b/component_system/package.json @@ -0,0 +1,13 @@ +{ + "name": "autoresearch-component-system-ui", + "private": true, + "scripts": { + "build:css": "tailwindcss -i ./web/static/tailwind.input.css -o ./web/static/app.css --minify", + "watch:css": "tailwindcss -i ./web/static/tailwind.input.css -o ./web/static/app.css --watch" + }, + "devDependencies": { + "autoprefixer": "^10.4.20", + "postcss": "^8.4.49", + "tailwindcss": "^3.4.17" + } +} diff --git a/component_system/postcss.config.js b/component_system/postcss.config.js new file mode 100644 index 000000000..5cbc2c7d8 --- /dev/null +++ b/component_system/postcss.config.js @@ -0,0 +1,6 @@ +module.exports = { + plugins: { + tailwindcss: {}, + autoprefixer: {} + } +}; diff --git a/component_system/protocol.md b/component_system/protocol.md new file mode 100644 index 000000000..6f50dec72 --- /dev/null +++ b/component_system/protocol.md @@ -0,0 +1,332 @@ +# autoresearch — Component-System Protocol + +This document is the operating manual for the component-system workflow. +The system runs a continuous Seed -> P -> DCA loop to discover, generate, +adapt, evaluate, and promote improvements to the training stack. + +The main objective is simple: improve `val_bpb` against the current baseline +without breaking the canonical component-system entrypoint or introducing +unreasonable complexity. + +VRAM is a first-class constraint. Higher memory use is acceptable only when the +quality gain is meaningful; avoid candidates that produce small or ambiguous +`val_bpb` gains while causing large memory growth. 
+ +## Top-Level Bootstrap Rule + +If you are an interactive code agent that was merely told to "follow this +protocol", do not manually simulate the entire workflow inside one foreground +session. + +The intended control flow is: +1. Read this file and the required context files. +2. Ensure the queue and state layout exist. +3. Create or refine a seed from a human prompt. +4. Queue that seed into `component_system/history/queue/p/`. +5. Start the resident daemon with `uv run component_system/run.py`. +6. Let the daemon workers execute P and DCA through file-based handoff. +7. Monitor the daemon, queue, and logs instead of roleplaying stage work yourself. + +Manual execution of an individual stage is only for the agent process that was +invoked by the daemon for that specific task. + +## Architecture + +```text +component_system/ + protocol.md <- overall workflow protocol + entrypoint.py <- canonical training entrypoint + PDCA-PLAN.md <- P stage rules + PDCA-DO-CHECK-ACTION.md <- DCA stage rules + run.py <- resident daemon and worker dispatch + task.py <- queue and JSON state helpers + baseline_branches.json <- per-branch baseline mapping (workflow-managed; read-only) + baseline_metrics.json <- baseline run metrics (workflow-managed; read-only) + config.py <- promotion threshold and static binding + history/ <- runtime dir (auto-created) + logs/ <- agent stdout/stderr logs + queue/{p,dca,done,error}/ <- stage handoff and archival + state/{seeds,runs,events}/<- durable workflow state + worktrees/ <- per-seed git worktrees + components/ + model.py + optimizer.py + trainer.py + training/ + mainline.py +``` + +## Core Goal and Decision Rule + +Optimize for lower `val_bpb`. A candidate is worth promoting only when the gain +is real, the implementation is understandable, and the cost in memory or +complexity is justified. + +Apply this bias consistently: +- Lower `val_bpb` is the primary success metric. 
+- VRAM is a soft but important constraint: some increase is acceptable, but + dramatic growth needs correspondingly strong quality gains. +- Simpler changes are preferred when results are similar. +- A tiny gain that adds brittle complexity is usually not worth promotion. +- A tiny gain that materially increases VRAM is usually not worth promotion. +- A simplification that preserves or slightly improves quality is a strong outcome. +- If the signal is ambiguous, treat it as `neutral` and do not promote. + +## Required Reading Before Any Work + +Read in this order: +1. `component_system/protocol.md` +2. The stage-specific document (right after protocol): `component_system/PDCA-DO-CHECK-ACTION.md` for DCA, `component_system/PDCA-PLAN.md` for P +3. `prepare.py` for fixed data and evaluation behavior; never modify it +4. `component_system/entrypoint.py` for the canonical execution path +5. `component_system/config.py` for promotion threshold and static binding + +Baseline reference files (workflow-managed; read-only): `component_system/baseline_branches.json` (per-branch baseline mapping), `component_system/baseline_metrics.json` (baseline run metrics). The workflow writes these; only read them for context. + +For interactive bootstrap, also inspect the current queue/state situation, +especially recent items in `queue/done/` and the latest baseline information. + +## Workspace and Path Rules + +When the daemon invokes you for a P or DCA task, your current working directory +is the seed worktree. In that mode: + +- Read and edit only within the seed worktree. +- Use only relative paths from the current working directory. +- Do not request or depend on absolute paths or files outside the worktree. + +## Hard Constraints + +1. Never modify `prepare.py`. +2. `uv run component_system/entrypoint.py` must remain the canonical, + working component-system training command. +3. 
The root repo must stay compatible with the upstream implementation; + do not require changes to root `train.py`. +4. Stage-to-stage handoff must happen through files under `queue/`, not + merely in memory or only in agent conversation state. +5. Only the DCA promotion flow may update `baseline_metrics.json` and `baseline_branches.json`. +6. Do not bypass the baseline mechanism by manually merging branches or + force-advancing the baseline outside workflow control. + +## Baseline-First Rule + +The first meaningful measurement in a fresh component-system run is the +baseline result. + +- If `baseline_metrics.json` has no `last_val_bpb` for the baseline branch, the system should establish a + baseline run before evaluating ordinary seeds. +- The baseline seed is a "no changes" measurement of the currently bound + component modules. +- Treat that first baseline result as the reference point for later promotion + decisions. + +This mirrors the root-project rule that the first run should establish the +baseline before experimenting. 
+ +```mermaid +flowchart TD + A[Create seed] --> B{Baseline result exists?} + B -- No --> C[Create or reuse __baseline__ seed] + C --> D[Queue baseline DCA] + D --> E[Run baseline measurement from project root] + E --> F[Save baseline metrics in baseline_metrics.json] + F --> G[Release waiting seeds] + B -- Yes --> G + G --> H[Seed stays in draft or queued with no worktree] + H --> I[Queue P run] + I --> J[Create seed worktree at P start] + J --> K[P agent plans and commits on seed branch] + K --> L[Queue DCA run] + L --> M[DCA agent adapts, runs training, and reports metrics] + M --> N{Promotion signal?} + N -- Positive --> O[Merge seed branch into baseline] + O --> P{Merge conflict?} + P -- No --> Q[Update baseline metadata and finish seed] + P -- Yes --> R[Queue conflict-resolution DCA] + R --> M + N -- Neutral or Negative --> S[Keep result in state only] +``` + +## Workflow Stages + +The sections below describe what each daemon-dispatched stage worker does. +They are not instructions for a top-level interactive agent to perform the +entire lifecycle manually. + +### P — Discovery / Plan / Initial Generation + +Read `component_system/PDCA-PLAN.md`. + +Responsibilities: +- Refine the seed prompt into a concrete plan. +- Create or refresh the seed worktree from the active baseline. +- Generate the first candidate implementation in the worktree. +- Keep the change focused enough that DCA can evaluate it cleanly. +- Commit the generated candidate on the seed branch so DCA receives a stable snapshot. + +P is about producing a plausible, testable first version, not claiming success. + +### DCA — Delivery / Check / Action + +Read `component_system/PDCA-DO-CHECK-ACTION.md`. + +Responsibilities: +- Adapt and fix the generated candidate inside the seed worktree. +- Run the canonical training/evaluation entrypoint. +- Read the structured metrics from the run output. +- Decide whether the result is positive, neutral, or negative relative to baseline. 
+- Promote the seed branch into baseline only when the signal is strong enough. + +DCA is the stage that turns a raw idea into a measured outcome. + +## Canonical Run and Output + +The canonical component-system execution path is: + +```bash +uv run component_system/entrypoint.py +``` + +When the DCA agent runs this (e.g. in a sandbox or tool), the run needs **at least 600 seconds** (first step ~150s + training budget 300s + buffer); use e.g. `timeout 600 uv run ...` so the execution environment does not kill the process early. + +The DCA agent must report a structured JSON summary between the required +markers, including a `metrics` object. The runner uses that structured report +first and only falls back to parsing stdout/stderr when the JSON metrics are +missing. If the initial DCA summary still lacks metrics, the system queues a +follow-up recovery DCA that inspects the saved logs before declaring failure. +The canonical metrics are: + +```text +--- +val_bpb: 0.997900 +training_seconds: 300.1 +total_seconds: 325.9 +peak_vram_mb: 45060.2 +mfu_percent: 39.80 +total_tokens_M: 499.6 +num_steps: 953 +num_params_M: 50.3 +depth: 8 +startup_seconds: 25.8 +``` + +Treat `val_bpb` as the primary metric. `peak_vram_mb`, total runtime, and code +complexity are secondary constraints that influence promotion decisions. + +## VRAM Rule + +Track `peak_vram_mb` on every serious evaluation run and treat it as required +decision input, not a cosmetic metric. + +- Some VRAM growth is acceptable when it buys a clear `val_bpb` improvement. +- Large VRAM increases require a correspondingly strong quality gain. +- If two candidates are similar on `val_bpb`, prefer the lower-VRAM one. +- If a candidate regresses or barely improves `val_bpb` while increasing VRAM + substantially, treat it as a bad trade and do not promote it. +- Avoid changes that risk blowing up memory usage unless the expected upside is + compelling enough to justify the experiment. 
+ +## Promotion Rule + +A run is promotable only if all of the following hold: +- The run completed successfully. +- `val_bpb` improved enough over the active baseline to count as a real win. +- VRAM growth is not unreasonable for the magnitude of the gain. +- The change is understandable, maintainable, and reversible. + +If the candidate is equal, worse, noisy, or hard to justify, do not promote it. +Record the outcome and move on. + +## Failure Handling + +Use the same judgment standard as the original autoresearch loop: + +- If a run crashes because of a simple bug, fix it, rerun, and update the same + run record. +- If the idea is fundamentally flawed, archive it without promotion. +- If the task cannot be recovered quickly, move it into the error flow and + persist the failure details. +- Crashes are negative evidence; they should not silently disappear. + +## Bootstrap Procedure for Interactive Sessions + +When a human starts a fresh interactive session and asks you to use this +component system, do this: + +1. Read `baseline_branches.json`, `baseline_metrics.json`, and recent queue/state outputs. +2. Ensure the queue/state/worktree layout exists. +3. Create an initial seed from the human prompt. +4. Queue P for that seed. +5. Start `uv run component_system/run.py`. +6. Monitor the daemon and logs instead of manually executing P and DCA yourself. + +## Operating Loop + +Once the daemon is running, the queue-driven loop is: + +1. A seed is persisted under `state/seeds/` and queued to `queue/p/`. +2. P creates or refreshes the seed worktree from baseline, generates code, and + commits on the seed branch. +3. The daemon automatically queues DCA. +4. DCA adapts, checks, runs, evaluates, and either promotes or archives the seed. +5. The system persists runs and events under `state/` and continues with the + next available work. 
+ +The system is intended to behave like an autonomous researcher: keep moving, +measure results, retain wins, discard losses, and continue until explicitly +stopped. + +## State and Logging + +The durable record of the workflow lives in files: + +- `state/seeds/` stores seed definitions and status. +- `state/runs/` stores stage-run metadata and run outcomes. +- `state/events/` stores seed event histories. +- `queue/done/` archives completed tasks. +- `queue/error/` captures failed tasks. +- `logs/` stores stdout/stderr from agent invocations. + +Do not rely on chat context as the source of truth when the filesystem state +already records the workflow. + +## Daemon + +The resident daemon in `component_system/run.py` manages two single-threaded +worker pools that poll `queue/{p,dca}/` continuously. Each worker dispatches a +task to an external code agent, which reads files, modifies code in a git +worktree, runs the canonical entrypoint, and prints structured summaries for +the runner to persist. + +Start the daemon with: + +```bash +# Default backend +uv run component_system/run.py + +# Alternate backends +PDCA_AGENT=codex uv run component_system/run.py +PDCA_AGENT=opencode uv run component_system/run.py +``` + +### Agent Backends + +| `PDCA_AGENT` | CLI invoked | Prompt delivery | +|--------------|-------------|-----------------| +| `claude` (default) | `claude -p --verbose` | stdin | +| `codex` | `codex exec -a never --sandbox workspace-write` | positional arg | +| `opencode` | `opencode run` | positional arg | + +### Timeouts + +Each stage has a default timeout in seconds and can be overridden through the +environment: + +| Variable | Default | Purpose | +|----------|---------|---------| +| `PDCA_TIMEOUT_P` | 900 | Planning and initial code generation | +| `PDCA_TIMEOUT_DCA` | 3600 | Adaptation, training, evaluation, and promotion | + +### Logs + +Agent stdout/stderr for every invocation is saved to `component_system/history/logs/`. 
diff --git a/component_system/repositories/state.py b/component_system/repositories/state.py new file mode 100644 index 000000000..7ff3ab2fd --- /dev/null +++ b/component_system/repositories/state.py @@ -0,0 +1,75 @@ +from __future__ import annotations + +from typing import Any + +from component_system.domain.models import SeedRecord, StageRun +from component_system.task import ( + append_event, + list_runs, + list_seeds, + load_baseline_branch_map, + load_baseline_metrics, + load_events, + load_run, + load_seed, + save_baseline_branch_map, + save_baseline_metrics, + save_run, + save_seed, +) + + +class BaselineBranchMapRepository: + """Per-seed baseline branch mapping (seed_id -> baseline_branch).""" + + def set_branch_for_seed(self, seed_id: str, branch: str) -> None: + m = load_baseline_branch_map() + m[seed_id] = branch + save_baseline_branch_map(m) + + +class BaselineMetricsRepository: + """Per-baseline-branch metrics (last_val_bpb, promoted_*, etc.).""" + + def get_all(self) -> dict[str, dict[str, Any]]: + return load_baseline_metrics() + + def get_for_branch(self, branch: str) -> dict[str, Any] | None: + return load_baseline_metrics().get(branch) + + def update_for_branch(self, branch: str, metrics: dict[str, Any]) -> None: + data = load_baseline_metrics() + data[branch] = {**data.get(branch, {}), **metrics} + save_baseline_metrics(data) + + +class SeedRepository: + def list(self) -> list[SeedRecord]: + return [SeedRecord.model_validate(seed) for seed in list_seeds()] + + def get(self, seed_id: str) -> SeedRecord | None: + data = load_seed(seed_id) + return SeedRecord.model_validate(data) if data else None + + def save(self, seed: SeedRecord) -> SeedRecord: + save_seed(seed.model_dump(mode="json")) + return seed + + def append_event(self, seed_id: str, kind: str, message: str, **payload: Any) -> list[dict[str, Any]]: + return append_event(seed_id, {"kind": kind, "message": message, **payload}) + + def events(self, seed_id: str) -> list[dict[str, Any]]: + 
return load_events(seed_id) + + +class RunRepository: + def list(self, seed_id: str | None = None) -> list[StageRun]: + return [StageRun.model_validate(run) for run in list_runs(seed_id)] + + def get(self, run_id: str) -> StageRun | None: + data = load_run(run_id) + return StageRun.model_validate(data) if data else None + + def save(self, run: StageRun) -> StageRun: + save_run(run.model_dump(mode="json")) + return run diff --git a/component_system/run.py b/component_system/run.py new file mode 100644 index 000000000..0e3bff04e --- /dev/null +++ b/component_system/run.py @@ -0,0 +1,764 @@ +"""Seed -> P -> DCA daemon for the component-system web app.""" +from __future__ import annotations + +if __package__ in {None, ""}: + import sys + from pathlib import Path + + sys.path.insert(0, str(Path(__file__).resolve().parent.parent)) + +import json +import os +import shutil +import signal +import subprocess +import sys +import threading +import time +import traceback +from concurrent.futures import ThreadPoolExecutor +from pathlib import Path +from typing import Any + +from component_system.domain.models import StageName +from component_system.services.workflow import BASELINE_SEED_ID, WorkflowService +from component_system.task import ( + BASELINE_BRANCHES_PATH, + BASELINE_METRICS_PATH, + COMPONENT_SYSTEM_ROOT, + DAEMON_HEARTBEAT_PATH, + ensure_queue_layout, + daemon_heartbeat, + list_pending, + LOG_ROOT, + move_to_done, + read_task, +) + +PROJECT_ROOT = COMPONENT_SYSTEM_ROOT.parent +LOG_DIR = LOG_ROOT +RESULTS_TSV = PROJECT_ROOT / "results.tsv" +PROGRESS_PNG = PROJECT_ROOT / "progress.png" + +POLL_INTERVAL = 10.0 +_shutdown = False +WORKFLOW = WorkflowService() + +DEFAULT_TIMEOUTS = {"p": 900, "dca": 3600, "direct": 3600} + +STAGE_DOCS = { + "p": ["PDCA-PLAN.md"], + "dca": ["PDCA-DO-CHECK-ACTION.md"], +} + +AGENT_CONFIGS: dict[str, dict[str, Any]] = { + "claude": {"cmd": ["claude", "-p", "--verbose"], "via": "stdin"}, + "codex": {"cmd": ["codex", "exec", "-a", "never", 
"--sandbox", "workspace-write"], "via": "arg"}, + "opencode": {"cmd": ["opencode", "run"], "via": "arg"}, +} + + +def _signal_handler(_sig: int, _frame: Any) -> None: + global _shutdown + _shutdown = True + print("\n[daemon] shutdown requested") + + +def _get_timeout(stage: str) -> int: + return int(os.environ.get(f"PDCA_TIMEOUT_{stage.upper()}", DEFAULT_TIMEOUTS.get(stage, 600))) + + +def _build_log_paths(run_id: str) -> tuple[Path, Path]: + LOG_DIR.mkdir(parents=True, exist_ok=True) + stdout_path = LOG_DIR / f"{run_id}.stdout.log" + stderr_path = LOG_DIR / f"{run_id}.stderr.log" + return stdout_path, stderr_path + + +def _write_prompt_file(run_id: str, prompt: str) -> Path: + """Save the agent prompt to a file for debugging. Returns the path.""" + LOG_DIR.mkdir(parents=True, exist_ok=True) + prompt_path = LOG_DIR / f"{run_id}.prompt.txt" + prompt_path.write_text(prompt, encoding="utf-8") + return prompt_path + + +def _is_root_venv_active() -> bool: + expected = (PROJECT_ROOT / ".venv").resolve() + active = os.environ.get("VIRTUAL_ENV") + if not active: + return False + try: + return Path(active).resolve() == expected + except OSError: + return False + + +def _dca_command_guidance() -> tuple[str, str]: + if _is_root_venv_active(): + return ( + "uv run --active component_system/entrypoint.py", + "Root .venv is active; use --active to reuse it from the worktree.", + ) + return ( + "uv run component_system/entrypoint.py", + "No active root .venv detected; fallback avoids --active so uv can run normally.", + ) + + +def _build_direct_code_prompt(prompt: str) -> str: + return ( + "You are running as a direct code agent from the project root of this repository.\n" + "Execute the user's request directly in the current working tree.\n" + "Do not switch into seed worktrees for this task.\n\n" + "User request:\n" + f"{prompt.strip()}\n" + ) + + +def _stream_pipe_to_file(pipe: Any, handle: Any, chunks: list[str]) -> None: + try: + while True: + piece = pipe.read(16) + if not 
piece: + break + chunks.append(piece) + handle.write(piece) + handle.flush() + finally: + try: + pipe.close() + except Exception: + pass + + +def _combined_output(stdout: str, stderr: str) -> str: + if stdout and stderr: + return f"{stdout}\n{stderr}" + return stdout or stderr + + +def _agent_failure_reason(exit_code: int, stdout: str, stderr: str) -> str: + combined = _combined_output(stdout, stderr) + if "timeout after " in combined: + return combined.strip().splitlines()[-1] + if exit_code == -1: + if combined.strip(): + return combined.strip().splitlines()[-1] + return "Agent execution failed before completion. See stdout/stderr logs for details." + return f"Agent exited with code {exit_code}. See stdout/stderr logs for details." + + +def _should_salvage_completed_dca(stage: str, exit_code: int, output_text: str) -> bool: + """Accept a DCA run when canonical metrics were printed despite agent exit issues.""" + if stage != "dca" or exit_code == 0: + return False + summary = WORKFLOW.extract_summary(output_text, StageName.dca) or {} + metrics = WORKFLOW.extract_dca_metrics(output_text, summary) + return metrics.get("val_bpb") is not None + + +def _agent_cwd(worktree_path: str | None) -> str: + """Resolve cwd for the agent: seed worktree when provided and present, else project root.""" + if not worktree_path: + return str(PROJECT_ROOT) + path = Path(worktree_path) + if not path.is_absolute(): + path = PROJECT_ROOT / path + resolved = path.resolve() + return str(resolved) if resolved.is_dir() else str(PROJECT_ROOT) + + +def _resolve_worktree_path(worktree_path: str | None) -> Path | None: + """Resolve worktree path to absolute Path, or None if invalid/missing.""" + if not worktree_path: + return None + path = Path(worktree_path) + if not path.is_absolute(): + path = PROJECT_ROOT / path + resolved = path.resolve() + return resolved if resolved.is_dir() else None + + +def _sync_results_tsv_into_worktree(worktree_path: str | None) -> None: + """Copy the latest root 
results.tsv into the seed worktree if it exists. Non-fatal on failure.""" + resolved = _resolve_worktree_path(worktree_path) + if resolved is None or not RESULTS_TSV.exists(): + return + dest = resolved / "results.tsv" + try: + shutil.copy2(RESULTS_TSV, dest) + except OSError as err: + print(f"[P] could not copy results.tsv into worktree: {err}", file=sys.stderr) + + +def _sync_baseline_json_into_worktree(worktree_path: str | None) -> None: + """Copy baseline_metrics.json and baseline_branches.json from project component_system into the worktree. + Worktrees check out from baseline-branch; without this sync the agent would see stale or missing baseline data.""" + resolved = _resolve_worktree_path(worktree_path) + if resolved is None: + return + dest_dir = resolved / "component_system" + dest_dir.mkdir(parents=True, exist_ok=True) + for src_path, name in [ + (BASELINE_METRICS_PATH, "baseline_metrics.json"), + (BASELINE_BRANCHES_PATH, "baseline_branches.json"), + ]: + if not src_path.exists(): + continue + dest = dest_dir / name + try: + shutil.copy2(src_path, dest) + except OSError as err: + print(f"[P] could not copy {name} into worktree: {err}", file=sys.stderr) + + +def _sync_worktree_context(worktree_path: str | None) -> None: + """Sync all workflow-managed live data into the worktree so the agent sees current state. + Call before invoking the agent when cwd is a worktree (P or DCA).""" + _sync_results_tsv_into_worktree(worktree_path) + _sync_baseline_json_into_worktree(worktree_path) + + +def _invoke_agent( + prompt: str, stage: str, run_id: str, worktree_path: str | None = None +) -> tuple[int, str, str, Path | None, Path | None]: + agent_name = os.environ.get("PDCA_AGENT", "claude") + config = AGENT_CONFIGS.get(agent_name) + if config is None: + raise ValueError(f"Unknown PDCA_AGENT={agent_name!r}. 
Supported: {', '.join(AGENT_CONFIGS)}") + + cmd = list(config["cmd"]) + timeout = _get_timeout(stage) + cwd = _agent_cwd(worktree_path) + # PYTHONUNBUFFERED=1 so child Python (e.g. uv run entrypoint.py) flushes stdout + # immediately instead of block-buffering when stdout is a pipe; otherwise + # stdout log only appears in one shot after the task finishes. + env = {**os.environ, "PYTHONUNBUFFERED": "1"} + if agent_name == "opencode": + project_root_glob = str(PROJECT_ROOT.resolve().as_posix()) + "/**" + existing = {} + try: + if os.environ.get("OPENCODE_PERMISSION"): + existing = json.loads(os.environ["OPENCODE_PERMISSION"]) + except (json.JSONDecodeError, KeyError): + pass + ext_dir = dict(existing.get("external_directory", {})) + ext_dir[project_root_glob] = "allow" + env["OPENCODE_PERMISSION"] = json.dumps({"external_directory": ext_dir}) + popen_kwargs: dict[str, Any] = { + "cwd": cwd, + "env": env, + "stdout": subprocess.PIPE, + "stderr": subprocess.PIPE, + "text": True, + "encoding": "utf-8", + "errors": "replace", + "bufsize": 1, + } + if config["via"] == "stdin": + popen_kwargs["stdin"] = subprocess.PIPE + else: + cmd.append(prompt) + + print(f"[{stage.upper()}] invoking {agent_name} (timeout={timeout}s)") + stdout_path, stderr_path = _build_log_paths(run_id) + try: + process = subprocess.Popen(cmd, **popen_kwargs) + except FileNotFoundError: + msg = f"{agent_name!r} binary not found. Install it or set PDCA_AGENT to a different backend." 
+ return -1, "", msg, None, None + + if config["via"] == "stdin" and process.stdin is not None: + process.stdin.write(prompt) + process.stdin.close() + + stdout_chunks: list[str] = [] + stderr_chunks: list[str] = [] + with open(stdout_path, "w", encoding="utf-8") as stdout_handle, open( + stderr_path, "w", encoding="utf-8" + ) as stderr_handle: + stdout_handle.write(f"stage: {stage.upper()}\nagent: {agent_name}\n") + stdout_handle.write(f"timestamp: {time.strftime('%Y%m%d-%H%M%S')}\n\n") + stdout_handle.flush() + stderr_handle.write(f"stage: {stage.upper()}\nagent: {agent_name}\n") + stderr_handle.write(f"timestamp: {time.strftime('%Y%m%d-%H%M%S')}\n\n") + stderr_handle.flush() + + stdout_thread = threading.Thread( + target=_stream_pipe_to_file, + args=(process.stdout, stdout_handle, stdout_chunks), + daemon=True, + ) + stderr_thread = threading.Thread( + target=_stream_pipe_to_file, + args=(process.stderr, stderr_handle, stderr_chunks), + daemon=True, + ) + stdout_thread.start() + stderr_thread.start() + + timed_out = False + try: + process.wait(timeout=timeout) + except subprocess.TimeoutExpired: + timed_out = True + process.kill() + + stdout_thread.join() + stderr_thread.join() + + stdout = "".join(stdout_chunks) + stderr = "".join(stderr_chunks) + if timed_out: + timeout_message = f"timeout after {timeout}s" + if stderr: + stderr = f"{stderr}\n{timeout_message}" + else: + stderr = timeout_message + return -1, stdout, stderr, stdout_path, stderr_path + + return process.returncode, stdout, stderr, stdout_path, stderr_path + + +def _build_merge_resolution_prompt(task: dict[str, Any]) -> str: + """Lightweight prompt for merge-resolution DCA: no protocol/docs, just commit, merge, report.""" + task_json = json.dumps(task, indent=2) + target_branch = task.get("baseline_branch", "master") # branch we want to merge into (e.g. 
master) + worktree_path = task.get("worktree_path") or "" + seed_id = task.get("seed_id", "") + last_metrics = task.get("last_metrics") or {} + last_summary = task.get("last_summary") or {} + notes = last_summary.get("notes", "Merge resolution: committed and merged into baseline.") + completed_at = last_summary.get("completed_at", "YYYY-MM-DD HH:MM:SS") + report_json = json.dumps({ + "checks": ["merge_resolution"], + "notes": notes, + "completed_at": completed_at, + "commit_sha": "", + "metrics": last_metrics, + }, indent=2) + + if seed_id == BASELINE_SEED_ID: + # We are resolving the merge of __baseline__ INTO target_branch (e.g. master). + # git merge X = merge X into current branch; so we need to be on target_branch, then git merge __baseline__. + cwd_note = ( + "Your working directory is the project root (main repo). " + "Do NOT run the merge from the __baseline__ worktree: that would merge the wrong way.\n\n" + ) + steps = ( + "Steps:\n" + f"1. Find where {target_branch!r} is checked out: run git worktree list and identify the path whose branch is {target_branch!r} (often the main repo).\n" + f"2. cd to that directory, then run: git merge {BASELINE_SEED_ID!r}. Resolve any conflicts, then commit the merge.\n" + f" Correct example (merge __baseline__ into {target_branch}):\n" + f" git worktree list\n" + f" cd # e.g. main repo\n" + f" git merge {BASELINE_SEED_ID!r}\n" + " Wrong (do not do this): cd to the __baseline__ worktree and run git merge master — that merges master into __baseline__.\n" + "3. Do not run the training entrypoint; the experiment already completed and metrics exist.\n" + "4. Print the DCA summary block below (same metrics as the previous run). Include the current commit SHA (after committing the merge) in the DCA summary JSON.\n\n" + ) + else: + # Normal seed: merge the baseline branch (__baseline__) INTO the seed worktree so the seed is up to date. + if worktree_path: + cwd_note = ( + "Your working directory is the project root. 
" + f"The seed worktree is at {worktree_path!r}; run git commands from that directory (e.g. cd there first).\n\n" + ) + else: + cwd_note = ( + "Your working directory is the project root. " + f"The seed worktree is at component_system/history/worktrees/{seed_id!r}; run git commands from that directory for the merge.\n\n" + ) + steps = ( + "Steps:\n" + "1. Commit any uncommitted changes in the seed worktree (e.g. batch-size or other fixes).\n" + f"2. In the seed worktree, merge the baseline branch into the current branch: git merge {BASELINE_SEED_ID!r}. Resolve any conflicts, then commit the merge.\n" + "3. Do not run the training entrypoint; the experiment already completed and metrics exist.\n" + "4. Print the DCA summary block below (same metrics as the previous run). Include the current commit SHA (after committing the merge) in the DCA summary JSON.\n\n" + ) + + return ( + "MERGE RESOLUTION (focused task). Do not read protocol or stage docs.\n\n" + "Task (inline):\n" + f"{task_json}\n\n" + f"{cwd_note}" + f"{steps}" + "AUTORESEARCH_DCA_SUMMARY_BEGIN\n" + f"{report_json}\n" + "AUTORESEARCH_DCA_SUMMARY_END\n" + ) + + +def _build_prompt(stage: str, task: dict[str, Any], task_path: Path) -> str: + """Build the agent prompt for a stage. Prompt types (by weight): + - P: full header (protocol, stage doc, baseline files, task) + P workflow. Heavy. + - DCA metrics_recovery: full header + log-recovery instructions. Heavy. + - DCA merge_resolution: lightweight; task + commit, merge, report (no protocol/docs). Light. + - DCA baseline_measurement: full header + baseline retry/OOM/commit/run. Heavy. + - DCA normal: full header + adapt/run/commit/report. Heavy. 
+ """ + task_json = json.dumps(task, indent=2) + rel_task = task_path.relative_to(PROJECT_ROOT).as_posix() + worktree_path = task.get("worktree_path", "component_system/history/worktrees") + agent_cwd = _agent_cwd(worktree_path) + worktree_dir = Path(agent_cwd) + + # Worktree runs must stay entirely within the copied seed workspace to avoid external_directory requests. + if worktree_dir.resolve() != PROJECT_ROOT.resolve(): + context_protocol = " - component_system/protocol.md" + docs = "\n".join(f" - component_system/{doc}" for doc in STAGE_DOCS[stage]) + task_block = ( + "Task content (provided inline; do not look up any external task file):\n" + f"{task_json}\n\n" + ) + worktree_note = ( + "Your working directory is the assigned workflow worktree (your current directory).\n" + "All required file context is already copied into this worktree under component_system/.\n" + "Use only paths relative to your current working directory. " + "Do not request access to absolute paths, parent-directory paths, or files outside the worktree.\n" + ) + else: + context_protocol = " - component_system/protocol.md" + docs = "\n".join(f" - component_system/{doc}" for doc in STAGE_DOCS[stage]) + task_path_rel = f" - {rel_task}" + task_block = f"Task file:\n{task_path_rel}\n\nTask content:\n{task_json}\n\n" + worktree_note = "Your working directory is the project root.\n" + + required_context = ( + "Required context (read first; paths relative to your cwd):\n" + f" - component_system/protocol.md\n" + f"{docs}\n" + ) + baseline_files_note = ( + "Baseline reference files (workflow-managed; read-only):\n" + " - component_system/baseline_branches.json (per-branch baseline mapping)\n" + " - component_system/baseline_metrics.json (baseline run metrics)\n" + "The workflow writes these; only read them for context.\n\n" + ) + header = ( + "You are working on the autoresearch component-system workflow.\n\n" + f"{required_context}\n" + f"{baseline_files_note}" + f"{task_block}" + 
f"{worktree_note}" + "Do not edit files outside the worktree unless the prompt explicitly requires it.\n\n" + ) + + if stage == "p": + return header + ( + "You are the P stage.\n\n" + "## Read results.tsv first (avoid idea duplication)\n" + "Before choosing a hypothesis, read `results.tsv` in your cwd if it exists. " + "Use it to avoid proposing ideas already tried or discarded; only repeat an idea if you have a clear new angle (e.g. different implementation or target component). " + "See component_system/PDCA-PLAN.md for full guidance.\n\n" + "Workflow:\n" + "1. Refine the seed prompt into a concrete implementation idea.\n" + "2. Implement the first generated version of that idea in the provided worktree.\n" + "3. Create a git commit in the seed branch (current branch in the worktree).\n" + "4. Print a JSON summary between these exact markers:\n" + "AUTORESEARCH_P_SUMMARY_BEGIN\n" + '{"idea":"...","target_component":"model|optimizer|trainer","description":"...","source_refs":["..."],"commit_sha":"...","completed_at":"YYYY-MM-DD HH:MM:SS"}\n' + "AUTORESEARCH_P_SUMMARY_END\n" + "One branch per seed: you are already on the seed branch in the worktree.\n" + "Do not merge branches; only the DCA stage may trigger a merge into baseline.\n" + ) + if stage == "dca": + merge_resolution = task.get("merge_resolution") is True + metrics_recovery = task.get("metrics_recovery") is True + if merge_resolution: + return _build_merge_resolution_prompt(task) + dca_cmd, dca_note = _dca_command_guidance() + baseline_measurement = task.get("seed_id") == "__baseline__" + conflict_block = "" + if metrics_recovery: + source_run_id = task.get("source_run_id", "unknown") + stdout_log = task.get("source_stdout_log_path", "missing") + stderr_log = task.get("source_stderr_log_path", "missing") + return header + ( + "METRICS RECOVERY: The previous DCA run completed, but the runner could not confirm metrics from its final report.\n" + "Do not rerun training. Do not edit code. 
Do not create a commit.\n" + f"Inspect the saved logs for source run {source_run_id!r}:\n" + f"- stdout log: {stdout_log}\n" + f"- stderr log: {stderr_log}\n" + "Recover the canonical metrics from those logs if they are present, then print the final JSON summary.\n" + "Use this exact shape:\n" + "AUTORESEARCH_DCA_SUMMARY_BEGIN\n" + '{"checks":["log_metrics_recovery"],"notes":"Recovered metrics from saved logs.","completed_at":"YYYY-MM-DD HH:MM:SS","commit_sha":"","metrics":{"val_bpb":1.239972,"training_seconds":300.1,"total_seconds":360.4,"startup_seconds":25.8,"peak_vram_mb":11967.8,"mfu_percent":2.15,"total_tokens_M":140.5,"num_steps":268,"num_params_M":11.5,"depth":4}}\n' + "AUTORESEARCH_DCA_SUMMARY_END\n" + "If you still cannot recover metrics, print the same object with an empty metrics object and explain why in notes.\n" + ) + if baseline_measurement: + return header + conflict_block + ( + "BASELINE MEASUREMENT: establish the first reference metrics in the dedicated baseline worktree.\n" + "You must retry until the run completes successfully and you can report real metrics. Do not report empty metrics and stop.\n" + "If training fails with CUDA out of memory (OOM): the default batch size is for H100. Reduce device_batch_size (and if needed total_batch_size) in component_system/components/trainer.py (TrainingSettings) so training fits in available VRAM, then rerun until the baseline run completes. Only trivial execution fixes (e.g. batch size) are allowed; do not change model architecture or training logic.\n" + "If you modified any files (e.g. batch size for OOM), you must commit those changes on the baseline branch before reporting. An uncommitted worktree causes the follow-up merge to fail.\n" + f"Run the canonical command: {dca_cmd}\n" + f"({dca_note})\n" + "Report the final result in JSON between these exact markers once training has completed successfully. 
Include the current commit SHA in the summary (commit any changes first).\n" + "AUTORESEARCH_DCA_SUMMARY_BEGIN\n" + '{"checks":["baseline_measurement"],"notes":"Measured the current baseline in the dedicated baseline worktree.","completed_at":"YYYY-MM-DD HH:MM:SS","commit_sha":"...","metrics":{"val_bpb":1.239972,"training_seconds":300.1,"total_seconds":360.4,"startup_seconds":25.8,"peak_vram_mb":11967.8,"mfu_percent":2.15,"total_tokens_M":140.5,"num_steps":268,"num_params_M":11.5,"depth":4}}\n' + "AUTORESEARCH_DCA_SUMMARY_END\n" + "If after all retries (including batch size reduction) metrics are still unavailable, only then print the same object with an empty metrics object and explain in notes.\n" + ) + return header + conflict_block + ( + "You are the DCA stage.\n" + "Workflow:\n" + "1. Adapt or fix the generated code in the seed worktree until it runs.\n" + f"2. Run the canonical command: {dca_cmd}\n" + f" ({dca_note})\n" + "3. If it fails for a simple reason, fix and rerun.\n" + "4. Create a git commit in the seed branch for your changes.\n" + "5. Report the final result in JSON between these exact markers. Include the current commit SHA in the summary.\n" + " Use this exact shape and include numeric metric values when available:\n" + "AUTORESEARCH_DCA_SUMMARY_BEGIN\n" + '{"checks":["entrypoint"],"notes":"what you adapted or fixed","completed_at":"YYYY-MM-DD HH:MM:SS","commit_sha":"...","metrics":{"val_bpb":1.239972,"training_seconds":300.1,"total_seconds":360.4,"startup_seconds":25.8,"peak_vram_mb":11967.8,"mfu_percent":2.15,"total_tokens_M":140.5,"num_steps":268,"num_params_M":11.5,"depth":4}}\n' + "AUTORESEARCH_DCA_SUMMARY_END\n" + " Do not omit the markers. Prefer this exact JSON report over prose. If metrics are unavailable,\n" + " still print the same object with an empty metrics object.\n" + "Do not edit baseline_branches.json or baseline_metrics.json (workflow writes them; read only). 
Do not merge branches yourself; the system will evaluate and promote if appropriate.\n" + ) + raise ValueError(f"Unknown stage: {stage}") + + +def _append_results_tsv(seed_id: str, run_metrics: dict[str, Any], signal: str, description: str) -> None: + status = "KEEP" if signal == "positive_signal" else "DISCARD" + val_bpb = run_metrics.get("val_bpb", "") + peak_vram_mb = run_metrics.get("peak_vram_mb", 0) + memory_gb = round(float(peak_vram_mb) / 1024, 2) if peak_vram_mb else "" + write_header = not RESULTS_TSV.exists() + with open(RESULTS_TSV, "a", encoding="utf-8") as handle: + if write_header: + handle.write("commit\tval_bpb\tmemory_gb\tstatus\tdescription\n") + handle.write(f"{seed_id}\t{val_bpb}\t{memory_gb}\t{status}\t{description}\n") + + +def _regenerate_progress_png() -> None: + try: + import matplotlib + + matplotlib.use("Agg") + import matplotlib.pyplot as plt + import pandas as pd + except ImportError: + return + + if not RESULTS_TSV.exists(): + return + + try: + df = pd.read_csv(RESULTS_TSV, sep="\t") + df["val_bpb"] = pd.to_numeric(df["val_bpb"], errors="coerce") + df["memory_gb"] = pd.to_numeric(df["memory_gb"], errors="coerce") + df["status"] = df["status"].str.strip().str.upper() + valid = df[df["val_bpb"].notna()].copy().reset_index(drop=True) + if valid.empty: + return + + baseline_bpb = valid.loc[0, "val_bpb"] + kept = valid[valid["status"] == "KEEP"] + best = float(kept["val_bpb"].min()) if not kept.empty else float(baseline_bpb) + + fig, ax = plt.subplots(figsize=(14, 7)) + ax.scatter(valid.index, valid["val_bpb"], c="#94a3b8", s=18, alpha=0.6, label="Runs") + if not kept.empty: + ax.scatter(kept.index, kept["val_bpb"], c="#38bdf8", s=42, label="Promoted") + ax.step(kept.index, kept["val_bpb"].cummin(), where="post", color="#0ea5e9", linewidth=2) + ax.set_xlabel("Experiment #") + ax.set_ylabel("Validation BPB (lower is better)") + ax.set_title("Component System Progress") + margin = (baseline_bpb - best) * 0.15 if baseline_bpb != best else 
0.005 + ax.set_ylim(best - margin, float(baseline_bpb) + margin) + ax.grid(True, alpha=0.2) + ax.legend(loc="upper right") + plt.tight_layout() + plt.savefig(PROGRESS_PNG, dpi=150, bbox_inches="tight") + plt.close(fig) + except Exception: + traceback.print_exc() + + +def _worker(stage: str) -> None: + print(f"[daemon] worker-{stage.upper()} started") + while not _shutdown: + pending = list_pending(stage) + if not pending: + time.sleep(POLL_INTERVAL) + continue + + task_path = pending[0] + try: + task = read_task(task_path) + seed_id = task["seed_id"] + run_id = task["run_id"] + started_seed = None + if stage == "direct": + started_seed, _ = WORKFLOW.mark_direct_code_run_started(seed_id, run_id) + else: + started_seed, _ = WORKFLOW.mark_run_started(seed_id, run_id) + if ( + stage == "dca" + and task.get("metrics_recovery") is not True + ): + started_seed = WORKFLOW.ensure_seed_worktree_ready(seed_id) + print(f"[{stage.upper()}] picked up {task['task_id']} for {seed_id}") + + worktree_path = task.get("worktree_path") + if started_seed is not None and started_seed.worktree_path is not None: + worktree_path = started_seed.worktree_path + # Merge-resolution DCA runs from project root so the agent can operate on repo and worktrees + if stage == "dca" and ( + task.get("merge_resolution") is True or task.get("metrics_recovery") is True + ): + worktree_path = None + + if worktree_path: + _sync_worktree_context(worktree_path) + + if stage == "direct": + prompt = _build_direct_code_prompt(task["prompt"]) + else: + prompt = _build_prompt(stage, task, task_path) + prompt_path = _write_prompt_file(run_id, prompt) + prompt_path_str = str(prompt_path) + exit_code, stdout, stderr, stdout_log_path, stderr_log_path = _invoke_agent( + prompt, stage, run_id, worktree_path=worktree_path + ) + + combined_output = _combined_output(stdout, stderr) + salvaged_dca = _should_salvage_completed_dca(stage, exit_code, combined_output) + if exit_code == 0 or salvaged_dca: + if stage == "p": + 
WORKFLOW.finish_p_run( + seed_id, + run_id, + stdout, + str(stdout_log_path) if stdout_log_path else None, + str(stderr_log_path) if stderr_log_path else None, + prompt_path_str, + ) + elif stage == "direct": + WORKFLOW.finish_direct_code_run( + seed_id, + run_id, + stdout, + stderr=stderr, + log_path=str(stdout_log_path) if stdout_log_path else None, + stderr_log_path=str(stderr_log_path) if stderr_log_path else None, + prompt_path=prompt_path_str, + ) + else: + run = WORKFLOW.finish_dca_run( + seed_id, + run_id, + stdout, + stderr=stderr, + log_path=str(stdout_log_path) if stdout_log_path else None, + stderr_log_path=str(stderr_log_path) if stderr_log_path else None, + prompt_path=prompt_path_str, + metrics_recovery=task.get("metrics_recovery") is True, + merge_resolution=task.get("merge_resolution") is True, + ) + if not run.summary.get("metrics_recovery_queued"): + description = run.summary.get("notes") or run.summary.get("idea") or seed_id + _append_results_tsv(seed_id, run.metrics, run.signal or "error", str(description)) + _regenerate_progress_png() + if salvaged_dca: + WORKFLOW.seed_repo.append_event( + seed_id, + "dca.salvaged", + f"DCA output contained final metrics, so the run was accepted despite agent exit code {exit_code}.", + run_id=run_id, + ) + move_to_done(task_path) + print(f"[{stage.upper()}] task {task['task_id']} done") + else: + if stage == "direct": + WORKFLOW.mark_direct_code_run_failed( + seed_id, + run_id, + _agent_failure_reason(exit_code, stdout, stderr), + task_path=task_path, + prompt_path=prompt_path_str, + log_path=str(stdout_log_path) if stdout_log_path else None, + stderr_log_path=str(stderr_log_path) if stderr_log_path else None, + ) + else: + WORKFLOW.mark_run_failed( + seed_id, + run_id, + _agent_failure_reason(exit_code, stdout, stderr), + task_path=task_path, prompt_path=prompt_path_str, + ) + print(f"[{stage.upper()}] task {task['task_id']} failed") + except Exception as exc: + traceback.print_exc() + try: + task = 
read_task(task_path) + prompt_path_str = None + run_id = task.get("run_id") + if run_id: + p_path = LOG_DIR / f"{run_id}.prompt.txt" + if p_path.exists(): + prompt_path_str = str(p_path) + if stage == "direct": + WORKFLOW.mark_direct_code_run_failed( + task["seed_id"], + task["run_id"], + str(exc), + task_path=task_path, + prompt_path=prompt_path_str, + ) + else: + WORKFLOW.mark_run_failed( + task["seed_id"], task["run_id"], str(exc), + task_path=task_path, prompt_path=prompt_path_str, + ) + except Exception: + traceback.print_exc() + + print(f"[daemon] worker-{stage.upper()} stopped") + + +def main() -> None: + global _shutdown + signal.signal(signal.SIGINT, _signal_handler) + if sys.platform != "win32": + signal.signal(signal.SIGTERM, _signal_handler) + + ensure_queue_layout() + daemon_heartbeat() + agent = os.environ.get("PDCA_AGENT", "claude") + print(f"[daemon] starting component-system daemon — agent={agent}, workers=P/DCA/DIRECT") + + pools: list[ThreadPoolExecutor] = [] + for stage in ("p", "dca", "direct"): + pool = ThreadPoolExecutor(max_workers=1, thread_name_prefix=f"pdca-{stage}") + pools.append(pool) + pool.submit(_worker, stage) + + last_heartbeat = time.monotonic() + try: + while not _shutdown: + time.sleep(1.0) + if not _shutdown and (time.monotonic() - last_heartbeat) >= 5.0: + daemon_heartbeat() + last_heartbeat = time.monotonic() + except KeyboardInterrupt: + pass + finally: + _shutdown = True + if DAEMON_HEARTBEAT_PATH.exists(): + try: + DAEMON_HEARTBEAT_PATH.unlink() + except OSError: + pass + for pool in pools: + pool.shutdown(wait=True) + + print("[daemon] all workers stopped") + + +if __name__ == "__main__": + main() diff --git a/component_system/services/workflow.py b/component_system/services/workflow.py new file mode 100644 index 000000000..8c1dbf484 --- /dev/null +++ b/component_system/services/workflow.py @@ -0,0 +1,1355 @@ +from __future__ import annotations + +import json +from typing import Any +import re +import subprocess +from 
pathlib import Path + +from component_system.config import DEFAULT_BASELINE_BRANCH, PROMOTION_THRESHOLD +from component_system.domain.models import ( + DashboardColumn, + DashboardViewModel, + PlanIdea, + RunStatus, + SeedRecord, + SeedStatus, + StageName, + StageRun, +) +from component_system.repositories.state import ( + BaselineBranchMapRepository, + BaselineMetricsRepository, + RunRepository, + SeedRepository, +) +from component_system.task import ( + COMPONENT_SYSTEM_ROOT, + WORKTREE_ROOT, + get_daemon_status, + move_to_error, + now_ts, + new_run_id, + new_seed_id, + write_task, +) + +SUMMARY_MARKERS = { + "p": ("AUTORESEARCH_P_SUMMARY_BEGIN", "AUTORESEARCH_P_SUMMARY_END"), + "dca": ("AUTORESEARCH_DCA_SUMMARY_BEGIN", "AUTORESEARCH_DCA_SUMMARY_END"), +} + +BASELINE_SEED_ID = "__baseline__" + + +class GitCommandError(RuntimeError): + pass + + +class GitService: + def __init__(self) -> None: + pass + + def _run_git(self, *args: str, cwd: Path | None = None) -> str: + try: + result = subprocess.run( + ["git", *args], + cwd=str(cwd) if cwd else None, + capture_output=True, + text=True, + check=True, + ) + except FileNotFoundError as exc: + raise GitCommandError("Git is not installed or not available on PATH.") from exc + except subprocess.CalledProcessError as exc: + stderr = (exc.stderr or exc.stdout or "").strip() + raise GitCommandError(stderr or f"git {' '.join(args)} failed") from exc + return result.stdout.strip() + + def repo_root(self) -> Path: + return Path(self._run_git("rev-parse", "--show-toplevel")) + + def current_head(self) -> str: + return self._run_git("rev-parse", "HEAD") + + def branch_exists(self, branch: str) -> bool: + try: + self._run_git("rev-parse", "--verify", branch) + return True + except GitCommandError: + return False + + def ensure_branch(self, branch: str, start_point: str) -> None: + if not self.branch_exists(branch): + self._run_git("branch", branch, start_point) + + def list_branches(self) -> list[str]: + output = 
self._run_git("branch", "--format=%(refname:short)") + branches = [line.strip() for line in output.splitlines() if line.strip()] + if not branches: + # Unborn repositories can have HEAD pointing to a branch name even before first commit. + try: + head_branch = self._run_git("symbolic-ref", "--short", "HEAD").strip() + if head_branch: + branches.append(head_branch) + except GitCommandError: + pass + return sorted(set(branches)) + + @staticmethod + def is_seed_specific_branch(branch: str) -> bool: + """True if this branch is the single working branch for a seed (seed_id), not a baseline choice.""" + if branch == BASELINE_SEED_ID: + return True + # One branch per seed: seed- + 6 hex chars, e.g. seed-e57b95 + if branch.startswith("seed-") and len(branch) == 11 and all( + c in "abcdef0123456789" for c in branch[5:] + ): + return True + if branch.startswith("seed/"): + return True # legacy candidate branches, e.g. seed/seed-e57b95 + return False + + def setup_error(self) -> str | None: + try: + self.repo_root() + return None + except GitCommandError as exc: + return str(exc) + + def setup_error_for_branches(self, baseline_branch: str) -> str | None: + try: + root = self.repo_root() + if not baseline_branch: + return "Please select a baseline branch." + if not self.branch_exists(baseline_branch): + return ( + f"Git repo found at {root}, but branch {baseline_branch!r} does not exist yet. " + "Select an existing baseline branch." + ) + return None + except GitCommandError as exc: + return str(exc) + + def ensure_seed_worktrees(self, seed: SeedRecord) -> SeedRecord: + """Ensure the seed worktree exists on the single branch for this seed: seed_id (SSOT).""" + repo_head = self.current_head() + self.ensure_branch(seed.baseline_branch, repo_head) + + seed_worktree = WORKTREE_ROOT / seed.seed_id + if seed_worktree.exists(): + seed.worktree_path = str(seed_worktree) + return seed + # One branch per seed: branch name = seed_id, created from baseline. 
+ self._run_git("worktree", "add", "-B", seed.seed_id, str(seed_worktree), seed.baseline_branch) + + seed.worktree_path = str(seed_worktree) + return seed + + def commit_sha(self, ref: str) -> str: + return self._run_git("rev-parse", "--short", ref) + + def head_sha_at(self, cwd: Path) -> str: + """Return the short commit SHA of HEAD in the given worktree directory.""" + return self._run_git("rev-parse", "--short", "HEAD", cwd=cwd) + + def reset_seed_branch_to(self, seed: SeedRecord, ref: str) -> None: + """Reset the seed worktree's branch to the given ref (e.g. commit before P). + No-op for baseline seed or when worktree is missing.""" + if seed.seed_id == BASELINE_SEED_ID: + return + if not seed.worktree_path: + return + worktree_path = Path(seed.worktree_path) + if not worktree_path.is_dir(): + return + self._run_git("reset", "--hard", ref, cwd=worktree_path) + + def promote_seed_branch( + self, seed: SeedRecord, target_branch: str | None = None + ) -> str: + """Merge the seed's branch (seed_id) into the target branch. Only DCA Action may call this; Plan must never merge. + If target_branch is None, use seed.baseline_branch (e.g. for normal seed promotion). 
For __baseline__ completion, + pass the first user seed's selected branch so the merge goes there instead of a fixed config value.""" + merge_into = target_branch if target_branch is not None else seed.baseline_branch + baseline_worktree = WORKTREE_ROOT / "baseline" + if baseline_worktree.exists(): + try: + self._run_git("worktree", "remove", "--force", str(baseline_worktree)) + except GitCommandError: + pass + self._run_git( + "worktree", + "add", + "--force", + "-B", + merge_into, + str(baseline_worktree), + merge_into, + ) + self._run_git("merge", "--no-edit", seed.seed_id, cwd=baseline_worktree) + return self.commit_sha(merge_into) + + +class WorkflowService: + def __init__( + self, + seed_repo: SeedRepository | None = None, + run_repo: RunRepository | None = None, + branch_map_repo: BaselineBranchMapRepository | None = None, + metrics_repo: BaselineMetricsRepository | None = None, + git_service: GitService | None = None, + ) -> None: + self.seed_repo = seed_repo or SeedRepository() + self.run_repo = run_repo or RunRepository() + self.branch_map_repo = branch_map_repo or BaselineBranchMapRepository() + self.metrics_repo = metrics_repo or BaselineMetricsRepository() + self.git_service = git_service or GitService() + + @staticmethod + def _seed_worktree_path(seed_id: str) -> str: + return str(WORKTREE_ROOT / seed_id) + + @staticmethod + def _baseline_worktree_path() -> str: + return str(WORKTREE_ROOT / BASELINE_SEED_ID) + + def _normalize_seed_runtime_state(self, seed: SeedRecord) -> SeedRecord: + """Clean up legacy persisted seed state that no longer matches runtime rules.""" + if seed.seed_id != BASELINE_SEED_ID: + return seed + expected_worktree = self._baseline_worktree_path() + if seed.worktree_path == expected_worktree: + return seed + seed.worktree_path = expected_worktree + seed.updated_at = now_ts() + self.seed_repo.save(seed) + return seed + + def ensure_seed_worktree_ready(self, seed_id: str) -> SeedRecord: + """Ensure the runtime seed worktree exists; 
recreate only when missing.""" + seed = self.require_seed(seed_id) + if seed.seed_id == BASELINE_SEED_ID: + expected_worktree = self._baseline_worktree_path() + if Path(expected_worktree).is_dir(): + if seed.worktree_path != expected_worktree: + seed.worktree_path = expected_worktree + seed.updated_at = now_ts() + self.seed_repo.save(seed) + return seed + seed = self.git_service.ensure_seed_worktrees(seed) + seed.updated_at = now_ts() + self.seed_repo.save(seed) + commit_sha = "" + try: + commit_sha = self.git_service.commit_sha(seed.baseline_branch) + except GitCommandError: + pass + self.seed_repo.append_event( + seed.seed_id, + "seed.worktree_ready", + "Recreated missing baseline worktree before the run started.", + commit_sha=commit_sha or None, + ) + return seed + expected_worktree = self._seed_worktree_path(seed.seed_id) + if Path(expected_worktree).is_dir(): + if seed.worktree_path != expected_worktree: + seed.worktree_path = expected_worktree + seed.updated_at = now_ts() + self.seed_repo.save(seed) + return seed + seed = self.git_service.ensure_seed_worktrees(seed) + seed.updated_at = now_ts() + self.seed_repo.save(seed) + commit_sha = "" + try: + commit_sha = self.git_service.commit_sha(seed.seed_id) + except GitCommandError: + pass + self.seed_repo.append_event( + seed.seed_id, + "seed.worktree_ready", + "Recreated missing seed worktree before the run started.", + commit_sha=commit_sha or None, + ) + return seed + + def _preferred_baseline_branch(self) -> str: + setup_error = self.git_service.setup_error() + if setup_error is not None: + return DEFAULT_BASELINE_BRANCH + try: + branches = [ + branch + for branch in self.git_service.list_branches() + if not self.git_service.is_seed_specific_branch(branch) + ] + except GitCommandError: + return DEFAULT_BASELINE_BRANCH + if branches and DEFAULT_BASELINE_BRANCH in branches: + return DEFAULT_BASELINE_BRANCH + return branches[0] if branches else DEFAULT_BASELINE_BRANCH + + def 
_first_user_seed_baseline_branch(self) -> str | None: + """Return the baseline_branch of the earliest-created user seed (excluding __baseline__), or None.""" + user_seeds = [s for s in self.seed_repo.list() if s.seed_id != BASELINE_SEED_ID] + if not user_seeds: + return None + first = min(user_seeds, key=lambda s: s.created_at) + return first.baseline_branch or None + + def _enqueue_plan_run(self, seed: SeedRecord, event_kind: str = "p.queued", event_message: str = "Queued Plan stage for the seed.") -> StageRun: + run = StageRun( + run_id=new_run_id("p"), + seed_id=seed.seed_id, + stage=StageName.p, + status=RunStatus.queued, + task_id=new_run_id("task-p"), + created_at=now_ts(), + updated_at=now_ts(), + ) + seed.status = SeedStatus.queued + seed.updated_at = now_ts() + seed.latest_run_id = run.run_id + seed.last_error = None + self.seed_repo.save(seed) + self.run_repo.save(run) + self.seed_repo.append_event(seed.seed_id, event_kind, event_message) + write_task( + "p", + { + "seed_id": seed.seed_id, + "run_id": run.run_id, + "prompt": seed.prompt, + "worktree_path": seed.worktree_path, + }, + task_id=run.task_id, + ) + return run + + def _release_seeds_waiting_for_baseline(self, branch: str) -> None: + """Release seeds that were waiting for baseline result on the given branch.""" + branch_metrics = self.metrics_repo.get_for_branch(branch) + if not branch_metrics or branch_metrics.get("last_val_bpb") is None: + return + waiting_seeds = sorted(self.seed_repo.list(), key=lambda item: item.created_at) + for seed in waiting_seeds: + if seed.seed_id == BASELINE_SEED_ID: + continue + if seed.baseline_branch != branch: + continue + if seed.status is not SeedStatus.queued or seed.latest_run_id is not None: + continue + self._enqueue_plan_run( + seed, + event_kind="p.released", + event_message="Baseline is ready; queued Plan stage for the waiting seed.", + ) + + @staticmethod + def _status_from_dca_signal(signal: str) -> SeedStatus: + """Centralized mapping from DCA signal 
to terminal seed status.""" + if signal == "positive_signal": + return SeedStatus.promoted + if signal == "error": + return SeedStatus.failed + return SeedStatus.passed + + def _reconcile_seed_status_signal(self, seed: SeedRecord) -> bool: + """ + Auto-heal known inconsistent terminal combinations from historical data. + + Returns True when the seed was updated and persisted. + """ + if seed.status is SeedStatus.passed and seed.latest_signal == "error": + seed.status = SeedStatus.failed + seed.updated_at = now_ts() + self.seed_repo.save(seed) + self.seed_repo.append_event( + seed.seed_id, + "seed.reconciled", + "Reconciled inconsistent terminal state (passed + error) to failed.", + ) + return True + return False + + def create_seed( + self, + prompt: str, + baseline_branch: str | None = None, + ralph_loop_enabled: bool = False, + ) -> SeedRecord: + seed_id = new_seed_id() + selected_baseline = (baseline_branch or DEFAULT_BASELINE_BRANCH).strip() + seed = SeedRecord( + seed_id=seed_id, + prompt=prompt.strip(), + status=SeedStatus.draft, + created_at=now_ts(), + updated_at=now_ts(), + baseline_branch=selected_baseline, + worktree_path=self._seed_worktree_path(seed_id), + ralph_loop_enabled=ralph_loop_enabled, + ) + self.seed_repo.save(seed) + self.branch_map_repo.set_branch_for_seed(seed_id, selected_baseline) + try: + pass # branch seed_id is created when Plan is queued (ensure_seed_worktrees) + except GitCommandError: + # Keep seed creation non-blocking; branch creation will be retried at P queue time. 
+ pass + self.seed_repo.append_event(seed.seed_id, "seed.created", "Seed created from prompt.") + if ralph_loop_enabled: + self.seed_repo.append_event( + seed.seed_id, + "ralph.enabled", + "Ralph loop enabled; Plan will auto-requeue after each DCA completion.", + ) + return seed + + def create_direct_code_seed(self, prompt: str) -> tuple[SeedRecord, StageRun]: + cleaned_prompt = prompt.strip() + if not cleaned_prompt: + raise RuntimeError("Prompt cannot be empty.") + baseline_branch = self._preferred_baseline_branch() + seed_id = new_seed_id("direct") + now = now_ts() + run = StageRun( + run_id=new_run_id("direct"), + seed_id=seed_id, + stage=StageName.direct, + status=RunStatus.queued, + task_id=new_run_id("task-direct"), + created_at=now, + updated_at=now, + ) + seed = SeedRecord( + seed_id=seed_id, + prompt=cleaned_prompt, + status=SeedStatus.adapting, + created_at=now, + updated_at=now, + baseline_branch=baseline_branch, + worktree_path=str(COMPONENT_SYSTEM_ROOT.parent), + latest_run_id=run.run_id, + plan=PlanIdea( + title="Direct code agent", + target_component="project_root", + description="Direct code agent run requested from the dashboard and executed from the project root.", + ), + ) + self.seed_repo.save(seed) + self.branch_map_repo.set_branch_for_seed(seed_id, baseline_branch) + self.run_repo.save(run) + self.seed_repo.append_event(seed.seed_id, "seed.created", "Seed created from direct code agent prompt.") + self.seed_repo.append_event( + seed.seed_id, + "direct_code.queued", + "Queued direct code agent run from the project root.", + run_id=run.run_id, + ) + write_task( + "direct", + { + "seed_id": seed.seed_id, + "run_id": run.run_id, + "prompt": seed.prompt, + "worktree_path": None, + }, + task_id=run.task_id, + ) + return seed, run + + def _get_or_create_baseline_seed(self) -> SeedRecord: + """Return the baseline seed used to establish initial val_bpb; create and persist it if missing.""" + seed = self.seed_repo.get(BASELINE_SEED_ID) + if seed is not 
None: + return self._normalize_seed_runtime_state(seed) + branch = self._first_user_seed_baseline_branch() or DEFAULT_BASELINE_BRANCH + seed = SeedRecord( + seed_id=BASELINE_SEED_ID, + prompt="Baseline measurement: run training on current code without changes.", + status=SeedStatus.draft, + created_at=now_ts(), + updated_at=now_ts(), + baseline_branch=branch, + worktree_path=self._baseline_worktree_path(), + ralph_loop_enabled=False, + ) + self.seed_repo.save(seed) + self.branch_map_repo.set_branch_for_seed(BASELINE_SEED_ID, branch) + self.seed_repo.append_event( + seed.seed_id, + "seed.created", + "Baseline seed created for initial measurement.", + ) + return seed + + def ensure_baseline_result(self) -> None: + """ + If there is no baseline result (last_val_bpb) for the baseline seed's branch, ensure a baseline seed exists and + queue its DCA so the first run establishes the baseline. Idempotent; safe to call + before queue_p for any user seed. + """ + seed = self._get_or_create_baseline_seed() + branch_metrics = self.metrics_repo.get_for_branch(seed.baseline_branch) + if branch_metrics and branch_metrics.get("last_val_bpb") is not None: + return + if seed.status in (SeedStatus.dca_queued, SeedStatus.adapting, SeedStatus.running): + return + if seed.status in (SeedStatus.passed, SeedStatus.failed, SeedStatus.promoted): + branch_metrics = self.metrics_repo.get_for_branch(seed.baseline_branch) + if branch_metrics and branch_metrics.get("last_val_bpb") is not None: + return + setup_error = self.git_service.setup_error() + if setup_error is not None: + return + try: + self.git_service.ensure_branch(seed.baseline_branch, self.git_service.current_head()) + except GitCommandError: + return + setup_error = self.git_service.setup_error_for_branches(seed.baseline_branch) + if setup_error is not None: + return + seed.status = SeedStatus.generated + seed.plan = PlanIdea(title="Baseline", description="No changes; measure current baseline.") + seed.updated_at = now_ts() + 
self.seed_repo.save(seed) + self.seed_repo.append_event( + seed.seed_id, + "baseline.queued", + "Queued DCA to establish baseline result before first seed.", + ) + self.queue_dca(seed.seed_id) + + def set_ralph_loop(self, seed_id: str, enabled: bool) -> SeedRecord: + seed = self.require_seed(seed_id) + if seed.ralph_loop_enabled == enabled: + return seed + seed.ralph_loop_enabled = enabled + seed.updated_at = now_ts() + self.seed_repo.save(seed) + if enabled: + self.seed_repo.append_event( + seed.seed_id, + "ralph.enabled", + "Ralph loop enabled; Plan will auto-requeue after each DCA completion.", + ) + else: + self.seed_repo.append_event(seed.seed_id, "ralph.disabled", "Ralph loop disabled by user.") + return seed + + def can_edit_seed_prompt(self, seed: SeedRecord) -> bool: + return seed.status in {SeedStatus.draft, SeedStatus.queued} + + def update_seed_prompt(self, seed_id: str, prompt: str) -> SeedRecord: + seed = self.require_seed(seed_id) + if not self.can_edit_seed_prompt(seed): + raise RuntimeError("Seed prompt can only be edited before Plan starts.") + updated_prompt = prompt.strip() + if not updated_prompt: + raise RuntimeError("Prompt cannot be empty.") + if updated_prompt == seed.prompt: + return seed + seed.prompt = updated_prompt + seed.updated_at = now_ts() + self.seed_repo.save(seed) + self.seed_repo.append_event(seed.seed_id, "seed.updated", "Seed prompt was edited before execution.") + return seed + + def queue_p(self, seed_id: str) -> StageRun | None: + seed = self.require_seed(seed_id) + branch_metrics = self.metrics_repo.get_for_branch(seed.baseline_branch) if seed_id != BASELINE_SEED_ID else None + has_baseline = branch_metrics is not None and branch_metrics.get("last_val_bpb") is not None + if seed_id != BASELINE_SEED_ID and not has_baseline: + self.ensure_baseline_result() + branch_metrics = self.metrics_repo.get_for_branch(seed.baseline_branch) + has_baseline = branch_metrics is not None and branch_metrics.get("last_val_bpb") is not None + 
if not has_baseline: + if not (seed.status is SeedStatus.queued and seed.latest_run_id is None): + seed.status = SeedStatus.queued + seed.updated_at = now_ts() + seed.latest_run_id = None + seed.last_error = None + self.seed_repo.save(seed) + self.seed_repo.append_event( + seed.seed_id, + "p.waiting_for_baseline", + "Baseline run is still in progress; Plan will queue after baseline finishes.", + ) + return None + setup_error = self.git_service.setup_error() + if setup_error is not None: + raise RuntimeError(setup_error) + try: + self.git_service.ensure_branch(seed.baseline_branch, self.git_service.current_head()) + except GitCommandError: + pass + setup_error = self.git_service.setup_error_for_branches(seed.baseline_branch) + if setup_error is not None: + raise RuntimeError(setup_error) + return self._enqueue_plan_run(seed) + + def queue_dca( + self, + seed_id: str, + merge_resolution: bool = False, + metrics_recovery: bool = False, + source_run_id: str | None = None, + source_stdout_log_path: str | None = None, + source_stderr_log_path: str | None = None, + last_metrics: dict[str, Any] | None = None, + last_summary: dict[str, Any] | None = None, + restore_ref: str | None = None, + ) -> StageRun: + seed = self.require_seed(seed_id) + if not metrics_recovery and seed.status in {SeedStatus.draft, SeedStatus.queued, SeedStatus.planning}: + raise RuntimeError("Run Plan first. Do-Check-Action is available after code is generated into the seed branch.") + if not metrics_recovery: + setup_error = self.git_service.setup_error_for_branches(seed.baseline_branch) + if setup_error is not None: + raise RuntimeError(setup_error) + run = StageRun( + run_id=new_run_id("dca"), + seed_id=seed.seed_id, + stage=StageName.dca, + status=RunStatus.queued, + task_id=new_run_id("task-dca"), + created_at=now_ts(), + updated_at=now_ts(), + ) + if seed.seed_id != BASELINE_SEED_ID: + try: + # Ref to restore worktree to on negative signal (commit before P when from finish_p_run, else baseline). 
+ run.summary["restore_ref"] = ( + restore_ref + if restore_ref is not None + else self.git_service.commit_sha(seed.baseline_branch) + ) + except GitCommandError: + pass + seed.status = SeedStatus.dca_queued + seed.updated_at = now_ts() + seed.latest_run_id = run.run_id + seed.last_error = None + self.seed_repo.save(seed) + self.run_repo.save(run) + self.seed_repo.append_event( + seed.seed_id, + "dca.queued", + "Queued DCA for merge conflict resolution." + if merge_resolution + else "Queued DCA for metrics recovery from saved logs." + if metrics_recovery + else "Queued DCA stage for the seed.", + ) + payload = { + "seed_id": seed.seed_id, + "run_id": run.run_id, + "prompt": seed.prompt, + "worktree_path": seed.worktree_path, + "merge_resolution": merge_resolution, + "metrics_recovery": metrics_recovery, + } + if merge_resolution: + payload["baseline_branch"] = seed.baseline_branch + if last_metrics is not None: + payload["last_metrics"] = last_metrics + if last_summary is not None: + payload["last_summary"] = last_summary + if metrics_recovery: + payload["source_run_id"] = source_run_id + payload["source_stdout_log_path"] = source_stdout_log_path + payload["source_stderr_log_path"] = source_stderr_log_path + payload["worktree_path"] = None + write_task("dca", payload, task_id=run.task_id) + return run + + def require_seed(self, seed_id: str) -> SeedRecord: + seed = self.seed_repo.get(seed_id) + if seed is None: + raise KeyError(f"Unknown seed_id={seed_id}") + return self._normalize_seed_runtime_state(seed) + + def require_run(self, run_id: str) -> StageRun: + run = self.run_repo.get(run_id) + if run is None: + raise KeyError(f"Unknown run_id={run_id}") + return run + + def mark_run_started(self, seed_id: str, run_id: str) -> tuple[SeedRecord, StageRun]: + seed = self.require_seed(seed_id) + run = self.require_run(run_id) + run.status = RunStatus.running + run.updated_at = now_ts() + if run.stage is StageName.p: + setup_error = self.git_service.setup_error() + if 
setup_error is not None: + raise RuntimeError(setup_error) + try: + self.git_service.ensure_branch(seed.baseline_branch, self.git_service.current_head()) + except GitCommandError: + pass + setup_error = self.git_service.setup_error_for_branches(seed.baseline_branch) + if setup_error is not None: + raise RuntimeError(setup_error) + seed = self.ensure_seed_worktree_ready(seed.seed_id) + if seed.worktree_path: + worktree_path = Path(seed.worktree_path) + if worktree_path.is_dir(): + try: + run.summary["commit_sha_before_p"] = self.git_service.head_sha_at( + worktree_path + ) + except GitCommandError: + pass + seed.status = SeedStatus.planning + event_kind = "p.started" + event_message = "Plan stage started in the candidate worktree." + else: + seed.status = SeedStatus.adapting + event_kind = "dca.started" + event_message = ( + "Baseline measurement started in the baseline worktree." + if seed.seed_id == BASELINE_SEED_ID + else "DCA stage started in the seed worktree." + ) + seed.updated_at = now_ts() + self.run_repo.save(run) + self.seed_repo.save(seed) + self.seed_repo.append_event(seed.seed_id, event_kind, event_message, run_id=run_id) + return seed, run + + def mark_direct_code_run_started(self, seed_id: str, run_id: str) -> tuple[SeedRecord, StageRun]: + seed = self.require_seed(seed_id) + run = self.require_run(run_id) + run.status = RunStatus.running + run.updated_at = now_ts() + seed.status = SeedStatus.adapting + seed.updated_at = now_ts() + self.run_repo.save(run) + self.seed_repo.save(seed) + self.seed_repo.append_event( + seed.seed_id, + "direct_code.started", + "Direct code agent started from the project root.", + run_id=run_id, + ) + return seed, run + + def mark_direct_code_run_failed( + self, + seed_id: str, + run_id: str, + error: str, + task_path: Path | None = None, + prompt_path: str | None = None, + log_path: str | None = None, + stderr_log_path: str | None = None, + ) -> None: + seed = self.require_seed(seed_id) + run = self.require_run(run_id) + 
run.status = RunStatus.failed + run.updated_at = now_ts() + run.error = error + if prompt_path is not None: + run.prompt_path = prompt_path + if log_path is not None: + run.log_path = log_path + if stderr_log_path is not None: + run.stderr_log_path = stderr_log_path + seed.status = SeedStatus.failed + seed.updated_at = now_ts() + seed.last_error = error + self.run_repo.save(run) + self.seed_repo.save(seed) + self.seed_repo.append_event(seed.seed_id, "direct_code.failed", error, run_id=run_id) + if task_path is not None and task_path.exists(): + move_to_error(task_path) + + def mark_run_failed( + self, + seed_id: str, + run_id: str, + error: str, + task_path: Path | None = None, + prompt_path: str | None = None, + log_path: str | None = None, + stderr_log_path: str | None = None, + ) -> None: + seed = self.require_seed(seed_id) + run = self.require_run(run_id) + run.status = RunStatus.failed + run.updated_at = now_ts() + run.error = error + if prompt_path is not None: + run.prompt_path = prompt_path + if log_path is not None: + run.log_path = log_path + if stderr_log_path is not None: + run.stderr_log_path = stderr_log_path + seed.status = SeedStatus.failed + seed.updated_at = now_ts() + seed.last_error = error + self.run_repo.save(run) + self.seed_repo.save(seed) + self.seed_repo.append_event(seed.seed_id, f"{run.stage.value}.failed", error, run_id=run_id) + if task_path is not None and task_path.exists(): + move_to_error(task_path) + + def finish_direct_code_run( + self, + seed_id: str, + run_id: str, + stdout: str, + stderr: str | None = None, + log_path: str | None = None, + stderr_log_path: str | None = None, + prompt_path: str | None = None, + ) -> StageRun: + seed = self.require_seed(seed_id) + run = self.require_run(run_id) + run.status = RunStatus.succeeded + run.updated_at = now_ts() + run.log_path = log_path + run.stderr_log_path = stderr_log_path + run.prompt_path = prompt_path + run.summary = { + "mode": "direct_code_agent", + "cwd": 
str(COMPONENT_SYSTEM_ROOT.parent), + "stdout_bytes": len(stdout.encode("utf-8", errors="replace")), + "stderr_bytes": len((stderr or "").encode("utf-8", errors="replace")), + } + run.signal = "direct_code_completed" + seed.status = SeedStatus.passed + seed.updated_at = now_ts() + seed.latest_signal = run.signal + seed.last_error = None + self.run_repo.save(run) + self.seed_repo.save(seed) + self.seed_repo.append_event( + seed.seed_id, + "direct_code.completed", + "Direct code agent completed from the project root.", + run_id=run_id, + ) + return run + + def finish_p_run( + self, + seed_id: str, + run_id: str, + stdout: str, + log_path: str | None = None, + stderr_log_path: str | None = None, + prompt_path: str | None = None, + ) -> StageRun: + seed = self.require_seed(seed_id) + run = self.require_run(run_id) + summary = self.extract_summary(stdout, StageName.p) or {} + seed.plan = PlanIdea( + title=summary.get("idea", "Generated plan"), + target_component=summary.get("target_component", "model"), + description=summary.get("description", ""), + source_refs=summary.get("source_refs", []), + commit_sha=summary.get("commit_sha"), + ) + # Single branch per seed (SSOT): worktree is already on seed_id branch. + commit_sha = self.git_service.commit_sha(seed.seed_id) + run.status = RunStatus.succeeded + run.updated_at = now_ts() + run.log_path = log_path + run.stderr_log_path = stderr_log_path + run.prompt_path = prompt_path + # Preserve run.summary fields set earlier (e.g. commit_sha_before_p) when merging P output. 
+ run.summary = run.summary | summary | {"commit_sha": commit_sha} + seed.status = SeedStatus.generated + seed.updated_at = now_ts() + self.run_repo.save(run) + self.seed_repo.save(seed) + self.seed_repo.append_event( + seed.seed_id, + "p.completed", + "Plan completed on seed branch.", + commit_sha=commit_sha, + ) + self.queue_dca( + seed.seed_id, + restore_ref=run.summary.get("commit_sha_before_p"), + ) + return run + + @staticmethod + def combine_output(stdout: str, stderr: str | None = None) -> str: + if stdout and stderr: + return f"{stdout}\n{stderr}" + return stdout or stderr or "" + + def finish_dca_run( + self, + seed_id: str, + run_id: str, + stdout: str, + stderr: str | None = None, + log_path: str | None = None, + stderr_log_path: str | None = None, + prompt_path: str | None = None, + metrics_recovery: bool = False, + merge_resolution: bool = False, + ) -> StageRun: + seed = self.require_seed(seed_id) + run = self.require_run(run_id) + branch_metrics = self.metrics_repo.get_for_branch(seed.baseline_branch) + last_val_bpb = float(branch_metrics["last_val_bpb"]) if branch_metrics and branch_metrics.get("last_val_bpb") is not None else None + output_text = self.combine_output(stdout, stderr) + summary = self.extract_summary(output_text, StageName.dca) or {} + metrics = self.extract_dca_metrics(output_text, summary) + signal = self.evaluate_signal(metrics, last_val_bpb, PROMOTION_THRESHOLD) + commit_sha = summary.get("commit_sha") + if not (isinstance(commit_sha, str) and commit_sha.strip()): + try: + commit_sha = self.git_service.commit_sha(seed.seed_id) + except GitCommandError: + commit_sha = "" + run.status = RunStatus.succeeded + run.updated_at = now_ts() + run.log_path = log_path + run.stderr_log_path = stderr_log_path + run.prompt_path = prompt_path + run.summary = summary | {"commit_sha": commit_sha} + run.metrics = metrics + run.signal = signal + seed.updated_at = now_ts() + if signal == "error" and not metrics_recovery: + run.summary = run.summary 
| {"metrics_recovery_queued": True} + self.run_repo.save(run) + self.seed_repo.save(seed) + self.seed_repo.append_event( + seed.seed_id, + "dca.metrics_recovery_queued", + "DCA completed without recoverable metrics in the structured report; queued a follow-up DCA to inspect saved logs.", + run_id=run_id, + ) + self.queue_dca( + seed.seed_id, + metrics_recovery=True, + source_run_id=run_id, + source_stdout_log_path=log_path, + source_stderr_log_path=stderr_log_path, + ) + return run + seed.latest_metrics = metrics + seed.latest_signal = signal + terminal_status = self._status_from_dca_signal(signal) + merge_commit_sha = None # set when seed branch is successfully merged into baseline + if seed.seed_id == BASELINE_SEED_ID and last_val_bpb is None: + if "val_bpb" not in metrics: + seed.status = SeedStatus.failed + event_message = ( + "Baseline metrics recovery could not recover metrics; marked as failed." + if metrics_recovery + else "Baseline measurement completed without metrics; marked as failed." + ) + self.run_repo.save(run) + self.seed_repo.save(seed) + self.seed_repo.append_event( + seed.seed_id, + "dca.completed", + event_message, + signal=signal, + metrics=metrics, + ) + return run + target_branch = self._first_user_seed_baseline_branch() or seed.baseline_branch + # Only positive_signal is merged into the per-seed baseline branch; record baseline value otherwise. 
+ if signal != "positive_signal": + self.metrics_repo.update_for_branch( + target_branch, + {"last_val_bpb": metrics["val_bpb"]}, + ) + seed.status = terminal_status + self.run_repo.save(run) + self.seed_repo.save(seed) + self.seed_repo.append_event( + seed.seed_id, + "dca.completed", + "Baseline measurement completed (no promotion); not merged into baseline branch.", + signal=signal, + metrics=metrics, + ) + return run + try: + merge_commit_sha = self.git_service.promote_seed_branch(seed, target_branch=target_branch) + self.metrics_repo.update_for_branch( + target_branch, + { + "last_val_bpb": metrics["val_bpb"], + "promoted_branch": seed.seed_id, + "promoted_idea": "Initial baseline adaptation", + "promoted_at": summary.get("completed_at"), + }, + ) + seed.status = SeedStatus.passed + event_message = f"Baseline measurement completed and __baseline__ was merged into {target_branch}; waiting seeds can now start Plan." + self.run_repo.save(run) + self.seed_repo.save(seed) + self.seed_repo.append_event( + seed.seed_id, + "dca.completed", + event_message, + signal=signal, + metrics=metrics, + commit_sha=merge_commit_sha, + ) + self._release_seeds_waiting_for_baseline(target_branch) + return run + except GitCommandError as merge_err: + tried_sha = commit_sha or "" + try: + tried_sha = self.git_service.commit_sha(seed.seed_id) + except GitCommandError: + pass + self.seed_repo.append_event( + seed.seed_id, + "dca.merge_failed", + f"Merge into baseline failed: {merge_err}. 
Queued a new DCA run to resolve conflicts.", + commit_sha=tried_sha or None, + target_branch=target_branch, + ) + if not merge_resolution: + self.queue_dca( + seed.seed_id, + merge_resolution=True, + last_metrics=metrics, + last_summary=summary, + ) + seed.status = SeedStatus.dca_queued + seed.updated_at = now_ts() + self.seed_repo.save(seed) + self.run_repo.save(run) + self.seed_repo.append_event( + seed.seed_id, + "dca.completed", + "Baseline measurement completed but merge failed; conflict-resolution DCA queued.", + signal=signal, + metrics=metrics, + ) + return run + self.metrics_repo.update_for_branch( + target_branch, + { + "last_val_bpb": metrics["val_bpb"], + "promoted_branch": seed.seed_id, + "promoted_idea": "Initial baseline adaptation", + "promoted_at": summary.get("completed_at"), + }, + ) + seed.status = SeedStatus.passed + self.seed_repo.save(seed) + self.run_repo.save(run) + self.seed_repo.append_event( + seed.seed_id, + "dca.completed", + "Baseline measurement completed; merge into baseline branch failed again after resolution run (loop avoided). Baseline metrics recorded; manual merge may be needed.", + signal=signal, + metrics=metrics, + ) + self._release_seeds_waiting_for_baseline(target_branch) + return run + if terminal_status is SeedStatus.promoted: + try: + self.metrics_repo.update_for_branch( + seed.baseline_branch, + { + "last_val_bpb": metrics["val_bpb"], + "promoted_branch": seed.seed_id, + "promoted_idea": seed.plan.title if seed.plan else seed.prompt[:80], + "promoted_at": summary.get("completed_at"), + }, + ) + merge_commit_sha = self.git_service.promote_seed_branch(seed) + seed.status = terminal_status + event_message = "DCA succeeded and seed branch was promoted into baseline." 
+ except GitCommandError as merge_err: + tried_sha = commit_sha or "" + try: + tried_sha = self.git_service.commit_sha(seed.seed_id) + except GitCommandError: + pass + self.seed_repo.append_event( + seed.seed_id, + "dca.merge_failed", + f"Merge into baseline failed: {merge_err}. Queued a new DCA run to resolve conflicts.", + commit_sha=tried_sha or None, + target_branch=seed.baseline_branch, + ) + self.queue_dca( + seed.seed_id, + merge_resolution=True, + last_metrics=metrics, + last_summary=summary, + ) + seed.status = SeedStatus.dca_queued + seed.updated_at = now_ts() + self.seed_repo.save(seed) + self.run_repo.save(run) + self.seed_repo.append_event( + seed.seed_id, "dca.completed", "DCA run completed but merge failed; conflict-resolution DCA queued.", signal=signal, metrics=metrics + ) + if seed.ralph_loop_enabled: + try: + self.queue_p(seed.seed_id) + self.seed_repo.append_event(seed.seed_id, "ralph.requeued", "Ralph loop queued the next Plan run.") + except (RuntimeError, GitCommandError) as exc: + self.seed_repo.append_event(seed.seed_id, "ralph.requeue_failed", f"Ralph loop could not queue the next Plan run: {exc}") + return run + elif terminal_status is SeedStatus.failed: + seed.status = terminal_status + event_message = ( + "DCA metrics recovery could not recover metrics; marked as failed." + if metrics_recovery + else "DCA completed but metrics were missing; marked as failed." + ) + else: + seed.status = terminal_status + event_message = "DCA completed without promotion." 
+ self.run_repo.save(run) + self.seed_repo.save(seed) + event_commit_sha = merge_commit_sha if merge_commit_sha else run.summary.get("commit_sha") + self.seed_repo.append_event( + seed.seed_id, + "dca.completed", + event_message, + signal=signal, + metrics=metrics, + **({"commit_sha": event_commit_sha} if event_commit_sha else {}), + ) + if ( + seed.ralph_loop_enabled + and signal in ("negative_signal", "neutral", "error") + and not merge_resolution + and not metrics_recovery + and seed.seed_id != BASELINE_SEED_ID + ): + ref = run.summary.get("restore_ref") or run.summary.get("baseline_commit_at_dca_start") + if ref: + try: + self.git_service.reset_seed_branch_to(seed, ref) + self.seed_repo.append_event( + seed.seed_id, + "ralph.worktree_restored", + "Restored seed worktree to commit before P for next Plan.", + commit_sha=ref, + ) + except GitCommandError as exc: + self.seed_repo.append_event( + seed.seed_id, + "ralph.worktree_restore_failed", + f"Could not restore seed worktree to commit before P: {exc}", + commit_sha=ref, + ) + if seed.ralph_loop_enabled: + try: + self.queue_p(seed.seed_id) + self.seed_repo.append_event( + seed.seed_id, + "ralph.requeued", + "Ralph loop queued the next Plan run.", + ) + except (RuntimeError, GitCommandError) as exc: + self.seed_repo.append_event( + seed.seed_id, + "ralph.requeue_failed", + f"Ralph loop could not queue the next Plan run: {exc}", + ) + return run + + def build_dashboard(self, selected_seed_id: str | None = None) -> DashboardViewModel: + seeds = self.seed_repo.list() + selected_seed = self.seed_repo.get(selected_seed_id) if selected_seed_id else None + baseline_metrics_by_branch = self.metrics_repo.get_all() + available_branches: list[str] = [] + setup_error = self.git_service.setup_error() + if setup_error is None: + try: + all_branches = self.git_service.list_branches() + if not all_branches: + setup_error = "No local branches found yet. Create an initial commit/branch, then reload." 
+ else: + available_branches = [ + b for b in all_branches + if not self.git_service.is_seed_specific_branch(b) + ] + # Use only branches that exist in the repo; do not add DEFAULT_BASELINE_BRANCH + # if it does not exist, so the dropdown never shows a non-existent branch. + except GitCommandError as exc: + setup_error = str(exc) + # Default to first existing branch so the selected value is always valid. + default_baseline_branch = (available_branches[0] if available_branches else DEFAULT_BASELINE_BRANCH) or "master" + status_column_map = { + SeedStatus.draft: "seedInbox", + SeedStatus.queued: "seedInbox", + SeedStatus.planning: "generated", + SeedStatus.generated: "generated", + SeedStatus.dca_queued: "generated", + SeedStatus.adapting: "activeDca", + SeedStatus.running: "activeDca", + SeedStatus.passed: "completed", + SeedStatus.failed: "completed", + SeedStatus.promoted: "completed", + } + seeds_by_column: dict[str, list[SeedRecord]] = { + "seedInbox": [], + "generated": [], + "activeDca": [], + "completed": [], + } + for seed in seeds: + self._reconcile_seed_status_signal(seed) + column_id = status_column_map.get(seed.status, "seedInbox") + seeds_by_column[column_id].append(seed) + columns = [ + DashboardColumn( + id="seedInbox", + title="Seed", + description="New prompts and queued planning work.", + seeds=seeds_by_column["seedInbox"], + ), + DashboardColumn( + id="generated", + title="Plan", + description="Planning and generated code ready for Do-Check-Action.", + seeds=seeds_by_column["generated"], + ), + DashboardColumn( + id="activeDca", + title="Do-Check-Action", + description="Adapting, fixing, and running the seed run.", + seeds=seeds_by_column["activeDca"], + ), + DashboardColumn( + id="completed", + title="Completed", + description="Finished runs; promoted seeds merged into baseline.", + seeds=seeds_by_column["completed"], + ), + ] + return DashboardViewModel( + setup_error=setup_error, + baseline_metrics_by_branch=baseline_metrics_by_branch, + 
default_baseline_branch=default_baseline_branch, + available_branches=available_branches, + seed_count=len(seeds), + columns=columns, + selected_seed=selected_seed, + daemon_status=get_daemon_status(), + ) + + def seed_detail(self, seed_id: str) -> dict[str, object]: + seed = self.require_seed(seed_id) + expected_worktree = ( + self._baseline_worktree_path() + if seed.seed_id == BASELINE_SEED_ID + else self._seed_worktree_path(seed.seed_id) + ) + needs_save = False + if expected_worktree is not None and not seed.worktree_path: + seed.worktree_path = expected_worktree + needs_save = True + if needs_save: + seed.updated_at = now_ts() + self.seed_repo.save(seed) + self._reconcile_seed_status_signal(seed) + return { + "seed": seed, + "can_edit_prompt": self.can_edit_seed_prompt(seed), + "runs": self.run_repo.list(seed_id), + "events": self.seed_repo.events(seed_id), + "baseline_metrics_for_branch": self.metrics_repo.get_for_branch(seed.baseline_branch), + "setup_error": self.git_service.setup_error_for_branches(seed.baseline_branch), + } + + def extract_summary(self, output_text: str, stage: StageName) -> dict[str, object] | None: + start_marker, end_marker = SUMMARY_MARKERS[stage.value] + pattern = rf"{start_marker}\s*(\{{.*?\}})\s*{end_marker}" + match = re.search(pattern, output_text, flags=re.DOTALL) + if not match: + return None + try: + return json.loads(match.group(1)) + except json.JSONDecodeError: + return {"raw_summary": match.group(1)} + + def extract_metrics(self, output_text: str) -> dict[str, float | int]: + patterns = { + "val_bpb": r"^val_bpb:\s+([0-9.]+)", + "training_seconds": r"^training_seconds:\s+([0-9.]+)", + "total_seconds": r"^total_seconds:\s+([0-9.]+)", + "startup_seconds": r"^startup_seconds:\s+([0-9.]+)", + "peak_vram_mb": r"^peak_vram_mb:\s+([0-9.]+)", + "mfu_percent": r"^mfu_percent:\s+([0-9.]+)", + "total_tokens_M": r"^total_tokens_M:\s+([0-9.]+)", + "num_steps": r"^num_steps:\s+([0-9]+)", + "num_params_M": r"^num_params_M:\s+([0-9.]+)", 
+ "depth": r"^depth:\s+([0-9]+)", + } + metrics: dict[str, float | int] = {} + for key, pattern in patterns.items(): + match = re.search(pattern, output_text, flags=re.MULTILINE) + if not match: + continue + metrics[key] = int(match.group(1)) if key in {"num_steps", "depth"} else float(match.group(1)) + return metrics + + def extract_dca_metrics( + self, output_text: str, summary: dict[str, object] | None = None + ) -> dict[str, float | int]: + if summary: + summary_metrics = summary.get("metrics") + if isinstance(summary_metrics, dict): + parsed: dict[str, float | int] = {} + int_keys = {"num_steps", "depth"} + float_keys = { + "val_bpb", + "training_seconds", + "total_seconds", + "startup_seconds", + "peak_vram_mb", + "mfu_percent", + "total_tokens_M", + "num_params_M", + } + for key in int_keys | float_keys: + value = summary_metrics.get(key) + if value is None: + continue + try: + parsed[key] = int(value) if key in int_keys else float(value) + except (TypeError, ValueError): + continue + if parsed: + return parsed + return self.extract_metrics(output_text) + + @staticmethod + def evaluate_signal( + metrics: dict[str, float | int], + last_val_bpb: float | None, + promotion_threshold: float = PROMOTION_THRESHOLD, + ) -> str: + val_bpb = metrics.get("val_bpb") + if val_bpb is None: + return "error" + if last_val_bpb is None: + return "positive_signal" + delta = float(last_val_bpb) - float(val_bpb) + if delta >= promotion_threshold: + return "positive_signal" + if delta <= -promotion_threshold: + return "negative_signal" + return "neutral" + + +def default_workflow_service() -> WorkflowService: + return WorkflowService() diff --git a/component_system/tailwind.config.js b/component_system/tailwind.config.js new file mode 100644 index 000000000..ea1a7a372 --- /dev/null +++ b/component_system/tailwind.config.js @@ -0,0 +1,11 @@ +/** @type {import('tailwindcss').Config} */ +module.exports = { + content: [ + "./web/templates/**/*.html", + "./web/static/**/*.js" + ], + 
theme: { + extend: {} + }, + plugins: [] +}; diff --git a/component_system/task.py b/component_system/task.py new file mode 100644 index 000000000..7aa32f09f --- /dev/null +++ b/component_system/task.py @@ -0,0 +1,243 @@ +"""Shared queue and JSON state helpers for the component-system web app.""" +from __future__ import annotations + +import json +import os +import shutil +import time +import uuid +from pathlib import Path +from typing import Any + +COMPONENT_SYSTEM_ROOT = Path(__file__).resolve().parent +HISTORY_ROOT = COMPONENT_SYSTEM_ROOT / "history" +QUEUE_ROOT = HISTORY_ROOT / "queue" +STATE_ROOT = HISTORY_ROOT / "state" +SEEDS_ROOT = STATE_ROOT / "seeds" +RUNS_ROOT = STATE_ROOT / "runs" +EVENTS_ROOT = STATE_ROOT / "events" +BASELINE_BRANCHES_PATH = COMPONENT_SYSTEM_ROOT / "baseline_branches.json" +BASELINE_METRICS_PATH = COMPONENT_SYSTEM_ROOT / "baseline_metrics.json" +WORKTREE_ROOT = HISTORY_ROOT / "worktrees" +LOG_ROOT = HISTORY_ROOT / "logs" + +STAGE_DIRS = { + "p": QUEUE_ROOT / "p", + "dca": QUEUE_ROOT / "dca", + "direct": QUEUE_ROOT / "direct", +} +DONE_DIR = QUEUE_ROOT / "done" +ERROR_DIR = QUEUE_ROOT / "error" +DAEMON_HEARTBEAT_PATH = STATE_ROOT / "daemon_heartbeat.json" +DAEMON_HEARTBEAT_STALE_SECONDS = 5 + +def _read_json(path: Path, default: Any) -> Any: + if not path.exists(): + return default + return json.loads(path.read_text(encoding="utf-8")) + + +def _write_json(path: Path, payload: Any) -> Path: + path.parent.mkdir(parents=True, exist_ok=True) + path.write_text(json.dumps(payload, indent=2, ensure_ascii=False), encoding="utf-8") + return path + + +def now_ts() -> float: + return time.time() + + +def now_iso() -> str: + return time.strftime("%Y-%m-%d %H:%M:%S") + + +def daemon_heartbeat() -> None: + """Write the daemon heartbeat file (call from the daemon process).""" + ensure_queue_layout() + _write_json( + DAEMON_HEARTBEAT_PATH, + {"timestamp": now_ts(), "pid": os.getpid()}, + ) + + +def get_daemon_status() -> str: + """Return 'running' if 
the daemon heartbeat is recent, else 'stopped'.""" + if not DAEMON_HEARTBEAT_PATH.exists(): + return "stopped" + try: + data = _read_json(DAEMON_HEARTBEAT_PATH, {}) + ts = data.get("timestamp") + if ts is None: + return "stopped" + if (now_ts() - float(ts)) <= DAEMON_HEARTBEAT_STALE_SECONDS: + return "running" + except Exception: + pass + return "stopped" + + +def ensure_queue_layout() -> None: + HISTORY_ROOT.mkdir(parents=True, exist_ok=True) + for d in STAGE_DIRS.values(): + d.mkdir(parents=True, exist_ok=True) + DONE_DIR.mkdir(parents=True, exist_ok=True) + ERROR_DIR.mkdir(parents=True, exist_ok=True) + SEEDS_ROOT.mkdir(parents=True, exist_ok=True) + RUNS_ROOT.mkdir(parents=True, exist_ok=True) + EVENTS_ROOT.mkdir(parents=True, exist_ok=True) + WORKTREE_ROOT.mkdir(parents=True, exist_ok=True) + LOG_ROOT.mkdir(parents=True, exist_ok=True) + # Auto-create baseline JSON files if missing (like results.tsv for recording run data) + if not BASELINE_METRICS_PATH.exists(): + _write_json(BASELINE_METRICS_PATH, {}) + if not BASELINE_BRANCHES_PATH.exists(): + _write_json(BASELINE_BRANCHES_PATH, {}) + + +def new_task_id(prefix: str | None = None) -> str: + ts = time.strftime("%Y%m%d-%H%M%S") + short = uuid.uuid4().hex[:8] + task_id = f"{ts}-{short}" + return f"{prefix}-{task_id}" if prefix else task_id + + +def new_seed_id(prefix: str = "seed") -> str: + return f"{prefix}-{uuid.uuid4().hex[:6]}" + + +def new_run_id(stage: str) -> str: + return new_task_id(stage) + + +def write_task(stage: str, payload: dict[str, Any], task_id: str | None = None) -> Path: + ensure_queue_layout() + if stage not in STAGE_DIRS: + raise KeyError(f"Unknown stage {stage!r}") + tid = task_id or new_task_id(stage) + path = STAGE_DIRS[stage] / f"{tid}.json" + payload_with_meta = {"task_id": tid, "stage": stage, "created_at": now_ts(), **payload} + return _write_json(path, payload_with_meta) + + +def read_task(path: Path) -> dict[str, Any]: + return _read_json(path, {}) + + +def move_to_done(path: 
Path) -> Path: + ensure_queue_layout() + dest = DONE_DIR / path.name + if dest.exists(): + dest.unlink() + path.rename(dest) + return dest + + +def move_to_error(path: Path) -> Path: + ensure_queue_layout() + dest = ERROR_DIR / path.name + if dest.exists(): + dest.unlink() + path.rename(dest) + return dest + + +def list_pending(stage: str) -> list[Path]: + ensure_queue_layout() + if stage not in STAGE_DIRS: + raise KeyError(f"Unknown stage {stage!r}") + return sorted(STAGE_DIRS[stage].glob("*.json")) + + +def seed_path(seed_id: str) -> Path: + return SEEDS_ROOT / f"{seed_id}.json" + + +def run_path(run_id: str) -> Path: + return RUNS_ROOT / f"{run_id}.json" + + +def event_path(seed_id: str) -> Path: + return EVENTS_ROOT / f"{seed_id}.json" + + +def save_seed(seed: dict[str, Any]) -> Path: + seed_id = seed["seed_id"] + return _write_json(seed_path(seed_id), seed) + + +def load_seed(seed_id: str) -> dict[str, Any]: + return _read_json(seed_path(seed_id), {}) + + +def list_seeds() -> list[dict[str, Any]]: + ensure_queue_layout() + seeds = [_read_json(path, {}) for path in SEEDS_ROOT.glob("*.json")] + return sorted(seeds, key=lambda item: item.get("updated_at", item.get("created_at", 0)), reverse=True) + + +def save_run(run: dict[str, Any]) -> Path: + return _write_json(run_path(run["run_id"]), run) + + +def load_run(run_id: str) -> dict[str, Any]: + return _read_json(run_path(run_id), {}) + + +def list_runs(seed_id: str | None = None) -> list[dict[str, Any]]: + ensure_queue_layout() + runs = [_read_json(path, {}) for path in RUNS_ROOT.glob("*.json")] + if seed_id is not None: + runs = [run for run in runs if run.get("seed_id") == seed_id] + return sorted(runs, key=lambda item: item.get("updated_at", item.get("created_at", 0)), reverse=True) + + +def append_event(seed_id: str, event: dict[str, Any]) -> list[dict[str, Any]]: + ensure_queue_layout() + payload = _read_json(event_path(seed_id), []) + payload.append({"created_at": now_ts(), "created_at_human": now_iso(), 
**event}) + _write_json(event_path(seed_id), payload) + return payload + + +def load_events(seed_id: str) -> list[dict[str, Any]]: + return _read_json(event_path(seed_id), []) + + +def delete_seed(seed_id: str) -> None: + for path in (seed_path(seed_id), event_path(seed_id)): + if path.exists(): + path.unlink() + for run in list_runs(seed_id): + path = run_path(run["run_id"]) + if path.exists(): + path.unlink() + + +def load_baseline_branch_map() -> dict[str, str]: + """Load seed_id -> baseline_branch mapping (for agent lookup and workflow).""" + ensure_queue_layout() + return _read_json(BASELINE_BRANCHES_PATH, {}) + + +def save_baseline_branch_map(mapping: dict[str, str]) -> None: + """Persist seed_id -> baseline_branch mapping.""" + ensure_queue_layout() + _write_json(BASELINE_BRANCHES_PATH, mapping) + + +def load_baseline_metrics() -> dict[str, dict[str, Any]]: + """Load baseline_branch -> { last_val_bpb, promoted_branch, promoted_at, promoted_idea }.""" + ensure_queue_layout() + return _read_json(BASELINE_METRICS_PATH, {}) + + +def save_baseline_metrics(metrics_by_branch: dict[str, dict[str, Any]]) -> None: + """Persist per-branch baseline metrics.""" + ensure_queue_layout() + _write_json(BASELINE_METRICS_PATH, metrics_by_branch) + + +def reset_worktree(path: str | Path) -> None: + worktree = Path(path) + if worktree.exists(): + shutil.rmtree(worktree) diff --git a/component_system/training/mainline.py b/component_system/training/mainline.py new file mode 100644 index 000000000..e91771d85 --- /dev/null +++ b/component_system/training/mainline.py @@ -0,0 +1,82 @@ +"""Mainline assembler: reads static config, dynamically loads components, runs training.""" +from __future__ import annotations + +if __package__ in {None, ""}: + import sys + from pathlib import Path + + sys.path.insert(0, str(Path(__file__).resolve().parents[2])) + +import importlib +import os +from dataclasses import asdict +from typing import Any + +import torch + +from prepare import Tokenizer + 
+from component_system.config import get_training_binding + + +def _prepare_environment() -> None: + os.environ["PYTORCH_ALLOC_CONF"] = "expandable_segments:True" + os.environ["HF_HUB_DISABLE_PROGRESS_BARS"] = "1" + torch.manual_seed(42) + if torch.cuda.is_available(): + torch.cuda.manual_seed(42) + torch.set_float32_matmul_precision("high") + torch.cuda.reset_peak_memory_stats() + + +def _import_module(path: str) -> Any: + return importlib.import_module(path) + + +def run_mainline_training(binding_path: str | None = None) -> dict[str, Any]: + _prepare_environment() + binding = get_training_binding() + + tokenizer = Tokenizer.from_directory() + vocab_size = tokenizer.get_vocab_size() + + model_module = _import_module(binding["model_module"]) + optimizer_module = _import_module(binding["optimizer_module"]) + training_step_module = _import_module(binding["training_step_module"]) + + settings = training_step_module.default_training_settings() + config = model_module.build_model_config( + depth=settings.depth, + vocab_size=vocab_size, + aspect_ratio=settings.aspect_ratio, + head_dim=settings.head_dim, + window_pattern=settings.window_pattern, + ) + + print("Loaded training binding from config") + print(f"Model config: {asdict(config)}") + + model, param_counts, num_flops_per_token = model_module.create_model( + config, + compile_model=settings.compile_model, + ) + + print("Parameter counts:") + for key, value in param_counts.items(): + print(f" {key:24s}: {value:,}") + print(f"Estimated FLOPs per token: {num_flops_per_token:e}") + + optimizer = optimizer_module.create_optimizer(model, settings) + return training_step_module.run_training_session( + model=model, + optimizer=optimizer, + tokenizer=tokenizer, + settings=settings, + param_counts=param_counts, + num_flops_per_token=num_flops_per_token, + baseline_binding=binding, + ) + + +if __name__ == "__main__": + run_mainline_training() diff --git a/component_system/web/app.py b/component_system/web/app.py new file mode 
100644 index 000000000..18a82ae21 --- /dev/null +++ b/component_system/web/app.py @@ -0,0 +1,42 @@ +from __future__ import annotations + +from pathlib import Path + +from fastapi import FastAPI +from fastapi.responses import RedirectResponse, Response +from fastapi.staticfiles import StaticFiles +from fastapi.templating import Jinja2Templates + +from component_system.services.workflow import default_workflow_service +from component_system.task import ensure_queue_layout +from component_system.web.routes import router + +WEB_ROOT = Path(__file__).resolve().parent +TEMPLATE_ROOT = WEB_ROOT / "templates" +STATIC_ROOT = WEB_ROOT / "static" + +def create_app() -> FastAPI: + ensure_queue_layout() + app = FastAPI(title="Component System", version="0.1.0") + app.state.workflow = default_workflow_service() + app.state.templates = Jinja2Templates(directory=str(TEMPLATE_ROOT)) + app.mount("/static", StaticFiles(directory=str(STATIC_ROOT)), name="static") + app.include_router(router, prefix="/component-system") + + @app.get("/", include_in_schema=False) + def root() -> RedirectResponse: + return RedirectResponse(url="/component-system", status_code=307) + + @app.get("/favicon.ico", include_in_schema=False) + def favicon() -> Response: + return Response(status_code=204) + + @app.get("/.well-known/appspecific/com.chrome.devtools.json", include_in_schema=False) + def chrome_devtools_probe() -> Response: + # Chrome DevTools probes this endpoint; return 204 to avoid log spam. 
+ return Response(status_code=204) + + return app + + +app = create_app() diff --git a/component_system/web/routes.py b/component_system/web/routes.py new file mode 100644 index 000000000..e5424ccc7 --- /dev/null +++ b/component_system/web/routes.py @@ -0,0 +1,337 @@ +from __future__ import annotations + +from pathlib import Path + +from fastapi import APIRouter, Form, HTTPException, Query, Request +from fastapi.responses import HTMLResponse, RedirectResponse, Response + +from component_system.domain.models import SeedStatus +from component_system.services.workflow import GitCommandError, WorkflowService +from component_system.task import COMPONENT_SYSTEM_ROOT, get_daemon_status, LOG_ROOT + +router = APIRouter() + + +def _templates(request: Request): + return request.app.state.templates + + +def _workflow(request: Request) -> WorkflowService: + return request.app.state.workflow + + +def _is_htmx(request: Request) -> bool: + return request.headers.get("hx-request", "").lower() == "true" + + +def _render(request: Request, template_name: str, context: dict, status_code: int = 200) -> HTMLResponse: + templates = _templates(request) + return templates.TemplateResponse(request, template_name, {"request": request, **context}, status_code=status_code) + + +def _resolve_log_path(run_id: str, stream: str, run_log_path: str | None) -> Path | None: + # Primary source: persisted run metadata path. + if run_log_path: + candidate = Path(run_log_path) + if candidate.exists() and candidate.is_file(): + return candidate + + # Deterministic run-id naming (new format). 
+ run_named = LOG_ROOT / f"{run_id}.{stream}.log" + if run_named.exists() and run_named.is_file(): + return run_named + + return None + + +def _resolve_prompt_path(run_id: str, run_prompt_path: str | None) -> Path | None: + if run_prompt_path: + candidate = Path(run_prompt_path) + if candidate.exists() and candidate.is_file(): + return candidate + prompt_named = LOG_ROOT / f"{run_id}.prompt.txt" + if prompt_named.exists() and prompt_named.is_file(): + return prompt_named + return None + + +@router.get("/", response_class=HTMLResponse) +def dashboard(request: Request, seed_id: str | None = None) -> HTMLResponse: + workflow = _workflow(request) + viewmodel = workflow.build_dashboard(selected_seed_id=seed_id) + context = { + "dashboard": viewmodel, + "selected_seed_id": seed_id, + "detail": workflow.seed_detail(seed_id) if seed_id else None, + } + return _render(request, "dashboard.html", context) + + +@router.get("/partials/dashboard", response_class=HTMLResponse) +def dashboard_board(request: Request, seed_id: str | None = None) -> HTMLResponse: + workflow = _workflow(request) + viewmodel = workflow.build_dashboard(selected_seed_id=seed_id) + return _render(request, "partials/dashboard_board.html", {"dashboard": viewmodel, "selected_seed_id": seed_id}) + + +@router.get("/partials/daemon-status", response_class=HTMLResponse) +def daemon_status_partial(request: Request) -> HTMLResponse: + return _render(request, "partials/daemon_status.html", {"daemon_status": get_daemon_status()}) + + +@router.get("/partials/seeds/{seed_id}", response_class=HTMLResponse) +def seed_detail_partial(request: Request, seed_id: str) -> HTMLResponse: + workflow = _workflow(request) + try: + detail = workflow.seed_detail(seed_id) + except KeyError as exc: + raise HTTPException(status_code=404, detail=str(exc)) from exc + dashboard = workflow.build_dashboard(selected_seed_id=seed_id) + context = { + **detail, + "dashboard": dashboard, + "selected_seed_id": seed_id, + "oob": True, + } + return 
_render(request, "partials/seed_detail_response.html", context) + + +@router.get("/api/runs/{run_id}/prompt") +def run_prompt(request: Request, run_id: str) -> dict[str, object]: + workflow = _workflow(request) + run = workflow.run_repo.get(run_id) + run_prompt_path = run.prompt_path if run is not None else None + prompt_path = _resolve_prompt_path(run_id, run_prompt_path) + if prompt_path is None: + raise HTTPException(status_code=404, detail=f"Prompt for run '{run_id}' not found.") + content = prompt_path.read_text(encoding="utf-8", errors="replace") + return {"content": content} + + +@router.get("/api/runs/{run_id}/log") +def run_log_chunk( + request: Request, + run_id: str, + stream: str = Query("stdout"), + offset: int = Query(0, ge=0), + limit: int = Query(64 * 1024, ge=1024, le=512 * 1024), +) -> dict[str, object]: + workflow = _workflow(request) + run = workflow.run_repo.get(run_id) + + complete_status = bool(run is not None and run.status.value in {"succeeded", "failed"}) + if stream not in {"stdout", "stderr"}: + raise HTTPException(status_code=400, detail="stream must be one of: stdout, stderr") + + run_log_path = None + if run is not None: + run_log_path = run.log_path if stream == "stdout" else run.stderr_log_path + if not run_log_path and stream == "stderr" and run.log_path and run.log_path.endswith(".stdout.log"): + run_log_path = run.log_path.replace(".stdout.log", ".stderr.log") + + log_path = _resolve_log_path(run_id, stream, run_log_path) + if log_path is None and run is not None and not complete_status: + # During queued/running phases metadata may not yet include paths and files may appear slightly later. 
+ return { + "chunk": "", + "next_offset": offset, + "size": 0, + "complete": False, + } + + if log_path is None: + raise HTTPException(status_code=404, detail=f"Log for run '{run_id}' ({stream}) not found.") + + if not log_path.exists() or not log_path.is_file(): + return { + "chunk": "", + "next_offset": offset, + "size": 0, + "complete": complete_status, + } + + file_size = log_path.stat().st_size + if offset > file_size: + offset = file_size + + with open(log_path, "rb") as handle: + handle.seek(offset) + payload = handle.read(limit) + + next_offset = offset + len(payload) + return { + "chunk": payload.decode("utf-8", errors="replace"), + "next_offset": next_offset, + "size": file_size, + "complete": bool(complete_status and next_offset >= file_size), + } + + +@router.get("/seeds/{seed_id}", response_class=HTMLResponse) +def seed_detail_page(request: Request, seed_id: str) -> HTMLResponse: + workflow = _workflow(request) + try: + detail = workflow.seed_detail(seed_id) + except KeyError as exc: + raise HTTPException(status_code=404, detail=str(exc)) from exc + return _render(request, "seed_detail_page.html", detail) + + +@router.post("/actions/seeds", response_class=HTMLResponse) +def create_seed( + request: Request, + prompt: str = Form(...), + baseline_branch: str = Form(...), + seed_mode: str = Form("manual"), +) -> Response: + workflow = _workflow(request) + seed = workflow.create_seed( + prompt, + baseline_branch=baseline_branch, + ralph_loop_enabled=seed_mode == "ralph", + ) + if seed_mode == "ralph": + try: + workflow.queue_p(seed.seed_id) + except (RuntimeError, GitCommandError) as exc: + workflow.seed_repo.append_event( + seed.seed_id, + "ralph.start_failed", + f"Ralph loop could not queue the initial Plan run: {exc}", + ) + target_url = str(request.url_for("dashboard")) + f"?seed_id={seed.seed_id}" + if _is_htmx(request): + response = Response(status_code=204) + response.headers["HX-Redirect"] = target_url + return response + return 
RedirectResponse(target_url, status_code=303) + + +@router.post("/actions/direct-code-agent", response_class=HTMLResponse) +def start_direct_code_agent(request: Request, prompt: str = Form(...)) -> Response: + workflow = _workflow(request) + try: + seed, _run = workflow.create_direct_code_seed(prompt) + except RuntimeError as exc: + if _is_htmx(request): + return _render(request, "partials/action_error.html", {"message": str(exc)}, status_code=400) + raise HTTPException(status_code=400, detail=str(exc)) from exc + target_url = str(request.url_for("dashboard")) + f"?seed_id={seed.seed_id}" + if _is_htmx(request): + response = Response(status_code=204) + response.headers["HX-Redirect"] = target_url + return response + return RedirectResponse(target_url, status_code=303) + + +@router.post("/actions/seeds/{seed_id}/p", response_class=HTMLResponse) +def queue_p(request: Request, seed_id: str) -> Response: + workflow = _workflow(request) + try: + workflow.queue_p(seed_id) + except KeyError as exc: + if _is_htmx(request): + return _render(request, "partials/action_error.html", {"message": str(exc)}, status_code=404) + raise HTTPException(status_code=404, detail=str(exc)) from exc + except (RuntimeError, GitCommandError) as exc: + if _is_htmx(request): + return _render(request, "partials/action_error.html", {"message": str(exc)}, status_code=400) + raise HTTPException(status_code=400, detail=str(exc)) from exc + target_url = str(request.url_for("dashboard")) + f"?seed_id={seed_id}" + if _is_htmx(request): + response = Response(status_code=204) + response.headers["HX-Redirect"] = target_url + return response + return RedirectResponse(target_url, status_code=303) + + +@router.post("/actions/seeds/{seed_id}/prompt", response_class=HTMLResponse) +def update_seed_prompt(request: Request, seed_id: str, prompt: str = Form(...)) -> Response: + workflow = _workflow(request) + try: + workflow.update_seed_prompt(seed_id, prompt) + except KeyError as exc: + if _is_htmx(request): + 
return _render(request, "partials/action_error.html", {"message": str(exc)}, status_code=404) + raise HTTPException(status_code=404, detail=str(exc)) from exc + except RuntimeError as exc: + if _is_htmx(request): + return _render(request, "partials/action_error.html", {"message": str(exc)}, status_code=400) + raise HTTPException(status_code=400, detail=str(exc)) from exc + + if _is_htmx(request): + detail = workflow.seed_detail(seed_id) + dashboard = workflow.build_dashboard(selected_seed_id=seed_id) + context = { + **detail, + "dashboard": dashboard, + "selected_seed_id": seed_id, + "oob": True, + } + return _render(request, "partials/seed_detail_response.html", context) + + target_url = str(request.url_for("dashboard")) + f"?seed_id={seed_id}" + return RedirectResponse(target_url, status_code=303) + + +@router.post("/actions/seeds/{seed_id}/dca", response_class=HTMLResponse) +def queue_dca(request: Request, seed_id: str) -> Response: + workflow = _workflow(request) + try: + workflow.queue_dca(seed_id) + except (KeyError, RuntimeError) as exc: + if _is_htmx(request): + return _render(request, "partials/action_error.html", {"message": str(exc)}, status_code=400) + raise HTTPException(status_code=400, detail=str(exc)) from exc + target_url = str(request.url_for("dashboard")) + f"?seed_id={seed_id}" + if _is_htmx(request): + response = Response(status_code=204) + response.headers["HX-Redirect"] = target_url + return response + return RedirectResponse(target_url, status_code=303) + + +@router.post("/actions/seeds/{seed_id}/ralph/start", response_class=HTMLResponse) +def start_ralph_loop(request: Request, seed_id: str) -> Response: + workflow = _workflow(request) + try: + seed = workflow.set_ralph_loop(seed_id, True) + if seed.status in { + SeedStatus.draft, + SeedStatus.generated, + SeedStatus.passed, + SeedStatus.failed, + SeedStatus.promoted, + }: + workflow.queue_p(seed_id) + except KeyError as exc: + if _is_htmx(request): + return _render(request, 
"partials/action_error.html", {"message": str(exc)}, status_code=404) + raise HTTPException(status_code=404, detail=str(exc)) from exc + except (RuntimeError, GitCommandError) as exc: + if _is_htmx(request): + return _render(request, "partials/action_error.html", {"message": str(exc)}, status_code=400) + raise HTTPException(status_code=400, detail=str(exc)) from exc + target_url = str(request.url_for("dashboard")) + f"?seed_id={seed_id}" + if _is_htmx(request): + response = Response(status_code=204) + response.headers["HX-Redirect"] = target_url + return response + return RedirectResponse(target_url, status_code=303) + + +@router.post("/actions/seeds/{seed_id}/ralph/stop", response_class=HTMLResponse) +def stop_ralph_loop(request: Request, seed_id: str) -> Response: + workflow = _workflow(request) + try: + workflow.set_ralph_loop(seed_id, False) + except KeyError as exc: + if _is_htmx(request): + return _render(request, "partials/action_error.html", {"message": str(exc)}, status_code=404) + raise HTTPException(status_code=404, detail=str(exc)) from exc + target_url = str(request.url_for("dashboard")) + f"?seed_id={seed_id}" + if _is_htmx(request): + response = Response(status_code=204) + response.headers["HX-Redirect"] = target_url + return response + return RedirectResponse(target_url, status_code=303) diff --git a/component_system/web/static/app.css b/component_system/web/static/app.css new file mode 100644 index 000000000..7edabb7c7 --- /dev/null +++ b/component_system/web/static/app.css @@ -0,0 +1,137 @@ +:root { + color-scheme: dark; + --card-bg: rgb(15 23 42 / 0.6); + --card-border: rgb(51 65 85); + --muted: rgb(148 163 184); +} + +body { + font-family: + Inter, + ui-sans-serif, + system-ui, + -apple-system, + BlinkMacSystemFont, + "Segoe UI", + sans-serif; + -webkit-font-smoothing: antialiased; +} + +/* IDs and branch names */ +.font-mono-id { + font-family: ui-monospace, "Cascadia Code", "Source Code Pro", Menlo, Consolas, monospace; + font-size: 0.9em; + 
letter-spacing: 0.02em; +} + +.line-clamp-3 { + display: -webkit-box; + overflow: hidden; + -webkit-box-orient: vertical; + -webkit-line-clamp: 3; +} + +/* Card hover for clickable seed cards */ +.seed-card { + transition: border-color 0.15s ease, background-color 0.15s ease; +} +.seed-card:hover { + border-color: rgb(56 189 248 / 0.5); + background-color: rgb(15 23 42 / 0.9); +} +.seed-card.is-selected { + border-color: rgb(14 165 233); + background-color: rgb(14 165 233 / 0.14); + box-shadow: inset 0 0 0 1px rgb(14 165 233 / 0.35); +} + +/* Status pills */ +.status-pill { + display: inline-flex; + align-items: center; + border: 1px solid transparent; + font-size: 0.625rem; + font-weight: 600; + letter-spacing: 0.04em; + text-transform: uppercase; + line-height: 1; + padding: 0.2rem 0.5rem; + border-radius: 9999px; + white-space: nowrap; +} +.status-draft { background: rgb(51 65 85 / 0.62); border-color: rgb(148 163 184 / 0.4); color: rgb(226 232 240); } +.status-queued { background: rgb(146 64 14 / 0.45); border-color: rgb(245 158 11 / 0.45); color: rgb(253 230 138); } +.status-planning { background: rgb(30 64 175 / 0.4); border-color: rgb(96 165 250 / 0.45); color: rgb(191 219 254); } +.status-generated { background: rgb(15 118 110 / 0.38); border-color: rgb(45 212 191 / 0.4); color: rgb(153 246 228); } +.status-dca_queued { background: rgb(8 145 178 / 0.33); border-color: rgb(34 211 238 / 0.38); color: rgb(165 243 252); } +.status-adapting, +.status-running { background: rgb(109 40 217 / 0.35); border-color: rgb(192 132 252 / 0.42); color: rgb(233 213 255); } +.status-passed { background: rgb(21 128 61 / 0.28); border-color: rgb(74 222 128 / 0.4); color: rgb(187 247 208); } +.status-failed { background: rgb(153 27 27 / 0.34); border-color: rgb(248 113 113 / 0.42); color: rgb(254 202 202); } +.status-promoted { background: rgb(22 163 74 / 0.28); border-color: rgb(74 222 128 / 0.42); color: rgb(187 247 208); } + +/* Empty state placeholder */ +.empty-value { + 
color: rgb(100 116 139); + font-style: normal; +} + +/* Section headers */ +.section-label { + font-size: 11px; + letter-spacing: 0.2em; + text-transform: uppercase; + color: rgb(100 116 139); +} + +/* Scroll containers for long dashboard lists */ +.scroll-pane { + min-height: 0; + overflow-y: auto; + scrollbar-gutter: stable; +} + +.scroll-pane-stage { + max-height: min(32rem, 68vh); +} + +.scroll-pane-detail { + max-height: min(30rem, 62vh); +} + +.run-log-grid { + display: grid; + gap: 0.75rem; + grid-template-columns: minmax(0, 2fr) minmax(0, 4fr) minmax(0, 4fr); +} + +.run-log-pane { + min-width: 0; + min-height: 0; + display: flex; + flex-direction: column; +} + +.run-log-pre { + min-width: 0; + min-height: 0; + flex: 1 1 auto; + overflow: auto; + white-space: pre-wrap; + overflow-wrap: anywhere; + word-break: break-word; + border: 1px solid rgb(30 41 59); + border-radius: 0.25rem; + background: rgb(0 0 0 / 0.3); + padding: 0.5rem; + font-family: ui-monospace, "Cascadia Code", "Source Code Pro", Menlo, Consolas, monospace; + font-size: 11px; + line-height: 1.25rem; + color: rgb(226 232 240); +} + +@media (max-width: 1024px) { + .run-log-grid { + grid-template-columns: 1fr; + } +} diff --git a/component_system/web/static/app.js b/component_system/web/static/app.js new file mode 100644 index 000000000..77b514abc --- /dev/null +++ b/component_system/web/static/app.js @@ -0,0 +1,399 @@ +document.body.addEventListener("htmx:responseError", (event) => { + const target = event.detail.target; + if (!target) { + return; + } + target.innerHTML = `
Request failed.
`; +}); + +function selectedSeedIdFromUrl() { + const params = new URLSearchParams(window.location.search); + return params.get("seed_id"); +} + +function applySelectedSeed(seedId) { + const cards = document.querySelectorAll(".seed-card[data-seed-id]"); + cards.forEach((card) => { + const isSelected = seedId !== null && card.dataset.seedId === seedId; + card.classList.toggle("is-selected", isSelected); + card.setAttribute("aria-current", isSelected ? "true" : "false"); + }); +} + +let dashboardPollInFlight = false; +let seedDetailPollInFlight = false; + +function seedDetailUrl(seedId) { + const detail = document.getElementById("seed-detail"); + const template = detail?.dataset.seedDetailUrlTemplate; + if (!template || !seedId) { + return null; + } + return template.replace("__SEED_ID__", encodeURIComponent(seedId)); +} + +function isLogViewerOpen() { + const target = document.getElementById("seed-detail"); + if (!target) { + return false; + } + if (target.querySelector('[data-log-viewer-open="true"]')) { + return true; + } + if (target.querySelector("[data-log-stream]")) { + return true; + } + const seedId = selectedSeedIdFromUrl(); + return Boolean(seedId && localStorage.getItem(`seed-active-run-${seedId}`)); +} + +function dashboardBoardUrl() { + const board = document.getElementById("dashboard-board"); + const base = board?.dataset.dashboardPartialUrl; + if (!base) { + return null; + } + const seedId = selectedSeedIdFromUrl(); + if (!seedId) { + return base; + } + const separator = base.includes("?") ? 
"&" : "?"; + return `${base}${separator}seed_id=${encodeURIComponent(seedId)}`; +} + +function pollDashboardBoard() { + const target = document.getElementById("dashboard-board"); + const url = dashboardBoardUrl(); + if (!target || !url || dashboardPollInFlight) { + return; + } + dashboardPollInFlight = true; + htmx + .ajax("GET", url, { target: "#dashboard-board", swap: "outerHTML" }) + .finally(() => { + dashboardPollInFlight = false; + }); +} + +function pollSeedDetail() { + const seedId = selectedSeedIdFromUrl(); + const target = document.getElementById("seed-detail"); + const url = seedDetailUrl(seedId); + if (!target || !url || seedDetailPollInFlight) { + return; + } + if (isLogViewerOpen()) { + return; + } + seedDetailPollInFlight = true; + htmx.ajax("GET", url, { target: "#seed-detail", swap: "innerHTML" }).finally(() => { + seedDetailPollInFlight = false; + }); +} + +function pollDashboard() { + if (document.hidden) { + return; + } + if (isLogViewerOpen()) { + return; + } + pollDashboardBoard(); + pollSeedDetail(); +} + +document.body.addEventListener("htmx:beforeRequest", (event) => { + const target = event.detail?.target; + if (!target || !isLogViewerOpen()) { + return; + } + // Pause daemon status auto-refresh while viewing logs. 
+ if (target.id === "daemon-status-panel") { + event.preventDefault(); + } +}); + +document.body.addEventListener("click", (event) => { + const card = event.target.closest(".seed-card[data-seed-id]"); + if (!card) { + return; + } + applySelectedSeed(card.dataset.seedId); +}); + +document.body.addEventListener("htmx:afterSettle", (event) => { + const target = event.detail?.target; + if (target && target.id === "seed-detail") { + applySelectedSeed(selectedSeedIdFromUrl()); + } +}); + +window.addEventListener("popstate", () => { + applySelectedSeed(selectedSeedIdFromUrl()); +}); + +applySelectedSeed(selectedSeedIdFromUrl()); +window.setInterval(pollDashboard, 5000); + +const logStreamIntervals = new Map(); +const logStreamState = new Map(); +const ansiCtor = window.AnsiUp || window.ansi_up?.AnsiUp || null; +const ansiRenderer = ansiCtor ? new ansiCtor() : null; + +if (ansiRenderer && Object.prototype.hasOwnProperty.call(ansiRenderer, "escape_html")) { + ansiRenderer.escape_html = true; +} + +function stripAnsiSequences(value) { + // CSI: \x1b[...m, OSC: \x1b]...\x07 or \x1b\ ; then any remaining ESC controls. 
+ return (value || "") + .replace(/\u001b\][^\u0007]*(?:\u0007|\u001b\\)/g, "") + .replace(/\u001b\[[0-?]*[ -/]*[@-~]/g, "") + .replace(/\u001b[@-_]/g, ""); +} + +function isRunComplete(status) { + return status === "succeeded" || status === "failed"; +} + +function updateLogStatus(runId, text) { + const nodes = document.querySelectorAll(`[data-log-status][data-run-id="${runId}"]`); + nodes.forEach((node) => { + node.textContent = text; + }); +} + +function updateCopyButtonState(runId, stream, enabled) { + const buttons = document.querySelectorAll( + `[data-log-copy][data-run-id="${runId}"][data-stream="${stream}"]` + ); + buttons.forEach((button) => { + button.disabled = !enabled; + }); +} + +function appendLogContent(pre, chunk) { + const currentRaw = pre.dataset.rawLog || ""; + const nextRaw = currentRaw + (chunk || ""); + + // Keep the viewer responsive for very large logs. + const maxChars = 200_000; + const trimmedRaw = + nextRaw.length > maxChars ? nextRaw.slice(nextRaw.length - maxChars) : nextRaw; + + pre.dataset.rawLog = trimmedRaw; + if (ansiRenderer) { + pre.innerHTML = ansiRenderer.ansi_to_html(trimmedRaw); + } else { + pre.textContent = stripAnsiSequences(trimmedRaw); + } + + pre.scrollTop = pre.scrollHeight; +} + +async function pollLogStream(pre) { + const runId = pre.dataset.runId; + const stream = pre.dataset.stream || "stdout"; + if (!runId) { + return; + } + + const state = logStreamState.get(pre) || { offset: 0, complete: false }; + const response = await fetch( + `/component-system/api/runs/${encodeURIComponent(runId)}/log?stream=${encodeURIComponent(stream)}&offset=${state.offset}` + ); + if (!response.ok) { + throw new Error(`Failed to fetch logs for ${runId}: ${response.status}`); + } + + const payload = await response.json(); + const chunk = payload.chunk || ""; + const nextOffset = Number(payload.next_offset || 0); + const complete = Boolean(payload.complete); + + appendLogContent(pre, chunk); + updateCopyButtonState(runId, stream, 
pre.textContent.length > 0); + logStreamState.set(pre, { offset: nextOffset, complete }); + + if (complete) { + updateLogStatus(runId, "Completed"); + const intervalId = logStreamIntervals.get(pre); + if (intervalId) { + clearInterval(intervalId); + logStreamIntervals.delete(pre); + } + return; + } + + if (chunk) { + updateLogStatus(runId, "Streaming..."); + } else { + updateLogStatus(runId, "Waiting for log output..."); + } +} + +function cleanupDetachedLogStreams() { + for (const [pre, intervalId] of logStreamIntervals.entries()) { + if (!document.body.contains(pre)) { + clearInterval(intervalId); + logStreamIntervals.delete(pre); + logStreamState.delete(pre); + } + } +} + +function initializeLogCopyButtons(root) { + root.querySelectorAll("[data-log-copy]").forEach((button) => { + if (button.dataset.logCopyReady === "true") { + return; + } + button.dataset.logCopyReady = "true"; + button.addEventListener("click", async () => { + const runId = button.dataset.runId; + if (!runId) { + return; + } + const stream = button.dataset.stream || "stdout"; + const pre = root.querySelector( + `[data-log-stream][data-run-id="${runId}"][data-stream="${stream}"]` + ); + if (!pre || !pre.textContent) { + return; + } + try { + await navigator.clipboard.writeText(pre.textContent); + const labelBefore = button.textContent; + button.textContent = "Copied!"; + setTimeout(() => { + button.textContent = labelBefore || "Copy"; + }, 1200); + } catch (error) { + console.error("Failed to copy log output", error); + } + }); + }); +} + +async function loadPromptContent(pre) { + const runId = pre.dataset.runId; + if (!runId) return; + try { + const response = await fetch( + `/component-system/api/runs/${encodeURIComponent(runId)}/prompt` + ); + if (!response.ok) return; + const payload = await response.json(); + const content = payload.content ?? 
""; + pre.textContent = content; + const copyBtn = document.querySelector( + `[data-prompt-copy][data-run-id="${runId}"]` + ); + if (copyBtn) copyBtn.disabled = false; + } catch (err) { + console.error("Failed to load prompt for run", runId, err); + } +} + +function initializePromptDisplays(root) { + root.querySelectorAll("[data-prompt-content]").forEach((pre) => { + if (pre.dataset.promptLoaded === "true") return; + pre.dataset.promptLoaded = "true"; + loadPromptContent(pre); + }); + root.querySelectorAll("[data-prompt-copy]").forEach((button) => { + if (button.dataset.promptCopyReady === "true") return; + button.dataset.promptCopyReady = "true"; + button.addEventListener("click", async () => { + const runId = button.dataset.runId; + if (!runId) return; + const pre = root.querySelector( + `[data-prompt-content][data-run-id="${runId}"]` + ); + if (!pre || !pre.textContent) return; + try { + await navigator.clipboard.writeText(pre.textContent); + const labelBefore = button.textContent; + button.textContent = "Copied!"; + setTimeout(() => { + button.textContent = labelBefore || "Copy"; + }, 1200); + } catch (err) { + console.error("Failed to copy prompt", err); + } + }); + }); +} + +function initializeLogStreams(root = document) { + cleanupDetachedLogStreams(); + initializeLogCopyButtons(root); + initializePromptDisplays(root); + + root.querySelectorAll("[data-log-stream]").forEach((pre) => { + if (pre.dataset.logStreamReady === "true") { + return; + } + pre.dataset.logStreamReady = "true"; + const runStatus = pre.dataset.runStatus || ""; + const runId = pre.dataset.runId; + if (!runId) { + return; + } + + if (isRunComplete(runStatus)) { + updateLogStatus(runId, "Completed"); + } else { + updateLogStatus(runId, "Connecting..."); + } + + const runPoll = async () => { + try { + await pollLogStream(pre); + } catch (error) { + updateLogStatus(runId, "Log fetch failed"); + console.error(error); + } + }; + + runPoll(); + const intervalId = window.setInterval(runPoll, 
2000); + logStreamIntervals.set(pre, intervalId); + }); +} + +function observeLogStreamMounts() { + const observer = new MutationObserver((mutations) => { + for (const mutation of mutations) { + if (mutation.type !== "childList" || mutation.addedNodes.length === 0) { + continue; + } + for (const node of mutation.addedNodes) { + if (!(node instanceof Element)) { + continue; + } + if ( + node.matches?.("[data-log-stream], [data-log-copy], [data-prompt-content], [data-prompt-copy]") || + node.querySelector?.("[data-log-stream], [data-log-copy], [data-prompt-content], [data-prompt-copy]") + ) { + initializeLogStreams(node); + return; + } + } + } + }); + + observer.observe(document.body, { childList: true, subtree: true }); +} + +document.body.addEventListener("htmx:afterSettle", (event) => { + const target = event.detail?.target; + if (!target) { + return; + } + if (target.id === "seed-detail") { + initializeLogStreams(target); + } +}); + +initializeLogStreams(document); +observeLogStreamMounts(); diff --git a/component_system/web/static/tailwind.input.css b/component_system/web/static/tailwind.input.css new file mode 100644 index 000000000..a563500f2 --- /dev/null +++ b/component_system/web/static/tailwind.input.css @@ -0,0 +1,27 @@ +@tailwind base; +@tailwind components; +@tailwind utilities; + +@layer base { + :root { + color-scheme: dark; + } + + body { + @apply min-h-screen bg-slate-950 text-slate-100; + font-family: + Inter, + ui-sans-serif, + system-ui, + -apple-system, + BlinkMacSystemFont, + "Segoe UI", + sans-serif; + } +} + +@layer utilities { + .card-panel { + @apply rounded-2xl border border-slate-800 bg-slate-900; + } +} diff --git a/component_system/web/templates/base.html b/component_system/web/templates/base.html new file mode 100644 index 000000000..ee1ac5364 --- /dev/null +++ b/component_system/web/templates/base.html @@ -0,0 +1,32 @@ + + + + + + {% block title %}Component System{% endblock %} + + + + + + + + +
+
+
+ + Component System + +

Seed -> Plan -> Do-Check-Action orchestration with FastAPI, HTMX, Alpine, and Tailwind.

+
+ +
+
+
+ {% block content %}{% endblock %} +
+ + diff --git a/component_system/web/templates/dashboard.html b/component_system/web/templates/dashboard.html new file mode 100644 index 000000000..5bff902d4 --- /dev/null +++ b/component_system/web/templates/dashboard.html @@ -0,0 +1,120 @@ +{% extends "base.html" %} +{% block title %}Component System Dashboard{% endblock %} +{% block content %} +
+
+
+

Create Seed

+

Start a new seed from a prompt. Baseline branch is selected here; each seed has one branch (seed id).

+
+
+
+ + +
+
+

One branch per seed: the seed id is the branch name (e.g. seed-a1b2c3).

+ + + +
+ {% if dashboard.setup_error %} +
+

Git setup required

+

{{ dashboard.setup_error }}

+
+ {% endif %} + {% with daemon_status=dashboard.daemon_status %} + {% include "partials/daemon_status.html" %} + {% endwith %} +
+

Baseline branches

+

Per-branch metrics (last val_bpb, promoted seed). Workflow-managed, read-only: component_system/baseline_branches.json (per-branch mapping), component_system/baseline_metrics.json (baseline run metrics).

+ {% if dashboard.baseline_metrics_by_branch %} +
+ {% for branch, m in dashboard.baseline_metrics_by_branch.items() %} +
+
{{ branch }}
+
val_bpb {{ "%.6f"|format(m.get('last_val_bpb')) if m.get('last_val_bpb') is not none else "—" }} · {{ m.get('promoted_branch') or "—" }}
+
+ {% endfor %} +
+ {% else %} +

No baseline metrics yet. Run the first DCA to establish baseline for a branch.

+ {% endif %} +
+
+

Direct Code Agent

+

Run the configured code agent from the project root with a dedicated single-worker executor. New runs appear in the Do-Check-Action column.

+
+ + + +
+
+
+
+ {% include "partials/dashboard_board.html" %} +
+ {% if detail %} + {% with + seed=detail.seed, + runs=detail.runs, + events=detail.events, + baseline_metrics_for_branch=detail.baseline_metrics_for_branch, + setup_error=detail.setup_error + %} + {% include "partials/seed_detail.html" %} + {% endwith %} + {% else %} +
+ Select a seed to inspect its worktree, plan, runs, logs, and promotion history. +
+ {% endif %} +
+
+
+
+{% endblock %} diff --git a/component_system/web/templates/partials/action_error.html b/component_system/web/templates/partials/action_error.html new file mode 100644 index 000000000..8a856804e --- /dev/null +++ b/component_system/web/templates/partials/action_error.html @@ -0,0 +1,3 @@ +
+ {{ message }} +
diff --git a/component_system/web/templates/partials/daemon_status.html b/component_system/web/templates/partials/daemon_status.html new file mode 100644 index 000000000..75a0d5e2e --- /dev/null +++ b/component_system/web/templates/partials/daemon_status.html @@ -0,0 +1,14 @@ +
+

Daemon: {% if daemon_status == 'running' %}running{% else %}not running{% endif %}

+

Plan and Do-Check-Action runs are executed by the daemon.

+ {% if daemon_status != 'running' %} +

Start it in a terminal:

+

uv run component_system/run.py

+ {% endif %} +
diff --git a/component_system/web/templates/partials/dashboard_board.html b/component_system/web/templates/partials/dashboard_board.html new file mode 100644 index 000000000..df82ab6ad --- /dev/null +++ b/component_system/web/templates/partials/dashboard_board.html @@ -0,0 +1,58 @@ +
+
+

+ Dashboard {{ dashboard.seed_count }} seed{{ 's' if dashboard.seed_count != 1 else '' }} across all stages +

+
+
+ {% for column in dashboard.columns %} +
+
+

{{ column.title }}

+

{{ column.description }}

+
+
+ {% if column.seeds %} + {% for seed in column.seeds %} + {% set is_selected = selected_seed_id == seed.seed_id %} + {% set is_promoted = column.id == 'completed' and seed.status.value == 'promoted' %} + +
+

{{ seed.seed_id }}

+ {{ seed.status.value|replace('_', ' ')|title }} +
+

{{ seed.prompt }}

+ {% if seed.plan %} +

{{ seed.plan.title }}

+ {% endif %} + {% if seed.latest_metrics and seed.latest_metrics.get('val_bpb') is not none %} +

val_bpb {{ "%.4f"|format(seed.latest_metrics.val_bpb) }}{% if seed.latest_signal %} · {{ seed.latest_signal }}{% endif %}

+ {% endif %} +
+ {% endfor %} + {% else %} +
+ No seeds in this stage. +
+ {% endif %} +
+
+ {% endfor %} +
+
diff --git a/component_system/web/templates/partials/seed_detail.html b/component_system/web/templates/partials/seed_detail.html new file mode 100644 index 000000000..93f5439cb --- /dev/null +++ b/component_system/web/templates/partials/seed_detail.html @@ -0,0 +1,326 @@ +
+
+
+ +

{{ seed.seed_id }}

+ {% if can_edit_prompt %} +
+ + + +
+ {% else %} +

{{ seed.prompt }}

+ {% endif %} +
+
+ {% if seed.ralph_loop_enabled %} +
+ +
+ {% else %} +
+ +
+ {% endif %} +
+ +
+
+ +
+
+
+ + {% if setup_error %} +
+ {{ setup_error }} +
+ {% endif %} + +
+
+
+ + {{ seed.status.value|replace('_', ' ')|title }} +
+

Ralph loop: {% if seed.ralph_loop_enabled %}enabled{% else %}disabled{% endif %}

+

Latest signal: {% if seed.latest_signal %}{{ seed.latest_signal }}{% else %}{% endif %}

+
+
+ +
+
Baseline
{{ seed.baseline_branch }}
+
Branch
{{ seed.seed_id }}
+
+
+
+ +
+
Seed worktree
{{ seed.worktree_path or "—" }}
+
+
+
+ +
+
+
+

Plan

+ {% if seed.plan %} +
+
+ +

{{ seed.plan.title }}

+
+
+ +

{{ seed.plan.target_component }}

+
+
+ +

{{ seed.plan.description }}

+
+ {% if seed.plan.commit_sha %} +
+ +

{{ seed.plan.commit_sha }}

+
+ {% endif %} +
+ {% else %} +

No plan yet. Click Run Plan to queue the task; the plan is generated when the daemon runs it.

+ {% endif %} +
+ +
+
+

Runs

+
+ {% if runs and seed.status.value in ['queued', 'planning'] %} +

Runs stay queued until the daemon is running. Start: uv run component_system/run.py

+ {% endif %} +
+ {% if runs %} + {% for run in runs %} +
+
+
+

{% if run.stage.value == 'p' %}Plan{% else %}{{ run.stage.value|upper }}{% endif %} · {{ run.status.value }}

+

{{ run.run_id }}

+
+
+ {% if run.signal %} + {{ run.signal }} + {% endif %} + +
+
+ {% if run.metrics %} +
+ {% for key, value in run.metrics.items() %} +
+
{{ key }}
+
{{ value }}
+
+ {% endfor %} +
+ {% endif %} +
+ + {% endfor %} + {% else %} +

No runs yet. Use Run Plan to start.

+ {% endif %} +
+
+
+ +
+
+

Latest Metrics

+ {% if seed.latest_metrics %} +
+ {% for key, value in seed.latest_metrics.items() %} +
+ +
{{ value }}
+
+ {% endfor %} +
+ {% else %} +

Metrics appear here after Do-Check-Action runs the training entrypoint.

+ {% endif %} +
+ +
+
+

Timeline

+ +
+
+ {% if events %} + {% for event in events %} +
+

{{ event.message }}

+ {% if event.commit_sha %} +

commit: {{ event.commit_sha }}

+ {% endif %} + {% if event.target_branch %} +

target branch: {{ event.target_branch }}

+ {% endif %} +

{{ event.kind }} · {{ event.created_at_human }}

+
+ {% endfor %} + {% else %} +

No events yet.

+ {% endif %} +
+
+
+
+
diff --git a/component_system/web/templates/partials/seed_detail_response.html b/component_system/web/templates/partials/seed_detail_response.html new file mode 100644 index 000000000..ca64317e3 --- /dev/null +++ b/component_system/web/templates/partials/seed_detail_response.html @@ -0,0 +1,4 @@ +{% with oob=True %} + {% include "partials/dashboard_board.html" %} +{% endwith %} +{% include "partials/seed_detail.html" %} diff --git a/component_system/web/templates/seed_detail_page.html b/component_system/web/templates/seed_detail_page.html new file mode 100644 index 000000000..ec7ca146c --- /dev/null +++ b/component_system/web/templates/seed_detail_page.html @@ -0,0 +1,15 @@ +{% extends "base.html" %} +{% block title %}Seed {{ seed.seed_id }}{% endblock %} +{% block content %} + +
+ {% include "partials/seed_detail.html" %} +
+{% endblock %} diff --git a/prepare.py b/prepare.py index 62607b9af..b64b909fc 100644 --- a/prepare.py +++ b/prepare.py @@ -38,7 +38,8 @@ CACHE_DIR = os.path.join(os.path.expanduser("~"), ".cache", "autoresearch") DATA_DIR = os.path.join(CACHE_DIR, "data") TOKENIZER_DIR = os.path.join(CACHE_DIR, "tokenizer") -BASE_URL = "https://huggingface.co/datasets/karpathy/climbmix-400b-shuffle/resolve/main" +HF_ENDPOINT = os.environ.get("HF_ENDPOINT", "https://huggingface.co").rstrip("/") +BASE_URL = f"{HF_ENDPOINT}/datasets/karpathy/climbmix-400b-shuffle/resolve/main" MAX_SHARD = 6542 # the last datashard is shard_06542.parquet VAL_SHARD = MAX_SHARD # pinned validation shard (shard_06542) VAL_FILENAME = f"shard_{VAL_SHARD:05d}.parquet" diff --git a/pyproject.toml b/pyproject.toml index 94ae32989..d95798a99 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -5,15 +5,19 @@ description = "Autonomous pretraining research swarm" readme = "README.md" requires-python = ">=3.10" dependencies = [ + "fastapi>=0.116.0", + "jinja2>=3.1.6", "kernels>=0.11.7", "matplotlib>=3.10.8", "numpy>=2.2.6", "pandas>=2.3.3", "pyarrow>=21.0.0", + "python-multipart>=0.0.20", "requests>=2.32.0", "rustbpe>=0.1.0", "tiktoken>=0.11.0", "torch==2.9.1", + "uvicorn>=0.35.0", ] [tool.uv.sources] diff --git a/scripts/clean_history.py b/scripts/clean_history.py new file mode 100644 index 000000000..df085d0ce --- /dev/null +++ b/scripts/clean_history.py @@ -0,0 +1,147 @@ +#!/usr/bin/env python3 +"""Reset local autoresearch history/runtime artifacts. 
+ +Actions: +1) Checkout main branch (configurable) +2) Remove all extra git worktrees +3) Delete all local branches except main +4) Clear component_system runtime state/history folders +5) Remove .pytest_cache, __pycache__, and results.tsv +""" + +from __future__ import annotations + +import argparse +import shutil +import subprocess +from pathlib import Path + + +def run_git(args: list[str], cwd: Path, dry_run: bool = False) -> list[str]: + cmd = ["git", *args] + if dry_run: + print(f"[dry-run] {' '.join(cmd)}") + return [] + proc = subprocess.run(cmd, cwd=cwd, text=True, capture_output=True) + if proc.returncode != 0: + raise RuntimeError( + f"Command failed: {' '.join(cmd)}\n" + f"stdout:\n{proc.stdout}\n" + f"stderr:\n{proc.stderr}" + ) + return [line for line in proc.stdout.splitlines() if line.strip()] + + +def is_broken_worktree_remove_error(error: RuntimeError) -> bool: + msg = str(error) + return ( + "worktree remove --force" in msg + and "validation failed, cannot remove working tree" in msg + and ".git' does not exist" in msg + ) + + +def remove_children(path: Path, dry_run: bool = False) -> None: + if not path.exists(): + return + for child in path.iterdir(): + if dry_run: + print(f"[dry-run] remove {child}") + continue + if child.is_dir(): + shutil.rmtree(child, ignore_errors=True) + else: + child.unlink(missing_ok=True) + + +def remove_pycache_dirs(repo_root: Path, dry_run: bool = False) -> None: + for pycache in repo_root.rglob("__pycache__"): + parts = set(pycache.parts) + if ".venv" in parts or ".git" in parts: + continue + if pycache.is_dir(): + if dry_run: + print(f"[dry-run] remove {pycache}") + else: + shutil.rmtree(pycache, ignore_errors=True) + + +def main() -> None: + parser = argparse.ArgumentParser(description="Clean local branches/worktrees and runtime history.") + parser.add_argument("--main-branch", default="master", help="Branch to keep. 
Default: master")
+    parser.add_argument("--dry-run", action="store_true", help="Print actions without changing anything")
+    args = parser.parse_args()
+
+    repo_root = Path.cwd().resolve()
+    print(f"Repository: {repo_root}")
+
+    print("Verifying git repository...")
+    run_git(["rev-parse", "--is-inside-work-tree"], cwd=repo_root, dry_run=args.dry_run)
+
+    print(f"Checking out '{args.main_branch}'...")
+    run_git(["checkout", args.main_branch], cwd=repo_root, dry_run=args.dry_run)
+
+    print("Removing extra worktrees...")
+    run_git(["worktree", "prune"], cwd=repo_root, dry_run=args.dry_run)
+    wt_lines = run_git(["worktree", "list", "--porcelain"], cwd=repo_root, dry_run=args.dry_run)
+    worktrees: list[Path] = []
+    for line in wt_lines:
+        if line.startswith("worktree "):
+            worktrees.append(Path(line[len("worktree ") :]).resolve())
+
+    for wt in worktrees:
+        if wt != repo_root:
+            print(f" - removing worktree {wt}")
+            try:
+                run_git(["worktree", "remove", "--force", str(wt)], cwd=repo_root, dry_run=args.dry_run)
+            except RuntimeError as error:
+                if not is_broken_worktree_remove_error(error):
+                    raise
+                print(f" ! 
stale/broken worktree metadata detected, deleting directory: {wt}") + if args.dry_run: + print(f"[dry-run] remove {wt}") + else: + shutil.rmtree(wt, ignore_errors=True) + run_git(["worktree", "prune"], cwd=repo_root, dry_run=args.dry_run) + + print(f"Deleting local branches except '{args.main_branch}'...") + branches = run_git( + ["for-each-ref", "--format=%(refname:short)", "refs/heads"], + cwd=repo_root, + dry_run=args.dry_run, + ) + for branch in branches: + if branch != args.main_branch: + print(f" - deleting branch {branch}") + run_git(["branch", "-D", branch], cwd=repo_root, dry_run=args.dry_run) + + print("Clearing component-system runtime/history artifacts...") + history_root = repo_root / "component_system" / "history" + for name in ("state", "queue", "worktrees", "logs"): + remove_children(history_root / name, dry_run=args.dry_run) + + pytest_cache = repo_root / ".pytest_cache" + if pytest_cache.exists(): + if args.dry_run: + print(f"[dry-run] remove {pytest_cache}") + else: + shutil.rmtree(pytest_cache, ignore_errors=True) + + results_tsv = repo_root / "results.tsv" + if results_tsv.exists(): + if args.dry_run: + print(f"[dry-run] remove {results_tsv}") + else: + results_tsv.unlink(missing_ok=True) + + print("Removing __pycache__ directories...") + remove_pycache_dirs(repo_root, dry_run=args.dry_run) + + print("Done.") + print("Remaining branches:") + for branch in run_git(["branch", "--format=%(refname:short)"], cwd=repo_root, dry_run=args.dry_run): + print(f" {branch}") + + +if __name__ == "__main__": + main() diff --git a/uv.lock b/uv.lock index c840d62f5..931a2d7d9 100644 --- a/uv.lock +++ b/uv.lock @@ -1,5 +1,5 @@ version = 1 -revision = 3 +revision = 2 requires-python = ">=3.10" resolution-markers = [ "python_full_version >= '3.14' and sys_platform == 'linux'", @@ -27,6 +27,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/1e/d3/26bf1008eb3d2daa8ef4cacc7f3bfdc11818d111f7e2d0201bc6e3b49d45/annotated_doc-0.0.4-py3-none-any.whl", hash 
= "sha256:571ac1dc6991c450b25a9c2d84a3705e2ae7a53467b5d111c24fa8baabbed320", size = 5303, upload-time = "2025-11-10T22:07:40.673Z" }, ] +[[package]] +name = "annotated-types" +version = "0.7.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/ee/67/531ea369ba64dcff5ec9c3402f9f51bf748cec26dde048a2f973a4eea7f5/annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89", size = 16081, upload-time = "2024-05-20T21:33:25.928Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/78/b6/6307fbef88d9b5ee7421e68d78a9f162e0da4900bc5f5793f6d3d0e34fb8/annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53", size = 13643, upload-time = "2024-05-20T21:33:24.1Z" }, +] + [[package]] name = "anyio" version = "4.12.1" @@ -46,6 +55,8 @@ name = "autoresearch" version = "0.1.0" source = { virtual = "." } dependencies = [ + { name = "fastapi" }, + { name = "jinja2" }, { name = "kernels" }, { name = "matplotlib" }, { name = "numpy", version = "2.2.6", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.11'" }, @@ -53,23 +64,29 @@ dependencies = [ { name = "pandas", version = "2.3.3", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.11'" }, { name = "pandas", version = "3.0.1", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.11'" }, { name = "pyarrow" }, + { name = "python-multipart" }, { name = "requests" }, { name = "rustbpe" }, { name = "tiktoken" }, { name = "torch" }, + { name = "uvicorn" }, ] [package.metadata] requires-dist = [ + { name = "fastapi", specifier = ">=0.116.0" }, + { name = "jinja2", specifier = ">=3.1.6" }, { name = "kernels", specifier = ">=0.11.7" }, { name = "matplotlib", specifier = ">=3.10.8" }, { name = "numpy", specifier = ">=2.2.6" }, { name = "pandas", specifier = 
">=2.3.3" }, { name = "pyarrow", specifier = ">=21.0.0" }, + { name = "python-multipart", specifier = ">=0.0.20" }, { name = "requests", specifier = ">=2.32.0" }, { name = "rustbpe", specifier = ">=0.1.0" }, { name = "tiktoken", specifier = ">=0.11.0" }, { name = "torch", specifier = "==2.9.1", index = "https://download.pytorch.org/whl/cu128" }, + { name = "uvicorn", specifier = ">=0.35.0" }, ] [[package]] @@ -379,6 +396,22 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/8a/0e/97c33bf5009bdbac74fd2beace167cab3f978feb69cc36f1ef79360d6c4e/exceptiongroup-1.3.1-py3-none-any.whl", hash = "sha256:a7a39a3bd276781e98394987d3a5701d0c4edffb633bb7a5144577f82c773598", size = 16740, upload-time = "2025-11-21T23:01:53.443Z" }, ] +[[package]] +name = "fastapi" +version = "0.135.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "annotated-doc" }, + { name = "pydantic" }, + { name = "starlette" }, + { name = "typing-extensions" }, + { name = "typing-inspection" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/e7/7b/f8e0211e9380f7195ba3f3d40c292594fd81ba8ec4629e3854c353aaca45/fastapi-0.135.1.tar.gz", hash = "sha256:d04115b508d936d254cea545b7312ecaa58a7b3a0f84952535b4c9afae7668cd", size = 394962, upload-time = "2026-03-01T18:18:29.369Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e4/72/42e900510195b23a56bde950d26a51f8b723846bfcaa0286e90287f0422b/fastapi-0.135.1-py3-none-any.whl", hash = "sha256:46e2fc5745924b7c840f71ddd277382af29ce1cdb7d5eab5bf697e3fb9999c9e", size = 116999, upload-time = "2026-03-01T18:18:30.831Z" }, +] + [[package]] name = "filelock" version = "3.24.3" @@ -1524,6 +1557,139 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/50/f2/c0e76a0b451ffdf0cf788932e182758eb7558953f4f27f1aff8e2518b653/pyarrow-23.0.1-cp314-cp314t-win_amd64.whl", hash = "sha256:527e8d899f14bd15b740cd5a54ad56b7f98044955373a17179d5956ddb93d9ce", size = 28365807, upload-time = "2026-02-16T10:14:03.892Z" 
}, ] +[[package]] +name = "pydantic" +version = "2.12.5" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "annotated-types" }, + { name = "pydantic-core" }, + { name = "typing-extensions" }, + { name = "typing-inspection" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/69/44/36f1a6e523abc58ae5f928898e4aca2e0ea509b5aa6f6f392a5d882be928/pydantic-2.12.5.tar.gz", hash = "sha256:4d351024c75c0f085a9febbb665ce8c0c6ec5d30e903bdb6394b7ede26aebb49", size = 821591, upload-time = "2025-11-26T15:11:46.471Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/5a/87/b70ad306ebb6f9b585f114d0ac2137d792b48be34d732d60e597c2f8465a/pydantic-2.12.5-py3-none-any.whl", hash = "sha256:e561593fccf61e8a20fc46dfc2dfe075b8be7d0188df33f221ad1f0139180f9d", size = 463580, upload-time = "2025-11-26T15:11:44.605Z" }, +] + +[[package]] +name = "pydantic-core" +version = "2.41.5" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/71/70/23b021c950c2addd24ec408e9ab05d59b035b39d97cdc1130e1bce647bb6/pydantic_core-2.41.5.tar.gz", hash = "sha256:08daa51ea16ad373ffd5e7606252cc32f07bc72b28284b6bc9c6df804816476e", size = 460952, upload-time = "2025-11-04T13:43:49.098Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c6/90/32c9941e728d564b411d574d8ee0cf09b12ec978cb22b294995bae5549a5/pydantic_core-2.41.5-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:77b63866ca88d804225eaa4af3e664c5faf3568cea95360d21f4725ab6e07146", size = 2107298, upload-time = "2025-11-04T13:39:04.116Z" }, + { url = "https://files.pythonhosted.org/packages/fb/a8/61c96a77fe28993d9a6fb0f4127e05430a267b235a124545d79fea46dd65/pydantic_core-2.41.5-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:dfa8a0c812ac681395907e71e1274819dec685fec28273a28905df579ef137e2", size = 1901475, upload-time = "2025-11-04T13:39:06.055Z" }, + { url = 
"https://files.pythonhosted.org/packages/5d/b6/338abf60225acc18cdc08b4faef592d0310923d19a87fba1faf05af5346e/pydantic_core-2.41.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5921a4d3ca3aee735d9fd163808f5e8dd6c6972101e4adbda9a4667908849b97", size = 1918815, upload-time = "2025-11-04T13:39:10.41Z" }, + { url = "https://files.pythonhosted.org/packages/d1/1c/2ed0433e682983d8e8cba9c8d8ef274d4791ec6a6f24c58935b90e780e0a/pydantic_core-2.41.5-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e25c479382d26a2a41b7ebea1043564a937db462816ea07afa8a44c0866d52f9", size = 2065567, upload-time = "2025-11-04T13:39:12.244Z" }, + { url = "https://files.pythonhosted.org/packages/b3/24/cf84974ee7d6eae06b9e63289b7b8f6549d416b5c199ca2d7ce13bbcf619/pydantic_core-2.41.5-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f547144f2966e1e16ae626d8ce72b4cfa0caedc7fa28052001c94fb2fcaa1c52", size = 2230442, upload-time = "2025-11-04T13:39:13.962Z" }, + { url = "https://files.pythonhosted.org/packages/fd/21/4e287865504b3edc0136c89c9c09431be326168b1eb7841911cbc877a995/pydantic_core-2.41.5-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6f52298fbd394f9ed112d56f3d11aabd0d5bd27beb3084cc3d8ad069483b8941", size = 2350956, upload-time = "2025-11-04T13:39:15.889Z" }, + { url = "https://files.pythonhosted.org/packages/a8/76/7727ef2ffa4b62fcab916686a68a0426b9b790139720e1934e8ba797e238/pydantic_core-2.41.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:100baa204bb412b74fe285fb0f3a385256dad1d1879f0a5cb1499ed2e83d132a", size = 2068253, upload-time = "2025-11-04T13:39:17.403Z" }, + { url = "https://files.pythonhosted.org/packages/d5/8c/a4abfc79604bcb4c748e18975c44f94f756f08fb04218d5cb87eb0d3a63e/pydantic_core-2.41.5-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:05a2c8852530ad2812cb7914dc61a1125dc4e06252ee98e5638a12da6cc6fb6c", size = 2177050, 
upload-time = "2025-11-04T13:39:19.351Z" }, + { url = "https://files.pythonhosted.org/packages/67/b1/de2e9a9a79b480f9cb0b6e8b6ba4c50b18d4e89852426364c66aa82bb7b3/pydantic_core-2.41.5-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:29452c56df2ed968d18d7e21f4ab0ac55e71dc59524872f6fc57dcf4a3249ed2", size = 2147178, upload-time = "2025-11-04T13:39:21Z" }, + { url = "https://files.pythonhosted.org/packages/16/c1/dfb33f837a47b20417500efaa0378adc6635b3c79e8369ff7a03c494b4ac/pydantic_core-2.41.5-cp310-cp310-musllinux_1_1_armv7l.whl", hash = "sha256:d5160812ea7a8a2ffbe233d8da666880cad0cbaf5d4de74ae15c313213d62556", size = 2341833, upload-time = "2025-11-04T13:39:22.606Z" }, + { url = "https://files.pythonhosted.org/packages/47/36/00f398642a0f4b815a9a558c4f1dca1b4020a7d49562807d7bc9ff279a6c/pydantic_core-2.41.5-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:df3959765b553b9440adfd3c795617c352154e497a4eaf3752555cfb5da8fc49", size = 2321156, upload-time = "2025-11-04T13:39:25.843Z" }, + { url = "https://files.pythonhosted.org/packages/7e/70/cad3acd89fde2010807354d978725ae111ddf6d0ea46d1ea1775b5c1bd0c/pydantic_core-2.41.5-cp310-cp310-win32.whl", hash = "sha256:1f8d33a7f4d5a7889e60dc39856d76d09333d8a6ed0f5f1190635cbec70ec4ba", size = 1989378, upload-time = "2025-11-04T13:39:27.92Z" }, + { url = "https://files.pythonhosted.org/packages/76/92/d338652464c6c367e5608e4488201702cd1cbb0f33f7b6a85a60fe5f3720/pydantic_core-2.41.5-cp310-cp310-win_amd64.whl", hash = "sha256:62de39db01b8d593e45871af2af9e497295db8d73b085f6bfd0b18c83c70a8f9", size = 2013622, upload-time = "2025-11-04T13:39:29.848Z" }, + { url = "https://files.pythonhosted.org/packages/e8/72/74a989dd9f2084b3d9530b0915fdda64ac48831c30dbf7c72a41a5232db8/pydantic_core-2.41.5-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:a3a52f6156e73e7ccb0f8cced536adccb7042be67cb45f9562e12b319c119da6", size = 2105873, upload-time = "2025-11-04T13:39:31.373Z" }, + { url = 
"https://files.pythonhosted.org/packages/12/44/37e403fd9455708b3b942949e1d7febc02167662bf1a7da5b78ee1ea2842/pydantic_core-2.41.5-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:7f3bf998340c6d4b0c9a2f02d6a400e51f123b59565d74dc60d252ce888c260b", size = 1899826, upload-time = "2025-11-04T13:39:32.897Z" }, + { url = "https://files.pythonhosted.org/packages/33/7f/1d5cab3ccf44c1935a359d51a8a2a9e1a654b744b5e7f80d41b88d501eec/pydantic_core-2.41.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:378bec5c66998815d224c9ca994f1e14c0c21cb95d2f52b6021cc0b2a58f2a5a", size = 1917869, upload-time = "2025-11-04T13:39:34.469Z" }, + { url = "https://files.pythonhosted.org/packages/6e/6a/30d94a9674a7fe4f4744052ed6c5e083424510be1e93da5bc47569d11810/pydantic_core-2.41.5-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e7b576130c69225432866fe2f4a469a85a54ade141d96fd396dffcf607b558f8", size = 2063890, upload-time = "2025-11-04T13:39:36.053Z" }, + { url = "https://files.pythonhosted.org/packages/50/be/76e5d46203fcb2750e542f32e6c371ffa9b8ad17364cf94bb0818dbfb50c/pydantic_core-2.41.5-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6cb58b9c66f7e4179a2d5e0f849c48eff5c1fca560994d6eb6543abf955a149e", size = 2229740, upload-time = "2025-11-04T13:39:37.753Z" }, + { url = "https://files.pythonhosted.org/packages/d3/ee/fed784df0144793489f87db310a6bbf8118d7b630ed07aa180d6067e653a/pydantic_core-2.41.5-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:88942d3a3dff3afc8288c21e565e476fc278902ae4d6d134f1eeda118cc830b1", size = 2350021, upload-time = "2025-11-04T13:39:40.94Z" }, + { url = "https://files.pythonhosted.org/packages/c8/be/8fed28dd0a180dca19e72c233cbf58efa36df055e5b9d90d64fd1740b828/pydantic_core-2.41.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f31d95a179f8d64d90f6831d71fa93290893a33148d890ba15de25642c5d075b", size = 2066378, upload-time = 
"2025-11-04T13:39:42.523Z" }, + { url = "https://files.pythonhosted.org/packages/b0/3b/698cf8ae1d536a010e05121b4958b1257f0b5522085e335360e53a6b1c8b/pydantic_core-2.41.5-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:c1df3d34aced70add6f867a8cf413e299177e0c22660cc767218373d0779487b", size = 2175761, upload-time = "2025-11-04T13:39:44.553Z" }, + { url = "https://files.pythonhosted.org/packages/b8/ba/15d537423939553116dea94ce02f9c31be0fa9d0b806d427e0308ec17145/pydantic_core-2.41.5-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:4009935984bd36bd2c774e13f9a09563ce8de4abaa7226f5108262fa3e637284", size = 2146303, upload-time = "2025-11-04T13:39:46.238Z" }, + { url = "https://files.pythonhosted.org/packages/58/7f/0de669bf37d206723795f9c90c82966726a2ab06c336deba4735b55af431/pydantic_core-2.41.5-cp311-cp311-musllinux_1_1_armv7l.whl", hash = "sha256:34a64bc3441dc1213096a20fe27e8e128bd3ff89921706e83c0b1ac971276594", size = 2340355, upload-time = "2025-11-04T13:39:48.002Z" }, + { url = "https://files.pythonhosted.org/packages/e5/de/e7482c435b83d7e3c3ee5ee4451f6e8973cff0eb6007d2872ce6383f6398/pydantic_core-2.41.5-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:c9e19dd6e28fdcaa5a1de679aec4141f691023916427ef9bae8584f9c2fb3b0e", size = 2319875, upload-time = "2025-11-04T13:39:49.705Z" }, + { url = "https://files.pythonhosted.org/packages/fe/e6/8c9e81bb6dd7560e33b9053351c29f30c8194b72f2d6932888581f503482/pydantic_core-2.41.5-cp311-cp311-win32.whl", hash = "sha256:2c010c6ded393148374c0f6f0bf89d206bf3217f201faa0635dcd56bd1520f6b", size = 1987549, upload-time = "2025-11-04T13:39:51.842Z" }, + { url = "https://files.pythonhosted.org/packages/11/66/f14d1d978ea94d1bc21fc98fcf570f9542fe55bfcc40269d4e1a21c19bf7/pydantic_core-2.41.5-cp311-cp311-win_amd64.whl", hash = "sha256:76ee27c6e9c7f16f47db7a94157112a2f3a00e958bc626e2f4ee8bec5c328fbe", size = 2011305, upload-time = "2025-11-04T13:39:53.485Z" }, + { url = 
"https://files.pythonhosted.org/packages/56/d8/0e271434e8efd03186c5386671328154ee349ff0354d83c74f5caaf096ed/pydantic_core-2.41.5-cp311-cp311-win_arm64.whl", hash = "sha256:4bc36bbc0b7584de96561184ad7f012478987882ebf9f9c389b23f432ea3d90f", size = 1972902, upload-time = "2025-11-04T13:39:56.488Z" }, + { url = "https://files.pythonhosted.org/packages/5f/5d/5f6c63eebb5afee93bcaae4ce9a898f3373ca23df3ccaef086d0233a35a7/pydantic_core-2.41.5-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:f41a7489d32336dbf2199c8c0a215390a751c5b014c2c1c5366e817202e9cdf7", size = 2110990, upload-time = "2025-11-04T13:39:58.079Z" }, + { url = "https://files.pythonhosted.org/packages/aa/32/9c2e8ccb57c01111e0fd091f236c7b371c1bccea0fa85247ac55b1e2b6b6/pydantic_core-2.41.5-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:070259a8818988b9a84a449a2a7337c7f430a22acc0859c6b110aa7212a6d9c0", size = 1896003, upload-time = "2025-11-04T13:39:59.956Z" }, + { url = "https://files.pythonhosted.org/packages/68/b8/a01b53cb0e59139fbc9e4fda3e9724ede8de279097179be4ff31f1abb65a/pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e96cea19e34778f8d59fe40775a7a574d95816eb150850a85a7a4c8f4b94ac69", size = 1919200, upload-time = "2025-11-04T13:40:02.241Z" }, + { url = "https://files.pythonhosted.org/packages/38/de/8c36b5198a29bdaade07b5985e80a233a5ac27137846f3bc2d3b40a47360/pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ed2e99c456e3fadd05c991f8f437ef902e00eedf34320ba2b0842bd1c3ca3a75", size = 2052578, upload-time = "2025-11-04T13:40:04.401Z" }, + { url = "https://files.pythonhosted.org/packages/00/b5/0e8e4b5b081eac6cb3dbb7e60a65907549a1ce035a724368c330112adfdd/pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:65840751b72fbfd82c3c640cff9284545342a4f1eb1586ad0636955b261b0b05", size = 2208504, upload-time = "2025-11-04T13:40:06.072Z" }, + { url = 
"https://files.pythonhosted.org/packages/77/56/87a61aad59c7c5b9dc8caad5a41a5545cba3810c3e828708b3d7404f6cef/pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e536c98a7626a98feb2d3eaf75944ef6f3dbee447e1f841eae16f2f0a72d8ddc", size = 2335816, upload-time = "2025-11-04T13:40:07.835Z" }, + { url = "https://files.pythonhosted.org/packages/0d/76/941cc9f73529988688a665a5c0ecff1112b3d95ab48f81db5f7606f522d3/pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eceb81a8d74f9267ef4081e246ffd6d129da5d87e37a77c9bde550cb04870c1c", size = 2075366, upload-time = "2025-11-04T13:40:09.804Z" }, + { url = "https://files.pythonhosted.org/packages/d3/43/ebef01f69baa07a482844faaa0a591bad1ef129253ffd0cdaa9d8a7f72d3/pydantic_core-2.41.5-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d38548150c39b74aeeb0ce8ee1d8e82696f4a4e16ddc6de7b1d8823f7de4b9b5", size = 2171698, upload-time = "2025-11-04T13:40:12.004Z" }, + { url = "https://files.pythonhosted.org/packages/b1/87/41f3202e4193e3bacfc2c065fab7706ebe81af46a83d3e27605029c1f5a6/pydantic_core-2.41.5-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:c23e27686783f60290e36827f9c626e63154b82b116d7fe9adba1fda36da706c", size = 2132603, upload-time = "2025-11-04T13:40:13.868Z" }, + { url = "https://files.pythonhosted.org/packages/49/7d/4c00df99cb12070b6bccdef4a195255e6020a550d572768d92cc54dba91a/pydantic_core-2.41.5-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:482c982f814460eabe1d3bb0adfdc583387bd4691ef00b90575ca0d2b6fe2294", size = 2329591, upload-time = "2025-11-04T13:40:15.672Z" }, + { url = "https://files.pythonhosted.org/packages/cc/6a/ebf4b1d65d458f3cda6a7335d141305dfa19bdc61140a884d165a8a1bbc7/pydantic_core-2.41.5-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:bfea2a5f0b4d8d43adf9d7b8bf019fb46fdd10a2e5cde477fbcb9d1fa08c68e1", size = 2319068, upload-time = "2025-11-04T13:40:17.532Z" }, + { url = 
"https://files.pythonhosted.org/packages/49/3b/774f2b5cd4192d5ab75870ce4381fd89cf218af999515baf07e7206753f0/pydantic_core-2.41.5-cp312-cp312-win32.whl", hash = "sha256:b74557b16e390ec12dca509bce9264c3bbd128f8a2c376eaa68003d7f327276d", size = 1985908, upload-time = "2025-11-04T13:40:19.309Z" }, + { url = "https://files.pythonhosted.org/packages/86/45/00173a033c801cacf67c190fef088789394feaf88a98a7035b0e40d53dc9/pydantic_core-2.41.5-cp312-cp312-win_amd64.whl", hash = "sha256:1962293292865bca8e54702b08a4f26da73adc83dd1fcf26fbc875b35d81c815", size = 2020145, upload-time = "2025-11-04T13:40:21.548Z" }, + { url = "https://files.pythonhosted.org/packages/f9/22/91fbc821fa6d261b376a3f73809f907cec5ca6025642c463d3488aad22fb/pydantic_core-2.41.5-cp312-cp312-win_arm64.whl", hash = "sha256:1746d4a3d9a794cacae06a5eaaccb4b8643a131d45fbc9af23e353dc0a5ba5c3", size = 1976179, upload-time = "2025-11-04T13:40:23.393Z" }, + { url = "https://files.pythonhosted.org/packages/87/06/8806241ff1f70d9939f9af039c6c35f2360cf16e93c2ca76f184e76b1564/pydantic_core-2.41.5-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:941103c9be18ac8daf7b7adca8228f8ed6bb7a1849020f643b3a14d15b1924d9", size = 2120403, upload-time = "2025-11-04T13:40:25.248Z" }, + { url = "https://files.pythonhosted.org/packages/94/02/abfa0e0bda67faa65fef1c84971c7e45928e108fe24333c81f3bfe35d5f5/pydantic_core-2.41.5-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:112e305c3314f40c93998e567879e887a3160bb8689ef3d2c04b6cc62c33ac34", size = 1896206, upload-time = "2025-11-04T13:40:27.099Z" }, + { url = "https://files.pythonhosted.org/packages/15/df/a4c740c0943e93e6500f9eb23f4ca7ec9bf71b19e608ae5b579678c8d02f/pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0cbaad15cb0c90aa221d43c00e77bb33c93e8d36e0bf74760cd00e732d10a6a0", size = 1919307, upload-time = "2025-11-04T13:40:29.806Z" }, + { url = 
"https://files.pythonhosted.org/packages/9a/e3/6324802931ae1d123528988e0e86587c2072ac2e5394b4bc2bc34b61ff6e/pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:03ca43e12fab6023fc79d28ca6b39b05f794ad08ec2feccc59a339b02f2b3d33", size = 2063258, upload-time = "2025-11-04T13:40:33.544Z" }, + { url = "https://files.pythonhosted.org/packages/c9/d4/2230d7151d4957dd79c3044ea26346c148c98fbf0ee6ebd41056f2d62ab5/pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:dc799088c08fa04e43144b164feb0c13f9a0bc40503f8df3e9fde58a3c0c101e", size = 2214917, upload-time = "2025-11-04T13:40:35.479Z" }, + { url = "https://files.pythonhosted.org/packages/e6/9f/eaac5df17a3672fef0081b6c1bb0b82b33ee89aa5cec0d7b05f52fd4a1fa/pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:97aeba56665b4c3235a0e52b2c2f5ae9cd071b8a8310ad27bddb3f7fb30e9aa2", size = 2332186, upload-time = "2025-11-04T13:40:37.436Z" }, + { url = "https://files.pythonhosted.org/packages/cf/4e/35a80cae583a37cf15604b44240e45c05e04e86f9cfd766623149297e971/pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:406bf18d345822d6c21366031003612b9c77b3e29ffdb0f612367352aab7d586", size = 2073164, upload-time = "2025-11-04T13:40:40.289Z" }, + { url = "https://files.pythonhosted.org/packages/bf/e3/f6e262673c6140dd3305d144d032f7bd5f7497d3871c1428521f19f9efa2/pydantic_core-2.41.5-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b93590ae81f7010dbe380cdeab6f515902ebcbefe0b9327cc4804d74e93ae69d", size = 2179146, upload-time = "2025-11-04T13:40:42.809Z" }, + { url = "https://files.pythonhosted.org/packages/75/c7/20bd7fc05f0c6ea2056a4565c6f36f8968c0924f19b7d97bbfea55780e73/pydantic_core-2.41.5-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:01a3d0ab748ee531f4ea6c3e48ad9dac84ddba4b0d82291f87248f2f9de8d740", size = 2137788, upload-time = 
"2025-11-04T13:40:44.752Z" }, + { url = "https://files.pythonhosted.org/packages/3a/8d/34318ef985c45196e004bc46c6eab2eda437e744c124ef0dbe1ff2c9d06b/pydantic_core-2.41.5-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:6561e94ba9dacc9c61bce40e2d6bdc3bfaa0259d3ff36ace3b1e6901936d2e3e", size = 2340133, upload-time = "2025-11-04T13:40:46.66Z" }, + { url = "https://files.pythonhosted.org/packages/9c/59/013626bf8c78a5a5d9350d12e7697d3d4de951a75565496abd40ccd46bee/pydantic_core-2.41.5-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:915c3d10f81bec3a74fbd4faebe8391013ba61e5a1a8d48c4455b923bdda7858", size = 2324852, upload-time = "2025-11-04T13:40:48.575Z" }, + { url = "https://files.pythonhosted.org/packages/1a/d9/c248c103856f807ef70c18a4f986693a46a8ffe1602e5d361485da502d20/pydantic_core-2.41.5-cp313-cp313-win32.whl", hash = "sha256:650ae77860b45cfa6e2cdafc42618ceafab3a2d9a3811fcfbd3bbf8ac3c40d36", size = 1994679, upload-time = "2025-11-04T13:40:50.619Z" }, + { url = "https://files.pythonhosted.org/packages/9e/8b/341991b158ddab181cff136acd2552c9f35bd30380422a639c0671e99a91/pydantic_core-2.41.5-cp313-cp313-win_amd64.whl", hash = "sha256:79ec52ec461e99e13791ec6508c722742ad745571f234ea6255bed38c6480f11", size = 2019766, upload-time = "2025-11-04T13:40:52.631Z" }, + { url = "https://files.pythonhosted.org/packages/73/7d/f2f9db34af103bea3e09735bb40b021788a5e834c81eedb541991badf8f5/pydantic_core-2.41.5-cp313-cp313-win_arm64.whl", hash = "sha256:3f84d5c1b4ab906093bdc1ff10484838aca54ef08de4afa9de0f5f14d69639cd", size = 1981005, upload-time = "2025-11-04T13:40:54.734Z" }, + { url = "https://files.pythonhosted.org/packages/ea/28/46b7c5c9635ae96ea0fbb779e271a38129df2550f763937659ee6c5dbc65/pydantic_core-2.41.5-cp314-cp314-macosx_10_12_x86_64.whl", hash = "sha256:3f37a19d7ebcdd20b96485056ba9e8b304e27d9904d233d7b1015db320e51f0a", size = 2119622, upload-time = "2025-11-04T13:40:56.68Z" }, + { url = 
"https://files.pythonhosted.org/packages/74/1a/145646e5687e8d9a1e8d09acb278c8535ebe9e972e1f162ed338a622f193/pydantic_core-2.41.5-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:1d1d9764366c73f996edd17abb6d9d7649a7eb690006ab6adbda117717099b14", size = 1891725, upload-time = "2025-11-04T13:40:58.807Z" }, + { url = "https://files.pythonhosted.org/packages/23/04/e89c29e267b8060b40dca97bfc64a19b2a3cf99018167ea1677d96368273/pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:25e1c2af0fce638d5f1988b686f3b3ea8cd7de5f244ca147c777769e798a9cd1", size = 1915040, upload-time = "2025-11-04T13:41:00.853Z" }, + { url = "https://files.pythonhosted.org/packages/84/a3/15a82ac7bd97992a82257f777b3583d3e84bdb06ba6858f745daa2ec8a85/pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:506d766a8727beef16b7adaeb8ee6217c64fc813646b424d0804d67c16eddb66", size = 2063691, upload-time = "2025-11-04T13:41:03.504Z" }, + { url = "https://files.pythonhosted.org/packages/74/9b/0046701313c6ef08c0c1cf0e028c67c770a4e1275ca73131563c5f2a310a/pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4819fa52133c9aa3c387b3328f25c1facc356491e6135b459f1de698ff64d869", size = 2213897, upload-time = "2025-11-04T13:41:05.804Z" }, + { url = "https://files.pythonhosted.org/packages/8a/cd/6bac76ecd1b27e75a95ca3a9a559c643b3afcd2dd62086d4b7a32a18b169/pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2b761d210c9ea91feda40d25b4efe82a1707da2ef62901466a42492c028553a2", size = 2333302, upload-time = "2025-11-04T13:41:07.809Z" }, + { url = "https://files.pythonhosted.org/packages/4c/d2/ef2074dc020dd6e109611a8be4449b98cd25e1b9b8a303c2f0fca2f2bcf7/pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:22f0fb8c1c583a3b6f24df2470833b40207e907b90c928cc8d3594b76f874375", size = 2064877, upload-time = 
"2025-11-04T13:41:09.827Z" }, + { url = "https://files.pythonhosted.org/packages/18/66/e9db17a9a763d72f03de903883c057b2592c09509ccfe468187f2a2eef29/pydantic_core-2.41.5-cp314-cp314-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2782c870e99878c634505236d81e5443092fba820f0373997ff75f90f68cd553", size = 2180680, upload-time = "2025-11-04T13:41:12.379Z" }, + { url = "https://files.pythonhosted.org/packages/d3/9e/3ce66cebb929f3ced22be85d4c2399b8e85b622db77dad36b73c5387f8f8/pydantic_core-2.41.5-cp314-cp314-musllinux_1_1_aarch64.whl", hash = "sha256:0177272f88ab8312479336e1d777f6b124537d47f2123f89cb37e0accea97f90", size = 2138960, upload-time = "2025-11-04T13:41:14.627Z" }, + { url = "https://files.pythonhosted.org/packages/a6/62/205a998f4327d2079326b01abee48e502ea739d174f0a89295c481a2272e/pydantic_core-2.41.5-cp314-cp314-musllinux_1_1_armv7l.whl", hash = "sha256:63510af5e38f8955b8ee5687740d6ebf7c2a0886d15a6d65c32814613681bc07", size = 2339102, upload-time = "2025-11-04T13:41:16.868Z" }, + { url = "https://files.pythonhosted.org/packages/3c/0d/f05e79471e889d74d3d88f5bd20d0ed189ad94c2423d81ff8d0000aab4ff/pydantic_core-2.41.5-cp314-cp314-musllinux_1_1_x86_64.whl", hash = "sha256:e56ba91f47764cc14f1daacd723e3e82d1a89d783f0f5afe9c364b8bb491ccdb", size = 2326039, upload-time = "2025-11-04T13:41:18.934Z" }, + { url = "https://files.pythonhosted.org/packages/ec/e1/e08a6208bb100da7e0c4b288eed624a703f4d129bde2da475721a80cab32/pydantic_core-2.41.5-cp314-cp314-win32.whl", hash = "sha256:aec5cf2fd867b4ff45b9959f8b20ea3993fc93e63c7363fe6851424c8a7e7c23", size = 1995126, upload-time = "2025-11-04T13:41:21.418Z" }, + { url = "https://files.pythonhosted.org/packages/48/5d/56ba7b24e9557f99c9237e29f5c09913c81eeb2f3217e40e922353668092/pydantic_core-2.41.5-cp314-cp314-win_amd64.whl", hash = "sha256:8e7c86f27c585ef37c35e56a96363ab8de4e549a95512445b85c96d3e2f7c1bf", size = 2015489, upload-time = "2025-11-04T13:41:24.076Z" }, + { url = 
"https://files.pythonhosted.org/packages/4e/bb/f7a190991ec9e3e0ba22e4993d8755bbc4a32925c0b5b42775c03e8148f9/pydantic_core-2.41.5-cp314-cp314-win_arm64.whl", hash = "sha256:e672ba74fbc2dc8eea59fb6d4aed6845e6905fc2a8afe93175d94a83ba2a01a0", size = 1977288, upload-time = "2025-11-04T13:41:26.33Z" }, + { url = "https://files.pythonhosted.org/packages/92/ed/77542d0c51538e32e15afe7899d79efce4b81eee631d99850edc2f5e9349/pydantic_core-2.41.5-cp314-cp314t-macosx_10_12_x86_64.whl", hash = "sha256:8566def80554c3faa0e65ac30ab0932b9e3a5cd7f8323764303d468e5c37595a", size = 2120255, upload-time = "2025-11-04T13:41:28.569Z" }, + { url = "https://files.pythonhosted.org/packages/bb/3d/6913dde84d5be21e284439676168b28d8bbba5600d838b9dca99de0fad71/pydantic_core-2.41.5-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:b80aa5095cd3109962a298ce14110ae16b8c1aece8b72f9dafe81cf597ad80b3", size = 1863760, upload-time = "2025-11-04T13:41:31.055Z" }, + { url = "https://files.pythonhosted.org/packages/5a/f0/e5e6b99d4191da102f2b0eb9687aaa7f5bea5d9964071a84effc3e40f997/pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3006c3dd9ba34b0c094c544c6006cc79e87d8612999f1a5d43b769b89181f23c", size = 1878092, upload-time = "2025-11-04T13:41:33.21Z" }, + { url = "https://files.pythonhosted.org/packages/71/48/36fb760642d568925953bcc8116455513d6e34c4beaa37544118c36aba6d/pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:72f6c8b11857a856bcfa48c86f5368439f74453563f951e473514579d44aa612", size = 2053385, upload-time = "2025-11-04T13:41:35.508Z" }, + { url = "https://files.pythonhosted.org/packages/20/25/92dc684dd8eb75a234bc1c764b4210cf2646479d54b47bf46061657292a8/pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5cb1b2f9742240e4bb26b652a5aeb840aa4b417c7748b6f8387927bc6e45e40d", size = 2218832, upload-time = "2025-11-04T13:41:37.732Z" }, + { url = 
"https://files.pythonhosted.org/packages/e2/09/f53e0b05023d3e30357d82eb35835d0f6340ca344720a4599cd663dca599/pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:bd3d54f38609ff308209bd43acea66061494157703364ae40c951f83ba99a1a9", size = 2327585, upload-time = "2025-11-04T13:41:40Z" }, + { url = "https://files.pythonhosted.org/packages/aa/4e/2ae1aa85d6af35a39b236b1b1641de73f5a6ac4d5a7509f77b814885760c/pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2ff4321e56e879ee8d2a879501c8e469414d948f4aba74a2d4593184eb326660", size = 2041078, upload-time = "2025-11-04T13:41:42.323Z" }, + { url = "https://files.pythonhosted.org/packages/cd/13/2e215f17f0ef326fc72afe94776edb77525142c693767fc347ed6288728d/pydantic_core-2.41.5-cp314-cp314t-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d0d2568a8c11bf8225044aa94409e21da0cb09dcdafe9ecd10250b2baad531a9", size = 2173914, upload-time = "2025-11-04T13:41:45.221Z" }, + { url = "https://files.pythonhosted.org/packages/02/7a/f999a6dcbcd0e5660bc348a3991c8915ce6599f4f2c6ac22f01d7a10816c/pydantic_core-2.41.5-cp314-cp314t-musllinux_1_1_aarch64.whl", hash = "sha256:a39455728aabd58ceabb03c90e12f71fd30fa69615760a075b9fec596456ccc3", size = 2129560, upload-time = "2025-11-04T13:41:47.474Z" }, + { url = "https://files.pythonhosted.org/packages/3a/b1/6c990ac65e3b4c079a4fb9f5b05f5b013afa0f4ed6780a3dd236d2cbdc64/pydantic_core-2.41.5-cp314-cp314t-musllinux_1_1_armv7l.whl", hash = "sha256:239edca560d05757817c13dc17c50766136d21f7cd0fac50295499ae24f90fdf", size = 2329244, upload-time = "2025-11-04T13:41:49.992Z" }, + { url = "https://files.pythonhosted.org/packages/d9/02/3c562f3a51afd4d88fff8dffb1771b30cfdfd79befd9883ee094f5b6c0d8/pydantic_core-2.41.5-cp314-cp314t-musllinux_1_1_x86_64.whl", hash = "sha256:2a5e06546e19f24c6a96a129142a75cee553cc018ffee48a460059b1185f4470", size = 2331955, upload-time = "2025-11-04T13:41:54.079Z" }, + { url = 
"https://files.pythonhosted.org/packages/5c/96/5fb7d8c3c17bc8c62fdb031c47d77a1af698f1d7a406b0f79aaa1338f9ad/pydantic_core-2.41.5-cp314-cp314t-win32.whl", hash = "sha256:b4ececa40ac28afa90871c2cc2b9ffd2ff0bf749380fbdf57d165fd23da353aa", size = 1988906, upload-time = "2025-11-04T13:41:56.606Z" }, + { url = "https://files.pythonhosted.org/packages/22/ed/182129d83032702912c2e2d8bbe33c036f342cc735737064668585dac28f/pydantic_core-2.41.5-cp314-cp314t-win_amd64.whl", hash = "sha256:80aa89cad80b32a912a65332f64a4450ed00966111b6615ca6816153d3585a8c", size = 1981607, upload-time = "2025-11-04T13:41:58.889Z" }, + { url = "https://files.pythonhosted.org/packages/9f/ed/068e41660b832bb0b1aa5b58011dea2a3fe0ba7861ff38c4d4904c1c1a99/pydantic_core-2.41.5-cp314-cp314t-win_arm64.whl", hash = "sha256:35b44f37a3199f771c3eaa53051bc8a70cd7b54f333531c59e29fd4db5d15008", size = 1974769, upload-time = "2025-11-04T13:42:01.186Z" }, + { url = "https://files.pythonhosted.org/packages/11/72/90fda5ee3b97e51c494938a4a44c3a35a9c96c19bba12372fb9c634d6f57/pydantic_core-2.41.5-graalpy311-graalpy242_311_native-macosx_10_12_x86_64.whl", hash = "sha256:b96d5f26b05d03cc60f11a7761a5ded1741da411e7fe0909e27a5e6a0cb7b034", size = 2115441, upload-time = "2025-11-04T13:42:39.557Z" }, + { url = "https://files.pythonhosted.org/packages/1f/53/8942f884fa33f50794f119012dc6a1a02ac43a56407adaac20463df8e98f/pydantic_core-2.41.5-graalpy311-graalpy242_311_native-macosx_11_0_arm64.whl", hash = "sha256:634e8609e89ceecea15e2d61bc9ac3718caaaa71963717bf3c8f38bfde64242c", size = 1930291, upload-time = "2025-11-04T13:42:42.169Z" }, + { url = "https://files.pythonhosted.org/packages/79/c8/ecb9ed9cd942bce09fc888ee960b52654fbdbede4ba6c2d6e0d3b1d8b49c/pydantic_core-2.41.5-graalpy311-graalpy242_311_native-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:93e8740d7503eb008aa2df04d3b9735f845d43ae845e6dcd2be0b55a2da43cd2", size = 1948632, upload-time = "2025-11-04T13:42:44.564Z" }, + { url = 
"https://files.pythonhosted.org/packages/2e/1b/687711069de7efa6af934e74f601e2a4307365e8fdc404703afc453eab26/pydantic_core-2.41.5-graalpy311-graalpy242_311_native-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f15489ba13d61f670dcc96772e733aad1a6f9c429cc27574c6cdaed82d0146ad", size = 2138905, upload-time = "2025-11-04T13:42:47.156Z" }, + { url = "https://files.pythonhosted.org/packages/09/32/59b0c7e63e277fa7911c2fc70ccfb45ce4b98991e7ef37110663437005af/pydantic_core-2.41.5-graalpy312-graalpy250_312_native-macosx_10_12_x86_64.whl", hash = "sha256:7da7087d756b19037bc2c06edc6c170eeef3c3bafcb8f532ff17d64dc427adfd", size = 2110495, upload-time = "2025-11-04T13:42:49.689Z" }, + { url = "https://files.pythonhosted.org/packages/aa/81/05e400037eaf55ad400bcd318c05bb345b57e708887f07ddb2d20e3f0e98/pydantic_core-2.41.5-graalpy312-graalpy250_312_native-macosx_11_0_arm64.whl", hash = "sha256:aabf5777b5c8ca26f7824cb4a120a740c9588ed58df9b2d196ce92fba42ff8dc", size = 1915388, upload-time = "2025-11-04T13:42:52.215Z" }, + { url = "https://files.pythonhosted.org/packages/6e/0d/e3549b2399f71d56476b77dbf3cf8937cec5cd70536bdc0e374a421d0599/pydantic_core-2.41.5-graalpy312-graalpy250_312_native-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c007fe8a43d43b3969e8469004e9845944f1a80e6acd47c150856bb87f230c56", size = 1942879, upload-time = "2025-11-04T13:42:56.483Z" }, + { url = "https://files.pythonhosted.org/packages/f7/07/34573da085946b6a313d7c42f82f16e8920bfd730665de2d11c0c37a74b5/pydantic_core-2.41.5-graalpy312-graalpy250_312_native-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:76d0819de158cd855d1cbb8fcafdf6f5cf1eb8e470abe056d5d161106e38062b", size = 2139017, upload-time = "2025-11-04T13:42:59.471Z" }, + { url = "https://files.pythonhosted.org/packages/e6/b0/1a2aa41e3b5a4ba11420aba2d091b2d17959c8d1519ece3627c371951e73/pydantic_core-2.41.5-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = 
"sha256:b5819cd790dbf0c5eb9f82c73c16b39a65dd6dd4d1439dcdea7816ec9adddab8", size = 2103351, upload-time = "2025-11-04T13:43:02.058Z" }, + { url = "https://files.pythonhosted.org/packages/a4/ee/31b1f0020baaf6d091c87900ae05c6aeae101fa4e188e1613c80e4f1ea31/pydantic_core-2.41.5-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:5a4e67afbc95fa5c34cf27d9089bca7fcab4e51e57278d710320a70b956d1b9a", size = 1925363, upload-time = "2025-11-04T13:43:05.159Z" }, + { url = "https://files.pythonhosted.org/packages/e1/89/ab8e86208467e467a80deaca4e434adac37b10a9d134cd2f99b28a01e483/pydantic_core-2.41.5-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ece5c59f0ce7d001e017643d8d24da587ea1f74f6993467d85ae8a5ef9d4f42b", size = 2135615, upload-time = "2025-11-04T13:43:08.116Z" }, + { url = "https://files.pythonhosted.org/packages/99/0a/99a53d06dd0348b2008f2f30884b34719c323f16c3be4e6cc1203b74a91d/pydantic_core-2.41.5-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:16f80f7abe3351f8ea6858914ddc8c77e02578544a0ebc15b4c2e1a0e813b0b2", size = 2175369, upload-time = "2025-11-04T13:43:12.49Z" }, + { url = "https://files.pythonhosted.org/packages/6d/94/30ca3b73c6d485b9bb0bc66e611cff4a7138ff9736b7e66bcf0852151636/pydantic_core-2.41.5-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:33cb885e759a705b426baada1fe68cbb0a2e68e34c5d0d0289a364cf01709093", size = 2144218, upload-time = "2025-11-04T13:43:15.431Z" }, + { url = "https://files.pythonhosted.org/packages/87/57/31b4f8e12680b739a91f472b5671294236b82586889ef764b5fbc6669238/pydantic_core-2.41.5-pp310-pypy310_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:c8d8b4eb992936023be7dee581270af5c6e0697a8559895f527f5b7105ecd36a", size = 2329951, upload-time = "2025-11-04T13:43:18.062Z" }, + { url = "https://files.pythonhosted.org/packages/7d/73/3c2c8edef77b8f7310e6fb012dbc4b8551386ed575b9eb6fb2506e28a7eb/pydantic_core-2.41.5-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = 
"sha256:242a206cd0318f95cd21bdacff3fcc3aab23e79bba5cac3db5a841c9ef9c6963", size = 2318428, upload-time = "2025-11-04T13:43:20.679Z" }, + { url = "https://files.pythonhosted.org/packages/2f/02/8559b1f26ee0d502c74f9cca5c0d2fd97e967e083e006bbbb4e97f3a043a/pydantic_core-2.41.5-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:d3a978c4f57a597908b7e697229d996d77a6d3c94901e9edee593adada95ce1a", size = 2147009, upload-time = "2025-11-04T13:43:23.286Z" }, + { url = "https://files.pythonhosted.org/packages/5f/9b/1b3f0e9f9305839d7e84912f9e8bfbd191ed1b1ef48083609f0dabde978c/pydantic_core-2.41.5-pp311-pypy311_pp73-macosx_10_12_x86_64.whl", hash = "sha256:b2379fa7ed44ddecb5bfe4e48577d752db9fc10be00a6b7446e9663ba143de26", size = 2101980, upload-time = "2025-11-04T13:43:25.97Z" }, + { url = "https://files.pythonhosted.org/packages/a4/ed/d71fefcb4263df0da6a85b5d8a7508360f2f2e9b3bf5814be9c8bccdccc1/pydantic_core-2.41.5-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:266fb4cbf5e3cbd0b53669a6d1b039c45e3ce651fd5442eff4d07c2cc8d66808", size = 1923865, upload-time = "2025-11-04T13:43:28.763Z" }, + { url = "https://files.pythonhosted.org/packages/ce/3a/626b38db460d675f873e4444b4bb030453bbe7b4ba55df821d026a0493c4/pydantic_core-2.41.5-pp311-pypy311_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:58133647260ea01e4d0500089a8c4f07bd7aa6ce109682b1426394988d8aaacc", size = 2134256, upload-time = "2025-11-04T13:43:31.71Z" }, + { url = "https://files.pythonhosted.org/packages/83/d9/8412d7f06f616bbc053d30cb4e5f76786af3221462ad5eee1f202021eb4e/pydantic_core-2.41.5-pp311-pypy311_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:287dad91cfb551c363dc62899a80e9e14da1f0e2b6ebde82c806612ca2a13ef1", size = 2174762, upload-time = "2025-11-04T13:43:34.744Z" }, + { url = "https://files.pythonhosted.org/packages/55/4c/162d906b8e3ba3a99354e20faa1b49a85206c47de97a639510a0e673f5da/pydantic_core-2.41.5-pp311-pypy311_pp73-musllinux_1_1_aarch64.whl", hash = 
"sha256:03b77d184b9eb40240ae9fd676ca364ce1085f203e1b1256f8ab9984dca80a84", size = 2143141, upload-time = "2025-11-04T13:43:37.701Z" }, + { url = "https://files.pythonhosted.org/packages/1f/f2/f11dd73284122713f5f89fc940f370d035fa8e1e078d446b3313955157fe/pydantic_core-2.41.5-pp311-pypy311_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:a668ce24de96165bb239160b3d854943128f4334822900534f2fe947930e5770", size = 2330317, upload-time = "2025-11-04T13:43:40.406Z" }, + { url = "https://files.pythonhosted.org/packages/88/9d/b06ca6acfe4abb296110fb1273a4d848a0bfb2ff65f3ee92127b3244e16b/pydantic_core-2.41.5-pp311-pypy311_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f14f8f046c14563f8eb3f45f499cc658ab8d10072961e07225e507adb700e93f", size = 2316992, upload-time = "2025-11-04T13:43:43.602Z" }, + { url = "https://files.pythonhosted.org/packages/36/c7/cfc8e811f061c841d7990b0201912c3556bfeb99cdcb7ed24adc8d6f8704/pydantic_core-2.41.5-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:56121965f7a4dc965bff783d70b907ddf3d57f6eba29b6d2e5dabfaf07799c51", size = 2145302, upload-time = "2025-11-04T13:43:46.64Z" }, +] + [[package]] name = "pygments" version = "2.19.2" @@ -1554,6 +1720,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/ec/57/56b9bcc3c9c6a792fcbaf139543cee77261f3651ca9da0c93f5c1221264b/python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427", size = 229892, upload-time = "2024-03-01T18:36:18.57Z" }, ] +[[package]] +name = "python-multipart" +version = "0.0.22" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/94/01/979e98d542a70714b0cb2b6728ed0b7c46792b695e3eaec3e20711271ca3/python_multipart-0.0.22.tar.gz", hash = "sha256:7340bef99a7e0032613f56dc36027b959fd3b30a787ed62d310e951f7c3a3a58", size = 37612, upload-time = "2026-01-25T10:15:56.219Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/1b/d0/397f9626e711ff749a95d96b7af99b9c566a9bb5129b8e4c10fc4d100304/python_multipart-0.0.22-py3-none-any.whl", hash = "sha256:2b2cd894c83d21bf49d702499531c7bafd057d730c201782048f7945d82de155", size = 24579, upload-time = "2026-01-25T10:15:54.811Z" }, +] + [[package]] name = "pytz" version = "2026.1.post1" @@ -1840,6 +2015,19 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/b7/ce/149a00dd41f10bc29e5921b496af8b574d8413afcd5e30dfa0ed46c2cc5e/six-1.17.0-py2.py3-none-any.whl", hash = "sha256:4721f391ed90541fddacab5acf947aa0d3dc7d27b2e1e8eda2be8970586c3274", size = 11050, upload-time = "2024-12-04T17:35:26.475Z" }, ] +[[package]] +name = "starlette" +version = "0.52.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "anyio" }, + { name = "typing-extensions", marker = "python_full_version < '3.13'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/c4/68/79977123bb7be889ad680d79a40f339082c1978b5cfcf62c2d8d196873ac/starlette-0.52.1.tar.gz", hash = "sha256:834edd1b0a23167694292e94f597773bc3f89f362be6effee198165a35d62933", size = 2653702, upload-time = "2026-01-18T13:34:11.062Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/81/0d/13d1d239a25cbfb19e740db83143e95c772a1fe10202dda4b76792b114dd/starlette-0.52.1-py3-none-any.whl", hash = "sha256:0029d43eb3d273bc4f83a08720b4912ea4b071087a3b48db01b7c839f7954d74", size = 74272, upload-time = "2026-01-18T13:34:09.188Z" }, +] + [[package]] name = "sympy" version = "1.14.0" @@ -2078,6 +2266,18 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/18/67/36e9267722cc04a6b9f15c7f3441c2363321a3ea07da7ae0c0707beb2a9c/typing_extensions-4.15.0-py3-none-any.whl", hash = "sha256:f0fa19c6845758ab08074a0cfa8b7aecb71c999ca73d62883bc25cc018c4e548", size = 44614, upload-time = "2025-08-25T13:49:24.86Z" }, ] +[[package]] +name = "typing-inspection" +version = "0.4.2" +source = { registry = "https://pypi.org/simple" } 
+dependencies = [ + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/55/e3/70399cb7dd41c10ac53367ae42139cf4b1ca5f36bb3dc6c9d33acdb43655/typing_inspection-0.4.2.tar.gz", hash = "sha256:ba561c48a67c5958007083d386c3295464928b01faa735ab8547c5692e87f464", size = 75949, upload-time = "2025-10-01T02:14:41.687Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/dc/9b/47798a6c91d8bdb567fe2698fe81e0c6b7cb7ef4d13da4114b41d239f65d/typing_inspection-0.4.2-py3-none-any.whl", hash = "sha256:4ed1cacbdc298c220f1bd249ed5287caa16f34d44ef4e9c3d0cbad5b521545e7", size = 14611, upload-time = "2025-10-01T02:14:40.154Z" }, +] + [[package]] name = "tzdata" version = "2025.3" @@ -2095,3 +2295,17 @@ sdist = { url = "https://files.pythonhosted.org/packages/c7/24/5f1b3bdffd70275f6 wheels = [ { url = "https://files.pythonhosted.org/packages/39/08/aaaad47bc4e9dc8c725e68f9d04865dbcb2052843ff09c97b08904852d84/urllib3-2.6.3-py3-none-any.whl", hash = "sha256:bf272323e553dfb2e87d9bfd225ca7b0f467b919d7bbd355436d3fd37cb0acd4", size = 131584, upload-time = "2026-01-07T16:24:42.685Z" }, ] + +[[package]] +name = "uvicorn" +version = "0.41.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "click" }, + { name = "h11" }, + { name = "typing-extensions", marker = "python_full_version < '3.11'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/32/ce/eeb58ae4ac36fe09e3842eb02e0eb676bf2c53ae062b98f1b2531673efdd/uvicorn-0.41.0.tar.gz", hash = "sha256:09d11cf7008da33113824ee5a1c6422d89fbc2ff476540d69a34c87fab8b571a", size = 82633, upload-time = "2026-02-16T23:07:24.1Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/83/e4/d04a086285c20886c0daad0e026f250869201013d18f81d9ff5eada73a88/uvicorn-0.41.0-py3-none-any.whl", hash = "sha256:29e35b1d2c36a04b9e180d4007ede3bcb32a85fbdfd6c6aeb3f26839de088187", size = 68783, upload-time = "2026-02-16T23:07:22.357Z" }, +] From 
cb5aa72ce041b4561fcfa0fd1358463abe1b8b0e Mon Sep 17 00:00:00 2001 From: AutoResearch Agent Date: Tue, 10 Mar 2026 20:00:43 +0800 Subject: [PATCH 02/24] Introduce Component-System for component optimization, add a dashboard to monitor activities --- .gitignore | 8 + README.md | 214 +-- component_system/PDCA-DO-CHECK-ACTION.md | 100 ++ component_system/PDCA-PLAN.md | 112 ++ component_system/components/model.py | 380 +++++ component_system/components/optimizer.py | 179 +++ component_system/components/trainer.py | 191 +++ component_system/config.py | 31 + component_system/domain/models.py | 91 ++ component_system/entrypoint.py | 18 + component_system/package.json | 13 + component_system/postcss.config.js | 6 + component_system/protocol.md | 332 ++++ component_system/repositories/state.py | 75 + component_system/run.py | 779 +++++++++ component_system/services/workflow.py | 1395 +++++++++++++++++ component_system/tailwind.config.js | 11 + component_system/task.py | 280 ++++ component_system/training/mainline.py | 82 + component_system/web/app.py | 42 + component_system/web/routes.py | 337 ++++ component_system/web/static/app.css | 137 ++ component_system/web/static/app.js | 399 +++++ .../web/static/tailwind.input.css | 27 + component_system/web/templates/base.html | 32 + component_system/web/templates/dashboard.html | 120 ++ .../web/templates/partials/action_error.html | 3 + .../web/templates/partials/daemon_status.html | 14 + .../templates/partials/dashboard_board.html | 58 + .../web/templates/partials/seed_detail.html | 326 ++++ .../partials/seed_detail_response.html | 4 + .../web/templates/seed_detail_page.html | 15 + prepare.py | 3 +- pyproject.toml | 4 + scripts/clean_history.py | 147 ++ uv.lock | 216 ++- 36 files changed, 6090 insertions(+), 91 deletions(-) create mode 100644 component_system/PDCA-DO-CHECK-ACTION.md create mode 100644 component_system/PDCA-PLAN.md create mode 100644 component_system/components/model.py create mode 100644 
component_system/components/optimizer.py create mode 100644 component_system/components/trainer.py create mode 100644 component_system/config.py create mode 100644 component_system/domain/models.py create mode 100644 component_system/entrypoint.py create mode 100644 component_system/package.json create mode 100644 component_system/postcss.config.js create mode 100644 component_system/protocol.md create mode 100644 component_system/repositories/state.py create mode 100644 component_system/run.py create mode 100644 component_system/services/workflow.py create mode 100644 component_system/tailwind.config.js create mode 100644 component_system/task.py create mode 100644 component_system/training/mainline.py create mode 100644 component_system/web/app.py create mode 100644 component_system/web/routes.py create mode 100644 component_system/web/static/app.css create mode 100644 component_system/web/static/app.js create mode 100644 component_system/web/static/tailwind.input.css create mode 100644 component_system/web/templates/base.html create mode 100644 component_system/web/templates/dashboard.html create mode 100644 component_system/web/templates/partials/action_error.html create mode 100644 component_system/web/templates/partials/daemon_status.html create mode 100644 component_system/web/templates/partials/dashboard_board.html create mode 100644 component_system/web/templates/partials/seed_detail.html create mode 100644 component_system/web/templates/partials/seed_detail_response.html create mode 100644 component_system/web/templates/seed_detail_page.html create mode 100644 scripts/clean_history.py diff --git a/.gitignore b/.gitignore index 99c30f52f..a3fb245de 100644 --- a/.gitignore +++ b/.gitignore @@ -1,6 +1,7 @@ # Python-generated files __pycache__/ *.py[oc] +.pytest_cache/ build/ dist/ wheels/ @@ -21,3 +22,10 @@ dev/ # Results file results.tsv + +# Component-system runtime artifacts (logs, queue, state, worktrees under history/) +component_system/history/ 
+component_system/baseline_branches.json +component_system/baseline_metrics.json +*.log +.ipynb_checkpoints/ \ No newline at end of file diff --git a/README.md b/README.md index 8459259ab..6a0deafc1 100644 --- a/README.md +++ b/README.md @@ -1,89 +1,125 @@ -# autoresearch - -![teaser](progress.png) - -*One day, frontier AI research used to be done by meat computers in between eating, sleeping, having other fun, and synchronizing once in a while using sound wave interconnect in the ritual of "group meeting". That era is long gone. Research is now entirely the domain of autonomous swarms of AI agents running across compute cluster megastructures in the skies. The agents claim that we are now in the 10,205th generation of the code base, in any case no one could tell if that's right or wrong as the "code" is now a self-modifying binary that has grown beyond human comprehension. This repo is the story of how it all began. -@karpathy, March 2026*. - -The idea: give an AI agent a small but real LLM training setup and let it experiment autonomously overnight. It modifies the code, trains for 5 minutes, checks if the result improved, keeps or discards, and repeats. You wake up in the morning to a log of experiments and (hopefully) a better model. The training code here is a simplified single-GPU implementation of [nanochat](https://github.com/karpathy/nanochat). The core idea is that you're not touching any of the Python files like you normally would as a researcher. Instead, you are programming the `program.md` Markdown files that provide context to the AI agents and set up your autonomous research org. The default `program.md` in this repo is intentionally kept as a bare bones baseline, though it's obvious how one would iterate on it over time to find the "research org code" that achieves the fastest research progress, how you'd add more agents to the mix, etc. A bit more context on this project is here in this [tweet](https://x.com/karpathy/status/2029701092347630069). 
- -## How it works - -The repo is deliberately kept small and only really has a three files that matter: - -- **`prepare.py`** — fixed constants, one-time data prep (downloads training data, trains a BPE tokenizer), and runtime utilities (dataloader, evaluation). Not modified. -- **`train.py`** — the single file the agent edits. Contains the full GPT model, optimizer (Muon + AdamW), and training loop. Everything is fair game: architecture, hyperparameters, optimizer, batch size, etc. **This file is edited and iterated on by the agent**. -- **`program.md`** — baseline instructions for one agent. Point your agent here and let it go. **This file is edited and iterated on by the human**. - -By design, training runs for a **fixed 5-minute time budget** (wall clock, excluding startup/compilation), regardless of the details of your compute. The metric is **val_bpb** (validation bits per byte) — lower is better, and vocab-size-independent so architectural changes are fairly compared. - -## Quick start - -**Requirements:** A single NVIDIA GPU (tested on H100), Python 3.10+, [uv](https://docs.astral.sh/uv/). - -```bash - -# 1. Install uv project manager (if you don't already have it) -curl -LsSf https://astral.sh/uv/install.sh | sh - -# 2. Install dependencies -uv sync - -# 3. Download data and train tokenizer (one-time, ~2 min) -uv run prepare.py - -# 4. Manually run a single training experiment (~5 min) -uv run train.py -``` - -If the above commands all work ok, your setup is working and you can go into autonomous research mode. - -## Running the agent - -Simply spin up your Claude/Codex or whatever you want in this repo (and disable all permissions), then you can prompt something like: - -``` -Hi have a look at program.md and let's kick off a new experiment! let's do the setup first. -``` - -The `program.md` file is essentially a super lightweight "skill". 
- -## Project structure - -``` -prepare.py — constants, data prep + runtime utilities (do not modify) -train.py — model, optimizer, training loop (agent modifies this) -program.md — agent instructions -pyproject.toml — dependencies -``` - -## Design choices - -- **Single file to modify.** The agent only touches `train.py`. This keeps the scope manageable and diffs reviewable. -- **Fixed time budget.** Training always runs for exactly 5 minutes, regardless of your specific platform. This means you can expect approx 12 experiments/hour and approx 100 experiments while you sleep. There are two upsides of this design decision. First, this makes experiments directly comparable regardless of what the agent changes (model size, batch size, architecture, etc). Second, this means that autoresearch will find the most optimal model for your platform in that time budget. The downside is that your runs (and results) become not comparable to other people running on other compute platforms. -- **Self-contained.** No external dependencies beyond PyTorch and a few small packages. No distributed training, no complex configs. One GPU, one file, one metric. - -## Platform support - -This code currently requires that you have a single NVIDIA GPU. In principle it is quite possible to support CPU, MPS and other platforms but this would also bloat the code. I'm not 100% sure that I want to take this on personally right now. People can reference (or have their agents reference) the full/parent nanochat repository that has wider platform support and shows the various solutions (e.g. a Flash Attention 3 kernels fallback implementation, generic device support, autodetection, etc.), feel free to create forks or discussions for other platforms and I'm happy to link to them here in the README in some new notable forks section or etc. - -Seeing as there seems to be a lot of interest in tinkering with autoresearch on much smaller compute platforms than an H100, a few extra words. 
If you're going to try running autoresearch on smaller computers (Macbooks etc.), I'd recommend one of the forks below. On top of this, here are some recommendations for how to tune the defaults for much smaller models for aspiring forks: - -1. To get half-decent results I'd use a dataset with a lot less entropy, e.g. this [TinyStories dataset](https://huggingface.co/datasets/karpathy/tinystories-gpt4-clean). These are GPT-4 generated short stories. Because the data is a lot narrower in scope, you will see reasonable results with a lot smaller models (if you try to sample from them after training). -2. You might experiment with decreasing `vocab_size`, e.g. from 8192 down to 4096, 2048, 1024, or even - simply byte-level tokenizer with 256 possibly bytes after utf-8 encoding. -3. In `prepare.py`, you'll want to lower `MAX_SEQ_LEN` a lot, depending on the computer even down to 256 etc. As you lower `MAX_SEQ_LEN`, you may want to experiment with increasing `DEVICE_BATCH_SIZE` in `train.py` slightly to compensate. The number of tokens per fwd/bwd pass is the product of these two. -4. Also in `prepare.py`, you'll want to decrease `EVAL_TOKENS` so that your validation loss is evaluated on a lot less data. -5. In `train.py`, the primary single knob that controls model complexity is the `DEPTH` (default 8, here). A lot of variables are just functions of this, so e.g. lower it down to e.g. 4. -6. You'll want to most likely use `WINDOW_PATTERN` of just "L", because "SSSL" uses alternating banded attention pattern that may be very inefficient for you. Try it. -7. You'll want to lower `TOTAL_BATCH_SIZE` a lot, but keep it powers of 2, e.g. down to `2**14` (~16K) or so even, hard to tell. - -I think these would be the reasonable hyperparameters to play with. Ask your favorite coding agent for help and copy paste them this guide, as well as the full source code. 
- -## Notable forks - -- [miolini/autoresearch-macos](https://github.com/miolini/autoresearch-macos) (MacOS) -- [trevin-creator/autoresearch-mlx](https://github.com/trevin-creator/autoresearch-mlx) (MacOS) -- [jsegov/autoresearch-win-rtx](https://github.com/jsegov/autoresearch-win-rtx) (Windows) - -## License - -MIT +# autoresearch + +![teaser](progress.png) + +*One day, frontier AI research used to be done by meat computers in between eating, sleeping, having other fun, and synchronizing once in a while using sound wave interconnect in the ritual of "group meeting". That era is long gone. Research is now entirely the domain of autonomous swarms of AI agents running across compute cluster megastructures in the skies. The agents claim that we are now in the 10,205th generation of the code base, in any case no one could tell if that's right or wrong as the "code" is now a self-modifying binary that has grown beyond human comprehension. This repo is the story of how it all began. -@karpathy, March 2026*. + +The idea: give an AI agent a small but real LLM training setup and let it experiment autonomously overnight. It modifies the code, trains for 5 minutes, checks if the result improved, keeps or discards, and repeats. You wake up in the morning to a log of experiments and (hopefully) a better model. The training code here is a simplified single-GPU implementation of [nanochat](https://github.com/karpathy/nanochat). The core idea is that you're not touching any of the Python files like you normally would as a researcher. Instead, you are programming the `program.md` Markdown files that provide context to the AI agents and set up your autonomous research org. The default `program.md` in this repo is intentionally kept as a bare bones baseline, though it's obvious how one would iterate on it over time to find the "research org code" that achieves the fastest research progress, how you'd add more agents to the mix, etc. 
A bit more context on this project is here in this [tweet](https://x.com/karpathy/status/2029701092347630069). + +## How it works + +The repo is deliberately kept small and only really has three files that matter: + +- **`prepare.py`** — fixed constants, one-time data prep (downloads training data, trains a BPE tokenizer), and runtime utilities (dataloader, evaluation). Not modified. +- **`train.py`** — the single file the agent edits. Contains the full GPT model, optimizer (Muon + AdamW), and training loop. Everything is fair game: architecture, hyperparameters, optimizer, batch size, etc. **This file is edited and iterated on by the agent**. +- **`program.md`** — baseline instructions for one agent. Point your agent here and let it go. **This file is edited and iterated on by the human**. + +By design, training runs for a **fixed 5-minute time budget** (wall clock, excluding startup/compilation), regardless of the details of your compute. The metric is **val_bpb** (validation bits per byte) — lower is better, and vocab-size-independent so architectural changes are fairly compared. + +## Quick start + +**Requirements:** A single NVIDIA GPU (tested on H100), Python 3.10+, [uv](https://docs.astral.sh/uv/). + +```bash + +# 1. Install uv project manager (if you don't already have it) +curl -LsSf https://astral.sh/uv/install.sh | sh + +# 2. Install dependencies +uv sync + +# 3. Download data and train tokenizer (one-time, ~2 min) +uv run prepare.py + +# 4. Manually run a single training experiment (~5 min) +uv run train.py +``` + +If the above commands all work ok, your setup is working and you can go into autonomous research mode. + +## Running the agent + +Simply spin up your Claude/Codex or whatever you want in this repo (and disable all permissions), then you can prompt something like: + +``` +Hi have a look at program.md and let's kick off a new experiment! let's do the setup first. +``` + +The `program.md` file is essentially a super lightweight "skill". 
+ +### Component-system workflow + +The component system runs a continuous **Seed → P → DCA** loop. A resident daemon manages two workers (P and DCA) that poll a file-based queue and dispatch each stage to an external code agent (Claude Code, Codex, or OpenCode). + +**1. Start the web dashboard** (optional, but recommended for monitoring): + +```bash +uv run uvicorn component_system.web.app:app --reload +``` + +Open http://127.0.0.1:8000 — the dashboard lives at `/component-system`. Use `--host 0.0.0.0` or `--port 8080` as needed. + +**2. Start the daemon:** + +```bash +# Default: uses Claude Code +uv run component_system/run.py + +# Or choose a different agent backend +PDCA_AGENT=codex uv run component_system/run.py +PDCA_AGENT=opencode uv run component_system/run.py +``` + +**3. Bootstrap via a coding agent.** Do *not* tell the agent to execute PDCA stages manually. Instead, give it a prompt like: + +```text +Understand this project and follow component_system/protocol.md. +Do not execute PDCA stages manually in this session. +Instead, bootstrap the component system by creating an initial seed +and queuing it to component_system/queue/p/, then confirm the daemon +(uv run component_system/run.py) is running so the P and DCA workers +can process stages automatically. +``` + +Once bootstrapped, seeds flow through `queue/p/` → P worker → `queue/dca/` → DCA worker → `state/` automatically. Results and promotions are tracked in `state/` and visible in the web dashboard. + +## Project structure + +``` +prepare.py — constants, data prep + runtime utilities (do not modify) +train.py — model, optimizer, training loop (agent modifies this) +program.md — agent instructions +pyproject.toml — dependencies +``` + +## Design choices + +- **Single file to modify.** The agent only touches `train.py`. This keeps the scope manageable and diffs reviewable. +- **Fixed time budget.** Training always runs for exactly 5 minutes, regardless of your specific platform. 
This means you can expect approx 12 experiments/hour and approx 100 experiments while you sleep. There are two upsides of this design decision. First, this makes experiments directly comparable regardless of what the agent changes (model size, batch size, architecture, etc). Second, this means that autoresearch will find the most optimal model for your platform in that time budget. The downside is that your runs (and results) become not comparable to other people running on other compute platforms. +- **Self-contained.** No external dependencies beyond PyTorch and a few small packages. No distributed training, no complex configs. One GPU, one file, one metric. + +## Platform support + +This code currently requires that you have a single NVIDIA GPU. In principle it is quite possible to support CPU, MPS and other platforms but this would also bloat the code. I'm not 100% sure that I want to take this on personally right now. People can reference (or have their agents reference) the full/parent nanochat repository that has wider platform support and shows the various solutions (e.g. a Flash Attention 3 kernels fallback implementation, generic device support, autodetection, etc.), feel free to create forks or discussions for other platforms and I'm happy to link to them here in the README in some new notable forks section or etc. + +Seeing as there seems to be a lot of interest in tinkering with autoresearch on much smaller compute platforms than an H100, a few extra words. If you're going to try running autoresearch on smaller computers (Macbooks etc.), I'd recommend one of the forks below. On top of this, here are some recommendations for how to tune the defaults for much smaller models for aspiring forks: + +1. To get half-decent results I'd use a dataset with a lot less entropy, e.g. this [TinyStories dataset](https://huggingface.co/datasets/karpathy/tinystories-gpt4-clean). These are GPT-4 generated short stories. 
Because the data is a lot narrower in scope, you will see reasonable results with a lot smaller models (if you try to sample from them after training). +2. You might experiment with decreasing `vocab_size`, e.g. from 8192 down to 4096, 2048, 1024, or even - simply byte-level tokenizer with 256 possible bytes after utf-8 encoding. +3. In `prepare.py`, you'll want to lower `MAX_SEQ_LEN` a lot, depending on the computer even down to 256 etc. As you lower `MAX_SEQ_LEN`, you may want to experiment with increasing `DEVICE_BATCH_SIZE` in `train.py` slightly to compensate. The number of tokens per fwd/bwd pass is the product of these two. +4. Also in `prepare.py`, you'll want to decrease `EVAL_TOKENS` so that your validation loss is evaluated on a lot less data. +5. In `train.py`, the primary single knob that controls model complexity is the `DEPTH` (default 8, here). A lot of variables are just functions of this, so e.g. lower it down to e.g. 4. +6. You'll want to most likely use `WINDOW_PATTERN` of just "L", because "SSSL" uses alternating banded attention pattern that may be very inefficient for you. Try it. +7. You'll want to lower `TOTAL_BATCH_SIZE` a lot, but keep it powers of 2, e.g. down to `2**14` (~16K) or so even, hard to tell. + +I think these would be the reasonable hyperparameters to play with. Ask your favorite coding agent for help and copy paste them this guide, as well as the full source code. 
+ +## Notable forks + +- [miolini/autoresearch-macos](https://github.com/miolini/autoresearch-macos) (MacOS) +- [trevin-creator/autoresearch-mlx](https://github.com/trevin-creator/autoresearch-mlx) (MacOS) +- [jsegov/autoresearch-win-rtx](https://github.com/jsegov/autoresearch-win-rtx) (Windows) + +## License + +MIT diff --git a/component_system/PDCA-DO-CHECK-ACTION.md b/component_system/PDCA-DO-CHECK-ACTION.md new file mode 100644 index 000000000..b0a1cc503 --- /dev/null +++ b/component_system/PDCA-DO-CHECK-ACTION.md @@ -0,0 +1,100 @@ +# DCA - Adapt, Check, Action + +This document merges the former `PDCA-DO.md`, `PDCA-CHECK.md`, and `PDCA-ACTION.md` +into one execution guide for the merged DCA stage. + +## Responsibility +Take the generated plan from P, adapt/fix it in the seed worktree, +run the canonical training entrypoint, evaluate results against baseline, and +promote only when the signal is positive. Do not propose new ideas or optimize for better metrics; only adapt/fix so the plan runs and report outcomes. + +## Workspace and paths +Your **current working directory is the seed worktree**. All reads and edits must stay inside this workspace. Use **only paths relative to your cwd**, and treat the copied files under `component_system/` as the canonical context inside the worktree. Do not use or request absolute paths, parent-directory paths, or files outside the workspace; the runner has already set your cwd to the correct worktree. + +## Input +- Read the task content embedded in the runner prompt. +- Read current baseline state from `component_system/baseline_branches.json` and `component_system/baseline_metrics.json`. +- Read and edit worktree-local files only. + +## Baseline measurement (seed_id __baseline__) +For **baseline measurement** tasks you must **retry until the run completes successfully** and you can report real metrics. Do not report empty metrics and stop. 
+
+- If training fails with **CUDA out of memory (OOM)**, the default batch size may still be too large for your GPU. Reduce `device_batch_size` in `component_system/components/trainer.py` (`TrainingSettings`: default `device_batch_size=32`). You may also need to reduce `total_batch_size` so that `total_batch_size % (device_batch_size * sequence_length) == 0` for gradient accumulation. Then rerun the entrypoint until training completes and report the resulting metrics.
+- Only trivial execution fixes (e.g. batch size for VRAM) are allowed; do not change model architecture or training logic for baseline.
+- **Commit any file changes before reporting.** If you modified files (e.g. reduced batch size), commit those changes on the baseline branch. An uncommitted worktree causes the follow-up merge into the baseline branch to fail.
+
+## Workflow
+1. Work in the seed worktree prepared by the system (on the seed branch, one branch per seed).
+2. Adapt or fix generated code until it runs cleanly. "Adapt or fix" is limited to runtime/environment fixes (bugs, OOM, imports, config); do not change model architecture, hyperparameters, or training logic to improve metrics.
+3. Run the canonical command (allow at least **600 seconds** so the run is not killed by the execution environment; the first step can take ~150s and training runs for 300s):
+ - Preferred (when daemon/root `.venv` is active): `timeout 600 uv run --active component_system/entrypoint.py`
+ - Fallback (when no active root `.venv` is available): `timeout 600 uv run component_system/entrypoint.py`
+4. If there is a simple bug or OOM, fix (e.g. reduce batch size) and rerun. For baseline measurement, keep retrying until the run succeeds.
+5. Commit changes on the seed branch before reporting.
+6. Print the DCA summary block for the runner; include the current commit SHA in the JSON so the runner can verify and record it.
+7. Let the runner evaluate signal and handle promotion policy.
+ +## Output Format +Print a summary block for the runner. Report metrics in the JSON first; the +runner only falls back to parsing training stdout/stderr when the JSON metrics +are missing: + +```text +AUTORESEARCH_DCA_SUMMARY_BEGIN +{"checks":["entrypoint"],"notes":"what you adapted or fixed","completed_at":"YYYY-MM-DD HH:MM:SS","commit_sha":"git sha","metrics":{"val_bpb":1.239972,"training_seconds":300.1,"total_seconds":360.4,"peak_vram_mb":11967.8,"mfu_percent":2.15,"total_tokens_M":140.5,"num_steps":268,"num_params_M":11.5,"depth":4}} +AUTORESEARCH_DCA_SUMMARY_END +``` + +If you cannot provide final metrics, still print the exact same JSON shape with +`"metrics": {}`. + +The runner falls back to extracting metrics from canonical training stdout/stderr: +`val_bpb`, `training_seconds`, `total_seconds`, `peak_vram_mb`, `mfu_percent`, +`total_tokens_M`, `num_steps`, `num_params_M`, and `depth`. + +If a DCA run finishes but still reports no metrics, the system does not +immediately mark it failed. Instead, it queues a follow-up DCA recovery task +that inspects the saved stdout/stderr logs and reports the metrics in the same +JSON format. Only if that recovery task still cannot recover metrics is the run +treated as failed. + +## Check: Signal Rules + +| Condition | Signal | +|-----------|--------| +| `val_bpb` drops >= 0.001 vs baseline | `positive_signal` | +| `val_bpb` rises >= 0.001 vs baseline | `negative_signal` | +| difference < 0.001 | `neutral` | +| no historical baseline `last_val_bpb` | `positive_signal` (first recording) | +| metrics missing or training error | `error` | + +The threshold is defined in `component_system/config.py` (`PROMOTION_THRESHOLD`). + +## Action: Promotion Rules + +Only the DCA (Do-Check-Action) stage may trigger a merge into baseline. The Plan stage must never merge code; the system performs the merge automatically after a successful DCA promotion. 
+ +The runner records the DCA `commit_sha` from your summary (or from the current branch HEAD if omitted) for traceability. On positive signal, the workflow merges the seed branch into the baseline. If the merge fails (e.g. conflicts), the system queues a merge-resolution DCA run. + +### Promotion flow (`positive_signal` only) +1. The system merges the seed branch into the baseline branch (you do not run merge yourself). +2. The workflow updates `baseline_metrics.json` (and `baseline_branches.json` as needed) with `last_val_bpb`, `promoted_from`, `promoted_idea`, `promoted_at`, `promoted_branch`. +3. Promotion metadata is persisted in seed/run state files. + +### Merge failure and conflict resolution +- If the merge into baseline fails (e.g. conflicts), the system queues a **new DCA run** with `merge_resolution: true`. + - **Normal seed**: In the seed worktree, run `git merge __baseline__` (merge the baseline branch into the seed), resolve conflicts, commit, then print the DCA summary so the system can retry promotion. + - **Baseline seed (__baseline__)**: The goal is to merge __baseline__ *into* the target branch (e.g. master). Run from the directory that has the target branch checked out (use `git worktree list` to find it), then `git merge __baseline__`. Do *not* run from the __baseline__ worktree and do *not* run `git merge master` there—that would merge master into __baseline__, the wrong direction. + +### Non-promotion cases +- `neutral`, `negative_signal`, or `error`: log only, no baseline merge/update. +- Failed run info remains available via queue/state logs. + +## Constraints +- Do not change model architecture, optimizer, or training logic to improve metrics; only make the existing plan run (fix bugs, OOM via batch size, etc.). +- Training must use `run_mainline_training` or equivalent for evaluation consistency. +- Evaluation (`val_bpb`) must not be skipped. 
+- Do not edit `baseline_branches.json` or `baseline_metrics.json` directly; the workflow writes them. +- Only `positive_signal` can trigger promotion. +- Keep `component_system/entrypoint.py` as the canonical runner. +- Rely on git history plus state files for traceability. diff --git a/component_system/PDCA-PLAN.md b/component_system/PDCA-PLAN.md new file mode 100644 index 000000000..f81b6259d --- /dev/null +++ b/component_system/PDCA-PLAN.md @@ -0,0 +1,112 @@ +# P — Seed Planning And Generation + +## Responsibility +Extract exactly one testable improvement hypothesis from the seed prompt, +generate the first implementation in a candidate worktree, and hand the result +to DCA through the runner. + +## Workspace and paths +Your **current working directory is the seed worktree**. All reads and edits must stay inside this workspace. Use only in-workspace paths from your current working directory, and do not use or request absolute paths or any paths outside the workspace; the runner has already set your cwd to the correct worktree. + +## Skill: arxiv-search + +Use the **arxiv-search** skill (`.agents/skills/arxiv-search`) to search for +relevant papers. + +If the skill is not installed or the search script is missing, do not pretend +the skill exists and do not fabricate paper references. Try to install or make +the skill available autonomously. If that still fails, continue planning from +the other input sources instead of asking the user questions. + +### Prerequisites +```bash +pip install arxiv +``` + +Install the Python package only after the skill itself is available. Installing +the package alone does not replace the missing skill. If the skill cannot be +made available, skip paper-driven search and proceed with the remaining inputs. 
+ +### Search for papers +```bash +# Search by topic in cs.LG / cs.NE categories +python .agents/skills/arxiv-search/scripts/search_arxiv.py \ + --query "optimizer adaptive learning rate" \ + --category "cs.LG" \ + --sort-by submitted_date \ + --max-results 10 + +# Search for model architecture ideas +python .agents/skills/arxiv-search/scripts/search_arxiv.py \ + --query "ti:attention AND abs:efficiency" \ + --date-from "2024-01-01" \ + --output json +``` + +### How to Extract a Hypothesis from Results +1. Read the abstract of each result +2. Identify a concrete architectural or algorithmic change (not just a concept) +3. Map it to a target component: `model`, `optimizer`, or `trainer` +4. State the **expected benefit** (e.g. faster convergence, lower val loss, fewer params) +5. Reduce the idea to one isolated improvement that can be evaluated on its own + +## Read results.tsv first (avoid idea duplication) +Before choosing a hypothesis, **read `results.tsv` in your current working directory if it exists**. The runner copies the latest result history into the seed worktree before P runs. Use it to avoid proposing ideas that were already tried or discarded; only repeat an idea if you have a clear new angle (e.g. different implementation or target component). + +## Input Sources +- **results.tsv** in cwd (when present) — read first to avoid duplicating past ideas +- arXiv papers via **arxiv-search** skill (primary) +- Clues from past run failures in `queue/done/` +- Manual seed files + +## One-Improvement Rule + +Each P run must propose and implement exactly one improvement. + +- One seed means one hypothesis. +- One seed means one causal change to evaluate. +- Do not bundle multiple ideas into the same candidate, even if they seem + complementary. +- If the prompt contains several possible improvements, choose the single best + one for this iteration and leave the others for later seeds. 
+- If an idea would require several coordinated changes, choose the smallest + coherent version that still tests the hypothesis cleanly. + +Good examples: +- change only the optimizer schedule +- add only one architectural block +- simplify only one training heuristic + +Bad examples: +- change the model width and the optimizer and the batch schedule together +- combine several paper ideas in one seed +- make "general cleanup plus a new feature" in the same candidate + +## Output Format +Print a summary block for the runner: +```text +AUTORESEARCH_P_SUMMARY_BEGIN +{"idea":"short title","target_component":"model | optimizer | trainer","description":"change details, hypothesis, expected benefit","source_refs":["arXiv:"],"commit_sha":"git sha","completed_at":"YYYY-MM-DD HH:MM:SS"} +AUTORESEARCH_P_SUMMARY_END +``` + +## Steps +1. If `results.tsv` exists in the worktree, read it first to avoid duplicating already-tried ideas. +2. Refine the seed prompt into one concrete idea +3. Reduce that idea to one isolated improvement with a clear expected benefit +4. Identify the target component (`model`, `optimizer`, or `trainer`) +5. Implement only that first version inside the candidate worktree created from `baseline` +6. Commit the candidate branch +7. Ensure the summary describes the single improvement being tested +8. Print the summary block; the runner records the commit on the seed branch. + +## Constraints +- Each seed targets exactly one component +- Each seed applies exactly one improvement +- Prefer the smallest viable implementation that can test the hypothesis +- Do not mix exploratory cleanup with the experimental change +- Do not include opportunistic refactors unless they are strictly required to make + the one improvement work +- The description must contain enough detail for DCA to continue independently +- One branch per seed: commit on the seed branch in the worktree; the runner does not merge branches. 
+- **Plan must never merge code.** Only the DCA (Do-Check-Action) stage may trigger a merge into baseline; the system performs the merge automatically after a successful DCA promotion. diff --git a/component_system/components/model.py b/component_system/components/model.py new file mode 100644 index 000000000..f74d89386 --- /dev/null +++ b/component_system/components/model.py @@ -0,0 +1,380 @@ +from __future__ import annotations + +from dataclasses import dataclass + +import torch +import torch.nn as nn +import torch.nn.functional as F +from kernels import get_kernel + +from prepare import MAX_SEQ_LEN + + +def _get_fa3(): + if torch.cuda.is_available(): + cap = torch.cuda.get_device_capability() + repo = "varunneal/flash-attention-3" if cap == (9, 0) else "kernels-community/flash-attn3" + return get_kernel(repo).flash_attn_interface + return None + +_fa3 = None + +def get_fa3(): + global _fa3 + if _fa3 is None: + _fa3 = _get_fa3() + return _fa3 + + +@dataclass +class GPTConfig: + sequence_len: int = 2048 + vocab_size: int = 32768 + n_layer: int = 12 + n_head: int = 6 + n_kv_head: int = 6 + n_embd: int = 768 + window_pattern: str = "SSSL" + + +def norm(x: torch.Tensor) -> torch.Tensor: + return F.rms_norm(x, (x.size(-1),)) + + +def has_ve(layer_idx: int, n_layer: int) -> bool: + return layer_idx % 2 == (n_layer - 1) % 2 + + +def apply_rotary_emb(x: torch.Tensor, cos: torch.Tensor, sin: torch.Tensor) -> torch.Tensor: + assert x.ndim == 4 + d = x.shape[3] // 2 + x1, x2 = x[..., :d], x[..., d:] + y1 = x1 * cos + x2 * sin + y2 = x1 * (-sin) + x2 * cos + return torch.cat([y1, y2], 3) + + +class CausalSelfAttention(nn.Module): + def __init__(self, config: GPTConfig, layer_idx: int) -> None: + super().__init__() + self.n_head = config.n_head + self.n_kv_head = config.n_kv_head + self.n_embd = config.n_embd + self.head_dim = self.n_embd // self.n_head + assert self.n_embd % self.n_head == 0 + assert self.n_kv_head <= self.n_head and self.n_head % self.n_kv_head == 0 + 
self.c_q = nn.Linear(self.n_embd, self.n_head * self.head_dim, bias=False) + self.c_k = nn.Linear(self.n_embd, self.n_kv_head * self.head_dim, bias=False) + self.c_v = nn.Linear(self.n_embd, self.n_kv_head * self.head_dim, bias=False) + self.c_proj = nn.Linear(self.n_embd, self.n_embd, bias=False) + self.ve_gate_channels = 32 + self.ve_gate = ( + nn.Linear(self.ve_gate_channels, self.n_kv_head, bias=False) + if has_ve(layer_idx, config.n_layer) + else None + ) + + def forward( + self, + x: torch.Tensor, + ve: torch.Tensor | None, + cos_sin: tuple[torch.Tensor, torch.Tensor], + window_size: tuple[int, int], + ) -> torch.Tensor: + batch_size, seq_len, _ = x.size() + q = self.c_q(x).view(batch_size, seq_len, self.n_head, self.head_dim) + k = self.c_k(x).view(batch_size, seq_len, self.n_kv_head, self.head_dim) + v = self.c_v(x).view(batch_size, seq_len, self.n_kv_head, self.head_dim) + + # Value residual (ResFormer): mix in value embedding with input-dependent gate per head + if ve is not None: + ve = ve.view(batch_size, seq_len, self.n_kv_head, self.head_dim) + gate = 2 * torch.sigmoid(self.ve_gate(x[..., : self.ve_gate_channels])) + v = v + gate.unsqueeze(-1) * ve + + cos, sin = cos_sin + q, k = apply_rotary_emb(q, cos, sin), apply_rotary_emb(k, cos, sin) + q, k = norm(q), norm(k) + + fa3 = get_fa3() + if fa3 is None: + raise RuntimeError("Flash Attention 3 is unavailable; component_system model should match train.py and requires the same kernel path.") + y = fa3.flash_attn_func(q, k, v, causal=True, window_size=window_size) + y = y.contiguous().view(batch_size, seq_len, -1) + return self.c_proj(y) + + +class MLP(nn.Module): + def __init__(self, config: GPTConfig) -> None: + super().__init__() + self.c_fc = nn.Linear(config.n_embd, 4 * config.n_embd, bias=False) + self.c_proj = nn.Linear(4 * config.n_embd, config.n_embd, bias=False) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + x = self.c_fc(x) + x = F.relu(x).square() + x = self.c_proj(x) + return x + + 
+class Block(nn.Module): + def __init__(self, config: GPTConfig, layer_idx: int) -> None: + super().__init__() + self.attn = CausalSelfAttention(config, layer_idx) + self.mlp = MLP(config) + + def forward( + self, + x: torch.Tensor, + ve: torch.Tensor | None, + cos_sin: tuple[torch.Tensor, torch.Tensor], + window_size: tuple[int, int], + ) -> torch.Tensor: + x = x + self.attn(norm(x), ve, cos_sin, window_size) + x = x + self.mlp(norm(x)) + return x + + +class GPT(nn.Module): + def __init__(self, config: GPTConfig) -> None: + super().__init__() + self.config = config + self.window_sizes = self._compute_window_sizes(config) + self.transformer = nn.ModuleDict( + { + "wte": nn.Embedding(config.vocab_size, config.n_embd), + "h": nn.ModuleList([Block(config, i) for i in range(config.n_layer)]), + } + ) + self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False) + self.resid_lambdas = nn.Parameter(torch.ones(config.n_layer)) + self.x0_lambdas = nn.Parameter(torch.zeros(config.n_layer)) + head_dim = config.n_embd // config.n_head + kv_dim = config.n_kv_head * head_dim + self.value_embeds = nn.ModuleDict( + { + str(i): nn.Embedding(config.vocab_size, kv_dim) + for i in range(config.n_layer) + if has_ve(i, config.n_layer) + } + ) + self.rotary_seq_len = config.sequence_len * 10 + cos, sin = self._precompute_rotary_embeddings(self.rotary_seq_len, head_dim) + self.register_buffer("cos", cos, persistent=False) + self.register_buffer("sin", sin, persistent=False) + + @torch.no_grad() + def init_weights(self) -> None: + torch.nn.init.normal_(self.transformer.wte.weight, mean=0.0, std=1.0) + torch.nn.init.normal_(self.lm_head.weight, mean=0.0, std=0.001) + n_embd = self.config.n_embd + scale = 3**0.5 * n_embd**-0.5 + for block in self.transformer.h: + torch.nn.init.uniform_(block.attn.c_q.weight, -scale, scale) + torch.nn.init.uniform_(block.attn.c_k.weight, -scale, scale) + torch.nn.init.uniform_(block.attn.c_v.weight, -scale, scale) + 
torch.nn.init.zeros_(block.attn.c_proj.weight) + torch.nn.init.uniform_(block.mlp.c_fc.weight, -scale, scale) + torch.nn.init.zeros_(block.mlp.c_proj.weight) + self.resid_lambdas.fill_(1.0) + self.x0_lambdas.fill_(0.1) + for ve in self.value_embeds.values(): + torch.nn.init.uniform_(ve.weight, -scale, scale) + for block in self.transformer.h: + if block.attn.ve_gate is not None: + torch.nn.init.zeros_(block.attn.ve_gate.weight) + head_dim = self.config.n_embd // self.config.n_head + cos, sin = self._precompute_rotary_embeddings(self.rotary_seq_len, head_dim) + self.cos, self.sin = cos, sin + self.transformer.wte.to(dtype=torch.bfloat16) + for ve in self.value_embeds.values(): + ve.to(dtype=torch.bfloat16) + + def _precompute_rotary_embeddings( + self, + seq_len: int, + head_dim: int, + base: int = 10000, + device: torch.device | None = None, + ) -> tuple[torch.Tensor, torch.Tensor]: + if device is None: + device = self.transformer.wte.weight.device + channel_range = torch.arange(0, head_dim, 2, dtype=torch.float32, device=device) + inv_freq = 1.0 / (base ** (channel_range / head_dim)) + t = torch.arange(seq_len, dtype=torch.float32, device=device) + freqs = torch.outer(t, inv_freq) + cos, sin = freqs.cos(), freqs.sin() + cos, sin = cos.bfloat16(), sin.bfloat16() + return cos[None, :, None, :], sin[None, :, None, :] + + def _compute_window_sizes(self, config: GPTConfig) -> list[tuple[int, int]]: + pattern = config.window_pattern.upper() + assert all(c in "SL" for c in pattern) + long_window = config.sequence_len + short_window = long_window // 2 + char_to_window = {"L": (long_window, 0), "S": (short_window, 0)} + window_sizes = [] + for layer_idx in range(config.n_layer): + char = pattern[layer_idx % len(pattern)] + window_sizes.append(char_to_window[char]) + window_sizes[-1] = (long_window, 0) + return window_sizes + + def estimate_flops(self) -> float: + nparams = sum(p.numel() for p in self.parameters()) + value_embeds_numel = sum(ve.weight.numel() for ve in 
self.value_embeds.values()) + nparams_exclude = ( + self.transformer.wte.weight.numel() + + value_embeds_numel + + self.resid_lambdas.numel() + + self.x0_lambdas.numel() + ) + n_head = self.config.n_head + head_dim = self.config.n_embd // self.config.n_head + seq_len = self.config.sequence_len + attn_flops = 0 + for window_size in self.window_sizes: + window = window_size[0] + effective_seq = seq_len if window < 0 else min(window, seq_len) + attn_flops += 12 * n_head * head_dim * effective_seq + return 6 * (nparams - nparams_exclude) + attn_flops + + def num_scaling_params(self) -> dict[str, int]: + wte = sum(p.numel() for p in self.transformer.wte.parameters()) + value_embeds = sum(p.numel() for p in self.value_embeds.parameters()) + lm_head = sum(p.numel() for p in self.lm_head.parameters()) + transformer_matrices = sum(p.numel() for p in self.transformer.h.parameters()) + scalars = self.resid_lambdas.numel() + self.x0_lambdas.numel() + total = wte + value_embeds + lm_head + transformer_matrices + scalars + return { + "wte": wte, + "value_embeds": value_embeds, + "lm_head": lm_head, + "transformer_matrices": transformer_matrices, + "scalars": scalars, + "total": total, + } + + def setup_optimizer( + self, + unembedding_lr: float = 0.004, + embedding_lr: float = 0.2, + matrix_lr: float = 0.02, + weight_decay: float = 0.0, + adam_betas: tuple[float, float] = (0.8, 0.95), + scalar_lr: float = 0.5, + ): + from component_system.components.optimizer import MuonAdamW + + model_dim = self.config.n_embd + matrix_params = list(self.transformer.h.parameters()) + value_embeds_params = list(self.value_embeds.parameters()) + embedding_params = list(self.transformer.wte.parameters()) + lm_head_params = list(self.lm_head.parameters()) + resid_params = [self.resid_lambdas] + x0_params = [self.x0_lambdas] + assert len(list(self.parameters())) == ( + len(matrix_params) + + len(embedding_params) + + len(lm_head_params) + + len(value_embeds_params) + + len(resid_params) + + 
len(x0_params) + ) + # Scale LR ∝ 1/√dmodel (tuned at 768 dim) + dmodel_lr_scale = (model_dim / 768) ** -0.5 + print(f"Scaling AdamW LRs by 1/sqrt({model_dim}/768) = {dmodel_lr_scale:.6f}") + param_groups = [ + dict(kind="adamw", params=lm_head_params, lr=unembedding_lr * dmodel_lr_scale, betas=adam_betas, eps=1e-10, weight_decay=0.0), + dict(kind="adamw", params=embedding_params, lr=embedding_lr * dmodel_lr_scale, betas=adam_betas, eps=1e-10, weight_decay=0.0), + dict(kind="adamw", params=value_embeds_params, lr=embedding_lr * dmodel_lr_scale, betas=adam_betas, eps=1e-10, weight_decay=0.0), + dict(kind="adamw", params=resid_params, lr=scalar_lr * 0.01, betas=adam_betas, eps=1e-10, weight_decay=0.0), + dict(kind="adamw", params=x0_params, lr=scalar_lr, betas=(0.96, 0.95), eps=1e-10, weight_decay=0.0), + ] + for shape in sorted({p.shape for p in matrix_params}): + group_params = [p for p in matrix_params if p.shape == shape] + param_groups.append( + dict( + kind="muon", + params=group_params, + lr=matrix_lr, + momentum=0.95, + ns_steps=5, + beta2=0.95, + weight_decay=weight_decay, + ) + ) + optimizer = MuonAdamW(param_groups) + for group in optimizer.param_groups: + group["initial_lr"] = group["lr"] + return optimizer + + def forward( + self, + idx: torch.Tensor, + targets: torch.Tensor | None = None, + reduction: str = "mean", + ) -> torch.Tensor: + _, seq_len = idx.size() + assert seq_len <= self.cos.size(1) + cos_sin = self.cos[:, :seq_len], self.sin[:, :seq_len] + x = self.transformer.wte(idx) + x = norm(x) + x0 = x + for layer_idx, block in enumerate(self.transformer.h): + x = self.resid_lambdas[layer_idx] * x + self.x0_lambdas[layer_idx] * x0 + ve = self.value_embeds[str(layer_idx)](idx) if str(layer_idx) in self.value_embeds else None + x = block(x, ve, cos_sin, self.window_sizes[layer_idx]) + x = norm(x) + logits = self.lm_head(x).float() + softcap = 15 + logits = softcap * torch.tanh(logits / softcap) + if targets is None: + return logits + return 
F.cross_entropy( + logits.view(-1, logits.size(-1)), + targets.view(-1), + ignore_index=-1, + reduction=reduction, + ) + + +def build_model_config( + depth: int, + *, + vocab_size: int, + aspect_ratio: int = 64, + head_dim: int = 128, + window_pattern: str = "SSSL", +) -> GPTConfig: + base_dim = depth * aspect_ratio + model_dim = ((base_dim + head_dim - 1) // head_dim) * head_dim + num_heads = model_dim // head_dim + return GPTConfig( + sequence_len=MAX_SEQ_LEN, + vocab_size=vocab_size, + n_layer=depth, + n_head=num_heads, + n_kv_head=num_heads, + n_embd=model_dim, + window_pattern=window_pattern, + ) + + +def create_model( + config: GPTConfig, + *, + device: torch.device | None = None, + compile_model: bool = True, +) -> tuple[GPT, dict[str, int], float]: + if device is None: + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + with torch.device("meta"): + model = GPT(config) + model.to_empty(device=device) + model.init_weights() + param_counts = model.num_scaling_params() + num_flops_per_token = model.estimate_flops() + if compile_model: + model = torch.compile(model, dynamic=False) + return model, param_counts, num_flops_per_token diff --git a/component_system/components/optimizer.py b/component_system/components/optimizer.py new file mode 100644 index 000000000..227caaea9 --- /dev/null +++ b/component_system/components/optimizer.py @@ -0,0 +1,179 @@ +from __future__ import annotations + +import torch + + +polar_express_coeffs = [ + (8.156554524902461, -22.48329292557795, 15.878769915207462), + (4.042929935166739, -2.808917465908714, 0.5000178451051316), + (3.8916678022926607, -2.772484153217685, 0.5060648178503393), + (3.285753657755655, -2.3681294933425376, 0.46449024233003106), + (2.3465413258596377, -1.7097828382687081, 0.42323551169305323), +] + + +@torch.compile(dynamic=False, fullgraph=True) +def adamw_step_fused( + p: torch.Tensor, + grad: torch.Tensor, + exp_avg: torch.Tensor, + exp_avg_sq: torch.Tensor, + step_t: torch.Tensor, + 
lr_t: torch.Tensor, + beta1_t: torch.Tensor, + beta2_t: torch.Tensor, + eps_t: torch.Tensor, + wd_t: torch.Tensor, +) -> None: + p.mul_(1 - lr_t * wd_t) + exp_avg.lerp_(grad, 1 - beta1_t) + exp_avg_sq.lerp_(grad.square(), 1 - beta2_t) + bias1 = 1 - beta1_t**step_t + bias2 = 1 - beta2_t**step_t + denom = (exp_avg_sq / bias2).sqrt() + eps_t + step_size = lr_t / bias1 + p.add_(exp_avg / denom, alpha=-step_size) + + +@torch.compile(dynamic=False, fullgraph=True) +def muon_step_fused( + stacked_grads: torch.Tensor, + stacked_params: torch.Tensor, + momentum_buffer: torch.Tensor, + second_momentum_buffer: torch.Tensor, + momentum_t: torch.Tensor, + lr_t: torch.Tensor, + wd_t: torch.Tensor, + beta2_t: torch.Tensor, + ns_steps: int, + red_dim: int, +) -> None: + momentum = momentum_t.to(stacked_grads.dtype) + momentum_buffer.lerp_(stacked_grads, 1 - momentum) + g = stacked_grads.lerp_(momentum_buffer, momentum) + x = g.bfloat16() + x = x / (x.norm(dim=(-2, -1), keepdim=True) * 1.02 + 1e-6) + if g.size(-2) > g.size(-1): + for a, b, c in polar_express_coeffs[:ns_steps]: + a_matrix = x.mT @ x + b_matrix = b * a_matrix + c * (a_matrix @ a_matrix) + x = a * x + x @ b_matrix + else: + for a, b, c in polar_express_coeffs[:ns_steps]: + a_matrix = x @ x.mT + b_matrix = b * a_matrix + c * (a_matrix @ a_matrix) + x = a * x + b_matrix @ x + g = x + beta2 = beta2_t.to(g.dtype) + v_mean = g.float().square().mean(dim=red_dim, keepdim=True) + red_dim_size = g.size(red_dim) + v_norm_sq = v_mean.sum(dim=(-2, -1), keepdim=True) * red_dim_size + v_norm = v_norm_sq.sqrt() + second_momentum_buffer.lerp_(v_mean.to(dtype=second_momentum_buffer.dtype), 1 - beta2) + step_size = second_momentum_buffer.clamp_min(1e-10).rsqrt() + scaled_sq_sum = (v_mean * red_dim_size) * step_size.float().square() + v_norm_new = scaled_sq_sum.sum(dim=(-2, -1), keepdim=True).sqrt() + final_scale = step_size * (v_norm / v_norm_new.clamp_min(1e-10)) + g = g * final_scale.to(g.dtype) + lr = lr_t.to(g.dtype) + wd = 
wd_t.to(g.dtype) + mask = (g * stacked_params) >= 0 + stacked_params.sub_(lr * g + lr * wd * stacked_params * mask) + + +class MuonAdamW(torch.optim.Optimizer): + def __init__(self, param_groups: list[dict]) -> None: + super().__init__(param_groups, defaults={}) + self._adamw_step_t = torch.tensor(0.0, dtype=torch.float32, device="cpu") + self._adamw_lr_t = torch.tensor(0.0, dtype=torch.float32, device="cpu") + self._adamw_beta1_t = torch.tensor(0.0, dtype=torch.float32, device="cpu") + self._adamw_beta2_t = torch.tensor(0.0, dtype=torch.float32, device="cpu") + self._adamw_eps_t = torch.tensor(0.0, dtype=torch.float32, device="cpu") + self._adamw_wd_t = torch.tensor(0.0, dtype=torch.float32, device="cpu") + self._muon_momentum_t = torch.tensor(0.0, dtype=torch.float32, device="cpu") + self._muon_lr_t = torch.tensor(0.0, dtype=torch.float32, device="cpu") + self._muon_wd_t = torch.tensor(0.0, dtype=torch.float32, device="cpu") + self._muon_beta2_t = torch.tensor(0.0, dtype=torch.float32, device="cpu") + + def _step_adamw(self, group: dict) -> None: + for p in group["params"]: + if p.grad is None: + continue + grad = p.grad + state = self.state[p] + if not state: + state["step"] = 0 + state["exp_avg"] = torch.zeros_like(p) + state["exp_avg_sq"] = torch.zeros_like(p) + state["step"] += 1 + self._adamw_step_t.fill_(state["step"]) + self._adamw_lr_t.fill_(group["lr"]) + self._adamw_beta1_t.fill_(group["betas"][0]) + self._adamw_beta2_t.fill_(group["betas"][1]) + self._adamw_eps_t.fill_(group["eps"]) + self._adamw_wd_t.fill_(group["weight_decay"]) + adamw_step_fused( + p, + grad, + state["exp_avg"], + state["exp_avg_sq"], + self._adamw_step_t, + self._adamw_lr_t, + self._adamw_beta1_t, + self._adamw_beta2_t, + self._adamw_eps_t, + self._adamw_wd_t, + ) + + def _step_muon(self, group: dict) -> None: + params = group["params"] + if not params: + return + first_param = params[0] + state = self.state[first_param] + num_params = len(params) + shape, device, dtype = 
first_param.shape, first_param.device, first_param.dtype + if "momentum_buffer" not in state: + state["momentum_buffer"] = torch.zeros(num_params, *shape, dtype=dtype, device=device) + if "second_momentum_buffer" not in state: + state_shape = (num_params, shape[-2], 1) if shape[-2] >= shape[-1] else (num_params, 1, shape[-1]) + state["second_momentum_buffer"] = torch.zeros(state_shape, dtype=dtype, device=device) + red_dim = -1 if shape[-2] >= shape[-1] else -2 + stacked_grads = torch.stack([p.grad for p in params]) + stacked_params = torch.stack(params) + self._muon_momentum_t.fill_(group["momentum"]) + self._muon_beta2_t.fill_(group["beta2"] if group["beta2"] is not None else 0.0) + self._muon_lr_t.fill_(group["lr"] * max(1.0, shape[-2] / shape[-1]) ** 0.5) + self._muon_wd_t.fill_(group["weight_decay"]) + muon_step_fused( + stacked_grads, + stacked_params, + state["momentum_buffer"], + state["second_momentum_buffer"], + self._muon_momentum_t, + self._muon_lr_t, + self._muon_wd_t, + self._muon_beta2_t, + group["ns_steps"], + red_dim, + ) + torch._foreach_copy_(params, list(stacked_params.unbind(0))) + + @torch.no_grad() + def step(self) -> None: + for group in self.param_groups: + if group["kind"] == "adamw": + self._step_adamw(group) + elif group["kind"] == "muon": + self._step_muon(group) + + +def create_optimizer(model: torch.nn.Module, settings: object) -> MuonAdamW: + return model.setup_optimizer( + unembedding_lr=settings.unembedding_lr, + embedding_lr=settings.embedding_lr, + matrix_lr=settings.matrix_lr, + weight_decay=settings.weight_decay, + adam_betas=settings.adam_betas, + scalar_lr=settings.scalar_lr, + ) diff --git a/component_system/components/trainer.py b/component_system/components/trainer.py new file mode 100644 index 000000000..fd300348e --- /dev/null +++ b/component_system/components/trainer.py @@ -0,0 +1,191 @@ +from __future__ import annotations + +import gc +import time +from dataclasses import dataclass +from typing import Any + +import 
torch + +from prepare import MAX_SEQ_LEN, TIME_BUDGET, evaluate_bpb, make_dataloader + + +H100_BF16_PEAK_FLOPS = 989.5e12 + + +@dataclass +class TrainingSettings: + aspect_ratio: int = 64 + head_dim: int = 128 + window_pattern: str = "SSSL" + total_batch_size: int = 2**19 + embedding_lr: float = 0.6 + unembedding_lr: float = 0.004 + matrix_lr: float = 0.04 + scalar_lr: float = 0.5 + weight_decay: float = 0.2 + adam_betas: tuple[float, float] = (0.8, 0.95) + warmup_ratio: float = 0.0 + warmdown_ratio: float = 0.5 + final_lr_frac: float = 0.0 + depth: int = 8 + device_batch_size: int = 32 # 24GB vram + seed: int = 42 + compile_model: bool = True + + +def default_training_settings() -> TrainingSettings: + return TrainingSettings() + + +def get_lr_multiplier(progress: float, settings: TrainingSettings) -> float: + if progress < settings.warmup_ratio: + return progress / settings.warmup_ratio if settings.warmup_ratio > 0 else 1.0 + if progress < 1.0 - settings.warmdown_ratio: + return 1.0 + cooldown = (1.0 - progress) / settings.warmdown_ratio + return cooldown + (1 - cooldown) * settings.final_lr_frac + + +def get_muon_momentum(step: int) -> float: + frac = min(step / 300, 1) + return (1 - frac) * 0.85 + frac * 0.95 + + +def get_weight_decay(progress: float, settings: TrainingSettings) -> float: + return settings.weight_decay * (1 - progress) + + +def run_training_session( + *, + model: torch.nn.Module, + optimizer: torch.optim.Optimizer, + tokenizer: Any, + settings: TrainingSettings, + param_counts: dict[str, int], + num_flops_per_token: float, + baseline_binding: dict[str, Any], +) -> dict[str, Any]: + t_start = time.time() + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + autocast_device = "cuda" if device.type == "cuda" else "cpu" + autocast_ctx = torch.amp.autocast(device_type=autocast_device, dtype=torch.bfloat16) + + tokens_per_fwdbwd = settings.device_batch_size * MAX_SEQ_LEN + assert settings.total_batch_size % tokens_per_fwdbwd == 0 + 
grad_accum_steps = settings.total_batch_size // tokens_per_fwdbwd + train_loader = make_dataloader(tokenizer, settings.device_batch_size, MAX_SEQ_LEN, "train") + x, y, epoch = next(train_loader) + + print(f"Vocab size: {tokenizer.get_vocab_size():,}") + print(f"Time budget: {TIME_BUDGET}s") + print(f"Gradient accumulation steps: {grad_accum_steps}") + print("Training session started") + + t_start_training = time.time() + smooth_train_loss = 0.0 + total_training_time = 0.0 + step = 0 + + while True: + if device.type == "cuda": + torch.cuda.synchronize(device=device) + t0 = time.time() + for _ in range(grad_accum_steps): + with autocast_ctx: + loss = model(x, y) + train_loss = loss.detach() + loss = loss / grad_accum_steps + loss.backward() + x, y, epoch = next(train_loader) + + progress = min(total_training_time / TIME_BUDGET, 1.0) + lrm = get_lr_multiplier(progress, settings) + muon_momentum = get_muon_momentum(step) + muon_weight_decay = get_weight_decay(progress, settings) + for group in optimizer.param_groups: + group["lr"] = group["initial_lr"] * lrm + if group["kind"] == "muon": + group["momentum"] = muon_momentum + group["weight_decay"] = muon_weight_decay + + optimizer.step() + model.zero_grad(set_to_none=True) + train_loss_f = train_loss.item() + if train_loss_f > 100: + raise RuntimeError("Training aborted because loss exceeded the fast-fail threshold.") + + torch.cuda.synchronize(device=device) + dt = time.time() - t0 + if step > 10: + total_training_time += dt + + ema_beta = 0.9 + smooth_train_loss = ema_beta * smooth_train_loss + (1 - ema_beta) * train_loss_f + debiased_smooth_loss = smooth_train_loss / (1 - ema_beta ** (step + 1)) + pct_done = 100 * progress + tok_per_sec = int(settings.total_batch_size / dt) + mfu = 100 * num_flops_per_token * settings.total_batch_size / dt / H100_BF16_PEAK_FLOPS + remaining = max(0.0, TIME_BUDGET - total_training_time) + print( + f"\rstep {step:05d} ({pct_done:.1f}%) | loss: {debiased_smooth_loss:.6f} | " + f"lrm: 
{lrm:.2f} | dt: {dt*1000:.0f}ms | tok/sec: {tok_per_sec:,} | " + f"mfu: {mfu:.1f}% | epoch: {epoch} | remaining: {remaining:.0f}s ", + end="", + flush=True, + ) + + if step == 0: + gc.collect() + gc.freeze() + gc.disable() + elif (step + 1) % 5000 == 0: + gc.collect() + + step += 1 + if step > 10 and total_training_time >= TIME_BUDGET: + break + + print() + total_tokens = step * settings.total_batch_size + model.eval() + with autocast_ctx: + val_bpb = evaluate_bpb(model, tokenizer, settings.device_batch_size) + + t_end = time.time() + peak_vram_mb = torch.cuda.max_memory_allocated() / 1024 / 1024 + steady_state_mfu = ( + 100 + * num_flops_per_token + * settings.total_batch_size + * (step - 10) + / total_training_time + / H100_BF16_PEAK_FLOPS + if total_training_time > 0 + else 0.0 + ) + num_params = param_counts["total"] + metrics = { + "val_bpb": float(val_bpb), + "training_seconds": float(total_training_time), + "total_seconds": float(t_end - t_start), + "peak_vram_mb": float(peak_vram_mb), + "mfu_percent": float(steady_state_mfu), + "total_tokens_M": float(total_tokens / 1e6), + "num_steps": int(step), + "num_params_M": float(num_params / 1e6), + "depth": int(settings.depth), + "startup_seconds": float(t_start_training - t_start), + } + + print("---") + print(f"val_bpb: {metrics['val_bpb']:.6f}") + print(f"training_seconds: {metrics['training_seconds']:.1f}") + print(f"total_seconds: {metrics['total_seconds']:.1f}") + print(f"peak_vram_mb: {metrics['peak_vram_mb']:.1f}") + print(f"mfu_percent: {metrics['mfu_percent']:.2f}") + print(f"total_tokens_M: {metrics['total_tokens_M']:.1f}") + print(f"num_steps: {metrics['num_steps']}") + print(f"num_params_M: {metrics['num_params_M']:.1f}") + print(f"depth: {metrics['depth']}") + return metrics diff --git a/component_system/config.py b/component_system/config.py new file mode 100644 index 000000000..9975ab2d2 --- /dev/null +++ b/component_system/config.py @@ -0,0 +1,31 @@ +"""Static configuration for the component 
system. No dynamic or per-run values.""" +from __future__ import annotations + +from pathlib import Path + +COMPONENT_SYSTEM_ROOT = Path(__file__).resolve().parent + +# Module import paths for training (used by mainline assembler) +MODEL_MODULE = "component_system.components.model" +OPTIMIZER_MODULE = "component_system.components.optimizer" +TRAINING_STEP_MODULE = "component_system.components.trainer" + +# Promotion threshold: improve val_bpb by at least this much to promote +PROMOTION_THRESHOLD = 0.001 + +# Worktree root relative to project (string for display/config compatibility) +WORKTREE_ROOT = "component_system/history/worktrees" + +# Default branch name suggested in UI when no branches exist (not a global baseline) +DEFAULT_BASELINE_BRANCH = "master" + + +def get_training_binding() -> dict[str, str | float]: + """Return a static dict used by training mainline/trainer (no baseline_version).""" + return { + "model_module": MODEL_MODULE, + "optimizer_module": OPTIMIZER_MODULE, + "training_step_module": TRAINING_STEP_MODULE, + "promotion_threshold": PROMOTION_THRESHOLD, + "worktree_root": WORKTREE_ROOT, + } diff --git a/component_system/domain/models.py b/component_system/domain/models.py new file mode 100644 index 000000000..f03c9a121 --- /dev/null +++ b/component_system/domain/models.py @@ -0,0 +1,91 @@ +from __future__ import annotations + +from enum import Enum +from typing import Any + +from pydantic import BaseModel, Field + + +class SeedStatus(str, Enum): + draft = "draft" + queued = "queued" + planning = "planning" + generated = "generated" + dca_queued = "dca_queued" + adapting = "adapting" + running = "running" + failed = "failed" + passed = "passed" + promoted = "promoted" + + +class StageName(str, Enum): + p = "p" + dca = "dca" + direct = "direct" + + +class RunStatus(str, Enum): + queued = "queued" + running = "running" + succeeded = "succeeded" + failed = "failed" + + +class PlanIdea(BaseModel): + title: str = "" + target_component: str = "model" + 
description: str = "" + source_refs: list[str] = Field(default_factory=list) + commit_sha: str | None = None + + +class StageRun(BaseModel): + run_id: str + seed_id: str + stage: StageName + status: RunStatus + task_id: str + created_at: float + updated_at: float + log_path: str | None = None + stderr_log_path: str | None = None + prompt_path: str | None = None + summary: dict[str, Any] = Field(default_factory=dict) + metrics: dict[str, Any] = Field(default_factory=dict) + signal: str | None = None + error: str | None = None + + +class SeedRecord(BaseModel): + seed_id: str + prompt: str + status: SeedStatus = SeedStatus.draft + created_at: float + updated_at: float + baseline_branch: str = "baseline" + worktree_path: str | None = None + latest_run_id: str | None = None + ralph_loop_enabled: bool = False + latest_signal: str | None = None + latest_metrics: dict[str, Any] = Field(default_factory=dict) + plan: PlanIdea | None = None + last_error: str | None = None + + +class DashboardColumn(BaseModel): + id: str + title: str + description: str + seeds: list[SeedRecord] + + +class DashboardViewModel(BaseModel): + setup_error: str | None = None + baseline_metrics_by_branch: dict[str, dict[str, object]] = Field(default_factory=dict) + default_baseline_branch: str = "master" + available_branches: list[str] = Field(default_factory=list) + seed_count: int + columns: list[DashboardColumn] + selected_seed: SeedRecord | None = None + daemon_status: str = "stopped" # "running" | "stopped" diff --git a/component_system/entrypoint.py b/component_system/entrypoint.py new file mode 100644 index 000000000..33fc2d426 --- /dev/null +++ b/component_system/entrypoint.py @@ -0,0 +1,18 @@ +"""Standalone entrypoint for the component_system baseline.""" +from __future__ import annotations + +import sys +from pathlib import Path + +if __package__ in {None, ""}: + sys.path.insert(0, str(Path(__file__).resolve().parent.parent)) + +from component_system.training.mainline import 
run_mainline_training + + +def main() -> None: + run_mainline_training() + + +if __name__ == "__main__": + main() diff --git a/component_system/package.json b/component_system/package.json new file mode 100644 index 000000000..5ae45136d --- /dev/null +++ b/component_system/package.json @@ -0,0 +1,13 @@ +{ + "name": "autoresearch-component-system-ui", + "private": true, + "scripts": { + "build:css": "tailwindcss -i ./web/static/tailwind.input.css -o ./web/static/app.css --minify", + "watch:css": "tailwindcss -i ./web/static/tailwind.input.css -o ./web/static/app.css --watch" + }, + "devDependencies": { + "autoprefixer": "^10.4.20", + "postcss": "^8.4.49", + "tailwindcss": "^3.4.17" + } +} diff --git a/component_system/postcss.config.js b/component_system/postcss.config.js new file mode 100644 index 000000000..5cbc2c7d8 --- /dev/null +++ b/component_system/postcss.config.js @@ -0,0 +1,6 @@ +module.exports = { + plugins: { + tailwindcss: {}, + autoprefixer: {} + } +}; diff --git a/component_system/protocol.md b/component_system/protocol.md new file mode 100644 index 000000000..2b1c1d87d --- /dev/null +++ b/component_system/protocol.md @@ -0,0 +1,332 @@ +# autoresearch — Component-System Protocol + +This document is the operating manual for the component-system workflow. +The system runs a continuous Seed -> P -> DCA loop to discover, generate, +adapt, evaluate, and promote improvements to the training stack. + +The main objective is simple: improve `val_bpb` against the current baseline +without breaking the canonical component-system entrypoint or introducing +unreasonable complexity. + +VRAM is a first-class constraint. Higher memory use is acceptable only when the +quality gain is meaningful; avoid candidates that produce small or ambiguous +`val_bpb` gains while causing large memory growth. 
+ +## Top-Level Bootstrap Rule + +If you are an interactive code agent that was merely told to "follow this +protocol", do not manually simulate the entire workflow inside one foreground +session. + +The intended control flow is: +1. Read this file and the required context files. +2. Ensure the queue and state layout exist. +3. Create or refine a seed from a human prompt. +4. Queue that seed into `component_system/history/queue/p/`. +5. Start the resident daemon with `uv run component_system/run.py`. +6. Let the daemon workers execute P and DCA through file-based handoff. +7. Monitor the daemon, queue, and logs instead of roleplaying stage work yourself. + +Manual execution of an individual stage is only for the agent process that was +invoked by the daemon for that specific task. + +## Architecture + +```text +component_system/ + protocol.md <- overall workflow protocol + entrypoint.py <- canonical training entrypoint + PDCA-PLAN.md <- P stage rules + PDCA-DO-CHECK-ACTION.md <- DCA stage rules + run.py <- resident daemon and worker dispatch + task.py <- queue and JSON state helpers + baseline_branches.json <- per-branch baseline mapping (workflow-managed; read-only) + baseline_metrics.json <- baseline run metrics (workflow-managed; read-only) + config.py <- promotion threshold and static binding + history/ <- runtime dir (auto-created) + logs/ <- agent stdout/stderr logs + queue/{p,dca,done,error}/ <- stage handoff and archival + state/{seeds,runs,events}/<- durable workflow state + worktrees/ <- per-seed git worktrees + components/ + model.py + optimizer.py + trainer.py + training/ + mainline.py +``` + +## Core Goal and Decision Rule + +Optimize for lower `val_bpb`. A candidate is worth promoting only when the gain +is real, the implementation is understandable, and the cost in memory or +complexity is justified. + +Apply this bias consistently: +- Lower `val_bpb` is the primary success metric. 
+- VRAM is a soft but important constraint: some increase is acceptable, but + dramatic growth needs correspondingly strong quality gains. +- Simpler changes are preferred when results are similar. +- A tiny gain that adds brittle complexity is usually not worth promotion. +- A tiny gain that materially increases VRAM is usually not worth promotion. +- A simplification that preserves or slightly improves quality is a strong outcome. +- If the signal is ambiguous, treat it as `neutral` and do not promote. + +## Required Reading Before Any Work + +Read in this order: +1. `component_system/protocol.md` +2. The stage-specific document (right after protocol): `component_system/PDCA-DO-CHECK-ACTION.md` for DCA, `component_system/PDCA-PLAN.md` for P +3. `prepare.py` for fixed data and evaluation behavior; never modify it +4. `component_system/entrypoint.py` for the canonical execution path +5. `component_system/config.py` for promotion threshold and static binding + +Baseline reference files (workflow-managed; read-only): `component_system/baseline_branches.json` (per-branch baseline mapping), `component_system/baseline_metrics.json` (baseline run metrics). The workflow writes these; only read them for context. + +For interactive bootstrap, also inspect the current queue/state situation, +especially recent items in `queue/done/` and the latest baseline information. + +## Workspace and Path Rules + +When the daemon invokes you for a P or DCA task, your current working directory +is the seed worktree. In that mode: + +- Read and edit only within the seed worktree. +- Use only relative paths from the current working directory. +- Do not request or depend on absolute paths or files outside the worktree. + +## Hard Constraints + +1. Never modify `prepare.py`. +2. `uv run component_system/entrypoint.py` must remain the canonical, + working component-system training command. +3. 
The root repo must stay compatible with the upstream implementation; + do not require changes to root `train.py`. +4. Stage-to-stage handoff must happen through files under `queue/`, not + merely in memory or only in agent conversation state. +5. Only the DCA promotion flow may update `baseline_metrics.json` and `baseline_branches.json`. +6. Do not bypass the baseline mechanism by manually merging branches or + force-advancing the baseline outside workflow control. + +## Baseline-First Rule + +The first meaningful measurement in a fresh component-system run is the +baseline result. + +- If `baseline_metrics.json` has no `last_val_bpb` for the baseline branch, the system should establish a + baseline run before evaluating ordinary seeds. +- The baseline seed is a "no changes" measurement of the currently bound + component modules. +- Treat that first baseline result as the reference point for later promotion + decisions. + +This mirrors the root-project rule that the first run should establish the +baseline before experimenting. 
+ +```mermaid +flowchart TD + A[Create seed] --> B{Baseline result exists?} + B -- No --> C[Create or reuse __baseline__ seed] + C --> D[Queue baseline DCA] + D --> E[Run baseline measurement from project root] + E --> F[Save baseline metrics in baseline_metrics.json] + F --> G[Release waiting seeds] + B -- Yes --> G + G --> H[Seed stays in draft or queued with no worktree] + H --> I[Queue P run] + I --> J[Create seed worktree at P start] + J --> K[P agent plans and commits on seed branch] + K --> L[Queue DCA run] + L --> M[DCA agent adapts, runs training, and reports metrics] + M --> N{Promotion signal?} + N -- Positive --> O[Merge seed branch into baseline] + O --> P{Merge conflict?} + P -- No --> Q[Update baseline metadata and finish seed] + P -- Yes --> R[Queue conflict-resolution DCA] + R --> M + N -- Neutral or Negative --> S[Keep result in state only] +``` + +## Workflow Stages + +The sections below describe what each daemon-dispatched stage worker does. +They are not instructions for a top-level interactive agent to perform the +entire lifecycle manually. + +### P — Discovery / Plan / Initial Generation + +Read `component_system/PDCA-PLAN.md`. + +Responsibilities: +- Refine the seed prompt into a concrete plan. +- Create or refresh the seed worktree from the active baseline. +- Generate the first candidate implementation in the worktree. +- Keep the change focused enough that DCA can evaluate it cleanly. +- Commit the generated candidate on the seed branch so DCA receives a stable snapshot. + +P is about producing a plausible, testable first version, not claiming success. + +### DCA — Delivery / Check / Action + +Read `component_system/PDCA-DO-CHECK-ACTION.md`. + +Responsibilities: +- Adapt and fix the generated candidate inside the seed worktree. +- Run the canonical training/evaluation entrypoint. +- Read the structured metrics from the run output. +- Decide whether the result is positive, neutral, or negative relative to baseline. 
+- Promote the seed branch into baseline only when the signal is strong enough. + +DCA is the stage that turns a raw idea into a measured outcome. + +## Canonical Run and Output + +The canonical component-system execution path is: + +```bash +uv run component_system/entrypoint.py +``` + +When the DCA agent runs this (e.g. in a sandbox or tool), the run needs **at least 600 seconds** (first step ~150s + training budget 300s + buffer); use e.g. `timeout 600 uv run ...` so the execution environment does not kill the process early. + +The DCA agent must report a structured JSON summary between the required +markers, including a `metrics` object. The runner uses that structured report +first and only falls back to parsing stdout/stderr when the JSON metrics are +missing. If the initial DCA summary still lacks metrics, the system queues a +follow-up recovery DCA that inspects the saved logs before declaring failure. +The canonical metrics are: + +```text +--- +val_bpb: 0.997900 +training_seconds: 300.1 +total_seconds: 325.9 +peak_vram_mb: 45060.2 +mfu_percent: 39.80 +total_tokens_M: 499.6 +num_steps: 953 +num_params_M: 50.3 +depth: 8 +startup_seconds: 25.8 +``` + +Treat `val_bpb` as the primary metric. `peak_vram_mb`, total runtime, and code +complexity are secondary constraints that influence promotion decisions. + +## VRAM Rule + +Track `peak_vram_mb` on every serious evaluation run and treat it as required +decision input, not a cosmetic metric. + +- Some VRAM growth is acceptable when it buys a clear `val_bpb` improvement. +- Large VRAM increases require a correspondingly strong quality gain. +- If two candidates are similar on `val_bpb`, prefer the lower-VRAM one. +- If a candidate regresses or barely improves `val_bpb` while increasing VRAM + substantially, treat it as a bad trade and do not promote it. +- Avoid changes that risk blowing up memory usage unless the expected upside is + compelling enough to justify the experiment. 
+ +## Promotion Rule + +A run is promotable only if all of the following hold: +- The run completed successfully. +- `val_bpb` improved enough over the active baseline to count as a real win. +- VRAM growth is not unreasonable for the magnitude of the gain. +- The change is understandable, maintainable, and reversible. + +If the candidate is equal, worse, noisy, or hard to justify, do not promote it. +Record the outcome and move on. + +## Failure Handling + +Use the same judgment standard as the original autoresearch loop: + +- If a run crashes because of a simple bug, fix it, rerun, and update the same + run record. +- If the idea is fundamentally flawed, archive it without promotion. +- If the task cannot be recovered quickly, move it into the error flow and + persist the failure details. +- Crashes are negative evidence; they should not silently disappear. + +## Bootstrap Procedure for Interactive Sessions + +When a human starts a fresh interactive session and asks you to use this +component system, do this: + +1. Read `baseline_branches.json`, `baseline_metrics.json`, and recent queue/state outputs. +2. Ensure the queue/state/worktree layout exists. +3. Create an initial seed from the human prompt. +4. Queue P for that seed. +5. Start `uv run component_system/run.py`. +6. Monitor the daemon and logs instead of manually executing P and DCA yourself. + +## Operating Loop + +Once the daemon is running, the queue-driven loop is: + +1. A seed is persisted under `state/seeds/` and queued to `queue/p/`. +2. P creates or refreshes the seed worktree from baseline, generates code, and + commits on the seed branch. +3. The daemon automatically queues DCA. +4. DCA adapts, checks, runs, evaluates, and either promotes or archives the seed. +5. The system persists runs and events under `state/` and continues with the + next available work. 
+ +The system is intended to behave like an autonomous researcher: keep moving, +measure results, retain wins, discard losses, and continue until explicitly +stopped. + +## State and Logging + +The durable record of the workflow lives in files: + +- `state/seeds/` stores seed definitions and status. +- `state/runs/` stores stage-run metadata and run outcomes. +- `state/events/` stores seed event histories. +- `queue/done/` archives completed tasks. +- `queue/error/` captures failed tasks. +- `logs/` stores stdout/stderr from agent invocations. + +Do not rely on chat context as the source of truth when the filesystem state +already records the workflow. + +## Daemon + +The resident daemon in `component_system/run.py` manages two single-threaded +worker pools that poll `queue/{p,dca}/` continuously. Each worker dispatches a +task to an external code agent, which reads files, modifies code in a git +worktree, runs the canonical entrypoint, and prints structured summaries for +the runner to persist. + +Start the daemon with: + +```bash +# Default backend +uv run component_system/run.py + +# Alternate backends +PDCA_AGENT=codex uv run component_system/run.py +PDCA_AGENT=opencode uv run component_system/run.py +``` + +### Agent Backends + +| `PDCA_AGENT` | CLI invoked | Prompt delivery | +|--------------|-------------|-----------------| +| `claude` (default) | `claude -p --verbose` | stdin | +| `codex` | `codex exec -a never --sandbox workspace-write` | positional arg | +| `opencode` | `opencode run` | positional arg | + +### Timeouts + +Each stage has a default timeout in seconds and can be overridden through the +environment: + +| Variable | Default | Purpose | +|----------|---------|---------| +| `PDCA_TIMEOUT_P` | 900 | Planning and initial code generation | +| `PDCA_TIMEOUT_DCA` | 3600 | Adaptation, training, evaluation, and promotion | + +### Logs + +Agent stdout/stderr for every invocation is saved to `component_system/history/logs/`. 
diff --git a/component_system/repositories/state.py b/component_system/repositories/state.py new file mode 100644 index 000000000..7ff3ab2fd --- /dev/null +++ b/component_system/repositories/state.py @@ -0,0 +1,75 @@ +from __future__ import annotations + +from typing import Any + +from component_system.domain.models import SeedRecord, StageRun +from component_system.task import ( + append_event, + list_runs, + list_seeds, + load_baseline_branch_map, + load_baseline_metrics, + load_events, + load_run, + load_seed, + save_baseline_branch_map, + save_baseline_metrics, + save_run, + save_seed, +) + + +class BaselineBranchMapRepository: + """Per-seed baseline branch mapping (seed_id -> baseline_branch).""" + + def set_branch_for_seed(self, seed_id: str, branch: str) -> None: + m = load_baseline_branch_map() + m[seed_id] = branch + save_baseline_branch_map(m) + + +class BaselineMetricsRepository: + """Per-baseline-branch metrics (last_val_bpb, promoted_*, etc.).""" + + def get_all(self) -> dict[str, dict[str, Any]]: + return load_baseline_metrics() + + def get_for_branch(self, branch: str) -> dict[str, Any] | None: + return load_baseline_metrics().get(branch) + + def update_for_branch(self, branch: str, metrics: dict[str, Any]) -> None: + data = load_baseline_metrics() + data[branch] = {**data.get(branch, {}), **metrics} + save_baseline_metrics(data) + + +class SeedRepository: + def list(self) -> list[SeedRecord]: + return [SeedRecord.model_validate(seed) for seed in list_seeds()] + + def get(self, seed_id: str) -> SeedRecord | None: + data = load_seed(seed_id) + return SeedRecord.model_validate(data) if data else None + + def save(self, seed: SeedRecord) -> SeedRecord: + save_seed(seed.model_dump(mode="json")) + return seed + + def append_event(self, seed_id: str, kind: str, message: str, **payload: Any) -> list[dict[str, Any]]: + return append_event(seed_id, {"kind": kind, "message": message, **payload}) + + def events(self, seed_id: str) -> list[dict[str, Any]]: + 
return load_events(seed_id) + + +class RunRepository: + def list(self, seed_id: str | None = None) -> list[StageRun]: + return [StageRun.model_validate(run) for run in list_runs(seed_id)] + + def get(self, run_id: str) -> StageRun | None: + data = load_run(run_id) + return StageRun.model_validate(data) if data else None + + def save(self, run: StageRun) -> StageRun: + save_run(run.model_dump(mode="json")) + return run diff --git a/component_system/run.py b/component_system/run.py new file mode 100644 index 000000000..4a927ffb8 --- /dev/null +++ b/component_system/run.py @@ -0,0 +1,779 @@ +"""Seed -> P -> DCA daemon for the component-system web app.""" +from __future__ import annotations + +if __package__ in {None, ""}: + import sys + from pathlib import Path + + sys.path.insert(0, str(Path(__file__).resolve().parent.parent)) + +import json +import os +import shutil +import signal +import subprocess +import sys +import threading +import time +import traceback +from concurrent.futures import ThreadPoolExecutor +from pathlib import Path +from typing import Any + +from component_system.domain.models import StageName +from component_system.services.workflow import BASELINE_SEED_ID, WorkflowService +from component_system.task import ( + BASELINE_BRANCHES_PATH, + BASELINE_METRICS_PATH, + COMPONENT_SYSTEM_ROOT, + claim_pending, + DAEMON_HEARTBEAT_PATH, + daemon_heartbeat, + ensure_queue_layout, + LOG_ROOT, + move_to_done, + read_task, + restore_in_progress_tasks, +) + +PROJECT_ROOT = COMPONENT_SYSTEM_ROOT.parent +LOG_DIR = LOG_ROOT +RESULTS_TSV = PROJECT_ROOT / "results.tsv" +PROGRESS_PNG = PROJECT_ROOT / "progress.png" + +POLL_INTERVAL = 10.0 +_shutdown = False +WORKFLOW = WorkflowService() + +DEFAULT_TIMEOUTS = {"p": 900, "dca": 3600, "direct": 3600} + +STAGE_DOCS = { + "p": ["PDCA-PLAN.md"], + "dca": ["PDCA-DO-CHECK-ACTION.md"], +} + +AGENT_CONFIGS: dict[str, dict[str, Any]] = { + "claude": {"cmd": ["claude", "-p", "--verbose"], "via": "stdin"}, + "codex": {"cmd": 
["codex", "exec", "-a", "never", "--sandbox", "workspace-write"], "via": "arg"}, + "opencode": {"cmd": ["opencode", "run"], "via": "arg"}, +} + + +def _signal_handler(_sig: int, _frame: Any) -> None: + global _shutdown + _shutdown = True + print("\n[daemon] shutdown requested") + + +def _get_timeout(stage: str) -> int: + return int(os.environ.get(f"PDCA_TIMEOUT_{stage.upper()}", DEFAULT_TIMEOUTS.get(stage, 600))) + + +def _build_log_paths(run_id: str) -> tuple[Path, Path]: + LOG_DIR.mkdir(parents=True, exist_ok=True) + stdout_path = LOG_DIR / f"{run_id}.stdout.log" + stderr_path = LOG_DIR / f"{run_id}.stderr.log" + return stdout_path, stderr_path + + +def _write_prompt_file(run_id: str, prompt: str) -> Path: + """Save the agent prompt to a file for debugging. Returns the path.""" + LOG_DIR.mkdir(parents=True, exist_ok=True) + prompt_path = LOG_DIR / f"{run_id}.prompt.txt" + prompt_path.write_text(prompt, encoding="utf-8") + return prompt_path + + +def _is_root_venv_active() -> bool: + expected = (PROJECT_ROOT / ".venv").resolve() + active = os.environ.get("VIRTUAL_ENV") + if not active: + return False + try: + return Path(active).resolve() == expected + except OSError: + return False + + +def _dca_command_guidance() -> tuple[str, str]: + if _is_root_venv_active(): + return ( + "uv run --active component_system/entrypoint.py", + "Root .venv is active; use --active to reuse it from the worktree.", + ) + return ( + "uv run component_system/entrypoint.py", + "No active root .venv detected; fallback avoids --active so uv can run normally.", + ) + + +def _build_direct_code_prompt(prompt: str) -> str: + return ( + "You are running as a direct code agent from the project root of this repository.\n" + "Execute the user's request directly in the current working tree.\n" + "Do not switch into seed worktrees for this task.\n\n" + "User request:\n" + f"{prompt.strip()}\n" + ) + + +def _stream_pipe_to_file(pipe: Any, handle: Any, chunks: list[str]) -> None: + try: + while True: 
+ piece = pipe.read(16) + if not piece: + break + chunks.append(piece) + handle.write(piece) + handle.flush() + finally: + try: + pipe.close() + except Exception: + pass + + +def _combined_output(stdout: str, stderr: str) -> str: + if stdout and stderr: + return f"{stdout}\n{stderr}" + return stdout or stderr + + +def _agent_failure_reason(exit_code: int, stdout: str, stderr: str) -> str: + combined = _combined_output(stdout, stderr) + if "timeout after " in combined: + return combined.strip().splitlines()[-1] + if exit_code == -1: + if combined.strip(): + return combined.strip().splitlines()[-1] + return "Agent execution failed before completion. See stdout/stderr logs for details." + return f"Agent exited with code {exit_code}. See stdout/stderr logs for details." + + +def _should_salvage_completed_dca(stage: str, exit_code: int, output_text: str) -> bool: + """Accept a DCA run when canonical metrics were printed despite agent exit issues.""" + if stage != "dca" or exit_code == 0: + return False + summary = WORKFLOW.extract_summary(output_text, StageName.dca) or {} + metrics = WORKFLOW.extract_dca_metrics(output_text, summary) + return metrics.get("val_bpb") is not None + + +def _agent_cwd(worktree_path: str | None) -> str: + """Resolve cwd for the agent: seed worktree when provided and present, else project root.""" + if not worktree_path: + return str(PROJECT_ROOT) + path = Path(worktree_path) + if not path.is_absolute(): + path = PROJECT_ROOT / path + resolved = path.resolve() + return str(resolved) if resolved.is_dir() else str(PROJECT_ROOT) + + +def _resolve_worktree_path(worktree_path: str | None) -> Path | None: + """Resolve worktree path to absolute Path, or None if invalid/missing.""" + if not worktree_path: + return None + path = Path(worktree_path) + if not path.is_absolute(): + path = PROJECT_ROOT / path + resolved = path.resolve() + return resolved if resolved.is_dir() else None + + +def _sync_results_tsv_into_worktree(worktree_path: str | None) -> 
None: + """Copy the latest root results.tsv into the seed worktree if it exists. Non-fatal on failure.""" + resolved = _resolve_worktree_path(worktree_path) + if resolved is None or not RESULTS_TSV.exists(): + return + dest = resolved / "results.tsv" + try: + shutil.copy2(RESULTS_TSV, dest) + except OSError as err: + print(f"[P] could not copy results.tsv into worktree: {err}", file=sys.stderr) + + +def _sync_baseline_json_into_worktree(worktree_path: str | None) -> None: + """Copy baseline_metrics.json and baseline_branches.json from project component_system into the worktree. + Worktrees check out from baseline-branch; without this sync the agent would see stale or missing baseline data.""" + resolved = _resolve_worktree_path(worktree_path) + if resolved is None: + return + dest_dir = resolved / "component_system" + dest_dir.mkdir(parents=True, exist_ok=True) + for src_path, name in [ + (BASELINE_METRICS_PATH, "baseline_metrics.json"), + (BASELINE_BRANCHES_PATH, "baseline_branches.json"), + ]: + if not src_path.exists(): + continue + dest = dest_dir / name + try: + shutil.copy2(src_path, dest) + except OSError as err: + print(f"[P] could not copy {name} into worktree: {err}", file=sys.stderr) + + +def _sync_worktree_context(worktree_path: str | None) -> None: + """Sync all workflow-managed live data into the worktree so the agent sees current state. + Call before invoking the agent when cwd is a worktree (P or DCA).""" + _sync_results_tsv_into_worktree(worktree_path) + _sync_baseline_json_into_worktree(worktree_path) + + +def _invoke_agent( + prompt: str, stage: str, run_id: str, worktree_path: str | None = None +) -> tuple[int, str, str, Path | None, Path | None]: + agent_name = os.environ.get("PDCA_AGENT", "claude") + config = AGENT_CONFIGS.get(agent_name) + if config is None: + raise ValueError(f"Unknown PDCA_AGENT={agent_name!r}. 
Supported: {', '.join(AGENT_CONFIGS)}") + + cmd = list(config["cmd"]) + timeout = _get_timeout(stage) + cwd = _agent_cwd(worktree_path) + # PYTHONUNBUFFERED=1 so child Python (e.g. uv run entrypoint.py) flushes stdout + # immediately instead of block-buffering when stdout is a pipe; otherwise + # stdout log only appears in one shot after the task finishes. + env = {**os.environ, "PYTHONUNBUFFERED": "1"} + if agent_name == "opencode": + project_root_glob = str(PROJECT_ROOT.resolve().as_posix()) + "/**" + existing = {} + try: + if os.environ.get("OPENCODE_PERMISSION"): + existing = json.loads(os.environ["OPENCODE_PERMISSION"]) + except (json.JSONDecodeError, KeyError): + pass + ext_dir = dict(existing.get("external_directory", {})) + ext_dir[project_root_glob] = "allow" + env["OPENCODE_PERMISSION"] = json.dumps({"external_directory": ext_dir}) + popen_kwargs: dict[str, Any] = { + "cwd": cwd, + "env": env, + "stdout": subprocess.PIPE, + "stderr": subprocess.PIPE, + "text": True, + "encoding": "utf-8", + "errors": "replace", + "bufsize": 1, + } + if config["via"] == "stdin": + popen_kwargs["stdin"] = subprocess.PIPE + else: + # Use DEVNULL so the agent never reads from parent's stdin (avoids EBADF under nohup/redirects). + popen_kwargs["stdin"] = subprocess.DEVNULL + cmd.append(prompt) + + print(f"[{stage.upper()}] invoking {agent_name} (timeout={timeout}s)") + stdout_path, stderr_path = _build_log_paths(run_id) + try: + process = subprocess.Popen(cmd, **popen_kwargs) + except FileNotFoundError: + msg = f"{agent_name!r} binary not found. Install it or set PDCA_AGENT to a different backend." 
+ return -1, "", msg, None, None + + if config["via"] == "stdin" and process.stdin is not None: + process.stdin.write(prompt) + process.stdin.close() + + stdout_chunks: list[str] = [] + stderr_chunks: list[str] = [] + with open(stdout_path, "w", encoding="utf-8") as stdout_handle, open( + stderr_path, "w", encoding="utf-8" + ) as stderr_handle: + stdout_handle.write(f"stage: {stage.upper()}\nagent: {agent_name}\n") + stdout_handle.write(f"timestamp: {time.strftime('%Y%m%d-%H%M%S')}\n\n") + stdout_handle.flush() + stderr_handle.write(f"stage: {stage.upper()}\nagent: {agent_name}\n") + stderr_handle.write(f"timestamp: {time.strftime('%Y%m%d-%H%M%S')}\n\n") + stderr_handle.flush() + + stdout_thread = threading.Thread( + target=_stream_pipe_to_file, + args=(process.stdout, stdout_handle, stdout_chunks), + daemon=True, + ) + stderr_thread = threading.Thread( + target=_stream_pipe_to_file, + args=(process.stderr, stderr_handle, stderr_chunks), + daemon=True, + ) + stdout_thread.start() + stderr_thread.start() + + timed_out = False + try: + process.wait(timeout=timeout) + except subprocess.TimeoutExpired: + timed_out = True + process.kill() + + stdout_thread.join() + stderr_thread.join() + + stdout = "".join(stdout_chunks) + stderr = "".join(stderr_chunks) + if timed_out: + timeout_message = f"timeout after {timeout}s" + if stderr: + stderr = f"{stderr}\n{timeout_message}" + else: + stderr = timeout_message + return -1, stdout, stderr, stdout_path, stderr_path + + return process.returncode, stdout, stderr, stdout_path, stderr_path + + +def _build_merge_resolution_prompt(task: dict[str, Any]) -> str: + """Lightweight prompt for merge-resolution DCA: no protocol/docs, just commit, merge, report.""" + task_json = json.dumps(task, indent=2) + target_branch = task.get("baseline_branch", "master") # branch we want to merge into (e.g. 
master) + worktree_path = task.get("worktree_path") or "" + seed_id = task.get("seed_id", "") + last_metrics = task.get("last_metrics") or {} + last_summary = task.get("last_summary") or {} + notes = last_summary.get("notes", "Merge resolution: committed and merged into baseline.") + completed_at = last_summary.get("completed_at", "YYYY-MM-DD HH:MM:SS") + report_json = json.dumps({ + "checks": ["merge_resolution"], + "notes": notes, + "completed_at": completed_at, + "commit_sha": "", + "metrics": last_metrics, + }, indent=2) + + if seed_id == BASELINE_SEED_ID: + # We are resolving the merge of __baseline__ INTO target_branch (e.g. master). + # git merge X = merge X into current branch; so we need to be on target_branch, then git merge __baseline__. + cwd_note = ( + "Your working directory is the project root (main repo). " + "Do NOT run the merge from the __baseline__ worktree: that would merge the wrong way.\n\n" + ) + steps = ( + "Steps:\n" + f"1. Find where {target_branch!r} is checked out: run git worktree list and identify the path whose branch is {target_branch!r} (often the main repo).\n" + f"2. cd to that directory, then run: git merge {BASELINE_SEED_ID!r}. Resolve any conflicts, then commit the merge.\n" + f" Correct example (merge __baseline__ into {target_branch}):\n" + f" git worktree list\n" + f" cd # e.g. main repo\n" + f" git merge {BASELINE_SEED_ID!r}\n" + " Wrong (do not do this): cd to the __baseline__ worktree and run git merge master — that merges master into __baseline__.\n" + "3. Do not run the training entrypoint; the experiment already completed and metrics exist.\n" + "4. Print the DCA summary block below (same metrics as the previous run). Include the current commit SHA (after committing the merge) in the DCA summary JSON.\n\n" + ) + else: + # Normal seed: merge the baseline branch (__baseline__) INTO the seed worktree so the seed is up to date. + if worktree_path: + cwd_note = ( + "Your working directory is the project root. 
" + f"The seed worktree is at {worktree_path!r}; run git commands from that directory (e.g. cd there first).\n\n" + ) + else: + cwd_note = ( + "Your working directory is the project root. " + f"The seed worktree is at component_system/history/worktrees/{seed_id!r}; run git commands from that directory for the merge.\n\n" + ) + steps = ( + "Steps:\n" + "1. Commit any uncommitted changes in the seed worktree (e.g. batch-size or other fixes).\n" + f"2. In the seed worktree, merge the baseline branch into the current branch: git merge {BASELINE_SEED_ID!r}. Resolve any conflicts, then commit the merge.\n" + "3. Do not run the training entrypoint; the experiment already completed and metrics exist.\n" + "4. Print the DCA summary block below (same metrics as the previous run). Include the current commit SHA (after committing the merge) in the DCA summary JSON.\n\n" + ) + + return ( + "MERGE RESOLUTION (focused task). Do not read protocol or stage docs.\n\n" + "Task (inline):\n" + f"{task_json}\n\n" + f"{cwd_note}" + f"{steps}" + "AUTORESEARCH_DCA_SUMMARY_BEGIN\n" + f"{report_json}\n" + "AUTORESEARCH_DCA_SUMMARY_END\n" + ) + + +def _build_prompt(stage: str, task: dict[str, Any], task_path: Path) -> str: + """Build the agent prompt for a stage. Prompt types (by weight): + - P: full header (protocol, stage doc, baseline files, task) + P workflow. Heavy. + - DCA metrics_recovery: full header + log-recovery instructions. Heavy. + - DCA merge_resolution: lightweight; task + commit, merge, report (no protocol/docs). Light. + - DCA baseline_measurement: full header + baseline retry/OOM/commit/run. Heavy. + - DCA normal: full header + adapt/run/commit/report. Heavy. 
+ """ + task_json = json.dumps(task, indent=2) + rel_task = task_path.relative_to(PROJECT_ROOT).as_posix() + worktree_path = task.get("worktree_path", "component_system/history/worktrees") + agent_cwd = _agent_cwd(worktree_path) + worktree_dir = Path(agent_cwd) + + # Worktree runs must stay entirely within the copied seed workspace to avoid external_directory requests. + if worktree_dir.resolve() != PROJECT_ROOT.resolve(): + context_protocol = " - component_system/protocol.md" + docs = "\n".join(f" - component_system/{doc}" for doc in STAGE_DOCS[stage]) + task_block = ( + "Task content (provided inline; do not look up any external task file):\n" + f"{task_json}\n\n" + ) + worktree_note = ( + "Your working directory is the assigned workflow worktree (your current directory).\n" + "All required file context is already copied into this worktree under component_system/.\n" + "Use only paths relative to your current working directory. " + "Do not request access to absolute paths, parent-directory paths, or files outside the worktree.\n" + ) + else: + context_protocol = " - component_system/protocol.md" + docs = "\n".join(f" - component_system/{doc}" for doc in STAGE_DOCS[stage]) + task_path_rel = f" - {rel_task}" + task_block = f"Task file:\n{task_path_rel}\n\nTask content:\n{task_json}\n\n" + worktree_note = "Your working directory is the project root.\n" + + required_context = ( + "Required context (read first; paths relative to your cwd):\n" + f" - component_system/protocol.md\n" + f"{docs}\n" + ) + baseline_files_note = ( + "Baseline reference files (workflow-managed; read-only):\n" + " - component_system/baseline_branches.json (per-branch baseline mapping)\n" + " - component_system/baseline_metrics.json (baseline run metrics)\n" + "The workflow writes these; only read them for context.\n\n" + ) + header = ( + "You are working on the autoresearch component-system workflow.\n\n" + f"{required_context}\n" + f"{baseline_files_note}" + f"{task_block}" + 
f"{worktree_note}" + "Do not edit files outside the worktree unless the prompt explicitly requires it.\n\n" + ) + + if stage == "p": + return header + ( + "You are the P stage.\n\n" + "## Read results.tsv first (avoid idea duplication)\n" + "Before choosing a hypothesis, read `results.tsv` in your cwd if it exists. " + "Use it to avoid proposing ideas already tried or discarded; only repeat an idea if you have a clear new angle (e.g. different implementation or target component). " + "See component_system/PDCA-PLAN.md for full guidance.\n\n" + "Workflow:\n" + "1. Refine the seed prompt into a concrete implementation idea.\n" + "2. Implement the first generated version of that idea in the provided worktree.\n" + "3. Create a git commit in the seed branch (current branch in the worktree).\n" + "4. Print a JSON summary between these exact markers:\n" + "AUTORESEARCH_P_SUMMARY_BEGIN\n" + '{"idea":"...","target_component":"model|optimizer|trainer","description":"...","source_refs":["..."],"commit_sha":"...","completed_at":"YYYY-MM-DD HH:MM:SS"}\n' + "AUTORESEARCH_P_SUMMARY_END\n" + "One branch per seed: you are already on the seed branch in the worktree.\n" + "Do not merge branches; only the DCA stage may trigger a merge into baseline.\n" + ) + if stage == "dca": + merge_resolution = task.get("merge_resolution") is True + metrics_recovery = task.get("metrics_recovery") is True + if merge_resolution: + return _build_merge_resolution_prompt(task) + dca_cmd, dca_note = _dca_command_guidance() + baseline_measurement = task.get("seed_id") == "__baseline__" + conflict_block = "" + if metrics_recovery: + source_run_id = task.get("source_run_id", "unknown") + stdout_log = task.get("source_stdout_log_path", "missing") + stderr_log = task.get("source_stderr_log_path", "missing") + return header + ( + "METRICS RECOVERY: The previous DCA run completed, but the runner could not confirm metrics from its final report.\n" + "Do not rerun training. Do not edit code. 
Do not create a commit.\n" + f"Inspect the saved logs for source run {source_run_id!r}:\n" + f"- stdout log: {stdout_log}\n" + f"- stderr log: {stderr_log}\n" + "Recover the canonical metrics from those logs if they are present, then print the final JSON summary.\n" + "Use this exact shape:\n" + "AUTORESEARCH_DCA_SUMMARY_BEGIN\n" + '{"checks":["log_metrics_recovery"],"notes":"Recovered metrics from saved logs.","completed_at":"YYYY-MM-DD HH:MM:SS","commit_sha":"","metrics":{"val_bpb":1.239972,"training_seconds":300.1,"total_seconds":360.4,"startup_seconds":25.8,"peak_vram_mb":11967.8,"mfu_percent":2.15,"total_tokens_M":140.5,"num_steps":268,"num_params_M":11.5,"depth":4}}\n' + "AUTORESEARCH_DCA_SUMMARY_END\n" + "If you still cannot recover metrics, print the same object with an empty metrics object and explain why in notes.\n" + ) + if baseline_measurement: + return header + conflict_block + ( + "BASELINE MEASUREMENT: establish the first reference metrics in the dedicated baseline worktree.\n" + "You must retry until the run completes successfully and you can report real metrics. Do not report empty metrics and stop.\n" + "If training fails with CUDA out of memory (OOM): the default batch size is for H100. Reduce device_batch_size (and if needed total_batch_size) in component_system/components/trainer.py (TrainingSettings) so training fits in available VRAM, then rerun until the baseline run completes. Only trivial execution fixes (e.g. batch size) are allowed; do not change model architecture or training logic.\n" + "If you modified any files (e.g. batch size for OOM), you must commit those changes on the baseline branch before reporting. An uncommitted worktree causes the follow-up merge to fail.\n" + f"Run the canonical command: {dca_cmd}\n" + f"({dca_note})\n" + "Report the final result in JSON between these exact markers once training has completed successfully. 
Include the current commit SHA in the summary (commit any changes first).\n" + "AUTORESEARCH_DCA_SUMMARY_BEGIN\n" + '{"checks":["baseline_measurement"],"notes":"Measured the current baseline in the dedicated baseline worktree.","completed_at":"YYYY-MM-DD HH:MM:SS","commit_sha":"...","metrics":{"val_bpb":1.239972,"training_seconds":300.1,"total_seconds":360.4,"startup_seconds":25.8,"peak_vram_mb":11967.8,"mfu_percent":2.15,"total_tokens_M":140.5,"num_steps":268,"num_params_M":11.5,"depth":4}}\n' + "AUTORESEARCH_DCA_SUMMARY_END\n" + "If after all retries (including batch size reduction) metrics are still unavailable, only then print the same object with an empty metrics object and explain in notes.\n" + ) + return header + conflict_block + ( + "You are the DCA stage.\n" + "Do not put forward new ideas or optimize for better metrics. Your only goal is to make the P-stage code run and report the result. " + '"Adapt or fix" means: fix bugs, import/runtime errors, OOM (e.g. reduce batch size), and config/path issues only. ' + "Do not change model architecture, optimizer logic, hyperparameters, or training logic to improve results. " + "The task \"prompt\" is for context only; do not treat it as a goal to achieve in this stage.\n\n" + "Workflow:\n" + "1. Adapt or fix the generated code in the seed worktree until it runs.\n" + f"2. Run the canonical command: {dca_cmd}\n" + f" ({dca_note})\n" + "3. If it fails for a simple reason, fix and rerun.\n" + "4. Create a git commit in the seed branch for your changes.\n" + "5. Report the final result in JSON between these exact markers. 
Include the current commit SHA in the summary.\n" + " Use this exact shape and include numeric metric values when available:\n" + "AUTORESEARCH_DCA_SUMMARY_BEGIN\n" + '{"checks":["entrypoint"],"notes":"what you adapted or fixed","completed_at":"YYYY-MM-DD HH:MM:SS","commit_sha":"...","metrics":{"val_bpb":1.239972,"training_seconds":300.1,"total_seconds":360.4,"startup_seconds":25.8,"peak_vram_mb":11967.8,"mfu_percent":2.15,"total_tokens_M":140.5,"num_steps":268,"num_params_M":11.5,"depth":4}}\n' + "AUTORESEARCH_DCA_SUMMARY_END\n" + " Do not omit the markers. Prefer this exact JSON report over prose. If metrics are unavailable,\n" + " still print the same object with an empty metrics object.\n" + "Do not edit baseline_branches.json or baseline_metrics.json (workflow writes them; read only). Do not merge branches yourself; the system will evaluate and promote if appropriate.\n" + ) + raise ValueError(f"Unknown stage: {stage}") + + +def _append_results_tsv(seed_id: str, run_metrics: dict[str, Any], signal: str, description: str) -> None: + status = "KEEP" if signal == "positive_signal" else "DISCARD" + val_bpb = run_metrics.get("val_bpb", "") + peak_vram_mb = run_metrics.get("peak_vram_mb", 0) + memory_gb = round(float(peak_vram_mb) / 1024, 2) if peak_vram_mb else "" + write_header = not RESULTS_TSV.exists() + with open(RESULTS_TSV, "a", encoding="utf-8") as handle: + if write_header: + handle.write("commit\tval_bpb\tmemory_gb\tstatus\tdescription\n") + handle.write(f"{seed_id}\t{val_bpb}\t{memory_gb}\t{status}\t{description}\n") + + +def _regenerate_progress_png() -> None: + try: + import matplotlib + + matplotlib.use("Agg") + import matplotlib.pyplot as plt + import pandas as pd + except ImportError: + return + + if not RESULTS_TSV.exists(): + return + + try: + df = pd.read_csv(RESULTS_TSV, sep="\t") + df["val_bpb"] = pd.to_numeric(df["val_bpb"], errors="coerce") + df["memory_gb"] = pd.to_numeric(df["memory_gb"], errors="coerce") + df["status"] = 
df["status"].str.strip().str.upper() + valid = df[df["val_bpb"].notna()].copy().reset_index(drop=True) + if valid.empty: + return + + baseline_bpb = valid.loc[0, "val_bpb"] + kept = valid[valid["status"] == "KEEP"] + best = float(kept["val_bpb"].min()) if not kept.empty else float(baseline_bpb) + + fig, ax = plt.subplots(figsize=(14, 7)) + ax.scatter(valid.index, valid["val_bpb"], c="#94a3b8", s=18, alpha=0.6, label="Runs") + if not kept.empty: + ax.scatter(kept.index, kept["val_bpb"], c="#38bdf8", s=42, label="Promoted") + ax.step(kept.index, kept["val_bpb"].cummin(), where="post", color="#0ea5e9", linewidth=2) + ax.set_xlabel("Experiment #") + ax.set_ylabel("Validation BPB (lower is better)") + ax.set_title("Component System Progress") + margin = (baseline_bpb - best) * 0.15 if baseline_bpb != best else 0.005 + ax.set_ylim(best - margin, float(baseline_bpb) + margin) + ax.grid(True, alpha=0.2) + ax.legend(loc="upper right") + plt.tight_layout() + plt.savefig(PROGRESS_PNG, dpi=150, bbox_inches="tight") + plt.close(fig) + except Exception: + traceback.print_exc() + + +def _worker(stage: str) -> None: + print(f"[daemon] worker-{stage.upper()} started") + while not _shutdown: + task_path = claim_pending(stage) + if task_path is None: + time.sleep(POLL_INTERVAL) + continue + + try: + task = read_task(task_path) + seed_id = task["seed_id"] + run_id = task["run_id"] + started_seed = None + if stage == "direct": + started_seed, _ = WORKFLOW.mark_direct_code_run_started(seed_id, run_id) + else: + started_seed, _ = WORKFLOW.mark_run_started(seed_id, run_id) + if ( + stage == "dca" + and task.get("metrics_recovery") is not True + ): + started_seed = WORKFLOW.ensure_seed_worktree_ready(seed_id) + print(f"[{stage.upper()}] picked up {task['task_id']} for {seed_id}") + + worktree_path = task.get("worktree_path") + if started_seed is not None and started_seed.worktree_path is not None: + worktree_path = started_seed.worktree_path + # Merge-resolution DCA runs from project root 
so the agent can operate on repo and worktrees + if stage == "dca" and ( + task.get("merge_resolution") is True or task.get("metrics_recovery") is True + ): + worktree_path = None + + if worktree_path: + _sync_worktree_context(worktree_path) + + if stage == "direct": + prompt = _build_direct_code_prompt(task["prompt"]) + else: + prompt = _build_prompt(stage, task, task_path) + prompt_path = _write_prompt_file(run_id, prompt) + prompt_path_str = str(prompt_path) + exit_code, stdout, stderr, stdout_log_path, stderr_log_path = _invoke_agent( + prompt, stage, run_id, worktree_path=worktree_path + ) + + combined_output = _combined_output(stdout, stderr) + salvaged_dca = _should_salvage_completed_dca(stage, exit_code, combined_output) + if exit_code == 0 or salvaged_dca: + if stage == "p": + WORKFLOW.finish_p_run( + seed_id, + run_id, + stdout, + str(stdout_log_path) if stdout_log_path else None, + str(stderr_log_path) if stderr_log_path else None, + prompt_path_str, + ) + elif stage == "direct": + WORKFLOW.finish_direct_code_run( + seed_id, + run_id, + stdout, + stderr=stderr, + log_path=str(stdout_log_path) if stdout_log_path else None, + stderr_log_path=str(stderr_log_path) if stderr_log_path else None, + prompt_path=prompt_path_str, + ) + else: + run = WORKFLOW.finish_dca_run( + seed_id, + run_id, + stdout, + stderr=stderr, + log_path=str(stdout_log_path) if stdout_log_path else None, + stderr_log_path=str(stderr_log_path) if stderr_log_path else None, + prompt_path=prompt_path_str, + metrics_recovery=task.get("metrics_recovery") is True, + merge_resolution=task.get("merge_resolution") is True, + ) + if not run.summary.get("metrics_recovery_queued"): + description = run.summary.get("notes") or run.summary.get("idea") or seed_id + _append_results_tsv(seed_id, run.metrics, run.signal or "error", str(description)) + _regenerate_progress_png() + if salvaged_dca: + WORKFLOW.seed_repo.append_event( + seed_id, + "dca.salvaged", + f"DCA output contained final metrics, so the 
run was accepted despite agent exit code {exit_code}.", + run_id=run_id, + ) + move_to_done(task_path) + print(f"[{stage.upper()}] task {task['task_id']} done") + else: + if stage == "direct": + WORKFLOW.mark_direct_code_run_failed( + seed_id, + run_id, + _agent_failure_reason(exit_code, stdout, stderr), + task_path=task_path, + prompt_path=prompt_path_str, + log_path=str(stdout_log_path) if stdout_log_path else None, + stderr_log_path=str(stderr_log_path) if stderr_log_path else None, + ) + else: + WORKFLOW.mark_run_failed( + seed_id, + run_id, + _agent_failure_reason(exit_code, stdout, stderr), + task_path=task_path, prompt_path=prompt_path_str, + ) + print(f"[{stage.upper()}] task {task['task_id']} failed") + except Exception as exc: + traceback.print_exc() + try: + task = read_task(task_path) + prompt_path_str = None + run_id = task.get("run_id") + if run_id: + p_path = LOG_DIR / f"{run_id}.prompt.txt" + if p_path.exists(): + prompt_path_str = str(p_path) + if stage == "direct": + WORKFLOW.mark_direct_code_run_failed( + task["seed_id"], + task["run_id"], + str(exc), + task_path=task_path, + prompt_path=prompt_path_str, + ) + else: + WORKFLOW.mark_run_failed( + task["seed_id"], task["run_id"], str(exc), + task_path=task_path, prompt_path=prompt_path_str, + ) + except Exception: + traceback.print_exc() + + print(f"[daemon] worker-{stage.upper()} stopped") + + +def main() -> None: + global _shutdown + signal.signal(signal.SIGINT, _signal_handler) + if sys.platform != "win32": + signal.signal(signal.SIGTERM, _signal_handler) + + ensure_queue_layout() + restored = restore_in_progress_tasks() + total_restored = sum(restored.values()) + if total_restored: + print( + "[daemon] restored in_progress tasks " + f"(p={restored['p']}, dca={restored['dca']}, direct={restored['direct']})" + ) + daemon_heartbeat() + agent = os.environ.get("PDCA_AGENT", "claude") + print(f"[daemon] starting component-system daemon — agent={agent}, workers=P/DCA/DIRECT") + + pools: 
list[ThreadPoolExecutor] = []
+    for stage in ("p", "dca", "direct"):
+        worker_count = 2 if stage == "p" else 1
+        pool = ThreadPoolExecutor(max_workers=worker_count, thread_name_prefix=f"pdca-{stage}")
+        pools.append(pool)
+        for _ in range(worker_count):
+            pool.submit(_worker, stage)
+
+    last_heartbeat = time.monotonic()
+    try:
+        while not _shutdown:
+            time.sleep(1.0)
+            if not _shutdown and (time.monotonic() - last_heartbeat) >= 5.0:
+                daemon_heartbeat()
+                last_heartbeat = time.monotonic()
+    except KeyboardInterrupt:
+        pass
+    finally:
+        _shutdown = True
+        if DAEMON_HEARTBEAT_PATH.exists():
+            try:
+                DAEMON_HEARTBEAT_PATH.unlink()
+            except OSError:
+                pass
+        for pool in pools:
+            pool.shutdown(wait=True)
+
+    print("[daemon] all workers stopped")
+
+
+if __name__ == "__main__":
+    main()
diff --git a/component_system/services/workflow.py b/component_system/services/workflow.py
new file mode 100644
index 000000000..a79ec064d
--- /dev/null
+++ b/component_system/services/workflow.py
@@ -0,0 +1,1355 @@
+from __future__ import annotations
+
+import json
+from typing import Any
+import re
+import subprocess
+from pathlib import Path
+
+from component_system.config import DEFAULT_BASELINE_BRANCH, PROMOTION_THRESHOLD
+from component_system.domain.models import (
+    DashboardColumn,
+    DashboardViewModel,
+    PlanIdea,
+    RunStatus,
+    SeedRecord,
+    SeedStatus,
+    StageName,
+    StageRun,
+)
+from component_system.repositories.state import (
+    BaselineBranchMapRepository,
+    BaselineMetricsRepository,
+    RunRepository,
+    SeedRepository,
+)
+from component_system.task import (
+    COMPONENT_SYSTEM_ROOT,
+    WORKTREE_ROOT,
+    get_daemon_status,
+    move_to_error,
+    now_ts,
+    new_run_id,
+    new_seed_id,
+    write_task,
+)
+
+SUMMARY_MARKERS = {
+    "p": ("AUTORESEARCH_P_SUMMARY_BEGIN", "AUTORESEARCH_P_SUMMARY_END"),
+    "dca": ("AUTORESEARCH_DCA_SUMMARY_BEGIN", "AUTORESEARCH_DCA_SUMMARY_END"),
+}
+
+BASELINE_SEED_ID = "__baseline__"
+
+
+class GitCommandError(RuntimeError):
+    pass
+
+
+class GitService: 
def __init__(self) -> None: + pass + + def _run_git(self, *args: str, cwd: Path | None = None) -> str: + try: + result = subprocess.run( + ["git", *args], + cwd=str(cwd) if cwd else None, + capture_output=True, + text=True, + check=True, + ) + except FileNotFoundError as exc: + raise GitCommandError("Git is not installed or not available on PATH.") from exc + except subprocess.CalledProcessError as exc: + stderr = (exc.stderr or exc.stdout or "").strip() + raise GitCommandError(stderr or f"git {' '.join(args)} failed") from exc + return result.stdout.strip() + + def repo_root(self) -> Path: + return Path(self._run_git("rev-parse", "--show-toplevel")) + + def current_head(self) -> str: + return self._run_git("rev-parse", "HEAD") + + def branch_exists(self, branch: str) -> bool: + try: + self._run_git("rev-parse", "--verify", branch) + return True + except GitCommandError: + return False + + def ensure_branch(self, branch: str, start_point: str) -> None: + if not self.branch_exists(branch): + self._run_git("branch", branch, start_point) + + def list_branches(self) -> list[str]: + output = self._run_git("branch", "--format=%(refname:short)") + branches = [line.strip() for line in output.splitlines() if line.strip()] + if not branches: + # Unborn repositories can have HEAD pointing to a branch name even before first commit. + try: + head_branch = self._run_git("symbolic-ref", "--short", "HEAD").strip() + if head_branch: + branches.append(head_branch) + except GitCommandError: + pass + return sorted(set(branches)) + + @staticmethod + def is_seed_specific_branch(branch: str) -> bool: + """True if this branch is the single working branch for a seed (seed_id), not a baseline choice.""" + if branch == BASELINE_SEED_ID: + return True + # One branch per seed: seed- + 6 hex chars, e.g. 
seed-e57b95 + if branch.startswith("seed-") and len(branch) == 11 and all( + c in "abcdef0123456789" for c in branch[5:] + ): + return True + if branch.startswith("seed/"): + return True # legacy candidate branches, e.g. seed/seed-e57b95 + return False + + def setup_error(self) -> str | None: + try: + self.repo_root() + return None + except GitCommandError as exc: + return str(exc) + + def setup_error_for_branches(self, baseline_branch: str) -> str | None: + try: + root = self.repo_root() + if not baseline_branch: + return "Please select a baseline branch." + if not self.branch_exists(baseline_branch): + return ( + f"Git repo found at {root}, but branch {baseline_branch!r} does not exist yet. " + "Select an existing baseline branch." + ) + return None + except GitCommandError as exc: + return str(exc) + + def ensure_seed_worktrees(self, seed: SeedRecord) -> SeedRecord: + """Ensure the seed worktree exists on the single branch for this seed: seed_id (SSOT).""" + repo_head = self.current_head() + self.ensure_branch(seed.baseline_branch, repo_head) + + seed_worktree = WORKTREE_ROOT / seed.seed_id + if seed_worktree.exists(): + seed.worktree_path = str(seed_worktree) + return seed + # One branch per seed: branch name = seed_id, created from baseline. 
+ try: + self._run_git("worktree", "add", "-B", seed.seed_id, str(seed_worktree), seed.baseline_branch) + except GitCommandError as exc: + # Recover from stale git worktree metadata like: + # "__baseline__ is already checked out at /old/path/__baseline__" + if not self._recover_checked_out_worktree_conflict( + seed.seed_id, seed_worktree, seed.baseline_branch, str(exc) + ): + raise + + seed.worktree_path = str(seed_worktree) + return seed + + @staticmethod + def _extract_checked_out_path(error: str) -> Path | None: + # git message example: fatal: '__baseline__' is already checked out at '/path' + match = re.search(r"already checked out at ['\"]([^'\"]+)['\"]", error) + if not match: + return None + return Path(match.group(1)) + + def _recover_checked_out_worktree_conflict( + self, branch: str, target_worktree: Path, start_point: str, error: str + ) -> bool: + if "already checked out at" not in error: + return False + # First, prune stale registrations from missing worktrees. + try: + self._run_git("worktree", "prune") + except GitCommandError: + pass + conflict_path = self._extract_checked_out_path(error) + if conflict_path is not None and conflict_path != target_worktree: + # If the conflicting worktree still exists, force-remove it from registry. + try: + self._run_git("worktree", "remove", "--force", str(conflict_path)) + except GitCommandError: + pass + try: + self._run_git("worktree", "prune") + except GitCommandError: + pass + self._run_git("worktree", "add", "-B", branch, str(target_worktree), start_point) + return True + + def commit_sha(self, ref: str) -> str: + return self._run_git("rev-parse", "--short", ref) + + def head_sha_at(self, cwd: Path) -> str: + """Return the short commit SHA of HEAD in the given worktree directory.""" + return self._run_git("rev-parse", "--short", "HEAD", cwd=cwd) + + def reset_seed_branch_to(self, seed: SeedRecord, ref: str) -> None: + """Reset the seed worktree's branch to the given ref (e.g. commit before P). 
+ No-op for baseline seed or when worktree is missing.""" + if seed.seed_id == BASELINE_SEED_ID: + return + if not seed.worktree_path: + return + worktree_path = Path(seed.worktree_path) + if not worktree_path.is_dir(): + return + self._run_git("reset", "--hard", ref, cwd=worktree_path) + + def promote_seed_branch( + self, seed: SeedRecord, target_branch: str | None = None + ) -> str: + """Merge the seed's branch (seed_id) into the target branch. Only DCA Action may call this; Plan must never merge. + If target_branch is None, use seed.baseline_branch (e.g. for normal seed promotion). For __baseline__ completion, + pass the first user seed's selected branch so the merge goes there instead of a fixed config value.""" + merge_into = target_branch if target_branch is not None else seed.baseline_branch + baseline_worktree = WORKTREE_ROOT / "baseline" + if baseline_worktree.exists(): + try: + self._run_git("worktree", "remove", "--force", str(baseline_worktree)) + except GitCommandError: + pass + self._run_git( + "worktree", + "add", + "--force", + "-B", + merge_into, + str(baseline_worktree), + merge_into, + ) + self._run_git("merge", "--no-edit", seed.seed_id, cwd=baseline_worktree) + return self.commit_sha(merge_into) + + +class WorkflowService: + def __init__( + self, + seed_repo: SeedRepository | None = None, + run_repo: RunRepository | None = None, + branch_map_repo: BaselineBranchMapRepository | None = None, + metrics_repo: BaselineMetricsRepository | None = None, + git_service: GitService | None = None, + ) -> None: + self.seed_repo = seed_repo or SeedRepository() + self.run_repo = run_repo or RunRepository() + self.branch_map_repo = branch_map_repo or BaselineBranchMapRepository() + self.metrics_repo = metrics_repo or BaselineMetricsRepository() + self.git_service = git_service or GitService() + + @staticmethod + def _seed_worktree_path(seed_id: str) -> str: + return str(WORKTREE_ROOT / seed_id) + + @staticmethod + def _baseline_worktree_path() -> str: + return 
str(WORKTREE_ROOT / BASELINE_SEED_ID) + + def _normalize_seed_runtime_state(self, seed: SeedRecord) -> SeedRecord: + """Clean up legacy persisted seed state that no longer matches runtime rules.""" + if seed.seed_id != BASELINE_SEED_ID: + return seed + expected_worktree = self._baseline_worktree_path() + if seed.worktree_path == expected_worktree: + return seed + seed.worktree_path = expected_worktree + seed.updated_at = now_ts() + self.seed_repo.save(seed) + return seed + + def ensure_seed_worktree_ready(self, seed_id: str) -> SeedRecord: + """Ensure the runtime seed worktree exists; recreate only when missing.""" + seed = self.require_seed(seed_id) + if seed.seed_id == BASELINE_SEED_ID: + expected_worktree = self._baseline_worktree_path() + if Path(expected_worktree).is_dir(): + if seed.worktree_path != expected_worktree: + seed.worktree_path = expected_worktree + seed.updated_at = now_ts() + self.seed_repo.save(seed) + return seed + seed = self.git_service.ensure_seed_worktrees(seed) + seed.updated_at = now_ts() + self.seed_repo.save(seed) + commit_sha = "" + try: + commit_sha = self.git_service.commit_sha(seed.baseline_branch) + except GitCommandError: + pass + self.seed_repo.append_event( + seed.seed_id, + "seed.worktree_ready", + "Recreated missing baseline worktree before the run started.", + commit_sha=commit_sha or None, + ) + return seed + expected_worktree = self._seed_worktree_path(seed.seed_id) + if Path(expected_worktree).is_dir(): + if seed.worktree_path != expected_worktree: + seed.worktree_path = expected_worktree + seed.updated_at = now_ts() + self.seed_repo.save(seed) + return seed + seed = self.git_service.ensure_seed_worktrees(seed) + seed.updated_at = now_ts() + self.seed_repo.save(seed) + commit_sha = "" + try: + commit_sha = self.git_service.commit_sha(seed.seed_id) + except GitCommandError: + pass + self.seed_repo.append_event( + seed.seed_id, + "seed.worktree_ready", + "Recreated missing seed worktree before the run started.", + 
commit_sha=commit_sha or None, + ) + return seed + + def _preferred_baseline_branch(self) -> str: + setup_error = self.git_service.setup_error() + if setup_error is not None: + return DEFAULT_BASELINE_BRANCH + try: + branches = [ + branch + for branch in self.git_service.list_branches() + if not self.git_service.is_seed_specific_branch(branch) + ] + except GitCommandError: + return DEFAULT_BASELINE_BRANCH + if branches and DEFAULT_BASELINE_BRANCH in branches: + return DEFAULT_BASELINE_BRANCH + return branches[0] if branches else DEFAULT_BASELINE_BRANCH + + def _first_user_seed_baseline_branch(self) -> str | None: + """Return the baseline_branch of the earliest-created user seed (excluding __baseline__), or None.""" + user_seeds = [s for s in self.seed_repo.list() if s.seed_id != BASELINE_SEED_ID] + if not user_seeds: + return None + first = min(user_seeds, key=lambda s: s.created_at) + return first.baseline_branch or None + + def _enqueue_plan_run(self, seed: SeedRecord, event_kind: str = "p.queued", event_message: str = "Queued Plan stage for the seed.") -> StageRun: + run = StageRun( + run_id=new_run_id("p"), + seed_id=seed.seed_id, + stage=StageName.p, + status=RunStatus.queued, + task_id=new_run_id("task-p"), + created_at=now_ts(), + updated_at=now_ts(), + ) + seed.status = SeedStatus.queued + seed.updated_at = now_ts() + seed.latest_run_id = run.run_id + seed.last_error = None + self.seed_repo.save(seed) + self.run_repo.save(run) + self.seed_repo.append_event(seed.seed_id, event_kind, event_message) + write_task( + "p", + { + "seed_id": seed.seed_id, + "run_id": run.run_id, + "prompt": seed.prompt, + "worktree_path": seed.worktree_path, + }, + task_id=run.task_id, + ) + return run + + def _release_seeds_waiting_for_baseline(self, branch: str) -> None: + """Release seeds that were waiting for baseline result on the given branch.""" + branch_metrics = self.metrics_repo.get_for_branch(branch) + if not branch_metrics or branch_metrics.get("last_val_bpb") is None: 
+ return + waiting_seeds = sorted(self.seed_repo.list(), key=lambda item: item.created_at) + for seed in waiting_seeds: + if seed.seed_id == BASELINE_SEED_ID: + continue + if seed.baseline_branch != branch: + continue + if seed.status is not SeedStatus.queued or seed.latest_run_id is not None: + continue + self._enqueue_plan_run( + seed, + event_kind="p.released", + event_message="Baseline is ready; queued Plan stage for the waiting seed.", + ) + + @staticmethod + def _status_from_dca_signal(signal: str) -> SeedStatus: + """Centralized mapping from DCA signal to terminal seed status.""" + if signal == "positive_signal": + return SeedStatus.promoted + if signal == "error": + return SeedStatus.failed + return SeedStatus.passed + + def _reconcile_seed_status_signal(self, seed: SeedRecord) -> bool: + """ + Auto-heal known inconsistent terminal combinations from historical data. + + Returns True when the seed was updated and persisted. + """ + if seed.status is SeedStatus.passed and seed.latest_signal == "error": + seed.status = SeedStatus.failed + seed.updated_at = now_ts() + self.seed_repo.save(seed) + self.seed_repo.append_event( + seed.seed_id, + "seed.reconciled", + "Reconciled inconsistent terminal state (passed + error) to failed.", + ) + return True + return False + + def create_seed( + self, + prompt: str, + baseline_branch: str | None = None, + ralph_loop_enabled: bool = False, + ) -> SeedRecord: + seed_id = new_seed_id() + selected_baseline = (baseline_branch or DEFAULT_BASELINE_BRANCH).strip() + seed = SeedRecord( + seed_id=seed_id, + prompt=prompt.strip(), + status=SeedStatus.draft, + created_at=now_ts(), + updated_at=now_ts(), + baseline_branch=selected_baseline, + worktree_path=self._seed_worktree_path(seed_id), + ralph_loop_enabled=ralph_loop_enabled, + ) + self.seed_repo.save(seed) + self.branch_map_repo.set_branch_for_seed(seed_id, selected_baseline) + try: + pass # branch seed_id is created when Plan is queued (ensure_seed_worktrees) + except 
GitCommandError: + # Keep seed creation non-blocking; branch creation will be retried at P queue time. + pass + self.seed_repo.append_event(seed.seed_id, "seed.created", "Seed created from prompt.") + if ralph_loop_enabled: + self.seed_repo.append_event( + seed.seed_id, + "ralph.enabled", + "Ralph loop enabled; Plan will auto-requeue after each DCA completion.", + ) + return seed + + def create_direct_code_seed(self, prompt: str) -> tuple[SeedRecord, StageRun]: + cleaned_prompt = prompt.strip() + if not cleaned_prompt: + raise RuntimeError("Prompt cannot be empty.") + baseline_branch = self._preferred_baseline_branch() + seed_id = new_seed_id("direct") + now = now_ts() + run = StageRun( + run_id=new_run_id("direct"), + seed_id=seed_id, + stage=StageName.direct, + status=RunStatus.queued, + task_id=new_run_id("task-direct"), + created_at=now, + updated_at=now, + ) + seed = SeedRecord( + seed_id=seed_id, + prompt=cleaned_prompt, + status=SeedStatus.adapting, + created_at=now, + updated_at=now, + baseline_branch=baseline_branch, + worktree_path=str(COMPONENT_SYSTEM_ROOT.parent), + latest_run_id=run.run_id, + plan=PlanIdea( + title="Direct code agent", + target_component="project_root", + description="Direct code agent run requested from the dashboard and executed from the project root.", + ), + ) + self.seed_repo.save(seed) + self.branch_map_repo.set_branch_for_seed(seed_id, baseline_branch) + self.run_repo.save(run) + self.seed_repo.append_event(seed.seed_id, "seed.created", "Seed created from direct code agent prompt.") + self.seed_repo.append_event( + seed.seed_id, + "direct_code.queued", + "Queued direct code agent run from the project root.", + run_id=run.run_id, + ) + write_task( + "direct", + { + "seed_id": seed.seed_id, + "run_id": run.run_id, + "prompt": seed.prompt, + "worktree_path": None, + }, + task_id=run.task_id, + ) + return seed, run + + def _get_or_create_baseline_seed(self) -> SeedRecord: + """Return the baseline seed used to establish initial 
val_bpb; create and persist it if missing.""" + seed = self.seed_repo.get(BASELINE_SEED_ID) + if seed is not None: + return self._normalize_seed_runtime_state(seed) + branch = self._first_user_seed_baseline_branch() or DEFAULT_BASELINE_BRANCH + seed = SeedRecord( + seed_id=BASELINE_SEED_ID, + prompt="Baseline measurement: run training on current code without changes.", + status=SeedStatus.draft, + created_at=now_ts(), + updated_at=now_ts(), + baseline_branch=branch, + worktree_path=self._baseline_worktree_path(), + ralph_loop_enabled=False, + ) + self.seed_repo.save(seed) + self.branch_map_repo.set_branch_for_seed(BASELINE_SEED_ID, branch) + self.seed_repo.append_event( + seed.seed_id, + "seed.created", + "Baseline seed created for initial measurement.", + ) + return seed + + def ensure_baseline_result(self) -> None: + """ + If there is no baseline result (last_val_bpb) for the baseline seed's branch, ensure a baseline seed exists and + queue its DCA so the first run establishes the baseline. Idempotent; safe to call + before queue_p for any user seed. 
+ """ + seed = self._get_or_create_baseline_seed() + branch_metrics = self.metrics_repo.get_for_branch(seed.baseline_branch) + if branch_metrics and branch_metrics.get("last_val_bpb") is not None: + return + if seed.status in (SeedStatus.dca_queued, SeedStatus.adapting, SeedStatus.running): + return + if seed.status in (SeedStatus.passed, SeedStatus.failed, SeedStatus.promoted): + branch_metrics = self.metrics_repo.get_for_branch(seed.baseline_branch) + if branch_metrics and branch_metrics.get("last_val_bpb") is not None: + return + setup_error = self.git_service.setup_error() + if setup_error is not None: + return + try: + self.git_service.ensure_branch(seed.baseline_branch, self.git_service.current_head()) + except GitCommandError: + return + setup_error = self.git_service.setup_error_for_branches(seed.baseline_branch) + if setup_error is not None: + return + seed.status = SeedStatus.generated + seed.plan = PlanIdea(title="Baseline", description="No changes; measure current baseline.") + seed.updated_at = now_ts() + self.seed_repo.save(seed) + self.seed_repo.append_event( + seed.seed_id, + "baseline.queued", + "Queued DCA to establish baseline result before first seed.", + ) + self.queue_dca(seed.seed_id) + + def set_ralph_loop(self, seed_id: str, enabled: bool) -> SeedRecord: + seed = self.require_seed(seed_id) + if seed.ralph_loop_enabled == enabled: + return seed + seed.ralph_loop_enabled = enabled + seed.updated_at = now_ts() + self.seed_repo.save(seed) + if enabled: + self.seed_repo.append_event( + seed.seed_id, + "ralph.enabled", + "Ralph loop enabled; Plan will auto-requeue after each DCA completion.", + ) + else: + self.seed_repo.append_event(seed.seed_id, "ralph.disabled", "Ralph loop disabled by user.") + return seed + + def can_edit_seed_prompt(self, seed: SeedRecord) -> bool: + return seed.status in {SeedStatus.draft, SeedStatus.queued} + + def update_seed_prompt(self, seed_id: str, prompt: str) -> SeedRecord: + seed = self.require_seed(seed_id) + if 
not self.can_edit_seed_prompt(seed): + raise RuntimeError("Seed prompt can only be edited before Plan starts.") + updated_prompt = prompt.strip() + if not updated_prompt: + raise RuntimeError("Prompt cannot be empty.") + if updated_prompt == seed.prompt: + return seed + seed.prompt = updated_prompt + seed.updated_at = now_ts() + self.seed_repo.save(seed) + self.seed_repo.append_event(seed.seed_id, "seed.updated", "Seed prompt was edited before execution.") + return seed + + def queue_p(self, seed_id: str) -> StageRun | None: + seed = self.require_seed(seed_id) + branch_metrics = self.metrics_repo.get_for_branch(seed.baseline_branch) if seed_id != BASELINE_SEED_ID else None + has_baseline = branch_metrics is not None and branch_metrics.get("last_val_bpb") is not None + if seed_id != BASELINE_SEED_ID and not has_baseline: + self.ensure_baseline_result() + branch_metrics = self.metrics_repo.get_for_branch(seed.baseline_branch) + has_baseline = branch_metrics is not None and branch_metrics.get("last_val_bpb") is not None + if not has_baseline: + if not (seed.status is SeedStatus.queued and seed.latest_run_id is None): + seed.status = SeedStatus.queued + seed.updated_at = now_ts() + seed.latest_run_id = None + seed.last_error = None + self.seed_repo.save(seed) + self.seed_repo.append_event( + seed.seed_id, + "p.waiting_for_baseline", + "Baseline run is still in progress; Plan will queue after baseline finishes.", + ) + return None + setup_error = self.git_service.setup_error() + if setup_error is not None: + raise RuntimeError(setup_error) + try: + self.git_service.ensure_branch(seed.baseline_branch, self.git_service.current_head()) + except GitCommandError: + pass + setup_error = self.git_service.setup_error_for_branches(seed.baseline_branch) + if setup_error is not None: + raise RuntimeError(setup_error) + return self._enqueue_plan_run(seed) + + def queue_dca( + self, + seed_id: str, + merge_resolution: bool = False, + metrics_recovery: bool = False, + source_run_id: 
str | None = None, + source_stdout_log_path: str | None = None, + source_stderr_log_path: str | None = None, + last_metrics: dict[str, Any] | None = None, + last_summary: dict[str, Any] | None = None, + restore_ref: str | None = None, + ) -> StageRun: + seed = self.require_seed(seed_id) + if not metrics_recovery and seed.status in {SeedStatus.draft, SeedStatus.queued, SeedStatus.planning}: + raise RuntimeError("Run Plan first. Do-Check-Action is available after code is generated into the seed branch.") + if not metrics_recovery: + setup_error = self.git_service.setup_error_for_branches(seed.baseline_branch) + if setup_error is not None: + raise RuntimeError(setup_error) + run = StageRun( + run_id=new_run_id("dca"), + seed_id=seed.seed_id, + stage=StageName.dca, + status=RunStatus.queued, + task_id=new_run_id("task-dca"), + created_at=now_ts(), + updated_at=now_ts(), + ) + if seed.seed_id != BASELINE_SEED_ID: + try: + # Ref to restore worktree to on negative signal (commit before P when from finish_p_run, else baseline). + run.summary["restore_ref"] = ( + restore_ref + if restore_ref is not None + else self.git_service.commit_sha(seed.baseline_branch) + ) + except GitCommandError: + pass + seed.status = SeedStatus.dca_queued + seed.updated_at = now_ts() + seed.latest_run_id = run.run_id + seed.last_error = None + self.seed_repo.save(seed) + self.run_repo.save(run) + self.seed_repo.append_event( + seed.seed_id, + "dca.queued", + "Queued DCA for merge conflict resolution." + if merge_resolution + else "Queued DCA for metrics recovery from saved logs." 
+ if metrics_recovery + else "Queued DCA stage for the seed.", + ) + payload = { + "seed_id": seed.seed_id, + "run_id": run.run_id, + "prompt": seed.prompt, + "worktree_path": seed.worktree_path, + "merge_resolution": merge_resolution, + "metrics_recovery": metrics_recovery, + } + if merge_resolution: + payload["baseline_branch"] = seed.baseline_branch + if last_metrics is not None: + payload["last_metrics"] = last_metrics + if last_summary is not None: + payload["last_summary"] = last_summary + if metrics_recovery: + payload["source_run_id"] = source_run_id + payload["source_stdout_log_path"] = source_stdout_log_path + payload["source_stderr_log_path"] = source_stderr_log_path + payload["worktree_path"] = None + write_task("dca", payload, task_id=run.task_id) + return run + + def require_seed(self, seed_id: str) -> SeedRecord: + seed = self.seed_repo.get(seed_id) + if seed is None: + raise KeyError(f"Unknown seed_id={seed_id}") + return self._normalize_seed_runtime_state(seed) + + def require_run(self, run_id: str) -> StageRun: + run = self.run_repo.get(run_id) + if run is None: + raise KeyError(f"Unknown run_id={run_id}") + return run + + def mark_run_started(self, seed_id: str, run_id: str) -> tuple[SeedRecord, StageRun]: + seed = self.require_seed(seed_id) + run = self.require_run(run_id) + run.status = RunStatus.running + run.updated_at = now_ts() + if run.stage is StageName.p: + setup_error = self.git_service.setup_error() + if setup_error is not None: + raise RuntimeError(setup_error) + try: + self.git_service.ensure_branch(seed.baseline_branch, self.git_service.current_head()) + except GitCommandError: + pass + setup_error = self.git_service.setup_error_for_branches(seed.baseline_branch) + if setup_error is not None: + raise RuntimeError(setup_error) + seed = self.ensure_seed_worktree_ready(seed.seed_id) + if seed.worktree_path: + worktree_path = Path(seed.worktree_path) + if worktree_path.is_dir(): + try: + run.summary["commit_sha_before_p"] = 
self.git_service.head_sha_at( + worktree_path + ) + except GitCommandError: + pass + seed.status = SeedStatus.planning + event_kind = "p.started" + event_message = "Plan stage started in the candidate worktree." + else: + seed.status = SeedStatus.adapting + event_kind = "dca.started" + event_message = ( + "Baseline measurement started in the baseline worktree." + if seed.seed_id == BASELINE_SEED_ID + else "DCA stage started in the seed worktree." + ) + seed.updated_at = now_ts() + self.run_repo.save(run) + self.seed_repo.save(seed) + self.seed_repo.append_event(seed.seed_id, event_kind, event_message, run_id=run_id) + return seed, run + + def mark_direct_code_run_started(self, seed_id: str, run_id: str) -> tuple[SeedRecord, StageRun]: + seed = self.require_seed(seed_id) + run = self.require_run(run_id) + run.status = RunStatus.running + run.updated_at = now_ts() + seed.status = SeedStatus.adapting + seed.updated_at = now_ts() + self.run_repo.save(run) + self.seed_repo.save(seed) + self.seed_repo.append_event( + seed.seed_id, + "direct_code.started", + "Direct code agent started from the project root.", + run_id=run_id, + ) + return seed, run + + def mark_direct_code_run_failed( + self, + seed_id: str, + run_id: str, + error: str, + task_path: Path | None = None, + prompt_path: str | None = None, + log_path: str | None = None, + stderr_log_path: str | None = None, + ) -> None: + seed = self.require_seed(seed_id) + run = self.require_run(run_id) + run.status = RunStatus.failed + run.updated_at = now_ts() + run.error = error + if prompt_path is not None: + run.prompt_path = prompt_path + if log_path is not None: + run.log_path = log_path + if stderr_log_path is not None: + run.stderr_log_path = stderr_log_path + seed.status = SeedStatus.failed + seed.updated_at = now_ts() + seed.last_error = error + self.run_repo.save(run) + self.seed_repo.save(seed) + self.seed_repo.append_event(seed.seed_id, "direct_code.failed", error, run_id=run_id) + if task_path is not None and 
task_path.exists(): + move_to_error(task_path) + + def mark_run_failed( + self, + seed_id: str, + run_id: str, + error: str, + task_path: Path | None = None, + prompt_path: str | None = None, + log_path: str | None = None, + stderr_log_path: str | None = None, + ) -> None: + seed = self.require_seed(seed_id) + run = self.require_run(run_id) + run.status = RunStatus.failed + run.updated_at = now_ts() + run.error = error + if prompt_path is not None: + run.prompt_path = prompt_path + if log_path is not None: + run.log_path = log_path + if stderr_log_path is not None: + run.stderr_log_path = stderr_log_path + seed.status = SeedStatus.failed + seed.updated_at = now_ts() + seed.last_error = error + self.run_repo.save(run) + self.seed_repo.save(seed) + self.seed_repo.append_event(seed.seed_id, f"{run.stage.value}.failed", error, run_id=run_id) + if task_path is not None and task_path.exists(): + move_to_error(task_path) + + def finish_direct_code_run( + self, + seed_id: str, + run_id: str, + stdout: str, + stderr: str | None = None, + log_path: str | None = None, + stderr_log_path: str | None = None, + prompt_path: str | None = None, + ) -> StageRun: + seed = self.require_seed(seed_id) + run = self.require_run(run_id) + run.status = RunStatus.succeeded + run.updated_at = now_ts() + run.log_path = log_path + run.stderr_log_path = stderr_log_path + run.prompt_path = prompt_path + run.summary = { + "mode": "direct_code_agent", + "cwd": str(COMPONENT_SYSTEM_ROOT.parent), + "stdout_bytes": len(stdout.encode("utf-8", errors="replace")), + "stderr_bytes": len((stderr or "").encode("utf-8", errors="replace")), + } + run.signal = "direct_code_completed" + seed.status = SeedStatus.passed + seed.updated_at = now_ts() + seed.latest_signal = run.signal + seed.last_error = None + self.run_repo.save(run) + self.seed_repo.save(seed) + self.seed_repo.append_event( + seed.seed_id, + "direct_code.completed", + "Direct code agent completed from the project root.", + run_id=run_id, + ) + 
return run + + def finish_p_run( + self, + seed_id: str, + run_id: str, + stdout: str, + log_path: str | None = None, + stderr_log_path: str | None = None, + prompt_path: str | None = None, + ) -> StageRun: + seed = self.require_seed(seed_id) + run = self.require_run(run_id) + summary = self.extract_summary(stdout, StageName.p) or {} + seed.plan = PlanIdea( + title=summary.get("idea", "Generated plan"), + target_component=summary.get("target_component", "model"), + description=summary.get("description", ""), + source_refs=summary.get("source_refs", []), + commit_sha=summary.get("commit_sha"), + ) + # Single branch per seed (SSOT): worktree is already on seed_id branch. + commit_sha = self.git_service.commit_sha(seed.seed_id) + run.status = RunStatus.succeeded + run.updated_at = now_ts() + run.log_path = log_path + run.stderr_log_path = stderr_log_path + run.prompt_path = prompt_path + # Preserve run.summary fields set earlier (e.g. commit_sha_before_p) when merging P output. + run.summary = run.summary | summary | {"commit_sha": commit_sha} + seed.status = SeedStatus.generated + seed.updated_at = now_ts() + self.run_repo.save(run) + self.seed_repo.save(seed) + self.seed_repo.append_event( + seed.seed_id, + "p.completed", + "Plan completed on seed branch.", + commit_sha=commit_sha, + ) + self.queue_dca( + seed.seed_id, + restore_ref=run.summary.get("commit_sha_before_p"), + ) + return run + + @staticmethod + def combine_output(stdout: str, stderr: str | None = None) -> str: + if stdout and stderr: + return f"{stdout}\n{stderr}" + return stdout or stderr or "" + + def finish_dca_run( + self, + seed_id: str, + run_id: str, + stdout: str, + stderr: str | None = None, + log_path: str | None = None, + stderr_log_path: str | None = None, + prompt_path: str | None = None, + metrics_recovery: bool = False, + merge_resolution: bool = False, + ) -> StageRun: + seed = self.require_seed(seed_id) + run = self.require_run(run_id) + branch_metrics = 
self.metrics_repo.get_for_branch(seed.baseline_branch) + last_val_bpb = float(branch_metrics["last_val_bpb"]) if branch_metrics and branch_metrics.get("last_val_bpb") is not None else None + output_text = self.combine_output(stdout, stderr) + summary = self.extract_summary(output_text, StageName.dca) or {} + metrics = self.extract_dca_metrics(output_text, summary) + signal = self.evaluate_signal(metrics, last_val_bpb, PROMOTION_THRESHOLD) + commit_sha = summary.get("commit_sha") + if not (isinstance(commit_sha, str) and commit_sha.strip()): + try: + commit_sha = self.git_service.commit_sha(seed.seed_id) + except GitCommandError: + commit_sha = "" + run.status = RunStatus.succeeded + run.updated_at = now_ts() + run.log_path = log_path + run.stderr_log_path = stderr_log_path + run.prompt_path = prompt_path + run.summary = summary | {"commit_sha": commit_sha} + run.metrics = metrics + run.signal = signal + seed.updated_at = now_ts() + if signal == "error" and not metrics_recovery: + run.summary = run.summary | {"metrics_recovery_queued": True} + self.run_repo.save(run) + self.seed_repo.save(seed) + self.seed_repo.append_event( + seed.seed_id, + "dca.metrics_recovery_queued", + "DCA completed without recoverable metrics in the structured report; queued a follow-up DCA to inspect saved logs.", + run_id=run_id, + ) + self.queue_dca( + seed.seed_id, + metrics_recovery=True, + source_run_id=run_id, + source_stdout_log_path=log_path, + source_stderr_log_path=stderr_log_path, + ) + return run + seed.latest_metrics = metrics + seed.latest_signal = signal + terminal_status = self._status_from_dca_signal(signal) + merge_commit_sha = None # set when seed branch is successfully merged into baseline + if seed.seed_id == BASELINE_SEED_ID and last_val_bpb is None: + if "val_bpb" not in metrics: + seed.status = SeedStatus.failed + event_message = ( + "Baseline metrics recovery could not recover metrics; marked as failed." 
+ if metrics_recovery + else "Baseline measurement completed without metrics; marked as failed." + ) + self.run_repo.save(run) + self.seed_repo.save(seed) + self.seed_repo.append_event( + seed.seed_id, + "dca.completed", + event_message, + signal=signal, + metrics=metrics, + ) + return run + target_branch = self._first_user_seed_baseline_branch() or seed.baseline_branch + # Only positive_signal is merged into the per-seed baseline branch; record baseline value otherwise. + if signal != "positive_signal": + self.metrics_repo.update_for_branch( + target_branch, + {"last_val_bpb": metrics["val_bpb"]}, + ) + seed.status = terminal_status + self.run_repo.save(run) + self.seed_repo.save(seed) + self.seed_repo.append_event( + seed.seed_id, + "dca.completed", + "Baseline measurement completed (no promotion); not merged into baseline branch.", + signal=signal, + metrics=metrics, + ) + return run + try: + merge_commit_sha = self.git_service.promote_seed_branch(seed, target_branch=target_branch) + self.metrics_repo.update_for_branch( + target_branch, + { + "last_val_bpb": metrics["val_bpb"], + "promoted_branch": seed.seed_id, + "promoted_idea": "Initial baseline adaptation", + "promoted_at": summary.get("completed_at"), + }, + ) + seed.status = SeedStatus.passed + event_message = f"Baseline measurement completed and __baseline__ was merged into {target_branch}; waiting seeds can now start Plan." + self.run_repo.save(run) + self.seed_repo.save(seed) + self.seed_repo.append_event( + seed.seed_id, + "dca.completed", + event_message, + signal=signal, + metrics=metrics, + commit_sha=merge_commit_sha, + ) + self._release_seeds_waiting_for_baseline(target_branch) + return run + except GitCommandError as merge_err: + tried_sha = commit_sha or "" + try: + tried_sha = self.git_service.commit_sha(seed.seed_id) + except GitCommandError: + pass + self.seed_repo.append_event( + seed.seed_id, + "dca.merge_failed", + f"Merge into baseline failed: {merge_err}. 
Queued a new DCA run to resolve conflicts.", + commit_sha=tried_sha or None, + target_branch=target_branch, + ) + if not merge_resolution: + self.queue_dca( + seed.seed_id, + merge_resolution=True, + last_metrics=metrics, + last_summary=summary, + ) + seed.status = SeedStatus.dca_queued + seed.updated_at = now_ts() + self.seed_repo.save(seed) + self.run_repo.save(run) + self.seed_repo.append_event( + seed.seed_id, + "dca.completed", + "Baseline measurement completed but merge failed; conflict-resolution DCA queued.", + signal=signal, + metrics=metrics, + ) + return run + self.metrics_repo.update_for_branch( + target_branch, + { + "last_val_bpb": metrics["val_bpb"], + "promoted_branch": seed.seed_id, + "promoted_idea": "Initial baseline adaptation", + "promoted_at": summary.get("completed_at"), + }, + ) + seed.status = SeedStatus.passed + self.seed_repo.save(seed) + self.run_repo.save(run) + self.seed_repo.append_event( + seed.seed_id, + "dca.completed", + "Baseline measurement completed; merge into baseline branch failed again after resolution run (loop avoided). Baseline metrics recorded; manual merge may be needed.", + signal=signal, + metrics=metrics, + ) + self._release_seeds_waiting_for_baseline(target_branch) + return run + if terminal_status is SeedStatus.promoted: + try: + self.metrics_repo.update_for_branch( + seed.baseline_branch, + { + "last_val_bpb": metrics["val_bpb"], + "promoted_branch": seed.seed_id, + "promoted_idea": seed.plan.title if seed.plan else seed.prompt[:80], + "promoted_at": summary.get("completed_at"), + }, + ) + merge_commit_sha = self.git_service.promote_seed_branch(seed) + seed.status = terminal_status + event_message = "DCA succeeded and seed branch was promoted into baseline." 
+ except GitCommandError as merge_err: + tried_sha = commit_sha or "" + try: + tried_sha = self.git_service.commit_sha(seed.seed_id) + except GitCommandError: + pass + self.seed_repo.append_event( + seed.seed_id, + "dca.merge_failed", + f"Merge into baseline failed: {merge_err}. Queued a new DCA run to resolve conflicts.", + commit_sha=tried_sha or None, + target_branch=seed.baseline_branch, + ) + self.queue_dca( + seed.seed_id, + merge_resolution=True, + last_metrics=metrics, + last_summary=summary, + ) + seed.status = SeedStatus.dca_queued + seed.updated_at = now_ts() + self.seed_repo.save(seed) + self.run_repo.save(run) + self.seed_repo.append_event( + seed.seed_id, "dca.completed", "DCA run completed but merge failed; conflict-resolution DCA queued.", signal=signal, metrics=metrics + ) + if seed.ralph_loop_enabled: + try: + self.queue_p(seed.seed_id) + self.seed_repo.append_event(seed.seed_id, "ralph.requeued", "Ralph loop queued the next Plan run.") + except (RuntimeError, GitCommandError) as exc: + self.seed_repo.append_event(seed.seed_id, "ralph.requeue_failed", f"Ralph loop could not queue the next Plan run: {exc}") + return run + elif terminal_status is SeedStatus.failed: + seed.status = terminal_status + event_message = ( + "DCA metrics recovery could not recover metrics; marked as failed." + if metrics_recovery + else "DCA completed but metrics were missing; marked as failed." + ) + else: + seed.status = terminal_status + event_message = "DCA completed without promotion." 
+ self.run_repo.save(run) + self.seed_repo.save(seed) + event_commit_sha = merge_commit_sha if merge_commit_sha else run.summary.get("commit_sha") + self.seed_repo.append_event( + seed.seed_id, + "dca.completed", + event_message, + signal=signal, + metrics=metrics, + **({"commit_sha": event_commit_sha} if event_commit_sha else {}), + ) + if ( + seed.ralph_loop_enabled + and signal in ("negative_signal", "neutral", "error") + and not merge_resolution + and not metrics_recovery + and seed.seed_id != BASELINE_SEED_ID + ): + ref = run.summary.get("restore_ref") or run.summary.get("baseline_commit_at_dca_start") + if ref: + try: + self.git_service.reset_seed_branch_to(seed, ref) + self.seed_repo.append_event( + seed.seed_id, + "ralph.worktree_restored", + "Restored seed worktree to commit before P for next Plan.", + commit_sha=ref, + ) + except GitCommandError as exc: + self.seed_repo.append_event( + seed.seed_id, + "ralph.worktree_restore_failed", + f"Could not restore seed worktree to commit before P: {exc}", + commit_sha=ref, + ) + if seed.ralph_loop_enabled: + try: + self.queue_p(seed.seed_id) + self.seed_repo.append_event( + seed.seed_id, + "ralph.requeued", + "Ralph loop queued the next Plan run.", + ) + except (RuntimeError, GitCommandError) as exc: + self.seed_repo.append_event( + seed.seed_id, + "ralph.requeue_failed", + f"Ralph loop could not queue the next Plan run: {exc}", + ) + return run + + def build_dashboard(self, selected_seed_id: str | None = None) -> DashboardViewModel: + seeds = self.seed_repo.list() + selected_seed = self.seed_repo.get(selected_seed_id) if selected_seed_id else None + baseline_metrics_by_branch = self.metrics_repo.get_all() + available_branches: list[str] = [] + setup_error = self.git_service.setup_error() + if setup_error is None: + try: + all_branches = self.git_service.list_branches() + if not all_branches: + setup_error = "No local branches found yet. Create an initial commit/branch, then reload." 
+ else: + available_branches = [ + b for b in all_branches + if not self.git_service.is_seed_specific_branch(b) + ] + # Use only branches that exist in the repo; do not add DEFAULT_BASELINE_BRANCH + # if it does not exist, so the dropdown never shows a non-existent branch. + except GitCommandError as exc: + setup_error = str(exc) + # Default to first existing branch so the selected value is always valid. + default_baseline_branch = (available_branches[0] if available_branches else DEFAULT_BASELINE_BRANCH) or "master" + status_column_map = { + SeedStatus.draft: "seedInbox", + SeedStatus.queued: "seedInbox", + SeedStatus.planning: "generated", + SeedStatus.generated: "generated", + SeedStatus.dca_queued: "generated", + SeedStatus.adapting: "activeDca", + SeedStatus.running: "activeDca", + SeedStatus.passed: "completed", + SeedStatus.failed: "completed", + SeedStatus.promoted: "completed", + } + seeds_by_column: dict[str, list[SeedRecord]] = { + "seedInbox": [], + "generated": [], + "activeDca": [], + "completed": [], + } + for seed in seeds: + self._reconcile_seed_status_signal(seed) + column_id = status_column_map.get(seed.status, "seedInbox") + seeds_by_column[column_id].append(seed) + columns = [ + DashboardColumn( + id="seedInbox", + title="Seed", + description="New prompts and queued planning work.", + seeds=seeds_by_column["seedInbox"], + ), + DashboardColumn( + id="generated", + title="Plan", + description="Planning and generated code ready for Do-Check-Action.", + seeds=seeds_by_column["generated"], + ), + DashboardColumn( + id="activeDca", + title="Do-Check-Action", + description="Adapting, fixing, and running the seed run.", + seeds=seeds_by_column["activeDca"], + ), + DashboardColumn( + id="completed", + title="Completed", + description="Finished runs; promoted seeds merged into baseline.", + seeds=seeds_by_column["completed"], + ), + ] + return DashboardViewModel( + setup_error=setup_error, + baseline_metrics_by_branch=baseline_metrics_by_branch, + 
default_baseline_branch=default_baseline_branch, + available_branches=available_branches, + seed_count=len(seeds), + columns=columns, + selected_seed=selected_seed, + daemon_status=get_daemon_status(), + ) + + def seed_detail(self, seed_id: str) -> dict[str, object]: + seed = self.require_seed(seed_id) + expected_worktree = ( + self._baseline_worktree_path() + if seed.seed_id == BASELINE_SEED_ID + else self._seed_worktree_path(seed.seed_id) + ) + needs_save = False + if expected_worktree is not None and not seed.worktree_path: + seed.worktree_path = expected_worktree + needs_save = True + if needs_save: + seed.updated_at = now_ts() + self.seed_repo.save(seed) + self._reconcile_seed_status_signal(seed) + return { + "seed": seed, + "can_edit_prompt": self.can_edit_seed_prompt(seed), + "runs": self.run_repo.list(seed_id), + "events": self.seed_repo.events(seed_id), + "baseline_metrics_for_branch": self.metrics_repo.get_for_branch(seed.baseline_branch), + "setup_error": self.git_service.setup_error_for_branches(seed.baseline_branch), + } + + def extract_summary(self, output_text: str, stage: StageName) -> dict[str, object] | None: + start_marker, end_marker = SUMMARY_MARKERS[stage.value] + pattern = rf"{start_marker}\s*(\{{.*?\}})\s*{end_marker}" + match = re.search(pattern, output_text, flags=re.DOTALL) + if not match: + return None + try: + return json.loads(match.group(1)) + except json.JSONDecodeError: + return {"raw_summary": match.group(1)} + + def extract_metrics(self, output_text: str) -> dict[str, float | int]: + patterns = { + "val_bpb": r"^val_bpb:\s+([0-9.]+)", + "training_seconds": r"^training_seconds:\s+([0-9.]+)", + "total_seconds": r"^total_seconds:\s+([0-9.]+)", + "startup_seconds": r"^startup_seconds:\s+([0-9.]+)", + "peak_vram_mb": r"^peak_vram_mb:\s+([0-9.]+)", + "mfu_percent": r"^mfu_percent:\s+([0-9.]+)", + "total_tokens_M": r"^total_tokens_M:\s+([0-9.]+)", + "num_steps": r"^num_steps:\s+([0-9]+)", + "num_params_M": r"^num_params_M:\s+([0-9.]+)", 
+ "depth": r"^depth:\s+([0-9]+)", + } + metrics: dict[str, float | int] = {} + for key, pattern in patterns.items(): + match = re.search(pattern, output_text, flags=re.MULTILINE) + if not match: + continue + metrics[key] = int(match.group(1)) if key in {"num_steps", "depth"} else float(match.group(1)) + return metrics + + def extract_dca_metrics( + self, output_text: str, summary: dict[str, object] | None = None + ) -> dict[str, float | int]: + if summary: + summary_metrics = summary.get("metrics") + if isinstance(summary_metrics, dict): + parsed: dict[str, float | int] = {} + int_keys = {"num_steps", "depth"} + float_keys = { + "val_bpb", + "training_seconds", + "total_seconds", + "startup_seconds", + "peak_vram_mb", + "mfu_percent", + "total_tokens_M", + "num_params_M", + } + for key in int_keys | float_keys: + value = summary_metrics.get(key) + if value is None: + continue + try: + parsed[key] = int(value) if key in int_keys else float(value) + except (TypeError, ValueError): + continue + if parsed: + return parsed + return self.extract_metrics(output_text) + + @staticmethod + def evaluate_signal( + metrics: dict[str, float | int], + last_val_bpb: float | None, + promotion_threshold: float = PROMOTION_THRESHOLD, + ) -> str: + val_bpb = metrics.get("val_bpb") + if val_bpb is None: + return "error" + if last_val_bpb is None: + return "positive_signal" + delta = float(last_val_bpb) - float(val_bpb) + if delta >= promotion_threshold: + return "positive_signal" + if delta <= -promotion_threshold: + return "negative_signal" + return "neutral" + + +def default_workflow_service() -> WorkflowService: + return WorkflowService() diff --git a/component_system/tailwind.config.js b/component_system/tailwind.config.js new file mode 100644 index 000000000..ea1a7a372 --- /dev/null +++ b/component_system/tailwind.config.js @@ -0,0 +1,11 @@ +/** @type {import('tailwindcss').Config} */ +module.exports = { + content: [ + "./web/templates/**/*.html", + "./web/static/**/*.js" + ], + 
theme: { + extend: {} + }, + plugins: [] +}; diff --git a/component_system/task.py b/component_system/task.py new file mode 100644 index 000000000..587f50540 --- /dev/null +++ b/component_system/task.py @@ -0,0 +1,280 @@ +"""Shared queue and JSON state helpers for the component-system web app.""" +from __future__ import annotations + +import json +import os +import shutil +import time +import uuid +from pathlib import Path +from typing import Any + +COMPONENT_SYSTEM_ROOT = Path(__file__).resolve().parent +HISTORY_ROOT = COMPONENT_SYSTEM_ROOT / "history" +QUEUE_ROOT = HISTORY_ROOT / "queue" +STATE_ROOT = HISTORY_ROOT / "state" +SEEDS_ROOT = STATE_ROOT / "seeds" +RUNS_ROOT = STATE_ROOT / "runs" +EVENTS_ROOT = STATE_ROOT / "events" +BASELINE_BRANCHES_PATH = COMPONENT_SYSTEM_ROOT / "baseline_branches.json" +BASELINE_METRICS_PATH = COMPONENT_SYSTEM_ROOT / "baseline_metrics.json" +WORKTREE_ROOT = HISTORY_ROOT / "worktrees" +LOG_ROOT = HISTORY_ROOT / "logs" + +STAGE_DIRS = { + "p": QUEUE_ROOT / "p", + "dca": QUEUE_ROOT / "dca", + "direct": QUEUE_ROOT / "direct", +} +IN_PROGRESS_DIR = QUEUE_ROOT / "in_progress" +DONE_DIR = QUEUE_ROOT / "done" +ERROR_DIR = QUEUE_ROOT / "error" +DAEMON_HEARTBEAT_PATH = STATE_ROOT / "daemon_heartbeat.json" +DAEMON_HEARTBEAT_STALE_SECONDS = 5 + +def _read_json(path: Path, default: Any) -> Any: + if not path.exists(): + return default + return json.loads(path.read_text(encoding="utf-8")) + + +def _write_json(path: Path, payload: Any) -> Path: + path.parent.mkdir(parents=True, exist_ok=True) + path.write_text(json.dumps(payload, indent=2, ensure_ascii=False), encoding="utf-8") + return path + + +def now_ts() -> float: + return time.time() + + +def now_iso() -> str: + return time.strftime("%Y-%m-%d %H:%M:%S") + + +def daemon_heartbeat() -> None: + """Write the daemon heartbeat file (call from the daemon process).""" + ensure_queue_layout() + _write_json( + DAEMON_HEARTBEAT_PATH, + {"timestamp": now_ts(), "pid": os.getpid()}, + ) + + +def 
get_daemon_status() -> str: + """Return 'running' if the daemon heartbeat is recent, else 'stopped'.""" + if not DAEMON_HEARTBEAT_PATH.exists(): + return "stopped" + try: + data = _read_json(DAEMON_HEARTBEAT_PATH, {}) + ts = data.get("timestamp") + if ts is None: + return "stopped" + if (now_ts() - float(ts)) <= DAEMON_HEARTBEAT_STALE_SECONDS: + return "running" + except Exception: + pass + return "stopped" + + +def ensure_queue_layout() -> None: + HISTORY_ROOT.mkdir(parents=True, exist_ok=True) + for d in STAGE_DIRS.values(): + d.mkdir(parents=True, exist_ok=True) + IN_PROGRESS_DIR.mkdir(parents=True, exist_ok=True) + DONE_DIR.mkdir(parents=True, exist_ok=True) + ERROR_DIR.mkdir(parents=True, exist_ok=True) + SEEDS_ROOT.mkdir(parents=True, exist_ok=True) + RUNS_ROOT.mkdir(parents=True, exist_ok=True) + EVENTS_ROOT.mkdir(parents=True, exist_ok=True) + WORKTREE_ROOT.mkdir(parents=True, exist_ok=True) + LOG_ROOT.mkdir(parents=True, exist_ok=True) + # Auto-create baseline JSON files if missing (like results.tsv for recording run data) + if not BASELINE_METRICS_PATH.exists(): + _write_json(BASELINE_METRICS_PATH, {}) + if not BASELINE_BRANCHES_PATH.exists(): + _write_json(BASELINE_BRANCHES_PATH, {}) + + +def new_task_id(prefix: str | None = None) -> str: + ts = time.strftime("%Y%m%d-%H%M%S") + short = uuid.uuid4().hex[:8] + task_id = f"{ts}-{short}" + return f"{prefix}-{task_id}" if prefix else task_id + + +def new_seed_id(prefix: str = "seed") -> str: + return f"{prefix}-{uuid.uuid4().hex[:6]}" + + +def new_run_id(stage: str) -> str: + return new_task_id(stage) + + +def write_task(stage: str, payload: dict[str, Any], task_id: str | None = None) -> Path: + ensure_queue_layout() + if stage not in STAGE_DIRS: + raise KeyError(f"Unknown stage {stage!r}") + tid = task_id or new_task_id(stage) + path = STAGE_DIRS[stage] / f"{tid}.json" + payload_with_meta = {"task_id": tid, "stage": stage, "created_at": now_ts(), **payload} + return _write_json(path, payload_with_meta) + + 
+def read_task(path: Path) -> dict[str, Any]: + return _read_json(path, {}) + + +def move_to_done(path: Path) -> Path: + ensure_queue_layout() + dest = DONE_DIR / path.name + if dest.exists(): + dest.unlink() + path.rename(dest) + return dest + + +def move_to_error(path: Path) -> Path: + ensure_queue_layout() + dest = ERROR_DIR / path.name + if dest.exists(): + dest.unlink() + path.rename(dest) + return dest + + +def list_pending(stage: str) -> list[Path]: + ensure_queue_layout() + if stage not in STAGE_DIRS: + raise KeyError(f"Unknown stage {stage!r}") + return sorted(STAGE_DIRS[stage].glob("*.json")) + + +def claim_pending(stage: str) -> Path | None: + """Atomically claim the oldest pending task for a stage.""" + ensure_queue_layout() + if stage not in STAGE_DIRS: + raise KeyError(f"Unknown stage {stage!r}") + for path in sorted(STAGE_DIRS[stage].glob("*.json")): + claimed_path = IN_PROGRESS_DIR / path.name + try: + path.rename(claimed_path) + return claimed_path + except FileNotFoundError: + continue + except OSError: + # Another worker likely claimed the task first. 
+ continue + return None + + +def restore_in_progress_tasks() -> dict[str, int]: + """Move stranded in-progress tasks back to their stage queue.""" + ensure_queue_layout() + restored = {stage: 0 for stage in STAGE_DIRS} + for path in sorted(IN_PROGRESS_DIR.glob("*.json")): + payload = _read_json(path, {}) + stage = payload.get("stage") + if stage not in STAGE_DIRS: + continue + dest = STAGE_DIRS[stage] / path.name + if dest.exists(): + dest.unlink() + path.rename(dest) + restored[stage] += 1 + return restored + + +def seed_path(seed_id: str) -> Path: + return SEEDS_ROOT / f"{seed_id}.json" + + +def run_path(run_id: str) -> Path: + return RUNS_ROOT / f"{run_id}.json" + + +def event_path(seed_id: str) -> Path: + return EVENTS_ROOT / f"{seed_id}.json" + + +def save_seed(seed: dict[str, Any]) -> Path: + seed_id = seed["seed_id"] + return _write_json(seed_path(seed_id), seed) + + +def load_seed(seed_id: str) -> dict[str, Any]: + return _read_json(seed_path(seed_id), {}) + + +def list_seeds() -> list[dict[str, Any]]: + ensure_queue_layout() + seeds = [_read_json(path, {}) for path in SEEDS_ROOT.glob("*.json")] + return sorted(seeds, key=lambda item: item.get("updated_at", item.get("created_at", 0)), reverse=True) + + +def save_run(run: dict[str, Any]) -> Path: + return _write_json(run_path(run["run_id"]), run) + + +def load_run(run_id: str) -> dict[str, Any]: + return _read_json(run_path(run_id), {}) + + +def list_runs(seed_id: str | None = None) -> list[dict[str, Any]]: + ensure_queue_layout() + runs = [_read_json(path, {}) for path in RUNS_ROOT.glob("*.json")] + if seed_id is not None: + runs = [run for run in runs if run.get("seed_id") == seed_id] + return sorted(runs, key=lambda item: item.get("updated_at", item.get("created_at", 0)), reverse=True) + + +def append_event(seed_id: str, event: dict[str, Any]) -> list[dict[str, Any]]: + ensure_queue_layout() + payload = _read_json(event_path(seed_id), []) + payload.append({"created_at": now_ts(), "created_at_human": 
now_iso(), **event}) + _write_json(event_path(seed_id), payload) + return payload + + +def load_events(seed_id: str) -> list[dict[str, Any]]: + return _read_json(event_path(seed_id), []) + + +def delete_seed(seed_id: str) -> None: + for path in (seed_path(seed_id), event_path(seed_id)): + if path.exists(): + path.unlink() + for run in list_runs(seed_id): + path = run_path(run["run_id"]) + if path.exists(): + path.unlink() + + +def load_baseline_branch_map() -> dict[str, str]: + """Load seed_id -> baseline_branch mapping (for agent lookup and workflow).""" + ensure_queue_layout() + return _read_json(BASELINE_BRANCHES_PATH, {}) + + +def save_baseline_branch_map(mapping: dict[str, str]) -> None: + """Persist seed_id -> baseline_branch mapping.""" + ensure_queue_layout() + _write_json(BASELINE_BRANCHES_PATH, mapping) + + +def load_baseline_metrics() -> dict[str, dict[str, Any]]: + """Load baseline_branch -> { last_val_bpb, promoted_branch, promoted_at, promoted_idea }.""" + ensure_queue_layout() + return _read_json(BASELINE_METRICS_PATH, {}) + + +def save_baseline_metrics(metrics_by_branch: dict[str, dict[str, Any]]) -> None: + """Persist per-branch baseline metrics.""" + ensure_queue_layout() + _write_json(BASELINE_METRICS_PATH, metrics_by_branch) + + +def reset_worktree(path: str | Path) -> None: + worktree = Path(path) + if worktree.exists(): + shutil.rmtree(worktree) diff --git a/component_system/training/mainline.py b/component_system/training/mainline.py new file mode 100644 index 000000000..e91771d85 --- /dev/null +++ b/component_system/training/mainline.py @@ -0,0 +1,82 @@ +"""Mainline assembler: reads static config, dynamically loads components, runs training.""" +from __future__ import annotations + +if __package__ in {None, ""}: + import sys + from pathlib import Path + + sys.path.insert(0, str(Path(__file__).resolve().parents[2])) + +import importlib +import os +from dataclasses import asdict +from typing import Any + +import torch + +from prepare import 
Tokenizer + +from component_system.config import get_training_binding + + +def _prepare_environment() -> None: + os.environ["PYTORCH_ALLOC_CONF"] = "expandable_segments:True" + os.environ["HF_HUB_DISABLE_PROGRESS_BARS"] = "1" + torch.manual_seed(42) + if torch.cuda.is_available(): + torch.cuda.manual_seed(42) + torch.set_float32_matmul_precision("high") + torch.cuda.reset_peak_memory_stats() + + +def _import_module(path: str) -> Any: + return importlib.import_module(path) + + +def run_mainline_training(binding_path: str | None = None) -> dict[str, Any]: + _prepare_environment() + binding = get_training_binding() + + tokenizer = Tokenizer.from_directory() + vocab_size = tokenizer.get_vocab_size() + + model_module = _import_module(binding["model_module"]) + optimizer_module = _import_module(binding["optimizer_module"]) + training_step_module = _import_module(binding["training_step_module"]) + + settings = training_step_module.default_training_settings() + config = model_module.build_model_config( + depth=settings.depth, + vocab_size=vocab_size, + aspect_ratio=settings.aspect_ratio, + head_dim=settings.head_dim, + window_pattern=settings.window_pattern, + ) + + print("Loaded training binding from config") + print(f"Model config: {asdict(config)}") + + model, param_counts, num_flops_per_token = model_module.create_model( + config, + compile_model=settings.compile_model, + ) + + print("Parameter counts:") + for key, value in param_counts.items(): + print(f" {key:24s}: {value:,}") + print(f"Estimated FLOPs per token: {num_flops_per_token:e}") + + optimizer = optimizer_module.create_optimizer(model, settings) + return training_step_module.run_training_session( + model=model, + optimizer=optimizer, + tokenizer=tokenizer, + settings=settings, + param_counts=param_counts, + num_flops_per_token=num_flops_per_token, + baseline_binding=binding, + ) + + +if __name__ == "__main__": + run_mainline_training() diff --git a/component_system/web/app.py b/component_system/web/app.py 
new file mode 100644 index 000000000..18a82ae21 --- /dev/null +++ b/component_system/web/app.py @@ -0,0 +1,42 @@ +from __future__ import annotations + +from pathlib import Path + +from fastapi import FastAPI +from fastapi.responses import RedirectResponse, Response +from fastapi.staticfiles import StaticFiles +from fastapi.templating import Jinja2Templates + +from component_system.services.workflow import default_workflow_service +from component_system.task import ensure_queue_layout +from component_system.web.routes import router + +WEB_ROOT = Path(__file__).resolve().parent +TEMPLATE_ROOT = WEB_ROOT / "templates" +STATIC_ROOT = WEB_ROOT / "static" + +def create_app() -> FastAPI: + ensure_queue_layout() + app = FastAPI(title="Component System", version="0.1.0") + app.state.workflow = default_workflow_service() + app.state.templates = Jinja2Templates(directory=str(TEMPLATE_ROOT)) + app.mount("/static", StaticFiles(directory=str(STATIC_ROOT)), name="static") + app.include_router(router, prefix="/component-system") + + @app.get("/", include_in_schema=False) + def root() -> RedirectResponse: + return RedirectResponse(url="/component-system", status_code=307) + + @app.get("/favicon.ico", include_in_schema=False) + def favicon() -> Response: + return Response(status_code=204) + + @app.get("/.well-known/appspecific/com.chrome.devtools.json", include_in_schema=False) + def chrome_devtools_probe() -> Response: + # Chrome DevTools probes this endpoint; return 204 to avoid log spam. 
+ return Response(status_code=204) + + return app + + +app = create_app() diff --git a/component_system/web/routes.py b/component_system/web/routes.py new file mode 100644 index 000000000..e5424ccc7 --- /dev/null +++ b/component_system/web/routes.py @@ -0,0 +1,337 @@ +from __future__ import annotations + +from pathlib import Path + +from fastapi import APIRouter, Form, HTTPException, Query, Request +from fastapi.responses import HTMLResponse, RedirectResponse, Response + +from component_system.domain.models import SeedStatus +from component_system.services.workflow import GitCommandError, WorkflowService +from component_system.task import COMPONENT_SYSTEM_ROOT, get_daemon_status, LOG_ROOT + +router = APIRouter() + + +def _templates(request: Request): + return request.app.state.templates + + +def _workflow(request: Request) -> WorkflowService: + return request.app.state.workflow + + +def _is_htmx(request: Request) -> bool: + return request.headers.get("hx-request", "").lower() == "true" + + +def _render(request: Request, template_name: str, context: dict, status_code: int = 200) -> HTMLResponse: + templates = _templates(request) + return templates.TemplateResponse(request, template_name, {"request": request, **context}, status_code=status_code) + + +def _resolve_log_path(run_id: str, stream: str, run_log_path: str | None) -> Path | None: + # Primary source: persisted run metadata path. + if run_log_path: + candidate = Path(run_log_path) + if candidate.exists() and candidate.is_file(): + return candidate + + # Deterministic run-id naming (new format). 
+ run_named = LOG_ROOT / f"{run_id}.{stream}.log" + if run_named.exists() and run_named.is_file(): + return run_named + + return None + + +def _resolve_prompt_path(run_id: str, run_prompt_path: str | None) -> Path | None: + if run_prompt_path: + candidate = Path(run_prompt_path) + if candidate.exists() and candidate.is_file(): + return candidate + prompt_named = LOG_ROOT / f"{run_id}.prompt.txt" + if prompt_named.exists() and prompt_named.is_file(): + return prompt_named + return None + + +@router.get("/", response_class=HTMLResponse) +def dashboard(request: Request, seed_id: str | None = None) -> HTMLResponse: + workflow = _workflow(request) + viewmodel = workflow.build_dashboard(selected_seed_id=seed_id) + context = { + "dashboard": viewmodel, + "selected_seed_id": seed_id, + "detail": workflow.seed_detail(seed_id) if seed_id else None, + } + return _render(request, "dashboard.html", context) + + +@router.get("/partials/dashboard", response_class=HTMLResponse) +def dashboard_board(request: Request, seed_id: str | None = None) -> HTMLResponse: + workflow = _workflow(request) + viewmodel = workflow.build_dashboard(selected_seed_id=seed_id) + return _render(request, "partials/dashboard_board.html", {"dashboard": viewmodel, "selected_seed_id": seed_id}) + + +@router.get("/partials/daemon-status", response_class=HTMLResponse) +def daemon_status_partial(request: Request) -> HTMLResponse: + return _render(request, "partials/daemon_status.html", {"daemon_status": get_daemon_status()}) + + +@router.get("/partials/seeds/{seed_id}", response_class=HTMLResponse) +def seed_detail_partial(request: Request, seed_id: str) -> HTMLResponse: + workflow = _workflow(request) + try: + detail = workflow.seed_detail(seed_id) + except KeyError as exc: + raise HTTPException(status_code=404, detail=str(exc)) from exc + dashboard = workflow.build_dashboard(selected_seed_id=seed_id) + context = { + **detail, + "dashboard": dashboard, + "selected_seed_id": seed_id, + "oob": True, + } + return 
_render(request, "partials/seed_detail_response.html", context) + + +@router.get("/api/runs/{run_id}/prompt") +def run_prompt(request: Request, run_id: str) -> dict[str, object]: + workflow = _workflow(request) + run = workflow.run_repo.get(run_id) + run_prompt_path = run.prompt_path if run is not None else None + prompt_path = _resolve_prompt_path(run_id, run_prompt_path) + if prompt_path is None: + raise HTTPException(status_code=404, detail=f"Prompt for run '{run_id}' not found.") + content = prompt_path.read_text(encoding="utf-8", errors="replace") + return {"content": content} + + +@router.get("/api/runs/{run_id}/log") +def run_log_chunk( + request: Request, + run_id: str, + stream: str = Query("stdout"), + offset: int = Query(0, ge=0), + limit: int = Query(64 * 1024, ge=1024, le=512 * 1024), +) -> dict[str, object]: + workflow = _workflow(request) + run = workflow.run_repo.get(run_id) + + complete_status = bool(run is not None and run.status.value in {"succeeded", "failed"}) + if stream not in {"stdout", "stderr"}: + raise HTTPException(status_code=400, detail="stream must be one of: stdout, stderr") + + run_log_path = None + if run is not None: + run_log_path = run.log_path if stream == "stdout" else run.stderr_log_path + if not run_log_path and stream == "stderr" and run.log_path and run.log_path.endswith(".stdout.log"): + run_log_path = run.log_path.replace(".stdout.log", ".stderr.log") + + log_path = _resolve_log_path(run_id, stream, run_log_path) + if log_path is None and run is not None and not complete_status: + # During queued/running phases metadata may not yet include paths and files may appear slightly later. 
+ return { + "chunk": "", + "next_offset": offset, + "size": 0, + "complete": False, + } + + if log_path is None: + raise HTTPException(status_code=404, detail=f"Log for run '{run_id}' ({stream}) not found.") + + if not log_path.exists() or not log_path.is_file(): + return { + "chunk": "", + "next_offset": offset, + "size": 0, + "complete": complete_status, + } + + file_size = log_path.stat().st_size + if offset > file_size: + offset = file_size + + with open(log_path, "rb") as handle: + handle.seek(offset) + payload = handle.read(limit) + + next_offset = offset + len(payload) + return { + "chunk": payload.decode("utf-8", errors="replace"), + "next_offset": next_offset, + "size": file_size, + "complete": bool(complete_status and next_offset >= file_size), + } + + +@router.get("/seeds/{seed_id}", response_class=HTMLResponse) +def seed_detail_page(request: Request, seed_id: str) -> HTMLResponse: + workflow = _workflow(request) + try: + detail = workflow.seed_detail(seed_id) + except KeyError as exc: + raise HTTPException(status_code=404, detail=str(exc)) from exc + return _render(request, "seed_detail_page.html", detail) + + +@router.post("/actions/seeds", response_class=HTMLResponse) +def create_seed( + request: Request, + prompt: str = Form(...), + baseline_branch: str = Form(...), + seed_mode: str = Form("manual"), +) -> Response: + workflow = _workflow(request) + seed = workflow.create_seed( + prompt, + baseline_branch=baseline_branch, + ralph_loop_enabled=seed_mode == "ralph", + ) + if seed_mode == "ralph": + try: + workflow.queue_p(seed.seed_id) + except (RuntimeError, GitCommandError) as exc: + workflow.seed_repo.append_event( + seed.seed_id, + "ralph.start_failed", + f"Ralph loop could not queue the initial Plan run: {exc}", + ) + target_url = str(request.url_for("dashboard")) + f"?seed_id={seed.seed_id}" + if _is_htmx(request): + response = Response(status_code=204) + response.headers["HX-Redirect"] = target_url + return response + return 
RedirectResponse(target_url, status_code=303) + + +@router.post("/actions/direct-code-agent", response_class=HTMLResponse) +def start_direct_code_agent(request: Request, prompt: str = Form(...)) -> Response: + workflow = _workflow(request) + try: + seed, _run = workflow.create_direct_code_seed(prompt) + except RuntimeError as exc: + if _is_htmx(request): + return _render(request, "partials/action_error.html", {"message": str(exc)}, status_code=400) + raise HTTPException(status_code=400, detail=str(exc)) from exc + target_url = str(request.url_for("dashboard")) + f"?seed_id={seed.seed_id}" + if _is_htmx(request): + response = Response(status_code=204) + response.headers["HX-Redirect"] = target_url + return response + return RedirectResponse(target_url, status_code=303) + + +@router.post("/actions/seeds/{seed_id}/p", response_class=HTMLResponse) +def queue_p(request: Request, seed_id: str) -> Response: + workflow = _workflow(request) + try: + workflow.queue_p(seed_id) + except KeyError as exc: + if _is_htmx(request): + return _render(request, "partials/action_error.html", {"message": str(exc)}, status_code=404) + raise HTTPException(status_code=404, detail=str(exc)) from exc + except (RuntimeError, GitCommandError) as exc: + if _is_htmx(request): + return _render(request, "partials/action_error.html", {"message": str(exc)}, status_code=400) + raise HTTPException(status_code=400, detail=str(exc)) from exc + target_url = str(request.url_for("dashboard")) + f"?seed_id={seed_id}" + if _is_htmx(request): + response = Response(status_code=204) + response.headers["HX-Redirect"] = target_url + return response + return RedirectResponse(target_url, status_code=303) + + +@router.post("/actions/seeds/{seed_id}/prompt", response_class=HTMLResponse) +def update_seed_prompt(request: Request, seed_id: str, prompt: str = Form(...)) -> Response: + workflow = _workflow(request) + try: + workflow.update_seed_prompt(seed_id, prompt) + except KeyError as exc: + if _is_htmx(request): + 
return _render(request, "partials/action_error.html", {"message": str(exc)}, status_code=404) + raise HTTPException(status_code=404, detail=str(exc)) from exc + except RuntimeError as exc: + if _is_htmx(request): + return _render(request, "partials/action_error.html", {"message": str(exc)}, status_code=400) + raise HTTPException(status_code=400, detail=str(exc)) from exc + + if _is_htmx(request): + detail = workflow.seed_detail(seed_id) + dashboard = workflow.build_dashboard(selected_seed_id=seed_id) + context = { + **detail, + "dashboard": dashboard, + "selected_seed_id": seed_id, + "oob": True, + } + return _render(request, "partials/seed_detail_response.html", context) + + target_url = str(request.url_for("dashboard")) + f"?seed_id={seed_id}" + return RedirectResponse(target_url, status_code=303) + + +@router.post("/actions/seeds/{seed_id}/dca", response_class=HTMLResponse) +def queue_dca(request: Request, seed_id: str) -> Response: + workflow = _workflow(request) + try: + workflow.queue_dca(seed_id) + except (KeyError, RuntimeError) as exc: + if _is_htmx(request): + return _render(request, "partials/action_error.html", {"message": str(exc)}, status_code=400) + raise HTTPException(status_code=400, detail=str(exc)) from exc + target_url = str(request.url_for("dashboard")) + f"?seed_id={seed_id}" + if _is_htmx(request): + response = Response(status_code=204) + response.headers["HX-Redirect"] = target_url + return response + return RedirectResponse(target_url, status_code=303) + + +@router.post("/actions/seeds/{seed_id}/ralph/start", response_class=HTMLResponse) +def start_ralph_loop(request: Request, seed_id: str) -> Response: + workflow = _workflow(request) + try: + seed = workflow.set_ralph_loop(seed_id, True) + if seed.status in { + SeedStatus.draft, + SeedStatus.generated, + SeedStatus.passed, + SeedStatus.failed, + SeedStatus.promoted, + }: + workflow.queue_p(seed_id) + except KeyError as exc: + if _is_htmx(request): + return _render(request, 
"partials/action_error.html", {"message": str(exc)}, status_code=404) + raise HTTPException(status_code=404, detail=str(exc)) from exc + except (RuntimeError, GitCommandError) as exc: + if _is_htmx(request): + return _render(request, "partials/action_error.html", {"message": str(exc)}, status_code=400) + raise HTTPException(status_code=400, detail=str(exc)) from exc + target_url = str(request.url_for("dashboard")) + f"?seed_id={seed_id}" + if _is_htmx(request): + response = Response(status_code=204) + response.headers["HX-Redirect"] = target_url + return response + return RedirectResponse(target_url, status_code=303) + + +@router.post("/actions/seeds/{seed_id}/ralph/stop", response_class=HTMLResponse) +def stop_ralph_loop(request: Request, seed_id: str) -> Response: + workflow = _workflow(request) + try: + workflow.set_ralph_loop(seed_id, False) + except KeyError as exc: + if _is_htmx(request): + return _render(request, "partials/action_error.html", {"message": str(exc)}, status_code=404) + raise HTTPException(status_code=404, detail=str(exc)) from exc + target_url = str(request.url_for("dashboard")) + f"?seed_id={seed_id}" + if _is_htmx(request): + response = Response(status_code=204) + response.headers["HX-Redirect"] = target_url + return response + return RedirectResponse(target_url, status_code=303) diff --git a/component_system/web/static/app.css b/component_system/web/static/app.css new file mode 100644 index 000000000..7edabb7c7 --- /dev/null +++ b/component_system/web/static/app.css @@ -0,0 +1,137 @@ +:root { + color-scheme: dark; + --card-bg: rgb(15 23 42 / 0.6); + --card-border: rgb(51 65 85); + --muted: rgb(148 163 184); +} + +body { + font-family: + Inter, + ui-sans-serif, + system-ui, + -apple-system, + BlinkMacSystemFont, + "Segoe UI", + sans-serif; + -webkit-font-smoothing: antialiased; +} + +/* IDs and branch names */ +.font-mono-id { + font-family: ui-monospace, "Cascadia Code", "Source Code Pro", Menlo, Consolas, monospace; + font-size: 0.9em; + 
letter-spacing: 0.02em; +} + +.line-clamp-3 { + display: -webkit-box; + overflow: hidden; + -webkit-box-orient: vertical; + -webkit-line-clamp: 3; +} + +/* Card hover for clickable seed cards */ +.seed-card { + transition: border-color 0.15s ease, background-color 0.15s ease; +} +.seed-card:hover { + border-color: rgb(56 189 248 / 0.5); + background-color: rgb(15 23 42 / 0.9); +} +.seed-card.is-selected { + border-color: rgb(14 165 233); + background-color: rgb(14 165 233 / 0.14); + box-shadow: inset 0 0 0 1px rgb(14 165 233 / 0.35); +} + +/* Status pills */ +.status-pill { + display: inline-flex; + align-items: center; + border: 1px solid transparent; + font-size: 0.625rem; + font-weight: 600; + letter-spacing: 0.04em; + text-transform: uppercase; + line-height: 1; + padding: 0.2rem 0.5rem; + border-radius: 9999px; + white-space: nowrap; +} +.status-draft { background: rgb(51 65 85 / 0.62); border-color: rgb(148 163 184 / 0.4); color: rgb(226 232 240); } +.status-queued { background: rgb(146 64 14 / 0.45); border-color: rgb(245 158 11 / 0.45); color: rgb(253 230 138); } +.status-planning { background: rgb(30 64 175 / 0.4); border-color: rgb(96 165 250 / 0.45); color: rgb(191 219 254); } +.status-generated { background: rgb(15 118 110 / 0.38); border-color: rgb(45 212 191 / 0.4); color: rgb(153 246 228); } +.status-dca_queued { background: rgb(8 145 178 / 0.33); border-color: rgb(34 211 238 / 0.38); color: rgb(165 243 252); } +.status-adapting, +.status-running { background: rgb(109 40 217 / 0.35); border-color: rgb(192 132 252 / 0.42); color: rgb(233 213 255); } +.status-passed { background: rgb(21 128 61 / 0.28); border-color: rgb(74 222 128 / 0.4); color: rgb(187 247 208); } +.status-failed { background: rgb(153 27 27 / 0.34); border-color: rgb(248 113 113 / 0.42); color: rgb(254 202 202); } +.status-promoted { background: rgb(22 163 74 / 0.28); border-color: rgb(74 222 128 / 0.42); color: rgb(187 247 208); } + +/* Empty state placeholder */ +.empty-value { + 
color: rgb(100 116 139); + font-style: normal; +} + +/* Section headers */ +.section-label { + font-size: 11px; + letter-spacing: 0.2em; + text-transform: uppercase; + color: rgb(100 116 139); +} + +/* Scroll containers for long dashboard lists */ +.scroll-pane { + min-height: 0; + overflow-y: auto; + scrollbar-gutter: stable; +} + +.scroll-pane-stage { + max-height: min(32rem, 68vh); +} + +.scroll-pane-detail { + max-height: min(30rem, 62vh); +} + +.run-log-grid { + display: grid; + gap: 0.75rem; + grid-template-columns: minmax(0, 2fr) minmax(0, 4fr) minmax(0, 4fr); +} + +.run-log-pane { + min-width: 0; + min-height: 0; + display: flex; + flex-direction: column; +} + +.run-log-pre { + min-width: 0; + min-height: 0; + flex: 1 1 auto; + overflow: auto; + white-space: pre-wrap; + overflow-wrap: anywhere; + word-break: break-word; + border: 1px solid rgb(30 41 59); + border-radius: 0.25rem; + background: rgb(0 0 0 / 0.3); + padding: 0.5rem; + font-family: ui-monospace, "Cascadia Code", "Source Code Pro", Menlo, Consolas, monospace; + font-size: 11px; + line-height: 1.25rem; + color: rgb(226 232 240); +} + +@media (max-width: 1024px) { + .run-log-grid { + grid-template-columns: 1fr; + } +} diff --git a/component_system/web/static/app.js b/component_system/web/static/app.js new file mode 100644 index 000000000..77b514abc --- /dev/null +++ b/component_system/web/static/app.js @@ -0,0 +1,399 @@ +document.body.addEventListener("htmx:responseError", (event) => { + const target = event.detail.target; + if (!target) { + return; + } + target.innerHTML = `
Request failed.
`; +}); + +function selectedSeedIdFromUrl() { + const params = new URLSearchParams(window.location.search); + return params.get("seed_id"); +} + +function applySelectedSeed(seedId) { + const cards = document.querySelectorAll(".seed-card[data-seed-id]"); + cards.forEach((card) => { + const isSelected = seedId !== null && card.dataset.seedId === seedId; + card.classList.toggle("is-selected", isSelected); + card.setAttribute("aria-current", isSelected ? "true" : "false"); + }); +} + +let dashboardPollInFlight = false; +let seedDetailPollInFlight = false; + +function seedDetailUrl(seedId) { + const detail = document.getElementById("seed-detail"); + const template = detail?.dataset.seedDetailUrlTemplate; + if (!template || !seedId) { + return null; + } + return template.replace("__SEED_ID__", encodeURIComponent(seedId)); +} + +function isLogViewerOpen() { + const target = document.getElementById("seed-detail"); + if (!target) { + return false; + } + if (target.querySelector('[data-log-viewer-open="true"]')) { + return true; + } + if (target.querySelector("[data-log-stream]")) { + return true; + } + const seedId = selectedSeedIdFromUrl(); + return Boolean(seedId && localStorage.getItem(`seed-active-run-${seedId}`)); +} + +function dashboardBoardUrl() { + const board = document.getElementById("dashboard-board"); + const base = board?.dataset.dashboardPartialUrl; + if (!base) { + return null; + } + const seedId = selectedSeedIdFromUrl(); + if (!seedId) { + return base; + } + const separator = base.includes("?") ? 
"&" : "?"; + return `${base}${separator}seed_id=${encodeURIComponent(seedId)}`; +} + +function pollDashboardBoard() { + const target = document.getElementById("dashboard-board"); + const url = dashboardBoardUrl(); + if (!target || !url || dashboardPollInFlight) { + return; + } + dashboardPollInFlight = true; + htmx + .ajax("GET", url, { target: "#dashboard-board", swap: "outerHTML" }) + .finally(() => { + dashboardPollInFlight = false; + }); +} + +function pollSeedDetail() { + const seedId = selectedSeedIdFromUrl(); + const target = document.getElementById("seed-detail"); + const url = seedDetailUrl(seedId); + if (!target || !url || seedDetailPollInFlight) { + return; + } + if (isLogViewerOpen()) { + return; + } + seedDetailPollInFlight = true; + htmx.ajax("GET", url, { target: "#seed-detail", swap: "innerHTML" }).finally(() => { + seedDetailPollInFlight = false; + }); +} + +function pollDashboard() { + if (document.hidden) { + return; + } + if (isLogViewerOpen()) { + return; + } + pollDashboardBoard(); + pollSeedDetail(); +} + +document.body.addEventListener("htmx:beforeRequest", (event) => { + const target = event.detail?.target; + if (!target || !isLogViewerOpen()) { + return; + } + // Pause daemon status auto-refresh while viewing logs. 
+ if (target.id === "daemon-status-panel") { + event.preventDefault(); + } +}); + +document.body.addEventListener("click", (event) => { + const card = event.target.closest(".seed-card[data-seed-id]"); + if (!card) { + return; + } + applySelectedSeed(card.dataset.seedId); +}); + +document.body.addEventListener("htmx:afterSettle", (event) => { + const target = event.detail?.target; + if (target && target.id === "seed-detail") { + applySelectedSeed(selectedSeedIdFromUrl()); + } +}); + +window.addEventListener("popstate", () => { + applySelectedSeed(selectedSeedIdFromUrl()); +}); + +applySelectedSeed(selectedSeedIdFromUrl()); +window.setInterval(pollDashboard, 5000); + +const logStreamIntervals = new Map(); +const logStreamState = new Map(); +const ansiCtor = window.AnsiUp || window.ansi_up?.AnsiUp || null; +const ansiRenderer = ansiCtor ? new ansiCtor() : null; + +if (ansiRenderer && Object.prototype.hasOwnProperty.call(ansiRenderer, "escape_html")) { + ansiRenderer.escape_html = true; +} + +function stripAnsiSequences(value) { + // CSI: \x1b[...m, OSC: \x1b]...\x07 or \x1b\ ; then any remaining ESC controls. 
+ return (value || "") + .replace(/\u001b\][^\u0007]*(?:\u0007|\u001b\\)/g, "") + .replace(/\u001b\[[0-?]*[ -/]*[@-~]/g, "") + .replace(/\u001b[@-_]/g, ""); +} + +function isRunComplete(status) { + return status === "succeeded" || status === "failed"; +} + +function updateLogStatus(runId, text) { + const nodes = document.querySelectorAll(`[data-log-status][data-run-id="${runId}"]`); + nodes.forEach((node) => { + node.textContent = text; + }); +} + +function updateCopyButtonState(runId, stream, enabled) { + const buttons = document.querySelectorAll( + `[data-log-copy][data-run-id="${runId}"][data-stream="${stream}"]` + ); + buttons.forEach((button) => { + button.disabled = !enabled; + }); +} + +function appendLogContent(pre, chunk) { + const currentRaw = pre.dataset.rawLog || ""; + const nextRaw = currentRaw + (chunk || ""); + + // Keep the viewer responsive for very large logs. + const maxChars = 200_000; + const trimmedRaw = + nextRaw.length > maxChars ? nextRaw.slice(nextRaw.length - maxChars) : nextRaw; + + pre.dataset.rawLog = trimmedRaw; + if (ansiRenderer) { + pre.innerHTML = ansiRenderer.ansi_to_html(trimmedRaw); + } else { + pre.textContent = stripAnsiSequences(trimmedRaw); + } + + pre.scrollTop = pre.scrollHeight; +} + +async function pollLogStream(pre) { + const runId = pre.dataset.runId; + const stream = pre.dataset.stream || "stdout"; + if (!runId) { + return; + } + + const state = logStreamState.get(pre) || { offset: 0, complete: false }; + const response = await fetch( + `/component-system/api/runs/${encodeURIComponent(runId)}/log?stream=${encodeURIComponent(stream)}&offset=${state.offset}` + ); + if (!response.ok) { + throw new Error(`Failed to fetch logs for ${runId}: ${response.status}`); + } + + const payload = await response.json(); + const chunk = payload.chunk || ""; + const nextOffset = Number(payload.next_offset || 0); + const complete = Boolean(payload.complete); + + appendLogContent(pre, chunk); + updateCopyButtonState(runId, stream, 
pre.textContent.length > 0); + logStreamState.set(pre, { offset: nextOffset, complete }); + + if (complete) { + updateLogStatus(runId, "Completed"); + const intervalId = logStreamIntervals.get(pre); + if (intervalId) { + clearInterval(intervalId); + logStreamIntervals.delete(pre); + } + return; + } + + if (chunk) { + updateLogStatus(runId, "Streaming..."); + } else { + updateLogStatus(runId, "Waiting for log output..."); + } +} + +function cleanupDetachedLogStreams() { + for (const [pre, intervalId] of logStreamIntervals.entries()) { + if (!document.body.contains(pre)) { + clearInterval(intervalId); + logStreamIntervals.delete(pre); + logStreamState.delete(pre); + } + } +} + +function initializeLogCopyButtons(root) { + root.querySelectorAll("[data-log-copy]").forEach((button) => { + if (button.dataset.logCopyReady === "true") { + return; + } + button.dataset.logCopyReady = "true"; + button.addEventListener("click", async () => { + const runId = button.dataset.runId; + if (!runId) { + return; + } + const stream = button.dataset.stream || "stdout"; + const pre = root.querySelector( + `[data-log-stream][data-run-id="${runId}"][data-stream="${stream}"]` + ); + if (!pre || !pre.textContent) { + return; + } + try { + await navigator.clipboard.writeText(pre.textContent); + const labelBefore = button.textContent; + button.textContent = "Copied!"; + setTimeout(() => { + button.textContent = labelBefore || "Copy"; + }, 1200); + } catch (error) { + console.error("Failed to copy log output", error); + } + }); + }); +} + +async function loadPromptContent(pre) { + const runId = pre.dataset.runId; + if (!runId) return; + try { + const response = await fetch( + `/component-system/api/runs/${encodeURIComponent(runId)}/prompt` + ); + if (!response.ok) return; + const payload = await response.json(); + const content = payload.content ?? 
""; + pre.textContent = content; + const copyBtn = document.querySelector( + `[data-prompt-copy][data-run-id="${runId}"]` + ); + if (copyBtn) copyBtn.disabled = false; + } catch (err) { + console.error("Failed to load prompt for run", runId, err); + } +} + +function initializePromptDisplays(root) { + root.querySelectorAll("[data-prompt-content]").forEach((pre) => { + if (pre.dataset.promptLoaded === "true") return; + pre.dataset.promptLoaded = "true"; + loadPromptContent(pre); + }); + root.querySelectorAll("[data-prompt-copy]").forEach((button) => { + if (button.dataset.promptCopyReady === "true") return; + button.dataset.promptCopyReady = "true"; + button.addEventListener("click", async () => { + const runId = button.dataset.runId; + if (!runId) return; + const pre = root.querySelector( + `[data-prompt-content][data-run-id="${runId}"]` + ); + if (!pre || !pre.textContent) return; + try { + await navigator.clipboard.writeText(pre.textContent); + const labelBefore = button.textContent; + button.textContent = "Copied!"; + setTimeout(() => { + button.textContent = labelBefore || "Copy"; + }, 1200); + } catch (err) { + console.error("Failed to copy prompt", err); + } + }); + }); +} + +function initializeLogStreams(root = document) { + cleanupDetachedLogStreams(); + initializeLogCopyButtons(root); + initializePromptDisplays(root); + + root.querySelectorAll("[data-log-stream]").forEach((pre) => { + if (pre.dataset.logStreamReady === "true") { + return; + } + pre.dataset.logStreamReady = "true"; + const runStatus = pre.dataset.runStatus || ""; + const runId = pre.dataset.runId; + if (!runId) { + return; + } + + if (isRunComplete(runStatus)) { + updateLogStatus(runId, "Completed"); + } else { + updateLogStatus(runId, "Connecting..."); + } + + const runPoll = async () => { + try { + await pollLogStream(pre); + } catch (error) { + updateLogStatus(runId, "Log fetch failed"); + console.error(error); + } + }; + + runPoll(); + const intervalId = window.setInterval(runPoll, 
2000); + logStreamIntervals.set(pre, intervalId); + }); +} + +function observeLogStreamMounts() { + const observer = new MutationObserver((mutations) => { + for (const mutation of mutations) { + if (mutation.type !== "childList" || mutation.addedNodes.length === 0) { + continue; + } + for (const node of mutation.addedNodes) { + if (!(node instanceof Element)) { + continue; + } + if ( + node.matches?.("[data-log-stream], [data-log-copy], [data-prompt-content], [data-prompt-copy]") || + node.querySelector?.("[data-log-stream], [data-log-copy], [data-prompt-content], [data-prompt-copy]") + ) { + initializeLogStreams(node); + return; + } + } + } + }); + + observer.observe(document.body, { childList: true, subtree: true }); +} + +document.body.addEventListener("htmx:afterSettle", (event) => { + const target = event.detail?.target; + if (!target) { + return; + } + if (target.id === "seed-detail") { + initializeLogStreams(target); + } +}); + +initializeLogStreams(document); +observeLogStreamMounts(); diff --git a/component_system/web/static/tailwind.input.css b/component_system/web/static/tailwind.input.css new file mode 100644 index 000000000..a563500f2 --- /dev/null +++ b/component_system/web/static/tailwind.input.css @@ -0,0 +1,27 @@ +@tailwind base; +@tailwind components; +@tailwind utilities; + +@layer base { + :root { + color-scheme: dark; + } + + body { + @apply min-h-screen bg-slate-950 text-slate-100; + font-family: + Inter, + ui-sans-serif, + system-ui, + -apple-system, + BlinkMacSystemFont, + "Segoe UI", + sans-serif; + } +} + +@layer utilities { + .card-panel { + @apply rounded-2xl border border-slate-800 bg-slate-900; + } +} diff --git a/component_system/web/templates/base.html b/component_system/web/templates/base.html new file mode 100644 index 000000000..ee1ac5364 --- /dev/null +++ b/component_system/web/templates/base.html @@ -0,0 +1,32 @@ + + + + + + {% block title %}Component System{% endblock %} + + + + + + + + +
+
+
+ + Component System + +

Seed -> Plan -> Do-Check-Action orchestration with FastAPI, HTMX, Alpine, and Tailwind.

+
+ +
+
+
+ {% block content %}{% endblock %} +
+ + diff --git a/component_system/web/templates/dashboard.html b/component_system/web/templates/dashboard.html new file mode 100644 index 000000000..5bff902d4 --- /dev/null +++ b/component_system/web/templates/dashboard.html @@ -0,0 +1,120 @@ +{% extends "base.html" %} +{% block title %}Component System Dashboard{% endblock %} +{% block content %} +
+
+
+

Create Seed

+

Start a new seed from a prompt. Baseline branch is selected here; each seed has one branch (seed id).

+
+
+
+ + +
+
+

One branch per seed: the seed id is the branch name (e.g. seed-a1b2c3).

+ + + +
+ {% if dashboard.setup_error %} +
+

Git setup required

+

{{ dashboard.setup_error }}

+
+ {% endif %} + {% with daemon_status=dashboard.daemon_status %} + {% include "partials/daemon_status.html" %} + {% endwith %} +
+

Baseline branches

+

Per-branch metrics (last val_bpb, promoted seed). Workflow-managed, read-only: component_system/baseline_branches.json (per-branch mapping), component_system/baseline_metrics.json (baseline run metrics).

+ {% if dashboard.baseline_metrics_by_branch %} +
+ {% for branch, m in dashboard.baseline_metrics_by_branch.items() %} +
+
{{ branch }}
+
val_bpb {{ "%.6f"|format(m.get('last_val_bpb')) if m.get('last_val_bpb') is not none else "—" }} · {{ m.get('promoted_branch') or "—" }}
+
+ {% endfor %} +
+ {% else %} +

No baseline metrics yet. Run the first DCA to establish baseline for a branch.

+ {% endif %} +
+
+

Direct Code Agent

+

Run the configured code agent from the project root with a dedicated single-worker executor. New runs appear in the Do-Check-Action column.

+
+ + + +
+
+
+
+ {% include "partials/dashboard_board.html" %} +
+ {% if detail %} + {% with + seed=detail.seed, + runs=detail.runs, + events=detail.events, + baseline_metrics_for_branch=detail.baseline_metrics_for_branch, + setup_error=detail.setup_error + %} + {% include "partials/seed_detail.html" %} + {% endwith %} + {% else %} +
+ Select a seed to inspect its worktree, plan, runs, logs, and promotion history. +
+ {% endif %} +
+
+
+
+{% endblock %} diff --git a/component_system/web/templates/partials/action_error.html b/component_system/web/templates/partials/action_error.html new file mode 100644 index 000000000..8a856804e --- /dev/null +++ b/component_system/web/templates/partials/action_error.html @@ -0,0 +1,3 @@ +
+ {{ message }} +
diff --git a/component_system/web/templates/partials/daemon_status.html b/component_system/web/templates/partials/daemon_status.html new file mode 100644 index 000000000..75a0d5e2e --- /dev/null +++ b/component_system/web/templates/partials/daemon_status.html @@ -0,0 +1,14 @@ +
+

Daemon: {% if daemon_status == 'running' %}running{% else %}not running{% endif %}

+

Plan and Do-Check-Action runs are executed by the daemon.

+ {% if daemon_status != 'running' %} +

Start it in a terminal:

+

uv run component_system/run.py

+ {% endif %} +
diff --git a/component_system/web/templates/partials/dashboard_board.html b/component_system/web/templates/partials/dashboard_board.html new file mode 100644 index 000000000..df82ab6ad --- /dev/null +++ b/component_system/web/templates/partials/dashboard_board.html @@ -0,0 +1,58 @@ +
+
+

+ Dashboard {{ dashboard.seed_count }} seed{{ 's' if dashboard.seed_count != 1 else '' }} across all stages +

+
+
+ {% for column in dashboard.columns %} +
+
+

{{ column.title }}

+

{{ column.description }}

+
+
+ {% if column.seeds %} + {% for seed in column.seeds %} + {% set is_selected = selected_seed_id == seed.seed_id %} + {% set is_promoted = column.id == 'completed' and seed.status.value == 'promoted' %} + +
+

{{ seed.seed_id }}

+ {{ seed.status.value|replace('_', ' ')|title }} +
+

{{ seed.prompt }}

+ {% if seed.plan %} +

{{ seed.plan.title }}

+ {% endif %} + {% if seed.latest_metrics and seed.latest_metrics.get('val_bpb') is not none %} +

val_bpb {{ "%.4f"|format(seed.latest_metrics.val_bpb) }}{% if seed.latest_signal %} · {{ seed.latest_signal }}{% endif %}

+ {% endif %} +
+ {% endfor %} + {% else %} +
+ No seeds in this stage. +
+ {% endif %} +
+
+ {% endfor %} +
+
diff --git a/component_system/web/templates/partials/seed_detail.html b/component_system/web/templates/partials/seed_detail.html new file mode 100644 index 000000000..93f5439cb --- /dev/null +++ b/component_system/web/templates/partials/seed_detail.html @@ -0,0 +1,326 @@ +
+
+
+ +

{{ seed.seed_id }}

+ {% if can_edit_prompt %} +
+ + + +
+ {% else %} +

{{ seed.prompt }}

+ {% endif %} +
+
+ {% if seed.ralph_loop_enabled %} +
+ +
+ {% else %} +
+ +
+ {% endif %} +
+ +
+
+ +
+
+
+ + {% if setup_error %} +
+ {{ setup_error }} +
+ {% endif %} + +
+
+
+ + {{ seed.status.value|replace('_', ' ')|title }} +
+

Ralph loop: {% if seed.ralph_loop_enabled %}enabled{% else %}disabled{% endif %}

+

Latest signal: {% if seed.latest_signal %}{{ seed.latest_signal }}{% else %}{% endif %}

+
+
+ +
+
Baseline
{{ seed.baseline_branch }}
+
Branch
{{ seed.seed_id }}
+
+
+
+ +
+
Seed worktree
{{ seed.worktree_path or "—" }}
+
+
+
+ +
+
+
+

Plan

+ {% if seed.plan %} +
+
+ +

{{ seed.plan.title }}

+
+
+ +

{{ seed.plan.target_component }}

+
+
+ +

{{ seed.plan.description }}

+
+ {% if seed.plan.commit_sha %} +
+ +

{{ seed.plan.commit_sha }}

+
+ {% endif %} +
+ {% else %} +

No plan yet. Click Run Plan to queue the task; the plan is generated when the daemon runs it.

+ {% endif %} +
+ +
+
+

Runs

+
+ {% if runs and seed.status.value in ['queued', 'planning'] %} +

Runs stay queued until the daemon is running. Start: uv run component_system/run.py

+ {% endif %} +
+ {% if runs %} + {% for run in runs %} +
+
+
+

{% if run.stage.value == 'p' %}Plan{% else %}{{ run.stage.value|upper }}{% endif %} · {{ run.status.value }}

+

{{ run.run_id }}

+
+
+ {% if run.signal %} + {{ run.signal }} + {% endif %} + +
+
+ {% if run.metrics %} +
+ {% for key, value in run.metrics.items() %} +
+
{{ key }}
+
{{ value }}
+
+ {% endfor %} +
+ {% endif %} +
+ + {% endfor %} + {% else %} +

No runs yet. Use Run Plan to start.

+ {% endif %} +
+
+
+ +
+
+

Latest Metrics

+ {% if seed.latest_metrics %} +
+ {% for key, value in seed.latest_metrics.items() %} +
+ +
{{ value }}
+
+ {% endfor %} +
+ {% else %} +

Metrics appear here after Do-Check-Action runs the training entrypoint.

+ {% endif %} +
+ +
+
+

Timeline

+ +
+
+ {% if events %} + {% for event in events %} +
+

{{ event.message }}

+ {% if event.commit_sha %} +

commit: {{ event.commit_sha }}

+ {% endif %} + {% if event.target_branch %} +

target branch: {{ event.target_branch }}

+ {% endif %} +

{{ event.kind }} · {{ event.created_at_human }}

+
+ {% endfor %} + {% else %} +

No events yet.

+ {% endif %} +
+
+
+
+
diff --git a/component_system/web/templates/partials/seed_detail_response.html b/component_system/web/templates/partials/seed_detail_response.html new file mode 100644 index 000000000..ca64317e3 --- /dev/null +++ b/component_system/web/templates/partials/seed_detail_response.html @@ -0,0 +1,4 @@ +{% with oob=True %} + {% include "partials/dashboard_board.html" %} +{% endwith %} +{% include "partials/seed_detail.html" %} diff --git a/component_system/web/templates/seed_detail_page.html b/component_system/web/templates/seed_detail_page.html new file mode 100644 index 000000000..ec7ca146c --- /dev/null +++ b/component_system/web/templates/seed_detail_page.html @@ -0,0 +1,15 @@ +{% extends "base.html" %} +{% block title %}Seed {{ seed.seed_id }}{% endblock %} +{% block content %} + +
+ {% include "partials/seed_detail.html" %} +
+{% endblock %} diff --git a/prepare.py b/prepare.py index 62607b9af..b64b909fc 100644 --- a/prepare.py +++ b/prepare.py @@ -38,7 +38,8 @@ CACHE_DIR = os.path.join(os.path.expanduser("~"), ".cache", "autoresearch") DATA_DIR = os.path.join(CACHE_DIR, "data") TOKENIZER_DIR = os.path.join(CACHE_DIR, "tokenizer") -BASE_URL = "https://huggingface.co/datasets/karpathy/climbmix-400b-shuffle/resolve/main" +HF_ENDPOINT = os.environ.get("HF_ENDPOINT", "https://huggingface.co").rstrip("/") +BASE_URL = f"{HF_ENDPOINT}/datasets/karpathy/climbmix-400b-shuffle/resolve/main" MAX_SHARD = 6542 # the last datashard is shard_06542.parquet VAL_SHARD = MAX_SHARD # pinned validation shard (shard_06542) VAL_FILENAME = f"shard_{VAL_SHARD:05d}.parquet" diff --git a/pyproject.toml b/pyproject.toml index 94ae32989..d95798a99 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -5,15 +5,19 @@ description = "Autonomous pretraining research swarm" readme = "README.md" requires-python = ">=3.10" dependencies = [ + "fastapi>=0.116.0", + "jinja2>=3.1.6", "kernels>=0.11.7", "matplotlib>=3.10.8", "numpy>=2.2.6", "pandas>=2.3.3", "pyarrow>=21.0.0", + "python-multipart>=0.0.20", "requests>=2.32.0", "rustbpe>=0.1.0", "tiktoken>=0.11.0", "torch==2.9.1", + "uvicorn>=0.35.0", ] [tool.uv.sources] diff --git a/scripts/clean_history.py b/scripts/clean_history.py new file mode 100644 index 000000000..df085d0ce --- /dev/null +++ b/scripts/clean_history.py @@ -0,0 +1,147 @@ +#!/usr/bin/env python3 +"""Reset local autoresearch history/runtime artifacts. 
+ +Actions: +1) Checkout main branch (configurable) +2) Remove all extra git worktrees +3) Delete all local branches except main +4) Clear component_system runtime state/history folders +5) Remove .pytest_cache, __pycache__, and results.tsv +""" + +from __future__ import annotations + +import argparse +import shutil +import subprocess +from pathlib import Path + + +def run_git(args: list[str], cwd: Path, dry_run: bool = False) -> list[str]: + cmd = ["git", *args] + if dry_run: + print(f"[dry-run] {' '.join(cmd)}") + return [] + proc = subprocess.run(cmd, cwd=cwd, text=True, capture_output=True) + if proc.returncode != 0: + raise RuntimeError( + f"Command failed: {' '.join(cmd)}\n" + f"stdout:\n{proc.stdout}\n" + f"stderr:\n{proc.stderr}" + ) + return [line for line in proc.stdout.splitlines() if line.strip()] + + +def is_broken_worktree_remove_error(error: RuntimeError) -> bool: + msg = str(error) + return ( + "worktree remove --force" in msg + and "validation failed, cannot remove working tree" in msg + and ".git' does not exist" in msg + ) + + +def remove_children(path: Path, dry_run: bool = False) -> None: + if not path.exists(): + return + for child in path.iterdir(): + if dry_run: + print(f"[dry-run] remove {child}") + continue + if child.is_dir(): + shutil.rmtree(child, ignore_errors=True) + else: + child.unlink(missing_ok=True) + + +def remove_pycache_dirs(repo_root: Path, dry_run: bool = False) -> None: + for pycache in repo_root.rglob("__pycache__"): + parts = set(pycache.parts) + if ".venv" in parts or ".git" in parts: + continue + if pycache.is_dir(): + if dry_run: + print(f"[dry-run] remove {pycache}") + else: + shutil.rmtree(pycache, ignore_errors=True) + + +def main() -> None: + parser = argparse.ArgumentParser(description="Clean local branches/worktrees and runtime history.") + parser.add_argument("--main-branch", default="master", help="Branch to keep. 
Default: %(default)s")
+    parser.add_argument("--dry-run", action="store_true", help="Print actions without changing anything")
+    args = parser.parse_args()
+
+    repo_root = Path.cwd().resolve()
+    print(f"Repository: {repo_root}")
+
+    print("Verifying git repository...")
+    run_git(["rev-parse", "--is-inside-work-tree"], cwd=repo_root, dry_run=args.dry_run)
+
+    print(f"Checking out '{args.main_branch}'...")
+    run_git(["checkout", args.main_branch], cwd=repo_root, dry_run=args.dry_run)
+
+    print("Removing extra worktrees...")
+    run_git(["worktree", "prune"], cwd=repo_root, dry_run=args.dry_run)
+    wt_lines = run_git(["worktree", "list", "--porcelain"], cwd=repo_root, dry_run=args.dry_run)
+    worktrees: list[Path] = []
+    for line in wt_lines:
+        if line.startswith("worktree "):
+            worktrees.append(Path(line[len("worktree ") :]).resolve())
+
+    for wt in worktrees:
+        if wt != repo_root:
+            print(f" - removing worktree {wt}")
+            try:
+                run_git(["worktree", "remove", "--force", str(wt)], cwd=repo_root, dry_run=args.dry_run)
+            except RuntimeError as error:
+                if not is_broken_worktree_remove_error(error):
+                    raise
+                print(f" ! 
stale/broken worktree metadata detected, deleting directory: {wt}") + if args.dry_run: + print(f"[dry-run] remove {wt}") + else: + shutil.rmtree(wt, ignore_errors=True) + run_git(["worktree", "prune"], cwd=repo_root, dry_run=args.dry_run) + + print(f"Deleting local branches except '{args.main_branch}'...") + branches = run_git( + ["for-each-ref", "--format=%(refname:short)", "refs/heads"], + cwd=repo_root, + dry_run=args.dry_run, + ) + for branch in branches: + if branch != args.main_branch: + print(f" - deleting branch {branch}") + run_git(["branch", "-D", branch], cwd=repo_root, dry_run=args.dry_run) + + print("Clearing component-system runtime/history artifacts...") + history_root = repo_root / "component_system" / "history" + for name in ("state", "queue", "worktrees", "logs"): + remove_children(history_root / name, dry_run=args.dry_run) + + pytest_cache = repo_root / ".pytest_cache" + if pytest_cache.exists(): + if args.dry_run: + print(f"[dry-run] remove {pytest_cache}") + else: + shutil.rmtree(pytest_cache, ignore_errors=True) + + results_tsv = repo_root / "results.tsv" + if results_tsv.exists(): + if args.dry_run: + print(f"[dry-run] remove {results_tsv}") + else: + results_tsv.unlink(missing_ok=True) + + print("Removing __pycache__ directories...") + remove_pycache_dirs(repo_root, dry_run=args.dry_run) + + print("Done.") + print("Remaining branches:") + for branch in run_git(["branch", "--format=%(refname:short)"], cwd=repo_root, dry_run=args.dry_run): + print(f" {branch}") + + +if __name__ == "__main__": + main() diff --git a/uv.lock b/uv.lock index c840d62f5..931a2d7d9 100644 --- a/uv.lock +++ b/uv.lock @@ -1,5 +1,5 @@ version = 1 -revision = 3 +revision = 2 requires-python = ">=3.10" resolution-markers = [ "python_full_version >= '3.14' and sys_platform == 'linux'", @@ -27,6 +27,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/1e/d3/26bf1008eb3d2daa8ef4cacc7f3bfdc11818d111f7e2d0201bc6e3b49d45/annotated_doc-0.0.4-py3-none-any.whl", hash 
= "sha256:571ac1dc6991c450b25a9c2d84a3705e2ae7a53467b5d111c24fa8baabbed320", size = 5303, upload-time = "2025-11-10T22:07:40.673Z" }, ] +[[package]] +name = "annotated-types" +version = "0.7.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/ee/67/531ea369ba64dcff5ec9c3402f9f51bf748cec26dde048a2f973a4eea7f5/annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89", size = 16081, upload-time = "2024-05-20T21:33:25.928Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/78/b6/6307fbef88d9b5ee7421e68d78a9f162e0da4900bc5f5793f6d3d0e34fb8/annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53", size = 13643, upload-time = "2024-05-20T21:33:24.1Z" }, +] + [[package]] name = "anyio" version = "4.12.1" @@ -46,6 +55,8 @@ name = "autoresearch" version = "0.1.0" source = { virtual = "." } dependencies = [ + { name = "fastapi" }, + { name = "jinja2" }, { name = "kernels" }, { name = "matplotlib" }, { name = "numpy", version = "2.2.6", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.11'" }, @@ -53,23 +64,29 @@ dependencies = [ { name = "pandas", version = "2.3.3", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.11'" }, { name = "pandas", version = "3.0.1", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.11'" }, { name = "pyarrow" }, + { name = "python-multipart" }, { name = "requests" }, { name = "rustbpe" }, { name = "tiktoken" }, { name = "torch" }, + { name = "uvicorn" }, ] [package.metadata] requires-dist = [ + { name = "fastapi", specifier = ">=0.116.0" }, + { name = "jinja2", specifier = ">=3.1.6" }, { name = "kernels", specifier = ">=0.11.7" }, { name = "matplotlib", specifier = ">=3.10.8" }, { name = "numpy", specifier = ">=2.2.6" }, { name = "pandas", specifier = 
">=2.3.3" }, { name = "pyarrow", specifier = ">=21.0.0" }, + { name = "python-multipart", specifier = ">=0.0.20" }, { name = "requests", specifier = ">=2.32.0" }, { name = "rustbpe", specifier = ">=0.1.0" }, { name = "tiktoken", specifier = ">=0.11.0" }, { name = "torch", specifier = "==2.9.1", index = "https://download.pytorch.org/whl/cu128" }, + { name = "uvicorn", specifier = ">=0.35.0" }, ] [[package]] @@ -379,6 +396,22 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/8a/0e/97c33bf5009bdbac74fd2beace167cab3f978feb69cc36f1ef79360d6c4e/exceptiongroup-1.3.1-py3-none-any.whl", hash = "sha256:a7a39a3bd276781e98394987d3a5701d0c4edffb633bb7a5144577f82c773598", size = 16740, upload-time = "2025-11-21T23:01:53.443Z" }, ] +[[package]] +name = "fastapi" +version = "0.135.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "annotated-doc" }, + { name = "pydantic" }, + { name = "starlette" }, + { name = "typing-extensions" }, + { name = "typing-inspection" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/e7/7b/f8e0211e9380f7195ba3f3d40c292594fd81ba8ec4629e3854c353aaca45/fastapi-0.135.1.tar.gz", hash = "sha256:d04115b508d936d254cea545b7312ecaa58a7b3a0f84952535b4c9afae7668cd", size = 394962, upload-time = "2026-03-01T18:18:29.369Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e4/72/42e900510195b23a56bde950d26a51f8b723846bfcaa0286e90287f0422b/fastapi-0.135.1-py3-none-any.whl", hash = "sha256:46e2fc5745924b7c840f71ddd277382af29ce1cdb7d5eab5bf697e3fb9999c9e", size = 116999, upload-time = "2026-03-01T18:18:30.831Z" }, +] + [[package]] name = "filelock" version = "3.24.3" @@ -1524,6 +1557,139 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/50/f2/c0e76a0b451ffdf0cf788932e182758eb7558953f4f27f1aff8e2518b653/pyarrow-23.0.1-cp314-cp314t-win_amd64.whl", hash = "sha256:527e8d899f14bd15b740cd5a54ad56b7f98044955373a17179d5956ddb93d9ce", size = 28365807, upload-time = "2026-02-16T10:14:03.892Z" 
}, ] +[[package]] +name = "pydantic" +version = "2.12.5" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "annotated-types" }, + { name = "pydantic-core" }, + { name = "typing-extensions" }, + { name = "typing-inspection" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/69/44/36f1a6e523abc58ae5f928898e4aca2e0ea509b5aa6f6f392a5d882be928/pydantic-2.12.5.tar.gz", hash = "sha256:4d351024c75c0f085a9febbb665ce8c0c6ec5d30e903bdb6394b7ede26aebb49", size = 821591, upload-time = "2025-11-26T15:11:46.471Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/5a/87/b70ad306ebb6f9b585f114d0ac2137d792b48be34d732d60e597c2f8465a/pydantic-2.12.5-py3-none-any.whl", hash = "sha256:e561593fccf61e8a20fc46dfc2dfe075b8be7d0188df33f221ad1f0139180f9d", size = 463580, upload-time = "2025-11-26T15:11:44.605Z" }, +] + +[[package]] +name = "pydantic-core" +version = "2.41.5" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/71/70/23b021c950c2addd24ec408e9ab05d59b035b39d97cdc1130e1bce647bb6/pydantic_core-2.41.5.tar.gz", hash = "sha256:08daa51ea16ad373ffd5e7606252cc32f07bc72b28284b6bc9c6df804816476e", size = 460952, upload-time = "2025-11-04T13:43:49.098Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c6/90/32c9941e728d564b411d574d8ee0cf09b12ec978cb22b294995bae5549a5/pydantic_core-2.41.5-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:77b63866ca88d804225eaa4af3e664c5faf3568cea95360d21f4725ab6e07146", size = 2107298, upload-time = "2025-11-04T13:39:04.116Z" }, + { url = "https://files.pythonhosted.org/packages/fb/a8/61c96a77fe28993d9a6fb0f4127e05430a267b235a124545d79fea46dd65/pydantic_core-2.41.5-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:dfa8a0c812ac681395907e71e1274819dec685fec28273a28905df579ef137e2", size = 1901475, upload-time = "2025-11-04T13:39:06.055Z" }, + { url = 
"https://files.pythonhosted.org/packages/5d/b6/338abf60225acc18cdc08b4faef592d0310923d19a87fba1faf05af5346e/pydantic_core-2.41.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5921a4d3ca3aee735d9fd163808f5e8dd6c6972101e4adbda9a4667908849b97", size = 1918815, upload-time = "2025-11-04T13:39:10.41Z" }, + { url = "https://files.pythonhosted.org/packages/d1/1c/2ed0433e682983d8e8cba9c8d8ef274d4791ec6a6f24c58935b90e780e0a/pydantic_core-2.41.5-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e25c479382d26a2a41b7ebea1043564a937db462816ea07afa8a44c0866d52f9", size = 2065567, upload-time = "2025-11-04T13:39:12.244Z" }, + { url = "https://files.pythonhosted.org/packages/b3/24/cf84974ee7d6eae06b9e63289b7b8f6549d416b5c199ca2d7ce13bbcf619/pydantic_core-2.41.5-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f547144f2966e1e16ae626d8ce72b4cfa0caedc7fa28052001c94fb2fcaa1c52", size = 2230442, upload-time = "2025-11-04T13:39:13.962Z" }, + { url = "https://files.pythonhosted.org/packages/fd/21/4e287865504b3edc0136c89c9c09431be326168b1eb7841911cbc877a995/pydantic_core-2.41.5-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6f52298fbd394f9ed112d56f3d11aabd0d5bd27beb3084cc3d8ad069483b8941", size = 2350956, upload-time = "2025-11-04T13:39:15.889Z" }, + { url = "https://files.pythonhosted.org/packages/a8/76/7727ef2ffa4b62fcab916686a68a0426b9b790139720e1934e8ba797e238/pydantic_core-2.41.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:100baa204bb412b74fe285fb0f3a385256dad1d1879f0a5cb1499ed2e83d132a", size = 2068253, upload-time = "2025-11-04T13:39:17.403Z" }, + { url = "https://files.pythonhosted.org/packages/d5/8c/a4abfc79604bcb4c748e18975c44f94f756f08fb04218d5cb87eb0d3a63e/pydantic_core-2.41.5-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:05a2c8852530ad2812cb7914dc61a1125dc4e06252ee98e5638a12da6cc6fb6c", size = 2177050, 
upload-time = "2025-11-04T13:39:19.351Z" }, + { url = "https://files.pythonhosted.org/packages/67/b1/de2e9a9a79b480f9cb0b6e8b6ba4c50b18d4e89852426364c66aa82bb7b3/pydantic_core-2.41.5-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:29452c56df2ed968d18d7e21f4ab0ac55e71dc59524872f6fc57dcf4a3249ed2", size = 2147178, upload-time = "2025-11-04T13:39:21Z" }, + { url = "https://files.pythonhosted.org/packages/16/c1/dfb33f837a47b20417500efaa0378adc6635b3c79e8369ff7a03c494b4ac/pydantic_core-2.41.5-cp310-cp310-musllinux_1_1_armv7l.whl", hash = "sha256:d5160812ea7a8a2ffbe233d8da666880cad0cbaf5d4de74ae15c313213d62556", size = 2341833, upload-time = "2025-11-04T13:39:22.606Z" }, + { url = "https://files.pythonhosted.org/packages/47/36/00f398642a0f4b815a9a558c4f1dca1b4020a7d49562807d7bc9ff279a6c/pydantic_core-2.41.5-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:df3959765b553b9440adfd3c795617c352154e497a4eaf3752555cfb5da8fc49", size = 2321156, upload-time = "2025-11-04T13:39:25.843Z" }, + { url = "https://files.pythonhosted.org/packages/7e/70/cad3acd89fde2010807354d978725ae111ddf6d0ea46d1ea1775b5c1bd0c/pydantic_core-2.41.5-cp310-cp310-win32.whl", hash = "sha256:1f8d33a7f4d5a7889e60dc39856d76d09333d8a6ed0f5f1190635cbec70ec4ba", size = 1989378, upload-time = "2025-11-04T13:39:27.92Z" }, + { url = "https://files.pythonhosted.org/packages/76/92/d338652464c6c367e5608e4488201702cd1cbb0f33f7b6a85a60fe5f3720/pydantic_core-2.41.5-cp310-cp310-win_amd64.whl", hash = "sha256:62de39db01b8d593e45871af2af9e497295db8d73b085f6bfd0b18c83c70a8f9", size = 2013622, upload-time = "2025-11-04T13:39:29.848Z" }, + { url = "https://files.pythonhosted.org/packages/e8/72/74a989dd9f2084b3d9530b0915fdda64ac48831c30dbf7c72a41a5232db8/pydantic_core-2.41.5-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:a3a52f6156e73e7ccb0f8cced536adccb7042be67cb45f9562e12b319c119da6", size = 2105873, upload-time = "2025-11-04T13:39:31.373Z" }, + { url = 
"https://files.pythonhosted.org/packages/12/44/37e403fd9455708b3b942949e1d7febc02167662bf1a7da5b78ee1ea2842/pydantic_core-2.41.5-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:7f3bf998340c6d4b0c9a2f02d6a400e51f123b59565d74dc60d252ce888c260b", size = 1899826, upload-time = "2025-11-04T13:39:32.897Z" }, + { url = "https://files.pythonhosted.org/packages/33/7f/1d5cab3ccf44c1935a359d51a8a2a9e1a654b744b5e7f80d41b88d501eec/pydantic_core-2.41.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:378bec5c66998815d224c9ca994f1e14c0c21cb95d2f52b6021cc0b2a58f2a5a", size = 1917869, upload-time = "2025-11-04T13:39:34.469Z" }, + { url = "https://files.pythonhosted.org/packages/6e/6a/30d94a9674a7fe4f4744052ed6c5e083424510be1e93da5bc47569d11810/pydantic_core-2.41.5-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e7b576130c69225432866fe2f4a469a85a54ade141d96fd396dffcf607b558f8", size = 2063890, upload-time = "2025-11-04T13:39:36.053Z" }, + { url = "https://files.pythonhosted.org/packages/50/be/76e5d46203fcb2750e542f32e6c371ffa9b8ad17364cf94bb0818dbfb50c/pydantic_core-2.41.5-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6cb58b9c66f7e4179a2d5e0f849c48eff5c1fca560994d6eb6543abf955a149e", size = 2229740, upload-time = "2025-11-04T13:39:37.753Z" }, + { url = "https://files.pythonhosted.org/packages/d3/ee/fed784df0144793489f87db310a6bbf8118d7b630ed07aa180d6067e653a/pydantic_core-2.41.5-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:88942d3a3dff3afc8288c21e565e476fc278902ae4d6d134f1eeda118cc830b1", size = 2350021, upload-time = "2025-11-04T13:39:40.94Z" }, + { url = "https://files.pythonhosted.org/packages/c8/be/8fed28dd0a180dca19e72c233cbf58efa36df055e5b9d90d64fd1740b828/pydantic_core-2.41.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f31d95a179f8d64d90f6831d71fa93290893a33148d890ba15de25642c5d075b", size = 2066378, upload-time = 
"2025-11-04T13:39:42.523Z" }, + { url = "https://files.pythonhosted.org/packages/b0/3b/698cf8ae1d536a010e05121b4958b1257f0b5522085e335360e53a6b1c8b/pydantic_core-2.41.5-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:c1df3d34aced70add6f867a8cf413e299177e0c22660cc767218373d0779487b", size = 2175761, upload-time = "2025-11-04T13:39:44.553Z" }, + { url = "https://files.pythonhosted.org/packages/b8/ba/15d537423939553116dea94ce02f9c31be0fa9d0b806d427e0308ec17145/pydantic_core-2.41.5-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:4009935984bd36bd2c774e13f9a09563ce8de4abaa7226f5108262fa3e637284", size = 2146303, upload-time = "2025-11-04T13:39:46.238Z" }, + { url = "https://files.pythonhosted.org/packages/58/7f/0de669bf37d206723795f9c90c82966726a2ab06c336deba4735b55af431/pydantic_core-2.41.5-cp311-cp311-musllinux_1_1_armv7l.whl", hash = "sha256:34a64bc3441dc1213096a20fe27e8e128bd3ff89921706e83c0b1ac971276594", size = 2340355, upload-time = "2025-11-04T13:39:48.002Z" }, + { url = "https://files.pythonhosted.org/packages/e5/de/e7482c435b83d7e3c3ee5ee4451f6e8973cff0eb6007d2872ce6383f6398/pydantic_core-2.41.5-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:c9e19dd6e28fdcaa5a1de679aec4141f691023916427ef9bae8584f9c2fb3b0e", size = 2319875, upload-time = "2025-11-04T13:39:49.705Z" }, + { url = "https://files.pythonhosted.org/packages/fe/e6/8c9e81bb6dd7560e33b9053351c29f30c8194b72f2d6932888581f503482/pydantic_core-2.41.5-cp311-cp311-win32.whl", hash = "sha256:2c010c6ded393148374c0f6f0bf89d206bf3217f201faa0635dcd56bd1520f6b", size = 1987549, upload-time = "2025-11-04T13:39:51.842Z" }, + { url = "https://files.pythonhosted.org/packages/11/66/f14d1d978ea94d1bc21fc98fcf570f9542fe55bfcc40269d4e1a21c19bf7/pydantic_core-2.41.5-cp311-cp311-win_amd64.whl", hash = "sha256:76ee27c6e9c7f16f47db7a94157112a2f3a00e958bc626e2f4ee8bec5c328fbe", size = 2011305, upload-time = "2025-11-04T13:39:53.485Z" }, + { url = 
"https://files.pythonhosted.org/packages/56/d8/0e271434e8efd03186c5386671328154ee349ff0354d83c74f5caaf096ed/pydantic_core-2.41.5-cp311-cp311-win_arm64.whl", hash = "sha256:4bc36bbc0b7584de96561184ad7f012478987882ebf9f9c389b23f432ea3d90f", size = 1972902, upload-time = "2025-11-04T13:39:56.488Z" }, + { url = "https://files.pythonhosted.org/packages/5f/5d/5f6c63eebb5afee93bcaae4ce9a898f3373ca23df3ccaef086d0233a35a7/pydantic_core-2.41.5-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:f41a7489d32336dbf2199c8c0a215390a751c5b014c2c1c5366e817202e9cdf7", size = 2110990, upload-time = "2025-11-04T13:39:58.079Z" }, + { url = "https://files.pythonhosted.org/packages/aa/32/9c2e8ccb57c01111e0fd091f236c7b371c1bccea0fa85247ac55b1e2b6b6/pydantic_core-2.41.5-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:070259a8818988b9a84a449a2a7337c7f430a22acc0859c6b110aa7212a6d9c0", size = 1896003, upload-time = "2025-11-04T13:39:59.956Z" }, + { url = "https://files.pythonhosted.org/packages/68/b8/a01b53cb0e59139fbc9e4fda3e9724ede8de279097179be4ff31f1abb65a/pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e96cea19e34778f8d59fe40775a7a574d95816eb150850a85a7a4c8f4b94ac69", size = 1919200, upload-time = "2025-11-04T13:40:02.241Z" }, + { url = "https://files.pythonhosted.org/packages/38/de/8c36b5198a29bdaade07b5985e80a233a5ac27137846f3bc2d3b40a47360/pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ed2e99c456e3fadd05c991f8f437ef902e00eedf34320ba2b0842bd1c3ca3a75", size = 2052578, upload-time = "2025-11-04T13:40:04.401Z" }, + { url = "https://files.pythonhosted.org/packages/00/b5/0e8e4b5b081eac6cb3dbb7e60a65907549a1ce035a724368c330112adfdd/pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:65840751b72fbfd82c3c640cff9284545342a4f1eb1586ad0636955b261b0b05", size = 2208504, upload-time = "2025-11-04T13:40:06.072Z" }, + { url = 
"https://files.pythonhosted.org/packages/77/56/87a61aad59c7c5b9dc8caad5a41a5545cba3810c3e828708b3d7404f6cef/pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e536c98a7626a98feb2d3eaf75944ef6f3dbee447e1f841eae16f2f0a72d8ddc", size = 2335816, upload-time = "2025-11-04T13:40:07.835Z" }, + { url = "https://files.pythonhosted.org/packages/0d/76/941cc9f73529988688a665a5c0ecff1112b3d95ab48f81db5f7606f522d3/pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eceb81a8d74f9267ef4081e246ffd6d129da5d87e37a77c9bde550cb04870c1c", size = 2075366, upload-time = "2025-11-04T13:40:09.804Z" }, + { url = "https://files.pythonhosted.org/packages/d3/43/ebef01f69baa07a482844faaa0a591bad1ef129253ffd0cdaa9d8a7f72d3/pydantic_core-2.41.5-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d38548150c39b74aeeb0ce8ee1d8e82696f4a4e16ddc6de7b1d8823f7de4b9b5", size = 2171698, upload-time = "2025-11-04T13:40:12.004Z" }, + { url = "https://files.pythonhosted.org/packages/b1/87/41f3202e4193e3bacfc2c065fab7706ebe81af46a83d3e27605029c1f5a6/pydantic_core-2.41.5-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:c23e27686783f60290e36827f9c626e63154b82b116d7fe9adba1fda36da706c", size = 2132603, upload-time = "2025-11-04T13:40:13.868Z" }, + { url = "https://files.pythonhosted.org/packages/49/7d/4c00df99cb12070b6bccdef4a195255e6020a550d572768d92cc54dba91a/pydantic_core-2.41.5-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:482c982f814460eabe1d3bb0adfdc583387bd4691ef00b90575ca0d2b6fe2294", size = 2329591, upload-time = "2025-11-04T13:40:15.672Z" }, + { url = "https://files.pythonhosted.org/packages/cc/6a/ebf4b1d65d458f3cda6a7335d141305dfa19bdc61140a884d165a8a1bbc7/pydantic_core-2.41.5-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:bfea2a5f0b4d8d43adf9d7b8bf019fb46fdd10a2e5cde477fbcb9d1fa08c68e1", size = 2319068, upload-time = "2025-11-04T13:40:17.532Z" }, + { url = 
"https://files.pythonhosted.org/packages/49/3b/774f2b5cd4192d5ab75870ce4381fd89cf218af999515baf07e7206753f0/pydantic_core-2.41.5-cp312-cp312-win32.whl", hash = "sha256:b74557b16e390ec12dca509bce9264c3bbd128f8a2c376eaa68003d7f327276d", size = 1985908, upload-time = "2025-11-04T13:40:19.309Z" }, + { url = "https://files.pythonhosted.org/packages/86/45/00173a033c801cacf67c190fef088789394feaf88a98a7035b0e40d53dc9/pydantic_core-2.41.5-cp312-cp312-win_amd64.whl", hash = "sha256:1962293292865bca8e54702b08a4f26da73adc83dd1fcf26fbc875b35d81c815", size = 2020145, upload-time = "2025-11-04T13:40:21.548Z" }, + { url = "https://files.pythonhosted.org/packages/f9/22/91fbc821fa6d261b376a3f73809f907cec5ca6025642c463d3488aad22fb/pydantic_core-2.41.5-cp312-cp312-win_arm64.whl", hash = "sha256:1746d4a3d9a794cacae06a5eaaccb4b8643a131d45fbc9af23e353dc0a5ba5c3", size = 1976179, upload-time = "2025-11-04T13:40:23.393Z" }, + { url = "https://files.pythonhosted.org/packages/87/06/8806241ff1f70d9939f9af039c6c35f2360cf16e93c2ca76f184e76b1564/pydantic_core-2.41.5-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:941103c9be18ac8daf7b7adca8228f8ed6bb7a1849020f643b3a14d15b1924d9", size = 2120403, upload-time = "2025-11-04T13:40:25.248Z" }, + { url = "https://files.pythonhosted.org/packages/94/02/abfa0e0bda67faa65fef1c84971c7e45928e108fe24333c81f3bfe35d5f5/pydantic_core-2.41.5-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:112e305c3314f40c93998e567879e887a3160bb8689ef3d2c04b6cc62c33ac34", size = 1896206, upload-time = "2025-11-04T13:40:27.099Z" }, + { url = "https://files.pythonhosted.org/packages/15/df/a4c740c0943e93e6500f9eb23f4ca7ec9bf71b19e608ae5b579678c8d02f/pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0cbaad15cb0c90aa221d43c00e77bb33c93e8d36e0bf74760cd00e732d10a6a0", size = 1919307, upload-time = "2025-11-04T13:40:29.806Z" }, + { url = 
"https://files.pythonhosted.org/packages/9a/e3/6324802931ae1d123528988e0e86587c2072ac2e5394b4bc2bc34b61ff6e/pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:03ca43e12fab6023fc79d28ca6b39b05f794ad08ec2feccc59a339b02f2b3d33", size = 2063258, upload-time = "2025-11-04T13:40:33.544Z" }, + { url = "https://files.pythonhosted.org/packages/c9/d4/2230d7151d4957dd79c3044ea26346c148c98fbf0ee6ebd41056f2d62ab5/pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:dc799088c08fa04e43144b164feb0c13f9a0bc40503f8df3e9fde58a3c0c101e", size = 2214917, upload-time = "2025-11-04T13:40:35.479Z" }, + { url = "https://files.pythonhosted.org/packages/e6/9f/eaac5df17a3672fef0081b6c1bb0b82b33ee89aa5cec0d7b05f52fd4a1fa/pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:97aeba56665b4c3235a0e52b2c2f5ae9cd071b8a8310ad27bddb3f7fb30e9aa2", size = 2332186, upload-time = "2025-11-04T13:40:37.436Z" }, + { url = "https://files.pythonhosted.org/packages/cf/4e/35a80cae583a37cf15604b44240e45c05e04e86f9cfd766623149297e971/pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:406bf18d345822d6c21366031003612b9c77b3e29ffdb0f612367352aab7d586", size = 2073164, upload-time = "2025-11-04T13:40:40.289Z" }, + { url = "https://files.pythonhosted.org/packages/bf/e3/f6e262673c6140dd3305d144d032f7bd5f7497d3871c1428521f19f9efa2/pydantic_core-2.41.5-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b93590ae81f7010dbe380cdeab6f515902ebcbefe0b9327cc4804d74e93ae69d", size = 2179146, upload-time = "2025-11-04T13:40:42.809Z" }, + { url = "https://files.pythonhosted.org/packages/75/c7/20bd7fc05f0c6ea2056a4565c6f36f8968c0924f19b7d97bbfea55780e73/pydantic_core-2.41.5-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:01a3d0ab748ee531f4ea6c3e48ad9dac84ddba4b0d82291f87248f2f9de8d740", size = 2137788, upload-time = 
"2025-11-04T13:40:44.752Z" }, + { url = "https://files.pythonhosted.org/packages/3a/8d/34318ef985c45196e004bc46c6eab2eda437e744c124ef0dbe1ff2c9d06b/pydantic_core-2.41.5-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:6561e94ba9dacc9c61bce40e2d6bdc3bfaa0259d3ff36ace3b1e6901936d2e3e", size = 2340133, upload-time = "2025-11-04T13:40:46.66Z" }, + { url = "https://files.pythonhosted.org/packages/9c/59/013626bf8c78a5a5d9350d12e7697d3d4de951a75565496abd40ccd46bee/pydantic_core-2.41.5-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:915c3d10f81bec3a74fbd4faebe8391013ba61e5a1a8d48c4455b923bdda7858", size = 2324852, upload-time = "2025-11-04T13:40:48.575Z" }, + { url = "https://files.pythonhosted.org/packages/1a/d9/c248c103856f807ef70c18a4f986693a46a8ffe1602e5d361485da502d20/pydantic_core-2.41.5-cp313-cp313-win32.whl", hash = "sha256:650ae77860b45cfa6e2cdafc42618ceafab3a2d9a3811fcfbd3bbf8ac3c40d36", size = 1994679, upload-time = "2025-11-04T13:40:50.619Z" }, + { url = "https://files.pythonhosted.org/packages/9e/8b/341991b158ddab181cff136acd2552c9f35bd30380422a639c0671e99a91/pydantic_core-2.41.5-cp313-cp313-win_amd64.whl", hash = "sha256:79ec52ec461e99e13791ec6508c722742ad745571f234ea6255bed38c6480f11", size = 2019766, upload-time = "2025-11-04T13:40:52.631Z" }, + { url = "https://files.pythonhosted.org/packages/73/7d/f2f9db34af103bea3e09735bb40b021788a5e834c81eedb541991badf8f5/pydantic_core-2.41.5-cp313-cp313-win_arm64.whl", hash = "sha256:3f84d5c1b4ab906093bdc1ff10484838aca54ef08de4afa9de0f5f14d69639cd", size = 1981005, upload-time = "2025-11-04T13:40:54.734Z" }, + { url = "https://files.pythonhosted.org/packages/ea/28/46b7c5c9635ae96ea0fbb779e271a38129df2550f763937659ee6c5dbc65/pydantic_core-2.41.5-cp314-cp314-macosx_10_12_x86_64.whl", hash = "sha256:3f37a19d7ebcdd20b96485056ba9e8b304e27d9904d233d7b1015db320e51f0a", size = 2119622, upload-time = "2025-11-04T13:40:56.68Z" }, + { url = 
"https://files.pythonhosted.org/packages/74/1a/145646e5687e8d9a1e8d09acb278c8535ebe9e972e1f162ed338a622f193/pydantic_core-2.41.5-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:1d1d9764366c73f996edd17abb6d9d7649a7eb690006ab6adbda117717099b14", size = 1891725, upload-time = "2025-11-04T13:40:58.807Z" }, + { url = "https://files.pythonhosted.org/packages/23/04/e89c29e267b8060b40dca97bfc64a19b2a3cf99018167ea1677d96368273/pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:25e1c2af0fce638d5f1988b686f3b3ea8cd7de5f244ca147c777769e798a9cd1", size = 1915040, upload-time = "2025-11-04T13:41:00.853Z" }, + { url = "https://files.pythonhosted.org/packages/84/a3/15a82ac7bd97992a82257f777b3583d3e84bdb06ba6858f745daa2ec8a85/pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:506d766a8727beef16b7adaeb8ee6217c64fc813646b424d0804d67c16eddb66", size = 2063691, upload-time = "2025-11-04T13:41:03.504Z" }, + { url = "https://files.pythonhosted.org/packages/74/9b/0046701313c6ef08c0c1cf0e028c67c770a4e1275ca73131563c5f2a310a/pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4819fa52133c9aa3c387b3328f25c1facc356491e6135b459f1de698ff64d869", size = 2213897, upload-time = "2025-11-04T13:41:05.804Z" }, + { url = "https://files.pythonhosted.org/packages/8a/cd/6bac76ecd1b27e75a95ca3a9a559c643b3afcd2dd62086d4b7a32a18b169/pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2b761d210c9ea91feda40d25b4efe82a1707da2ef62901466a42492c028553a2", size = 2333302, upload-time = "2025-11-04T13:41:07.809Z" }, + { url = "https://files.pythonhosted.org/packages/4c/d2/ef2074dc020dd6e109611a8be4449b98cd25e1b9b8a303c2f0fca2f2bcf7/pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:22f0fb8c1c583a3b6f24df2470833b40207e907b90c928cc8d3594b76f874375", size = 2064877, upload-time = 
"2025-11-04T13:41:09.827Z" }, + { url = "https://files.pythonhosted.org/packages/18/66/e9db17a9a763d72f03de903883c057b2592c09509ccfe468187f2a2eef29/pydantic_core-2.41.5-cp314-cp314-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2782c870e99878c634505236d81e5443092fba820f0373997ff75f90f68cd553", size = 2180680, upload-time = "2025-11-04T13:41:12.379Z" }, + { url = "https://files.pythonhosted.org/packages/d3/9e/3ce66cebb929f3ced22be85d4c2399b8e85b622db77dad36b73c5387f8f8/pydantic_core-2.41.5-cp314-cp314-musllinux_1_1_aarch64.whl", hash = "sha256:0177272f88ab8312479336e1d777f6b124537d47f2123f89cb37e0accea97f90", size = 2138960, upload-time = "2025-11-04T13:41:14.627Z" }, + { url = "https://files.pythonhosted.org/packages/a6/62/205a998f4327d2079326b01abee48e502ea739d174f0a89295c481a2272e/pydantic_core-2.41.5-cp314-cp314-musllinux_1_1_armv7l.whl", hash = "sha256:63510af5e38f8955b8ee5687740d6ebf7c2a0886d15a6d65c32814613681bc07", size = 2339102, upload-time = "2025-11-04T13:41:16.868Z" }, + { url = "https://files.pythonhosted.org/packages/3c/0d/f05e79471e889d74d3d88f5bd20d0ed189ad94c2423d81ff8d0000aab4ff/pydantic_core-2.41.5-cp314-cp314-musllinux_1_1_x86_64.whl", hash = "sha256:e56ba91f47764cc14f1daacd723e3e82d1a89d783f0f5afe9c364b8bb491ccdb", size = 2326039, upload-time = "2025-11-04T13:41:18.934Z" }, + { url = "https://files.pythonhosted.org/packages/ec/e1/e08a6208bb100da7e0c4b288eed624a703f4d129bde2da475721a80cab32/pydantic_core-2.41.5-cp314-cp314-win32.whl", hash = "sha256:aec5cf2fd867b4ff45b9959f8b20ea3993fc93e63c7363fe6851424c8a7e7c23", size = 1995126, upload-time = "2025-11-04T13:41:21.418Z" }, + { url = "https://files.pythonhosted.org/packages/48/5d/56ba7b24e9557f99c9237e29f5c09913c81eeb2f3217e40e922353668092/pydantic_core-2.41.5-cp314-cp314-win_amd64.whl", hash = "sha256:8e7c86f27c585ef37c35e56a96363ab8de4e549a95512445b85c96d3e2f7c1bf", size = 2015489, upload-time = "2025-11-04T13:41:24.076Z" }, + { url = 
"https://files.pythonhosted.org/packages/4e/bb/f7a190991ec9e3e0ba22e4993d8755bbc4a32925c0b5b42775c03e8148f9/pydantic_core-2.41.5-cp314-cp314-win_arm64.whl", hash = "sha256:e672ba74fbc2dc8eea59fb6d4aed6845e6905fc2a8afe93175d94a83ba2a01a0", size = 1977288, upload-time = "2025-11-04T13:41:26.33Z" }, + { url = "https://files.pythonhosted.org/packages/92/ed/77542d0c51538e32e15afe7899d79efce4b81eee631d99850edc2f5e9349/pydantic_core-2.41.5-cp314-cp314t-macosx_10_12_x86_64.whl", hash = "sha256:8566def80554c3faa0e65ac30ab0932b9e3a5cd7f8323764303d468e5c37595a", size = 2120255, upload-time = "2025-11-04T13:41:28.569Z" }, + { url = "https://files.pythonhosted.org/packages/bb/3d/6913dde84d5be21e284439676168b28d8bbba5600d838b9dca99de0fad71/pydantic_core-2.41.5-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:b80aa5095cd3109962a298ce14110ae16b8c1aece8b72f9dafe81cf597ad80b3", size = 1863760, upload-time = "2025-11-04T13:41:31.055Z" }, + { url = "https://files.pythonhosted.org/packages/5a/f0/e5e6b99d4191da102f2b0eb9687aaa7f5bea5d9964071a84effc3e40f997/pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3006c3dd9ba34b0c094c544c6006cc79e87d8612999f1a5d43b769b89181f23c", size = 1878092, upload-time = "2025-11-04T13:41:33.21Z" }, + { url = "https://files.pythonhosted.org/packages/71/48/36fb760642d568925953bcc8116455513d6e34c4beaa37544118c36aba6d/pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:72f6c8b11857a856bcfa48c86f5368439f74453563f951e473514579d44aa612", size = 2053385, upload-time = "2025-11-04T13:41:35.508Z" }, + { url = "https://files.pythonhosted.org/packages/20/25/92dc684dd8eb75a234bc1c764b4210cf2646479d54b47bf46061657292a8/pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5cb1b2f9742240e4bb26b652a5aeb840aa4b417c7748b6f8387927bc6e45e40d", size = 2218832, upload-time = "2025-11-04T13:41:37.732Z" }, + { url = 
"https://files.pythonhosted.org/packages/e2/09/f53e0b05023d3e30357d82eb35835d0f6340ca344720a4599cd663dca599/pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:bd3d54f38609ff308209bd43acea66061494157703364ae40c951f83ba99a1a9", size = 2327585, upload-time = "2025-11-04T13:41:40Z" }, + { url = "https://files.pythonhosted.org/packages/aa/4e/2ae1aa85d6af35a39b236b1b1641de73f5a6ac4d5a7509f77b814885760c/pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2ff4321e56e879ee8d2a879501c8e469414d948f4aba74a2d4593184eb326660", size = 2041078, upload-time = "2025-11-04T13:41:42.323Z" }, + { url = "https://files.pythonhosted.org/packages/cd/13/2e215f17f0ef326fc72afe94776edb77525142c693767fc347ed6288728d/pydantic_core-2.41.5-cp314-cp314t-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d0d2568a8c11bf8225044aa94409e21da0cb09dcdafe9ecd10250b2baad531a9", size = 2173914, upload-time = "2025-11-04T13:41:45.221Z" }, + { url = "https://files.pythonhosted.org/packages/02/7a/f999a6dcbcd0e5660bc348a3991c8915ce6599f4f2c6ac22f01d7a10816c/pydantic_core-2.41.5-cp314-cp314t-musllinux_1_1_aarch64.whl", hash = "sha256:a39455728aabd58ceabb03c90e12f71fd30fa69615760a075b9fec596456ccc3", size = 2129560, upload-time = "2025-11-04T13:41:47.474Z" }, + { url = "https://files.pythonhosted.org/packages/3a/b1/6c990ac65e3b4c079a4fb9f5b05f5b013afa0f4ed6780a3dd236d2cbdc64/pydantic_core-2.41.5-cp314-cp314t-musllinux_1_1_armv7l.whl", hash = "sha256:239edca560d05757817c13dc17c50766136d21f7cd0fac50295499ae24f90fdf", size = 2329244, upload-time = "2025-11-04T13:41:49.992Z" }, + { url = "https://files.pythonhosted.org/packages/d9/02/3c562f3a51afd4d88fff8dffb1771b30cfdfd79befd9883ee094f5b6c0d8/pydantic_core-2.41.5-cp314-cp314t-musllinux_1_1_x86_64.whl", hash = "sha256:2a5e06546e19f24c6a96a129142a75cee553cc018ffee48a460059b1185f4470", size = 2331955, upload-time = "2025-11-04T13:41:54.079Z" }, + { url = 
"https://files.pythonhosted.org/packages/5c/96/5fb7d8c3c17bc8c62fdb031c47d77a1af698f1d7a406b0f79aaa1338f9ad/pydantic_core-2.41.5-cp314-cp314t-win32.whl", hash = "sha256:b4ececa40ac28afa90871c2cc2b9ffd2ff0bf749380fbdf57d165fd23da353aa", size = 1988906, upload-time = "2025-11-04T13:41:56.606Z" }, + { url = "https://files.pythonhosted.org/packages/22/ed/182129d83032702912c2e2d8bbe33c036f342cc735737064668585dac28f/pydantic_core-2.41.5-cp314-cp314t-win_amd64.whl", hash = "sha256:80aa89cad80b32a912a65332f64a4450ed00966111b6615ca6816153d3585a8c", size = 1981607, upload-time = "2025-11-04T13:41:58.889Z" }, + { url = "https://files.pythonhosted.org/packages/9f/ed/068e41660b832bb0b1aa5b58011dea2a3fe0ba7861ff38c4d4904c1c1a99/pydantic_core-2.41.5-cp314-cp314t-win_arm64.whl", hash = "sha256:35b44f37a3199f771c3eaa53051bc8a70cd7b54f333531c59e29fd4db5d15008", size = 1974769, upload-time = "2025-11-04T13:42:01.186Z" }, + { url = "https://files.pythonhosted.org/packages/11/72/90fda5ee3b97e51c494938a4a44c3a35a9c96c19bba12372fb9c634d6f57/pydantic_core-2.41.5-graalpy311-graalpy242_311_native-macosx_10_12_x86_64.whl", hash = "sha256:b96d5f26b05d03cc60f11a7761a5ded1741da411e7fe0909e27a5e6a0cb7b034", size = 2115441, upload-time = "2025-11-04T13:42:39.557Z" }, + { url = "https://files.pythonhosted.org/packages/1f/53/8942f884fa33f50794f119012dc6a1a02ac43a56407adaac20463df8e98f/pydantic_core-2.41.5-graalpy311-graalpy242_311_native-macosx_11_0_arm64.whl", hash = "sha256:634e8609e89ceecea15e2d61bc9ac3718caaaa71963717bf3c8f38bfde64242c", size = 1930291, upload-time = "2025-11-04T13:42:42.169Z" }, + { url = "https://files.pythonhosted.org/packages/79/c8/ecb9ed9cd942bce09fc888ee960b52654fbdbede4ba6c2d6e0d3b1d8b49c/pydantic_core-2.41.5-graalpy311-graalpy242_311_native-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:93e8740d7503eb008aa2df04d3b9735f845d43ae845e6dcd2be0b55a2da43cd2", size = 1948632, upload-time = "2025-11-04T13:42:44.564Z" }, + { url = 
"https://files.pythonhosted.org/packages/2e/1b/687711069de7efa6af934e74f601e2a4307365e8fdc404703afc453eab26/pydantic_core-2.41.5-graalpy311-graalpy242_311_native-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f15489ba13d61f670dcc96772e733aad1a6f9c429cc27574c6cdaed82d0146ad", size = 2138905, upload-time = "2025-11-04T13:42:47.156Z" }, + { url = "https://files.pythonhosted.org/packages/09/32/59b0c7e63e277fa7911c2fc70ccfb45ce4b98991e7ef37110663437005af/pydantic_core-2.41.5-graalpy312-graalpy250_312_native-macosx_10_12_x86_64.whl", hash = "sha256:7da7087d756b19037bc2c06edc6c170eeef3c3bafcb8f532ff17d64dc427adfd", size = 2110495, upload-time = "2025-11-04T13:42:49.689Z" }, + { url = "https://files.pythonhosted.org/packages/aa/81/05e400037eaf55ad400bcd318c05bb345b57e708887f07ddb2d20e3f0e98/pydantic_core-2.41.5-graalpy312-graalpy250_312_native-macosx_11_0_arm64.whl", hash = "sha256:aabf5777b5c8ca26f7824cb4a120a740c9588ed58df9b2d196ce92fba42ff8dc", size = 1915388, upload-time = "2025-11-04T13:42:52.215Z" }, + { url = "https://files.pythonhosted.org/packages/6e/0d/e3549b2399f71d56476b77dbf3cf8937cec5cd70536bdc0e374a421d0599/pydantic_core-2.41.5-graalpy312-graalpy250_312_native-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c007fe8a43d43b3969e8469004e9845944f1a80e6acd47c150856bb87f230c56", size = 1942879, upload-time = "2025-11-04T13:42:56.483Z" }, + { url = "https://files.pythonhosted.org/packages/f7/07/34573da085946b6a313d7c42f82f16e8920bfd730665de2d11c0c37a74b5/pydantic_core-2.41.5-graalpy312-graalpy250_312_native-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:76d0819de158cd855d1cbb8fcafdf6f5cf1eb8e470abe056d5d161106e38062b", size = 2139017, upload-time = "2025-11-04T13:42:59.471Z" }, + { url = "https://files.pythonhosted.org/packages/e6/b0/1a2aa41e3b5a4ba11420aba2d091b2d17959c8d1519ece3627c371951e73/pydantic_core-2.41.5-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = 
"sha256:b5819cd790dbf0c5eb9f82c73c16b39a65dd6dd4d1439dcdea7816ec9adddab8", size = 2103351, upload-time = "2025-11-04T13:43:02.058Z" }, + { url = "https://files.pythonhosted.org/packages/a4/ee/31b1f0020baaf6d091c87900ae05c6aeae101fa4e188e1613c80e4f1ea31/pydantic_core-2.41.5-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:5a4e67afbc95fa5c34cf27d9089bca7fcab4e51e57278d710320a70b956d1b9a", size = 1925363, upload-time = "2025-11-04T13:43:05.159Z" }, + { url = "https://files.pythonhosted.org/packages/e1/89/ab8e86208467e467a80deaca4e434adac37b10a9d134cd2f99b28a01e483/pydantic_core-2.41.5-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ece5c59f0ce7d001e017643d8d24da587ea1f74f6993467d85ae8a5ef9d4f42b", size = 2135615, upload-time = "2025-11-04T13:43:08.116Z" }, + { url = "https://files.pythonhosted.org/packages/99/0a/99a53d06dd0348b2008f2f30884b34719c323f16c3be4e6cc1203b74a91d/pydantic_core-2.41.5-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:16f80f7abe3351f8ea6858914ddc8c77e02578544a0ebc15b4c2e1a0e813b0b2", size = 2175369, upload-time = "2025-11-04T13:43:12.49Z" }, + { url = "https://files.pythonhosted.org/packages/6d/94/30ca3b73c6d485b9bb0bc66e611cff4a7138ff9736b7e66bcf0852151636/pydantic_core-2.41.5-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:33cb885e759a705b426baada1fe68cbb0a2e68e34c5d0d0289a364cf01709093", size = 2144218, upload-time = "2025-11-04T13:43:15.431Z" }, + { url = "https://files.pythonhosted.org/packages/87/57/31b4f8e12680b739a91f472b5671294236b82586889ef764b5fbc6669238/pydantic_core-2.41.5-pp310-pypy310_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:c8d8b4eb992936023be7dee581270af5c6e0697a8559895f527f5b7105ecd36a", size = 2329951, upload-time = "2025-11-04T13:43:18.062Z" }, + { url = "https://files.pythonhosted.org/packages/7d/73/3c2c8edef77b8f7310e6fb012dbc4b8551386ed575b9eb6fb2506e28a7eb/pydantic_core-2.41.5-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = 
"sha256:242a206cd0318f95cd21bdacff3fcc3aab23e79bba5cac3db5a841c9ef9c6963", size = 2318428, upload-time = "2025-11-04T13:43:20.679Z" }, + { url = "https://files.pythonhosted.org/packages/2f/02/8559b1f26ee0d502c74f9cca5c0d2fd97e967e083e006bbbb4e97f3a043a/pydantic_core-2.41.5-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:d3a978c4f57a597908b7e697229d996d77a6d3c94901e9edee593adada95ce1a", size = 2147009, upload-time = "2025-11-04T13:43:23.286Z" }, + { url = "https://files.pythonhosted.org/packages/5f/9b/1b3f0e9f9305839d7e84912f9e8bfbd191ed1b1ef48083609f0dabde978c/pydantic_core-2.41.5-pp311-pypy311_pp73-macosx_10_12_x86_64.whl", hash = "sha256:b2379fa7ed44ddecb5bfe4e48577d752db9fc10be00a6b7446e9663ba143de26", size = 2101980, upload-time = "2025-11-04T13:43:25.97Z" }, + { url = "https://files.pythonhosted.org/packages/a4/ed/d71fefcb4263df0da6a85b5d8a7508360f2f2e9b3bf5814be9c8bccdccc1/pydantic_core-2.41.5-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:266fb4cbf5e3cbd0b53669a6d1b039c45e3ce651fd5442eff4d07c2cc8d66808", size = 1923865, upload-time = "2025-11-04T13:43:28.763Z" }, + { url = "https://files.pythonhosted.org/packages/ce/3a/626b38db460d675f873e4444b4bb030453bbe7b4ba55df821d026a0493c4/pydantic_core-2.41.5-pp311-pypy311_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:58133647260ea01e4d0500089a8c4f07bd7aa6ce109682b1426394988d8aaacc", size = 2134256, upload-time = "2025-11-04T13:43:31.71Z" }, + { url = "https://files.pythonhosted.org/packages/83/d9/8412d7f06f616bbc053d30cb4e5f76786af3221462ad5eee1f202021eb4e/pydantic_core-2.41.5-pp311-pypy311_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:287dad91cfb551c363dc62899a80e9e14da1f0e2b6ebde82c806612ca2a13ef1", size = 2174762, upload-time = "2025-11-04T13:43:34.744Z" }, + { url = "https://files.pythonhosted.org/packages/55/4c/162d906b8e3ba3a99354e20faa1b49a85206c47de97a639510a0e673f5da/pydantic_core-2.41.5-pp311-pypy311_pp73-musllinux_1_1_aarch64.whl", hash = 
"sha256:03b77d184b9eb40240ae9fd676ca364ce1085f203e1b1256f8ab9984dca80a84", size = 2143141, upload-time = "2025-11-04T13:43:37.701Z" }, + { url = "https://files.pythonhosted.org/packages/1f/f2/f11dd73284122713f5f89fc940f370d035fa8e1e078d446b3313955157fe/pydantic_core-2.41.5-pp311-pypy311_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:a668ce24de96165bb239160b3d854943128f4334822900534f2fe947930e5770", size = 2330317, upload-time = "2025-11-04T13:43:40.406Z" }, + { url = "https://files.pythonhosted.org/packages/88/9d/b06ca6acfe4abb296110fb1273a4d848a0bfb2ff65f3ee92127b3244e16b/pydantic_core-2.41.5-pp311-pypy311_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f14f8f046c14563f8eb3f45f499cc658ab8d10072961e07225e507adb700e93f", size = 2316992, upload-time = "2025-11-04T13:43:43.602Z" }, + { url = "https://files.pythonhosted.org/packages/36/c7/cfc8e811f061c841d7990b0201912c3556bfeb99cdcb7ed24adc8d6f8704/pydantic_core-2.41.5-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:56121965f7a4dc965bff783d70b907ddf3d57f6eba29b6d2e5dabfaf07799c51", size = 2145302, upload-time = "2025-11-04T13:43:46.64Z" }, +] + [[package]] name = "pygments" version = "2.19.2" @@ -1554,6 +1720,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/ec/57/56b9bcc3c9c6a792fcbaf139543cee77261f3651ca9da0c93f5c1221264b/python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427", size = 229892, upload-time = "2024-03-01T18:36:18.57Z" }, ] +[[package]] +name = "python-multipart" +version = "0.0.22" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/94/01/979e98d542a70714b0cb2b6728ed0b7c46792b695e3eaec3e20711271ca3/python_multipart-0.0.22.tar.gz", hash = "sha256:7340bef99a7e0032613f56dc36027b959fd3b30a787ed62d310e951f7c3a3a58", size = 37612, upload-time = "2026-01-25T10:15:56.219Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/1b/d0/397f9626e711ff749a95d96b7af99b9c566a9bb5129b8e4c10fc4d100304/python_multipart-0.0.22-py3-none-any.whl", hash = "sha256:2b2cd894c83d21bf49d702499531c7bafd057d730c201782048f7945d82de155", size = 24579, upload-time = "2026-01-25T10:15:54.811Z" }, +] + [[package]] name = "pytz" version = "2026.1.post1" @@ -1840,6 +2015,19 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/b7/ce/149a00dd41f10bc29e5921b496af8b574d8413afcd5e30dfa0ed46c2cc5e/six-1.17.0-py2.py3-none-any.whl", hash = "sha256:4721f391ed90541fddacab5acf947aa0d3dc7d27b2e1e8eda2be8970586c3274", size = 11050, upload-time = "2024-12-04T17:35:26.475Z" }, ] +[[package]] +name = "starlette" +version = "0.52.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "anyio" }, + { name = "typing-extensions", marker = "python_full_version < '3.13'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/c4/68/79977123bb7be889ad680d79a40f339082c1978b5cfcf62c2d8d196873ac/starlette-0.52.1.tar.gz", hash = "sha256:834edd1b0a23167694292e94f597773bc3f89f362be6effee198165a35d62933", size = 2653702, upload-time = "2026-01-18T13:34:11.062Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/81/0d/13d1d239a25cbfb19e740db83143e95c772a1fe10202dda4b76792b114dd/starlette-0.52.1-py3-none-any.whl", hash = "sha256:0029d43eb3d273bc4f83a08720b4912ea4b071087a3b48db01b7c839f7954d74", size = 74272, upload-time = "2026-01-18T13:34:09.188Z" }, +] + [[package]] name = "sympy" version = "1.14.0" @@ -2078,6 +2266,18 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/18/67/36e9267722cc04a6b9f15c7f3441c2363321a3ea07da7ae0c0707beb2a9c/typing_extensions-4.15.0-py3-none-any.whl", hash = "sha256:f0fa19c6845758ab08074a0cfa8b7aecb71c999ca73d62883bc25cc018c4e548", size = 44614, upload-time = "2025-08-25T13:49:24.86Z" }, ] +[[package]] +name = "typing-inspection" +version = "0.4.2" +source = { registry = "https://pypi.org/simple" } 
+dependencies = [ + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/55/e3/70399cb7dd41c10ac53367ae42139cf4b1ca5f36bb3dc6c9d33acdb43655/typing_inspection-0.4.2.tar.gz", hash = "sha256:ba561c48a67c5958007083d386c3295464928b01faa735ab8547c5692e87f464", size = 75949, upload-time = "2025-10-01T02:14:41.687Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/dc/9b/47798a6c91d8bdb567fe2698fe81e0c6b7cb7ef4d13da4114b41d239f65d/typing_inspection-0.4.2-py3-none-any.whl", hash = "sha256:4ed1cacbdc298c220f1bd249ed5287caa16f34d44ef4e9c3d0cbad5b521545e7", size = 14611, upload-time = "2025-10-01T02:14:40.154Z" }, +] + [[package]] name = "tzdata" version = "2025.3" @@ -2095,3 +2295,17 @@ sdist = { url = "https://files.pythonhosted.org/packages/c7/24/5f1b3bdffd70275f6 wheels = [ { url = "https://files.pythonhosted.org/packages/39/08/aaaad47bc4e9dc8c725e68f9d04865dbcb2052843ff09c97b08904852d84/urllib3-2.6.3-py3-none-any.whl", hash = "sha256:bf272323e553dfb2e87d9bfd225ca7b0f467b919d7bbd355436d3fd37cb0acd4", size = 131584, upload-time = "2026-01-07T16:24:42.685Z" }, ] + +[[package]] +name = "uvicorn" +version = "0.41.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "click" }, + { name = "h11" }, + { name = "typing-extensions", marker = "python_full_version < '3.11'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/32/ce/eeb58ae4ac36fe09e3842eb02e0eb676bf2c53ae062b98f1b2531673efdd/uvicorn-0.41.0.tar.gz", hash = "sha256:09d11cf7008da33113824ee5a1c6422d89fbc2ff476540d69a34c87fab8b571a", size = 82633, upload-time = "2026-02-16T23:07:24.1Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/83/e4/d04a086285c20886c0daad0e026f250869201013d18f81d9ff5eada73a88/uvicorn-0.41.0-py3-none-any.whl", hash = "sha256:29e35b1d2c36a04b9e180d4007ede3bcb32a85fbdfd6c6aeb3f26839de088187", size = 68783, upload-time = "2026-02-16T23:07:22.357Z" }, +] From 
fa8f3b645c5469b5cdd41b33c5c5b6f4e6a4101d Mon Sep 17 00:00:00 2001 From: AutoResearch Agent Date: Tue, 10 Mar 2026 20:00:43 +0800 Subject: [PATCH 03/24] Introduce Component-System for component optimization, add a dashboard to monitor activities --- .gitignore | 8 + README.md | 6 +- component_system/PDCA-DO-CHECK-ACTION.md | 75 + component_system/PDCA-PLAN.md | 58 + component_system/README.md | 99 ++ component_system/components/model.py | 380 +++++ component_system/components/optimizer.py | 179 ++ component_system/components/trainer.py | 191 +++ component_system/config.py | 31 + component_system/domain/models.py | 91 + component_system/entrypoint.py | 18 + component_system/package.json | 13 + component_system/postcss.config.js | 6 + component_system/protocol.md | 290 ++++ component_system/repositories/state.py | 75 + component_system/run.py | 801 +++++++++ component_system/run_arxiv.py | 174 ++ component_system/services/workflow.py | 1490 +++++++++++++++++ component_system/tailwind.config.js | 11 + component_system/task.py | 303 ++++ component_system/training/mainline.py | 82 + component_system/web/app.py | 53 + component_system/web/routes.py | 376 +++++ component_system/web/static/app.css | 137 ++ component_system/web/static/app.js | 513 ++++++ .../web/static/tailwind.input.css | 27 + component_system/web/templates/base.html | 32 + component_system/web/templates/dashboard.html | 124 ++ .../web/templates/partials/action_error.html | 3 + .../web/templates/partials/daemon_status.html | 14 + .../templates/partials/dashboard_board.html | 58 + .../web/templates/partials/seed_detail.html | 171 ++ .../partials/seed_detail_response.html | 1 + .../partials/seed_detail_runs_content.html | 148 ++ .../seed_detail_timeline_content.html | 16 + .../templates/partials/seed_runs_inner.html | 145 ++ .../partials/seed_timeline_inner.html | 16 + .../web/templates/seed_detail_page.html | 18 + prepare.py | 3 +- pyproject.toml | 5 + scripts/clean_history.py | 367 ++++ uv.lock | 250 ++- 42 
files changed, 6855 insertions(+), 3 deletions(-) create mode 100644 component_system/PDCA-DO-CHECK-ACTION.md create mode 100644 component_system/PDCA-PLAN.md create mode 100644 component_system/README.md create mode 100644 component_system/components/model.py create mode 100644 component_system/components/optimizer.py create mode 100644 component_system/components/trainer.py create mode 100644 component_system/config.py create mode 100644 component_system/domain/models.py create mode 100644 component_system/entrypoint.py create mode 100644 component_system/package.json create mode 100644 component_system/postcss.config.js create mode 100644 component_system/protocol.md create mode 100644 component_system/repositories/state.py create mode 100644 component_system/run.py create mode 100644 component_system/run_arxiv.py create mode 100644 component_system/services/workflow.py create mode 100644 component_system/tailwind.config.js create mode 100644 component_system/task.py create mode 100644 component_system/training/mainline.py create mode 100644 component_system/web/app.py create mode 100644 component_system/web/routes.py create mode 100644 component_system/web/static/app.css create mode 100644 component_system/web/static/app.js create mode 100644 component_system/web/static/tailwind.input.css create mode 100644 component_system/web/templates/base.html create mode 100644 component_system/web/templates/dashboard.html create mode 100644 component_system/web/templates/partials/action_error.html create mode 100644 component_system/web/templates/partials/daemon_status.html create mode 100644 component_system/web/templates/partials/dashboard_board.html create mode 100644 component_system/web/templates/partials/seed_detail.html create mode 100644 component_system/web/templates/partials/seed_detail_response.html create mode 100644 component_system/web/templates/partials/seed_detail_runs_content.html create mode 100644 
component_system/web/templates/partials/seed_detail_timeline_content.html create mode 100644 component_system/web/templates/partials/seed_runs_inner.html create mode 100644 component_system/web/templates/partials/seed_timeline_inner.html create mode 100644 component_system/web/templates/seed_detail_page.html create mode 100644 scripts/clean_history.py diff --git a/.gitignore b/.gitignore index 99c30f52f..a3fb245de 100644 --- a/.gitignore +++ b/.gitignore @@ -1,6 +1,7 @@ # Python-generated files __pycache__/ *.py[oc] +.pytest_cache/ build/ dist/ wheels/ @@ -21,3 +22,10 @@ dev/ # Results file results.tsv + +# Component-system runtime artifacts (logs, queue, state, worktrees under history/) +component_system/history/ +component_system/baseline_branches.json +component_system/baseline_metrics.json +*.log +.ipynb_checkpoints/ \ No newline at end of file diff --git a/README.md b/README.md index 8459259ab..15ee32f53 100644 --- a/README.md +++ b/README.md @@ -8,7 +8,7 @@ The idea: give an AI agent a small but real LLM training setup and let it experi ## How it works -The repo is deliberately kept small and only really has a three files that matter: +The repo is deliberately kept small and only really has three files that matter: - **`prepare.py`** — fixed constants, one-time data prep (downloads training data, trains a BPE tokenizer), and runtime utilities (dataloader, evaluation). Not modified. - **`train.py`** — the single file the agent edits. Contains the full GPT model, optimizer (Muon + AdamW), and training loop. Everything is fair game: architecture, hyperparameters, optimizer, batch size, etc. **This file is edited and iterated on by the agent**. @@ -16,6 +16,8 @@ The repo is deliberately kept small and only really has a three files that matte By design, training runs for a **fixed 5-minute time budget** (wall clock, excluding startup/compilation), regardless of the details of your compute. 
The metric is **val_bpb** (validation bits per byte) — lower is better, and vocab-size-independent so architectural changes are fairly compared. +If you are new to neural networks, this ["Dummy's Guide"](https://x.com/hooeem/status/2030720614752039185) looks pretty good for a lot more context. + ## Quick start **Requirements:** A single NVIDIA GPU (tested on H100), Python 3.10+, [uv](https://docs.astral.sh/uv/). @@ -47,6 +49,8 @@ Hi have a look at program.md and let's kick off a new experiment! let's do the s The `program.md` file is essentially a super lightweight "skill". +For the component-system workflow, see `component_system/README.md`. + ## Project structure ``` diff --git a/component_system/PDCA-DO-CHECK-ACTION.md b/component_system/PDCA-DO-CHECK-ACTION.md new file mode 100644 index 000000000..be1e30780 --- /dev/null +++ b/component_system/PDCA-DO-CHECK-ACTION.md @@ -0,0 +1,75 @@ +# DCA — Do, Check, Action + +## Responsibility +Take the generated plan from P, adapt/fix it in the seed worktree, +run the canonical training entrypoint, evaluate results against baseline, and +promote only when the signal is positive. Do not propose new ideas or optimize for better metrics; only adapt/fix so the plan runs and report outcomes. + +## Workspace and paths +**CWD = seed worktree.** Read and edit only inside it; use relative paths only. Treat `component_system/` in the worktree as canonical context. + +## Input +- Runner prompt (task content). +- Baseline: `component_system/baseline_branches.json`, `component_system/baseline_metrics.json`. +- Worktree-local files only. + +## Baseline measurement (seed_id __baseline__) +Retry until the run succeeds and you report real metrics. No empty metrics. + +- **OOM:** Reduce `device_batch_size` in `component_system/components/trainer.py` (default 128); keep `total_batch_size % (device_batch_size * sequence_length) == 0`. Rerun until training completes. +- Only trivial fixes (e.g. batch size); no model/training logic changes. 
+- **Commit before reporting.** Uncommitted changes break the follow-up merge. + +## Workflow +1. Work in the seed worktree (one branch per seed). +2. Adapt/fix until it runs (runtime only: bugs, OOM, imports, config; no model/hyperparameter/training-logic changes for better metrics). +3. Run canonical command (**≥600s**): `timeout 600 uv run --active component_system/entrypoint.py`. **Must set command/tool timeout ≥600s running this command** when invoking this run (so the process is not killed early). +4. On bug/OOM: fix and rerun; for baseline, retry until success. +5. Commit on seed branch before reporting. +6. Print DCA summary block with `commit_sha` in JSON. +7. Runner evaluates signal and handles promotion. + +## Output Format +Print the summary block. Put metrics in JSON; runner falls back to stdout/stderr parsing if missing. + +```text +AUTORESEARCH_DCA_SUMMARY_BEGIN +{"checks":["entrypoint"],"notes":"...","completed_at":"YYYY-MM-DD HH:MM:SS","commit_sha":"git sha","metrics":{"val_bpb":1.24,...}} +AUTORESEARCH_DCA_SUMMARY_END +``` + +If no final metrics, use `"metrics": {}`. Runner extracts from stdout/stderr: `val_bpb`, `training_seconds`, `total_seconds`, `peak_vram_mb`, `mfu_percent`, `total_tokens_M`, `num_steps`, `num_params_M`, `depth`. No metrics → recovery DCA inspects logs; only then treat as failed. + +## Check: Signal Rules + +| Condition | Signal | +|-----------|--------| +| `val_bpb` drops >= 0.001 vs baseline | `positive_signal` | +| `val_bpb` rises >= 0.001 vs baseline | `negative_signal` | +| difference < 0.001 | `neutral` | +| no historical baseline `last_val_bpb` | `positive_signal` (first recording) | +| metrics missing or training error | `error` | + +The threshold is defined in `component_system/config.py` (`PROMOTION_THRESHOLD`). + +## Action: Promotion Rules + +Only DCA may trigger a merge into baseline; P must not. Runner records `commit_sha`; on positive signal the workflow merges seed → baseline. 
Merge conflict → system queues merge-resolution DCA. + +### Promotion (`positive_signal`) +1. System merges seed into baseline (you do not run merge). +2. Workflow updates `baseline_metrics.json` / `baseline_branches.json`. +3. Metadata in seed/run state. + +### Merge failure +- **Normal seed:** In seed worktree: `git merge __baseline__`, resolve conflicts, commit, print DCA summary for retry. +- **Baseline seed (__baseline__):** Merge __baseline__ into target (e.g. master). Run from worktree that has target checked out (`git worktree list`); do not run from __baseline__ worktree or `git merge master` there. + +### Non-promotion +`neutral` / `negative_signal` / `error`: log only. Failure info in queue/state logs. + +## Constraints +- No model/optimizer/training-logic changes for better metrics; only make the plan run (bugs, OOM, etc.). +- Use `run_mainline_training` (or equivalent); do not skip `val_bpb` evaluation. +- Do not edit baseline JSON files; only DCA promotion updates them. +- Canonical runner: `component_system/entrypoint.py`. Traceability: git + state files. diff --git a/component_system/PDCA-PLAN.md b/component_system/PDCA-PLAN.md new file mode 100644 index 000000000..bc3393a5b --- /dev/null +++ b/component_system/PDCA-PLAN.md @@ -0,0 +1,58 @@ +# P - Seed Planning And Generation + +## Responsibility +Extract exactly one testable improvement hypothesis from the seed prompt, +generate the first implementation in a candidate worktree, and hand the result +to DCA through the runner. + +## Workspace and paths +**CWD = seed worktree.** Read and edit only inside it; use relative paths only. + +## arXiv search (CLI) + +Run from repo root with uv (e.g. `uv run python component_system/run_arxiv.py ...`); arxiv is already a project dependency. 
+ +### Search (CLI script) + +From repo root, use the script in this component: + +```bash +uv run python component_system/run_arxiv.py --query "machine learning" --max-results 5 +uv run python component_system/run_arxiv.py --id 1605.08386v1 --output json +``` + +**CLI arguments:** `--query` / `-q`, `--id` (one or more arXiv IDs; overrides query), `--max-results` / `-n`, `--sort-by` (relevance | submittedDate | lastUpdatedDate), `--sort-order` (ascending | descending), `--output` / `-o` (text | json), `--download-dir`, `--verbose` / `-v`. + +### Hypothesis from results +1. Read abstracts; pick one concrete change (not just a concept). +2. Map to component: `model`, `optimizer`, or `trainer`. +3. State expected benefit; reduce to one isolated, evaluable improvement. + +## Input +- **results.tsv** in cwd (if present) → read first to avoid duplicating tried/discarded ideas. +- arXiv via arxiv-search; past failures in `queue/done/`; manual seed files. + +## One-Improvement Rule + +One seed = one hypothesis = one causal change. Do not bundle ideas. If the prompt has several options, pick the single best for this run. Prefer the smallest coherent change that tests the hypothesis. + +**Good:** one optimizer schedule change; one architectural block; one training heuristic. **Bad:** model + optimizer + batch together; multiple paper ideas in one seed; "cleanup + new feature" in one candidate. + +## Output Format +Print a summary block for the runner: +```text +AUTORESEARCH_P_SUMMARY_BEGIN +{"idea":"short title","target_component":"model | optimizer | trainer","description":"change details, hypothesis, expected benefit","source_refs":["arXiv:"],"commit_sha":"git sha","completed_at":"YYYY-MM-DD HH:MM:SS"} +AUTORESEARCH_P_SUMMARY_END +``` + +## Steps +1. Read `results.tsv` if present. +2. Refine prompt → one concrete idea → one isolated improvement; name target component. +3. Implement in worktree (from baseline); commit on seed branch. +4.
Print summary block (runner records commit). Description must be enough for DCA. + +## Constraints +- One component, one improvement per seed. Smallest viable implementation. +- No exploratory cleanup or opportunistic refactors unless required for the one change. +- Commit on seed branch; runner does not merge. **P must never merge;** only DCA triggers merge into baseline. diff --git a/component_system/README.md b/component_system/README.md new file mode 100644 index 000000000..baa4f3a92 --- /dev/null +++ b/component_system/README.md @@ -0,0 +1,99 @@ +# autoresearch + +![teaser](progress.png) + +*One day, frontier AI research used to be done by meat computers in between eating, sleeping, having other fun, and synchronizing once in a while using sound wave interconnect in the ritual of "group meeting". That era is long gone. Research is now entirely the domain of autonomous swarms of AI agents running across compute cluster megastructures in the skies. The agents claim that we are now in the 10,205th generation of the code base, in any case no one could tell if that's right or wrong as the "code" is now a self-modifying binary that has grown beyond human comprehension. This repo is the story of how it all began. -@karpathy, March 2026*. + +The idea: give an AI agent a small but real LLM training setup and let it experiment autonomously overnight. It modifies the code, trains for 5 minutes, checks if the result improved, keeps or discards, and repeats. You wake up in the morning to a log of experiments and (hopefully) a better model. The training code here is a simplified single-GPU implementation of [nanochat](https://github.com/karpathy/nanochat). The core idea is that you're not touching any of the Python files like you normally would as a researcher. Instead, you are programming the `program.md` Markdown files that provide context to the AI agents and set up your autonomous research org. 
The default `program.md` in this repo is intentionally kept as a bare bones baseline, though it's obvious how one would iterate on it over time to find the "research org code" that achieves the fastest research progress, how you'd add more agents to the mix, etc. A bit more context on this project is here in this [tweet](https://x.com/karpathy/status/2029701092347630069). + +## How it works + +The repo is deliberately kept small and only really has three files that matter: + +- **`prepare.py`** — fixed constants, one-time data prep (downloads training data, trains a BPE tokenizer), and runtime utilities (dataloader, evaluation). Not modified. +- **`train.py`** — the single file the agent edits. Contains the full GPT model, optimizer (Muon + AdamW), and training loop. Everything is fair game: architecture, hyperparameters, optimizer, batch size, etc. **This file is edited and iterated on by the agent**. +- **`program.md`** — baseline instructions for one agent. Point your agent here and let it go. **This file is edited and iterated on by the human**. + +By design, training runs for a **fixed 5-minute time budget** (wall clock, excluding startup/compilation), regardless of the details of your compute. The metric is **val_bpb** (validation bits per byte) — lower is better, and vocab-size-independent so architectural changes are fairly compared. + +## Quick start + +**Requirements:** A single NVIDIA GPU (tested on H100), Python 3.10+, [uv](https://docs.astral.sh/uv/). + +```bash + +# 1. Install uv project manager (if you don't already have it) +curl -LsSf https://astral.sh/uv/install.sh | sh + +# 2. Install dependencies +uv sync + +# 3. Download data and train tokenizer (one-time, ~2 min) +uv run prepare.py + +# 4. Manually run a single training experiment (~5 min) +uv run train.py +``` + +If the above commands all work ok, your setup is working and you can go into autonomous research mode.
+ +## Running the agent + +Simply spin up your Claude/Codex or whatever you want in this repo (and disable all permissions), then you can prompt something like: + +``` +Hi have a look at program.md and let's kick off a new experiment! let's do the setup first. +``` + +The `program.md` file is essentially a super lightweight "skill". + +### Component-system workflow + +**Seed → P → DCA** loop: daemon runs two workers that poll a file queue and dispatch to an external agent (Claude, Codex, or OpenCode). + +1. **Dashboard** (optional): `uv run uvicorn component_system.web.app:app --reload` → http://127.0.0.1:8000/component-system +2. **Daemon:** `uv run component_system/run.py` (or `PDCA_AGENT=codex|opencode` for other backends) +3. **Bootstrap:** Have the agent follow `component_system/protocol.md`, create a seed and queue it for P, then start the daemon. Do not run P/DCA stages manually in-session. + +Seeds flow: `queue/p/` → P → `queue/dca/` → DCA → `state/`. Results in dashboard. + +## Project structure + +``` +prepare.py — constants, data prep + runtime utilities (do not modify) +train.py — model, optimizer, training loop (agent modifies this) +program.md — agent instructions +pyproject.toml — dependencies +``` + +## Design choices + +- **Single file to modify.** The agent only touches `train.py`. This keeps the scope manageable and diffs reviewable. +- **Fixed time budget.** Training always runs for exactly 5 minutes, regardless of your specific platform. This means you can expect approx 12 experiments/hour and approx 100 experiments while you sleep. There are two upsides of this design decision. First, this makes experiments directly comparable regardless of what the agent changes (model size, batch size, architecture, etc). Second, this means that autoresearch will find the most optimal model for your platform in that time budget. The downside is that your runs (and results) become not comparable to other people running on other compute platforms. 
+- **Self-contained.** No external dependencies beyond PyTorch and a few small packages. No distributed training, no complex configs. One GPU, one file, one metric. + +## Platform support + +This code currently requires that you have a single NVIDIA GPU. In principle it is quite possible to support CPU, MPS and other platforms but this would also bloat the code. I'm not 100% sure that I want to take this on personally right now. People can reference (or have their agents reference) the full/parent nanochat repository that has wider platform support and shows the various solutions (e.g. a Flash Attention 3 kernels fallback implementation, generic device support, autodetection, etc.), feel free to create forks or discussions for other platforms and I'm happy to link to them here in the README in some new notable forks section or etc. + +Seeing as there seems to be a lot of interest in tinkering with autoresearch on much smaller compute platforms than an H100, a few extra words. If you're going to try running autoresearch on smaller computers (Macbooks etc.), I'd recommend one of the forks below. On top of this, here are some recommendations for how to tune the defaults for much smaller models for aspiring forks: + +1. To get half-decent results I'd use a dataset with a lot less entropy, e.g. this [TinyStories dataset](https://huggingface.co/datasets/karpathy/tinystories-gpt4-clean). These are GPT-4 generated short stories. Because the data is a lot narrower in scope, you will see reasonable results with a lot smaller models (if you try to sample from them after training). +2. You might experiment with decreasing `vocab_size`, e.g. from 8192 down to 4096, 2048, 1024, or even - simply byte-level tokenizer with 256 possible bytes after utf-8 encoding. +3. In `prepare.py`, you'll want to lower `MAX_SEQ_LEN` a lot, depending on the computer even down to 256 etc.
As you lower `MAX_SEQ_LEN`, you may want to experiment with increasing `DEVICE_BATCH_SIZE` in `train.py` slightly to compensate. The number of tokens per fwd/bwd pass is the product of these two. +4. Also in `prepare.py`, you'll want to decrease `EVAL_TOKENS` so that your validation loss is evaluated on a lot less data. +5. In `train.py`, the primary single knob that controls model complexity is the `DEPTH` (default 8, here). A lot of variables are just functions of this, so e.g. lower it down to e.g. 4. +6. You'll want to most likely use `WINDOW_PATTERN` of just "L", because "SSSL" uses alternating banded attention pattern that may be very inefficient for you. Try it. +7. You'll want to lower `TOTAL_BATCH_SIZE` a lot, but keep it powers of 2, e.g. down to `2**14` (~16K) or so even, hard to tell. + +I think these would be the reasonable hyperparameters to play with. Ask your favorite coding agent for help and copy paste them this guide, as well as the full source code. + +## Notable forks + +- [miolini/autoresearch-macos](https://github.com/miolini/autoresearch-macos) (MacOS) +- [trevin-creator/autoresearch-mlx](https://github.com/trevin-creator/autoresearch-mlx) (MacOS) +- [jsegov/autoresearch-win-rtx](https://github.com/jsegov/autoresearch-win-rtx) (Windows) + +## License + +MIT diff --git a/component_system/components/model.py b/component_system/components/model.py new file mode 100644 index 000000000..f74d89386 --- /dev/null +++ b/component_system/components/model.py @@ -0,0 +1,380 @@ +from __future__ import annotations + +from dataclasses import dataclass + +import torch +import torch.nn as nn +import torch.nn.functional as F +from kernels import get_kernel + +from prepare import MAX_SEQ_LEN + + +def _get_fa3(): + if torch.cuda.is_available(): + cap = torch.cuda.get_device_capability() + repo = "varunneal/flash-attention-3" if cap == (9, 0) else "kernels-community/flash-attn3" + return get_kernel(repo).flash_attn_interface + return None + +_fa3 = None + +def 
get_fa3(): + global _fa3 + if _fa3 is None: + _fa3 = _get_fa3() + return _fa3 + + +@dataclass +class GPTConfig: + sequence_len: int = 2048 + vocab_size: int = 32768 + n_layer: int = 12 + n_head: int = 6 + n_kv_head: int = 6 + n_embd: int = 768 + window_pattern: str = "SSSL" + + +def norm(x: torch.Tensor) -> torch.Tensor: + return F.rms_norm(x, (x.size(-1),)) + + +def has_ve(layer_idx: int, n_layer: int) -> bool: + return layer_idx % 2 == (n_layer - 1) % 2 + + +def apply_rotary_emb(x: torch.Tensor, cos: torch.Tensor, sin: torch.Tensor) -> torch.Tensor: + assert x.ndim == 4 + d = x.shape[3] // 2 + x1, x2 = x[..., :d], x[..., d:] + y1 = x1 * cos + x2 * sin + y2 = x1 * (-sin) + x2 * cos + return torch.cat([y1, y2], 3) + + +class CausalSelfAttention(nn.Module): + def __init__(self, config: GPTConfig, layer_idx: int) -> None: + super().__init__() + self.n_head = config.n_head + self.n_kv_head = config.n_kv_head + self.n_embd = config.n_embd + self.head_dim = self.n_embd // self.n_head + assert self.n_embd % self.n_head == 0 + assert self.n_kv_head <= self.n_head and self.n_head % self.n_kv_head == 0 + self.c_q = nn.Linear(self.n_embd, self.n_head * self.head_dim, bias=False) + self.c_k = nn.Linear(self.n_embd, self.n_kv_head * self.head_dim, bias=False) + self.c_v = nn.Linear(self.n_embd, self.n_kv_head * self.head_dim, bias=False) + self.c_proj = nn.Linear(self.n_embd, self.n_embd, bias=False) + self.ve_gate_channels = 32 + self.ve_gate = ( + nn.Linear(self.ve_gate_channels, self.n_kv_head, bias=False) + if has_ve(layer_idx, config.n_layer) + else None + ) + + def forward( + self, + x: torch.Tensor, + ve: torch.Tensor | None, + cos_sin: tuple[torch.Tensor, torch.Tensor], + window_size: tuple[int, int], + ) -> torch.Tensor: + batch_size, seq_len, _ = x.size() + q = self.c_q(x).view(batch_size, seq_len, self.n_head, self.head_dim) + k = self.c_k(x).view(batch_size, seq_len, self.n_kv_head, self.head_dim) + v = self.c_v(x).view(batch_size, seq_len, self.n_kv_head, 
self.head_dim) + + # Value residual (ResFormer): mix in value embedding with input-dependent gate per head + if ve is not None: + ve = ve.view(batch_size, seq_len, self.n_kv_head, self.head_dim) + gate = 2 * torch.sigmoid(self.ve_gate(x[..., : self.ve_gate_channels])) + v = v + gate.unsqueeze(-1) * ve + + cos, sin = cos_sin + q, k = apply_rotary_emb(q, cos, sin), apply_rotary_emb(k, cos, sin) + q, k = norm(q), norm(k) + + fa3 = get_fa3() + if fa3 is None: + raise RuntimeError("Flash Attention 3 is unavailable; component_system model should match train.py and requires the same kernel path.") + y = fa3.flash_attn_func(q, k, v, causal=True, window_size=window_size) + y = y.contiguous().view(batch_size, seq_len, -1) + return self.c_proj(y) + + +class MLP(nn.Module): + def __init__(self, config: GPTConfig) -> None: + super().__init__() + self.c_fc = nn.Linear(config.n_embd, 4 * config.n_embd, bias=False) + self.c_proj = nn.Linear(4 * config.n_embd, config.n_embd, bias=False) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + x = self.c_fc(x) + x = F.relu(x).square() + x = self.c_proj(x) + return x + + +class Block(nn.Module): + def __init__(self, config: GPTConfig, layer_idx: int) -> None: + super().__init__() + self.attn = CausalSelfAttention(config, layer_idx) + self.mlp = MLP(config) + + def forward( + self, + x: torch.Tensor, + ve: torch.Tensor | None, + cos_sin: tuple[torch.Tensor, torch.Tensor], + window_size: tuple[int, int], + ) -> torch.Tensor: + x = x + self.attn(norm(x), ve, cos_sin, window_size) + x = x + self.mlp(norm(x)) + return x + + +class GPT(nn.Module): + def __init__(self, config: GPTConfig) -> None: + super().__init__() + self.config = config + self.window_sizes = self._compute_window_sizes(config) + self.transformer = nn.ModuleDict( + { + "wte": nn.Embedding(config.vocab_size, config.n_embd), + "h": nn.ModuleList([Block(config, i) for i in range(config.n_layer)]), + } + ) + self.lm_head = nn.Linear(config.n_embd, config.vocab_size, 
bias=False) + self.resid_lambdas = nn.Parameter(torch.ones(config.n_layer)) + self.x0_lambdas = nn.Parameter(torch.zeros(config.n_layer)) + head_dim = config.n_embd // config.n_head + kv_dim = config.n_kv_head * head_dim + self.value_embeds = nn.ModuleDict( + { + str(i): nn.Embedding(config.vocab_size, kv_dim) + for i in range(config.n_layer) + if has_ve(i, config.n_layer) + } + ) + self.rotary_seq_len = config.sequence_len * 10 + cos, sin = self._precompute_rotary_embeddings(self.rotary_seq_len, head_dim) + self.register_buffer("cos", cos, persistent=False) + self.register_buffer("sin", sin, persistent=False) + + @torch.no_grad() + def init_weights(self) -> None: + torch.nn.init.normal_(self.transformer.wte.weight, mean=0.0, std=1.0) + torch.nn.init.normal_(self.lm_head.weight, mean=0.0, std=0.001) + n_embd = self.config.n_embd + scale = 3**0.5 * n_embd**-0.5 + for block in self.transformer.h: + torch.nn.init.uniform_(block.attn.c_q.weight, -scale, scale) + torch.nn.init.uniform_(block.attn.c_k.weight, -scale, scale) + torch.nn.init.uniform_(block.attn.c_v.weight, -scale, scale) + torch.nn.init.zeros_(block.attn.c_proj.weight) + torch.nn.init.uniform_(block.mlp.c_fc.weight, -scale, scale) + torch.nn.init.zeros_(block.mlp.c_proj.weight) + self.resid_lambdas.fill_(1.0) + self.x0_lambdas.fill_(0.1) + for ve in self.value_embeds.values(): + torch.nn.init.uniform_(ve.weight, -scale, scale) + for block in self.transformer.h: + if block.attn.ve_gate is not None: + torch.nn.init.zeros_(block.attn.ve_gate.weight) + head_dim = self.config.n_embd // self.config.n_head + cos, sin = self._precompute_rotary_embeddings(self.rotary_seq_len, head_dim) + self.cos, self.sin = cos, sin + self.transformer.wte.to(dtype=torch.bfloat16) + for ve in self.value_embeds.values(): + ve.to(dtype=torch.bfloat16) + + def _precompute_rotary_embeddings( + self, + seq_len: int, + head_dim: int, + base: int = 10000, + device: torch.device | None = None, + ) -> tuple[torch.Tensor, torch.Tensor]: + if 
device is None: + device = self.transformer.wte.weight.device + channel_range = torch.arange(0, head_dim, 2, dtype=torch.float32, device=device) + inv_freq = 1.0 / (base ** (channel_range / head_dim)) + t = torch.arange(seq_len, dtype=torch.float32, device=device) + freqs = torch.outer(t, inv_freq) + cos, sin = freqs.cos(), freqs.sin() + cos, sin = cos.bfloat16(), sin.bfloat16() + return cos[None, :, None, :], sin[None, :, None, :] + + def _compute_window_sizes(self, config: GPTConfig) -> list[tuple[int, int]]: + pattern = config.window_pattern.upper() + assert all(c in "SL" for c in pattern) + long_window = config.sequence_len + short_window = long_window // 2 + char_to_window = {"L": (long_window, 0), "S": (short_window, 0)} + window_sizes = [] + for layer_idx in range(config.n_layer): + char = pattern[layer_idx % len(pattern)] + window_sizes.append(char_to_window[char]) + window_sizes[-1] = (long_window, 0) + return window_sizes + + def estimate_flops(self) -> float: + nparams = sum(p.numel() for p in self.parameters()) + value_embeds_numel = sum(ve.weight.numel() for ve in self.value_embeds.values()) + nparams_exclude = ( + self.transformer.wte.weight.numel() + + value_embeds_numel + + self.resid_lambdas.numel() + + self.x0_lambdas.numel() + ) + n_head = self.config.n_head + head_dim = self.config.n_embd // self.config.n_head + seq_len = self.config.sequence_len + attn_flops = 0 + for window_size in self.window_sizes: + window = window_size[0] + effective_seq = seq_len if window < 0 else min(window, seq_len) + attn_flops += 12 * n_head * head_dim * effective_seq + return 6 * (nparams - nparams_exclude) + attn_flops + + def num_scaling_params(self) -> dict[str, int]: + wte = sum(p.numel() for p in self.transformer.wte.parameters()) + value_embeds = sum(p.numel() for p in self.value_embeds.parameters()) + lm_head = sum(p.numel() for p in self.lm_head.parameters()) + transformer_matrices = sum(p.numel() for p in self.transformer.h.parameters()) + scalars = 
self.resid_lambdas.numel() + self.x0_lambdas.numel() + total = wte + value_embeds + lm_head + transformer_matrices + scalars + return { + "wte": wte, + "value_embeds": value_embeds, + "lm_head": lm_head, + "transformer_matrices": transformer_matrices, + "scalars": scalars, + "total": total, + } + + def setup_optimizer( + self, + unembedding_lr: float = 0.004, + embedding_lr: float = 0.2, + matrix_lr: float = 0.02, + weight_decay: float = 0.0, + adam_betas: tuple[float, float] = (0.8, 0.95), + scalar_lr: float = 0.5, + ): + from component_system.components.optimizer import MuonAdamW + + model_dim = self.config.n_embd + matrix_params = list(self.transformer.h.parameters()) + value_embeds_params = list(self.value_embeds.parameters()) + embedding_params = list(self.transformer.wte.parameters()) + lm_head_params = list(self.lm_head.parameters()) + resid_params = [self.resid_lambdas] + x0_params = [self.x0_lambdas] + assert len(list(self.parameters())) == ( + len(matrix_params) + + len(embedding_params) + + len(lm_head_params) + + len(value_embeds_params) + + len(resid_params) + + len(x0_params) + ) + # Scale LR ∝ 1/√dmodel (tuned at 768 dim) + dmodel_lr_scale = (model_dim / 768) ** -0.5 + print(f"Scaling AdamW LRs by 1/sqrt({model_dim}/768) = {dmodel_lr_scale:.6f}") + param_groups = [ + dict(kind="adamw", params=lm_head_params, lr=unembedding_lr * dmodel_lr_scale, betas=adam_betas, eps=1e-10, weight_decay=0.0), + dict(kind="adamw", params=embedding_params, lr=embedding_lr * dmodel_lr_scale, betas=adam_betas, eps=1e-10, weight_decay=0.0), + dict(kind="adamw", params=value_embeds_params, lr=embedding_lr * dmodel_lr_scale, betas=adam_betas, eps=1e-10, weight_decay=0.0), + dict(kind="adamw", params=resid_params, lr=scalar_lr * 0.01, betas=adam_betas, eps=1e-10, weight_decay=0.0), + dict(kind="adamw", params=x0_params, lr=scalar_lr, betas=(0.96, 0.95), eps=1e-10, weight_decay=0.0), + ] + for shape in sorted({p.shape for p in matrix_params}): + group_params = [p for p in 
matrix_params if p.shape == shape] + param_groups.append( + dict( + kind="muon", + params=group_params, + lr=matrix_lr, + momentum=0.95, + ns_steps=5, + beta2=0.95, + weight_decay=weight_decay, + ) + ) + optimizer = MuonAdamW(param_groups) + for group in optimizer.param_groups: + group["initial_lr"] = group["lr"] + return optimizer + + def forward( + self, + idx: torch.Tensor, + targets: torch.Tensor | None = None, + reduction: str = "mean", + ) -> torch.Tensor: + _, seq_len = idx.size() + assert seq_len <= self.cos.size(1) + cos_sin = self.cos[:, :seq_len], self.sin[:, :seq_len] + x = self.transformer.wte(idx) + x = norm(x) + x0 = x + for layer_idx, block in enumerate(self.transformer.h): + x = self.resid_lambdas[layer_idx] * x + self.x0_lambdas[layer_idx] * x0 + ve = self.value_embeds[str(layer_idx)](idx) if str(layer_idx) in self.value_embeds else None + x = block(x, ve, cos_sin, self.window_sizes[layer_idx]) + x = norm(x) + logits = self.lm_head(x).float() + softcap = 15 + logits = softcap * torch.tanh(logits / softcap) + if targets is None: + return logits + return F.cross_entropy( + logits.view(-1, logits.size(-1)), + targets.view(-1), + ignore_index=-1, + reduction=reduction, + ) + + +def build_model_config( + depth: int, + *, + vocab_size: int, + aspect_ratio: int = 64, + head_dim: int = 128, + window_pattern: str = "SSSL", +) -> GPTConfig: + base_dim = depth * aspect_ratio + model_dim = ((base_dim + head_dim - 1) // head_dim) * head_dim + num_heads = model_dim // head_dim + return GPTConfig( + sequence_len=MAX_SEQ_LEN, + vocab_size=vocab_size, + n_layer=depth, + n_head=num_heads, + n_kv_head=num_heads, + n_embd=model_dim, + window_pattern=window_pattern, + ) + + +def create_model( + config: GPTConfig, + *, + device: torch.device | None = None, + compile_model: bool = True, +) -> tuple[GPT, dict[str, int], float]: + if device is None: + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + with torch.device("meta"): + model = GPT(config) + 
model.to_empty(device=device) + model.init_weights() + param_counts = model.num_scaling_params() + num_flops_per_token = model.estimate_flops() + if compile_model: + model = torch.compile(model, dynamic=False) + return model, param_counts, num_flops_per_token diff --git a/component_system/components/optimizer.py b/component_system/components/optimizer.py new file mode 100644 index 000000000..227caaea9 --- /dev/null +++ b/component_system/components/optimizer.py @@ -0,0 +1,179 @@ +from __future__ import annotations + +import torch + + +polar_express_coeffs = [ + (8.156554524902461, -22.48329292557795, 15.878769915207462), + (4.042929935166739, -2.808917465908714, 0.5000178451051316), + (3.8916678022926607, -2.772484153217685, 0.5060648178503393), + (3.285753657755655, -2.3681294933425376, 0.46449024233003106), + (2.3465413258596377, -1.7097828382687081, 0.42323551169305323), +] + + +@torch.compile(dynamic=False, fullgraph=True) +def adamw_step_fused( + p: torch.Tensor, + grad: torch.Tensor, + exp_avg: torch.Tensor, + exp_avg_sq: torch.Tensor, + step_t: torch.Tensor, + lr_t: torch.Tensor, + beta1_t: torch.Tensor, + beta2_t: torch.Tensor, + eps_t: torch.Tensor, + wd_t: torch.Tensor, +) -> None: + p.mul_(1 - lr_t * wd_t) + exp_avg.lerp_(grad, 1 - beta1_t) + exp_avg_sq.lerp_(grad.square(), 1 - beta2_t) + bias1 = 1 - beta1_t**step_t + bias2 = 1 - beta2_t**step_t + denom = (exp_avg_sq / bias2).sqrt() + eps_t + step_size = lr_t / bias1 + p.add_(exp_avg / denom, alpha=-step_size) + + +@torch.compile(dynamic=False, fullgraph=True) +def muon_step_fused( + stacked_grads: torch.Tensor, + stacked_params: torch.Tensor, + momentum_buffer: torch.Tensor, + second_momentum_buffer: torch.Tensor, + momentum_t: torch.Tensor, + lr_t: torch.Tensor, + wd_t: torch.Tensor, + beta2_t: torch.Tensor, + ns_steps: int, + red_dim: int, +) -> None: + momentum = momentum_t.to(stacked_grads.dtype) + momentum_buffer.lerp_(stacked_grads, 1 - momentum) + g = stacked_grads.lerp_(momentum_buffer, 
momentum) + x = g.bfloat16() + x = x / (x.norm(dim=(-2, -1), keepdim=True) * 1.02 + 1e-6) + if g.size(-2) > g.size(-1): + for a, b, c in polar_express_coeffs[:ns_steps]: + a_matrix = x.mT @ x + b_matrix = b * a_matrix + c * (a_matrix @ a_matrix) + x = a * x + x @ b_matrix + else: + for a, b, c in polar_express_coeffs[:ns_steps]: + a_matrix = x @ x.mT + b_matrix = b * a_matrix + c * (a_matrix @ a_matrix) + x = a * x + b_matrix @ x + g = x + beta2 = beta2_t.to(g.dtype) + v_mean = g.float().square().mean(dim=red_dim, keepdim=True) + red_dim_size = g.size(red_dim) + v_norm_sq = v_mean.sum(dim=(-2, -1), keepdim=True) * red_dim_size + v_norm = v_norm_sq.sqrt() + second_momentum_buffer.lerp_(v_mean.to(dtype=second_momentum_buffer.dtype), 1 - beta2) + step_size = second_momentum_buffer.clamp_min(1e-10).rsqrt() + scaled_sq_sum = (v_mean * red_dim_size) * step_size.float().square() + v_norm_new = scaled_sq_sum.sum(dim=(-2, -1), keepdim=True).sqrt() + final_scale = step_size * (v_norm / v_norm_new.clamp_min(1e-10)) + g = g * final_scale.to(g.dtype) + lr = lr_t.to(g.dtype) + wd = wd_t.to(g.dtype) + mask = (g * stacked_params) >= 0 + stacked_params.sub_(lr * g + lr * wd * stacked_params * mask) + + +class MuonAdamW(torch.optim.Optimizer): + def __init__(self, param_groups: list[dict]) -> None: + super().__init__(param_groups, defaults={}) + self._adamw_step_t = torch.tensor(0.0, dtype=torch.float32, device="cpu") + self._adamw_lr_t = torch.tensor(0.0, dtype=torch.float32, device="cpu") + self._adamw_beta1_t = torch.tensor(0.0, dtype=torch.float32, device="cpu") + self._adamw_beta2_t = torch.tensor(0.0, dtype=torch.float32, device="cpu") + self._adamw_eps_t = torch.tensor(0.0, dtype=torch.float32, device="cpu") + self._adamw_wd_t = torch.tensor(0.0, dtype=torch.float32, device="cpu") + self._muon_momentum_t = torch.tensor(0.0, dtype=torch.float32, device="cpu") + self._muon_lr_t = torch.tensor(0.0, dtype=torch.float32, device="cpu") + self._muon_wd_t = torch.tensor(0.0, 
dtype=torch.float32, device="cpu") + self._muon_beta2_t = torch.tensor(0.0, dtype=torch.float32, device="cpu") + + def _step_adamw(self, group: dict) -> None: + for p in group["params"]: + if p.grad is None: + continue + grad = p.grad + state = self.state[p] + if not state: + state["step"] = 0 + state["exp_avg"] = torch.zeros_like(p) + state["exp_avg_sq"] = torch.zeros_like(p) + state["step"] += 1 + self._adamw_step_t.fill_(state["step"]) + self._adamw_lr_t.fill_(group["lr"]) + self._adamw_beta1_t.fill_(group["betas"][0]) + self._adamw_beta2_t.fill_(group["betas"][1]) + self._adamw_eps_t.fill_(group["eps"]) + self._adamw_wd_t.fill_(group["weight_decay"]) + adamw_step_fused( + p, + grad, + state["exp_avg"], + state["exp_avg_sq"], + self._adamw_step_t, + self._adamw_lr_t, + self._adamw_beta1_t, + self._adamw_beta2_t, + self._adamw_eps_t, + self._adamw_wd_t, + ) + + def _step_muon(self, group: dict) -> None: + params = group["params"] + if not params: + return + first_param = params[0] + state = self.state[first_param] + num_params = len(params) + shape, device, dtype = first_param.shape, first_param.device, first_param.dtype + if "momentum_buffer" not in state: + state["momentum_buffer"] = torch.zeros(num_params, *shape, dtype=dtype, device=device) + if "second_momentum_buffer" not in state: + state_shape = (num_params, shape[-2], 1) if shape[-2] >= shape[-1] else (num_params, 1, shape[-1]) + state["second_momentum_buffer"] = torch.zeros(state_shape, dtype=dtype, device=device) + red_dim = -1 if shape[-2] >= shape[-1] else -2 + stacked_grads = torch.stack([p.grad for p in params]) + stacked_params = torch.stack(params) + self._muon_momentum_t.fill_(group["momentum"]) + self._muon_beta2_t.fill_(group["beta2"] if group["beta2"] is not None else 0.0) + self._muon_lr_t.fill_(group["lr"] * max(1.0, shape[-2] / shape[-1]) ** 0.5) + self._muon_wd_t.fill_(group["weight_decay"]) + muon_step_fused( + stacked_grads, + stacked_params, + state["momentum_buffer"], + 
state["second_momentum_buffer"], + self._muon_momentum_t, + self._muon_lr_t, + self._muon_wd_t, + self._muon_beta2_t, + group["ns_steps"], + red_dim, + ) + torch._foreach_copy_(params, list(stacked_params.unbind(0))) + + @torch.no_grad() + def step(self) -> None: + for group in self.param_groups: + if group["kind"] == "adamw": + self._step_adamw(group) + elif group["kind"] == "muon": + self._step_muon(group) + + +def create_optimizer(model: torch.nn.Module, settings: object) -> MuonAdamW: + return model.setup_optimizer( + unembedding_lr=settings.unembedding_lr, + embedding_lr=settings.embedding_lr, + matrix_lr=settings.matrix_lr, + weight_decay=settings.weight_decay, + adam_betas=settings.adam_betas, + scalar_lr=settings.scalar_lr, + ) diff --git a/component_system/components/trainer.py b/component_system/components/trainer.py new file mode 100644 index 000000000..fd300348e --- /dev/null +++ b/component_system/components/trainer.py @@ -0,0 +1,191 @@ +from __future__ import annotations + +import gc +import time +from dataclasses import dataclass +from typing import Any + +import torch + +from prepare import MAX_SEQ_LEN, TIME_BUDGET, evaluate_bpb, make_dataloader + + +H100_BF16_PEAK_FLOPS = 989.5e12 + + +@dataclass +class TrainingSettings: + aspect_ratio: int = 64 + head_dim: int = 128 + window_pattern: str = "SSSL" + total_batch_size: int = 2**19 + embedding_lr: float = 0.6 + unembedding_lr: float = 0.004 + matrix_lr: float = 0.04 + scalar_lr: float = 0.5 + weight_decay: float = 0.2 + adam_betas: tuple[float, float] = (0.8, 0.95) + warmup_ratio: float = 0.0 + warmdown_ratio: float = 0.5 + final_lr_frac: float = 0.0 + depth: int = 8 + device_batch_size: int = 32 # 24GB vram + seed: int = 42 + compile_model: bool = True + + +def default_training_settings() -> TrainingSettings: + return TrainingSettings() + + +def get_lr_multiplier(progress: float, settings: TrainingSettings) -> float: + if progress < settings.warmup_ratio: + return progress / settings.warmup_ratio if 
settings.warmup_ratio > 0 else 1.0 + if progress < 1.0 - settings.warmdown_ratio: + return 1.0 + cooldown = (1.0 - progress) / settings.warmdown_ratio + return cooldown + (1 - cooldown) * settings.final_lr_frac + + +def get_muon_momentum(step: int) -> float: + frac = min(step / 300, 1) + return (1 - frac) * 0.85 + frac * 0.95 + + +def get_weight_decay(progress: float, settings: TrainingSettings) -> float: + return settings.weight_decay * (1 - progress) + + +def run_training_session( + *, + model: torch.nn.Module, + optimizer: torch.optim.Optimizer, + tokenizer: Any, + settings: TrainingSettings, + param_counts: dict[str, int], + num_flops_per_token: float, + baseline_binding: dict[str, Any], +) -> dict[str, Any]: + t_start = time.time() + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + autocast_device = "cuda" if device.type == "cuda" else "cpu" + autocast_ctx = torch.amp.autocast(device_type=autocast_device, dtype=torch.bfloat16) + + tokens_per_fwdbwd = settings.device_batch_size * MAX_SEQ_LEN + assert settings.total_batch_size % tokens_per_fwdbwd == 0 + grad_accum_steps = settings.total_batch_size // tokens_per_fwdbwd + train_loader = make_dataloader(tokenizer, settings.device_batch_size, MAX_SEQ_LEN, "train") + x, y, epoch = next(train_loader) + + print(f"Vocab size: {tokenizer.get_vocab_size():,}") + print(f"Time budget: {TIME_BUDGET}s") + print(f"Gradient accumulation steps: {grad_accum_steps}") + print("Training session started") + + t_start_training = time.time() + smooth_train_loss = 0.0 + total_training_time = 0.0 + step = 0 + + while True: + if device.type == "cuda": + torch.cuda.synchronize(device=device) + t0 = time.time() + for _ in range(grad_accum_steps): + with autocast_ctx: + loss = model(x, y) + train_loss = loss.detach() + loss = loss / grad_accum_steps + loss.backward() + x, y, epoch = next(train_loader) + + progress = min(total_training_time / TIME_BUDGET, 1.0) + lrm = get_lr_multiplier(progress, settings) + 
muon_momentum = get_muon_momentum(step) + muon_weight_decay = get_weight_decay(progress, settings) + for group in optimizer.param_groups: + group["lr"] = group["initial_lr"] * lrm + if group["kind"] == "muon": + group["momentum"] = muon_momentum + group["weight_decay"] = muon_weight_decay + + optimizer.step() + model.zero_grad(set_to_none=True) + train_loss_f = train_loss.item() + if train_loss_f > 100: + raise RuntimeError("Training aborted because loss exceeded the fast-fail threshold.") + + torch.cuda.synchronize(device=device) + dt = time.time() - t0 + if step > 10: + total_training_time += dt + + ema_beta = 0.9 + smooth_train_loss = ema_beta * smooth_train_loss + (1 - ema_beta) * train_loss_f + debiased_smooth_loss = smooth_train_loss / (1 - ema_beta ** (step + 1)) + pct_done = 100 * progress + tok_per_sec = int(settings.total_batch_size / dt) + mfu = 100 * num_flops_per_token * settings.total_batch_size / dt / H100_BF16_PEAK_FLOPS + remaining = max(0.0, TIME_BUDGET - total_training_time) + print( + f"\rstep {step:05d} ({pct_done:.1f}%) | loss: {debiased_smooth_loss:.6f} | " + f"lrm: {lrm:.2f} | dt: {dt*1000:.0f}ms | tok/sec: {tok_per_sec:,} | " + f"mfu: {mfu:.1f}% | epoch: {epoch} | remaining: {remaining:.0f}s ", + end="", + flush=True, + ) + + if step == 0: + gc.collect() + gc.freeze() + gc.disable() + elif (step + 1) % 5000 == 0: + gc.collect() + + step += 1 + if step > 10 and total_training_time >= TIME_BUDGET: + break + + print() + total_tokens = step * settings.total_batch_size + model.eval() + with autocast_ctx: + val_bpb = evaluate_bpb(model, tokenizer, settings.device_batch_size) + + t_end = time.time() + peak_vram_mb = torch.cuda.max_memory_allocated() / 1024 / 1024 + steady_state_mfu = ( + 100 + * num_flops_per_token + * settings.total_batch_size + * (step - 10) + / total_training_time + / H100_BF16_PEAK_FLOPS + if total_training_time > 0 + else 0.0 + ) + num_params = param_counts["total"] + metrics = { + "val_bpb": float(val_bpb), + 
"training_seconds": float(total_training_time), + "total_seconds": float(t_end - t_start), + "peak_vram_mb": float(peak_vram_mb), + "mfu_percent": float(steady_state_mfu), + "total_tokens_M": float(total_tokens / 1e6), + "num_steps": int(step), + "num_params_M": float(num_params / 1e6), + "depth": int(settings.depth), + "startup_seconds": float(t_start_training - t_start), + } + + print("---") + print(f"val_bpb: {metrics['val_bpb']:.6f}") + print(f"training_seconds: {metrics['training_seconds']:.1f}") + print(f"total_seconds: {metrics['total_seconds']:.1f}") + print(f"peak_vram_mb: {metrics['peak_vram_mb']:.1f}") + print(f"mfu_percent: {metrics['mfu_percent']:.2f}") + print(f"total_tokens_M: {metrics['total_tokens_M']:.1f}") + print(f"num_steps: {metrics['num_steps']}") + print(f"num_params_M: {metrics['num_params_M']:.1f}") + print(f"depth: {metrics['depth']}") + return metrics diff --git a/component_system/config.py b/component_system/config.py new file mode 100644 index 000000000..9975ab2d2 --- /dev/null +++ b/component_system/config.py @@ -0,0 +1,31 @@ +"""Static configuration for the component system. 
No dynamic or per-run values.""" +from __future__ import annotations + +from pathlib import Path + +COMPONENT_SYSTEM_ROOT = Path(__file__).resolve().parent + +# Module import paths for training (used by mainline assembler) +MODEL_MODULE = "component_system.components.model" +OPTIMIZER_MODULE = "component_system.components.optimizer" +TRAINING_STEP_MODULE = "component_system.components.trainer" + +# Promotion threshold: improve val_bpb by at least this much to promote +PROMOTION_THRESHOLD = 0.001 + +# Worktree root relative to project (string for display/config compatibility) +WORKTREE_ROOT = "component_system/history/worktrees" + +# Default branch name suggested in UI when no branches exist (not a global baseline) +DEFAULT_BASELINE_BRANCH = "master" + + +def get_training_binding() -> dict[str, str | float]: + """Return a static dict used by training mainline/trainer (no baseline_version).""" + return { + "model_module": MODEL_MODULE, + "optimizer_module": OPTIMIZER_MODULE, + "training_step_module": TRAINING_STEP_MODULE, + "promotion_threshold": PROMOTION_THRESHOLD, + "worktree_root": WORKTREE_ROOT, + } diff --git a/component_system/domain/models.py b/component_system/domain/models.py new file mode 100644 index 000000000..f03c9a121 --- /dev/null +++ b/component_system/domain/models.py @@ -0,0 +1,91 @@ +from __future__ import annotations + +from enum import Enum +from typing import Any + +from pydantic import BaseModel, Field + + +class SeedStatus(str, Enum): + draft = "draft" + queued = "queued" + planning = "planning" + generated = "generated" + dca_queued = "dca_queued" + adapting = "adapting" + running = "running" + failed = "failed" + passed = "passed" + promoted = "promoted" + + +class StageName(str, Enum): + p = "p" + dca = "dca" + direct = "direct" + + +class RunStatus(str, Enum): + queued = "queued" + running = "running" + succeeded = "succeeded" + failed = "failed" + + +class PlanIdea(BaseModel): + title: str = "" + target_component: str = "model" + 
description: str = "" + source_refs: list[str] = Field(default_factory=list) + commit_sha: str | None = None + + +class StageRun(BaseModel): + run_id: str + seed_id: str + stage: StageName + status: RunStatus + task_id: str + created_at: float + updated_at: float + log_path: str | None = None + stderr_log_path: str | None = None + prompt_path: str | None = None + summary: dict[str, Any] = Field(default_factory=dict) + metrics: dict[str, Any] = Field(default_factory=dict) + signal: str | None = None + error: str | None = None + + +class SeedRecord(BaseModel): + seed_id: str + prompt: str + status: SeedStatus = SeedStatus.draft + created_at: float + updated_at: float + baseline_branch: str = "baseline" + worktree_path: str | None = None + latest_run_id: str | None = None + ralph_loop_enabled: bool = False + latest_signal: str | None = None + latest_metrics: dict[str, Any] = Field(default_factory=dict) + plan: PlanIdea | None = None + last_error: str | None = None + + +class DashboardColumn(BaseModel): + id: str + title: str + description: str + seeds: list[SeedRecord] + + +class DashboardViewModel(BaseModel): + setup_error: str | None = None + baseline_metrics_by_branch: dict[str, dict[str, object]] = Field(default_factory=dict) + default_baseline_branch: str = "master" + available_branches: list[str] = Field(default_factory=list) + seed_count: int + columns: list[DashboardColumn] + selected_seed: SeedRecord | None = None + daemon_status: str = "stopped" # "running" | "stopped" diff --git a/component_system/entrypoint.py b/component_system/entrypoint.py new file mode 100644 index 000000000..33fc2d426 --- /dev/null +++ b/component_system/entrypoint.py @@ -0,0 +1,18 @@ +"""Standalone entrypoint for the component_system baseline.""" +from __future__ import annotations + +import sys +from pathlib import Path + +if __package__ in {None, ""}: + sys.path.insert(0, str(Path(__file__).resolve().parent.parent)) + +from component_system.training.mainline import 
run_mainline_training + + +def main() -> None: + run_mainline_training() + + +if __name__ == "__main__": + main() diff --git a/component_system/package.json b/component_system/package.json new file mode 100644 index 000000000..5ae45136d --- /dev/null +++ b/component_system/package.json @@ -0,0 +1,13 @@ +{ + "name": "autoresearch-component-system-ui", + "private": true, + "scripts": { + "build:css": "tailwindcss -i ./web/static/tailwind.input.css -o ./web/static/app.css --minify", + "watch:css": "tailwindcss -i ./web/static/tailwind.input.css -o ./web/static/app.css --watch" + }, + "devDependencies": { + "autoprefixer": "^10.4.20", + "postcss": "^8.4.49", + "tailwindcss": "^3.4.17" + } +} diff --git a/component_system/postcss.config.js b/component_system/postcss.config.js new file mode 100644 index 000000000..5cbc2c7d8 --- /dev/null +++ b/component_system/postcss.config.js @@ -0,0 +1,6 @@ +module.exports = { + plugins: { + tailwindcss: {}, + autoprefixer: {} + } +}; diff --git a/component_system/protocol.md b/component_system/protocol.md new file mode 100644 index 000000000..7c27b126e --- /dev/null +++ b/component_system/protocol.md @@ -0,0 +1,290 @@ +# autoresearch — Component-System Protocol + +This document is the operating manual for the component-system workflow. +The system runs a continuous Seed -> P -> DCA loop to discover, generate, +adapt, evaluate, and promote improvements to the training stack. + +The main objective is simple: improve `val_bpb` against the current baseline +without breaking the canonical component-system entrypoint or introducing +unreasonable complexity. + +VRAM is a first-class constraint. Higher memory use is acceptable only when the +quality gain is meaningful; avoid candidates that produce small or ambiguous +`val_bpb` gains while causing large memory growth. 
+ +## Top-Level Bootstrap Rule + +If you are an interactive code agent that was merely told to "follow this +protocol", do not manually simulate the entire workflow inside one foreground +session. + +The intended control flow is: +1. Read this file and the required context files. +2. Ensure the queue and state layout exist. +3. Create or refine a seed from a human prompt. +4. Queue the seed for P. +5. Start the daemon: `uv run component_system/run.py`. +6. Let daemon workers execute P and DCA via file-based handoff. +7. Monitor the daemon, queue, and logs; do not simulate stages in-session. + +Manual execution of an individual stage is only for the agent process that was +invoked by the daemon for that specific task. + +## Architecture + +```text +component_system/ + protocol.md <- overall workflow protocol + entrypoint.py <- canonical training entrypoint + PDCA-PLAN.md <- P stage rules + PDCA-DO-CHECK-ACTION.md <- DCA stage rules + run.py <- resident daemon and worker dispatch + task.py <- queue and JSON state helpers + baseline_branches.json <- per-branch baseline mapping (workflow-managed; read-only) + baseline_metrics.json <- baseline run metrics (workflow-managed; read-only) + config.py <- promotion threshold and static binding + history/ <- runtime dir (auto-created) + logs/ <- agent stdout/stderr logs + queue/{p,dca,done,error}/ <- stage handoff and archival + state/{seeds,runs,events}/<- durable workflow state + worktrees/ <- per-seed git worktrees + components/ + model.py + optimizer.py + trainer.py + training/ + mainline.py +``` + +## Core Goal and Decision Rule + +Optimize for lower `val_bpb`. A candidate is worth promoting only when the gain +is real, the implementation is understandable, and the cost in memory or +complexity is justified. + +Apply this bias consistently: +- Lower `val_bpb` is the primary success metric. 
+- VRAM is a soft but important constraint: some increase is acceptable, but + dramatic growth needs correspondingly strong quality gains. +- Simpler changes are preferred when results are similar. +- A tiny gain that adds brittle complexity is usually not worth promotion. +- A tiny gain that materially increases VRAM is usually not worth promotion. +- A simplification that preserves or slightly improves quality is a strong outcome. +- If the signal is ambiguous, treat it as `neutral` and do not promote. + +## Required Reading Before Any Work + +Read in this order: +1. `component_system/protocol.md` +2. The stage-specific document (right after protocol): `component_system/PDCA-DO-CHECK-ACTION.md` for DCA, `component_system/PDCA-PLAN.md` for P +3. `prepare.py` for fixed data and evaluation behavior; never modify it +4. `component_system/entrypoint.py` for the canonical execution path +5. `component_system/config.py` for promotion threshold and static binding + +Baseline files (workflow-managed; read-only): `baseline_branches.json`, `baseline_metrics.json`. For interactive bootstrap, inspect recent `queue/done/` and baseline state. + +## Workspace and Path Rules + +When the daemon invokes you for a P or DCA task, your current working directory +is the seed worktree. In that mode: + +- Read and edit only within the seed worktree. +- Use only relative paths from the current working directory. +- Do not request or depend on absolute paths or files outside the worktree. + +## Hard Constraints + +1. Never modify `prepare.py`. +2. `uv run component_system/entrypoint.py` must remain the canonical, + working component-system training command. +3. The root repo must stay compatible with the upstream implementation; + do not require changes to root `train.py`. +4. Stage-to-stage handoff must happen through files under `queue/`, not + merely in memory or only in agent conversation state. +5. 
Only the DCA promotion flow may update `baseline_metrics.json` and `baseline_branches.json`. +6. Do not bypass the baseline mechanism by manually merging branches or + force-advancing the baseline outside workflow control. + +## Baseline-First Rule + +Establish baseline before evaluating seeds: if `baseline_metrics.json` has no `last_val_bpb`, run the baseline (no-changes) measurement first. Use that result as the reference for promotion. + +```mermaid +flowchart TD + A[Create seed] --> B{Baseline result exists?} + B -- No --> C[Create or reuse __baseline__ seed] + C --> D[Queue baseline DCA] + D --> E[Run baseline measurement from project root] + E --> F[Save baseline metrics in baseline_metrics.json] + F --> G[Release waiting seeds] + B -- Yes --> G + G --> H[Seed stays in draft or queued with no worktree] + H --> I[Queue P run] + I --> J[Create seed worktree at P start] + J --> K[P agent plans and commits on seed branch] + K --> L[Queue DCA run] + L --> M[DCA agent adapts, runs training, and reports metrics] + M --> N{Promotion signal?} + N -- Positive --> O[Merge seed branch into baseline] + O --> P{Merge conflict?} + P -- No --> Q[Update baseline metadata and finish seed] + P -- Yes --> R[Queue conflict-resolution DCA] + R --> M + N -- Neutral or Negative --> S[Keep result in state only] +``` + +## Workflow Stages + +The sections below describe what each daemon-dispatched stage worker does. +They are not instructions for a top-level interactive agent to perform the +entire lifecycle manually. + +### P — Discovery / Plan / Initial Generation + +Read `component_system/PDCA-PLAN.md`. + +Responsibilities: +- Refine the seed prompt into a concrete plan. +- Create or refresh the seed worktree from the active baseline. +- Generate the first candidate implementation in the worktree. +- Keep the change focused enough that DCA can evaluate it cleanly. +- Commit the generated candidate on the seed branch so DCA receives a stable snapshot. 
+ +P is about producing a plausible, testable first version, not claiming success. + +### DCA — Delivery / Check / Action + +Read `component_system/PDCA-DO-CHECK-ACTION.md`. + +Responsibilities: +- Adapt and fix the generated candidate inside the seed worktree. +- Run the canonical training/evaluation entrypoint. +- Read the structured metrics from the run output. +- Decide whether the result is positive, neutral, or negative relative to baseline. +- Promote the seed branch into baseline only when the signal is strong enough. + +DCA is the stage that turns a raw idea into a measured outcome. + +## Canonical Run and Output + +The canonical component-system execution path is: + +```bash +uv run component_system/entrypoint.py +``` + +Allow **at least 600 seconds** when DCA runs this (e.g. `timeout 600 uv run ...`). + +DCA must report a structured JSON summary (including `metrics`). Runner uses it first; falls back to stdout/stderr parsing if missing. No metrics → recovery DCA inspects logs. Canonical metrics: + +```text +--- +val_bpb: 0.997900 +training_seconds: 300.1 +total_seconds: 325.9 +peak_vram_mb: 45060.2 +mfu_percent: 39.80 +total_tokens_M: 499.6 +num_steps: 953 +num_params_M: 50.3 +depth: 8 +startup_seconds: 25.8 +``` + +Treat `val_bpb` as the primary metric. `peak_vram_mb`, total runtime, and code +complexity are secondary constraints that influence promotion decisions. + +## VRAM Rule + +Track `peak_vram_mb` on every serious evaluation run and treat it as required +decision input, not a cosmetic metric. + +- Some VRAM growth is acceptable when it buys a clear `val_bpb` improvement. +- Large VRAM increases require a correspondingly strong quality gain. +- If two candidates are similar on `val_bpb`, prefer the lower-VRAM one. +- If a candidate regresses or barely improves `val_bpb` while increasing VRAM + substantially, treat it as a bad trade and do not promote it. 
+- Avoid changes that risk blowing up memory usage unless the expected upside is + compelling enough to justify the experiment. + +## Promotion Rule + +A run is promotable only if all of the following hold: +- The run completed successfully. +- `val_bpb` improved enough over the active baseline to count as a real win. +- VRAM growth is not unreasonable for the magnitude of the gain. +- The change is understandable, maintainable, and reversible. + +If the candidate is equal, worse, noisy, or hard to justify, do not promote it. +Record the outcome and move on. + +## Failure Handling + +Use the same judgment standard as the original autoresearch loop: + +- If a run crashes because of a simple bug, fix it, rerun, and update the same + run record. +- If the idea is fundamentally flawed, archive it without promotion. +- If the task cannot be recovered quickly, move it into the error flow and + persist the failure details. +- Crashes are negative evidence; they should not silently disappear. + +## Bootstrap Procedure for Interactive Sessions + +1. Read baseline files and recent queue/state. +2. Ensure queue/state/worktree layout exists. +3. Create a seed from the human prompt and queue it for P. +4. Start `uv run component_system/run.py` and monitor; do not run P/DCA manually. + +## Operating Loop + +1. Seed persisted in `state/seeds/`, queued to `queue/p/`. +2. P refreshes worktree, generates code, commits on seed branch. +3. Daemon queues DCA. +4. DCA adapts, runs, evaluates; promotes or archives. +5. State persisted under `state/`; daemon continues with next work. + +## State and Logging + +- `component_system/history/state/seeds/`, `component_system/history/state/runs/`, `component_system/history/state/events/` — seed and run state. +- `component_system/history/queue/done/`, `component_system/history/queue/error/` — completed and failed tasks. +- `component_system/history/logs/` — agent stdout/stderr. + +Use filesystem state as source of truth, not chat context. 
+ +## Daemon + +`run.py` runs two single-threaded workers polling `component_system/history/queue/p/` and `component_system/history/queue/dca/`. Workers dispatch to an external code agent; the agent reads files, edits the worktree, runs the entrypoint, and prints structured summaries. + +Start: + +```bash +# Default backend +uv run component_system/run.py + +# Alternate backends +PDCA_AGENT=codex uv run component_system/run.py +PDCA_AGENT=opencode uv run component_system/run.py +``` + +### Agent Backends + +| `PDCA_AGENT` | CLI invoked | Prompt delivery | +|--------------|-------------|-----------------| +| `claude` (default) | `claude -p --verbose` | stdin | +| `codex` | `codex exec -a never --sandbox workspace-write` | positional arg | +| `opencode` | `opencode run` | positional arg | + +### Timeouts + +Each stage has a default timeout in seconds and can be overridden through the +environment: + +| Variable | Default | Purpose | +|----------|---------|---------| +| `PDCA_TIMEOUT_P` | 900 | Planning and initial code generation | +| `PDCA_TIMEOUT_DCA` | 3600 | Adaptation, training, evaluation, and promotion | + +### Logs + +Agent stdout/stderr → `component_system/history/logs/`. 
diff --git a/component_system/repositories/state.py b/component_system/repositories/state.py new file mode 100644 index 000000000..7d60ae92a --- /dev/null +++ b/component_system/repositories/state.py @@ -0,0 +1,75 @@ +from __future__ import annotations + +from typing import Any + +from component_system.domain.models import SeedRecord, StageRun +from component_system.task import ( + append_event, + list_runs, + list_seeds, + load_baseline_branch_map, + load_baseline_metrics, + load_events, + load_run, + load_seed, + save_baseline_branch_map, + save_baseline_metrics, + save_run, + save_seed, +) + + +class BaselineBranchMapRepository: + """Per-seed baseline branch mapping (seed_id -> baseline_branch).""" + + def set_branch_for_seed(self, seed_id: str, branch: str) -> None: + m = load_baseline_branch_map() + m[seed_id] = branch + save_baseline_branch_map(m) + + +class BaselineMetricsRepository: + """Per-baseline-branch metrics (last_val_bpb, promoted_*, commit_sha, etc.).""" + + def get_all(self) -> dict[str, dict[str, Any]]: + return load_baseline_metrics() + + def get_for_branch(self, branch: str) -> dict[str, Any] | None: + return load_baseline_metrics().get(branch) + + def update_for_branch(self, branch: str, metrics: dict[str, Any]) -> None: + data = load_baseline_metrics() + data[branch] = {**data.get(branch, {}), **metrics} + save_baseline_metrics(data) + + +class SeedRepository: + def list(self) -> list[SeedRecord]: + return [SeedRecord.model_validate(seed) for seed in list_seeds()] + + def get(self, seed_id: str) -> SeedRecord | None: + data = load_seed(seed_id) + return SeedRecord.model_validate(data) if data else None + + def save(self, seed: SeedRecord) -> SeedRecord: + save_seed(seed.model_dump(mode="json")) + return seed + + def append_event(self, seed_id: str, kind: str, message: str, **payload: Any) -> list[dict[str, Any]]: + return append_event(seed_id, {"kind": kind, "message": message, **payload}) + + def events(self, seed_id: str) -> list[dict[str, 
Any]]: + return load_events(seed_id) + + +class RunRepository: + def list(self, seed_id: str | None = None) -> list[StageRun]: + return [StageRun.model_validate(run) for run in list_runs(seed_id)] + + def get(self, run_id: str) -> StageRun | None: + data = load_run(run_id) + return StageRun.model_validate(data) if data else None + + def save(self, run: StageRun) -> StageRun: + save_run(run.model_dump(mode="json")) + return run diff --git a/component_system/run.py b/component_system/run.py new file mode 100644 index 000000000..a0b8c79f4 --- /dev/null +++ b/component_system/run.py @@ -0,0 +1,801 @@ +"""Seed -> P -> DCA daemon for the component-system web app.""" +from __future__ import annotations + +if __package__ in {None, ""}: + import sys + from pathlib import Path + + sys.path.insert(0, str(Path(__file__).resolve().parent.parent)) + +import json +import os +import shutil +import signal +import subprocess +import sys +import threading +import time +import traceback +from concurrent.futures import ThreadPoolExecutor +from pathlib import Path +from typing import Any + +from component_system.domain.models import StageName +from component_system.services.workflow import BASELINE_SEED_ID, WorkflowService +from component_system.task import ( + BASELINE_BRANCHES_PATH, + BASELINE_METRICS_PATH, + COMPONENT_SYSTEM_ROOT, + claim_pending, + DAEMON_HEARTBEAT_PATH, + daemon_heartbeat, + ensure_queue_layout, + LOG_ROOT, + move_to_done, + move_to_error, + read_task, + restore_in_progress_tasks, +) + +PROJECT_ROOT = COMPONENT_SYSTEM_ROOT.parent +LOG_DIR = LOG_ROOT +RESULTS_TSV = PROJECT_ROOT / "results.tsv" +PROGRESS_PNG = PROJECT_ROOT / "progress.png" + +POLL_INTERVAL = 10.0 +_shutdown = False +WORKFLOW = WorkflowService() + +DEFAULT_TIMEOUTS = {"p": 900, "dca": 3600, "direct": 3600} + +# Canonical DCA entrypoint run: require ≥600s so training can complete. Agent must set command/tool timeout ≥ this. 
+DCA_CANONICAL_RUN_TIMEOUT_SECONDS = 600 + +STAGE_DOCS = { + "p": ["PDCA-PLAN.md"], + "dca": ["PDCA-DO-CHECK-ACTION.md"], +} + +AGENT_CONFIGS: dict[str, dict[str, Any]] = { + "claude": {"cmd": ["claude", "-p", "--verbose"], "via": "stdin"}, + "codex": {"cmd": ["codex", "exec", "-a", "never", "--sandbox", "workspace-write"], "via": "arg"}, + "opencode": {"cmd": ["opencode", "run"], "via": "arg"}, +} + + +def _signal_handler(_sig: int, _frame: Any) -> None: + global _shutdown + _shutdown = True + print("\n[daemon] shutdown requested") + + +def _get_timeout(stage: str) -> int: + return int(os.environ.get(f"PDCA_TIMEOUT_{stage.upper()}", DEFAULT_TIMEOUTS.get(stage, 600))) + + +def _build_log_paths(run_id: str) -> tuple[Path, Path]: + LOG_DIR.mkdir(parents=True, exist_ok=True) + stdout_path = LOG_DIR / f"{run_id}.stdout.log" + stderr_path = LOG_DIR / f"{run_id}.stderr.log" + return stdout_path, stderr_path + + +def _write_prompt_file(run_id: str, prompt: str) -> Path: + """Save the agent prompt to a file for debugging. 
Returns the path.""" + LOG_DIR.mkdir(parents=True, exist_ok=True) + prompt_path = LOG_DIR / f"{run_id}.prompt.txt" + prompt_path.write_text(prompt, encoding="utf-8") + return prompt_path + + +def _is_root_venv_active() -> bool: + expected = (PROJECT_ROOT / ".venv").resolve() + active = os.environ.get("VIRTUAL_ENV") + if not active: + return False + try: + return Path(active).resolve() == expected + except OSError: + return False + + +def _dca_command_guidance() -> tuple[str, str]: + timeout_prefix = f"timeout {DCA_CANONICAL_RUN_TIMEOUT_SECONDS}" + if _is_root_venv_active(): + return ( + f"{timeout_prefix} uv run --active component_system/entrypoint.py", + "Root .venv is active; use --active to reuse it from the worktree.", + ) + return ( + f"{timeout_prefix} uv run component_system/entrypoint.py", + "No active root .venv detected; fallback avoids --active so uv can run normally.", + ) + + +def _build_direct_code_prompt(prompt: str) -> str: + return ( + "You are running as a direct code agent from the project root of this repository.\n" + "Execute the user's request directly in the current working tree.\n" + "Do not switch into seed worktrees for this task.\n\n" + "User request:\n" + f"{prompt.strip()}\n" + ) + + +def _stream_pipe_to_file(pipe: Any, handle: Any, chunks: list[str]) -> None: + try: + while True: + piece = pipe.read(16) + if not piece: + break + chunks.append(piece) + handle.write(piece) + handle.flush() + finally: + try: + pipe.close() + except Exception: + pass + + +def _combined_output(stdout: str, stderr: str) -> str: + if stdout and stderr: + return f"{stdout}\n{stderr}" + return stdout or stderr + + +def _agent_failure_reason(exit_code: int, stdout: str, stderr: str) -> str: + combined = _combined_output(stdout, stderr) + if "timeout after " in combined: + return combined.strip().splitlines()[-1] + if exit_code == -1: + if combined.strip(): + return combined.strip().splitlines()[-1] + return "Agent execution failed before completion. 
See stdout/stderr logs for details." + return f"Agent exited with code {exit_code}. See stdout/stderr logs for details." + + +def _should_salvage_completed_dca(stage: str, exit_code: int, output_text: str) -> bool: + """Accept a DCA run when canonical metrics were printed despite agent exit issues.""" + if stage != "dca" or exit_code == 0: + return False + summary = WORKFLOW.extract_summary(output_text, StageName.dca) or {} + metrics = WORKFLOW.extract_dca_metrics(output_text, summary) + return metrics.get("val_bpb") is not None + + +def _agent_cwd(worktree_path: str | None) -> str: + """Resolve cwd for the agent: seed worktree when provided and present, else project root.""" + if not worktree_path: + return str(PROJECT_ROOT) + path = Path(worktree_path) + if not path.is_absolute(): + path = PROJECT_ROOT / path + resolved = path.resolve() + return str(resolved) if resolved.is_dir() else str(PROJECT_ROOT) + + +def _resolve_worktree_path(worktree_path: str | None) -> Path | None: + """Resolve worktree path to absolute Path, or None if invalid/missing.""" + if not worktree_path: + return None + path = Path(worktree_path) + if not path.is_absolute(): + path = PROJECT_ROOT / path + resolved = path.resolve() + return resolved if resolved.is_dir() else None + + +def _sync_results_tsv_into_worktree(worktree_path: str | None) -> None: + """Copy the latest root results.tsv into the seed worktree if it exists. Non-fatal on failure.""" + resolved = _resolve_worktree_path(worktree_path) + if resolved is None or not RESULTS_TSV.exists(): + return + dest = resolved / "results.tsv" + try: + shutil.copy2(RESULTS_TSV, dest) + except OSError as err: + print(f"[P] could not copy results.tsv into worktree: {err}", file=sys.stderr) + + +def _sync_baseline_json_into_worktree(worktree_path: str | None) -> None: + """Copy baseline_metrics.json and baseline_branches.json from project component_system into the worktree. 
+ Worktrees check out from baseline-branch; without this sync the agent would see stale or missing baseline data.""" + resolved = _resolve_worktree_path(worktree_path) + if resolved is None: + return + dest_dir = resolved / "component_system" + dest_dir.mkdir(parents=True, exist_ok=True) + for src_path, name in [ + (BASELINE_METRICS_PATH, "baseline_metrics.json"), + (BASELINE_BRANCHES_PATH, "baseline_branches.json"), + ]: + if not src_path.exists(): + continue + dest = dest_dir / name + try: + shutil.copy2(src_path, dest) + except OSError as err: + print(f"[P] could not copy {name} into worktree: {err}", file=sys.stderr) + + +def _sync_worktree_context(worktree_path: str | None) -> None: + """Sync all workflow-managed live data into the worktree so the agent sees current state. + Call before invoking the agent when cwd is a worktree (P or DCA).""" + _sync_results_tsv_into_worktree(worktree_path) + _sync_baseline_json_into_worktree(worktree_path) + + +def _invoke_agent( + prompt: str, stage: str, run_id: str, worktree_path: str | None = None +) -> tuple[int, str, str, Path | None, Path | None]: + agent_name = os.environ.get("PDCA_AGENT", "claude") + config = AGENT_CONFIGS.get(agent_name) + if config is None: + raise ValueError(f"Unknown PDCA_AGENT={agent_name!r}. Supported: {', '.join(AGENT_CONFIGS)}") + + cmd = list(config["cmd"]) + timeout = _get_timeout(stage) + cwd = _agent_cwd(worktree_path) + # PYTHONUNBUFFERED=1 so child Python (e.g. uv run entrypoint.py) flushes stdout + # immediately instead of block-buffering when stdout is a pipe; otherwise + # stdout log only appears in one shot after the task finishes. 
+ env = {**os.environ, "PYTHONUNBUFFERED": "1"} + if agent_name == "opencode": + project_root_glob = str(PROJECT_ROOT.resolve().as_posix()) + "/**" + existing = {} + try: + if os.environ.get("OPENCODE_PERMISSION"): + existing = json.loads(os.environ["OPENCODE_PERMISSION"]) + except (json.JSONDecodeError, KeyError): + pass + ext_dir = dict(existing.get("external_directory", {})) + ext_dir[project_root_glob] = "allow" + env["OPENCODE_PERMISSION"] = json.dumps({"external_directory": ext_dir}) + popen_kwargs: dict[str, Any] = { + "cwd": cwd, + "env": env, + "stdout": subprocess.PIPE, + "stderr": subprocess.PIPE, + "text": True, + "encoding": "utf-8", + "errors": "replace", + "bufsize": 1, + } + if config["via"] == "stdin": + popen_kwargs["stdin"] = subprocess.PIPE + else: + # Use DEVNULL so the agent never reads from parent's stdin (avoids EBADF under nohup/redirects). + popen_kwargs["stdin"] = subprocess.DEVNULL + cmd.append(prompt) + + print(f"[{stage.upper()}] invoking {agent_name} (timeout={timeout}s)") + stdout_path, stderr_path = _build_log_paths(run_id) + try: + process = subprocess.Popen(cmd, **popen_kwargs) + except FileNotFoundError: + msg = f"{agent_name!r} binary not found. Install it or set PDCA_AGENT to a different backend." 
+ return -1, "", msg, None, None + + if config["via"] == "stdin" and process.stdin is not None: + process.stdin.write(prompt) + process.stdin.close() + + stdout_chunks: list[str] = [] + stderr_chunks: list[str] = [] + with open(stdout_path, "w", encoding="utf-8") as stdout_handle, open( + stderr_path, "w", encoding="utf-8" + ) as stderr_handle: + stdout_handle.write(f"stage: {stage.upper()}\nagent: {agent_name}\n") + stdout_handle.write(f"timestamp: {time.strftime('%Y%m%d-%H%M%S')}\n\n") + stdout_handle.flush() + stderr_handle.write(f"stage: {stage.upper()}\nagent: {agent_name}\n") + stderr_handle.write(f"timestamp: {time.strftime('%Y%m%d-%H%M%S')}\n\n") + stderr_handle.flush() + + stdout_thread = threading.Thread( + target=_stream_pipe_to_file, + args=(process.stdout, stdout_handle, stdout_chunks), + daemon=True, + ) + stderr_thread = threading.Thread( + target=_stream_pipe_to_file, + args=(process.stderr, stderr_handle, stderr_chunks), + daemon=True, + ) + stdout_thread.start() + stderr_thread.start() + + timed_out = False + try: + process.wait(timeout=timeout) + except subprocess.TimeoutExpired: + timed_out = True + process.kill() + + stdout_thread.join() + stderr_thread.join() + + stdout = "".join(stdout_chunks) + stderr = "".join(stderr_chunks) + if timed_out: + timeout_message = f"timeout after {timeout}s" + if stderr: + stderr = f"{stderr}\n{timeout_message}" + else: + stderr = timeout_message + return -1, stdout, stderr, stdout_path, stderr_path + + return process.returncode, stdout, stderr, stdout_path, stderr_path + + +def _build_merge_resolution_prompt(task: dict[str, Any]) -> str: + """Lightweight prompt for merge-resolution DCA: no protocol/docs, just commit, merge, report.""" + task_json = json.dumps(task, indent=2) + target_branch = task.get("baseline_branch", "master") # branch we want to merge into (e.g. 
master) + worktree_path = task.get("worktree_path") or "" + seed_id = task.get("seed_id", "") + last_metrics = task.get("last_metrics") or {} + last_summary = task.get("last_summary") or {} + notes = last_summary.get("notes", "Merge resolution: committed and merged into baseline.") + completed_at = last_summary.get("completed_at", "YYYY-MM-DD HH:MM:SS") + report_json = json.dumps({ + "checks": ["merge_resolution"], + "notes": notes, + "completed_at": completed_at, + "commit_sha": "", + "metrics": last_metrics, + }, indent=2) + + if seed_id == BASELINE_SEED_ID: + # We are resolving the merge of __baseline__ INTO target_branch (e.g. master). + # git merge X = merge X into current branch; so we need to be on target_branch, then git merge __baseline__. + cwd_note = ( + "Your working directory is the project root (main repo). " + "Do NOT run the merge from the __baseline__ worktree: that would merge the wrong way.\n\n" + ) + steps = ( + "Steps:\n" + f"1. Find where {target_branch!r} is checked out: run git worktree list and identify the path whose branch is {target_branch!r} (often the main repo).\n" + f"2. cd to that directory, then run: git merge {BASELINE_SEED_ID!r}. Resolve any conflicts, then commit the merge.\n" + f" Correct example (merge __baseline__ into {target_branch}):\n" + f" git worktree list\n" + f" cd # e.g. main repo\n" + f" git merge {BASELINE_SEED_ID!r}\n" + " Wrong (do not do this): cd to the __baseline__ worktree and run git merge master — that merges master into __baseline__.\n" + "3. Do not run the training entrypoint; the experiment already completed and metrics exist.\n" + "4. Print the DCA summary block below (same metrics as the previous run). Include the current commit SHA (after committing the merge) in the DCA summary JSON.\n\n" + ) + else: + # Normal seed: merge the baseline branch (__baseline__) INTO the seed worktree so the seed is up to date. + if worktree_path: + cwd_note = ( + "Your working directory is the project root. 
" + f"The seed worktree is at {worktree_path!r}; run git commands from that directory (e.g. cd there first).\n\n" + ) + else: + cwd_note = ( + "Your working directory is the project root. " + f"The seed worktree is at component_system/history/worktrees/{seed_id!r}; run git commands from that directory for the merge.\n\n" + ) + steps = ( + "Steps:\n" + "1. Commit any uncommitted changes in the seed worktree (e.g. batch-size or other fixes).\n" + f"2. In the seed worktree, merge the baseline branch into the current branch: git merge {BASELINE_SEED_ID!r}. Resolve any conflicts, then commit the merge.\n" + "3. Do not run the training entrypoint; the experiment already completed and metrics exist.\n" + "4. Print the DCA summary block below (same metrics as the previous run). Include the current commit SHA (after committing the merge) in the DCA summary JSON.\n\n" + ) + + return ( + "MERGE RESOLUTION (focused task). Do not read protocol or stage docs.\n\n" + "Task (inline):\n" + f"{task_json}\n\n" + f"{cwd_note}" + f"{steps}" + "AUTORESEARCH_DCA_SUMMARY_BEGIN\n" + f"{report_json}\n" + "AUTORESEARCH_DCA_SUMMARY_END\n" + ) + + +def _build_prompt(stage: str, task: dict[str, Any], task_path: Path) -> str: + """Build the agent prompt for a stage. Prompt types (by weight): + - P: full header (protocol, stage doc, baseline files, task) + P workflow. Heavy. + - DCA metrics_recovery: full header + log-recovery instructions. Heavy. + - DCA merge_resolution: lightweight; task + commit, merge, report (no protocol/docs). Light. + - DCA baseline_measurement: full header + baseline retry/OOM/commit/run. Heavy. + - DCA normal: full header + adapt/run/commit/report. Heavy. 
+ """ + task_json = json.dumps(task, indent=2) + rel_task = task_path.relative_to(PROJECT_ROOT).as_posix() + worktree_path = task.get("worktree_path", "component_system/history/worktrees") + agent_cwd = _agent_cwd(worktree_path) + worktree_dir = Path(agent_cwd) + + # Worktree runs must stay entirely within the copied seed workspace to avoid external_directory requests. + if worktree_dir.resolve() != PROJECT_ROOT.resolve(): + context_protocol = " - component_system/protocol.md" + docs = "\n".join(f" - component_system/{doc}" for doc in STAGE_DOCS[stage]) + task_block = ( + "Task content (provided inline; do not look up any external task file):\n" + f"{task_json}\n\n" + ) + worktree_note = ( + "Your working directory is the assigned workflow worktree (your current directory).\n" + "All required file context is already copied into this worktree under component_system/.\n" + "Use only paths relative to your current working directory. " + "Do not request access to absolute paths, parent-directory paths, or files outside the worktree.\n" + ) + else: + context_protocol = " - component_system/protocol.md" + docs = "\n".join(f" - component_system/{doc}" for doc in STAGE_DOCS[stage]) + task_path_rel = f" - {rel_task}" + task_block = f"Task file:\n{task_path_rel}\n\nTask content:\n{task_json}\n\n" + worktree_note = "Your working directory is the project root.\n" + + required_context = ( + "Required context (read first; paths relative to your cwd):\n" + f" - component_system/protocol.md\n" + f"{docs}\n" + ) + baseline_files_note = ( + "Baseline reference files (workflow-managed; read-only):\n" + " - component_system/baseline_branches.json (per-branch baseline mapping)\n" + " - component_system/baseline_metrics.json (baseline run metrics)\n" + "The workflow writes these; only read them for context.\n\n" + ) + header = ( + "You are working on the autoresearch component-system workflow.\n\n" + f"{required_context}\n" + f"{baseline_files_note}" + f"{task_block}" + 
f"{worktree_note}" + "Do not edit files outside the worktree unless the prompt explicitly requires it.\n\n" + ) + + if stage == "p": + return header + ( + "You are the P stage.\n\n" + "## Read results.tsv first (avoid idea duplication)\n" + "Before choosing a hypothesis, read `results.tsv` in your cwd if it exists. " + "Use it to avoid proposing ideas already tried or discarded; only repeat an idea if you have a clear new angle (e.g. different implementation or target component). " + "See component_system/PDCA-PLAN.md for full guidance.\n\n" + "Workflow:\n" + "1. Refine the seed prompt into a concrete implementation idea.\n" + "2. Implement the first generated version of that idea in the provided worktree.\n" + "3. Create a git commit in the seed branch (current branch in the worktree).\n" + "4. Print a JSON summary between these exact markers:\n" + "AUTORESEARCH_P_SUMMARY_BEGIN\n" + '{"idea":"...","target_component":"model|optimizer|trainer","description":"...","source_refs":["..."],"commit_sha":"...","completed_at":"YYYY-MM-DD HH:MM:SS"}\n' + "AUTORESEARCH_P_SUMMARY_END\n" + "One branch per seed: you are already on the seed branch in the worktree.\n" + "Do not merge branches; only the DCA stage may trigger a merge into baseline.\n" + ) + if stage == "dca": + merge_resolution = task.get("merge_resolution") is True + metrics_recovery = task.get("metrics_recovery") is True + if merge_resolution: + return _build_merge_resolution_prompt(task) + dca_cmd, dca_note = _dca_command_guidance() + baseline_measurement = task.get("seed_id") == "__baseline__" + conflict_block = "" + if metrics_recovery: + source_run_id = task.get("source_run_id", "unknown") + stdout_log = task.get("source_stdout_log_path", "missing") + stderr_log = task.get("source_stderr_log_path", "missing") + return header + ( + "METRICS RECOVERY: The previous DCA run completed, but the runner could not confirm metrics from its final report.\n" + "Do not rerun training. Do not edit code. 
Do not create a commit.\n" + f"Inspect the saved logs for source run {source_run_id!r}:\n" + f"- stdout log: {stdout_log}\n" + f"- stderr log: {stderr_log}\n" + "Recover the canonical metrics from those logs if they are present, then print the final JSON summary.\n" + "Use this exact shape:\n" + "AUTORESEARCH_DCA_SUMMARY_BEGIN\n" + '{"checks":["log_metrics_recovery"],"notes":"Recovered metrics from saved logs.","completed_at":"YYYY-MM-DD HH:MM:SS","commit_sha":"","metrics":{"val_bpb":1.239972,"training_seconds":300.1,"total_seconds":360.4,"startup_seconds":25.8,"peak_vram_mb":11967.8,"mfu_percent":2.15,"total_tokens_M":140.5,"num_steps":268,"num_params_M":11.5,"depth":4}}\n' + "AUTORESEARCH_DCA_SUMMARY_END\n" + "If you still cannot recover metrics, print the same object with an empty metrics object and explain why in notes.\n" + ) + if baseline_measurement: + return header + conflict_block + ( + "BASELINE MEASUREMENT: establish the first reference metrics in the dedicated baseline worktree.\n" + "You must retry until the run completes successfully and you can report real metrics. Do not report empty metrics and stop.\n" + "If training fails with CUDA out of memory (OOM): the default batch size is for H100. Reduce device_batch_size (and if needed total_batch_size) in component_system/components/trainer.py (TrainingSettings) so training fits in available VRAM, then rerun until the baseline run completes. Only trivial execution fixes (e.g. batch size) are allowed; do not change model architecture or training logic.\n" + "If you modified any files (e.g. batch size for OOM), you must commit those changes on the baseline branch before reporting. 
An uncommitted worktree causes the follow-up merge to fail.\n" + f"Run the canonical command (≥{DCA_CANONICAL_RUN_TIMEOUT_SECONDS}s): {dca_cmd}\n" + f"({dca_note}) When you invoke this command, set your command/tool timeout to at least {DCA_CANONICAL_RUN_TIMEOUT_SECONDS} seconds so the process is not killed early.\n" + "Report the final result in JSON between these exact markers once training has completed successfully. Include the current commit SHA in the summary (commit any changes first).\n" + "AUTORESEARCH_DCA_SUMMARY_BEGIN\n" + '{"checks":["baseline_measurement"],"notes":"Measured the current baseline in the dedicated baseline worktree.","completed_at":"YYYY-MM-DD HH:MM:SS","commit_sha":"...","metrics":{"val_bpb":1.239972,"training_seconds":300.1,"total_seconds":360.4,"startup_seconds":25.8,"peak_vram_mb":11967.8,"mfu_percent":2.15,"total_tokens_M":140.5,"num_steps":268,"num_params_M":11.5,"depth":4}}\n' + "AUTORESEARCH_DCA_SUMMARY_END\n" + "If after all retries (including batch size reduction) metrics are still unavailable, only then print the same object with an empty metrics object and explain in notes.\n" + ) + return header + conflict_block + ( + "You are the DCA stage.\n" + "Do not put forward new ideas or optimize for better metrics. Your only goal is to make the P-stage code run and report the result. " + '"Adapt or fix" means: fix bugs, import/runtime errors, OOM (e.g. reduce batch size), and config/path issues only. ' + "Do not change model architecture, optimizer logic, hyperparameters, or training logic to improve results. " + "The task \"prompt\" is for context only; do not treat it as a goal to achieve in this stage.\n\n" + "Workflow:\n" + "1. Adapt or fix the generated code in the seed worktree until it runs.\n" + f"2. 
Run the canonical command (≥{DCA_CANONICAL_RUN_TIMEOUT_SECONDS}s): {dca_cmd}\n" + f" ({dca_note}) When you invoke this command, set your command/tool timeout to at least {DCA_CANONICAL_RUN_TIMEOUT_SECONDS} seconds so the process is not killed early.\n" + "3. If it fails for a simple reason, fix and rerun.\n" + "4. Create a git commit in the seed branch for your changes.\n" + "5. Report the final result in JSON between these exact markers. Include the current commit SHA in the summary.\n" + " Use this exact shape and include numeric metric values when available:\n" + "AUTORESEARCH_DCA_SUMMARY_BEGIN\n" + '{"checks":["entrypoint"],"notes":"what you adapted or fixed","completed_at":"YYYY-MM-DD HH:MM:SS","commit_sha":"...","metrics":{"val_bpb":1.239972,"training_seconds":300.1,"total_seconds":360.4,"startup_seconds":25.8,"peak_vram_mb":11967.8,"mfu_percent":2.15,"total_tokens_M":140.5,"num_steps":268,"num_params_M":11.5,"depth":4}}\n' + "AUTORESEARCH_DCA_SUMMARY_END\n" + " Do not omit the markers. Prefer this exact JSON report over prose. If metrics are unavailable,\n" + " still print the same object with an empty metrics object.\n" + "Do not edit baseline_branches.json or baseline_metrics.json (workflow writes them; read only). 
Do not merge branches yourself; the system will evaluate and promote if appropriate.\n" + ) + raise ValueError(f"Unknown stage: {stage}") + + +def _append_results_tsv(seed_id: str, run_metrics: dict[str, Any], signal: str, description: str) -> None: + status = "KEEP" if signal == "positive_signal" else "DISCARD" + val_bpb = run_metrics.get("val_bpb", "") + peak_vram_mb = run_metrics.get("peak_vram_mb", 0) + memory_gb = round(float(peak_vram_mb) / 1024, 2) if peak_vram_mb else "" + write_header = not RESULTS_TSV.exists() + with open(RESULTS_TSV, "a", encoding="utf-8") as handle: + if write_header: + handle.write("commit\tval_bpb\tmemory_gb\tstatus\tdescription\n") + handle.write(f"{seed_id}\t{val_bpb}\t{memory_gb}\t{status}\t{description}\n") + + +def _regenerate_progress_png() -> None: + try: + import matplotlib + + matplotlib.use("Agg") + import matplotlib.pyplot as plt + import pandas as pd + except ImportError: + return + + if not RESULTS_TSV.exists(): + return + + try: + df = pd.read_csv(RESULTS_TSV, sep="\t") + df["val_bpb"] = pd.to_numeric(df["val_bpb"], errors="coerce") + df["memory_gb"] = pd.to_numeric(df["memory_gb"], errors="coerce") + df["status"] = df["status"].str.strip().str.upper() + valid = df[df["val_bpb"].notna()].copy().reset_index(drop=True) + if valid.empty: + return + + baseline_bpb = valid.loc[0, "val_bpb"] + kept = valid[valid["status"] == "KEEP"] + best = float(kept["val_bpb"].min()) if not kept.empty else float(baseline_bpb) + + fig, ax = plt.subplots(figsize=(14, 7)) + ax.scatter(valid.index, valid["val_bpb"], c="#94a3b8", s=18, alpha=0.6, label="Runs") + if not kept.empty: + ax.scatter(kept.index, kept["val_bpb"], c="#38bdf8", s=42, label="Promoted") + ax.step(kept.index, kept["val_bpb"].cummin(), where="post", color="#0ea5e9", linewidth=2) + ax.set_xlabel("Experiment #") + ax.set_ylabel("Validation BPB (lower is better)") + ax.set_title("Component System Progress") + margin = (baseline_bpb - best) * 0.15 if baseline_bpb != best else 
0.005 + ax.set_ylim(best - margin, float(baseline_bpb) + margin) + ax.grid(True, alpha=0.2) + ax.legend(loc="upper right") + plt.tight_layout() + plt.savefig(PROGRESS_PNG, dpi=150, bbox_inches="tight") + plt.close(fig) + except Exception: + traceback.print_exc() + + +def _worker(stage: str, lane: str = "any") -> None: + worker_name = stage.upper() if lane == "any" else f"{stage.upper()}-{lane.upper()}" + print(f"[daemon] worker-{worker_name} started") + def eligible(payload: dict) -> bool: + return bool(WORKFLOW.is_seed_eligible_for_stage(payload.get("seed_id"), stage)) + + while not _shutdown: + task_path = claim_pending(stage, lane=lane, eligible_fn=eligible) + if task_path is None: + time.sleep(POLL_INTERVAL) + continue + + try: + task = read_task(task_path) + seed_id = task.get("seed_id") + run_id = task.get("run_id") + if not seed_id or not run_id: + move_to_error(task_path) + continue + started_seed = None + if stage == "direct": + started_seed, _ = WORKFLOW.mark_direct_code_run_started(seed_id, run_id) + else: + started_seed, _ = WORKFLOW.mark_run_started(seed_id, run_id) + if ( + stage == "dca" + and task.get("metrics_recovery") is not True + ): + started_seed = WORKFLOW.ensure_seed_worktree_ready(seed_id) + print(f"[{stage.upper()}] picked up {task['task_id']} for {seed_id}") + + worktree_path = task.get("worktree_path") + if started_seed is not None and started_seed.worktree_path is not None: + worktree_path = started_seed.worktree_path + # Merge-resolution DCA runs from project root so the agent can operate on repo and worktrees + if stage == "dca" and ( + task.get("merge_resolution") is True or task.get("metrics_recovery") is True + ): + worktree_path = None + + if worktree_path: + _sync_worktree_context(worktree_path) + + if stage == "direct": + prompt = _build_direct_code_prompt(task["prompt"]) + else: + prompt = _build_prompt(stage, task, task_path) + prompt_path = _write_prompt_file(run_id, prompt) + prompt_path_str = str(prompt_path) + exit_code, 
stdout, stderr, stdout_log_path, stderr_log_path = _invoke_agent( + prompt, stage, run_id, worktree_path=worktree_path + ) + + combined_output = _combined_output(stdout, stderr) + salvaged_dca = _should_salvage_completed_dca(stage, exit_code, combined_output) + if exit_code == 0 or salvaged_dca: + if stage == "p": + WORKFLOW.finish_p_run( + seed_id, + run_id, + stdout, + str(stdout_log_path) if stdout_log_path else None, + str(stderr_log_path) if stderr_log_path else None, + prompt_path_str, + ) + elif stage == "direct": + WORKFLOW.finish_direct_code_run( + seed_id, + run_id, + stdout, + stderr=stderr, + log_path=str(stdout_log_path) if stdout_log_path else None, + stderr_log_path=str(stderr_log_path) if stderr_log_path else None, + prompt_path=prompt_path_str, + ) + else: + run = WORKFLOW.finish_dca_run( + seed_id, + run_id, + stdout, + stderr=stderr, + log_path=str(stdout_log_path) if stdout_log_path else None, + stderr_log_path=str(stderr_log_path) if stderr_log_path else None, + prompt_path=prompt_path_str, + metrics_recovery=task.get("metrics_recovery") is True, + merge_resolution=task.get("merge_resolution") is True, + ) + if not run.summary.get("metrics_recovery_queued"): + description = run.summary.get("notes") or run.summary.get("idea") or seed_id + _append_results_tsv(seed_id, run.metrics, run.signal or "error", str(description)) + _regenerate_progress_png() + if salvaged_dca: + WORKFLOW.seed_repo.append_event( + seed_id, + "dca.salvaged", + f"DCA output contained final metrics, so the run was accepted despite agent exit code {exit_code}.", + run_id=run_id, + ) + move_to_done(task_path) + print(f"[{stage.upper()}] task {task['task_id']} done") + else: + if stage == "direct": + WORKFLOW.mark_direct_code_run_failed( + seed_id, + run_id, + _agent_failure_reason(exit_code, stdout, stderr), + task_path=task_path, + prompt_path=prompt_path_str, + log_path=str(stdout_log_path) if stdout_log_path else None, + stderr_log_path=str(stderr_log_path) if 
stderr_log_path else None, + ) + else: + WORKFLOW.mark_run_failed( + seed_id, + run_id, + _agent_failure_reason(exit_code, stdout, stderr), + task_path=task_path, prompt_path=prompt_path_str, + ) + print(f"[{stage.upper()}] task {task['task_id']} failed") + except Exception as exc: + traceback.print_exc() + if not task_path.exists(): + continue + try: + task = read_task(task_path) + seed_id = task.get("seed_id") + run_id = task.get("run_id") + if not seed_id or not run_id: + continue + prompt_path_str = None + if run_id: + p_path = LOG_DIR / f"{run_id}.prompt.txt" + if p_path.exists(): + prompt_path_str = str(p_path) + if stage == "direct": + WORKFLOW.mark_direct_code_run_failed( + seed_id, + run_id, + str(exc), + task_path=task_path, + prompt_path=prompt_path_str, + ) + else: + WORKFLOW.mark_run_failed( + seed_id, run_id, str(exc), + task_path=task_path, prompt_path=prompt_path_str, + ) + except Exception: + traceback.print_exc() + + print(f"[daemon] worker-{worker_name} stopped") + + +def main() -> None: + global _shutdown + signal.signal(signal.SIGINT, _signal_handler) + if sys.platform != "win32": + signal.signal(signal.SIGTERM, _signal_handler) + + ensure_queue_layout() + restored = restore_in_progress_tasks() + total_restored = sum(restored.values()) + if total_restored: + print( + "[daemon] restored in_progress tasks " + f"(p={restored['p']}, dca={restored['dca']}, direct={restored['direct']})" + ) + daemon_heartbeat() + agent = os.environ.get("PDCA_AGENT", "claude") + print(f"[daemon] starting component-system daemon — agent={agent}, workers=P/DCA-GPU/DCA-AUX/DIRECT") + + pools: list[ThreadPoolExecutor] = [] + stage_specs = ( + ("p", "any", 2, "pdca-p"), + ("dca", "gpu", 1, "pdca-dca-gpu"), + ("dca", "aux", 1, "pdca-dca-aux"), + ("direct", "any", 1, "pdca-direct"), + ) + for stage, lane, worker_count, prefix in stage_specs: + pool = ThreadPoolExecutor(max_workers=worker_count, thread_name_prefix=prefix) + pools.append(pool) + for _ in range(worker_count): + 
pool.submit(_worker, stage, lane) + + last_heartbeat = time.monotonic() + try: + while not _shutdown: + time.sleep(1.0) + if not _shutdown and (time.monotonic() - last_heartbeat) >= 5.0: + daemon_heartbeat() + last_heartbeat = time.monotonic() + except KeyboardInterrupt: + pass + finally: + _shutdown = True + if DAEMON_HEARTBEAT_PATH.exists(): + try: + DAEMON_HEARTBEAT_PATH.unlink() + except OSError: + pass + for pool in pools: + pool.shutdown(wait=True) + + print("[daemon] all workers stopped") + + +if __name__ == "__main__": + main() diff --git a/component_system/run_arxiv.py b/component_system/run_arxiv.py new file mode 100644 index 000000000..3595f9f58 --- /dev/null +++ b/component_system/run_arxiv.py @@ -0,0 +1,174 @@ +#!/usr/bin/env python3 +""" +arXiv search/fetch script using the arxiv Python library. +Supports CLI args for query, id_list, max_results, sort, output format, and PDF download. + +In this project the arxiv dependency is provided by uv. Run with: + uv run python component_system/run_arxiv.py --query "machine learning" --max-results 5 + uv run python component_system/run_arxiv.py --id 1605.08386v1 --output json + uv run python component_system/run_arxiv.py --query "transformer" --download-dir ./papers +""" +from __future__ import annotations + +import argparse +import json +import sys +from pathlib import Path + +try: + import arxiv +except ImportError: + print("Install the arxiv package: pip install arxiv", file=sys.stderr) + sys.exit(1) + + +def _sort_criterion(s: str) -> arxiv.SortCriterion: + m = { + "relevance": arxiv.SortCriterion.Relevance, + "submitteddate": arxiv.SortCriterion.SubmittedDate, + "lastupdateddate": arxiv.SortCriterion.LastUpdatedDate, + } + key = s.strip().lower().replace(" ", "") + if key not in m: + raise ValueError(f"Invalid sort_by: {s}. 
Choose: relevance, submittedDate, lastUpdatedDate") + return m[key] + + +def _sort_order(s: str) -> arxiv.SortOrder: + m = { + "ascending": arxiv.SortOrder.Ascending, + "descending": arxiv.SortOrder.Descending, + } + key = s.strip().lower() + if key not in m: + raise ValueError(f"Invalid sort_order: {s}. Choose: ascending, descending") + return m[key] + + +def _result_to_dict(r: arxiv.Result) -> dict: + return { + "entry_id": r.entry_id, + "title": r.title, + "summary": (r.summary or "").strip(), + "authors": [a.name for a in r.authors], + "published": r.published.isoformat() if r.published else None, + "updated": r.updated.isoformat() if r.updated else None, + "primary_category": getattr(r, "primary_category", None) or "", + "categories": getattr(r, "categories", []) or [], + "pdf_url": getattr(r, "pdf_url", None) or "", + "links": [{"href": l.href, "title": getattr(l, "title", None)} for l in (r.links or [])], + } + + +def main() -> int: + parser = argparse.ArgumentParser( + description="Search or fetch arXiv papers via the arxiv Python library.", + formatter_class=argparse.ArgumentDefaultsHelpFormatter, + ) + parser.add_argument( + "--query", + "-q", + type=str, + default="", + help="Search query (e.g. 'transformer' or 'au:smith AND ti:neural'). Ignored if --id is set.", + ) + parser.add_argument( + "--id", + dest="id_list", + type=str, + nargs="+", + default=None, + metavar="ARXIV_ID", + help="One or more arXiv IDs (e.g. 1605.08386v1). 
If set, --query is ignored.", + ) + parser.add_argument( + "--max-results", + "-n", + type=int, + default=10, + help="Maximum number of results to return.", + ) + parser.add_argument( + "--sort-by", + type=str, + default="relevance", + choices=["relevance", "submittedDate", "lastUpdatedDate"], + help="Sort criterion for results.", + ) + parser.add_argument( + "--sort-order", + type=str, + default="descending", + choices=["ascending", "descending"], + help="Sort order.", + ) + parser.add_argument( + "--output", + "-o", + type=str, + default="text", + choices=["text", "json"], + help="Output format: text (one line per paper) or json.", + ) + parser.add_argument( + "--download-dir", + type=str, + default=None, + metavar="DIR", + help="If set, download PDF for each result into this directory.", + ) + parser.add_argument( + "--verbose", + "-v", + action="store_true", + help="Print progress (e.g. download paths).", + ) + args = parser.parse_args() + + if args.id_list: + search = arxiv.Search(id_list=args.id_list, max_results=len(args.id_list) or None) + else: + if not args.query.strip(): + parser.error("Either --query or --id must be provided.") + sort_by = _sort_criterion(args.sort_by) + sort_order = _sort_order(args.sort_order) + search = arxiv.Search( + query=args.query, + max_results=args.max_results, + sort_by=sort_by, + sort_order=sort_order, + ) + + client = arxiv.Client() + results = list(client.results(search)) + + if args.download_dir: + d = Path(args.download_dir) + d.mkdir(parents=True, exist_ok=True) + for r in results: + try: + path = r.download_pdf(dirpath=str(d)) + if args.verbose and path: + print(f"Downloaded: {path}", file=sys.stderr) + except Exception as e: + print(f"Download failed for {r.entry_id}: {e}", file=sys.stderr) + + if args.output == "json": + out = [_result_to_dict(r) for r in results] + print(json.dumps(out, indent=2, ensure_ascii=False)) + else: + for r in results: + print(r.title) + print(f" {r.entry_id} {getattr(r, 'pdf_url', '') or 
''}") + if r.summary: + summary = (r.summary or "").strip() + if len(summary) > 200: + summary = summary[:200] + "..." + print(f" {summary}") + print() + + return 0 + + +if __name__ == "__main__": + sys.exit(main()) diff --git a/component_system/services/workflow.py b/component_system/services/workflow.py new file mode 100644 index 000000000..e9cac3c99 --- /dev/null +++ b/component_system/services/workflow.py @@ -0,0 +1,1490 @@ +from __future__ import annotations + +import json +from typing import Any +import re +import subprocess +from pathlib import Path + +from component_system.config import DEFAULT_BASELINE_BRANCH, PROMOTION_THRESHOLD +from component_system.domain.models import ( + DashboardColumn, + DashboardViewModel, + PlanIdea, + RunStatus, + SeedRecord, + SeedStatus, + StageName, + StageRun, +) +from component_system.repositories.state import ( + BaselineBranchMapRepository, + BaselineMetricsRepository, + RunRepository, + SeedRepository, +) +from component_system.task import ( + COMPONENT_SYSTEM_ROOT, + WORKTREE_ROOT, + get_daemon_status, + move_to_error, + now_ts, + new_run_id, + new_seed_id, + read_task, + write_task, +) + +SUMMARY_MARKERS = { + "p": ("AUTORESEARCH_P_SUMMARY_BEGIN", "AUTORESEARCH_P_SUMMARY_END"), + "dca": ("AUTORESEARCH_DCA_SUMMARY_BEGIN", "AUTORESEARCH_DCA_SUMMARY_END"), +} + +BASELINE_SEED_ID = "__baseline__" + + +class GitCommandError(RuntimeError): + pass + + +class GitService: + def __init__(self) -> None: + pass + + def _run_git(self, *args: str, cwd: Path | None = None) -> str: + try: + result = subprocess.run( + ["git", *args], + cwd=str(cwd) if cwd else None, + capture_output=True, + text=True, + check=True, + ) + except FileNotFoundError as exc: + raise GitCommandError("Git is not installed or not available on PATH.") from exc + except subprocess.CalledProcessError as exc: + stderr = (exc.stderr or exc.stdout or "").strip() + raise GitCommandError(stderr or f"git {' '.join(args)} failed") from exc + return result.stdout.strip() 
+ + def repo_root(self) -> Path: + return Path(self._run_git("rev-parse", "--show-toplevel")) + + def current_head(self) -> str: + return self._run_git("rev-parse", "HEAD") + + def branch_exists(self, branch: str) -> bool: + try: + self._run_git("rev-parse", "--verify", branch) + return True + except GitCommandError: + return False + + def ensure_branch(self, branch: str, start_point: str) -> None: + if not self.branch_exists(branch): + self._run_git("branch", branch, start_point) + + def list_branches(self) -> list[str]: + output = self._run_git("branch", "--format=%(refname:short)") + branches = [line.strip() for line in output.splitlines() if line.strip()] + if not branches: + # Unborn repositories can have HEAD pointing to a branch name even before first commit. + try: + head_branch = self._run_git("symbolic-ref", "--short", "HEAD").strip() + if head_branch: + branches.append(head_branch) + except GitCommandError: + pass + return sorted(set(branches)) + + @staticmethod + def is_seed_specific_branch(branch: str) -> bool: + """True if this branch is the single working branch for a seed (seed_id), not a baseline choice.""" + if branch == BASELINE_SEED_ID: + return True + # One branch per seed: seed- + 6 hex chars, e.g. seed-e57b95 + if branch.startswith("seed-") and len(branch) == 11 and all( + c in "abcdef0123456789" for c in branch[5:] + ): + return True + if branch.startswith("seed/"): + return True # legacy candidate branches, e.g. seed/seed-e57b95 + return False + + def setup_error(self) -> str | None: + try: + self.repo_root() + return None + except GitCommandError as exc: + return str(exc) + + def setup_error_for_branches(self, baseline_branch: str) -> str | None: + try: + root = self.repo_root() + if not baseline_branch: + return "Please select a baseline branch." + if not self.branch_exists(baseline_branch): + return ( + f"Git repo found at {root}, but branch {baseline_branch!r} does not exist yet. " + "Select an existing baseline branch." 
+ ) + return None + except GitCommandError as exc: + return str(exc) + + def ensure_seed_worktrees(self, seed: SeedRecord) -> SeedRecord: + """Ensure the seed worktree exists on the single branch for this seed: seed_id (SSOT).""" + repo_head = self.current_head() + self.ensure_branch(seed.baseline_branch, repo_head) + + seed_worktree = WORKTREE_ROOT / seed.seed_id + if seed_worktree.exists(): + seed.worktree_path = str(seed_worktree) + return seed + # One branch per seed: branch name = seed_id, created from baseline. + try: + self._run_git("worktree", "add", "-B", seed.seed_id, str(seed_worktree), seed.baseline_branch) + except GitCommandError as exc: + # Recover from stale git worktree metadata like: + # "__baseline__ is already checked out at /old/path/__baseline__" + if not self._recover_checked_out_worktree_conflict( + seed.seed_id, seed_worktree, seed.baseline_branch, str(exc) + ): + raise + + seed.worktree_path = str(seed_worktree) + return seed + + @staticmethod + def _extract_checked_out_path(error: str) -> Path | None: + # git message example: fatal: '__baseline__' is already checked out at '/path' + match = re.search(r"already checked out at ['\"]([^'\"]+)['\"]", error) + if not match: + return None + return Path(match.group(1)) + + def _recover_checked_out_worktree_conflict( + self, branch: str, target_worktree: Path, start_point: str, error: str + ) -> bool: + if "already checked out at" not in error: + return False + # First, prune stale registrations from missing worktrees. + try: + self._run_git("worktree", "prune") + except GitCommandError: + pass + conflict_path = self._extract_checked_out_path(error) + if conflict_path is not None and conflict_path != target_worktree: + # If the conflicting worktree still exists, force-remove it from registry. 
+ try: + self._run_git("worktree", "remove", "--force", str(conflict_path)) + except GitCommandError: + pass + try: + self._run_git("worktree", "prune") + except GitCommandError: + pass + self._run_git("worktree", "add", "-B", branch, str(target_worktree), start_point) + return True + + def commit_sha(self, ref: str) -> str: + return self._run_git("rev-parse", "--short", ref) + + def head_sha_at(self, cwd: Path) -> str: + """Return the short commit SHA of HEAD in the given worktree directory.""" + return self._run_git("rev-parse", "--short", "HEAD", cwd=cwd) + + def reset_seed_branch_to(self, seed: SeedRecord, ref: str) -> None: + """Reset the seed worktree's branch to the given ref (e.g. commit before P). + No-op for baseline seed or when worktree is missing.""" + if seed.seed_id == BASELINE_SEED_ID: + return + if not seed.worktree_path: + return + worktree_path = Path(seed.worktree_path) + if not worktree_path.is_dir(): + return + self._run_git("reset", "--hard", ref, cwd=worktree_path) + + def promote_seed_branch( + self, seed: SeedRecord, target_branch: str | None = None + ) -> str: + """Merge the seed's branch (seed_id) into the target branch. Only DCA Action may call this; Plan must never merge. + If target_branch is None, use seed.baseline_branch (e.g. for normal seed promotion). 
For __baseline__ completion, + pass the first user seed's selected branch so the merge goes there instead of a fixed config value.""" + merge_into = target_branch if target_branch is not None else seed.baseline_branch + baseline_worktree = WORKTREE_ROOT / "baseline" + if baseline_worktree.exists(): + try: + self._run_git("worktree", "remove", "--force", str(baseline_worktree)) + except GitCommandError: + pass + self._run_git( + "worktree", + "add", + "--force", + "-B", + merge_into, + str(baseline_worktree), + merge_into, + ) + self._run_git("merge", "--no-edit", seed.seed_id, cwd=baseline_worktree) + return self.commit_sha(merge_into) + + +class WorkflowService: + def __init__( + self, + seed_repo: SeedRepository | None = None, + run_repo: RunRepository | None = None, + branch_map_repo: BaselineBranchMapRepository | None = None, + metrics_repo: BaselineMetricsRepository | None = None, + git_service: GitService | None = None, + ) -> None: + self.seed_repo = seed_repo or SeedRepository() + self.run_repo = run_repo or RunRepository() + self.branch_map_repo = branch_map_repo or BaselineBranchMapRepository() + self.metrics_repo = metrics_repo or BaselineMetricsRepository() + self.git_service = git_service or GitService() + + @staticmethod + def _seed_worktree_path(seed_id: str) -> str: + return str(WORKTREE_ROOT / seed_id) + + @staticmethod + def _baseline_worktree_path() -> str: + return str(WORKTREE_ROOT / BASELINE_SEED_ID) + + def _normalize_seed_runtime_state(self, seed: SeedRecord) -> SeedRecord: + """Clean up legacy persisted seed state that no longer matches runtime rules.""" + if seed.seed_id != BASELINE_SEED_ID: + return seed + expected_worktree = self._baseline_worktree_path() + if seed.worktree_path == expected_worktree: + return seed + seed.worktree_path = expected_worktree + seed.updated_at = now_ts() + self.seed_repo.save(seed) + return seed + + def ensure_seed_worktree_ready(self, seed_id: str) -> SeedRecord: + """Ensure the runtime seed worktree exists; 
recreate only when missing.""" + seed = self.require_seed(seed_id) + if seed.seed_id == BASELINE_SEED_ID: + expected_worktree = self._baseline_worktree_path() + if Path(expected_worktree).is_dir(): + if seed.worktree_path != expected_worktree: + seed.worktree_path = expected_worktree + seed.updated_at = now_ts() + self.seed_repo.save(seed) + return seed + seed = self.git_service.ensure_seed_worktrees(seed) + seed.updated_at = now_ts() + self.seed_repo.save(seed) + commit_sha = "" + try: + commit_sha = self.git_service.commit_sha(seed.baseline_branch) + except GitCommandError: + pass + self.seed_repo.append_event( + seed.seed_id, + "seed.worktree_ready", + "Recreated missing baseline worktree before the run started.", + commit_sha=commit_sha or None, + ) + return seed + expected_worktree = self._seed_worktree_path(seed.seed_id) + if Path(expected_worktree).is_dir(): + if seed.worktree_path != expected_worktree: + seed.worktree_path = expected_worktree + seed.updated_at = now_ts() + self.seed_repo.save(seed) + return seed + seed = self.git_service.ensure_seed_worktrees(seed) + seed.updated_at = now_ts() + self.seed_repo.save(seed) + commit_sha = "" + try: + commit_sha = self.git_service.commit_sha(seed.seed_id) + except GitCommandError: + pass + self.seed_repo.append_event( + seed.seed_id, + "seed.worktree_ready", + "Recreated missing seed worktree before the run started.", + commit_sha=commit_sha or None, + ) + return seed + + def _preferred_baseline_branch(self) -> str: + setup_error = self.git_service.setup_error() + if setup_error is not None: + return DEFAULT_BASELINE_BRANCH + try: + branches = [ + branch + for branch in self.git_service.list_branches() + if not self.git_service.is_seed_specific_branch(branch) + ] + except GitCommandError: + return DEFAULT_BASELINE_BRANCH + if branches and DEFAULT_BASELINE_BRANCH in branches: + return DEFAULT_BASELINE_BRANCH + return branches[0] if branches else DEFAULT_BASELINE_BRANCH + + def 
_first_user_seed_baseline_branch(self) -> str | None: + """Return the baseline_branch of the earliest-created user seed (excluding __baseline__), or None.""" + user_seeds = [s for s in self.seed_repo.list() if s.seed_id != BASELINE_SEED_ID] + if not user_seeds: + return None + first = min(user_seeds, key=lambda s: s.created_at) + return first.baseline_branch or None + + def _enqueue_plan_run(self, seed: SeedRecord, event_kind: str = "p.queued", event_message: str = "Queued Plan stage for the seed.") -> StageRun: + run = StageRun( + run_id=new_run_id("p"), + seed_id=seed.seed_id, + stage=StageName.p, + status=RunStatus.queued, + task_id=new_run_id("task-p"), + created_at=now_ts(), + updated_at=now_ts(), + ) + seed.status = SeedStatus.queued + seed.updated_at = now_ts() + seed.latest_run_id = run.run_id + seed.last_error = None + self.seed_repo.save(seed) + self.run_repo.save(run) + self.seed_repo.append_event(seed.seed_id, event_kind, event_message) + write_task( + "p", + { + "seed_id": seed.seed_id, + "run_id": run.run_id, + "prompt": seed.prompt, + "worktree_path": seed.worktree_path, + }, + task_id=run.task_id, + ) + return run + + def _release_seeds_waiting_for_baseline(self, branch: str) -> None: + """Release seeds that were waiting for baseline result on the given branch.""" + branch_metrics = self.metrics_repo.get_for_branch(branch) + if not branch_metrics or branch_metrics.get("last_val_bpb") is None: + return + waiting_seeds = sorted(self.seed_repo.list(), key=lambda item: item.created_at) + for seed in waiting_seeds: + if seed.seed_id == BASELINE_SEED_ID: + continue + if seed.baseline_branch != branch: + continue + if seed.status is not SeedStatus.queued or seed.latest_run_id is not None: + continue + self._enqueue_plan_run( + seed, + event_kind="p.released", + event_message="Baseline is ready; queued Plan stage for the waiting seed.", + ) + + @staticmethod + def _status_from_dca_signal(signal: str) -> SeedStatus: + """Centralized mapping from DCA signal 
to terminal seed status.""" + if signal == "positive_signal": + return SeedStatus.promoted + if signal == "error": + return SeedStatus.failed + return SeedStatus.passed + + def _reconcile_seed_status_signal(self, seed: SeedRecord) -> bool: + """ + Auto-heal known inconsistent terminal combinations from historical data. + + Returns True when the seed was updated and persisted. + """ + if seed.status is SeedStatus.passed and seed.latest_signal == "error": + seed.status = SeedStatus.failed + seed.updated_at = now_ts() + self.seed_repo.save(seed) + self.seed_repo.append_event( + seed.seed_id, + "seed.reconciled", + "Reconciled inconsistent terminal state (passed + error) to failed.", + ) + return True + return False + + def create_seed( + self, + prompt: str, + baseline_branch: str | None = None, + ralph_loop_enabled: bool = False, + ) -> SeedRecord: + seed_id = new_seed_id() + selected_baseline = (baseline_branch or DEFAULT_BASELINE_BRANCH).strip() + seed = SeedRecord( + seed_id=seed_id, + prompt=prompt.strip(), + status=SeedStatus.draft, + created_at=now_ts(), + updated_at=now_ts(), + baseline_branch=selected_baseline, + worktree_path=self._seed_worktree_path(seed_id), + ralph_loop_enabled=ralph_loop_enabled, + ) + self.seed_repo.save(seed) + self.branch_map_repo.set_branch_for_seed(seed_id, selected_baseline) + try: + pass # branch seed_id is created when Plan is queued (ensure_seed_worktrees) + except GitCommandError: + # Keep seed creation non-blocking; branch creation will be retried at P queue time. 
+ pass + self.seed_repo.append_event(seed.seed_id, "seed.created", "Seed created from prompt.") + if ralph_loop_enabled: + self.seed_repo.append_event( + seed.seed_id, + "ralph.enabled", + "Ralph loop enabled; Plan will auto-requeue after each DCA completion.", + ) + return seed + + def create_direct_code_seed(self, prompt: str) -> tuple[SeedRecord, StageRun]: + cleaned_prompt = prompt.strip() + if not cleaned_prompt: + raise RuntimeError("Prompt cannot be empty.") + baseline_branch = self._preferred_baseline_branch() + seed_id = new_seed_id("direct") + now = now_ts() + run = StageRun( + run_id=new_run_id("direct"), + seed_id=seed_id, + stage=StageName.direct, + status=RunStatus.queued, + task_id=new_run_id("task-direct"), + created_at=now, + updated_at=now, + ) + seed = SeedRecord( + seed_id=seed_id, + prompt=cleaned_prompt, + status=SeedStatus.adapting, + created_at=now, + updated_at=now, + baseline_branch=baseline_branch, + worktree_path=str(COMPONENT_SYSTEM_ROOT.parent), + latest_run_id=run.run_id, + plan=PlanIdea( + title="Direct code agent", + target_component="project_root", + description="Direct code agent run requested from the dashboard and executed from the project root.", + ), + ) + self.seed_repo.save(seed) + self.branch_map_repo.set_branch_for_seed(seed_id, baseline_branch) + self.run_repo.save(run) + self.seed_repo.append_event(seed.seed_id, "seed.created", "Seed created from direct code agent prompt.") + self.seed_repo.append_event( + seed.seed_id, + "direct_code.queued", + "Queued direct code agent run from the project root.", + run_id=run.run_id, + ) + write_task( + "direct", + { + "seed_id": seed.seed_id, + "run_id": run.run_id, + "prompt": seed.prompt, + "worktree_path": None, + }, + task_id=run.task_id, + ) + return seed, run + + def _get_or_create_baseline_seed(self) -> SeedRecord: + """Return the baseline seed used to establish initial val_bpb; create and persist it if missing.""" + seed = self.seed_repo.get(BASELINE_SEED_ID) + if seed is not 
None: + return self._normalize_seed_runtime_state(seed) + branch = self._first_user_seed_baseline_branch() or DEFAULT_BASELINE_BRANCH + seed = SeedRecord( + seed_id=BASELINE_SEED_ID, + prompt="Baseline measurement: run training on current code without changes.", + status=SeedStatus.draft, + created_at=now_ts(), + updated_at=now_ts(), + baseline_branch=branch, + worktree_path=self._baseline_worktree_path(), + ralph_loop_enabled=False, + ) + self.seed_repo.save(seed) + self.branch_map_repo.set_branch_for_seed(BASELINE_SEED_ID, branch) + self.seed_repo.append_event( + seed.seed_id, + "seed.created", + "Baseline seed created for initial measurement.", + ) + return seed + + def ensure_baseline_result(self) -> None: + """ + If there is no baseline result (last_val_bpb) for the baseline seed's branch, ensure a baseline seed exists and + queue its DCA so the first run establishes the baseline. Idempotent; safe to call + before queue_p for any user seed. + """ + seed = self._get_or_create_baseline_seed() + branch_metrics = self.metrics_repo.get_for_branch(seed.baseline_branch) + if branch_metrics and branch_metrics.get("last_val_bpb") is not None: + return + if seed.status in (SeedStatus.dca_queued, SeedStatus.adapting, SeedStatus.running): + return + if seed.status in (SeedStatus.passed, SeedStatus.failed, SeedStatus.promoted): + branch_metrics = self.metrics_repo.get_for_branch(seed.baseline_branch) + if branch_metrics and branch_metrics.get("last_val_bpb") is not None: + return + setup_error = self.git_service.setup_error() + if setup_error is not None: + return + try: + self.git_service.ensure_branch(seed.baseline_branch, self.git_service.current_head()) + except GitCommandError: + return + setup_error = self.git_service.setup_error_for_branches(seed.baseline_branch) + if setup_error is not None: + return + seed.status = SeedStatus.generated + seed.plan = PlanIdea(title="Baseline", description="No changes; measure current baseline.") + seed.updated_at = now_ts() + 
self.seed_repo.save(seed) + self.seed_repo.append_event( + seed.seed_id, + "baseline.queued", + "Queued DCA to establish baseline result before first seed.", + ) + self.queue_dca(seed.seed_id) + + def set_ralph_loop(self, seed_id: str, enabled: bool) -> SeedRecord: + seed = self.require_seed(seed_id) + if seed.ralph_loop_enabled == enabled: + return seed + seed.ralph_loop_enabled = enabled + seed.updated_at = now_ts() + self.seed_repo.save(seed) + if enabled: + self.seed_repo.append_event( + seed.seed_id, + "ralph.enabled", + "Ralph loop enabled; Plan will auto-requeue after each DCA completion.", + ) + else: + self.seed_repo.append_event(seed.seed_id, "ralph.disabled", "Ralph loop disabled by user.") + return seed + + def can_edit_seed_prompt(self, seed: SeedRecord) -> bool: + return seed.status in {SeedStatus.draft, SeedStatus.queued} + + def update_seed_prompt(self, seed_id: str, prompt: str) -> SeedRecord: + seed = self.require_seed(seed_id) + if not self.can_edit_seed_prompt(seed): + raise RuntimeError("Seed prompt can only be edited before Plan starts.") + updated_prompt = prompt.strip() + if not updated_prompt: + raise RuntimeError("Prompt cannot be empty.") + if updated_prompt == seed.prompt: + return seed + seed.prompt = updated_prompt + seed.updated_at = now_ts() + self.seed_repo.save(seed) + self.seed_repo.append_event(seed.seed_id, "seed.updated", "Seed prompt was edited before execution.") + return seed + + def queue_p(self, seed_id: str) -> StageRun | None: + seed = self.require_seed(seed_id) + branch_metrics = self.metrics_repo.get_for_branch(seed.baseline_branch) if seed_id != BASELINE_SEED_ID else None + has_baseline = branch_metrics is not None and branch_metrics.get("last_val_bpb") is not None + if seed_id != BASELINE_SEED_ID and not has_baseline: + self.ensure_baseline_result() + branch_metrics = self.metrics_repo.get_for_branch(seed.baseline_branch) + has_baseline = branch_metrics is not None and branch_metrics.get("last_val_bpb") is not None + 
if not has_baseline: + if not (seed.status is SeedStatus.queued and seed.latest_run_id is None): + seed.status = SeedStatus.queued + seed.updated_at = now_ts() + seed.latest_run_id = None + seed.last_error = None + self.seed_repo.save(seed) + self.seed_repo.append_event( + seed.seed_id, + "p.waiting_for_baseline", + "Baseline run is still in progress; Plan will queue after baseline finishes.", + ) + return None + setup_error = self.git_service.setup_error() + if setup_error is not None: + raise RuntimeError(setup_error) + try: + self.git_service.ensure_branch(seed.baseline_branch, self.git_service.current_head()) + except GitCommandError: + pass + setup_error = self.git_service.setup_error_for_branches(seed.baseline_branch) + if setup_error is not None: + raise RuntimeError(setup_error) + return self._enqueue_plan_run(seed) + + def queue_dca( + self, + seed_id: str, + merge_resolution: bool = False, + metrics_recovery: bool = False, + source_run_id: str | None = None, + source_stdout_log_path: str | None = None, + source_stderr_log_path: str | None = None, + last_metrics: dict[str, Any] | None = None, + last_summary: dict[str, Any] | None = None, + commit_sha_before_p: str | None = None, + ) -> StageRun: + seed = self.require_seed(seed_id) + if not metrics_recovery and seed.status in {SeedStatus.draft, SeedStatus.queued, SeedStatus.planning}: + raise RuntimeError("Run Plan first. 
Do-Check-Action is available after code is generated into the seed branch.") + if not metrics_recovery: + setup_error = self.git_service.setup_error_for_branches(seed.baseline_branch) + if setup_error is not None: + raise RuntimeError(setup_error) + run = StageRun( + run_id=new_run_id("dca"), + seed_id=seed.seed_id, + stage=StageName.dca, + status=RunStatus.queued, + task_id=new_run_id("task-dca"), + created_at=now_ts(), + updated_at=now_ts(), + ) + if seed.seed_id != BASELINE_SEED_ID: + try: + # Ref to restore worktree to on negative signal (commit before P when from finish_p_run, else baseline). + run.summary["commit_sha_before_p"] = ( + commit_sha_before_p + if commit_sha_before_p is not None + else self.git_service.commit_sha(seed.baseline_branch) + ) + except GitCommandError: + pass + seed.status = SeedStatus.dca_queued + seed.updated_at = now_ts() + seed.latest_run_id = run.run_id + seed.last_error = None + self.seed_repo.save(seed) + self.run_repo.save(run) + self.seed_repo.append_event( + seed.seed_id, + "dca.queued", + "Queued DCA for merge conflict resolution." + if merge_resolution + else "Queued DCA for metrics recovery from saved logs." 
+ if metrics_recovery + else "Queued DCA stage for the seed.", + ) + payload = { + "seed_id": seed.seed_id, + "run_id": run.run_id, + "prompt": seed.prompt, + "worktree_path": seed.worktree_path, + "merge_resolution": merge_resolution, + "metrics_recovery": metrics_recovery, + } + if merge_resolution: + payload["baseline_branch"] = seed.baseline_branch + if last_metrics is not None: + payload["last_metrics"] = last_metrics + if last_summary is not None: + payload["last_summary"] = last_summary + if metrics_recovery: + payload["source_run_id"] = source_run_id + payload["source_stdout_log_path"] = source_stdout_log_path + payload["source_stderr_log_path"] = source_stderr_log_path + payload["worktree_path"] = None + write_task("dca", payload, task_id=run.task_id) + return run + + def require_seed(self, seed_id: str) -> SeedRecord: + seed = self.seed_repo.get(seed_id) + if seed is None: + raise KeyError(f"Unknown seed_id={seed_id}") + return self._normalize_seed_runtime_state(seed) + + def require_run(self, run_id: str) -> StageRun: + run = self.run_repo.get(run_id) + if run is None: + raise KeyError(f"Unknown run_id={run_id}") + return run + + def is_seed_eligible_for_stage(self, seed_id: str | None, stage: str) -> bool: + """True if this seed is in a state that allows the given stage to run (used at claim time to avoid P/DCA races).""" + if not seed_id: + return False + seed = self.seed_repo.get(seed_id) + if seed is None: + return False + seed = self._normalize_seed_runtime_state(seed) + if stage == "p": + return seed.status not in (SeedStatus.adapting, SeedStatus.running, SeedStatus.dca_queued) + if stage == "dca": + return seed.status is not SeedStatus.planning + if stage == "direct": + return True + return False + + def mark_run_started(self, seed_id: str, run_id: str) -> tuple[SeedRecord, StageRun]: + seed = self.require_seed(seed_id) + run = self.require_run(run_id) + run.status = RunStatus.running + run.updated_at = now_ts() + if run.stage is StageName.p: + 
setup_error = self.git_service.setup_error() + if setup_error is not None: + raise RuntimeError(setup_error) + try: + self.git_service.ensure_branch(seed.baseline_branch, self.git_service.current_head()) + except GitCommandError: + pass + setup_error = self.git_service.setup_error_for_branches(seed.baseline_branch) + if setup_error is not None: + raise RuntimeError(setup_error) + seed = self.ensure_seed_worktree_ready(seed.seed_id) + if seed.worktree_path: + worktree_path = Path(seed.worktree_path) + if worktree_path.is_dir(): + try: + run.summary["commit_sha_before_p"] = self.git_service.head_sha_at( + worktree_path + ) + except GitCommandError: + pass + seed.status = SeedStatus.planning + event_kind = "p.started" + event_message = "Plan stage started in the candidate worktree." + else: + seed.status = SeedStatus.adapting + event_kind = "dca.started" + event_message = ( + "Baseline measurement started in the baseline worktree." + if seed.seed_id == BASELINE_SEED_ID + else "DCA stage started in the seed worktree." 
+ ) + seed.updated_at = now_ts() + self.run_repo.save(run) + self.seed_repo.save(seed) + self.seed_repo.append_event(seed.seed_id, event_kind, event_message, run_id=run_id) + return seed, run + + def mark_direct_code_run_started(self, seed_id: str, run_id: str) -> tuple[SeedRecord, StageRun]: + seed = self.require_seed(seed_id) + run = self.require_run(run_id) + run.status = RunStatus.running + run.updated_at = now_ts() + seed.status = SeedStatus.adapting + seed.updated_at = now_ts() + self.run_repo.save(run) + self.seed_repo.save(seed) + self.seed_repo.append_event( + seed.seed_id, + "direct_code.started", + "Direct code agent started from the project root.", + run_id=run_id, + ) + return seed, run + + def mark_direct_code_run_failed( + self, + seed_id: str, + run_id: str, + error: str, + task_path: Path | None = None, + prompt_path: str | None = None, + log_path: str | None = None, + stderr_log_path: str | None = None, + ) -> None: + seed = self.require_seed(seed_id) + run = self.require_run(run_id) + run.status = RunStatus.failed + run.updated_at = now_ts() + run.error = error + if prompt_path is not None: + run.prompt_path = prompt_path + if log_path is not None: + run.log_path = log_path + if stderr_log_path is not None: + run.stderr_log_path = stderr_log_path + seed.status = SeedStatus.failed + seed.updated_at = now_ts() + seed.last_error = error + self.run_repo.save(run) + self.seed_repo.save(seed) + self.seed_repo.append_event(seed.seed_id, "direct_code.failed", error, run_id=run_id) + if task_path is not None and task_path.exists(): + move_to_error(task_path) + + def mark_run_failed( + self, + seed_id: str, + run_id: str, + error: str, + task_path: Path | None = None, + prompt_path: str | None = None, + log_path: str | None = None, + stderr_log_path: str | None = None, + ) -> None: + seed = self.require_seed(seed_id) + run = self.require_run(run_id) + task_payload: dict[str, Any] = {} + if task_path is not None and task_path.exists(): + task_payload = 
read_task(task_path) + run.status = RunStatus.failed + run.updated_at = now_ts() + run.error = error + if prompt_path is not None: + run.prompt_path = prompt_path + if log_path is not None: + run.log_path = log_path + if stderr_log_path is not None: + run.stderr_log_path = stderr_log_path + seed.status = SeedStatus.failed + seed.updated_at = now_ts() + seed.last_error = error + self.run_repo.save(run) + self.seed_repo.save(seed) + self.seed_repo.append_event(seed.seed_id, f"{run.stage.value}.failed", error, run_id=run_id) + if ( + run.stage is StageName.dca + and seed.ralph_loop_enabled + and seed.seed_id != BASELINE_SEED_ID + and task_payload.get("merge_resolution") is not True + and task_payload.get("metrics_recovery") is not True + ): + try: + self.queue_p(seed.seed_id) + self.seed_repo.append_event( + seed.seed_id, + "ralph.requeued", + "Ralph loop queued the next Plan run after failed DCA.", + ) + except (RuntimeError, GitCommandError) as exc: + self.seed_repo.append_event( + seed.seed_id, + "ralph.requeue_failed", + f"Ralph loop could not queue the next Plan run after failed DCA: {exc}", + ) + if task_path is not None and task_path.exists(): + move_to_error(task_path) + + def finish_direct_code_run( + self, + seed_id: str, + run_id: str, + stdout: str, + stderr: str | None = None, + log_path: str | None = None, + stderr_log_path: str | None = None, + prompt_path: str | None = None, + ) -> StageRun: + seed = self.require_seed(seed_id) + run = self.require_run(run_id) + run.status = RunStatus.succeeded + run.updated_at = now_ts() + run.log_path = log_path + run.stderr_log_path = stderr_log_path + run.prompt_path = prompt_path + run.summary = { + "mode": "direct_code_agent", + "cwd": str(COMPONENT_SYSTEM_ROOT.parent), + "stdout_bytes": len(stdout.encode("utf-8", errors="replace")), + "stderr_bytes": len((stderr or "").encode("utf-8", errors="replace")), + } + run.signal = "direct_code_completed" + seed.status = SeedStatus.passed + seed.updated_at = now_ts() + 
seed.latest_signal = run.signal + seed.last_error = None + self.run_repo.save(run) + self.seed_repo.save(seed) + self.seed_repo.append_event( + seed.seed_id, + "direct_code.completed", + "Direct code agent completed from the project root.", + run_id=run_id, + ) + return run + + def finish_p_run( + self, + seed_id: str, + run_id: str, + stdout: str, + log_path: str | None = None, + stderr_log_path: str | None = None, + prompt_path: str | None = None, + ) -> StageRun: + seed = self.require_seed(seed_id) + run = self.require_run(run_id) + summary = self.extract_summary(stdout, StageName.p) or {} + seed.plan = PlanIdea( + title=summary.get("idea", "Generated plan"), + target_component=summary.get("target_component", "model"), + description=summary.get("description", ""), + source_refs=summary.get("source_refs", []), + commit_sha=summary.get("commit_sha"), + ) + # Single branch per seed (SSOT): worktree is already on seed_id branch. + commit_sha = self.git_service.commit_sha(seed.seed_id) + run.status = RunStatus.succeeded + run.updated_at = now_ts() + run.log_path = log_path + run.stderr_log_path = stderr_log_path + run.prompt_path = prompt_path + # Preserve run.summary fields set earlier (e.g. commit_sha_before_p) when merging P output. 
+ run.summary = run.summary | summary | {"commit_sha": commit_sha} + seed.status = SeedStatus.generated + seed.updated_at = now_ts() + self.run_repo.save(run) + self.seed_repo.save(seed) + self.seed_repo.append_event( + seed.seed_id, + "p.completed", + "Plan completed on seed branch.", + commit_sha=commit_sha, + ) + self.queue_dca( + seed.seed_id, + commit_sha_before_p=run.summary.get("commit_sha_before_p"), + ) + return run + + @staticmethod + def combine_output(stdout: str, stderr: str | None = None) -> str: + if stdout and stderr: + return f"{stdout}\n{stderr}" + return stdout or stderr or "" + + def finish_dca_run( + self, + seed_id: str, + run_id: str, + stdout: str, + stderr: str | None = None, + log_path: str | None = None, + stderr_log_path: str | None = None, + prompt_path: str | None = None, + metrics_recovery: bool = False, + merge_resolution: bool = False, + ) -> StageRun: + seed = self.require_seed(seed_id) + run = self.require_run(run_id) + branch_metrics = self.metrics_repo.get_for_branch(seed.baseline_branch) + last_val_bpb = float(branch_metrics["last_val_bpb"]) if branch_metrics and branch_metrics.get("last_val_bpb") is not None else None + output_text = self.combine_output(stdout, stderr) + summary = self.extract_summary(output_text, StageName.dca) or {} + metrics = self.extract_dca_metrics(output_text, summary) + signal = self.evaluate_signal(metrics, last_val_bpb, PROMOTION_THRESHOLD) + commit_sha = summary.get("commit_sha") + if not (isinstance(commit_sha, str) and commit_sha.strip()): + try: + commit_sha = self.git_service.commit_sha(seed.seed_id) + except GitCommandError: + commit_sha = "" + run.status = RunStatus.succeeded + run.updated_at = now_ts() + run.log_path = log_path + run.stderr_log_path = stderr_log_path + run.prompt_path = prompt_path + # Preserve runner-set keys (e.g. 
commit_sha_before_p) so negative-signal restore can run + preserved = {k: run.summary[k] for k in ("commit_sha_before_p",) if run.summary and k in run.summary} + run.summary = summary | {"commit_sha": commit_sha} | preserved + run.metrics = metrics + run.signal = signal + seed.updated_at = now_ts() + if signal == "error" and not metrics_recovery: + run.summary = run.summary | {"metrics_recovery_queued": True} + self.run_repo.save(run) + self.seed_repo.save(seed) + self.seed_repo.append_event( + seed.seed_id, + "dca.metrics_recovery_queued", + "DCA completed without recoverable metrics in the structured report; queued a follow-up DCA to inspect saved logs.", + run_id=run_id, + ) + self.queue_dca( + seed.seed_id, + metrics_recovery=True, + source_run_id=run_id, + source_stdout_log_path=log_path, + source_stderr_log_path=stderr_log_path, + ) + return run + seed.latest_metrics = metrics + seed.latest_signal = signal + terminal_status = self._status_from_dca_signal(signal) + merge_commit_sha = None # set when seed branch is successfully merged into baseline + if seed.seed_id == BASELINE_SEED_ID and last_val_bpb is None: + if "val_bpb" not in metrics: + seed.status = SeedStatus.failed + event_message = ( + "Baseline metrics recovery could not recover metrics; marked as failed." + if metrics_recovery + else "Baseline measurement completed without metrics; marked as failed." + ) + self.run_repo.save(run) + self.seed_repo.save(seed) + self.seed_repo.append_event( + seed.seed_id, + "dca.completed", + event_message, + signal=signal, + metrics=metrics, + ) + return run + target_branch = self._first_user_seed_baseline_branch() or seed.baseline_branch + # Only positive_signal is merged into the per-seed baseline branch; record baseline value otherwise. 
+ if signal != "positive_signal": + self.metrics_repo.update_for_branch( + target_branch, + {"last_val_bpb": metrics["val_bpb"]}, + ) + seed.status = terminal_status + self.run_repo.save(run) + self.seed_repo.save(seed) + self.seed_repo.append_event( + seed.seed_id, + "dca.completed", + "Baseline measurement completed (no promotion); not merged into baseline branch.", + signal=signal, + metrics=metrics, + ) + return run + try: + merge_commit_sha = self.git_service.promote_seed_branch(seed, target_branch=target_branch) + self.metrics_repo.update_for_branch( + target_branch, + { + "last_val_bpb": metrics["val_bpb"], + "promoted_branch": seed.seed_id, + "promoted_idea": "Initial baseline adaptation", + "promoted_at": summary.get("completed_at"), + "commit_sha": merge_commit_sha, + }, + ) + seed.status = SeedStatus.passed + event_message = f"Baseline measurement completed and __baseline__ was merged into {target_branch}; waiting seeds can now start Plan." + self.run_repo.save(run) + self.seed_repo.save(seed) + self.seed_repo.append_event( + seed.seed_id, + "dca.completed", + event_message, + signal=signal, + metrics=metrics, + commit_sha=merge_commit_sha, + ) + self._release_seeds_waiting_for_baseline(target_branch) + return run + except GitCommandError as merge_err: + tried_sha = commit_sha or "" + try: + tried_sha = self.git_service.commit_sha(seed.seed_id) + except GitCommandError: + pass + self.seed_repo.append_event( + seed.seed_id, + "dca.merge_failed", + f"Merge into baseline failed: {merge_err}. 
Queued a new DCA run to resolve conflicts.", + commit_sha=tried_sha or None, + target_branch=target_branch, + ) + if not merge_resolution: + self.queue_dca( + seed.seed_id, + merge_resolution=True, + last_metrics=metrics, + last_summary=summary, + ) + seed.status = SeedStatus.dca_queued + seed.updated_at = now_ts() + self.seed_repo.save(seed) + self.run_repo.save(run) + self.seed_repo.append_event( + seed.seed_id, + "dca.completed", + "Baseline measurement completed but merge failed; conflict-resolution DCA queued.", + signal=signal, + metrics=metrics, + ) + return run + self.metrics_repo.update_for_branch( + target_branch, + { + "last_val_bpb": metrics["val_bpb"], + "promoted_branch": seed.seed_id, + "promoted_idea": "Initial baseline adaptation", + "promoted_at": summary.get("completed_at"), + }, + ) + seed.status = SeedStatus.passed + self.seed_repo.save(seed) + self.run_repo.save(run) + self.seed_repo.append_event( + seed.seed_id, + "dca.completed", + "Baseline measurement completed; merge into baseline branch failed again after resolution run (loop avoided). Baseline metrics recorded; manual merge may be needed.", + signal=signal, + metrics=metrics, + ) + self._release_seeds_waiting_for_baseline(target_branch) + return run + if terminal_status is SeedStatus.promoted: + try: + merge_commit_sha = self.git_service.promote_seed_branch(seed) + self.metrics_repo.update_for_branch( + seed.baseline_branch, + { + "last_val_bpb": metrics["val_bpb"], + "promoted_branch": seed.seed_id, + "promoted_idea": seed.plan.title if seed.plan else seed.prompt[:80], + "promoted_at": summary.get("completed_at"), + "commit_sha": merge_commit_sha, + }, + ) + seed.status = terminal_status + event_message = "DCA succeeded and seed branch was promoted into baseline." 
+ except GitCommandError as merge_err: + tried_sha = commit_sha or "" + try: + tried_sha = self.git_service.commit_sha(seed.seed_id) + except GitCommandError: + pass + self.seed_repo.append_event( + seed.seed_id, + "dca.merge_failed", + ( + f"Merge into baseline failed: {merge_err}. Queued a new DCA run to resolve conflicts." + if not merge_resolution + else f"Merge into baseline failed again after a conflict-resolution DCA: {merge_err}. " + "Ralph can proceed with the next Plan run." + ), + commit_sha=tried_sha or None, + target_branch=seed.baseline_branch, + ) + if not merge_resolution: + self.queue_dca( + seed.seed_id, + merge_resolution=True, + last_metrics=metrics, + last_summary=summary, + ) + seed.status = SeedStatus.dca_queued + seed.updated_at = now_ts() + self.seed_repo.save(seed) + self.run_repo.save(run) + self.seed_repo.append_event( + seed.seed_id, + "dca.completed", + "DCA run completed but merge failed; conflict-resolution DCA queued.", + signal=signal, + metrics=metrics, + ) + return run + # Resolution run also failed to merge; avoid infinite resolution loop and continue Ralph. 
+ seed.status = SeedStatus.generated + seed.updated_at = now_ts() + self.seed_repo.save(seed) + self.run_repo.save(run) + self.seed_repo.append_event( + seed.seed_id, + "dca.completed", + "Conflict-resolution DCA completed but merge still failed; proceeding to next Plan run.", + signal=signal, + metrics=metrics, + ) + if seed.ralph_loop_enabled: + try: + self.queue_p(seed.seed_id) + self.seed_repo.append_event( + seed.seed_id, + "ralph.requeued", + "Ralph loop queued the next Plan run after unresolved merge conflict.", + ) + except (RuntimeError, GitCommandError) as exc: + self.seed_repo.append_event( + seed.seed_id, + "ralph.requeue_failed", + f"Ralph loop could not queue the next Plan run after unresolved merge conflict: {exc}", + ) + return run + elif terminal_status is SeedStatus.failed: + seed.status = terminal_status + event_message = ( + "DCA metrics recovery could not recover metrics; marked as failed." + if metrics_recovery + else "DCA completed but metrics were missing; marked as failed." + ) + else: + seed.status = terminal_status + event_message = "DCA completed without promotion." 
+ self.run_repo.save(run) + self.seed_repo.save(seed) + event_commit_sha = merge_commit_sha if merge_commit_sha else run.summary.get("commit_sha") + self.seed_repo.append_event( + seed.seed_id, + "dca.completed", + event_message, + signal=signal, + metrics=metrics, + **({"commit_sha": event_commit_sha} if event_commit_sha else {}), + ) + if ( + seed.ralph_loop_enabled + and signal in ("negative_signal", "neutral", "error") + and not merge_resolution + and not metrics_recovery + and seed.seed_id != BASELINE_SEED_ID + ): + ref = run.summary.get("commit_sha_before_p") + if ref: + try: + self.git_service.reset_seed_branch_to(seed, ref) + self.seed_repo.append_event( + seed.seed_id, + "ralph.worktree_restored", + "Restored seed worktree to commit before P for next Plan.", + commit_sha=ref, + ) + except GitCommandError as exc: + self.seed_repo.append_event( + seed.seed_id, + "ralph.worktree_restore_failed", + f"Could not restore seed worktree to commit before P: {exc}", + commit_sha=ref, + ) + if seed.ralph_loop_enabled: + try: + self.queue_p(seed.seed_id) + self.seed_repo.append_event( + seed.seed_id, + "ralph.requeued", + "Ralph loop queued the next Plan run.", + ) + except (RuntimeError, GitCommandError) as exc: + self.seed_repo.append_event( + seed.seed_id, + "ralph.requeue_failed", + f"Ralph loop could not queue the next Plan run: {exc}", + ) + return run + + def build_dashboard(self, selected_seed_id: str | None = None) -> DashboardViewModel: + seeds = self.seed_repo.list() + selected_seed = self.seed_repo.get(selected_seed_id) if selected_seed_id else None + baseline_metrics_by_branch = self.metrics_repo.get_all() + available_branches: list[str] = [] + setup_error = self.git_service.setup_error() + if setup_error is None: + try: + all_branches = self.git_service.list_branches() + if not all_branches: + setup_error = "No local branches found yet. Create an initial commit/branch, then reload." 
+ else: + available_branches = [ + b for b in all_branches + if not self.git_service.is_seed_specific_branch(b) + ] + # Use only branches that exist in the repo; do not add DEFAULT_BASELINE_BRANCH + # if it does not exist, so the dropdown never shows a non-existent branch. + except GitCommandError as exc: + setup_error = str(exc) + # Default to first existing branch so the selected value is always valid. + default_baseline_branch = (available_branches[0] if available_branches else DEFAULT_BASELINE_BRANCH) or "master" + status_column_map = { + SeedStatus.draft: "seedInbox", + SeedStatus.queued: "seedInbox", + SeedStatus.planning: "generated", + SeedStatus.generated: "generated", + SeedStatus.dca_queued: "generated", + SeedStatus.adapting: "activeDca", + SeedStatus.running: "activeDca", + SeedStatus.passed: "completed", + SeedStatus.failed: "completed", + SeedStatus.promoted: "completed", + } + seeds_by_column: dict[str, list[SeedRecord]] = { + "seedInbox": [], + "generated": [], + "activeDca": [], + "completed": [], + } + for seed in seeds: + self._reconcile_seed_status_signal(seed) + column_id = status_column_map.get(seed.status, "seedInbox") + seeds_by_column[column_id].append(seed) + columns = [ + DashboardColumn( + id="seedInbox", + title="Seed", + description="New prompts and queued planning work.", + seeds=seeds_by_column["seedInbox"], + ), + DashboardColumn( + id="generated", + title="Plan", + description="Planning and generated code ready for Do-Check-Action.", + seeds=seeds_by_column["generated"], + ), + DashboardColumn( + id="activeDca", + title="Do-Check-Action", + description="Adapting, fixing, and running the seed run.", + seeds=seeds_by_column["activeDca"], + ), + DashboardColumn( + id="completed", + title="Completed", + description="Finished runs; promoted seeds merged into baseline.", + seeds=seeds_by_column["completed"], + ), + ] + return DashboardViewModel( + setup_error=setup_error, + baseline_metrics_by_branch=baseline_metrics_by_branch, + 
default_baseline_branch=default_baseline_branch, + available_branches=available_branches, + seed_count=len(seeds), + columns=columns, + selected_seed=selected_seed, + daemon_status=get_daemon_status(), + ) + + def seed_detail(self, seed_id: str) -> dict[str, object]: + seed = self.require_seed(seed_id) + expected_worktree = ( + self._baseline_worktree_path() + if seed.seed_id == BASELINE_SEED_ID + else self._seed_worktree_path(seed.seed_id) + ) + needs_save = False + if expected_worktree is not None and not seed.worktree_path: + seed.worktree_path = expected_worktree + needs_save = True + if needs_save: + seed.updated_at = now_ts() + self.seed_repo.save(seed) + self._reconcile_seed_status_signal(seed) + return { + "seed": seed, + "can_edit_prompt": self.can_edit_seed_prompt(seed), + "runs": self.run_repo.list(seed_id), + "events": self.seed_repo.events(seed_id), + "baseline_metrics_for_branch": self.metrics_repo.get_for_branch(seed.baseline_branch), + "setup_error": self.git_service.setup_error_for_branches(seed.baseline_branch), + } + + def seed_detail_versions(self, seed_id: str) -> dict[str, str]: + """Return version fingerprints for runs and timeline so the client can skip refresh when unchanged.""" + self.require_seed(seed_id) + runs = self.run_repo.list(seed_id) + events = self.seed_repo.events(seed_id) + runs_version = ( + ",".join(f"{r.run_id}:{r.status.value}:{r.updated_at}" for r in runs) + if runs + else "0" + ) + timeline_version = ( + ",".join(str(e.get("created_at", "")) for e in events[-20:]) + if events + else "0" + ) + return { + "runs_version": runs_version, + "timeline_version": timeline_version, + } + + def extract_summary(self, output_text: str, stage: StageName) -> dict[str, object] | None: + start_marker, end_marker = SUMMARY_MARKERS[stage.value] + pattern = rf"{start_marker}\s*(\{{.*?\}})\s*{end_marker}" + match = re.search(pattern, output_text, flags=re.DOTALL) + if not match: + return None + try: + return json.loads(match.group(1)) + 
except json.JSONDecodeError: + return {"raw_summary": match.group(1)} + + def extract_metrics(self, output_text: str) -> dict[str, float | int]: + patterns = { + "val_bpb": r"^val_bpb:\s+([0-9.]+)", + "training_seconds": r"^training_seconds:\s+([0-9.]+)", + "total_seconds": r"^total_seconds:\s+([0-9.]+)", + "startup_seconds": r"^startup_seconds:\s+([0-9.]+)", + "peak_vram_mb": r"^peak_vram_mb:\s+([0-9.]+)", + "mfu_percent": r"^mfu_percent:\s+([0-9.]+)", + "total_tokens_M": r"^total_tokens_M:\s+([0-9.]+)", + "num_steps": r"^num_steps:\s+([0-9]+)", + "num_params_M": r"^num_params_M:\s+([0-9.]+)", + "depth": r"^depth:\s+([0-9]+)", + } + metrics: dict[str, float | int] = {} + for key, pattern in patterns.items(): + match = re.search(pattern, output_text, flags=re.MULTILINE) + if not match: + continue + metrics[key] = int(match.group(1)) if key in {"num_steps", "depth"} else float(match.group(1)) + return metrics + + def extract_dca_metrics( + self, output_text: str, summary: dict[str, object] | None = None + ) -> dict[str, float | int]: + if summary: + summary_metrics = summary.get("metrics") + if isinstance(summary_metrics, dict): + parsed: dict[str, float | int] = {} + int_keys = {"num_steps", "depth"} + float_keys = { + "val_bpb", + "training_seconds", + "total_seconds", + "startup_seconds", + "peak_vram_mb", + "mfu_percent", + "total_tokens_M", + "num_params_M", + } + for key in int_keys | float_keys: + value = summary_metrics.get(key) + if value is None: + continue + try: + parsed[key] = int(value) if key in int_keys else float(value) + except (TypeError, ValueError): + continue + if parsed: + return parsed + return self.extract_metrics(output_text) + + @staticmethod + def evaluate_signal( + metrics: dict[str, float | int], + last_val_bpb: float | None, + promotion_threshold: float = PROMOTION_THRESHOLD, + ) -> str: + val_bpb = metrics.get("val_bpb") + if val_bpb is None: + return "error" + if last_val_bpb is None: + return "positive_signal" + delta = 
float(last_val_bpb) - float(val_bpb) + if delta >= promotion_threshold: + return "positive_signal" + if delta <= -promotion_threshold: + return "negative_signal" + return "neutral" + + +def default_workflow_service() -> WorkflowService: + return WorkflowService() diff --git a/component_system/tailwind.config.js b/component_system/tailwind.config.js new file mode 100644 index 000000000..ea1a7a372 --- /dev/null +++ b/component_system/tailwind.config.js @@ -0,0 +1,11 @@ +/** @type {import('tailwindcss').Config} */ +module.exports = { + content: [ + "./web/templates/**/*.html", + "./web/static/**/*.js" + ], + theme: { + extend: {} + }, + plugins: [] +}; diff --git a/component_system/task.py b/component_system/task.py new file mode 100644 index 000000000..d4b9fad05 --- /dev/null +++ b/component_system/task.py @@ -0,0 +1,303 @@ +"""Shared queue and JSON state helpers for the component-system web app.""" +from __future__ import annotations + +import json +import os +import shutil +import time +import uuid +from pathlib import Path +from typing import Any, Callable, Literal + +COMPONENT_SYSTEM_ROOT = Path(__file__).resolve().parent +HISTORY_ROOT = COMPONENT_SYSTEM_ROOT / "history" +QUEUE_ROOT = HISTORY_ROOT / "queue" +STATE_ROOT = HISTORY_ROOT / "state" +SEEDS_ROOT = STATE_ROOT / "seeds" +RUNS_ROOT = STATE_ROOT / "runs" +EVENTS_ROOT = STATE_ROOT / "events" +BASELINE_BRANCHES_PATH = COMPONENT_SYSTEM_ROOT / "baseline_branches.json" +BASELINE_METRICS_PATH = COMPONENT_SYSTEM_ROOT / "baseline_metrics.json" +WORKTREE_ROOT = HISTORY_ROOT / "worktrees" +LOG_ROOT = HISTORY_ROOT / "logs" + +STAGE_DIRS = { + "p": QUEUE_ROOT / "p", + "dca": QUEUE_ROOT / "dca", + "direct": QUEUE_ROOT / "direct", +} +IN_PROGRESS_DIR = QUEUE_ROOT / "in_progress" +DONE_DIR = QUEUE_ROOT / "done" +ERROR_DIR = QUEUE_ROOT / "error" +DAEMON_HEARTBEAT_PATH = STATE_ROOT / "daemon_heartbeat.json" +DAEMON_HEARTBEAT_STALE_SECONDS = 5 + +def _read_json(path: Path, default: Any) -> Any: + if not path.exists(): + 
return default + return json.loads(path.read_text(encoding="utf-8")) + + +def _write_json(path: Path, payload: Any) -> Path: + path.parent.mkdir(parents=True, exist_ok=True) + path.write_text(json.dumps(payload, indent=2, ensure_ascii=False), encoding="utf-8") + return path + + +def now_ts() -> float: + return time.time() + + +def now_iso() -> str: + return time.strftime("%Y-%m-%d %H:%M:%S") + + +def daemon_heartbeat() -> None: + """Write the daemon heartbeat file (call from the daemon process).""" + ensure_queue_layout() + _write_json( + DAEMON_HEARTBEAT_PATH, + {"timestamp": now_ts(), "pid": os.getpid()}, + ) + + +def get_daemon_status() -> str: + """Return 'running' if the daemon heartbeat is recent, else 'stopped'.""" + if not DAEMON_HEARTBEAT_PATH.exists(): + return "stopped" + try: + data = _read_json(DAEMON_HEARTBEAT_PATH, {}) + ts = data.get("timestamp") + if ts is None: + return "stopped" + if (now_ts() - float(ts)) <= DAEMON_HEARTBEAT_STALE_SECONDS: + return "running" + except Exception: + pass + return "stopped" + + +def ensure_queue_layout() -> None: + HISTORY_ROOT.mkdir(parents=True, exist_ok=True) + for d in STAGE_DIRS.values(): + d.mkdir(parents=True, exist_ok=True) + IN_PROGRESS_DIR.mkdir(parents=True, exist_ok=True) + DONE_DIR.mkdir(parents=True, exist_ok=True) + ERROR_DIR.mkdir(parents=True, exist_ok=True) + SEEDS_ROOT.mkdir(parents=True, exist_ok=True) + RUNS_ROOT.mkdir(parents=True, exist_ok=True) + EVENTS_ROOT.mkdir(parents=True, exist_ok=True) + WORKTREE_ROOT.mkdir(parents=True, exist_ok=True) + LOG_ROOT.mkdir(parents=True, exist_ok=True) + # Auto-create baseline JSON files if missing (like results.tsv for recording run data) + if not BASELINE_METRICS_PATH.exists(): + _write_json(BASELINE_METRICS_PATH, {}) + if not BASELINE_BRANCHES_PATH.exists(): + _write_json(BASELINE_BRANCHES_PATH, {}) + + +def new_task_id(prefix: str | None = None) -> str: + ts = time.strftime("%Y%m%d-%H%M%S") + short = uuid.uuid4().hex[:8] + task_id = f"{ts}-{short}" + 
return f"{prefix}-{task_id}" if prefix else task_id + + +def new_seed_id(prefix: str = "seed") -> str: + return f"{prefix}-{uuid.uuid4().hex[:6]}" + + +def new_run_id(stage: str) -> str: + return new_task_id(stage) + + +def write_task(stage: str, payload: dict[str, Any], task_id: str | None = None) -> Path: + ensure_queue_layout() + if stage not in STAGE_DIRS: + raise KeyError(f"Unknown stage {stage!r}") + tid = task_id or new_task_id(stage) + path = STAGE_DIRS[stage] / f"{tid}.json" + payload_with_meta = {"task_id": tid, "stage": stage, "created_at": now_ts(), **payload} + return _write_json(path, payload_with_meta) + + +def read_task(path: Path) -> dict[str, Any]: + return _read_json(path, {}) + + +def move_to_done(path: Path) -> Path: + ensure_queue_layout() + dest = DONE_DIR / path.name + if not path.exists(): + raise FileNotFoundError( + f"Task file already moved: {path}; possible duplicate daemon or double completion." + ) + if dest.exists(): + dest.unlink() + path.rename(dest) + return dest + + +def move_to_error(path: Path) -> Path: + ensure_queue_layout() + dest = ERROR_DIR / path.name + if dest.exists(): + dest.unlink() + path.rename(dest) + return dest + + +def list_pending(stage: str) -> list[Path]: + ensure_queue_layout() + if stage not in STAGE_DIRS: + raise KeyError(f"Unknown stage {stage!r}") + return sorted(STAGE_DIRS[stage].glob("*.json")) + + +def _is_aux_dca_task(payload: dict[str, Any]) -> bool: + return payload.get("metrics_recovery") is True or payload.get("merge_resolution") is True + + +def claim_pending( + stage: str, + lane: Literal["any", "gpu", "aux"] = "any", + eligible_fn: Callable[[dict[str, Any]], bool] | None = None, +) -> Path | None: + """Atomically claim the oldest pending task for a stage/lane. 
If eligible_fn is set, only claim tasks for which it returns True (avoids P/DCA races).""" + ensure_queue_layout() + if stage not in STAGE_DIRS: + raise KeyError(f"Unknown stage {stage!r}") + if lane not in {"any", "gpu", "aux"}: + raise KeyError(f"Unknown lane {lane!r}") + for path in sorted(STAGE_DIRS[stage].glob("*.json")): + payload = _read_json(path, {}) + if eligible_fn is not None and not eligible_fn(payload): + continue + if stage == "dca" and lane != "any": + is_aux = _is_aux_dca_task(payload) + if lane == "aux" and not is_aux: + continue + if lane == "gpu" and is_aux: + continue + claimed_path = IN_PROGRESS_DIR / path.name + try: + path.rename(claimed_path) + return claimed_path + except FileNotFoundError: + continue + except OSError: + # Another worker likely claimed the task first. + continue + return None + + +def restore_in_progress_tasks() -> dict[str, int]: + """Move stranded in-progress tasks back to their stage queue.""" + ensure_queue_layout() + restored = {stage: 0 for stage in STAGE_DIRS} + for path in sorted(IN_PROGRESS_DIR.glob("*.json")): + payload = _read_json(path, {}) + stage = payload.get("stage") + if stage not in STAGE_DIRS: + continue + dest = STAGE_DIRS[stage] / path.name + if dest.exists(): + dest.unlink() + path.rename(dest) + restored[stage] += 1 + return restored + + +def seed_path(seed_id: str) -> Path: + return SEEDS_ROOT / f"{seed_id}.json" + + +def run_path(run_id: str) -> Path: + return RUNS_ROOT / f"{run_id}.json" + + +def event_path(seed_id: str) -> Path: + return EVENTS_ROOT / f"{seed_id}.json" + + +def save_seed(seed: dict[str, Any]) -> Path: + seed_id = seed["seed_id"] + return _write_json(seed_path(seed_id), seed) + + +def load_seed(seed_id: str) -> dict[str, Any]: + return _read_json(seed_path(seed_id), {}) + + +def list_seeds() -> list[dict[str, Any]]: + ensure_queue_layout() + seeds = [_read_json(path, {}) for path in SEEDS_ROOT.glob("*.json")] + return sorted(seeds, key=lambda item: item.get("updated_at", 
item.get("created_at", 0)), reverse=True) + + +def save_run(run: dict[str, Any]) -> Path: + return _write_json(run_path(run["run_id"]), run) + + +def load_run(run_id: str) -> dict[str, Any]: + return _read_json(run_path(run_id), {}) + + +def list_runs(seed_id: str | None = None) -> list[dict[str, Any]]: + ensure_queue_layout() + runs = [_read_json(path, {}) for path in RUNS_ROOT.glob("*.json")] + if seed_id is not None: + runs = [run for run in runs if run.get("seed_id") == seed_id] + return sorted(runs, key=lambda item: item.get("updated_at", item.get("created_at", 0)), reverse=True) + + +def append_event(seed_id: str, event: dict[str, Any]) -> list[dict[str, Any]]: + ensure_queue_layout() + payload = _read_json(event_path(seed_id), []) + payload.append({"created_at": now_ts(), "created_at_human": now_iso(), **event}) + _write_json(event_path(seed_id), payload) + return payload + + +def load_events(seed_id: str) -> list[dict[str, Any]]: + return _read_json(event_path(seed_id), []) + + +def delete_seed(seed_id: str) -> None: + for path in (seed_path(seed_id), event_path(seed_id)): + if path.exists(): + path.unlink() + for run in list_runs(seed_id): + path = run_path(run["run_id"]) + if path.exists(): + path.unlink() + + +def load_baseline_branch_map() -> dict[str, str]: + """Load seed_id -> baseline_branch mapping (for agent lookup and workflow).""" + ensure_queue_layout() + return _read_json(BASELINE_BRANCHES_PATH, {}) + + +def save_baseline_branch_map(mapping: dict[str, str]) -> None: + """Persist seed_id -> baseline_branch mapping.""" + ensure_queue_layout() + _write_json(BASELINE_BRANCHES_PATH, mapping) + + +def load_baseline_metrics() -> dict[str, dict[str, Any]]: + """Load baseline_branch -> { last_val_bpb, promoted_branch, promoted_at, promoted_idea, commit_sha }.""" + ensure_queue_layout() + return _read_json(BASELINE_METRICS_PATH, {}) + + +def save_baseline_metrics(metrics_by_branch: dict[str, dict[str, Any]]) -> None: + """Persist per-branch baseline 
metrics.""" + ensure_queue_layout() + _write_json(BASELINE_METRICS_PATH, metrics_by_branch) + + +def reset_worktree(path: str | Path) -> None: + worktree = Path(path) + if worktree.exists(): + shutil.rmtree(worktree) diff --git a/component_system/training/mainline.py b/component_system/training/mainline.py new file mode 100644 index 000000000..e91771d85 --- /dev/null +++ b/component_system/training/mainline.py @@ -0,0 +1,82 @@ +"""Mainline assembler: reads static config, dynamically loads components, runs training.""" +from __future__ import annotations + +if __package__ in {None, ""}: + import sys + from pathlib import Path + + sys.path.insert(0, str(Path(__file__).resolve().parents[2])) + +import importlib +import os +from dataclasses import asdict +from typing import Any + +import torch + +from prepare import Tokenizer + +from component_system.config import get_training_binding + + +def _prepare_environment() -> None: + os.environ["PYTORCH_ALLOC_CONF"] = "expandable_segments:True" + os.environ["HF_HUB_DISABLE_PROGRESS_BARS"] = "1" + torch.manual_seed(42) + if torch.cuda.is_available(): + torch.cuda.manual_seed(42) + torch.set_float32_matmul_precision("high") + torch.cuda.reset_peak_memory_stats() + + +def _import_module(path: str) -> Any: + return importlib.import_module(path) + + +def run_mainline_training(binding_path: str | None = None) -> dict[str, Any]: + _prepare_environment() + binding = get_training_binding() + + tokenizer = Tokenizer.from_directory() + vocab_size = tokenizer.get_vocab_size() + + model_module = _import_module(binding["model_module"]) + optimizer_module = _import_module(binding["optimizer_module"]) + training_step_module = _import_module(binding["training_step_module"]) + + settings = training_step_module.default_training_settings() + config = model_module.build_model_config( + depth=settings.depth, + vocab_size=vocab_size, + aspect_ratio=settings.aspect_ratio, + head_dim=settings.head_dim, + window_pattern=settings.window_pattern, + ) + 
+ print("Loaded training binding from config") + print(f"Model config: {asdict(config)}") + + model, param_counts, num_flops_per_token = model_module.create_model( + config, + compile_model=settings.compile_model, + ) + + print("Parameter counts:") + for key, value in param_counts.items(): + print(f" {key:24s}: {value:,}") + print(f"Estimated FLOPs per token: {num_flops_per_token:e}") + + optimizer = optimizer_module.create_optimizer(model, settings) + return training_step_module.run_training_session( + model=model, + optimizer=optimizer, + tokenizer=tokenizer, + settings=settings, + param_counts=param_counts, + num_flops_per_token=num_flops_per_token, + baseline_binding=binding, + ) + + +if __name__ == "__main__": + run_mainline_training() diff --git a/component_system/web/app.py b/component_system/web/app.py new file mode 100644 index 000000000..9495a62ec --- /dev/null +++ b/component_system/web/app.py @@ -0,0 +1,53 @@ +from __future__ import annotations + +import time +from pathlib import Path + +from fastapi import FastAPI +from fastapi.responses import RedirectResponse, Response +from fastapi.staticfiles import StaticFiles +from fastapi.templating import Jinja2Templates + +from component_system.services.workflow import default_workflow_service +from component_system.task import ensure_queue_layout +from component_system.web.routes import router + +WEB_ROOT = Path(__file__).resolve().parent +TEMPLATE_ROOT = WEB_ROOT / "templates" +STATIC_ROOT = WEB_ROOT / "static" + + +def _static_version() -> str: + """Cache-busting version from app.js mtime so browsers load fresh static assets after changes.""" + app_js = STATIC_ROOT / "app.js" + if app_js.exists(): + return str(int(app_js.stat().st_mtime)) + return str(int(time.time())) + + +def create_app() -> FastAPI: + ensure_queue_layout() + app = FastAPI(title="Component System", version="0.1.0") + app.state.workflow = default_workflow_service() + app.state.static_version = _static_version() + app.state.templates = 
Jinja2Templates(directory=str(TEMPLATE_ROOT)) + app.mount("/static", StaticFiles(directory=str(STATIC_ROOT)), name="static") + app.include_router(router, prefix="/component-system") + + @app.get("/", include_in_schema=False) + def root() -> RedirectResponse: + return RedirectResponse(url="/component-system", status_code=307) + + @app.get("/favicon.ico", include_in_schema=False) + def favicon() -> Response: + return Response(status_code=204) + + @app.get("/.well-known/appspecific/com.chrome.devtools.json", include_in_schema=False) + def chrome_devtools_probe() -> Response: + # Chrome DevTools probes this endpoint; return 204 to avoid log spam. + return Response(status_code=204) + + return app + + +app = create_app() diff --git a/component_system/web/routes.py b/component_system/web/routes.py new file mode 100644 index 000000000..b1d7c01a2 --- /dev/null +++ b/component_system/web/routes.py @@ -0,0 +1,376 @@ +from __future__ import annotations + +from pathlib import Path + +from fastapi import APIRouter, Form, HTTPException, Query, Request +from fastapi.responses import HTMLResponse, RedirectResponse, Response + +from component_system.domain.models import SeedStatus +from component_system.services.workflow import GitCommandError, WorkflowService +from component_system.task import COMPONENT_SYSTEM_ROOT, get_daemon_status, LOG_ROOT + +router = APIRouter() + + +def _templates(request: Request): + return request.app.state.templates + + +def _workflow(request: Request) -> WorkflowService: + return request.app.state.workflow + + +def _is_htmx(request: Request) -> bool: + return request.headers.get("hx-request", "").lower() == "true" + + +def _render(request: Request, template_name: str, context: dict, status_code: int = 200) -> HTMLResponse: + templates = _templates(request) + return templates.TemplateResponse(request, template_name, {"request": request, **context}, status_code=status_code) + + +def _resolve_log_path(run_id: str, stream: str, run_log_path: str | None) -> 
Path | None: + # Primary source: persisted run metadata path. + if run_log_path: + candidate = Path(run_log_path) + if candidate.exists() and candidate.is_file(): + return candidate + + # Deterministic run-id naming (new format). + run_named = LOG_ROOT / f"{run_id}.{stream}.log" + if run_named.exists() and run_named.is_file(): + return run_named + + return None + + +def _resolve_prompt_path(run_id: str, run_prompt_path: str | None) -> Path | None: + if run_prompt_path: + candidate = Path(run_prompt_path) + if candidate.exists() and candidate.is_file(): + return candidate + prompt_named = LOG_ROOT / f"{run_id}.prompt.txt" + if prompt_named.exists() and prompt_named.is_file(): + return prompt_named + return None + + +@router.get("/", response_class=HTMLResponse) +def dashboard(request: Request, seed_id: str | None = None) -> HTMLResponse: + workflow = _workflow(request) + viewmodel = workflow.build_dashboard(selected_seed_id=seed_id) + context = { + "dashboard": viewmodel, + "selected_seed_id": seed_id, + "detail": workflow.seed_detail(seed_id) if seed_id else None, + } + return _render(request, "dashboard.html", context) + + +@router.get("/partials/dashboard", response_class=HTMLResponse) +def dashboard_board(request: Request, seed_id: str | None = None) -> HTMLResponse: + workflow = _workflow(request) + viewmodel = workflow.build_dashboard(selected_seed_id=seed_id) + return _render(request, "partials/dashboard_board.html", {"dashboard": viewmodel, "selected_seed_id": seed_id}) + + +@router.get("/partials/daemon-status", response_class=HTMLResponse) +def daemon_status_partial(request: Request) -> HTMLResponse: + return _render(request, "partials/daemon_status.html", {"daemon_status": get_daemon_status()}) + + +@router.get("/partials/seeds/{seed_id}", response_class=HTMLResponse) +def seed_detail_partial(request: Request, seed_id: str) -> HTMLResponse: + workflow = _workflow(request) + try: + detail = workflow.seed_detail(seed_id) + except KeyError as exc: + raise 
HTTPException(status_code=404, detail=str(exc)) from exc + dashboard = workflow.build_dashboard(selected_seed_id=seed_id) + context = { + **detail, + "dashboard": dashboard, + "selected_seed_id": seed_id, + "oob": True, + "daemon_status": get_daemon_status(), + } + return _render(request, "partials/seed_detail_response.html", context) + + +@router.get("/api/seeds/{seed_id}/versions") +def seed_versions(request: Request, seed_id: str) -> dict[str, str]: + workflow = _workflow(request) + try: + return workflow.seed_detail_versions(seed_id) + except KeyError as exc: + raise HTTPException(status_code=404, detail=str(exc)) from exc + + +@router.get("/partials/seeds/{seed_id}/runs", response_class=HTMLResponse) +def seed_runs_partial(request: Request, seed_id: str) -> HTMLResponse: + workflow = _workflow(request) + try: + detail = workflow.seed_detail(seed_id) + except KeyError as exc: + raise HTTPException(status_code=404, detail=str(exc)) from exc + return _render( + request, + "partials/seed_runs_inner.html", + {"seed": detail["seed"], "runs": detail["runs"]}, + ) + + +@router.get("/partials/seeds/{seed_id}/timeline", response_class=HTMLResponse) +def seed_timeline_partial(request: Request, seed_id: str) -> HTMLResponse: + workflow = _workflow(request) + try: + detail = workflow.seed_detail(seed_id) + except KeyError as exc: + raise HTTPException(status_code=404, detail=str(exc)) from exc + return _render( + request, + "partials/seed_timeline_inner.html", + {"seed": detail["seed"], "events": detail["events"]}, + ) + + +@router.get("/api/runs/{run_id}/prompt") +def run_prompt(request: Request, run_id: str) -> dict[str, object]: + workflow = _workflow(request) + run = workflow.run_repo.get(run_id) + run_prompt_path = run.prompt_path if run is not None else None + prompt_path = _resolve_prompt_path(run_id, run_prompt_path) + if prompt_path is None: + raise HTTPException(status_code=404, detail=f"Prompt for run '{run_id}' not found.") + content = 
prompt_path.read_text(encoding="utf-8", errors="replace") + return {"content": content} + + +@router.get("/api/runs/{run_id}/log") +def run_log_chunk( + request: Request, + run_id: str, + stream: str = Query("stdout"), + offset: int = Query(0, ge=0), + limit: int = Query(64 * 1024, ge=1024, le=512 * 1024), +) -> dict[str, object]: + workflow = _workflow(request) + run = workflow.run_repo.get(run_id) + + complete_status = bool(run is not None and run.status.value in {"succeeded", "failed"}) + if stream not in {"stdout", "stderr"}: + raise HTTPException(status_code=400, detail="stream must be one of: stdout, stderr") + + run_log_path = None + if run is not None: + run_log_path = run.log_path if stream == "stdout" else run.stderr_log_path + if not run_log_path and stream == "stderr" and run.log_path and run.log_path.endswith(".stdout.log"): + run_log_path = run.log_path.replace(".stdout.log", ".stderr.log") + + log_path = _resolve_log_path(run_id, stream, run_log_path) + if log_path is None and run is not None and not complete_status: + # During queued/running phases metadata may not yet include paths and files may appear slightly later. 
+ return { + "chunk": "", + "next_offset": offset, + "size": 0, + "complete": False, + } + + if log_path is None: + raise HTTPException(status_code=404, detail=f"Log for run '{run_id}' ({stream}) not found.") + + if not log_path.exists() or not log_path.is_file(): + return { + "chunk": "", + "next_offset": offset, + "size": 0, + "complete": complete_status, + } + + file_size = log_path.stat().st_size + if offset > file_size: + offset = file_size + + with open(log_path, "rb") as handle: + handle.seek(offset) + payload = handle.read(limit) + + next_offset = offset + len(payload) + return { + "chunk": payload.decode("utf-8", errors="replace"), + "next_offset": next_offset, + "size": file_size, + "complete": bool(complete_status and next_offset >= file_size), + } + + +@router.get("/seeds/{seed_id}", response_class=HTMLResponse) +def seed_detail_page(request: Request, seed_id: str) -> HTMLResponse: + workflow = _workflow(request) + try: + detail = workflow.seed_detail(seed_id) + except KeyError as exc: + raise HTTPException(status_code=404, detail=str(exc)) from exc + return _render(request, "seed_detail_page.html", {**detail, "daemon_status": get_daemon_status()}) + + +@router.post("/actions/seeds", response_class=HTMLResponse) +def create_seed( + request: Request, + prompt: str = Form(...), + baseline_branch: str = Form(...), + seed_mode: str = Form("manual"), +) -> Response: + workflow = _workflow(request) + seed = workflow.create_seed( + prompt, + baseline_branch=baseline_branch, + ralph_loop_enabled=seed_mode == "ralph", + ) + if seed_mode == "ralph": + try: + workflow.queue_p(seed.seed_id) + except (RuntimeError, GitCommandError) as exc: + workflow.seed_repo.append_event( + seed.seed_id, + "ralph.start_failed", + f"Ralph loop could not queue the initial Plan run: {exc}", + ) + target_url = str(request.url_for("dashboard")) + f"?seed_id={seed.seed_id}" + if _is_htmx(request): + response = Response(status_code=204) + response.headers["HX-Redirect"] = target_url + 
return response + return RedirectResponse(target_url, status_code=303) + + +@router.post("/actions/direct-code-agent", response_class=HTMLResponse) +def start_direct_code_agent(request: Request, prompt: str = Form(...)) -> Response: + workflow = _workflow(request) + try: + seed, _run = workflow.create_direct_code_seed(prompt) + except RuntimeError as exc: + if _is_htmx(request): + return _render(request, "partials/action_error.html", {"message": str(exc)}, status_code=400) + raise HTTPException(status_code=400, detail=str(exc)) from exc + target_url = str(request.url_for("dashboard")) + f"?seed_id={seed.seed_id}" + if _is_htmx(request): + response = Response(status_code=204) + response.headers["HX-Redirect"] = target_url + return response + return RedirectResponse(target_url, status_code=303) + + +@router.post("/actions/seeds/{seed_id}/p", response_class=HTMLResponse) +def queue_p(request: Request, seed_id: str) -> Response: + workflow = _workflow(request) + try: + workflow.queue_p(seed_id) + except KeyError as exc: + if _is_htmx(request): + return _render(request, "partials/action_error.html", {"message": str(exc)}, status_code=404) + raise HTTPException(status_code=404, detail=str(exc)) from exc + except (RuntimeError, GitCommandError) as exc: + if _is_htmx(request): + return _render(request, "partials/action_error.html", {"message": str(exc)}, status_code=400) + raise HTTPException(status_code=400, detail=str(exc)) from exc + target_url = str(request.url_for("dashboard")) + f"?seed_id={seed_id}" + if _is_htmx(request): + response = Response(status_code=204) + response.headers["HX-Redirect"] = target_url + return response + return RedirectResponse(target_url, status_code=303) + + +@router.post("/actions/seeds/{seed_id}/prompt", response_class=HTMLResponse) +def update_seed_prompt(request: Request, seed_id: str, prompt: str = Form(...)) -> Response: + workflow = _workflow(request) + try: + workflow.update_seed_prompt(seed_id, prompt) + except KeyError as exc: + if 
_is_htmx(request): + return _render(request, "partials/action_error.html", {"message": str(exc)}, status_code=404) + raise HTTPException(status_code=404, detail=str(exc)) from exc + except RuntimeError as exc: + if _is_htmx(request): + return _render(request, "partials/action_error.html", {"message": str(exc)}, status_code=400) + raise HTTPException(status_code=400, detail=str(exc)) from exc + + if _is_htmx(request): + detail = workflow.seed_detail(seed_id) + dashboard = workflow.build_dashboard(selected_seed_id=seed_id) + context = { + **detail, + "dashboard": dashboard, + "selected_seed_id": seed_id, + "oob": True, + "daemon_status": get_daemon_status(), + } + return _render(request, "partials/seed_detail_response.html", context) + + target_url = str(request.url_for("dashboard")) + f"?seed_id={seed_id}" + return RedirectResponse(target_url, status_code=303) + + +@router.post("/actions/seeds/{seed_id}/dca", response_class=HTMLResponse) +def queue_dca(request: Request, seed_id: str) -> Response: + workflow = _workflow(request) + try: + workflow.queue_dca(seed_id) + except (KeyError, RuntimeError) as exc: + if _is_htmx(request): + return _render(request, "partials/action_error.html", {"message": str(exc)}, status_code=400) + raise HTTPException(status_code=400, detail=str(exc)) from exc + target_url = str(request.url_for("dashboard")) + f"?seed_id={seed_id}" + if _is_htmx(request): + response = Response(status_code=204) + response.headers["HX-Redirect"] = target_url + return response + return RedirectResponse(target_url, status_code=303) + + +@router.post("/actions/seeds/{seed_id}/ralph/start", response_class=HTMLResponse) +def start_ralph_loop(request: Request, seed_id: str) -> Response: + workflow = _workflow(request) + try: + seed = workflow.set_ralph_loop(seed_id, True) + if seed.status in { + SeedStatus.draft, + SeedStatus.generated, + SeedStatus.passed, + SeedStatus.failed, + SeedStatus.promoted, + }: + workflow.queue_p(seed_id) + except KeyError as exc: + if 
_is_htmx(request): + return _render(request, "partials/action_error.html", {"message": str(exc)}, status_code=404) + raise HTTPException(status_code=404, detail=str(exc)) from exc + except (RuntimeError, GitCommandError) as exc: + if _is_htmx(request): + return _render(request, "partials/action_error.html", {"message": str(exc)}, status_code=400) + raise HTTPException(status_code=400, detail=str(exc)) from exc + target_url = str(request.url_for("dashboard")) + f"?seed_id={seed_id}" + if _is_htmx(request): + response = Response(status_code=204) + response.headers["HX-Redirect"] = target_url + return response + return RedirectResponse(target_url, status_code=303) + + +@router.post("/actions/seeds/{seed_id}/ralph/stop", response_class=HTMLResponse) +def stop_ralph_loop(request: Request, seed_id: str) -> Response: + workflow = _workflow(request) + try: + workflow.set_ralph_loop(seed_id, False) + except KeyError as exc: + if _is_htmx(request): + return _render(request, "partials/action_error.html", {"message": str(exc)}, status_code=404) + raise HTTPException(status_code=404, detail=str(exc)) from exc + target_url = str(request.url_for("dashboard")) + f"?seed_id={seed_id}" + if _is_htmx(request): + response = Response(status_code=204) + response.headers["HX-Redirect"] = target_url + return response + return RedirectResponse(target_url, status_code=303) diff --git a/component_system/web/static/app.css b/component_system/web/static/app.css new file mode 100644 index 000000000..7edabb7c7 --- /dev/null +++ b/component_system/web/static/app.css @@ -0,0 +1,137 @@ +:root { + color-scheme: dark; + --card-bg: rgb(15 23 42 / 0.6); + --card-border: rgb(51 65 85); + --muted: rgb(148 163 184); +} + +body { + font-family: + Inter, + ui-sans-serif, + system-ui, + -apple-system, + BlinkMacSystemFont, + "Segoe UI", + sans-serif; + -webkit-font-smoothing: antialiased; +} + +/* IDs and branch names */ +.font-mono-id { + font-family: ui-monospace, "Cascadia Code", "Source Code Pro", Menlo, 
Consolas, monospace; + font-size: 0.9em; + letter-spacing: 0.02em; +} + +.line-clamp-3 { + display: -webkit-box; + overflow: hidden; + -webkit-box-orient: vertical; + -webkit-line-clamp: 3; +} + +/* Card hover for clickable seed cards */ +.seed-card { + transition: border-color 0.15s ease, background-color 0.15s ease; +} +.seed-card:hover { + border-color: rgb(56 189 248 / 0.5); + background-color: rgb(15 23 42 / 0.9); +} +.seed-card.is-selected { + border-color: rgb(14 165 233); + background-color: rgb(14 165 233 / 0.14); + box-shadow: inset 0 0 0 1px rgb(14 165 233 / 0.35); +} + +/* Status pills */ +.status-pill { + display: inline-flex; + align-items: center; + border: 1px solid transparent; + font-size: 0.625rem; + font-weight: 600; + letter-spacing: 0.04em; + text-transform: uppercase; + line-height: 1; + padding: 0.2rem 0.5rem; + border-radius: 9999px; + white-space: nowrap; +} +.status-draft { background: rgb(51 65 85 / 0.62); border-color: rgb(148 163 184 / 0.4); color: rgb(226 232 240); } +.status-queued { background: rgb(146 64 14 / 0.45); border-color: rgb(245 158 11 / 0.45); color: rgb(253 230 138); } +.status-planning { background: rgb(30 64 175 / 0.4); border-color: rgb(96 165 250 / 0.45); color: rgb(191 219 254); } +.status-generated { background: rgb(15 118 110 / 0.38); border-color: rgb(45 212 191 / 0.4); color: rgb(153 246 228); } +.status-dca_queued { background: rgb(8 145 178 / 0.33); border-color: rgb(34 211 238 / 0.38); color: rgb(165 243 252); } +.status-adapting, +.status-running { background: rgb(109 40 217 / 0.35); border-color: rgb(192 132 252 / 0.42); color: rgb(233 213 255); } +.status-passed { background: rgb(21 128 61 / 0.28); border-color: rgb(74 222 128 / 0.4); color: rgb(187 247 208); } +.status-failed { background: rgb(153 27 27 / 0.34); border-color: rgb(248 113 113 / 0.42); color: rgb(254 202 202); } +.status-promoted { background: rgb(22 163 74 / 0.28); border-color: rgb(74 222 128 / 0.42); color: rgb(187 247 208); } + +/* 
Empty state placeholder */ +.empty-value { + color: rgb(100 116 139); + font-style: normal; +} + +/* Section headers */ +.section-label { + font-size: 11px; + letter-spacing: 0.2em; + text-transform: uppercase; + color: rgb(100 116 139); +} + +/* Scroll containers for long dashboard lists */ +.scroll-pane { + min-height: 0; + overflow-y: auto; + scrollbar-gutter: stable; +} + +.scroll-pane-stage { + max-height: min(32rem, 68vh); +} + +.scroll-pane-detail { + max-height: min(30rem, 62vh); +} + +.run-log-grid { + display: grid; + gap: 0.75rem; + grid-template-columns: minmax(0, 2fr) minmax(0, 4fr) minmax(0, 4fr); +} + +.run-log-pane { + min-width: 0; + min-height: 0; + display: flex; + flex-direction: column; +} + +.run-log-pre { + min-width: 0; + min-height: 0; + flex: 1 1 auto; + overflow: auto; + white-space: pre-wrap; + overflow-wrap: anywhere; + word-break: break-word; + border: 1px solid rgb(30 41 59); + border-radius: 0.25rem; + background: rgb(0 0 0 / 0.3); + padding: 0.5rem; + font-family: ui-monospace, "Cascadia Code", "Source Code Pro", Menlo, Consolas, monospace; + font-size: 11px; + line-height: 1.25rem; + color: rgb(226 232 240); +} + +@media (max-width: 1024px) { + .run-log-grid { + grid-template-columns: 1fr; + } +} diff --git a/component_system/web/static/app.js b/component_system/web/static/app.js new file mode 100644 index 000000000..34de6c3af --- /dev/null +++ b/component_system/web/static/app.js @@ -0,0 +1,513 @@ +document.body.addEventListener("htmx:responseError", (event) => { + const target = event.detail.target; + if (!target) { + return; + } + target.innerHTML = `
Request failed.
`; +}); + +function selectedSeedIdFromUrl() { + const params = new URLSearchParams(window.location.search); + return params.get("seed_id"); +} + +function applySelectedSeed(seedId) { + const cards = document.querySelectorAll(".seed-card[data-seed-id]"); + cards.forEach((card) => { + const isSelected = seedId !== null && card.dataset.seedId === seedId; + card.classList.toggle("is-selected", isSelected); + card.setAttribute("aria-current", isSelected ? "true" : "false"); + }); +} + +let dashboardPollInFlight = false; +let seedDetailPollInFlight = false; +let seedVersionsPollInFlight = false; +const lastSeedVersions = {}; +const savedScrollPositions = { runs: null, timeline: null }; +const INTERACTION_DEBOUNCE_MS = 3000; +let lastRunsInteraction = 0; +let lastTimelineInteraction = 0; + +function seedDetailUrl(seedId) { + const detail = document.getElementById("seed-detail"); + const template = detail?.dataset.seedDetailUrlTemplate; + if (!template || !seedId) { + return null; + } + return template.replace("__SEED_ID__", encodeURIComponent(seedId)); +} + +function seedVersionsUrl(seedId) { + const detail = document.getElementById("seed-detail"); + const template = detail?.dataset.seedVersionsUrlTemplate; + if (!template || !seedId) return null; + return template.replace("__SEED_ID__", encodeURIComponent(seedId)); +} + +function seedRunsUrl(seedId) { + const detail = document.getElementById("seed-detail"); + const template = detail?.dataset.seedRunsUrlTemplate; + if (!template || !seedId) return null; + return template.replace("__SEED_ID__", encodeURIComponent(seedId)); +} + +function seedTimelineUrl(seedId) { + const detail = document.getElementById("seed-detail"); + const template = detail?.dataset.seedTimelineUrlTemplate; + if (!template || !seedId) return null; + return template.replace("__SEED_ID__", encodeURIComponent(seedId)); +} + +function isLogViewerOpen() { + const target = document.getElementById("seed-detail"); + if (!target) { + return false; + } + if 
(target.querySelector('[data-log-viewer-open="true"]')) { + return true; + } + if (target.querySelector("[data-log-stream]")) { + return true; + } + const seedId = selectedSeedIdFromUrl(); + return Boolean(seedId && localStorage.getItem(`seed-active-run-${seedId}`)); +} + +function dashboardBoardUrl() { + const board = document.getElementById("dashboard-board"); + const base = board?.dataset.dashboardPartialUrl; + if (!base) { + return null; + } + const seedId = selectedSeedIdFromUrl(); + if (!seedId) { + return base; + } + const separator = base.includes("?") ? "&" : "?"; + return `${base}${separator}seed_id=${encodeURIComponent(seedId)}`; +} + +function pollDashboardBoard() { + const target = document.getElementById("dashboard-board"); + const url = dashboardBoardUrl(); + if (!target || !url || dashboardPollInFlight) { + return; + } + dashboardPollInFlight = true; + htmx + .ajax("GET", url, { target: "#dashboard-board", swap: "outerHTML" }) + .finally(() => { + dashboardPollInFlight = false; + }); +} + +function pollSeedDetail() { + const seedId = selectedSeedIdFromUrl(); + const target = document.getElementById("seed-detail"); + const url = seedDetailUrl(seedId); + if (!target || !url || seedDetailPollInFlight) { + return; + } + if (isLogViewerOpen()) { + return; + } + seedDetailPollInFlight = true; + htmx.ajax("GET", url, { target: "#seed-detail", swap: "innerHTML" }).finally(() => { + seedDetailPollInFlight = false; + }); +} + +function applyRunsPartial(seedId) { + const listEl = document.getElementById("seed-runs-list"); + const paneEl = document.getElementById("seed-runs-scroll-pane"); + const url = seedRunsUrl(seedId); + if (!listEl || !url) return Promise.resolve(); + savedScrollPositions.runs = paneEl ? 
paneEl.scrollTop : null; + return htmx.ajax("GET", url, { target: "#seed-runs-list", swap: "innerHTML" }); +} + +function applyTimelinePartial(seedId) { + const listEl = document.getElementById("seed-timeline-list"); + const paneEl = document.getElementById("seed-timeline-scroll-pane"); + const url = seedTimelineUrl(seedId); + if (!listEl || !url) return Promise.resolve(); + savedScrollPositions.timeline = paneEl ? paneEl.scrollTop : null; + return htmx.ajax("GET", url, { target: "#seed-timeline-list", swap: "innerHTML" }); +} + +function pollSeedDetailSections() { + const seedId = selectedSeedIdFromUrl(); + if (!seedId || isLogViewerOpen()) return; + const versionsUrl = seedVersionsUrl(seedId); + if (!versionsUrl || seedVersionsPollInFlight) return; + seedVersionsPollInFlight = true; + fetch(versionsUrl) + .then((r) => (r.ok ? r.json() : null)) + .then((data) => { + if (!data) return; + const prev = lastSeedVersions[seedId] || {}; + const runsChanged = data.runs_version !== prev.runs_version; + const timelineChanged = data.timeline_version !== prev.timeline_version; + lastSeedVersions[seedId] = { + runs_version: data.runs_version, + timeline_version: data.timeline_version, + }; + const now = Date.now(); + const runsIdle = now - lastRunsInteraction >= INTERACTION_DEBOUNCE_MS; + const timelineIdle = now - lastTimelineInteraction >= INTERACTION_DEBOUNCE_MS; + const promises = []; + if (runsChanged && runsIdle) promises.push(applyRunsPartial(seedId)); + if (timelineChanged && timelineIdle) promises.push(applyTimelinePartial(seedId)); + return Promise.all(promises); + }) + .finally(() => { + seedVersionsPollInFlight = false; + }); +} + +function attachScrollPaneInteractionGuards() { + const runsPane = document.getElementById("seed-runs-scroll-pane"); + const timelinePane = document.getElementById("seed-timeline-scroll-pane"); + function onRunsActivity() { + lastRunsInteraction = Date.now(); + } + function onTimelineActivity() { + lastTimelineInteraction = Date.now(); + 
} + runsPane?.addEventListener("scroll", onRunsActivity, { passive: true }); + runsPane?.addEventListener("mouseenter", onRunsActivity); + runsPane?.addEventListener("focusin", onRunsActivity); + timelinePane?.addEventListener("scroll", onTimelineActivity, { passive: true }); + timelinePane?.addEventListener("mouseenter", onTimelineActivity); + timelinePane?.addEventListener("focusin", onTimelineActivity); +} + +function pollDashboard() { + if (document.hidden) return; + if (isLogViewerOpen()) return; + pollDashboardBoard(); + const seedId = selectedSeedIdFromUrl(); + if (seedId && document.getElementById("seed-runs-list")) { + pollSeedDetailSections(); + } else if (seedId && !document.getElementById("seed-runs-list")) { + pollSeedDetail(); + } +} + +document.body.addEventListener("htmx:beforeRequest", (event) => { + const target = event.detail?.target; + if (!target || !isLogViewerOpen()) { + return; + } + // Pause daemon status auto-refresh while viewing logs. + if (target.id === "daemon-status-panel") { + event.preventDefault(); + } +}); + +document.body.addEventListener("click", (event) => { + const card = event.target.closest(".seed-card[data-seed-id]"); + if (!card) { + return; + } + applySelectedSeed(card.dataset.seedId); +}); + +document.body.addEventListener("htmx:afterSettle", (event) => { + const target = event.detail?.target; + if (!target) return; + if (target.id === "seed-detail") { + applySelectedSeed(selectedSeedIdFromUrl()); + attachScrollPaneInteractionGuards(); + return; + } + if (target.id === "seed-runs-list") { + const pane = document.getElementById("seed-runs-scroll-pane"); + if (pane && savedScrollPositions.runs != null) { + pane.scrollTop = savedScrollPositions.runs; + savedScrollPositions.runs = null; + } + initializeLogStreams(target.closest("#seed-detail") || document); + return; + } + if (target.id === "seed-timeline-list") { + const pane = document.getElementById("seed-timeline-scroll-pane"); + if (pane && savedScrollPositions.timeline 
!= null) { + pane.scrollTop = savedScrollPositions.timeline; + savedScrollPositions.timeline = null; + } + return; + } +}); + +window.addEventListener("popstate", () => { + applySelectedSeed(selectedSeedIdFromUrl()); +}); + +applySelectedSeed(selectedSeedIdFromUrl()); +attachScrollPaneInteractionGuards(); +window.setInterval(pollDashboard, 5000); + +const logStreamIntervals = new Map(); +const logStreamState = new Map(); +const ansiCtor = window.AnsiUp || window.ansi_up?.AnsiUp || null; +const ansiRenderer = ansiCtor ? new ansiCtor() : null; + +if (ansiRenderer && Object.prototype.hasOwnProperty.call(ansiRenderer, "escape_html")) { + ansiRenderer.escape_html = true; +} + +function stripAnsiSequences(value) { + // CSI: \x1b[...m, OSC: \x1b]...\x07 or \x1b\ ; then any remaining ESC controls. + return (value || "") + .replace(/\u001b\][^\u0007]*(?:\u0007|\u001b\\)/g, "") + .replace(/\u001b\[[0-?]*[ -/]*[@-~]/g, "") + .replace(/\u001b[@-_]/g, ""); +} + +function isRunComplete(status) { + return status === "succeeded" || status === "failed"; +} + +function updateLogStatus(runId, text) { + const nodes = document.querySelectorAll(`[data-log-status][data-run-id="${runId}"]`); + nodes.forEach((node) => { + node.textContent = text; + }); +} + +function updateCopyButtonState(runId, stream, enabled) { + const buttons = document.querySelectorAll( + `[data-log-copy][data-run-id="${runId}"][data-stream="${stream}"]` + ); + buttons.forEach((button) => { + button.disabled = !enabled; + }); +} + +function appendLogContent(pre, chunk) { + const currentRaw = pre.dataset.rawLog || ""; + const nextRaw = currentRaw + (chunk || ""); + + // Keep the viewer responsive for very large logs. + const maxChars = 200_000; + const trimmedRaw = + nextRaw.length > maxChars ? 
nextRaw.slice(nextRaw.length - maxChars) : nextRaw; + + pre.dataset.rawLog = trimmedRaw; + if (ansiRenderer) { + pre.innerHTML = ansiRenderer.ansi_to_html(trimmedRaw); + } else { + pre.textContent = stripAnsiSequences(trimmedRaw); + } + + pre.scrollTop = pre.scrollHeight; +} + +async function pollLogStream(pre) { + const runId = pre.dataset.runId; + const stream = pre.dataset.stream || "stdout"; + if (!runId) { + return; + } + + const state = logStreamState.get(pre) || { offset: 0, complete: false }; + const response = await fetch( + `/component-system/api/runs/${encodeURIComponent(runId)}/log?stream=${encodeURIComponent(stream)}&offset=${state.offset}` + ); + if (!response.ok) { + throw new Error(`Failed to fetch logs for ${runId}: ${response.status}`); + } + + const payload = await response.json(); + const chunk = payload.chunk || ""; + const nextOffset = Number(payload.next_offset || 0); + const complete = Boolean(payload.complete); + + appendLogContent(pre, chunk); + updateCopyButtonState(runId, stream, pre.textContent.length > 0); + logStreamState.set(pre, { offset: nextOffset, complete }); + + if (complete) { + updateLogStatus(runId, "Completed"); + const intervalId = logStreamIntervals.get(pre); + if (intervalId) { + clearInterval(intervalId); + logStreamIntervals.delete(pre); + } + return; + } + + if (chunk) { + updateLogStatus(runId, "Streaming..."); + } else { + updateLogStatus(runId, "Waiting for log output..."); + } +} + +function cleanupDetachedLogStreams() { + for (const [pre, intervalId] of logStreamIntervals.entries()) { + if (!document.body.contains(pre)) { + clearInterval(intervalId); + logStreamIntervals.delete(pre); + logStreamState.delete(pre); + } + } +} + +function initializeLogCopyButtons(root) { + root.querySelectorAll("[data-log-copy]").forEach((button) => { + if (button.dataset.logCopyReady === "true") { + return; + } + button.dataset.logCopyReady = "true"; + button.addEventListener("click", async () => { + const runId = 
button.dataset.runId; + if (!runId) { + return; + } + const stream = button.dataset.stream || "stdout"; + const pre = root.querySelector( + `[data-log-stream][data-run-id="${runId}"][data-stream="${stream}"]` + ); + if (!pre || !pre.textContent) { + return; + } + try { + await navigator.clipboard.writeText(pre.textContent); + const labelBefore = button.textContent; + button.textContent = "Copied!"; + setTimeout(() => { + button.textContent = labelBefore || "Copy"; + }, 1200); + } catch (error) { + console.error("Failed to copy log output", error); + } + }); + }); +} + +async function loadPromptContent(pre) { + const runId = pre.dataset.runId; + if (!runId) return; + try { + const response = await fetch( + `/component-system/api/runs/${encodeURIComponent(runId)}/prompt` + ); + if (!response.ok) return; + const payload = await response.json(); + const content = payload.content ?? ""; + pre.textContent = content; + const copyBtn = document.querySelector( + `[data-prompt-copy][data-run-id="${runId}"]` + ); + if (copyBtn) copyBtn.disabled = false; + } catch (err) { + console.error("Failed to load prompt for run", runId, err); + } +} + +function initializePromptDisplays(root) { + root.querySelectorAll("[data-prompt-content]").forEach((pre) => { + if (pre.dataset.promptLoaded === "true") return; + pre.dataset.promptLoaded = "true"; + loadPromptContent(pre); + }); + root.querySelectorAll("[data-prompt-copy]").forEach((button) => { + if (button.dataset.promptCopyReady === "true") return; + button.dataset.promptCopyReady = "true"; + button.addEventListener("click", async () => { + const runId = button.dataset.runId; + if (!runId) return; + const pre = root.querySelector( + `[data-prompt-content][data-run-id="${runId}"]` + ); + if (!pre || !pre.textContent) return; + try { + await navigator.clipboard.writeText(pre.textContent); + const labelBefore = button.textContent; + button.textContent = "Copied!"; + setTimeout(() => { + button.textContent = labelBefore || "Copy"; + }, 
1200); + } catch (err) { + console.error("Failed to copy prompt", err); + } + }); + }); +} + +function initializeLogStreams(root = document) { + cleanupDetachedLogStreams(); + initializeLogCopyButtons(root); + initializePromptDisplays(root); + + root.querySelectorAll("[data-log-stream]").forEach((pre) => { + if (pre.dataset.logStreamReady === "true") { + return; + } + pre.dataset.logStreamReady = "true"; + const runStatus = pre.dataset.runStatus || ""; + const runId = pre.dataset.runId; + if (!runId) { + return; + } + + if (isRunComplete(runStatus)) { + updateLogStatus(runId, "Completed"); + } else { + updateLogStatus(runId, "Connecting..."); + } + + const runPoll = async () => { + try { + await pollLogStream(pre); + } catch (error) { + updateLogStatus(runId, "Log fetch failed"); + console.error(error); + } + }; + + runPoll(); + const intervalId = window.setInterval(runPoll, 2000); + logStreamIntervals.set(pre, intervalId); + }); +} + +function observeLogStreamMounts() { + const observer = new MutationObserver((mutations) => { + for (const mutation of mutations) { + if (mutation.type !== "childList" || mutation.addedNodes.length === 0) { + continue; + } + for (const node of mutation.addedNodes) { + if (!(node instanceof Element)) { + continue; + } + if ( + node.matches?.("[data-log-stream], [data-log-copy], [data-prompt-content], [data-prompt-copy]") || + node.querySelector?.("[data-log-stream], [data-log-copy], [data-prompt-content], [data-prompt-copy]") + ) { + initializeLogStreams(node); + return; + } + } + } + }); + + observer.observe(document.body, { childList: true, subtree: true }); +} + +document.body.addEventListener("htmx:afterSettle", (event) => { + const target = event.detail?.target; + if (!target) { + return; + } + if (target.id === "seed-detail") { + initializeLogStreams(target); + } +}); + +initializeLogStreams(document); +observeLogStreamMounts(); diff --git a/component_system/web/static/tailwind.input.css 
b/component_system/web/static/tailwind.input.css new file mode 100644 index 000000000..a563500f2 --- /dev/null +++ b/component_system/web/static/tailwind.input.css @@ -0,0 +1,27 @@ +@tailwind base; +@tailwind components; +@tailwind utilities; + +@layer base { + :root { + color-scheme: dark; + } + + body { + @apply min-h-screen bg-slate-950 text-slate-100; + font-family: + Inter, + ui-sans-serif, + system-ui, + -apple-system, + BlinkMacSystemFont, + "Segoe UI", + sans-serif; + } +} + +@layer utilities { + .card-panel { + @apply rounded-2xl border border-slate-800 bg-slate-900; + } +} diff --git a/component_system/web/templates/base.html b/component_system/web/templates/base.html new file mode 100644 index 000000000..ef14a6341 --- /dev/null +++ b/component_system/web/templates/base.html @@ -0,0 +1,32 @@ + + + + + + {% block title %}Component System{% endblock %} + + + + + + + + +
+
+
+ + Component System + +

Seed -> Plan -> Do-Check-Action orchestration with FastAPI, HTMX, Alpine, and Tailwind.

+
+ +
+
+
+ {% block content %}{% endblock %} +
+ + diff --git a/component_system/web/templates/dashboard.html b/component_system/web/templates/dashboard.html new file mode 100644 index 000000000..82b1056da --- /dev/null +++ b/component_system/web/templates/dashboard.html @@ -0,0 +1,124 @@ +{% extends "base.html" %} +{% block title %}Component System Dashboard{% endblock %} +{% block content %} +
+
+
+

Create Seed

+

Start a new seed from a prompt. Baseline branch is selected here; each seed has one branch (seed id).

+
+
+
+ + +
+
+

One branch per seed: the seed id is the branch name (e.g. seed-a1b2c3).

+ + + +
+ {% if dashboard.setup_error %} +
+

Git setup required

+

{{ dashboard.setup_error }}

+
+ {% endif %} + {% with daemon_status=dashboard.daemon_status %} + {% include "partials/daemon_status.html" %} + {% endwith %} +
+

Baseline branches

+

Per-branch metrics (last val_bpb, promoted seed). Workflow-managed, read-only: component_system/baseline_branches.json (per-branch mapping), component_system/baseline_metrics.json (baseline run metrics).

+ {% if dashboard.baseline_metrics_by_branch %} +
+ {% for branch, m in dashboard.baseline_metrics_by_branch.items() %} +
+
{{ branch }}
+
val_bpb {{ "%.6f"|format(m.get('last_val_bpb')) if m.get('last_val_bpb') is not none else "—" }} · {{ m.get('promoted_branch') or "—" }}{% if m.get('commit_sha') %} · {{ m.get('commit_sha')[:7] }}{% endif %}
+
+ {% endfor %} +
+ {% else %} +

No baseline metrics yet. Run the first DCA to establish baseline for a branch.

+ {% endif %} +
+
+

Direct Code Agent

+

Run the configured code agent from the project root with a dedicated single-worker executor. New runs appear in the Do-Check-Action column.

+
+ + + +
+
+
+
+ {% include "partials/dashboard_board.html" %} +
+ {% if detail %} + {% with + seed=detail.seed, + runs=detail.runs, + events=detail.events, + baseline_metrics_for_branch=detail.baseline_metrics_for_branch, + setup_error=detail.setup_error, + daemon_status=dashboard.daemon_status + %} + {% include "partials/seed_detail.html" %} + {% endwith %} + {% else %} +
+ Select a seed to inspect its worktree, plan, runs, logs, and promotion history. +
+ {% endif %} +
+
+
+
+{% endblock %} diff --git a/component_system/web/templates/partials/action_error.html b/component_system/web/templates/partials/action_error.html new file mode 100644 index 000000000..8a856804e --- /dev/null +++ b/component_system/web/templates/partials/action_error.html @@ -0,0 +1,3 @@ +
+ {{ message }} +
diff --git a/component_system/web/templates/partials/daemon_status.html b/component_system/web/templates/partials/daemon_status.html new file mode 100644 index 000000000..75a0d5e2e --- /dev/null +++ b/component_system/web/templates/partials/daemon_status.html @@ -0,0 +1,14 @@ +
+

Daemon: {% if daemon_status == 'running' %}running{% else %}not running{% endif %}

+

Plan and Do-Check-Action runs are executed by the daemon.

+ {% if daemon_status != 'running' %} +

Start it in a terminal:

+

uv run component_system/run.py

+ {% endif %} +
diff --git a/component_system/web/templates/partials/dashboard_board.html b/component_system/web/templates/partials/dashboard_board.html new file mode 100644 index 000000000..df82ab6ad --- /dev/null +++ b/component_system/web/templates/partials/dashboard_board.html @@ -0,0 +1,58 @@ +
+
+

+ Dashboard {{ dashboard.seed_count }} seed{{ 's' if dashboard.seed_count != 1 else '' }} across all stages +

+
+
+ {% for column in dashboard.columns %} +
+
+

{{ column.title }}

+

{{ column.description }}

+
+
+ {% if column.seeds %} + {% for seed in column.seeds %} + {% set is_selected = selected_seed_id == seed.seed_id %} + {% set is_promoted = column.id == 'completed' and seed.status.value == 'promoted' %} + +
+

{{ seed.seed_id }}

+ {{ seed.status.value|replace('_', ' ')|title }} +
+

{{ seed.prompt }}

+ {% if seed.plan %} +

{{ seed.plan.title }}

+ {% endif %} + {% if seed.latest_metrics and seed.latest_metrics.get('val_bpb') is not none %} +

val_bpb {{ "%.4f"|format(seed.latest_metrics.val_bpb) }}{% if seed.latest_signal %} · {{ seed.latest_signal }}{% endif %}

+ {% endif %} +
+ {% endfor %} + {% else %} +
+ No seeds in this stage. +
+ {% endif %} +
+
+ {% endfor %} +
+
diff --git a/component_system/web/templates/partials/seed_detail.html b/component_system/web/templates/partials/seed_detail.html new file mode 100644 index 000000000..89e81a0cf --- /dev/null +++ b/component_system/web/templates/partials/seed_detail.html @@ -0,0 +1,171 @@ +
+
+
+ +

{{ seed.seed_id }}

+ {% if can_edit_prompt %} +
+ + + +
+ {% else %} +

{{ seed.prompt }}

+ {% endif %} +
+
+ {% if seed.ralph_loop_enabled %} +
+ +
+ {% else %} +
+ +
+ {% endif %} +
+ +
+
+ +
+
+
+ + {% if setup_error %} +
+ {{ setup_error }} +
+ {% endif %} + +
+
+
+ + {{ seed.status.value|replace('_', ' ')|title }} +
+

Ralph loop: {% if seed.ralph_loop_enabled %}enabled{% else %}disabled{% endif %}

+

Latest signal: {% if seed.latest_signal %}{{ seed.latest_signal }}{% else %}—{% endif %}

+
+
+ +
+
Baseline
{{ seed.baseline_branch }}
+
Branch
{{ seed.seed_id }}
+
+
+
+ +
+
Seed worktree
{{ seed.worktree_path or "—" }}
+
+
+
+ +
+
+
+

Plan

+ {% if seed.plan %} +
+
+ +

{{ seed.plan.title }}

+
+
+ +

{{ seed.plan.target_component }}

+
+
+ +

{{ seed.plan.description }}

+
+ {% if seed.plan.commit_sha %} +
+ +

{{ seed.plan.commit_sha }}

+
+ {% endif %} +
+ {% else %} +

No plan yet. Click Run Plan to queue the task; the plan is generated when the daemon runs it.

+ {% endif %} +
+ +
+
+

Runs

+
+ {% if runs and seed.status.value in ['queued', 'planning'] and (daemon_status|default('')) != 'running' %} +

Runs stay queued until the daemon is running. Start: uv run component_system/run.py

+ {% endif %} +
+
+ {% include "partials/seed_runs_inner.html" %} +
+
+
+
+ +
+
+

Latest Metrics

+ {% if seed.latest_metrics %} +
+ {% for key, value in seed.latest_metrics.items() %} +
+ +
{{ value }}
+
+ {% endfor %} +
+ {% else %} +

Metrics appear here after Do-Check-Action runs the training entrypoint.

+ {% endif %} +
+ +
+
+

Timeline

+ +
+
+
+ {% include "partials/seed_timeline_inner.html" %} +
+
+
+
+
+
diff --git a/component_system/web/templates/partials/seed_detail_response.html b/component_system/web/templates/partials/seed_detail_response.html new file mode 100644 index 000000000..c2b27e70a --- /dev/null +++ b/component_system/web/templates/partials/seed_detail_response.html @@ -0,0 +1 @@ +{% include "partials/seed_detail.html" %} diff --git a/component_system/web/templates/partials/seed_detail_runs_content.html b/component_system/web/templates/partials/seed_detail_runs_content.html new file mode 100644 index 000000000..278a12ba0 --- /dev/null +++ b/component_system/web/templates/partials/seed_detail_runs_content.html @@ -0,0 +1,148 @@ +{% if runs and seed.status.value in ['queued', 'planning'] %} +

Runs stay queued until the daemon is running. Start: uv run component_system/run.py

+{% endif %} +{% if runs %} + {% for run in runs %} +
+
+
+

{% if run.stage.value == 'p' %}Plan{% else %}{{ run.stage.value|upper }}{% endif %} · {{ run.status.value }}

+

{{ run.run_id }}

+
+
+ {% if run.signal %} + {{ run.signal }} + {% endif %} + +
+
+ {% if run.metrics %} +
+ {% for key, value in run.metrics.items() %} +
+
{{ key }}
+
{{ value }}
+
+ {% endfor %} +
+ {% endif %} +
+ + {% endfor %} +{% else %} +

No runs yet. Use Run Plan to start.

+{% endif %} diff --git a/component_system/web/templates/partials/seed_detail_timeline_content.html b/component_system/web/templates/partials/seed_detail_timeline_content.html new file mode 100644 index 000000000..8fabcdd54 --- /dev/null +++ b/component_system/web/templates/partials/seed_detail_timeline_content.html @@ -0,0 +1,16 @@ +{% if events %} + {% for event in events %} +
+

{{ event.message }}

+ {% if event.commit_sha %} +

commit: {{ event.commit_sha }}

+ {% endif %} + {% if event.target_branch %} +

target branch: {{ event.target_branch }}

+ {% endif %} +

{{ event.kind }} · {{ event.created_at_human }}

+
+ {% endfor %} +{% else %} +

No events yet.

+{% endif %} diff --git a/component_system/web/templates/partials/seed_runs_inner.html b/component_system/web/templates/partials/seed_runs_inner.html new file mode 100644 index 000000000..488d9912b --- /dev/null +++ b/component_system/web/templates/partials/seed_runs_inner.html @@ -0,0 +1,145 @@ +{% if runs %} + {% for run in runs %} +
+
+
+

{% if run.stage.value == 'p' %}Plan{% else %}{{ run.stage.value|upper }}{% endif %} · {{ run.status.value }}

+

{{ run.run_id }}

+
+
+ {% if run.signal %} + {{ run.signal }} + {% endif %} + +
+
+ {% if run.metrics %} +
+ {% for key, value in run.metrics.items() %} +
+
{{ key }}
+
{{ value }}
+
+ {% endfor %} +
+ {% endif %} +
+ + {% endfor %} +{% else %} +

No runs yet. Use Run Plan to start.

+{% endif %} diff --git a/component_system/web/templates/partials/seed_timeline_inner.html b/component_system/web/templates/partials/seed_timeline_inner.html new file mode 100644 index 000000000..8fabcdd54 --- /dev/null +++ b/component_system/web/templates/partials/seed_timeline_inner.html @@ -0,0 +1,16 @@ +{% if events %} + {% for event in events %} +
+

{{ event.message }}

+ {% if event.commit_sha %} +

commit: {{ event.commit_sha }}

+ {% endif %} + {% if event.target_branch %} +

target branch: {{ event.target_branch }}

+ {% endif %} +

{{ event.kind }} · {{ event.created_at_human }}

+
+ {% endfor %} +{% else %} +

No events yet.

+{% endif %} diff --git a/component_system/web/templates/seed_detail_page.html b/component_system/web/templates/seed_detail_page.html new file mode 100644 index 000000000..4fa3d6f0c --- /dev/null +++ b/component_system/web/templates/seed_detail_page.html @@ -0,0 +1,18 @@ +{% extends "base.html" %} +{% block title %}Seed {{ seed.seed_id }}{% endblock %} +{% block content %} + +
+ {% include "partials/seed_detail.html" %} +
+{% endblock %} diff --git a/prepare.py b/prepare.py index 62607b9af..b64b909fc 100644 --- a/prepare.py +++ b/prepare.py @@ -38,7 +38,8 @@ CACHE_DIR = os.path.join(os.path.expanduser("~"), ".cache", "autoresearch") DATA_DIR = os.path.join(CACHE_DIR, "data") TOKENIZER_DIR = os.path.join(CACHE_DIR, "tokenizer") -BASE_URL = "https://huggingface.co/datasets/karpathy/climbmix-400b-shuffle/resolve/main" +HF_ENDPOINT = os.environ.get("HF_ENDPOINT", "https://huggingface.co").rstrip("/") +BASE_URL = f"{HF_ENDPOINT}/datasets/karpathy/climbmix-400b-shuffle/resolve/main" MAX_SHARD = 6542 # the last datashard is shard_06542.parquet VAL_SHARD = MAX_SHARD # pinned validation shard (shard_06542) VAL_FILENAME = f"shard_{VAL_SHARD:05d}.parquet" diff --git a/pyproject.toml b/pyproject.toml index 94ae32989..8882b6fab 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -5,15 +5,20 @@ description = "Autonomous pretraining research swarm" readme = "README.md" requires-python = ">=3.10" dependencies = [ + "arxiv>=2.4.1", + "fastapi>=0.116.0", + "jinja2>=3.1.6", "kernels>=0.11.7", "matplotlib>=3.10.8", "numpy>=2.2.6", "pandas>=2.3.3", "pyarrow>=21.0.0", + "python-multipart>=0.0.20", "requests>=2.32.0", "rustbpe>=0.1.0", "tiktoken>=0.11.0", "torch==2.9.1", + "uvicorn>=0.35.0", ] [tool.uv.sources] diff --git a/scripts/clean_history.py b/scripts/clean_history.py new file mode 100644 index 000000000..922809145 --- /dev/null +++ b/scripts/clean_history.py @@ -0,0 +1,367 @@ +#!/usr/bin/env python3 +"""Reset local autoresearch history/runtime artifacts. + +Actions: +1) Checkout main branch (configurable) +2) Remove all extra git worktrees +3) Delete all local branches except main +4) Clear component_system runtime state/history folders +5) Remove .pytest_cache, __pycache__, and results.tsv + +With --preserve-seeds SEED_IDS: keep everything for those seeds (state, events, runs, +queue tasks, worktrees, branches, logs, baseline mappings); remove only other seeds' data. 
+SEED_IDS can be comma-separated, e.g. --preserve-seeds seed-a,seed-b,seed-c. +""" + +from __future__ import annotations + +import argparse +import json +import shutil +import subprocess +from pathlib import Path + + +def _read_json(path: Path, default: object) -> object: + if not path.exists(): + return default + return json.loads(path.read_text(encoding="utf-8")) + + +def _write_json(path: Path, payload: object) -> None: + path.parent.mkdir(parents=True, exist_ok=True) + path.write_text(json.dumps(payload, indent=2, ensure_ascii=False), encoding="utf-8") + + +def run_git(args: list[str], cwd: Path, dry_run: bool = False) -> list[str]: + cmd = ["git", *args] + if dry_run: + print(f"[dry-run] {' '.join(cmd)}") + return [] + proc = subprocess.run(cmd, cwd=cwd, text=True, capture_output=True) + if proc.returncode != 0: + raise RuntimeError( + f"Command failed: {' '.join(cmd)}\n" + f"stdout:\n{proc.stdout}\n" + f"stderr:\n{proc.stderr}" + ) + return [line for line in proc.stdout.splitlines() if line.strip()] + + +def is_broken_worktree_remove_error(error: RuntimeError) -> bool: + msg = str(error) + return ( + "worktree remove --force" in msg + and "validation failed, cannot remove working tree" in msg + and ".git' does not exist" in msg + ) + + +def remove_children(path: Path, dry_run: bool = False) -> None: + if not path.exists(): + return + for child in path.iterdir(): + if dry_run: + print(f"[dry-run] remove {child}") + continue + if child.is_dir(): + shutil.rmtree(child, ignore_errors=True) + else: + child.unlink(missing_ok=True) + + +def remove_pycache_dirs(repo_root: Path, dry_run: bool = False) -> None: + for pycache in repo_root.rglob("__pycache__"): + parts = set(pycache.parts) + if ".venv" in parts or ".git" in parts: + continue + if pycache.is_dir(): + if dry_run: + print(f"[dry-run] remove {pycache}") + else: + shutil.rmtree(pycache, ignore_errors=True) + + +def _gather_preserved_seed_info( + repo_root: Path, seed_ids: list[str] +) -> tuple[set[str], 
set[str]]: + """Return (preserved_run_ids, baseline_branches). Exits if any seed not found.""" + comp = repo_root / "component_system" + history = comp / "history" + state = history / "state" + seeds_dir = state / "seeds" + runs_dir = state / "runs" + preserved_ids = set(seed_ids) + baseline_branches: set[str] = set() + run_ids: set[str] = set() + + for seed_id in seed_ids: + seed_file = seeds_dir / f"{seed_id}.json" + if not seed_file.exists(): + raise SystemExit(f"Seed not found: {seed_id} (no {seed_file})") + seed_data = _read_json(seed_file, {}) + if isinstance(seed_data, dict): + bl = seed_data.get("baseline_branch") + if isinstance(bl, str): + baseline_branches.add(bl) + + for path in runs_dir.glob("*.json"): + data = _read_json(path, {}) + if isinstance(data, dict) and data.get("seed_id") in preserved_ids: + rid = data.get("run_id") + if isinstance(rid, str): + run_ids.add(rid) + return run_ids, baseline_branches + + +def _clean_state_preserving_seeds( + repo_root: Path, + preserved_seed_ids: set[str], + preserved_run_ids: set[str], + dry_run: bool, +) -> None: + comp = repo_root / "component_system" + history = comp / "history" + state = history / "state" + seeds_dir = state / "seeds" + events_dir = state / "events" + runs_dir = state / "runs" + + for path in seeds_dir.glob("*.json"): + if path.stem not in preserved_seed_ids: + if dry_run: + print(f"[dry-run] remove {path}") + else: + path.unlink(missing_ok=True) + + for path in events_dir.glob("*.json"): + if path.stem not in preserved_seed_ids: + if dry_run: + print(f"[dry-run] remove {path}") + else: + path.unlink(missing_ok=True) + + for path in runs_dir.glob("*.json"): + data = _read_json(path, {}) + rid = data.get("run_id") if isinstance(data, dict) else None + if rid not in preserved_run_ids: + if dry_run: + print(f"[dry-run] remove {path}") + else: + path.unlink(missing_ok=True) + + +def _clean_queue_preserving_seeds( + repo_root: Path, preserved_seed_ids: set[str], dry_run: bool +) -> None: + 
history = repo_root / "component_system" / "history" / "queue" + stage_dirs = [ + history / "p", + history / "dca", + history / "direct", + history / "in_progress", + history / "done", + history / "error", + ] + for stage_dir in stage_dirs: + if not stage_dir.exists(): + continue + for path in stage_dir.glob("*.json"): + data = _read_json(path, {}) + task_seed = data.get("seed_id") if isinstance(data, dict) else None + if task_seed not in preserved_seed_ids: + if dry_run: + print(f"[dry-run] remove {path}") + else: + path.unlink(missing_ok=True) + + +def _clean_worktrees_preserving_seeds( + repo_root: Path, + preserved_seed_ids: set[str], + dry_run: bool, +) -> None: + worktrees_dir = repo_root / "component_system" / "history" / "worktrees" + if not worktrees_dir.exists(): + return + keep_names = preserved_seed_ids | {"baseline"} + for child in worktrees_dir.iterdir(): + if child.is_dir() and child.name not in keep_names: + if dry_run: + print(f"[dry-run] remove {child}") + else: + shutil.rmtree(child, ignore_errors=True) + + +def _clean_logs_preserving_seed( + repo_root: Path, + preserved_run_ids: set[str], + dry_run: bool, +) -> None: + logs_dir = repo_root / "component_system" / "history" / "logs" + if not logs_dir.exists(): + return + for path in logs_dir.iterdir(): + if not path.is_file(): + continue + # logs: {run_id}.stdout.log, {run_id}.stderr.log, {run_id}.prompt.txt + run_id = path.stem + if path.suffix in (".log", ".txt"): + run_id = run_id.rsplit(".", 1)[0] if "." 
in run_id else run_id + if run_id not in preserved_run_ids: + if dry_run: + print(f"[dry-run] remove {path}") + else: + path.unlink(missing_ok=True) + + +def _filter_baseline_jsons_preserving_seeds( + repo_root: Path, + preserved_seed_ids: set[str], + baseline_branches: set[str], + dry_run: bool, +) -> None: + comp = repo_root / "component_system" + branches_path = comp / "baseline_branches.json" + metrics_path = comp / "baseline_metrics.json" + + if branches_path.exists(): + data = _read_json(branches_path, {}) + if isinstance(data, dict): + new_data = {k: v for k, v in data.items() if k in preserved_seed_ids} + if dry_run: + print(f"[dry-run] write {branches_path} (keep {preserved_seed_ids})") + else: + _write_json(branches_path, new_data) + + if metrics_path.exists(): + data = _read_json(metrics_path, {}) + if isinstance(data, dict): + keep_branches = preserved_seed_ids | baseline_branches + new_data = {k: v for k, v in data.items() if k in keep_branches} + if dry_run: + print(f"[dry-run] write {metrics_path} (keep branches {keep_branches})") + else: + _write_json(metrics_path, new_data) + + +def main() -> None: + parser = argparse.ArgumentParser(description="Clean local branches/worktrees and runtime history.") + parser.add_argument("--main-branch", default="master", help="Branch to keep. Default: master") + parser.add_argument( + "--preserve-seeds", + metavar="SEED_IDS", + help="Comma-separated seed IDs to keep (e.g. seed-a,seed-b). 
Keep their state, events, runs, queue, worktrees, branches, logs, baseline mappings; remove only other seeds.", + ) + parser.add_argument("--dry-run", action="store_true", help="Print actions without changing anything") + args = parser.parse_args() + + repo_root = Path.cwd().resolve() + print(f"Repository: {repo_root}") + + raw_preserve = getattr(args, "preserve_seeds", None) + preserve_seeds: list[str] = ( + [s.strip() for s in raw_preserve.split(",") if s.strip()] if raw_preserve else [] + ) + preserved_run_ids: set[str] = set() + baseline_branches: set[str] = set() + preserved_seed_ids: set[str] = set() + if preserve_seeds: + preserved_seed_ids = set(preserve_seeds) + print(f"Preserving everything for seeds: {', '.join(sorted(preserved_seed_ids))}") + preserved_run_ids, baseline_branches = _gather_preserved_seed_info(repo_root, preserve_seeds) + print(f" runs to keep: {len(preserved_run_ids)}") + + print("Verifying git repository...") + run_git(["rev-parse", "--is-inside-work-tree"], cwd=repo_root, dry_run=args.dry_run) + + print(f"Checking out '{args.main_branch}'...") + run_git(["checkout", args.main_branch], cwd=repo_root, dry_run=args.dry_run) + + print("Removing extra worktrees...") + run_git(["worktree", "prune"], cwd=repo_root, dry_run=args.dry_run) + wt_lines = run_git(["worktree", "list", "--porcelain"], cwd=repo_root, dry_run=args.dry_run) + worktrees: list[Path] = [] + for line in wt_lines: + if line.startswith("worktree "): + worktrees.append(Path(line[len("worktree ") :]).resolve()) + + branches_to_keep = {args.main_branch} | preserved_seed_ids + worktree_keep_names = preserved_seed_ids | {"baseline"} if preserved_seed_ids else set() + + for wt in worktrees: + if wt == repo_root: + continue + if worktree_keep_names and wt.name in worktree_keep_names: + print(f" - keeping worktree {wt} (preserved: {wt.name})") + continue + print(f" - removing worktree {wt}") + try: + run_git(["worktree", "remove", "--force", str(wt)], cwd=repo_root, 
dry_run=args.dry_run) + except RuntimeError as error: + if not is_broken_worktree_remove_error(error): + raise + print(f" ! stale/broken worktree metadata detected, deleting directory: {wt}") + if args.dry_run: + print(f"[dry-run] remove {wt}") + else: + shutil.rmtree(wt, ignore_errors=True) + run_git(["worktree", "prune"], cwd=repo_root, dry_run=args.dry_run) + + print(f"Deleting local branches except {sorted(branches_to_keep)}...") + branches = run_git( + ["for-each-ref", "--format=%(refname:short)", "refs/heads"], + cwd=repo_root, + dry_run=args.dry_run, + ) + for branch in branches: + if branch not in branches_to_keep: + print(f" - deleting branch {branch}") + run_git(["branch", "-D", branch], cwd=repo_root, dry_run=args.dry_run) + + history_root = repo_root / "component_system" / "history" + if preserved_seed_ids: + print("Clearing component-system state (keeping preserved seeds)...") + _clean_state_preserving_seeds( + repo_root, preserved_seed_ids, preserved_run_ids, args.dry_run + ) + print("Clearing queue (keeping tasks for preserved seeds)...") + _clean_queue_preserving_seeds(repo_root, preserved_seed_ids, args.dry_run) + print("Clearing worktrees (keeping preserved seeds + baseline)...") + _clean_worktrees_preserving_seeds(repo_root, preserved_seed_ids, args.dry_run) + print("Clearing logs (keeping logs for preserved seed runs)...") + _clean_logs_preserving_seed(repo_root, preserved_run_ids, args.dry_run) + print("Filtering baseline_branches.json and baseline_metrics.json...") + _filter_baseline_jsons_preserving_seeds( + repo_root, preserved_seed_ids, baseline_branches, args.dry_run + ) + else: + print("Clearing component-system runtime/history artifacts...") + for name in ("state", "queue", "worktrees", "logs"): + remove_children(history_root / name, dry_run=args.dry_run) + + pytest_cache = repo_root / ".pytest_cache" + if pytest_cache.exists(): + if args.dry_run: + print(f"[dry-run] remove {pytest_cache}") + else: + shutil.rmtree(pytest_cache, 
ignore_errors=True) + + results_tsv = repo_root / "results.tsv" + if results_tsv.exists(): + if args.dry_run: + print(f"[dry-run] remove {results_tsv}") + else: + results_tsv.unlink(missing_ok=True) + + print("Removing __pycache__ directories...") + remove_pycache_dirs(repo_root, dry_run=args.dry_run) + + print("Done.") + print("Remaining branches:") + for branch in run_git(["branch", "--format=%(refname:short)"], cwd=repo_root, dry_run=args.dry_run): + print(f" {branch}") + + +if __name__ == "__main__": + main() diff --git a/uv.lock b/uv.lock index c840d62f5..90a4e722b 100644 --- a/uv.lock +++ b/uv.lock @@ -1,5 +1,5 @@ version = 1 -revision = 3 +revision = 2 requires-python = ">=3.10" resolution-markers = [ "python_full_version >= '3.14' and sys_platform == 'linux'", @@ -27,6 +27,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/1e/d3/26bf1008eb3d2daa8ef4cacc7f3bfdc11818d111f7e2d0201bc6e3b49d45/annotated_doc-0.0.4-py3-none-any.whl", hash = "sha256:571ac1dc6991c450b25a9c2d84a3705e2ae7a53467b5d111c24fa8baabbed320", size = 5303, upload-time = "2025-11-10T22:07:40.673Z" }, ] +[[package]] +name = "annotated-types" +version = "0.7.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/ee/67/531ea369ba64dcff5ec9c3402f9f51bf748cec26dde048a2f973a4eea7f5/annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89", size = 16081, upload-time = "2024-05-20T21:33:25.928Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/78/b6/6307fbef88d9b5ee7421e68d78a9f162e0da4900bc5f5793f6d3d0e34fb8/annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53", size = 13643, upload-time = "2024-05-20T21:33:24.1Z" }, +] + [[package]] name = "anyio" version = "4.12.1" @@ -41,11 +50,28 @@ wheels = [ { url = 
"https://files.pythonhosted.org/packages/38/0e/27be9fdef66e72d64c0cdc3cc2823101b80585f8119b5c112c2e8f5f7dab/anyio-4.12.1-py3-none-any.whl", hash = "sha256:d405828884fc140aa80a3c667b8beed277f1dfedec42ba031bd6ac3db606ab6c", size = 113592, upload-time = "2026-01-06T11:45:19.497Z" }, ] +[[package]] +name = "arxiv" +version = "2.4.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "feedparser" }, + { name = "requests" }, + { name = "typing-extensions", marker = "python_full_version < '3.11'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/24/6e/647dd134e66d3ea6ff8aba2a177a37c74245625cfc58184e3aff99c8d8ec/arxiv-2.4.1.tar.gz", hash = "sha256:691606c1069bcca8316fcb082f5d15e65f1f24a021b0b87f01b9fa56347f63c8", size = 74975, upload-time = "2026-03-04T03:05:33.991Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/3d/6a/297380dc42fa25dff095feda66d46f7abba77ba54579d079071a2459e8d3/arxiv-2.4.1-py3-none-any.whl", hash = "sha256:060d678410ffc224ada01089f877b7676f250e37f96c140bad6c287afadb15d8", size = 12106, upload-time = "2026-03-04T03:05:33.029Z" }, +] + [[package]] name = "autoresearch" version = "0.1.0" source = { virtual = "." 
} dependencies = [ + { name = "arxiv" }, + { name = "fastapi" }, + { name = "jinja2" }, { name = "kernels" }, { name = "matplotlib" }, { name = "numpy", version = "2.2.6", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.11'" }, @@ -53,23 +79,30 @@ dependencies = [ { name = "pandas", version = "2.3.3", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.11'" }, { name = "pandas", version = "3.0.1", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.11'" }, { name = "pyarrow" }, + { name = "python-multipart" }, { name = "requests" }, { name = "rustbpe" }, { name = "tiktoken" }, { name = "torch" }, + { name = "uvicorn" }, ] [package.metadata] requires-dist = [ + { name = "arxiv", specifier = ">=2.4.1" }, + { name = "fastapi", specifier = ">=0.116.0" }, + { name = "jinja2", specifier = ">=3.1.6" }, { name = "kernels", specifier = ">=0.11.7" }, { name = "matplotlib", specifier = ">=3.10.8" }, { name = "numpy", specifier = ">=2.2.6" }, { name = "pandas", specifier = ">=2.3.3" }, { name = "pyarrow", specifier = ">=21.0.0" }, + { name = "python-multipart", specifier = ">=0.0.20" }, { name = "requests", specifier = ">=2.32.0" }, { name = "rustbpe", specifier = ">=0.1.0" }, { name = "tiktoken", specifier = ">=0.11.0" }, { name = "torch", specifier = "==2.9.1", index = "https://download.pytorch.org/whl/cu128" }, + { name = "uvicorn", specifier = ">=0.35.0" }, ] [[package]] @@ -379,6 +412,34 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/8a/0e/97c33bf5009bdbac74fd2beace167cab3f978feb69cc36f1ef79360d6c4e/exceptiongroup-1.3.1-py3-none-any.whl", hash = "sha256:a7a39a3bd276781e98394987d3a5701d0c4edffb633bb7a5144577f82c773598", size = 16740, upload-time = "2025-11-21T23:01:53.443Z" }, ] +[[package]] +name = "fastapi" +version = "0.135.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "annotated-doc" }, + { name = "pydantic" }, 
+ { name = "starlette" }, + { name = "typing-extensions" }, + { name = "typing-inspection" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/e7/7b/f8e0211e9380f7195ba3f3d40c292594fd81ba8ec4629e3854c353aaca45/fastapi-0.135.1.tar.gz", hash = "sha256:d04115b508d936d254cea545b7312ecaa58a7b3a0f84952535b4c9afae7668cd", size = 394962, upload-time = "2026-03-01T18:18:29.369Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e4/72/42e900510195b23a56bde950d26a51f8b723846bfcaa0286e90287f0422b/fastapi-0.135.1-py3-none-any.whl", hash = "sha256:46e2fc5745924b7c840f71ddd277382af29ce1cdb7d5eab5bf697e3fb9999c9e", size = 116999, upload-time = "2026-03-01T18:18:30.831Z" }, +] + +[[package]] +name = "feedparser" +version = "6.0.12" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "sgmllib3k" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/dc/79/db7edb5e77d6dfbc54d7d9df72828be4318275b2e580549ff45a962f6461/feedparser-6.0.12.tar.gz", hash = "sha256:64f76ce90ae3e8ef5d1ede0f8d3b50ce26bcce71dd8ae5e82b1cd2d4a5f94228", size = 286579, upload-time = "2025-09-10T13:33:59.486Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/4e/eb/c96d64137e29ae17d83ad2552470bafe3a7a915e85434d9942077d7fd011/feedparser-6.0.12-py3-none-any.whl", hash = "sha256:6bbff10f5a52662c00a2e3f86a38928c37c48f77b3c511aedcd51de933549324", size = 81480, upload-time = "2025-09-10T13:33:58.022Z" }, +] + [[package]] name = "filelock" version = "3.24.3" @@ -1524,6 +1585,139 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/50/f2/c0e76a0b451ffdf0cf788932e182758eb7558953f4f27f1aff8e2518b653/pyarrow-23.0.1-cp314-cp314t-win_amd64.whl", hash = "sha256:527e8d899f14bd15b740cd5a54ad56b7f98044955373a17179d5956ddb93d9ce", size = 28365807, upload-time = "2026-02-16T10:14:03.892Z" }, ] +[[package]] +name = "pydantic" +version = "2.12.5" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "annotated-types" }, + { 
name = "pydantic-core" }, + { name = "typing-extensions" }, + { name = "typing-inspection" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/69/44/36f1a6e523abc58ae5f928898e4aca2e0ea509b5aa6f6f392a5d882be928/pydantic-2.12.5.tar.gz", hash = "sha256:4d351024c75c0f085a9febbb665ce8c0c6ec5d30e903bdb6394b7ede26aebb49", size = 821591, upload-time = "2025-11-26T15:11:46.471Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/5a/87/b70ad306ebb6f9b585f114d0ac2137d792b48be34d732d60e597c2f8465a/pydantic-2.12.5-py3-none-any.whl", hash = "sha256:e561593fccf61e8a20fc46dfc2dfe075b8be7d0188df33f221ad1f0139180f9d", size = 463580, upload-time = "2025-11-26T15:11:44.605Z" }, +] + +[[package]] +name = "pydantic-core" +version = "2.41.5" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/71/70/23b021c950c2addd24ec408e9ab05d59b035b39d97cdc1130e1bce647bb6/pydantic_core-2.41.5.tar.gz", hash = "sha256:08daa51ea16ad373ffd5e7606252cc32f07bc72b28284b6bc9c6df804816476e", size = 460952, upload-time = "2025-11-04T13:43:49.098Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c6/90/32c9941e728d564b411d574d8ee0cf09b12ec978cb22b294995bae5549a5/pydantic_core-2.41.5-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:77b63866ca88d804225eaa4af3e664c5faf3568cea95360d21f4725ab6e07146", size = 2107298, upload-time = "2025-11-04T13:39:04.116Z" }, + { url = "https://files.pythonhosted.org/packages/fb/a8/61c96a77fe28993d9a6fb0f4127e05430a267b235a124545d79fea46dd65/pydantic_core-2.41.5-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:dfa8a0c812ac681395907e71e1274819dec685fec28273a28905df579ef137e2", size = 1901475, upload-time = "2025-11-04T13:39:06.055Z" }, + { url = 
"https://files.pythonhosted.org/packages/5d/b6/338abf60225acc18cdc08b4faef592d0310923d19a87fba1faf05af5346e/pydantic_core-2.41.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5921a4d3ca3aee735d9fd163808f5e8dd6c6972101e4adbda9a4667908849b97", size = 1918815, upload-time = "2025-11-04T13:39:10.41Z" }, + { url = "https://files.pythonhosted.org/packages/d1/1c/2ed0433e682983d8e8cba9c8d8ef274d4791ec6a6f24c58935b90e780e0a/pydantic_core-2.41.5-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e25c479382d26a2a41b7ebea1043564a937db462816ea07afa8a44c0866d52f9", size = 2065567, upload-time = "2025-11-04T13:39:12.244Z" }, + { url = "https://files.pythonhosted.org/packages/b3/24/cf84974ee7d6eae06b9e63289b7b8f6549d416b5c199ca2d7ce13bbcf619/pydantic_core-2.41.5-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f547144f2966e1e16ae626d8ce72b4cfa0caedc7fa28052001c94fb2fcaa1c52", size = 2230442, upload-time = "2025-11-04T13:39:13.962Z" }, + { url = "https://files.pythonhosted.org/packages/fd/21/4e287865504b3edc0136c89c9c09431be326168b1eb7841911cbc877a995/pydantic_core-2.41.5-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6f52298fbd394f9ed112d56f3d11aabd0d5bd27beb3084cc3d8ad069483b8941", size = 2350956, upload-time = "2025-11-04T13:39:15.889Z" }, + { url = "https://files.pythonhosted.org/packages/a8/76/7727ef2ffa4b62fcab916686a68a0426b9b790139720e1934e8ba797e238/pydantic_core-2.41.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:100baa204bb412b74fe285fb0f3a385256dad1d1879f0a5cb1499ed2e83d132a", size = 2068253, upload-time = "2025-11-04T13:39:17.403Z" }, + { url = "https://files.pythonhosted.org/packages/d5/8c/a4abfc79604bcb4c748e18975c44f94f756f08fb04218d5cb87eb0d3a63e/pydantic_core-2.41.5-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:05a2c8852530ad2812cb7914dc61a1125dc4e06252ee98e5638a12da6cc6fb6c", size = 2177050, 
upload-time = "2025-11-04T13:39:19.351Z" }, + { url = "https://files.pythonhosted.org/packages/67/b1/de2e9a9a79b480f9cb0b6e8b6ba4c50b18d4e89852426364c66aa82bb7b3/pydantic_core-2.41.5-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:29452c56df2ed968d18d7e21f4ab0ac55e71dc59524872f6fc57dcf4a3249ed2", size = 2147178, upload-time = "2025-11-04T13:39:21Z" }, + { url = "https://files.pythonhosted.org/packages/16/c1/dfb33f837a47b20417500efaa0378adc6635b3c79e8369ff7a03c494b4ac/pydantic_core-2.41.5-cp310-cp310-musllinux_1_1_armv7l.whl", hash = "sha256:d5160812ea7a8a2ffbe233d8da666880cad0cbaf5d4de74ae15c313213d62556", size = 2341833, upload-time = "2025-11-04T13:39:22.606Z" }, + { url = "https://files.pythonhosted.org/packages/47/36/00f398642a0f4b815a9a558c4f1dca1b4020a7d49562807d7bc9ff279a6c/pydantic_core-2.41.5-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:df3959765b553b9440adfd3c795617c352154e497a4eaf3752555cfb5da8fc49", size = 2321156, upload-time = "2025-11-04T13:39:25.843Z" }, + { url = "https://files.pythonhosted.org/packages/7e/70/cad3acd89fde2010807354d978725ae111ddf6d0ea46d1ea1775b5c1bd0c/pydantic_core-2.41.5-cp310-cp310-win32.whl", hash = "sha256:1f8d33a7f4d5a7889e60dc39856d76d09333d8a6ed0f5f1190635cbec70ec4ba", size = 1989378, upload-time = "2025-11-04T13:39:27.92Z" }, + { url = "https://files.pythonhosted.org/packages/76/92/d338652464c6c367e5608e4488201702cd1cbb0f33f7b6a85a60fe5f3720/pydantic_core-2.41.5-cp310-cp310-win_amd64.whl", hash = "sha256:62de39db01b8d593e45871af2af9e497295db8d73b085f6bfd0b18c83c70a8f9", size = 2013622, upload-time = "2025-11-04T13:39:29.848Z" }, + { url = "https://files.pythonhosted.org/packages/e8/72/74a989dd9f2084b3d9530b0915fdda64ac48831c30dbf7c72a41a5232db8/pydantic_core-2.41.5-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:a3a52f6156e73e7ccb0f8cced536adccb7042be67cb45f9562e12b319c119da6", size = 2105873, upload-time = "2025-11-04T13:39:31.373Z" }, + { url = 
"https://files.pythonhosted.org/packages/12/44/37e403fd9455708b3b942949e1d7febc02167662bf1a7da5b78ee1ea2842/pydantic_core-2.41.5-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:7f3bf998340c6d4b0c9a2f02d6a400e51f123b59565d74dc60d252ce888c260b", size = 1899826, upload-time = "2025-11-04T13:39:32.897Z" }, + { url = "https://files.pythonhosted.org/packages/33/7f/1d5cab3ccf44c1935a359d51a8a2a9e1a654b744b5e7f80d41b88d501eec/pydantic_core-2.41.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:378bec5c66998815d224c9ca994f1e14c0c21cb95d2f52b6021cc0b2a58f2a5a", size = 1917869, upload-time = "2025-11-04T13:39:34.469Z" }, + { url = "https://files.pythonhosted.org/packages/6e/6a/30d94a9674a7fe4f4744052ed6c5e083424510be1e93da5bc47569d11810/pydantic_core-2.41.5-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e7b576130c69225432866fe2f4a469a85a54ade141d96fd396dffcf607b558f8", size = 2063890, upload-time = "2025-11-04T13:39:36.053Z" }, + { url = "https://files.pythonhosted.org/packages/50/be/76e5d46203fcb2750e542f32e6c371ffa9b8ad17364cf94bb0818dbfb50c/pydantic_core-2.41.5-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6cb58b9c66f7e4179a2d5e0f849c48eff5c1fca560994d6eb6543abf955a149e", size = 2229740, upload-time = "2025-11-04T13:39:37.753Z" }, + { url = "https://files.pythonhosted.org/packages/d3/ee/fed784df0144793489f87db310a6bbf8118d7b630ed07aa180d6067e653a/pydantic_core-2.41.5-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:88942d3a3dff3afc8288c21e565e476fc278902ae4d6d134f1eeda118cc830b1", size = 2350021, upload-time = "2025-11-04T13:39:40.94Z" }, + { url = "https://files.pythonhosted.org/packages/c8/be/8fed28dd0a180dca19e72c233cbf58efa36df055e5b9d90d64fd1740b828/pydantic_core-2.41.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f31d95a179f8d64d90f6831d71fa93290893a33148d890ba15de25642c5d075b", size = 2066378, upload-time = 
"2025-11-04T13:39:42.523Z" }, + { url = "https://files.pythonhosted.org/packages/b0/3b/698cf8ae1d536a010e05121b4958b1257f0b5522085e335360e53a6b1c8b/pydantic_core-2.41.5-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:c1df3d34aced70add6f867a8cf413e299177e0c22660cc767218373d0779487b", size = 2175761, upload-time = "2025-11-04T13:39:44.553Z" }, + { url = "https://files.pythonhosted.org/packages/b8/ba/15d537423939553116dea94ce02f9c31be0fa9d0b806d427e0308ec17145/pydantic_core-2.41.5-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:4009935984bd36bd2c774e13f9a09563ce8de4abaa7226f5108262fa3e637284", size = 2146303, upload-time = "2025-11-04T13:39:46.238Z" }, + { url = "https://files.pythonhosted.org/packages/58/7f/0de669bf37d206723795f9c90c82966726a2ab06c336deba4735b55af431/pydantic_core-2.41.5-cp311-cp311-musllinux_1_1_armv7l.whl", hash = "sha256:34a64bc3441dc1213096a20fe27e8e128bd3ff89921706e83c0b1ac971276594", size = 2340355, upload-time = "2025-11-04T13:39:48.002Z" }, + { url = "https://files.pythonhosted.org/packages/e5/de/e7482c435b83d7e3c3ee5ee4451f6e8973cff0eb6007d2872ce6383f6398/pydantic_core-2.41.5-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:c9e19dd6e28fdcaa5a1de679aec4141f691023916427ef9bae8584f9c2fb3b0e", size = 2319875, upload-time = "2025-11-04T13:39:49.705Z" }, + { url = "https://files.pythonhosted.org/packages/fe/e6/8c9e81bb6dd7560e33b9053351c29f30c8194b72f2d6932888581f503482/pydantic_core-2.41.5-cp311-cp311-win32.whl", hash = "sha256:2c010c6ded393148374c0f6f0bf89d206bf3217f201faa0635dcd56bd1520f6b", size = 1987549, upload-time = "2025-11-04T13:39:51.842Z" }, + { url = "https://files.pythonhosted.org/packages/11/66/f14d1d978ea94d1bc21fc98fcf570f9542fe55bfcc40269d4e1a21c19bf7/pydantic_core-2.41.5-cp311-cp311-win_amd64.whl", hash = "sha256:76ee27c6e9c7f16f47db7a94157112a2f3a00e958bc626e2f4ee8bec5c328fbe", size = 2011305, upload-time = "2025-11-04T13:39:53.485Z" }, + { url = 
"https://files.pythonhosted.org/packages/56/d8/0e271434e8efd03186c5386671328154ee349ff0354d83c74f5caaf096ed/pydantic_core-2.41.5-cp311-cp311-win_arm64.whl", hash = "sha256:4bc36bbc0b7584de96561184ad7f012478987882ebf9f9c389b23f432ea3d90f", size = 1972902, upload-time = "2025-11-04T13:39:56.488Z" }, + { url = "https://files.pythonhosted.org/packages/5f/5d/5f6c63eebb5afee93bcaae4ce9a898f3373ca23df3ccaef086d0233a35a7/pydantic_core-2.41.5-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:f41a7489d32336dbf2199c8c0a215390a751c5b014c2c1c5366e817202e9cdf7", size = 2110990, upload-time = "2025-11-04T13:39:58.079Z" }, + { url = "https://files.pythonhosted.org/packages/aa/32/9c2e8ccb57c01111e0fd091f236c7b371c1bccea0fa85247ac55b1e2b6b6/pydantic_core-2.41.5-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:070259a8818988b9a84a449a2a7337c7f430a22acc0859c6b110aa7212a6d9c0", size = 1896003, upload-time = "2025-11-04T13:39:59.956Z" }, + { url = "https://files.pythonhosted.org/packages/68/b8/a01b53cb0e59139fbc9e4fda3e9724ede8de279097179be4ff31f1abb65a/pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e96cea19e34778f8d59fe40775a7a574d95816eb150850a85a7a4c8f4b94ac69", size = 1919200, upload-time = "2025-11-04T13:40:02.241Z" }, + { url = "https://files.pythonhosted.org/packages/38/de/8c36b5198a29bdaade07b5985e80a233a5ac27137846f3bc2d3b40a47360/pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ed2e99c456e3fadd05c991f8f437ef902e00eedf34320ba2b0842bd1c3ca3a75", size = 2052578, upload-time = "2025-11-04T13:40:04.401Z" }, + { url = "https://files.pythonhosted.org/packages/00/b5/0e8e4b5b081eac6cb3dbb7e60a65907549a1ce035a724368c330112adfdd/pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:65840751b72fbfd82c3c640cff9284545342a4f1eb1586ad0636955b261b0b05", size = 2208504, upload-time = "2025-11-04T13:40:06.072Z" }, + { url = 
"https://files.pythonhosted.org/packages/77/56/87a61aad59c7c5b9dc8caad5a41a5545cba3810c3e828708b3d7404f6cef/pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e536c98a7626a98feb2d3eaf75944ef6f3dbee447e1f841eae16f2f0a72d8ddc", size = 2335816, upload-time = "2025-11-04T13:40:07.835Z" }, + { url = "https://files.pythonhosted.org/packages/0d/76/941cc9f73529988688a665a5c0ecff1112b3d95ab48f81db5f7606f522d3/pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eceb81a8d74f9267ef4081e246ffd6d129da5d87e37a77c9bde550cb04870c1c", size = 2075366, upload-time = "2025-11-04T13:40:09.804Z" }, + { url = "https://files.pythonhosted.org/packages/d3/43/ebef01f69baa07a482844faaa0a591bad1ef129253ffd0cdaa9d8a7f72d3/pydantic_core-2.41.5-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d38548150c39b74aeeb0ce8ee1d8e82696f4a4e16ddc6de7b1d8823f7de4b9b5", size = 2171698, upload-time = "2025-11-04T13:40:12.004Z" }, + { url = "https://files.pythonhosted.org/packages/b1/87/41f3202e4193e3bacfc2c065fab7706ebe81af46a83d3e27605029c1f5a6/pydantic_core-2.41.5-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:c23e27686783f60290e36827f9c626e63154b82b116d7fe9adba1fda36da706c", size = 2132603, upload-time = "2025-11-04T13:40:13.868Z" }, + { url = "https://files.pythonhosted.org/packages/49/7d/4c00df99cb12070b6bccdef4a195255e6020a550d572768d92cc54dba91a/pydantic_core-2.41.5-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:482c982f814460eabe1d3bb0adfdc583387bd4691ef00b90575ca0d2b6fe2294", size = 2329591, upload-time = "2025-11-04T13:40:15.672Z" }, + { url = "https://files.pythonhosted.org/packages/cc/6a/ebf4b1d65d458f3cda6a7335d141305dfa19bdc61140a884d165a8a1bbc7/pydantic_core-2.41.5-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:bfea2a5f0b4d8d43adf9d7b8bf019fb46fdd10a2e5cde477fbcb9d1fa08c68e1", size = 2319068, upload-time = "2025-11-04T13:40:17.532Z" }, + { url = 
"https://files.pythonhosted.org/packages/49/3b/774f2b5cd4192d5ab75870ce4381fd89cf218af999515baf07e7206753f0/pydantic_core-2.41.5-cp312-cp312-win32.whl", hash = "sha256:b74557b16e390ec12dca509bce9264c3bbd128f8a2c376eaa68003d7f327276d", size = 1985908, upload-time = "2025-11-04T13:40:19.309Z" }, + { url = "https://files.pythonhosted.org/packages/86/45/00173a033c801cacf67c190fef088789394feaf88a98a7035b0e40d53dc9/pydantic_core-2.41.5-cp312-cp312-win_amd64.whl", hash = "sha256:1962293292865bca8e54702b08a4f26da73adc83dd1fcf26fbc875b35d81c815", size = 2020145, upload-time = "2025-11-04T13:40:21.548Z" }, + { url = "https://files.pythonhosted.org/packages/f9/22/91fbc821fa6d261b376a3f73809f907cec5ca6025642c463d3488aad22fb/pydantic_core-2.41.5-cp312-cp312-win_arm64.whl", hash = "sha256:1746d4a3d9a794cacae06a5eaaccb4b8643a131d45fbc9af23e353dc0a5ba5c3", size = 1976179, upload-time = "2025-11-04T13:40:23.393Z" }, + { url = "https://files.pythonhosted.org/packages/87/06/8806241ff1f70d9939f9af039c6c35f2360cf16e93c2ca76f184e76b1564/pydantic_core-2.41.5-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:941103c9be18ac8daf7b7adca8228f8ed6bb7a1849020f643b3a14d15b1924d9", size = 2120403, upload-time = "2025-11-04T13:40:25.248Z" }, + { url = "https://files.pythonhosted.org/packages/94/02/abfa0e0bda67faa65fef1c84971c7e45928e108fe24333c81f3bfe35d5f5/pydantic_core-2.41.5-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:112e305c3314f40c93998e567879e887a3160bb8689ef3d2c04b6cc62c33ac34", size = 1896206, upload-time = "2025-11-04T13:40:27.099Z" }, + { url = "https://files.pythonhosted.org/packages/15/df/a4c740c0943e93e6500f9eb23f4ca7ec9bf71b19e608ae5b579678c8d02f/pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0cbaad15cb0c90aa221d43c00e77bb33c93e8d36e0bf74760cd00e732d10a6a0", size = 1919307, upload-time = "2025-11-04T13:40:29.806Z" }, + { url = 
"https://files.pythonhosted.org/packages/9a/e3/6324802931ae1d123528988e0e86587c2072ac2e5394b4bc2bc34b61ff6e/pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:03ca43e12fab6023fc79d28ca6b39b05f794ad08ec2feccc59a339b02f2b3d33", size = 2063258, upload-time = "2025-11-04T13:40:33.544Z" }, + { url = "https://files.pythonhosted.org/packages/c9/d4/2230d7151d4957dd79c3044ea26346c148c98fbf0ee6ebd41056f2d62ab5/pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:dc799088c08fa04e43144b164feb0c13f9a0bc40503f8df3e9fde58a3c0c101e", size = 2214917, upload-time = "2025-11-04T13:40:35.479Z" }, + { url = "https://files.pythonhosted.org/packages/e6/9f/eaac5df17a3672fef0081b6c1bb0b82b33ee89aa5cec0d7b05f52fd4a1fa/pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:97aeba56665b4c3235a0e52b2c2f5ae9cd071b8a8310ad27bddb3f7fb30e9aa2", size = 2332186, upload-time = "2025-11-04T13:40:37.436Z" }, + { url = "https://files.pythonhosted.org/packages/cf/4e/35a80cae583a37cf15604b44240e45c05e04e86f9cfd766623149297e971/pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:406bf18d345822d6c21366031003612b9c77b3e29ffdb0f612367352aab7d586", size = 2073164, upload-time = "2025-11-04T13:40:40.289Z" }, + { url = "https://files.pythonhosted.org/packages/bf/e3/f6e262673c6140dd3305d144d032f7bd5f7497d3871c1428521f19f9efa2/pydantic_core-2.41.5-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b93590ae81f7010dbe380cdeab6f515902ebcbefe0b9327cc4804d74e93ae69d", size = 2179146, upload-time = "2025-11-04T13:40:42.809Z" }, + { url = "https://files.pythonhosted.org/packages/75/c7/20bd7fc05f0c6ea2056a4565c6f36f8968c0924f19b7d97bbfea55780e73/pydantic_core-2.41.5-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:01a3d0ab748ee531f4ea6c3e48ad9dac84ddba4b0d82291f87248f2f9de8d740", size = 2137788, upload-time = 
"2025-11-04T13:40:44.752Z" }, + { url = "https://files.pythonhosted.org/packages/3a/8d/34318ef985c45196e004bc46c6eab2eda437e744c124ef0dbe1ff2c9d06b/pydantic_core-2.41.5-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:6561e94ba9dacc9c61bce40e2d6bdc3bfaa0259d3ff36ace3b1e6901936d2e3e", size = 2340133, upload-time = "2025-11-04T13:40:46.66Z" }, + { url = "https://files.pythonhosted.org/packages/9c/59/013626bf8c78a5a5d9350d12e7697d3d4de951a75565496abd40ccd46bee/pydantic_core-2.41.5-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:915c3d10f81bec3a74fbd4faebe8391013ba61e5a1a8d48c4455b923bdda7858", size = 2324852, upload-time = "2025-11-04T13:40:48.575Z" }, + { url = "https://files.pythonhosted.org/packages/1a/d9/c248c103856f807ef70c18a4f986693a46a8ffe1602e5d361485da502d20/pydantic_core-2.41.5-cp313-cp313-win32.whl", hash = "sha256:650ae77860b45cfa6e2cdafc42618ceafab3a2d9a3811fcfbd3bbf8ac3c40d36", size = 1994679, upload-time = "2025-11-04T13:40:50.619Z" }, + { url = "https://files.pythonhosted.org/packages/9e/8b/341991b158ddab181cff136acd2552c9f35bd30380422a639c0671e99a91/pydantic_core-2.41.5-cp313-cp313-win_amd64.whl", hash = "sha256:79ec52ec461e99e13791ec6508c722742ad745571f234ea6255bed38c6480f11", size = 2019766, upload-time = "2025-11-04T13:40:52.631Z" }, + { url = "https://files.pythonhosted.org/packages/73/7d/f2f9db34af103bea3e09735bb40b021788a5e834c81eedb541991badf8f5/pydantic_core-2.41.5-cp313-cp313-win_arm64.whl", hash = "sha256:3f84d5c1b4ab906093bdc1ff10484838aca54ef08de4afa9de0f5f14d69639cd", size = 1981005, upload-time = "2025-11-04T13:40:54.734Z" }, + { url = "https://files.pythonhosted.org/packages/ea/28/46b7c5c9635ae96ea0fbb779e271a38129df2550f763937659ee6c5dbc65/pydantic_core-2.41.5-cp314-cp314-macosx_10_12_x86_64.whl", hash = "sha256:3f37a19d7ebcdd20b96485056ba9e8b304e27d9904d233d7b1015db320e51f0a", size = 2119622, upload-time = "2025-11-04T13:40:56.68Z" }, + { url = 
"https://files.pythonhosted.org/packages/74/1a/145646e5687e8d9a1e8d09acb278c8535ebe9e972e1f162ed338a622f193/pydantic_core-2.41.5-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:1d1d9764366c73f996edd17abb6d9d7649a7eb690006ab6adbda117717099b14", size = 1891725, upload-time = "2025-11-04T13:40:58.807Z" }, + { url = "https://files.pythonhosted.org/packages/23/04/e89c29e267b8060b40dca97bfc64a19b2a3cf99018167ea1677d96368273/pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:25e1c2af0fce638d5f1988b686f3b3ea8cd7de5f244ca147c777769e798a9cd1", size = 1915040, upload-time = "2025-11-04T13:41:00.853Z" }, + { url = "https://files.pythonhosted.org/packages/84/a3/15a82ac7bd97992a82257f777b3583d3e84bdb06ba6858f745daa2ec8a85/pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:506d766a8727beef16b7adaeb8ee6217c64fc813646b424d0804d67c16eddb66", size = 2063691, upload-time = "2025-11-04T13:41:03.504Z" }, + { url = "https://files.pythonhosted.org/packages/74/9b/0046701313c6ef08c0c1cf0e028c67c770a4e1275ca73131563c5f2a310a/pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4819fa52133c9aa3c387b3328f25c1facc356491e6135b459f1de698ff64d869", size = 2213897, upload-time = "2025-11-04T13:41:05.804Z" }, + { url = "https://files.pythonhosted.org/packages/8a/cd/6bac76ecd1b27e75a95ca3a9a559c643b3afcd2dd62086d4b7a32a18b169/pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2b761d210c9ea91feda40d25b4efe82a1707da2ef62901466a42492c028553a2", size = 2333302, upload-time = "2025-11-04T13:41:07.809Z" }, + { url = "https://files.pythonhosted.org/packages/4c/d2/ef2074dc020dd6e109611a8be4449b98cd25e1b9b8a303c2f0fca2f2bcf7/pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:22f0fb8c1c583a3b6f24df2470833b40207e907b90c928cc8d3594b76f874375", size = 2064877, upload-time = 
"2025-11-04T13:41:09.827Z" }, + { url = "https://files.pythonhosted.org/packages/18/66/e9db17a9a763d72f03de903883c057b2592c09509ccfe468187f2a2eef29/pydantic_core-2.41.5-cp314-cp314-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2782c870e99878c634505236d81e5443092fba820f0373997ff75f90f68cd553", size = 2180680, upload-time = "2025-11-04T13:41:12.379Z" }, + { url = "https://files.pythonhosted.org/packages/d3/9e/3ce66cebb929f3ced22be85d4c2399b8e85b622db77dad36b73c5387f8f8/pydantic_core-2.41.5-cp314-cp314-musllinux_1_1_aarch64.whl", hash = "sha256:0177272f88ab8312479336e1d777f6b124537d47f2123f89cb37e0accea97f90", size = 2138960, upload-time = "2025-11-04T13:41:14.627Z" }, + { url = "https://files.pythonhosted.org/packages/a6/62/205a998f4327d2079326b01abee48e502ea739d174f0a89295c481a2272e/pydantic_core-2.41.5-cp314-cp314-musllinux_1_1_armv7l.whl", hash = "sha256:63510af5e38f8955b8ee5687740d6ebf7c2a0886d15a6d65c32814613681bc07", size = 2339102, upload-time = "2025-11-04T13:41:16.868Z" }, + { url = "https://files.pythonhosted.org/packages/3c/0d/f05e79471e889d74d3d88f5bd20d0ed189ad94c2423d81ff8d0000aab4ff/pydantic_core-2.41.5-cp314-cp314-musllinux_1_1_x86_64.whl", hash = "sha256:e56ba91f47764cc14f1daacd723e3e82d1a89d783f0f5afe9c364b8bb491ccdb", size = 2326039, upload-time = "2025-11-04T13:41:18.934Z" }, + { url = "https://files.pythonhosted.org/packages/ec/e1/e08a6208bb100da7e0c4b288eed624a703f4d129bde2da475721a80cab32/pydantic_core-2.41.5-cp314-cp314-win32.whl", hash = "sha256:aec5cf2fd867b4ff45b9959f8b20ea3993fc93e63c7363fe6851424c8a7e7c23", size = 1995126, upload-time = "2025-11-04T13:41:21.418Z" }, + { url = "https://files.pythonhosted.org/packages/48/5d/56ba7b24e9557f99c9237e29f5c09913c81eeb2f3217e40e922353668092/pydantic_core-2.41.5-cp314-cp314-win_amd64.whl", hash = "sha256:8e7c86f27c585ef37c35e56a96363ab8de4e549a95512445b85c96d3e2f7c1bf", size = 2015489, upload-time = "2025-11-04T13:41:24.076Z" }, + { url = 
"https://files.pythonhosted.org/packages/4e/bb/f7a190991ec9e3e0ba22e4993d8755bbc4a32925c0b5b42775c03e8148f9/pydantic_core-2.41.5-cp314-cp314-win_arm64.whl", hash = "sha256:e672ba74fbc2dc8eea59fb6d4aed6845e6905fc2a8afe93175d94a83ba2a01a0", size = 1977288, upload-time = "2025-11-04T13:41:26.33Z" }, + { url = "https://files.pythonhosted.org/packages/92/ed/77542d0c51538e32e15afe7899d79efce4b81eee631d99850edc2f5e9349/pydantic_core-2.41.5-cp314-cp314t-macosx_10_12_x86_64.whl", hash = "sha256:8566def80554c3faa0e65ac30ab0932b9e3a5cd7f8323764303d468e5c37595a", size = 2120255, upload-time = "2025-11-04T13:41:28.569Z" }, + { url = "https://files.pythonhosted.org/packages/bb/3d/6913dde84d5be21e284439676168b28d8bbba5600d838b9dca99de0fad71/pydantic_core-2.41.5-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:b80aa5095cd3109962a298ce14110ae16b8c1aece8b72f9dafe81cf597ad80b3", size = 1863760, upload-time = "2025-11-04T13:41:31.055Z" }, + { url = "https://files.pythonhosted.org/packages/5a/f0/e5e6b99d4191da102f2b0eb9687aaa7f5bea5d9964071a84effc3e40f997/pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3006c3dd9ba34b0c094c544c6006cc79e87d8612999f1a5d43b769b89181f23c", size = 1878092, upload-time = "2025-11-04T13:41:33.21Z" }, + { url = "https://files.pythonhosted.org/packages/71/48/36fb760642d568925953bcc8116455513d6e34c4beaa37544118c36aba6d/pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:72f6c8b11857a856bcfa48c86f5368439f74453563f951e473514579d44aa612", size = 2053385, upload-time = "2025-11-04T13:41:35.508Z" }, + { url = "https://files.pythonhosted.org/packages/20/25/92dc684dd8eb75a234bc1c764b4210cf2646479d54b47bf46061657292a8/pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5cb1b2f9742240e4bb26b652a5aeb840aa4b417c7748b6f8387927bc6e45e40d", size = 2218832, upload-time = "2025-11-04T13:41:37.732Z" }, + { url = 
"https://files.pythonhosted.org/packages/e2/09/f53e0b05023d3e30357d82eb35835d0f6340ca344720a4599cd663dca599/pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:bd3d54f38609ff308209bd43acea66061494157703364ae40c951f83ba99a1a9", size = 2327585, upload-time = "2025-11-04T13:41:40Z" }, + { url = "https://files.pythonhosted.org/packages/aa/4e/2ae1aa85d6af35a39b236b1b1641de73f5a6ac4d5a7509f77b814885760c/pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2ff4321e56e879ee8d2a879501c8e469414d948f4aba74a2d4593184eb326660", size = 2041078, upload-time = "2025-11-04T13:41:42.323Z" }, + { url = "https://files.pythonhosted.org/packages/cd/13/2e215f17f0ef326fc72afe94776edb77525142c693767fc347ed6288728d/pydantic_core-2.41.5-cp314-cp314t-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d0d2568a8c11bf8225044aa94409e21da0cb09dcdafe9ecd10250b2baad531a9", size = 2173914, upload-time = "2025-11-04T13:41:45.221Z" }, + { url = "https://files.pythonhosted.org/packages/02/7a/f999a6dcbcd0e5660bc348a3991c8915ce6599f4f2c6ac22f01d7a10816c/pydantic_core-2.41.5-cp314-cp314t-musllinux_1_1_aarch64.whl", hash = "sha256:a39455728aabd58ceabb03c90e12f71fd30fa69615760a075b9fec596456ccc3", size = 2129560, upload-time = "2025-11-04T13:41:47.474Z" }, + { url = "https://files.pythonhosted.org/packages/3a/b1/6c990ac65e3b4c079a4fb9f5b05f5b013afa0f4ed6780a3dd236d2cbdc64/pydantic_core-2.41.5-cp314-cp314t-musllinux_1_1_armv7l.whl", hash = "sha256:239edca560d05757817c13dc17c50766136d21f7cd0fac50295499ae24f90fdf", size = 2329244, upload-time = "2025-11-04T13:41:49.992Z" }, + { url = "https://files.pythonhosted.org/packages/d9/02/3c562f3a51afd4d88fff8dffb1771b30cfdfd79befd9883ee094f5b6c0d8/pydantic_core-2.41.5-cp314-cp314t-musllinux_1_1_x86_64.whl", hash = "sha256:2a5e06546e19f24c6a96a129142a75cee553cc018ffee48a460059b1185f4470", size = 2331955, upload-time = "2025-11-04T13:41:54.079Z" }, + { url = 
"https://files.pythonhosted.org/packages/5c/96/5fb7d8c3c17bc8c62fdb031c47d77a1af698f1d7a406b0f79aaa1338f9ad/pydantic_core-2.41.5-cp314-cp314t-win32.whl", hash = "sha256:b4ececa40ac28afa90871c2cc2b9ffd2ff0bf749380fbdf57d165fd23da353aa", size = 1988906, upload-time = "2025-11-04T13:41:56.606Z" }, + { url = "https://files.pythonhosted.org/packages/22/ed/182129d83032702912c2e2d8bbe33c036f342cc735737064668585dac28f/pydantic_core-2.41.5-cp314-cp314t-win_amd64.whl", hash = "sha256:80aa89cad80b32a912a65332f64a4450ed00966111b6615ca6816153d3585a8c", size = 1981607, upload-time = "2025-11-04T13:41:58.889Z" }, + { url = "https://files.pythonhosted.org/packages/9f/ed/068e41660b832bb0b1aa5b58011dea2a3fe0ba7861ff38c4d4904c1c1a99/pydantic_core-2.41.5-cp314-cp314t-win_arm64.whl", hash = "sha256:35b44f37a3199f771c3eaa53051bc8a70cd7b54f333531c59e29fd4db5d15008", size = 1974769, upload-time = "2025-11-04T13:42:01.186Z" }, + { url = "https://files.pythonhosted.org/packages/11/72/90fda5ee3b97e51c494938a4a44c3a35a9c96c19bba12372fb9c634d6f57/pydantic_core-2.41.5-graalpy311-graalpy242_311_native-macosx_10_12_x86_64.whl", hash = "sha256:b96d5f26b05d03cc60f11a7761a5ded1741da411e7fe0909e27a5e6a0cb7b034", size = 2115441, upload-time = "2025-11-04T13:42:39.557Z" }, + { url = "https://files.pythonhosted.org/packages/1f/53/8942f884fa33f50794f119012dc6a1a02ac43a56407adaac20463df8e98f/pydantic_core-2.41.5-graalpy311-graalpy242_311_native-macosx_11_0_arm64.whl", hash = "sha256:634e8609e89ceecea15e2d61bc9ac3718caaaa71963717bf3c8f38bfde64242c", size = 1930291, upload-time = "2025-11-04T13:42:42.169Z" }, + { url = "https://files.pythonhosted.org/packages/79/c8/ecb9ed9cd942bce09fc888ee960b52654fbdbede4ba6c2d6e0d3b1d8b49c/pydantic_core-2.41.5-graalpy311-graalpy242_311_native-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:93e8740d7503eb008aa2df04d3b9735f845d43ae845e6dcd2be0b55a2da43cd2", size = 1948632, upload-time = "2025-11-04T13:42:44.564Z" }, + { url = 
"https://files.pythonhosted.org/packages/2e/1b/687711069de7efa6af934e74f601e2a4307365e8fdc404703afc453eab26/pydantic_core-2.41.5-graalpy311-graalpy242_311_native-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f15489ba13d61f670dcc96772e733aad1a6f9c429cc27574c6cdaed82d0146ad", size = 2138905, upload-time = "2025-11-04T13:42:47.156Z" }, + { url = "https://files.pythonhosted.org/packages/09/32/59b0c7e63e277fa7911c2fc70ccfb45ce4b98991e7ef37110663437005af/pydantic_core-2.41.5-graalpy312-graalpy250_312_native-macosx_10_12_x86_64.whl", hash = "sha256:7da7087d756b19037bc2c06edc6c170eeef3c3bafcb8f532ff17d64dc427adfd", size = 2110495, upload-time = "2025-11-04T13:42:49.689Z" }, + { url = "https://files.pythonhosted.org/packages/aa/81/05e400037eaf55ad400bcd318c05bb345b57e708887f07ddb2d20e3f0e98/pydantic_core-2.41.5-graalpy312-graalpy250_312_native-macosx_11_0_arm64.whl", hash = "sha256:aabf5777b5c8ca26f7824cb4a120a740c9588ed58df9b2d196ce92fba42ff8dc", size = 1915388, upload-time = "2025-11-04T13:42:52.215Z" }, + { url = "https://files.pythonhosted.org/packages/6e/0d/e3549b2399f71d56476b77dbf3cf8937cec5cd70536bdc0e374a421d0599/pydantic_core-2.41.5-graalpy312-graalpy250_312_native-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c007fe8a43d43b3969e8469004e9845944f1a80e6acd47c150856bb87f230c56", size = 1942879, upload-time = "2025-11-04T13:42:56.483Z" }, + { url = "https://files.pythonhosted.org/packages/f7/07/34573da085946b6a313d7c42f82f16e8920bfd730665de2d11c0c37a74b5/pydantic_core-2.41.5-graalpy312-graalpy250_312_native-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:76d0819de158cd855d1cbb8fcafdf6f5cf1eb8e470abe056d5d161106e38062b", size = 2139017, upload-time = "2025-11-04T13:42:59.471Z" }, + { url = "https://files.pythonhosted.org/packages/e6/b0/1a2aa41e3b5a4ba11420aba2d091b2d17959c8d1519ece3627c371951e73/pydantic_core-2.41.5-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = 
"sha256:b5819cd790dbf0c5eb9f82c73c16b39a65dd6dd4d1439dcdea7816ec9adddab8", size = 2103351, upload-time = "2025-11-04T13:43:02.058Z" }, + { url = "https://files.pythonhosted.org/packages/a4/ee/31b1f0020baaf6d091c87900ae05c6aeae101fa4e188e1613c80e4f1ea31/pydantic_core-2.41.5-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:5a4e67afbc95fa5c34cf27d9089bca7fcab4e51e57278d710320a70b956d1b9a", size = 1925363, upload-time = "2025-11-04T13:43:05.159Z" }, + { url = "https://files.pythonhosted.org/packages/e1/89/ab8e86208467e467a80deaca4e434adac37b10a9d134cd2f99b28a01e483/pydantic_core-2.41.5-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ece5c59f0ce7d001e017643d8d24da587ea1f74f6993467d85ae8a5ef9d4f42b", size = 2135615, upload-time = "2025-11-04T13:43:08.116Z" }, + { url = "https://files.pythonhosted.org/packages/99/0a/99a53d06dd0348b2008f2f30884b34719c323f16c3be4e6cc1203b74a91d/pydantic_core-2.41.5-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:16f80f7abe3351f8ea6858914ddc8c77e02578544a0ebc15b4c2e1a0e813b0b2", size = 2175369, upload-time = "2025-11-04T13:43:12.49Z" }, + { url = "https://files.pythonhosted.org/packages/6d/94/30ca3b73c6d485b9bb0bc66e611cff4a7138ff9736b7e66bcf0852151636/pydantic_core-2.41.5-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:33cb885e759a705b426baada1fe68cbb0a2e68e34c5d0d0289a364cf01709093", size = 2144218, upload-time = "2025-11-04T13:43:15.431Z" }, + { url = "https://files.pythonhosted.org/packages/87/57/31b4f8e12680b739a91f472b5671294236b82586889ef764b5fbc6669238/pydantic_core-2.41.5-pp310-pypy310_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:c8d8b4eb992936023be7dee581270af5c6e0697a8559895f527f5b7105ecd36a", size = 2329951, upload-time = "2025-11-04T13:43:18.062Z" }, + { url = "https://files.pythonhosted.org/packages/7d/73/3c2c8edef77b8f7310e6fb012dbc4b8551386ed575b9eb6fb2506e28a7eb/pydantic_core-2.41.5-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = 
"sha256:242a206cd0318f95cd21bdacff3fcc3aab23e79bba5cac3db5a841c9ef9c6963", size = 2318428, upload-time = "2025-11-04T13:43:20.679Z" }, + { url = "https://files.pythonhosted.org/packages/2f/02/8559b1f26ee0d502c74f9cca5c0d2fd97e967e083e006bbbb4e97f3a043a/pydantic_core-2.41.5-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:d3a978c4f57a597908b7e697229d996d77a6d3c94901e9edee593adada95ce1a", size = 2147009, upload-time = "2025-11-04T13:43:23.286Z" }, + { url = "https://files.pythonhosted.org/packages/5f/9b/1b3f0e9f9305839d7e84912f9e8bfbd191ed1b1ef48083609f0dabde978c/pydantic_core-2.41.5-pp311-pypy311_pp73-macosx_10_12_x86_64.whl", hash = "sha256:b2379fa7ed44ddecb5bfe4e48577d752db9fc10be00a6b7446e9663ba143de26", size = 2101980, upload-time = "2025-11-04T13:43:25.97Z" }, + { url = "https://files.pythonhosted.org/packages/a4/ed/d71fefcb4263df0da6a85b5d8a7508360f2f2e9b3bf5814be9c8bccdccc1/pydantic_core-2.41.5-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:266fb4cbf5e3cbd0b53669a6d1b039c45e3ce651fd5442eff4d07c2cc8d66808", size = 1923865, upload-time = "2025-11-04T13:43:28.763Z" }, + { url = "https://files.pythonhosted.org/packages/ce/3a/626b38db460d675f873e4444b4bb030453bbe7b4ba55df821d026a0493c4/pydantic_core-2.41.5-pp311-pypy311_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:58133647260ea01e4d0500089a8c4f07bd7aa6ce109682b1426394988d8aaacc", size = 2134256, upload-time = "2025-11-04T13:43:31.71Z" }, + { url = "https://files.pythonhosted.org/packages/83/d9/8412d7f06f616bbc053d30cb4e5f76786af3221462ad5eee1f202021eb4e/pydantic_core-2.41.5-pp311-pypy311_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:287dad91cfb551c363dc62899a80e9e14da1f0e2b6ebde82c806612ca2a13ef1", size = 2174762, upload-time = "2025-11-04T13:43:34.744Z" }, + { url = "https://files.pythonhosted.org/packages/55/4c/162d906b8e3ba3a99354e20faa1b49a85206c47de97a639510a0e673f5da/pydantic_core-2.41.5-pp311-pypy311_pp73-musllinux_1_1_aarch64.whl", hash = 
"sha256:03b77d184b9eb40240ae9fd676ca364ce1085f203e1b1256f8ab9984dca80a84", size = 2143141, upload-time = "2025-11-04T13:43:37.701Z" }, + { url = "https://files.pythonhosted.org/packages/1f/f2/f11dd73284122713f5f89fc940f370d035fa8e1e078d446b3313955157fe/pydantic_core-2.41.5-pp311-pypy311_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:a668ce24de96165bb239160b3d854943128f4334822900534f2fe947930e5770", size = 2330317, upload-time = "2025-11-04T13:43:40.406Z" }, + { url = "https://files.pythonhosted.org/packages/88/9d/b06ca6acfe4abb296110fb1273a4d848a0bfb2ff65f3ee92127b3244e16b/pydantic_core-2.41.5-pp311-pypy311_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f14f8f046c14563f8eb3f45f499cc658ab8d10072961e07225e507adb700e93f", size = 2316992, upload-time = "2025-11-04T13:43:43.602Z" }, + { url = "https://files.pythonhosted.org/packages/36/c7/cfc8e811f061c841d7990b0201912c3556bfeb99cdcb7ed24adc8d6f8704/pydantic_core-2.41.5-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:56121965f7a4dc965bff783d70b907ddf3d57f6eba29b6d2e5dabfaf07799c51", size = 2145302, upload-time = "2025-11-04T13:43:46.64Z" }, +] + [[package]] name = "pygments" version = "2.19.2" @@ -1554,6 +1748,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/ec/57/56b9bcc3c9c6a792fcbaf139543cee77261f3651ca9da0c93f5c1221264b/python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427", size = 229892, upload-time = "2024-03-01T18:36:18.57Z" }, ] +[[package]] +name = "python-multipart" +version = "0.0.22" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/94/01/979e98d542a70714b0cb2b6728ed0b7c46792b695e3eaec3e20711271ca3/python_multipart-0.0.22.tar.gz", hash = "sha256:7340bef99a7e0032613f56dc36027b959fd3b30a787ed62d310e951f7c3a3a58", size = 37612, upload-time = "2026-01-25T10:15:56.219Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/1b/d0/397f9626e711ff749a95d96b7af99b9c566a9bb5129b8e4c10fc4d100304/python_multipart-0.0.22-py3-none-any.whl", hash = "sha256:2b2cd894c83d21bf49d702499531c7bafd057d730c201782048f7945d82de155", size = 24579, upload-time = "2026-01-25T10:15:54.811Z" }, +] + [[package]] name = "pytz" version = "2026.1.post1" @@ -1822,6 +2025,12 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/e1/c6/76dc613121b793286a3f91621d7b75a2b493e0390ddca50f11993eadf192/setuptools-82.0.0-py3-none-any.whl", hash = "sha256:70b18734b607bd1da571d097d236cfcfacaf01de45717d59e6e04b96877532e0", size = 1003468, upload-time = "2026-02-08T15:08:38.723Z" }, ] +[[package]] +name = "sgmllib3k" +version = "1.0.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/9e/bd/3704a8c3e0942d711c1299ebf7b9091930adae6675d7c8f476a7ce48653c/sgmllib3k-1.0.0.tar.gz", hash = "sha256:7868fb1c8bfa764c1ac563d3cf369c381d1325d36124933a726f29fcdaa812e9", size = 5750, upload-time = "2010-08-24T14:33:52.445Z" } + [[package]] name = "shellingham" version = "1.5.4" @@ -1840,6 +2049,19 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/b7/ce/149a00dd41f10bc29e5921b496af8b574d8413afcd5e30dfa0ed46c2cc5e/six-1.17.0-py2.py3-none-any.whl", hash = "sha256:4721f391ed90541fddacab5acf947aa0d3dc7d27b2e1e8eda2be8970586c3274", size = 11050, upload-time = "2024-12-04T17:35:26.475Z" }, ] +[[package]] +name = "starlette" +version = "0.52.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "anyio" }, + { name = "typing-extensions", marker = "python_full_version < '3.13'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/c4/68/79977123bb7be889ad680d79a40f339082c1978b5cfcf62c2d8d196873ac/starlette-0.52.1.tar.gz", hash = "sha256:834edd1b0a23167694292e94f597773bc3f89f362be6effee198165a35d62933", size = 2653702, upload-time = "2026-01-18T13:34:11.062Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/81/0d/13d1d239a25cbfb19e740db83143e95c772a1fe10202dda4b76792b114dd/starlette-0.52.1-py3-none-any.whl", hash = "sha256:0029d43eb3d273bc4f83a08720b4912ea4b071087a3b48db01b7c839f7954d74", size = 74272, upload-time = "2026-01-18T13:34:09.188Z" }, +] + [[package]] name = "sympy" version = "1.14.0" @@ -2078,6 +2300,18 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/18/67/36e9267722cc04a6b9f15c7f3441c2363321a3ea07da7ae0c0707beb2a9c/typing_extensions-4.15.0-py3-none-any.whl", hash = "sha256:f0fa19c6845758ab08074a0cfa8b7aecb71c999ca73d62883bc25cc018c4e548", size = 44614, upload-time = "2025-08-25T13:49:24.86Z" }, ] +[[package]] +name = "typing-inspection" +version = "0.4.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/55/e3/70399cb7dd41c10ac53367ae42139cf4b1ca5f36bb3dc6c9d33acdb43655/typing_inspection-0.4.2.tar.gz", hash = "sha256:ba561c48a67c5958007083d386c3295464928b01faa735ab8547c5692e87f464", size = 75949, upload-time = "2025-10-01T02:14:41.687Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/dc/9b/47798a6c91d8bdb567fe2698fe81e0c6b7cb7ef4d13da4114b41d239f65d/typing_inspection-0.4.2-py3-none-any.whl", hash = "sha256:4ed1cacbdc298c220f1bd249ed5287caa16f34d44ef4e9c3d0cbad5b521545e7", size = 14611, upload-time = "2025-10-01T02:14:40.154Z" }, +] + [[package]] name = "tzdata" version = "2025.3" @@ -2095,3 +2329,17 @@ sdist = { url = "https://files.pythonhosted.org/packages/c7/24/5f1b3bdffd70275f6 wheels = [ { url = "https://files.pythonhosted.org/packages/39/08/aaaad47bc4e9dc8c725e68f9d04865dbcb2052843ff09c97b08904852d84/urllib3-2.6.3-py3-none-any.whl", hash = "sha256:bf272323e553dfb2e87d9bfd225ca7b0f467b919d7bbd355436d3fd37cb0acd4", size = 131584, upload-time = "2026-01-07T16:24:42.685Z" }, ] + +[[package]] +name = "uvicorn" +version = "0.41.0" +source = { registry = 
"https://pypi.org/simple" } +dependencies = [ + { name = "click" }, + { name = "h11" }, + { name = "typing-extensions", marker = "python_full_version < '3.11'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/32/ce/eeb58ae4ac36fe09e3842eb02e0eb676bf2c53ae062b98f1b2531673efdd/uvicorn-0.41.0.tar.gz", hash = "sha256:09d11cf7008da33113824ee5a1c6422d89fbc2ff476540d69a34c87fab8b571a", size = 82633, upload-time = "2026-02-16T23:07:24.1Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/83/e4/d04a086285c20886c0daad0e026f250869201013d18f81d9ff5eada73a88/uvicorn-0.41.0-py3-none-any.whl", hash = "sha256:29e35b1d2c36a04b9e180d4007ede3bcb32a85fbdfd6c6aeb3f26839de088187", size = 68783, upload-time = "2026-02-16T23:07:22.357Z" }, +] From 2b2b45dbecb32c6c0eb4a7578aecaadf85cf6917 Mon Sep 17 00:00:00 2001 From: Laurence Date: Wed, 11 Mar 2026 21:28:59 +0800 Subject: [PATCH 04/24] P stage: Add gradient clipping for training stability - Add max_grad_norm parameter (default 1.0) to TrainingSettings - Import torch.nn.utils for clip_grad_norm_ - Apply gradient clipping before optimizer.step() - This should improve training stability and potentially achieve better convergence Target component: trainer Expected benefit: Better training stability leading to improved val_bpb --- component_system/components/trainer.py | 24 ++++++++++++++++++++---- 1 file changed, 20 insertions(+), 4 deletions(-) diff --git a/component_system/components/trainer.py b/component_system/components/trainer.py index fd300348e..f35bf138c 100644 --- a/component_system/components/trainer.py +++ b/component_system/components/trainer.py @@ -6,6 +6,7 @@ from typing import Any import torch +import torch.nn.utils as nn_utils from prepare import MAX_SEQ_LEN, TIME_BUDGET, evaluate_bpb, make_dataloader @@ -27,6 +28,7 @@ class TrainingSettings: adam_betas: tuple[float, float] = (0.8, 0.95) warmup_ratio: float = 0.0 warmdown_ratio: float = 0.5 + max_grad_norm: float = 1.0 final_lr_frac: float = 0.0 
depth: int = 8 device_batch_size: int = 32 # 24GB vram @@ -74,7 +76,9 @@ def run_training_session( tokens_per_fwdbwd = settings.device_batch_size * MAX_SEQ_LEN assert settings.total_batch_size % tokens_per_fwdbwd == 0 grad_accum_steps = settings.total_batch_size // tokens_per_fwdbwd - train_loader = make_dataloader(tokenizer, settings.device_batch_size, MAX_SEQ_LEN, "train") + train_loader = make_dataloader( + tokenizer, settings.device_batch_size, MAX_SEQ_LEN, "train" + ) x, y, epoch = next(train_loader) print(f"Vocab size: {tokenizer.get_vocab_size():,}") @@ -109,11 +113,17 @@ def run_training_session( group["momentum"] = muon_momentum group["weight_decay"] = muon_weight_decay + # Gradient clipping for training stability + if settings.max_grad_norm > 0: + nn_utils.clip_grad_norm_(model.parameters(), settings.max_grad_norm) + optimizer.step() model.zero_grad(set_to_none=True) train_loss_f = train_loss.item() if train_loss_f > 100: - raise RuntimeError("Training aborted because loss exceeded the fast-fail threshold.") + raise RuntimeError( + "Training aborted because loss exceeded the fast-fail threshold." 
+ ) torch.cuda.synchronize(device=device) dt = time.time() - t0 @@ -125,11 +135,17 @@ def run_training_session( debiased_smooth_loss = smooth_train_loss / (1 - ema_beta ** (step + 1)) pct_done = 100 * progress tok_per_sec = int(settings.total_batch_size / dt) - mfu = 100 * num_flops_per_token * settings.total_batch_size / dt / H100_BF16_PEAK_FLOPS + mfu = ( + 100 + * num_flops_per_token + * settings.total_batch_size + / dt + / H100_BF16_PEAK_FLOPS + ) remaining = max(0.0, TIME_BUDGET - total_training_time) print( f"\rstep {step:05d} ({pct_done:.1f}%) | loss: {debiased_smooth_loss:.6f} | " - f"lrm: {lrm:.2f} | dt: {dt*1000:.0f}ms | tok/sec: {tok_per_sec:,} | " + f"lrm: {lrm:.2f} | dt: {dt * 1000:.0f}ms | tok/sec: {tok_per_sec:,} | " f"mfu: {mfu:.1f}% | epoch: {epoch} | remaining: {remaining:.0f}s ", end="", flush=True, From 0ae358df92f6a5f73755fd9a733d2a5c6dcfcb0d Mon Sep 17 00:00:00 2001 From: Laurence Date: Thu, 12 Mar 2026 15:08:38 +0800 Subject: [PATCH 05/24] Add optional gradient clipping and improve Ralph worktree restoration Training: - Make gradient clipping optional (max_grad_norm: float | None = None) - Disabled by default to test training stability without clipping - Can be enabled by setting max_grad_norm to a positive value Workflow: - Add _ralph_try_restore_worktree helper for consistent worktree reset logic - Fix baseline waiting: allow planning on non-baseline branches without baseline - Restore worktree on run failures and before P queueing when Ralph loop enabled - Minor code formatting improvements in workflow.py --- component_system/components/trainer.py | 5 +- component_system/services/workflow.py | 74 ++++++++++++++++---------- 2 files changed, 47 insertions(+), 32 deletions(-) diff --git a/component_system/components/trainer.py b/component_system/components/trainer.py index f35bf138c..c753818a4 100644 --- a/component_system/components/trainer.py +++ b/component_system/components/trainer.py @@ -28,7 +28,7 @@ class TrainingSettings: adam_betas: 
tuple[float, float] = (0.8, 0.95) warmup_ratio: float = 0.0 warmdown_ratio: float = 0.5 - max_grad_norm: float = 1.0 + max_grad_norm: float | None = None final_lr_frac: float = 0.0 depth: int = 8 device_batch_size: int = 32 # 24GB vram @@ -113,8 +113,7 @@ def run_training_session( group["momentum"] = muon_momentum group["weight_decay"] = muon_weight_decay - # Gradient clipping for training stability - if settings.max_grad_norm > 0: + if settings.max_grad_norm is not None and settings.max_grad_norm > 0: nn_utils.clip_grad_norm_(model.parameters(), settings.max_grad_norm) optimizer.step() diff --git a/component_system/services/workflow.py b/component_system/services/workflow.py index e9cac3c99..c89fcafd8 100644 --- a/component_system/services/workflow.py +++ b/component_system/services/workflow.py @@ -608,18 +608,24 @@ def queue_p(self, seed_id: str) -> StageRun | None: branch_metrics = self.metrics_repo.get_for_branch(seed.baseline_branch) has_baseline = branch_metrics is not None and branch_metrics.get("last_val_bpb") is not None if not has_baseline: - if not (seed.status is SeedStatus.queued and seed.latest_run_id is None): - seed.status = SeedStatus.queued - seed.updated_at = now_ts() - seed.latest_run_id = None - seed.last_error = None - self.seed_repo.save(seed) - self.seed_repo.append_event( - seed.seed_id, - "p.waiting_for_baseline", - "Baseline run is still in progress; Plan will queue after baseline finishes.", - ) - return None + baseline_seed = self.seed_repo.get(BASELINE_SEED_ID) + # Only wait for baseline when the baseline seed is for this branch (e.g. master). + # For another branch (e.g. dev), no baseline run is queued for it, so allow planning; + # the first DCA completion on this branch will establish baseline metrics. 
+ if baseline_seed is not None and baseline_seed.baseline_branch == seed.baseline_branch: + if not (seed.status is SeedStatus.queued and seed.latest_run_id is None): + seed.status = SeedStatus.queued + seed.updated_at = now_ts() + seed.latest_run_id = None + seed.last_error = None + self.seed_repo.save(seed) + self.seed_repo.append_event( + seed.seed_id, + "p.waiting_for_baseline", + "Baseline run is still in progress; Plan will queue after baseline finishes.", + ) + return None + # Branch has no baseline and is not the baseline seed's branch: proceed with planning. setup_error = self.git_service.setup_error() if setup_error is not None: raise RuntimeError(setup_error) @@ -825,6 +831,26 @@ def mark_direct_code_run_failed( if task_path is not None and task_path.exists(): move_to_error(task_path) + def _ralph_try_restore_worktree(self, seed: SeedRecord, ref: str | None) -> None: + """Reset seed worktree to ref (e.g. commit before P) and log result. No-op if ref missing or baseline seed.""" + if not ref or not str(ref).strip() or seed.seed_id == BASELINE_SEED_ID: + return + try: + self.git_service.reset_seed_branch_to(seed, ref) + self.seed_repo.append_event( + seed.seed_id, + "ralph.worktree_restored", + "Restored seed worktree to commit before P for next Plan.", + commit_sha=ref, + ) + except GitCommandError as exc: + self.seed_repo.append_event( + seed.seed_id, + "ralph.worktree_restore_failed", + f"Could not restore seed worktree to commit before P: {exc}", + commit_sha=ref, + ) + def mark_run_failed( self, seed_id: str, @@ -862,6 +888,7 @@ def mark_run_failed( and task_payload.get("merge_resolution") is not True and task_payload.get("metrics_recovery") is not True ): + self._ralph_try_restore_worktree(seed, run.summary.get("commit_sha_before_p")) try: self.queue_p(seed.seed_id) self.seed_repo.append_event( @@ -1020,6 +1047,11 @@ def finish_dca_run( source_stdout_log_path=log_path, source_stderr_log_path=stderr_log_path, ) + if ( + seed.ralph_loop_enabled + and 
seed.seed_id != BASELINE_SEED_ID + ): + self._ralph_try_restore_worktree(seed, run.summary.get("commit_sha_before_p")) return run seed.latest_metrics = metrics seed.latest_signal = signal @@ -1247,23 +1279,7 @@ def finish_dca_run( and not metrics_recovery and seed.seed_id != BASELINE_SEED_ID ): - ref = run.summary.get("commit_sha_before_p") - if ref: - try: - self.git_service.reset_seed_branch_to(seed, ref) - self.seed_repo.append_event( - seed.seed_id, - "ralph.worktree_restored", - "Restored seed worktree to commit before P for next Plan.", - commit_sha=ref, - ) - except GitCommandError as exc: - self.seed_repo.append_event( - seed.seed_id, - "ralph.worktree_restore_failed", - f"Could not restore seed worktree to commit before P: {exc}", - commit_sha=ref, - ) + self._ralph_try_restore_worktree(seed, run.summary.get("commit_sha_before_p")) if seed.ralph_loop_enabled: try: self.queue_p(seed.seed_id) From 85065a475ff232ebd453e6a1e64f1a78e36c257a Mon Sep 17 00:00:00 2001 From: Laurence Date: Thu, 12 Mar 2026 15:47:35 +0800 Subject: [PATCH 06/24] Commit uncommitted changes --- progress.png | Bin 252961 -> 46385 bytes 1 file changed, 0 insertions(+), 0 deletions(-) diff --git a/progress.png b/progress.png index 999e45c2b8833884870c1d7f084bab7a7bbc9e2c..ad960133d930080ec4bf92afde6e78e3aa125e90 100644 GIT binary patch literal 46385 zcmeFa2UyhSwmwQ?)EF##8--Z0jFlEaKt#akMja4TPvaLX_{EJVjTpe+fb3- zL7LH^bQCF4$3hVVMnUQTbKf;G#^gEYIrrS>{_j2aK7XF<9fMPT^ZmYWt#`feyVjiD zskL?54|9JI5)zum-1aYRA))UXLPFn7{(cgEBbBmR7ynv+L{0yQj-%BPw|!2QLYn)I z9I|sfVs~)=B3DZ%=Yx(8vQle)mRcvV=)jR9hn&|-OWS{Wgp{L`we;^sE@gO?DTlTh zI134h&ZPf+V;xa}_eu$4{_B_BM?*Us-9z`djH-U9S#Ub&8;h+Erybh5YGAF-gwj|U zh3&Gtoa0AyZ4TCz&w5cLBdPg9#NW6=CO>JrX5PK1%oeS);&B;^lnUgXy?q8>d58Tp z>-DRtCA~vOKfEhbjA-%d&S~4Q(A9Lnd9~WES>NDF=x60FO5yw!u3ydwY9bQ}>ps3)Zh~3w74AD!KN&E)t(V zAvAiH+O8DWf8s-@#CdRX6ef##9tuC!5!I1)ae>ku+nTr_8B6ux;Nbi915F+rW0$Hu zQPI)17qo{ipU0cs4RbX-J$*qiF52ttQ+jBBgL8Sq;in6#>{C-y+pA-Z?r4St4E46@ zrJEgVj~uE9%uSiKM>aL7i 
zcSCkxwNa3go5RARuTF#>eRGyBBQC4^K}oP&bZW7c;^Ak%Om4ZT)V=M@^aVYoj^48L zBCRDse(6oe#2xcST^D+NdOq@Qt7_4cd5#Trk!cz=CT=FCSoCaMP|$i8o8{ZDT{gd{ zxSZM3nOttg_8PEn{1!V)Kdvgxm3CN4_}vKa^-H&$F9=n1PI`TMUTbTJV_8RZd{c}j z-ZUv+#K^)|GOgg_Q?ZE6Gn7;O?_0X}etNN^?esi{ZLHA4Pv$6MKZJC}$Hta!nP-3X zf_&Y|6`_YVHFa6HxnwJJ-eJ* zW2&?w!qdsBEYzT@_m3ZUSp4zte;SxykfXnJsWDNo4n4f3wKSyW&HTrwr)flL7ZzMu zDvMhb>fRbud*Idudc#khwX#ZGDLP~Q>3KOE50^%l8q4%XSB*UHQEffFK=09E`cK;D zqP1-rX2aU6{nh){eCIRrR!r9JWx}V9D)BHSx3c9Sb}Z*|<@DO6Yef1~7C4uznImJd zP1o>XNGX0>i?ujHcD(YJGRRcvEZ zb+I)vwuf(UzmbW@XyMWrg$vvoc8#7F8JQ<%Q_0OWWA+p*y|mkmllXKYr#@Hbu2xme z4wWGbms~C(jRv#FmLQS~hyp=5K1_QZ*rnwptCMdH0d<(_wjbM)O?{F8gjl-lk`#}(ZSXDJ?d zcD;kU&bB&+4&XeygkRRXb%*ZJGm)FT@aW!fpRpku?%5w#wAQo7Tyb=Aj&xM68s!d@ zzdj?DThM!dDm|*NZ^DcX&-Qo8T9p*6x*N*6D3v(;m;oal6J$UWG8!7{hZf4Why*eI~*&2-fyDd=4gkF-u7|z!*Q}3ac z0Z%iWw*aMy`2Vg7+xQ&a%F4=F%yW;YE7?Uoc`({v@4Ha>7=FP}!?VvxnLOCd>#j|< z{&>k}#QcVLxhl^ywG`XfXu&Pq9yJ`!``k1H<^A0anfYd8J%Kl!+sf2fH}MhFBDJH^ zY7rj{W9;8OyP?K@JSn<2%TMWOcar{`PsxV(Xe$qSA=Tf(kKI=p!=K0ZDzHa=c@ zy-UaL(JaK^{89yW_Rc&F>70~p+EeGROUS_f@I`>uWv9E=yi3PSy!xtqV+I-#A60j^WwrzuiA0xYNe*^4z2_<(thjyi#*G`> zw2iULs=B0eK54Jn`|$WFm2@|scm#GsdtP6>PtLoVc(Y_qXVxWm1g!(|9BivSo;`jM zcKwR3oTi*wb`~esV~urL=qa0p>kjR`a(l40+*{Z4(=9Pc)~spMrj@GH?9nxdJUTYi z=4&0{m8S1pD(A~kYby)$MH;jTL8M5(hEF@$+wAH2IXVl>ORlYvOq-+RYL{9%kjEQ6 zWy34n;jJBm_>5Z-7Z*1i=1s44bLF{JI(KFvv_xNgeq-~Qn_Dj}EG?^b^m=b3S*GkW zHe%Ig9I1WKwX7?TH&$8}tCo|;(KE--NBUH?)@c>$;&R-JF?nOd`G{_!!EaMuXxUcB zEi^o>l)%)b7-`aA)#&rbeKnb#A!|cQ=>UzQf1x9#0`}sI)`#Imde-E5EtW&-dohwk?BhxxHaF zY)2n%y>Oq&p%*FE<>6{3CMJi=6x((?F4_^gE^*z+hsV*QjtEPqhS@l?YS_Wp+(w~? 
z9!<&bEmt*LDKB?9uK`&JjQt!i_1s>lCXxroE)#lb><;t;$YbbbRiExrc(}?G1hH zX(g~?S#F&D{hhT{U1t`!>R-A2)7k?&4+BDp^tP2tpL-J)()jl2C26Vb506AtlLxYP zC4Es3bEH+D=)A6R7};|Q*QoYAh6l-**)qm?5nGE z&QYe!*F%hc^ZR767>5W&XRGI_wlzzbYxe)~?>9l#;ZfZoYs|NNL|77eD(2()u_kZK z<)0CAg@hjVFbOeV4>LR@E5%HnXCqb_m^aeWp*b++$K?_!yvRV)`(r;0zPokFA&rAA zy?FEKwOKc^AD)=JP~SfJg_aU+72#Bagq~kC@@|_Qdb^H!_+!4PbV^=gV&bsI06o5b zpr=?`6lhVcr|bt|NyYArxA(=TWDj>lm-b73&&aNkjzG5PUBWy(`1*|W?wSYh)mQ{%htb_NQpncaTAvFpIhfY?+ok? zad>d>V^zF&Y6dWXFFt5?e!24rL}XpZPO~G=)g;pl5*|GScCE4FPMT-86K^t*#rC>> zOmxNNL$#KHvzh09|826U&YkTC6H9-ap`s56LBZ(gXkSf8m(iie*#H9hFLtQr>}2~& z=Vlmg1%kP+(9G__#&@`KTOR;Y_1VYS`EMr9RN>Z^>(+0;VH2)uLgadLv`qc-{Zz299m($gl77Z|XkL|Ip}}xABoj)!;~Px#)uRM^;jT*Z?RZ znMP+O5leUlR;X7*R1cbWhv0QI!sZpmlLRN8BF#i9U6tcKcRZY&Z zKEs=9&fP2^49zH{8~lmG_PHUC<9IB0q(yoKGY)B5?V?L(;?q01x!yOrvO1GA`ufhU z2*0KtnN%QuNxA>!lBZ_CeNF|7&q$=?+_|%yU?Iz*^JSv@$j7JA+_;?nx{?T0Rb&H# zwnp;@n*DZYd3kvO9R{k7eZ1rw%EkV3ILroiXxiW9*5qy-?mgU27zE*DaLJ|j*MFO? z-c#1wDQfxq$I*1W;XWm-xkG^Kjw9*fbw6kum?D_C`Mw@8fpxSh$ z&IC$Dn!sJoI2bK`HD+e_9G_lX;0%njjOpDMZ6=w*0~m?*Hr=|=-6Gjb`PipztnJr+ zzR|o?T`G6*4Wm#mx5xho0&c#1h-0q%>8W#A*f%JnC{3w&9<4U84m*{W$5rxazOwZW zBD1euFI5I8D#QkVGL-S?@7yr_?$*#7(Y$MUV}0>V76P_Ph+R_QCVMyFUAqVEcO)%*`Tv!?Hj;7{V`28@-Dq(=CAFnJh${F*9E;M0dwK$Wt7T4V*o1Q7l%x ze0j~ptx;~CWA zQGT20$6_Ghct1*mE1W44hJi%t6M$TTvHUV z{>FfOia0gNAD_&-8y6?V>_Lgvd{=e!pi0k`OU+|kj@dolm8}bdP_l0L^lY;`Vr+01 z7wJBXF=zde1C)oYxUTi~=?uzwm zWSPMyYn>-@mA@*qo&DVs> zexv&+d&W=Wm&3Km)~yHx8F>dWPys_F43_j&LnxLH6*Y6N{-tF;j(xmhs+jb z)yDvNYni>}hj7EUBLv9odvww+wMthJ_oEj_=FZAozmt4U~S!@54InHj~cCe95ZPGt0?W;Z!39h@?w@ z|AT~9he7;D`vdm8h7k{8JCXA5hTa*HkE-)24_Uud+@{SAY~8NWbfZ5 zO}igA=|lIPnVIJjrWqI8+fe0GYA){tY>^Ue^Ue#Wr9;*m7X>3WQnkJ_T?XU zY*<6-1v@_=%(ZTnC?JuHQ6y-k>vDWjDn$n+byTi5QkYTVOv9vCfiJKn_ME$^ALCQ( zclz|{PEa{Ry%9WPuh9Hr>AX;SwHs+>Lq&QlMjp43b6?@#qS@VzS#DG{ofRv4Hn+ zSyJGX4EdY})`_5Y9Ez;-Kdo7^K*`nEB5(SF^%5^0h@{>unEcUHPHJ#|-e{MjbhY(S z)HMq4^WI)mYNg0w8E6uKRJaZ==x|3XEO?$n#$u{4y)O4*dl`QBXP|*tIQ{fbu#2GSB9U7D(rf^em~*@nCz5 
z+PE7Gb3%UO$fMTiaBgIquz#)LxY%);%vuxsIBeCzn}J-jEAz1vt7;@;jX$7-UTzzc zxaH1{h;U@H^@Tj;xYtA>bi98rs@RV+5ekqZ33|Z@=bMGPMlNEcdBqFF(zlmXRW7Lv zzngqN74f{PYD(Q>>tseiYqkNxklzUVhe^IYrft1%R=yqS0E;nw!-t3G=gO@nDv9b+ zI+E&}H*a=nImy!ni-F9pgv2V{3#O$F8Tq*J!#mPE9`wCuQk~Aj$XVelSPc-l`co*ZU(2J#H7;sbzmxDxA~Rm?}77p1tUYf zeheeKL^T)guQfAN2KVB=)a_@ZP@*8co#lyOEFRlrpH>U{3f#}0r@WsfzmN9(NnDz^ zMJ(JY;q^z}?VKh#(xI1E1bXV~nT5wzGJAi1%#8vcbyuY?Y6JmQ6UqDB&3oPoqWT|- zN7NR$A)_Xvyj;#CZZ1DOOWEL1)A>2F%K-r;(`XAs-}u!nLV-dg%k9^10o)xEIe0)% zyw9j@l#THr@5W>AV}+HMTQI>X*eJd}`6Hj5WA;!D=*9ii-Key6fy-{x3u4RMty6<1 zzFzs&rg42T$J!#&U$+QMN*I=#>*eFJ!M&$>s+dalG3Q+EC<{a_4c16cU|tD$g0!6x zdS+g7bWi*kply5#w~oS##b$9|vX&j?%ZqFynQm73u- zmL*0-K$*{LDs;^WPDAC_-XbKr&GS6cYCTP>gt6EaG_2`s|-M+IRV$m z&3miF9?~Mdo9VaQ^63J%5}*>DMmK4*b=B(@=pB+uF(lwGKtj}Uthr3&8Dfd-&TdMZPOJ6gfl6lc5l-nS3;=*~0->>qmhR3*S{=+SwFkqN@ikAclvxulI45n}ioK(CiNOj7MFkyU(ke1Z%ZJWy2@fhMj8 zs~vwoY=*Gno87LAl8kb}s&h+FF6fo5dwbsM(ofe=vf=*t-MXZ*9!p!j#MXG1J>IwK zP+3iGe`+Wy>8gPq&{)N&b*|XPWiDqr4Y%LDi*#O>t`R6TggqVB9QM?S%J{0CPq9Jx zWE{kT?MR=2_n#1xi}Kvfk3nGEnHX}Ul6&0|sy^Ne5l=li9_(T)uOATd?FSqr=UpnJ z?K|2~yE*pjp9AC7Ji}*jb6@ZWP(Nu8IN-X=JHh!k<}&TPA1KX2YV=iX8Uz4!|V zvwhlP^MFr6AS4Mr>&8wo$;XT9Frvnyqoc)-^>ysQ+U!o>v)X?@Zof1RQ+*5%n97tu zcwHPm64810eFne^XJD~Kg9ng~;?vwg3mL`O1DIFI`#QBp28uAxotQ9LD#h_D#;(z| z9?SwUWZ*waFH$AqaJZ9C+I43+ej)9wI2sxpChA9oO=HHY=_-S-mfg=m#rf&ob2Xs1 zXlEB3f_KQtyUfhkF=Mj0SraZ8&EkxXm4_(A>-&uMMRO4=42ZG&3A^Ah;DUrxiCb3t zosi(F!EVkTkW*=1$))n*glz}rb3oX}qL9vVTZ8ARrFjj!6jn(FIdwEl( z3APehorDuq!M851w>fKTXJfNE7sy0D8mg3*8tzN9v*e%~q>??{FmODj{lJ0;Z*V?Q zEltH#o%QcS8L!NcHi*@+s<{HvI&~1da&NdC=rjpO8&TB<*bfiS+2CUpg7XNXo$P~3 zp980QquA!XsxiLong;$CQ_3E$9M2wX69vMV^-Vy(n zUn!x+h&^P%qmqG=eVGckbEMlXuNZ4Dv+cA9A{@{rS-bMPdlR_0QP1g{X?IVw>H;fi*z% zNGg@D=Zm5^FeY7YC_JoCBNZb{+-F~z=tcGU>&Iu>?%ld*lh*SXj6!&^RF(w25WyL6 zR~8kwnIvFPJ?Po(98t_0Dd$O4eWWy}<_})ET8iZ10RMfDPm@$w03a58W$D&5S4D|a ze9dUTuOy4{_Q{1}V3wMq+0W;KK!GaOgMiqUR4i4Dt@*NU87Xn!b4Fiae+wO#e*+59 
zo#m#h{*NsO@@TQ>4pSz^JG`uG!DBuwiSr--+{$21U;plUOj6P6*gZ?|D!w<;Qibtvp#z0fOW3D3%7a`k z0(B9@sKtNvpvHROFK%uCPIN4QfHAWI5P*sb9Oc+HGdParLj+PFIsJ^mbDPHkwbHyLP3&y5$vKxMxpcWZVGggmPl? zD&Kx+0-;cWoP?K;{X$NP(iW^{$v3Z*#4eIs^J)Xa0u0NaN* zb08jU@m2luGpIgPf{?-G>kizC0kUPXkb7FOqtt;Moss-V-!zIzoHT9jRaB0PNf`3w z`V2l6vjUYTC;56HpH>vK>V->o!PGe{tMZ`Z?ZzFw@K8@l9fC z<|sHASA&#_fe<2gj{|V*kD62-n3kkj3ZgM65{vH<3ZNhT!;%9hP9~SF7boO}f*-+W zWCi^hBug;pqt(bogB#7mfMQC=RQg|<_u>&7P@WtJS~h-N4}WmXoWW@D>@CfNL*l;A zjQQ(+MsPKd2Zx-$(44jj#V=YD@%U^fJ_>XyT&6hC;Oq=scNfK8t@$No5!)D$(EsgCuo?xbec>m4pO5iDGc^ zhwneHkI>$HomJ;<9_U(9wAQgHn^agLxcJ!-#M{f6m#Qi&uVTwAbIks@h4huFbJrd? z`_wSO=@D*J;L)i~>|bZEBz~^QUnutXuXfMZ``CWhu2}}ZJLIJqU_V1Z1KTyU(JP=| zWx$D7aGF<*-P;Mn#t>uze-!-tqQ0FXCnraylwL$Fi$^D?Xv`MaZ*(Qpc3FUaB!U{% zhd&sqYN@D3{ST&YKxF>n2DvCEBB61%H)ai->1m2RbmAnV{l(oKtY+LI(*3o%Uw@e}sC^3XA9Joa#Sj_P9Je`a}=tWxxnLMW0Ar!A#yYLexO(sp#R>CA{_=BG~1h>MC^ zMXyLn5D2L)HoZw5bW&h@XtCv@*Q9g3%cqb9qg$-mHfx1tX z0KGR(wQf55=-Ufq$N+2e%BZRxnMW?}8{aS#)C=p^xc`&6lbCBbu5J3t0gW+{#Z*`H?&s zyKZf|1zKr;_RZD7VWYzFuKTI*i_c+X`dNnsmbqSzOCArBCAtuRt#;fjb*e!(AN?}PgG z1tedqeJ2@`Dl>m|;u{2L7o_Y;ZYwD^e`{J2;&;cgHAb<$B5HAt#}Umyt!Y7uwpvZ0 zYb7sT@K}RK*>-7%z&wP1+MnInOv)0m6pGMC2p?Bg2c%!-kvRS=4cz-(P=Z0wl9RjW zz*DRwMhO{HGIo-PrT|4F}Un$&tc#R){_y+cHfV8WFq;KJ@m#?qHl~dLepD|Rb|f7*fX-Bt+tp7LHNGrYV=POa zA(CKjqtYUeOb?Y%@I6LW=PP|NQr6w8S)o-Llip?#SvCzHu(TfqeoSTN;^d}qKZT~? 
zYW3M^c9r*bkv)qf#}Rr( zx{UaDlGFKs6Y1aKhR_6!9e*xEutMIN3l}uo_3Llsk1nuP!Ueyf> z#Q7jOTU|1JS#c4pSPb$j-D9Kv*ln`_AgUG>V{f$CD4*!1BF!%4`IaGEXApa&RUxwM z6fxNJvoWL!dwKnldxhCUlB(K!n?|01RQ0>Kz*e=nNF_tIe1k00X{aSo!D!h&pgbmP zLtmwia39Hix!Y~$0v}xj`^+eWw2-*H&F0cqi|Fn3;I8xC;gV}|N@}~-*6YnuvnmM= zD+}v*k?xkRd;ic!d#F)#p|Y^RbS$0EqLa^LA?7Nf47l&WWAjGYBc0Z&;+EY8$yP1+ z(8VC)g6$n`Y>G4$8m}_Q!lxeO%li_1o^6S(OV@kg+J1HV0vnmbJ)g3bJseug9ZTwy z+pOL+w_~d=H_19ubl=M()iui35*ixrZ)P(~gA(;JWg^Vo&}_d!l$5` zIuUXpa^z|kWkby@C5s-}Vkvafb3op1fN)eqCRgMQV)i3T@_GLhz>5R7=5{Dq-Rhfc z(PG+I5$Wt@m8-1es2LO<s*cp(rtTXFTp zq*{TS&bpd8sOtYFucmKr&`-F1eDOQhxbPgzxqR4OH^y4KaY#@z2%vjjD39>tGhmuv z8Ha`pMj@P;5y8QhJf#n{&aEv~3d>tAq~Eb@${qsPmiO{-E5;&AD(pKcBPTDvoC&J_ zS&_elk4nLS8$f@J)Gq1I8&F7P?Xe3F5vhNx{VyGJDdw%$xNwVh#Zf9~P zGORQBrxv1FKrW$bmNcRSP57PA2!Ow1my@Bt;EX+wlEWOc4#X9oa%+{|AY(u*qHL2Oy`*aG^St-xSxmTlSD~d&Z?1E%t!7Z;C!hLx zHY}6j8jC@j1xXGy!PVl2*3b@D{P1vsGg!D*+*{FY2UgVer;oeVuZOKZwL6GvpsRW!_?MF(dw-W?RRSb(>D`aPVI@P5TFxYFM>Ja=ftJ!b zt>Cq*R)ihVCe4KKFPO}7QYJ}d@%5a+LqNBNh(P%A1vo%D-4M(wm(-`ri<6g*eo}tX z%YJ#yY(ss9zP-;!JM-1dEmQ5X(vRPThQ~n7?wFPXL=F&O-HpISUeI~=DZ2>kGRSI@ zj08x&2?Nlt$+%kbkD94=qvw*noD8}l7gF!d-EwJR7;2y!2Gii?V4j{K77lGmess96 z82p>z0&6gRvANz*S&~RHh1gdFFFT-078OOAxW3-ZaxW<$CEeVN#I5Dt6% zv8r~U1q>|x_c*~9WCcG!35n{FgBAf{Xl;V6Bf)^QTw}-AbBlp+jmcHRpu|iShG&boSs`MyKT)o=d@77A0;Kx#Y1Q_-5#g!ThVQ*> zGf?Fxb8@-z(fjZ!C1ld31tL2(Oe-SCq+z>5V&XQ_(om}RgpRdcvs5Xf7Aev%%&kdr zMflPEu3$+BM>1I`i-i>d!(N~Y#V(r$9BRU1Ac+?*J~N*M|Lp!k)lkcOU0y$q1OK{A zTeG3P`%Xr7rN>ZY$F-HAt_h|~CkhBpOWK)5-lx(>-!q+1vb9mcgaXnDisJ6ygqTUV zP1f)@6+o;6RS+n#))csq3UnKsR-)dh;vx}@Qa1fg*Tki2{-KB8Fj)X<&WScZK{=%3 zsaBn^Qla6{X5w6F^ON~O?)+uO`4>6u z0h%s9ocEX^s0yB)E(tYjZmkIpxW_4tm$Pggx!HV9B?Ax`|J2phk!P0f8X1C!K*8eo zZ?ZQ!*8Grk9!4RNtS}NwrTD; z>2j?m=My_)Ys=_7>COBy%59TS;{%ss@_Tebw{5RYZrfzf?e7Xf_=2elt~MBwL{lt& z$#@0M+~5G)Cq6CVA^f>11*5|Kr3hq+w)zPFD(*=ZERlCJfTSC$38ZM?K&(2?Uk^%1L6`qpW+gEq%j z*eAi&a0e!8U%MIvpJj2;vH$pjJ2tO(InX#}pHIXT!~OLsQNxD3z1*9+n+Jck(NC!R zeR9-3P^3Fp^FfEgAvUWRodYb9QXHJlE1 
z8;<`TSuBfD2ypH9?LXlEJoQclNVei zrV}CXAnP_nU`Qce)yB@H^g-wRymzB37LJZ7#gK|TAD$h6p!=&6lkc088AcI#Qx-HE zgkPyz)I!M+>8(Mz6_XT4Pb=RqamMPrWZSGA`CM|qB0;~N)Gp8D>xx@G=R(Wgycw@SdQJ zuXs0FgZ_OYwf4TAW}H{#8il;-tF@ZE^t>C(Ya@L$PsgXdi9P@D+%I1$xoJk}wwhY= zjs4_}Ig9I|uq$Rufw{s(a%&cGX66k$Yt;}EW3B5Xe>X`3Yo zJA=oGnxec=bahqG+JoB5HG?bbmZge=75wKmeQgw|#G}eYd=XGyO}HNw#f0_s1v;(3 z6Dre{!Kq;zSmhw0zZj@^I%<@jNC=}ud_u)ly*luP|JgeQ=-|lvYDolWjh?dje$f%9 z2uoWkXtcdJ?mqtNEt}1DZTfOszKcElI6VjNc(6Y&MN?(`WB(_Z|G#q&gjW7P*RRQs zmH$a2DE0U4JT0ip{PoxcBTt^JJ<<`1n%uf7H>YAJJ2AC*oXG|-4id#m3)lxrx-XqsdJ{C+0u>Ct-1g2)8N!D$RyU6!) zD;%|g1D-y~xrPftxjnw8yUuO=wm20e&ee8VwwAfM&hV$s;Njtm?K_S9w5GX$q_&+q zFGR+xwcZ_UP^`)7&(GVKwdC?cr>?%UICA;s2geVE6)+P5vEji*OB88yL<8Yg>o6VPaWQFcv$>Dc+&IB2&e&JUj%f zmsM+h@(b_=y4_Wh^I}yO;Za6eyT?C-Yb2=3}O`t7fQn%!NYHwP__kHQcjsN|1uik=jz2W~yC*pr*&o$kV z|Cnj`|6L}fnt8BRy1kSH+)vFQjpy-z}WfR#HbqJYpb7 z@oQQ4wlGl8b)8@Y$%s(~5rus2NNN=_8^*~IA^ndMxI$GJ&B%d6IK~*}m`n)xgrtoR z{XUT_@Fbc63Hu~IS>JlioZk}ylv9SDGCm7U$^=Yu@qCm7F);o7CBZS&mB6L%gWHI? zdGq5<^<+x1f={`Ov_2mmm(5WsgK$QUa7vJ5^U+2xT%#LUdA>!Q3>peeM{iXQkB6GQ zYH1CVW;SU+*!N=qk8k(2NRlyMLoVY7hsqyCmP7Y#5P)Li;pj4%)fOrHa?>D zB7g{7R|=_^o7*LZc63G|B>Cz$V-xGeJUSD5&IzQ9*~VR5g@g6vd_2EEi7<5u6}I$e zYPX1c&~YCG;cG8$9-5w*pS}O1!fAOZLRw}|8-HW*j24y4hd^$&;VPrYl;IjPhQLz` z%qSYoL)FV-pu^hXun#C976am}SKjY-VuHYmdu>bOFmzB|TaJ_7B$macppWHv5M;+konoSPZJk zxoNoHCfFx_;6TI(9tL-XKwzb}#SL5>F;rq5z>F2V*Ahdi2~()PasbtjmQ1;N6;vuC zU~I~1{}7=EH*$2h652*&kA8R+dZdC`I!x74tq*UM+JHRK_EZXOH=3)p?)AxRbfU~z zv;SYi{asmhGiam;{pc=AjWv15I*P)M{I0j*W1&4k)EzNMmsH1&(j){_&}4llqkU>Q zOvSP|n{brvyYUerAfg4uSt&TR5J*XUpB?p8(5L{~;NakU_bn7e5{ZF;;@LCF0?qMk zQz8bh7kXO@=-Wb4g~={&YsNJs_M{WBz*pH69WqX!NZRPvB>$u2v4$&vuPwvWtxCOZ zr?(39Rq=53U9;3|MV~LEdXh>0Gu=3ge0ebR3?{mJ4Krt6m&<^ctV;z{#fcT)P9fdW87}Foa`VSuD`CF43bKm6mcPihL3D6OpbM}0_=M>N0HH;5MaExGe|lAb+%x8iu&Z#yucOgK zW3a9jyx4RDT}NV(Xsdgu7dJame?^VwHSztG(f8GAY5A*n!6U6#P|>I268+77P{9cta; zchQtW+6lqzoHc-tQXV$ruXFvvO1aC2ATu_jN4p$13h)Yhi{z<}F+EfFWqhL?yvbJ3 
zBrLTA7%*jVq;k@0I6|#+0mScs&jtE)?S^naOYM0Kzb}pI7g`X57lKb=zw@9t1EUmF z$;vZ^HMPb%*(UO*FkvPSHMy*`H56RfwAJ>fPGV_R;8XQ2dHlUF8sSxcT)zD;55YpA zcZZy-oO%cBVoOcI%&?SQSTgz;fZzSf^{{V3yIMl{_%9c9;v_X{lXsk2aSPu2MG6G=Su-i#Nl z;x*hHlU!CrEjggpN&6w$jufuj=(MEH(@-*b$w`hc#j{ENX`42&$YM&;A#v^ezTUx) zZ(qP;iVIh!nN1hhcif|voRU@Jhv2yUy~>T7Sj1~$%*OVMimlg@&Irn_fFJ1&QZM%y zkZPV|7PX7AxlIE_!Gg_w;5T()22C~sS3k;4CbyXZuK__^~s26sIWQzR>4EeBb$T;i{n^Q9EC{m?= zhQIXPCr&~Y+GD1-+8@%WEIg0aP08gnf(x!>+k?Z?-8BX_q>^+p;1HEG7e zk=(9ss9;ooS!(i4{CHH#FW6Y<@Q`l7R26TwD@g*R!zTTCLDvy`a54GZG#I4RBehpK zf7#U+R>@sXSVxf$B~aJnaKTy+{-a7tvyJ1$@hBbhMh}HZUD`u(djh^UpggZ7N2t}G zuVQq6<0A$zvb6tXFhW(|i!Tp_9#lh!{2sW$Eu4cAD98fFDObf~T-mv1?PaApufCZTr3 za+-D#8H^}lkcl=8nloVu1N{#41CxwSVVicn3>s2qtZjng3M;3`5C={|T)2gOe}?hi5Dnu#&jAf+qzR50Ew7Q?lGI= zO>1ez5_dSg3H2JORf*<><&~58o!K$P2#u#)Y(r;we;- z8vJI0T9-^hve+{s0}5-`(pZTi5MHL$S4$h9+fmyNxTg@b;awjdJU!5h4s2x#8PwN6 zxrmx9$(+y(n}H^H1Tv;zMv8gj6Y>)J)LpG+_`gUSRYIu z;`B+;1)FFovgIhczGczTiql2$Gx0GD7XK&u9d28V-vj+!{tq^7vc-MTqn5AcQ=+E) zHa)2079Sx?G6R7csb~#G zD-^$377ZW#`3)n$Yg!1=_9G19HD{JD6ePD9H!C+jf_FWb{8VJ@9pf_pLA0dPqbVya zfZ+H+`B)s@)q)h{(E)aZiEHgL8bJmpU_#~F+k+pl1B&PcbgAyz$Y2@pcW)M4HJ__kA50U z@`YbYqqc@2GMApJQ^nRBLL7~NDvY+8JkH?wRXpTTT%)mMMBS2!X1+`1F5xa71yZxo z4!t2X=!fcPDK4X_PczG1(9ugUhsh!g1Lv)cZx`HJ5ky8YS;QtIm-8*q@&rN2xr=_= zpJ@yZ^-WTOpP~F|XKQ&x1oiSEAQ-GNvi5$fX+X$*@F_CC>EzvGD~u-fB}c_W;?wV3O$1?V_VZ zH|lR_KudM>(}vMcFBD-L1|my&Hingk&y4SHg2&19>#FXLOLiHsF^JT;lDGT3Y#BdP zqD9jB)x#4^7B!s_Y{sC^2r8}!ni7m{-Xmbn>~iU0I3lKZ%sl#>bK`d2AiX z!|*Ig6{Zx&qFsu?B(k;0KjtM(ePz(x)&DxUP+t*htwTkt?xy@C5W0o*-|)ePhJ@e> z^soG1?-2ar*Zj}KA3eiGugMn%A4ou3gimg5KQ$1D@dlrg2ZrDDim0wveRcdEZJf6U z&p;Oo-$_P;82SB}oXD%#M&^}GT!_}i1nBT7^Pin%tvqaNKmN{CF3 z@?gwuLa05GN^(>yI$v$$-A2_LCrp}JfGG&URAv*eK_is-Lt&HU`PC8iXMh_1+hJkx z`&Dt^L^H|fMxZZ9=c^L&k|0J3>F+(Ox9uoq)cG-}$({zGjCvfN_S4UMnRf+cOkD=3 z2z1u1#K|6lP7#JPEDb|0P`5!$E&l5bhKl(2f_LutnKkir(`me`an;blG5+Z{X%@g|@Q-O`{O~EDUwBULI%&C6G zri7y4|5yGZ0fDrY8lhoz{2NoT4l~1Kpo^e&KB5qQE92?}L9BPWmlkt^dS=j?-bSr* 
zB1U#ss6P?6aT`f)_Tv8PWWP_k^JQ_bA!Onhc=WmKn&l1;T`xvSq`gejj{4hSplPDT z{ORZPZu({5OlZi$*R?@#wbb~PKuNs1aN8i=EV7=}+a+sK>ovr&Im zj`FvGV;P0iB|sn$_jCsf;SRz_qCp6~2!v9_j(fL?LcbIho7IgLzouz6<-T*#J|Blrt1Io$xhq!h~5T0TFp2J9vFlhtCm)e=_;gh{^jWCZX2Yo zOESuG=4{!<&xwGfe?RolcTj=%dDZ8j6FVwa(te*{d9P1H%1fY=50LLwE>BGt_%H$3 z!tsBrv2nk`KfZm6f#GEa79kW2`3DZEG4Sh)@9%_3dfj61HkQ;v(*RYf7K#>&5uZ19 zj8G`q(2B(4OUQ+fSpd9Me~mZKP~VUShy(9M!_J%W;mldhkWKl$C^uK~=V!wWwF5IZ zivLO!FbeU+K#-9Pa@AshS^)LJf$1>-x}o`#mKZ-qZB$>M_9%3i!oJ3z)B=QjtMi87 z^tJp;(_jJ^4>oC-nMI))H%qR=c0aC(X4tj*Z2T?{N-hJw`HKL)|sV7gW74`C|Rf)-h>l*|0IMh&f>{i;UFeLL;+F#0I6$BNiUPWd ze6>-~nm>EM`WXue z?nPY`mKviQXeQU!Q9d|n(#$P_5Gb+|zQrpH5=Ja2jTBQ?)40xYv}43!KW<2kh&^6HgRwe zftAr-S@z-eV`ngPtrRH?bOeXcemfXX;&g`oSfP@*^QYI1#Fc_8k*CkP>uVqZHXJH3 zEWi)BY4czNrta^*{{0Q~G13*Y(;MkXVV3V-f}Y5>m%w@yo*`7TndatiH*tM$7EmuJ zojw3OTLI=9;q`ZKDCtP*78~T2Oy%C3@~gzRQY)oSZ0p|tPA4w&?!o;xgZd5xNnVN# zE;2Wix)f`5XYau~nO0JgtJMwn24A_C@of}i(aN763dyz&oDuEyeP}=UD5$0HL0qO( znU_!Az^I~ZO=e$-d%4#Hq0*h2yC#sm7opIoCye8{g33B-OCY?$B*uw~Pik4GMpbII zkg)ke@y%H+!51nhhSMD09?0~~Xh>0y+O6Kx+WkGV2h5i$;Zsa^F{}K-o67FiU$od= z_R!A|nP`;Ek1MoFkmy*9b~LV|A3F$~>g+!c%l6qQr4FK)eIy8o&ra{z^FXjqulF77 znLY)R>s}dZW@GcjR*jDT)yCec=8ccgH-fJK2;OiQo86Bi*muF{%O}sIvlFvNU`G`? 
zkP|$A8}&S$uoS#_EOSq!AlwNViv6h;9@3o7pqUj0xt>FYn)tt-k-?{`rqOH0pLx9X zZsCcOG%b*NSIC`$z1^nVsDwZ;f|}PX?d5)7^7Yu-2*{m{vUIuBc~2wG@TCszn4?bZ zH^va4$h+N(o_88f#w2EkY&(2+IGMO93ep4}q5)_TiFl}V7998Bv%zO;@Ys9dFsJ~Q8Go6cRqdIu+Bv2@pCkSfi*u%=cd z@R>4)-~2(u9TASGpRY`lf4K+-iXwy?8K>p~vb@j1{OxZd1t+ew$`CJu5feLU5F0U$ zYSRQCK4s?~|1aq4fY+ZT*rPv<1gHM|mX;s>{FatkU+l+1D?gcuW$(?$eXK&)=jgxR_=vjer7;Uw5w)>1#6aukH`SOdr%amuvVsQtg8rj%e4g(% zixtuInrgK#AIC^j$1w&iLDU|EoTOA3^36%$P9iJ#(ThqObg5jL8!sJLOhlTDP31jW zF|wI(I|MMoDymS4zn5rHD^Sc(d$QSZMIfyr8snGJ=sHqY`8NMIXIJoVKiF`-HwXKP z;ZPAayAP!=fdLT2%yL^cY z6Aw5o+V}gf6G_sr=_2gTqOm@<+3}c`VvuCP)cF$r4Uc1Iv2Ne7>iba)m+QwsU+0B^ z>^%bG{j+0VozK0Ry6iWvl#c>#^g!qtQSO5@>1->zOmMm1;=!+J2nF&_%9C6&0@`+p z2NquxNGHd2<8Ug0KW`MQ7aUaG+xQh|me~e^6?}GX_us>dxMtXTIf4yi5TWnDa7L+@ zuV!Hc*1v}ZuO)II_S-2m+_o1~w?d1PBK`q>VfN5qSb91&rfm|q@q`Qm@aYML(g|m> zXy_*pPx+%$!o6oyM;23+PYe?(FBObJl8DhCf1m4?SmD>_#N)O_?JgqUB1(mui8(+I zB(&4GWJ1W)_9nb)*7$P{DBce~_Li_ac%1l|CuY2lPjC8PIr)LdY_*IIh%ohsx&&4QwTQ0$B+cpAx)lsh*$PwriMq zNcWYFD(*Lvp9ZP+r2!b!xKC46F{99*#-$*3k|aTe4t)~}(ILCa1%dj2RdDv^M_{e5 zVWf>UO|=6#><_hIDM{C?W@;ZNgDI$n1_FN8q z$wjhVZPJ}TD+nQ^QHEo;0N)FJh^SsI=v5R-ST1Ml+)M3KQTIh7O%bx4 zy^S9Xpkd^+scaR5t&Q)iu|04u&V98#klJBf{PF8w{bk2|nY|(OpQM>MwMjEYq)kHs z&?SkUAu9GR6lBsws6(1m8($I0W98;$&=-lSl0|)kxT>lOka{zPefr)Zn&jqBg~DEmc5@4S$dU&~s0|wD;jg@q4j4c??douRh*qZm-%Fw<_tHKM7M9O997q zw`4p~3Dxu z0ORtWjQP`-Ev>w_NwC_ZKL|67p*s^1L1gac>58}S?PL29HB4d-Eb)Or4!O}DB9ewz z84#6w=W?^C2iqXy(Ce(ZjxBTM^4fks3m)3+4*hhd{ymysknA zva-i+j4!ypD#>(v_Ea@9A1H5Y@#)EG}wJP)IY|OFwH)CVN~2tj?+JQ7B;WAM6Z^z zDcCGNHL1^!RMoZJzyCtaa6x`zf zqrESUitjgx7X=)d#!u!UFkoabOZI({`P+N zJ3PatfRMqX2&Fo_ee*&p=y;>|KSjhwjK$KqzM&OjC}V?}?J9MGF;<_LTUMnnmGz+T zTUgfse>)h+wQU=@Wda%zC5$uxd^e)P5pP_o=swyV!ds!U%?_2jswE&VKdjaGW~hYs zpEhU{=D_aE!ez}|o$7sFcnG>_2)R7MoC|Pu(+?{mqT8&#h*kMD8zUlm54>elnyYD> zK0#Cp=I~oK11-OP=_Fu>$^HDF84IDOL?t~ixux+&NK06Hsm!_93pXdRBmSrv_NY<7 z0g=TUsF))NeUcSO3VQENmy-L~?cMu*r_L zAWUF6RxPyarN#of#ZQ^-cyUMy^1I2B;e7!{dzO{Sa37L=(E*jbJt^6T>}>oqOHA8W 
zOZPP(ZKJ(;&cJ~RWAryV){mcZIhJnpDO?d|0wcdMpK`b75eh6{`vaug@UY<)o$p^P zNkzAlIO-}Zd^SH%sI_e!?&9y(H3?KUEERMYYhAE~PG-XD$nw{^Op-KdQ}f=cayF>HgzBLGr@IZOii!R((bgJ2*qDXSx ze#Lv|aL1&-k)Qb`BHZ(8mSQ+et~XDG;I5P8yD*~|)89iZF**bQG}_QKu0+%2LBxV@ z!_lDK@yC$oa~^0&18{D6eD$wivk;+X7`+2H1ZD5H4I?k-)88vI`{Vdx^h@u-N?AwV z`@FT+1&=^zC9ec1<4qCKqO=Fq=tZPX1SU41n|J{YXS-L=z)e(x#l&%<>Rl813>|A{ zjt|g{no1d&<$o;RGF&9&isRPu*m48Ko!WW4-LX|v=OJN-mL5A^I-dAqR5ODgqka8X zJdSI#BoJH{zxw%W&``CXyaiA`gSb^%bS!mqj|Mvos}%-!o0qqoGBzb{ZL)o|w~wNL zI=w#O@#sKY=$0lk$0Di_azMtC?Bz#C8&efDA3RGjJKw0Zj!?%Qej(u$!Drrk+n1s( z?V+AqetI=to#K=cU9{EV*#JFFMdEditR(dF$uQ*bce#aPdzV;Zk#g8Op#oU*W|h9J z75jcdqL1QsG-|$5r#gxw!YNDb)Wf|%|I_ITRQ#mB1hqS^EYw`4CssMcpR`ohc^=?g z8FO_FtR(`Xl_d~8T<4*g?TX?Ezv5sF(9{LniU?oBG(V$6-2K2fgRc^=mNy zN=?0p|MSa#DGE=9hEF@D1r69E^et}B*tK;!>Z`@Vb->rmeFZoK zWglPr6b_cDa-!E1w}(DSsOSeJJ&G)@M(B>v4e6 zH&vZg?2jrdt0u*c4leH(e^8pXYR@kBO(=^h*5ATklBC>M^n9Yya4%6LbVb6|y1zF! zH%D(#=*G$OaS1&@2kHw1SH&kZvz3kaGprwwJV*{#ON>Z@F>sizwj2nry_Ld3!p#cgjzAwL+G%r%R1Zt+49ZGeont}{!Gu&#keyu z0`K7DQ$)R|4}06^_wM0F#&42r;*C0Q%u4|mQRrs~5hB|nHis1;0gKW)ib>Z^aAcL% zR1vYb!pt2F!qbfxzCGQ)zD`@~Pe<0q!TrNkjp@2VC z^_X4F9+H|^M20Ra4O)5$Y;Wdin10lv#l5*DQIV}gWnn;ZjpA|4k}2LuouKqz-z1sV zI7RI&!=@OJT-bfXOk*9>vW5oRK1+a9*?9S8+@<7vXnEVPN~E5u{mx zeN0nt*xwWpSBVNyPcb6qSP^1jAIn_&J~MV7qW+qA9CMCEghUppiGUdYZr-Cq)9tgT z6Otg@dB`*Q;2eIww|rLLACO;2@ttt9))(H?Q~{&aWIdk(7k(6hdC18LsSo4Uqa8P* zwPRhT;yBK-aKkU7-J-#rOa2e_zH|(dj~ksb#p^0z(a?iI-X*kF zvQv*LGTW$aQNEwtkxn-ALcUN>NGp$AGO{_GtGNN47d|;VZVz0s0>%A!eeHJ<>#-T> zEi8awIeY=Be{XR;y##0j%f2YvAv!wEYJ(K_srDqvff1WveJq%Qu`$Iv9y1yYTNv=T zAiNr>(r|)iHXa@pvasMM9MP+NUHI0Ejw$*y!ogWIoT0|D`VL;8D^o=qNkZ zKjd7N$vH#$M@RQd(;%VQs^32uQHw|KJ=NzOlRH4~<5F2scZG)cGyugsn8O23-uIlf zJK=o_<>9czojNN|3^IWyfX1q!%Z9#apZ?KXt?wAw*8~2gJ0m;Bd-hh(5Z&0sM0IMS zOX--aoT|91?M8rAH3P+wnh5e9>Z=qfU*@=hLp|35Cc|zYq4WTFTjnZAt!|%GE}n1` zL(MgWc98{=lqqm=`SbQM^c@r4QVSP0C2IoEKK z;xp!AceAfWJ(=|_y)|@<+&In^g>o^A67trB{t+_}+%f{chvvX2Lly&3eJD$@=Os4k zs3=UwTpmq1R*@7vZR?+<%h 
zgR72D6#G#`x#lH}vjwuo{+dw>hC$d$gDRi@{s+=bK9%)EOQsr4yFOt0JDO*@)_lDf8iM#Cn_R?D{N4y5MrTe-yKpcP zAGE@;Htb+)bHw&;XOXm1r{zk9ao&Xkg)@dpBJ0Ahx68VYoYYvQnXWZQHdrav`GtmU ztckzx{jn?a+>hCVB{ zxq37=SG}e3`ndP(qI(Tx@y6fzC8Q>p9^Kh;rG0BPo?gBQzpfPi0s@9#&ue_M=|+O_ zXiWzc1^K3q0JI!FZFc2^1?=uFpZrvLU~^HPp3M27H1|TF$AS>`H;iChC;-(rix!xPOSw2E(Z4YLr7!#9Za<2}>~nqm@WnIzvNVeJWvB1Y?T)iIHqO3hXBrt*YEXM_GZ(~YM|S20e}{c@KmNFI zrh&%Y<|`Q%jgRi1*f#RR*wA#$(aZ6+-3!*WKDrTWA&m>zINs<}NkvY@=l)HO?K1po z!$rp*wNw*VScnfeYeYC-z7@aIeL%4F{p%saPjuCH_uO|J-jut=U+T1>qs=GTw6iKs zePL(P#osrK80u!u3(76S!EqV15?x%@v`D8Xq%7HLY@E10!l`ELj<(Xm2Io8FgJf$S zYlUmQYr8OcfRem3*-+-((L2N~M(cnS#{+|<~-`mp{_LoAP4%GQ4Bjh%-torx#sInrw%9am5=QLLZ6bcwI;U5f)IJ0f*E zTMd283V-<48nbQ;kg-h-;ulo4$E^7_r~P(gvxVu*&9S>qMGu-kSjo=N(X4<-#ovM}2_j^KC+UBY8ES1b|5#LaP<*dqwB@l+SBqsx z#*C`kTMgpNv{KD*FpjUu(Sk+HT7(1Vi}{0TM}shw=96HwZ9`z!$&h!-JLi2rISdO` zKm0G59z0`MrIz)H71wHU_&Pd<1IK!LbL!i+aX>j==KNG#er#8lW?mw$8R9TUCc-5CC`8GHdnYJ~-*k6jn{6&eVv0mdY&yZis z{uOVg>}D6Q@&jS*ZyffD(AC@(7C+pxW~nm{qx=j1K?6PPTMv$n^FBIaW#*=JMfFy- zwy4>$|FsVo_%2HO;G#67oUc&HW7mPw*p`*Dh@)D`EzgqY0_tfuoiDDrO7fFWbLmHj zCi@J6*4(`taWv4xIAXdu;zawHBM2+ixjv>iUsk!6t5ws5Ra|>i+buvprK_U1xh1gt zAONJA5wj=AvwiQ$wTyE<2n(Hk_aHW=)A>?o|E+60)<;fX5_wTo_VDxSSw77JnywuZ z$D0i|TAUj1<9=&urD{@5eoRr@(BVp&*7#mL05Z3SrDgNnZM)u#GjqiQ z>r|x&MBX#>DbAl6;NY-N$AE9fqEV}eF;%B34Ug zp_2J24)}%@6K$y)KivBzeok*gaP>P;!}w!3dHgY@*bWdt z-k+O3CC}Y5JlZk!QtR`wupWEeLu;*MwNtG0WHATci5lCF?u|P6Yjt8<*+i?Z8xi$} zUF-9%mQAhB%_)u$n>DSlNc8bf7F^jR$#M26fjdBO#b^p#h0G>TrMHx7+C9!JaFHiALM%DIO>A zwW_|&A?D(L7e?3pe*mM~kYS+qUOk%ZkTo@?AD$^ZDcC!q^bDqT#HFh%WR;aN4L#M}Zo$s}+wPaiw3?JerWxH35>s_*}gjZaGO z84#Wgbc>n@3^Uw80u~+PkJ_?L8kgixma=&xfRB$aww=9RBGC{JT(;n!buBK#tS~Y0 zNUOKJVPiij{QC<0%N;=Dq)wPF0ra&m865K}pyAM_khnXZ@ctAXky^>$wYRm-Qh>_- z%dSXnmTul8HNQpYFrbsFfze+oz`n-K`SN6$LUBw}96eI-M-XwGpTcDUld$5oWr1~K z46q%XBm&my5T6FnHs#C!e)oozfb}1cKmUL1m^_AW2r*p|n)VUES?Oq4C%fGVDV5YeSVH=CRvy*shrNY&Wl&s=wf{_D2VlEQsdzd*Gy85rNUfFK4mXv0w+pMG6z%UNa}=_qnsM& 
zYoES)0TR^5_mxxMIQE8s`V>%q!V79?>;5jAaKLDt)z56JE}cmFe!8MVHKE_`OR0Q7 z|2^?Pq)z;ocq{+E6#3zb7hDbFmbUt0Vhb#D_gbeMIy6zV!fLW)kOz{(ehBJ60PhST z8?p{`-qBjCuEjll=2K4Ya~uGq;aWeH1z=>^4g!jG?%T1S{xihzPxu9bwfD5<7!OP- zVvALcdpks9fcmfhdlanO`}g|$uKx|L(M#{l^aVn?FYE=EpMFt@HcdT6qM!$8` zl^`?#x)MjA+(dW-F}F6k+N)IqI5k7ly5Vn=b`pMyi8=<`>f+Jq9{_qg2ltl!ysq<0 zU4Wc4hX^&%ujCLDa|5uIWz(o_zW0F1q}n0znXcaq*Io482vvdKd+XhpbCj_$jU1A^ zkhmyv959l@Y+Y7buhe`E%sZ9Z-n6D>TrCMcfB$9k41}pI5Kz2`fN+4K!!%?BQ{qSR zyc2xZ{i07?M$WUK;BZGiLcCdhZC`NsrP4OnN3gRONW1KG113+03O0Kw^+XCY<%TuH zQ_^?x=H}4BM)Ejjw%ap7SeOHRsS_cAU_QqHsa^-*{YBh56m%jOgmRN18iUKj5x=Mx z2%66l0QID5M5xkUig0?^7@c1T>&mxmh9-|B#)BSTC{x@sPLHOjlDc?Q`iM}#rO1Zf ziN-OQ^}bop$;|U-Z+}}+a0eB$N@$k}=$m=pjyQG8@#a0=GeeK_76Ln<`HM zDC6W4_Gy^r)AOkLxOUBUqk)<=9N==qDY&#hg%#w(CFeL1=BIT*#qdV}KPZgN3LdF&`h4330s%A7+P*&thkfPv`ef>zrRy6#1IkbU0qRIP)qLQ?-V556(Cko79Az5=NwR5SN_BWc-hLR?l8b@4 z%p#aYY2Ym#^Z@h9A5hqKCNp$4@zMK5U*>dPnvWU(QfzZxUe3_<7=`CInlPX;o0o2+ zW1v4=ddAmZwOESZFq-uh2LZS=odt{)7*)r7^m?KbO~)unZj+40=hOtAL5PYibso?xC*^JWs-I^7bmgk=dv`DPsR3Xg<7GZgr3BIG0o{8A5^biDLz7*J zL`P01U_3B13)lLtcS4xa4qX8lONjinLfKY$A2p{;QHpJ2?)}0L6tC~lsWPSJN|Q0- zy_l=;h{6ug5^U@!;z6nTfLTm@@u2c;5YpO@14ZL3Qt@)IRLX6I@Zn4{B>sFq2S#RSa((Fn2oYr?)!NR(sCsMbCUY6=Kp zG!76kKO*tH@i@ZG@*{-$3ZO*VfTv%Lw`GCv`qrG*{C3(7 zyMymft5kAb1Vcz7J=B;F@^}@E9-90vyoW?mWew{(+Q|Zp5=9^mnNobaKkd|42bbd# z{@L=T>9!7m83B&rJWvf>_bky(@X=#guW5&M-lMi-%*SXk;~)t3;=-Gl=>TrFVA6`` zXPMQ&YLH*-I9w}d5XVi{&pVSo7j#Ym+FH>h3;r5xt92{0-B(gE%rf`A*nMP?w3DPj z3MA-umiU&wtLt3{io8(BrIr`UTA)8{>!~v!y#D9y4QWS82mewD^&cZC9SZGu{xF10 z<@xIXKkFY@DTqVM763q4e0E-E1~Kq-pO%4vZI!DQYedBDao~ZWl+$%Vy#!A~G%|QC3YW_;jt^2AIEMJvU`N$0U8S$mP<(lk>cWFTSbCkeYI1dlzuY_IzH<;a@zj1YAtg5Za*7%>3aTVDS4OJ&A>St6AW7 zAlpZBZaan|5jj1@XOTVVcBZ`jk&?&RqV{^wk5=3?4fb6F8i5Evp$N)bfGJ}eAB2?< zSyzh&xSa&`+EGokbIz^5L4fYWsj>&w=Hnh8Gmtj(s`V=yFkgn@Sn@2$+WdMJag_m41j3qEZ~3jj63riU0-lLGo0}dm*wP zgt&8ZsURDGYx*1+=oDEQ|Mc2fR@&)Wh9KLM)3Umtl@}pH~}b~hWNwu5&vxBF5jFs+%_i=A-;jdU0Xx>JxR`AbQreH|+B1Nr+nf61|!oaV2oAWgzuAlfq%(m#nRw=|$dCoM?}=r1!C|>q6t^ 
z%r^IHYN5Y=tHVI{#;923Zl+i`d(CtKoC!4xkYQ%GROMVklGAp1SD-d!w6cLY#=WQX zo;%o2=gG)%EsYaF40oa6=+;y^nIvE$iF+Og^=`!VX~}1HCrbgGEU(kbHcqF$9SU^@ z?$-9Er9t}iV&yfLfiHSyZ537)bBIk-``Sl+^@dUN7rhEB-2a{8*Jv1OjhcrNENQsb z@fk2=23n{Hds#zldfW%#ZuIJ?sO{!ng=CoL=PhgY)jSi?AHskC60i{{_I&4B)DI^< z(71W>uW%+M38fFs^o=uA4Kcp|1zj{}jZ{pkE> z2^m|l7I>2spd?LSVP{nHxZrU;Lo5in&m!8Fz!#aas6egm>1?Sw3UL|oM@hshOP!0& zM=BT`IEk4b-*s+bDHvj42+@B@RAYE6)>7RWznXNz9zTXepP)<-l}k{%>|@o%5(w;Q z99E!j=vI3aT^4^x3`Y{c>yJ{S-1aGmM)aF^MTuyJF4+|%T=ziYAjpY;}hP$!<9RXQ^{vp?HX!=#gCiVT!;8hdl0H$aX{ z(AH)#RN28K<~RV!jjY&lvEU$IiNvlZk%YwEy+4;Qj$#T{CV8H`(D&bIF zgOythp{5@fJo&<0zbWpAv9q;iSNm9?k}Tx%Qhw4M z!hy0ACBUKiMSx(h!|q!QT{{)Y95nH~VJ{jFXgBX%{C+#+thE{KZyRETN|?yioD7&M zVogb|QmA9=nWAOC6{R70nJMzZH`B?k#yKKU3j3`>{T_F|$+Ym^cGWrfC%Jn{PjJ`C z`~yLF92Rk)W()dpY_!sD6~&qbQ;&KSgXT+0dgqsYh20u@GlcXLdUTb|X{^BdQ^M%H zVAGBCB=j-s@$)#L`>2scjf>BJ`0M3s8)1S~PiqIQyn9lX=AkXvx8_ugRXe{C=xoYC zC6&#C!~#oi0e6nAT2?TU=xr9dyo*Fq+C6}cvXn;wz%RkvheoYxE72ZgQ3p7F)>n<+ zhEvIv5fe!VyJztz*Fb2zG3a?? zP}HNXly6mme6rw0jg!QqLn)y%s6pv^aCEIdXN+qqqyIHddHYXinhBO;GOQt z?>cUKeaa752BXO-U9-uyGdkYsSJg8T8FH3TOjGYoa8XC$l#r5ALL(69V%*g{OC=6# z2?S=ugmyO|Toi+qMWWjgbPW--JP9&H>gO4{HWd73x+0Kz1i_9i;RUZBwv2eKqu~0L;emSS@rJ@851cHV2nOlDGmAMP&^il>$2-qn7l>OqOie0$Qz4NFWIAFcDCM zm49QcxDt&t!FraF;`We7lAo}k=cC7Wjz?1B8PI~OG{?>!-N0yVPph)3n9sV*pJ}8d z2T?K|{H~hLN=($W{SRh+f@YnSpRsKMf)?s(6wPHnSN$i#?AYH%<5QOn6?&oaH%Wi& zIyI&sj1*Za0iZ)DrBpqx(+>L@vh;I2S`odaao`U!F~o#~Vrf%U3zvyBHqN;Q?qoaa z%-B<9$=BftU%F}WS@J<(RiV#tfrztPO;D&~KZs=kIg4tZ&sX6wVx{F zrV|QmZZvkusCGhtE$%|nxeM#JqGY@HPiC zP8Sub<5BMvL-RG@ZnicRAO<*rRsB9N%WSGn5nTk6`%E@!UtN?v=h6Z@lFnq~`2YNX z{(rxp6rfdr*02JG1OTH-BRve*LE*EpE+qJunggMaA=eUQ=P@M2b8E9bG$~wo<$?&R zn;mp}Z0#cCFXo01|C5~BtPE;EfFhxfeE6eBCzPp2DUja49v=qIZ73R)Z7TWOJCg);8u~w?DX^#Z+5L@4s#y=n`C$s<&1@F6**Q!MNx)|;x_x$+ zV|^tqhFKKLYP)_ZP?xKm4Mha@j@U~%vNYx(%Nw+JI85c)J5?~!qziddBu+&msgU>s zdsO|dO{B^LdI9XG3d2{JNsyPHT4u=0l#LjHWrXZ2nu=wjHU@3+u7}hh#8-o8)oSVN zU2^s}z(zS~orQO0bh+<%=Yq0>B|x*0OCTdbA7eMpNKrK%hk9z7S2daaH^A!e`fdF4 
zFl0H?8R4y)=A*jjh_3-#$Bz`;Oi6`M!WQ5fCFxe|QF95RxD?u=T7Li$OBncP}mH4-Ue-?gs3n9& z6xD29h60pqbV2Kbcy7{bSjGO}p)-d5Z83_}Bj3Oq%E8y$Tnnp7A+P6pxa~#Qd|=Pgj5a FzW}B~-pBv| literal 252961 zcmcG$cT|;E*FB0!jIYKL6$>g@IDjA`BA|4m2pp<3DN+K`r6|2)Vg)I35Rh)6gAjTX zNTdshbO8Ycq!($@;WxJ??-##&|G8t_A0rtka?W|4z1LoAt~uv=JTJ;iZrQYN6CEAh z7OIrE5*^(}c{;iu!vFIlexjpuHx=ImZ7-K>EKAOy0kV~Hy*oV$9I&Uf8Oz+>+;}nsQ@l*uN@av=*2IX3k(15 zSNQnGk=>z33#muHYY@8C^@Beb-`PXAW+J)|F7tHXvXxpl<*wbTLpe1No@Z?TifiDt z!0r<6&G?xA`8vy`Upw?aeuPi29dagM&I~y1l7S<+T9dw+}sAjKrp);SdK9}NCajyY)>C{#om*dLW zTZB0`m%QorSbH9F%Gm7fA9|bq^PdNI?^cMgyjuUm4+b4s*_NI8o6F{>>%^*!GOF~` zDyyof8Ai4FsbgZY5hk- zXV&la^!DDde3$#AhbMa*lDYNTNB3{%lMQ;#bh<*Dy%&I8i$@#m(!Mg7CmIc)V3iAv#{2hOGx+o76YQ5}n*QyD zf=FF)@`iV6?zkFIE>UYXe%k#@-^-piN{ich{FI-(5Nxb;v z_X7tG*p9wE>9#bM(_ZM3^EzDM8Mb)n{a-hoN7E{3nI8uSMXielBx3c+E*tE=ORTH`gT%jh|wTUji-0E z>^G{;{&LcC)xI9N%0Kp>wlrB?UZ6!|Peyvm?vtC2RftNd6J3(^_4O4vaQMOW0Y*mE zJnN5?YV3WAsM}&Zf;Qi2IuY}TwL;2 z#VgtNqi_A$m;B#3Z9D6{JXtZ@tfouONuowZ?7Pm+ECl+Jw5IHkMnD91PRHrvF1heC ziZrs0&(+h;H7BZ8*XNRLrjyzhJN!MqKhQg6{8~j^{1%0soBK^T>j?@){QjO3um7sN zarbJwqb8`iJ?_$#mDMe}#L^~+AB_qJEDKGEdd?ZYG@*e19!<{@w3ML#AH z606jP*AYUyn3y8#AFMMtiw~SF-)|Lf+8Ar?;7~Vojpu@_toP!~&=pp>z@v)i!uTzY zn0`r}pWb{ucW3XEDKx638<y+Eo-pW*-WRi0phWBlqjCk2!bW=iDt5YAEyY=wYNK z#I8Yb1>PaF&FH1t^5F4}40oO^Ob;Yo&avdsd6BE7rS`dAq6KXFH+*Md0jg>fk`ZW0{ z$H^YEj_l{<-VD#MeX;o#R6)1J%r3{?EjXs~IL-bD_uNA!;}L(8(1y%R+50aw}Mzn@ClPk!DBgNLS}f|hENrX!Ww zxnpEEne0_xMj_s7@6W0xs!+$r$E|H`Ri2!?jsMr%jlNavt?*NrepjTsabg{d^h2pM zt(SJ}V+zhrj*gUqmGC!l3J0g(Q7zhA?*~iCGf9eoQ6y8DY z=8s#j`z*K$vUaJ(tNC17xerp_jt?xq2)-&<?E-sh?2Y^0-c57e9;sNQ%8ri9B$ozqsLoWeMIGhg;nA+>!Fx8~wo}ZS6B7_5 z*7o+7>YrX{esX>TxlynC_hr(?JL~GO)#Uu*>XiQaef`Ito~H=0tDoNgg{+|EwWNb% zGCf!SOpp9OIvf@x^;{1Z*I#(-n3=>{@wQN z+n?Z!ELKaVEtV39tG?SB<^I`viXiB!}R#ih9k zg{;-ifwdyHrI~j&bqNMlH0$fv{d011G;=JUBTgxo#a0CZxi@CvU#PF2JWMG@qFVWN z;B|!a=Qd;2r{Wt)#YdnbuNl(@mVNf0o>>@(!gNcFE88~vn-X9AmFk30!79k zO`W!KfXS-Dm(h7BCYZ#=$Le4OE-o&>3Dvpr&PG{-ZGZ@6`X{4Go;-Q7PsBMBsX5kl 
zVX6urGCSPd_>f(h_JZw~Uw-K~KD=ko9+Ij~6faxnN1dOW7CDkvXH*@0f*_fiiSIAm z-%Z{#3x`x?Ze?bukrq8W(#qyCQMeQm&d{xk9D9S)n55jZKaO@6cRw(Y;_32ObF*2H zIZ=o|CtpgsKK?FGrDL!)HL6WQb9s4rGgYE7RxUt0vSkF7&vW?pty_ELLV2lKzBg~) zoWzpF9a~k7lV>1z)gXb(*wGwo404Ow^^*N%ymmt(O4R@>Q0_K zS(Tk;Kc-nyQNh-nkAqYG^7c>4@8)pNF?nu?^eZT??Aa#`U(sib;%Iu~n6a8N>lV3s z4v%&gv>sKCl@09}Z%x&ujj?Fu*bX(Q%ss*3m9Vw7^-7DAbiM5E?w$~IxyUt7BU>(v zzke!#O@TGPTy$}exx_4s)zhvnQp6|%$Ah8l{vL(dxAaFZ{z(v-vd_>!lm|beRoVO* zl4Ln{qNY1eHiQ=Kz^6reID!Rp8mX*N!aAAIsbW@KTZZS z7uR@XhqUA7aaDEoM@~#<1O@jA*^f$PzkB@4%^C}m5~>z)io3On9VT?1p|r9W#W684 zndDm|r`x?l*jUNAIYGH%9`7As!Te$R6opb6ua>HlIG5a6;1HKPVOqEYnC-e( z#nBH*dL?(*$2~4-6xdsYPB>TtMVLvg%_ZkKPCmVk{L>3;GG})@?sPd|#52Wb5}sXi zNouLFZ7smU-Z)Nii=yhByL-jf7X0*bI`DaVoW;vi_doRYJ=DLfoo{;*$aF`u!hE>& zSfKwLv(C#U1?#%Xf{6`Cgr{+sXq`nF85wB?RXflb^?v$LA~DnuqwJj}>Bq#*lzOYR zbEq-S$NJ-|n78u&M=p5d6tHy2+k1%S*$&a1%mr-+AM~fKjb_x<@CPh$?%%%Me^5Po z#;i3Z0+rUsAjkhxz7UEB0KF%DhZE9-Pj2oRW)6;5|Jl4*cJ|RhF?Zo+w}S48?HlVB z_^!6>J@MkxIj=GG=a;Tv&DqCYG*eNzL+yBamwqB(hVg5M^x!ZmKA%vQeE1nXu5OgH zg@fG(-RJcZEes@jnvgJkzTdF*(U|RUQ`H&N5}Mje%A?9kDFEwzMiGdseF$EDKrOvw z-QuqKI*-*Wxp{dZBNg5Z0Y=NsNouEYXt(d(Yx2YT^$Z2}V?048bS-7W_-i@EEZTWb zu|K=^;bl3(&Ih@H;jh2`Dj#l6@^R>PYrW>+a5`GZQ|9irgYxOMw;0!O9{7L(4RK(r z-U1U(;wTr2UNr~gg^BixaKn5X)abAF0E7zHNNeNWH z%|r4%>M!N}ZXtzUZAwzBdw18OBbx!i%-vrbX8dAtILW9Y6S*VJZluMaL+Y)Qa-p-` zd0Kt4#;IO}QG8MWbdy0jwyxWcnK z|ByrV)m}087iosoyRUR)Ta@40xYHXA;r=^%dn^17Dl~<(q+OA`f;#u*a{-OPOvg}Chp4w%=33pAR%%S}666|Y-dl=nPyM_j}7DwCjX zrSO`zmX`M~H-BV6r^l0R(P3oX^`dPa)pci~7(P}Zy_OtCEnOU}D?O#2>D13K&7_oNrphcVEKJ?MF)9l8#c$3ZR$uJl!F=>+<(HLlU9cuyno$U{0z+C_ znrA!C*bRiwri>b7R+`&tMFEn-0#kl|{=V~fHlO_c_uswIV(%qkjfPC$b^gO$2^<0+ zLImWlX?%3w83#bVL%X^mqOS?aB_{NSlh{d}eA_DFU={{|^V2V1zHBfbKuIgV;wNGx z#Y3(MV6V4T*MpDWs>|HGBfA#wdT>XC-`Z058qI?3!)#w)pTU5aAU(2e9WB7WG{Jdxm;pIH9=+*gqk`C%^TluZ&g8{x z%jN!z$G~yrB?X|SDEbp)?wn)~5`Wsw$=#l9aW!c2^Jji8?fi#kqiwSb6w!tDzy3Ph_Gm|?-^_M{3~Sy??pmA(V^Wb(Dp#9r(OA33M& z%CQt@Ciy7QPRFsVR%DKIE${MdzFftzOUc~%*qOl$3u&HS7x9;MMw3wkk&ZnZr!(5n 
zC41*|IakZ7*x1|4(`xK`N;i^ak^pYqLcLR5?2$g#h0ZI)ZhY=*F4q1#7h5VSDoWs3 zZ(%n*5}>!Nd$$Zx9@(Q?xL=~5B41W5W!#HgSvxh>o+ZQ2uD7bz>(VKx3>)EmbAkqK%&Q`|IVNur8?Ij4y zn&^03^1jv-ZJF%x+du88QB+086>NUD9_e4Xa}_OC&DKDqN4CrTL)am}R^rqv+9CY}Gy;C2nsjswhul;rO;^P}_>~;>4ZW5(d$FfQjj;WfT~U8Nc<1H@TE{5l-0e*a zTeohl^3C17Z=YFL&QH5I9*(Y90b?3n)^(k{;8i!DXEPAWxLn2Zg!RQ;U^1kSqiy!! zvMhpRQffMxyQ7U7qNzr0O%f`+-VakJlWmJVl}!uLIS2F@`?+|gp%8jX{5tY3FJn6c zLr97}nh*<7V}cjWLjV^~Jt?iPuRmiqtU}}2I9CVQ+g4`=kj2NL68~$^Wz;|Xd}7y} z%Y=MNEw+Hd@ceR){O5T$P=1~%y4$w-8A$#vh{iaCg1xOZxRSrrCZhN9mO(X(>^b-T z5aR}K5h2m2nkcbut+m-!-MHN6gwR{m5D?YqG|P$=^$h)0`!Fh{Lk!SWVCQ}aS3B&R7=sKrkTHdJ%f}d(mcO}bsDzU zbzd;d=%qZb--}dgoE`SZ&TD|H{ZlwJG3^r8Jwn~U-98z{b+MZEU=xR&w<%B8p6=e; z5D}d|(wfSq5%tG(EV{V_*8DH2Z7Eif!QSe{cxA7!d@Ca@-rwPjRrhPBWui#X44SFf%vO?bAjj6QNj zkEUeK%*k2v!dEGOn?c`-oTCEOl#=YYF>Yg_%chX|`X(Khu`HDiDMABp8Iqr>DtuLg zc+Wlcs{a0oqDQ0jQZ#Q=Tfs8ZQCfgf&%@&4V!(``CEr%=1~E6YmuhAeiO5Yxd7eAj z_8~0#XLJZyM;=m24&%NK2(>AJ^<1_|{Yy@lxy)-JcJ4-fp}xT_8OJ|dmpya*(m5Q} zqhMe(xUxoi3+K9IvZh5vPcEXLmp%1)K03IMCb?5oY{NFDaQ*!oB_GPBOp7vmDj#&4 z@z?k~zP*O&hq=%*vrWnh45ZxdKA~eCE?`rB&&!JkO@$$8ul5R9NBf_)(#hyV_BYV< z-l&T;l5%)Di8idpd1JusfSo&b=*t(+b+4_Su3Efxa`|(I6?=AuX(MZf7W&+Pn4l$&(C7MK*9tg7-{I-$c``1_-qx0vOXtNS z{V&Dl!$FJa&*4nMj@P{AraE%0!thrHHl631SpXiOFlos$kIn>Y9#MO#xaDe7yuW_> z5E5#wb=AuCvqr9d+}@>8wTYLWhxXV;qUcL}czJsZRRXW39S`U#O;}o4CDMffTN3Gn z4@H1frckYIY?RUA(~>=Ko(wYDC{$#$*DY@w(v9nwwDYVl&K#{wjdY zk7A1CmXO_%s#~nmdP4n=v$Q6VCLe(Tn$t9=0s)_plUQo$vMZ z^{@s9{`B|tKS@hFTb4()g4AmPOVhxp`!b)j53K@G;eZoWxziP<9Ewj%MY6U}pvest zc1qvPseTBh#XJQ3zz7g{3tFUx=6>L9+xLI2V?1;1EY3nRD2}FdLz(p(HY8zf``w0r znL-sEK?561Dg4}ib+|Fk&wl(J`_Svc$7mad%-FyWyK=D@gKzJz16GjAk z_EaM1!bGfF`UnWxr*>!p<~daUc1;p@431tQZmr_wb8CU&Q{nfDui zta&llRXw+d92}vq(&HiR*O4Mg0DIK@0LO>#Ji7LbdubUZEv<0eEDKdL!$>alWNtwL+y3+Cz>g6ocWR(6m1I!a5S~P> zymS(`(EbK*@6q9|zH{qV^gdm)3I!TN0i+m^iBV>4nWYYqu@upvUuc$CYuyfMAM~bX zP4Q2FXSKjHo(?!q3uu|Fs&vxEVLSDk0a|z@wS$+<(6$5&FWzeK@<7s~joa>ywX=i{ zk$Jvr?VuKUKYnPDOW+2ZPw!4?I`NzykAFtvBorY5pgrd8P 
zfx(p3ttnMkm+%t#R}u*SGqRIaywgqF0GR5cW~;bFjGb;Zh|vWbv$;37K8_cyl^xtb zC0uHw@K(||Rma(LD&6nYPm4$IQB@wGh%S+5=e;}I4yGW`uFeeBW4nCLoj*U3G#36) z>4?M&v}C4ecKauwhPcI==YSD5YfF!b7K*a@?z`_A0)O4^!F1umg{rD5_S)&As|!)> z0xCd1DgibzYXyXZXtXP?ksm5_FMRPrNp4j>UbCfyw! zYX3m(EyG@x7kBxx}gp1dMqe=CS>Z z6pRLyfDZdI5j3&xmcFk8X_>wX60fgTiWvjxN4<&c2>+2N>(G7hs7J|8Ivbv&WZ{ z7iSt3YW**R8hc_K<@Px;XYS=sC*si_@@zhjEBO3G z6`wV|aI!%ga~7;wEB>k^$Lpw$X1GdtfZwA1q0tdj=iK~!Gk{N{bES|4cK!VGgZ>rf zV<~Yn!hT8xqz z@;-EhG0fd}`5SEH%&aNwMAp2aRn5IwE`+CSS>%Jt&j}%@kx*tHs_KdStQ7VghhBv| z)J2^CpmFfTB0M}wxF|UrucjKB#(?p;ZM}REp9H2l9l8^cXopfRryEoy|A)OK zQ7ttEjH?OsVP(}XWT9?J3R!WZ}Ikz!<`)vX7eW;{TI@g zbU>1dZ8<2ah5KlLs8M4O4sP5G*@+g7=%cu}v%R>BwUdgG4wRm=2i|jz^6xIFS!VKA z9-sX-DaE3H9t88rkh15%f#j8?xg=QAS;1vDlwEMYlMGU|-1ypr8gf@$g6J;<0vdfBf->Y@R3>QtTZm9G0D z!os%G{c?pbalJwTa8;LUnSSEO)h>O zOCrkr_3PIWaG@b*_-y)RZKTH@N|eeuZkzt}$dsnr7V+>0aKr%hl=v@@&HKwMaxq%&vgphwIZ*1i6e8^bOSj}IOV4yu0yM2;FnNI zWKJA?o7Nkd?SH_-!-Lv@qGJmrAbWLA(cgdC8Bu4n!8yyU85}4-e~a*(ayoX_ZHIvF;O>FoSVxo31VdHqkjiBp0$E}ZoJci+`mu+n=lm6eyrF1(OVCAykZv3GHK z=eu_*I1%C_*1d!Lb2bV!_i7ehcYq$M2xqcJ73$}0pKP4j#i{;Ac-zG1LtP~e%t^(P z?~;}ZEgPm+zBr{Uzj}nERFObQt2OlcKWs>DO`wD$hKV@;rL!1=lMj@(s*RM!WnHTGb-C@t1e!lG@kR&+L8U&mZ!gL z_owCXGuQPK4SOXw7ZgO=8tC=X#4)AV_nBVvL-6W`9=#p zOD%QCjlEl@PqlR&FDl}ZfrMQ9y7Ktt>}V&2`v)zgmF~nc=6BSDoT`nQhzk%tWzm_h z$}H*o(AGHg1;Qc$W}p6a?8K1I=`Ja?%j6?@dtP@E#(H{SUVg!`&CXqP5Hg!lTUB=CPkvqxZ5zPq0DQEFKhwu+YZzE_wdUWXZ;5nqwMt^3> z9f2~ABE<l0HVV0%{a{N;e1q21(S|0NE44Z3Fcs?@?UjuWOr|Ckyc9JbQK}KdxV1 z-pEaqdel9%1xAqCNRi8EPA^@%_6H>atUSB?$XkGxFXDQH7+oIt*G2mycMS*T?s-d1 zIHb-@^v+0Cah<^9( zAbstE&J=v!kR7m({ZrEe;>EG5CYV zBEuT8)ebPi)R#kl?;X+k_usF;B+>u5cjrOt2DAvko+pqAUNtwXBHs9gOeJB+x^aV! 
z+5k?ZCc3NXfPl3)vxK*leJ74w?TQr?MUrhz+>FAb{Jw+N44Q#V{;4BZKq)nZi_TX1 zGxv+I9V6ip|QtbB#esdP&$>C0R3zqf9(@ZFxYf-$YMAY{mAev7v-v%Rs;_lwJeY zHn6wH6`#7hDydqd^zgaPU!yjlA~qMg%<%@q5mVt{(F0{7`WW3Ww)8zH7CBh(vhyD$ zCxXXH-vt$o&M$i2bP+Z@=b1tOac8KdO@Q>2&XAN7dYy=hByjSYh0a+mDcVU0y%7XN zAbR8!t-PmL7t(l6O;0!C9+Xyo>H`Bf2y|Nyb0fK3=rQvBx`8qdt2;&A_a#AUl7W+@ z3ICdDJEYW{qJ25F@N+0>w^8sx#7UzcB6dd>Drsn7F7mXUu(1Ysj)4-LWYt}i@4k|c z)-M@^PZEh#)x?Kr6l}-Z`51*AFQQ(y;EIWp?eO_Kk1R$3L;>p5z%DieLX#!~l`a5Z zgd4ym4@3Rst}cd@?x|l176Z^Z4yd19P*9U3Yha!@Tup*jo;6HSiYN`f2LzsO+0So+ z?73;%wpLIJ97(gUyF1`rKu9W(%ZID%F+@ev`A;9nVS}l_exmC+Qs@xC34)y(D|fr+ z{rks|XX6n75p8D*gf zMx;2v>|@;A7s2l2yDp>yHCfnm2?`1lj--ESU)^7S{UWyf=_a`cIL@*xIxZ3B2uR`M z&`<)Xj;k+kZPbJt8eFE8{kzVIK^AB`nt%NI8|h2oJ0T`GwCHyTRKlWGgQg>&oOY!m z6Jq}gi2i&WcM$$o8RzG)aKo_qp@V-4DK`Pc3E3KA{z5BZI?+|Qh~KXj_Y2l>X0LCu zCw5&OC`MVPjS}z`sX|jAQ2+MbyCd_Hy`h43i2(U0hF(|x2LxO}s+)W*y4qL-*HP>Y zuDzAS7fjhn`ek0}n$HE@mvf1a7gVZrzRfambo9BMvF^J^OplPx;}I&vz76wn-{`10 zQvgxas?_+ywwHC=>_941q_Ovt*AtRNL3AM+=p`0C1P4F^NR;Nu-U@=X(OUi~Yql4r zA2%PShfJ`xhMqF^CFKSGau!XP61XH{8J`x66m`o7M-@ltlK=(LdMM$|FcFZTg~bYM z6+{RM#O+lG3HjGP{Eo90gHL2FsKv`h086Ll=34QsInz|R&77Z{0MdAe$aX>+5T`Cp zVlydUx~LflnsCA=p%3bBNU_XL^ev@x**k*>(oWN>rc(-Pr$-fP))KiqEV=x87*7W;#-+!+P=3zSH z9&#q!O*Y+M^*?h%l6uc>f;w?Uiig4g_5ani6?C5rw9QG)W{{Ft>+_L48Q^&3?kHYT zg$uhEXX8#(RMg=r6P)q%jx+FBedzBG(iei^S5`$7jx!VF7q5>jM5ZmrkqPQ?M_Q`m z4AVik#J350?XZ2XE=sYiiUS2{;C+J0xGO5bIjLZ#VL=4-`(}+T5ktk>F@cvymO3BrFoI$EmsQ zhk0;LWex4M(S9`f?)k%`?OD}O+|&~*Vu`N;U{T3p06^|vMnTfhcN^D+27WOL`Y{R_ zHNnrMJrJ9bVe@o}`m#4LavDxDj{5B0iIb*AB1 z@!IN=kr4PcK4ixz{mqEpa+I}@;iAsE1=GEA_HuD{-O>|o#|!I{X5OvAl|KoIWj{h{ zNN_ilped1xD|;nE>(+?74Yq)?F}9rACLMHb{Aj};jeb$$gMI>TJN0>HRLxnps_@`Y zCmVXd-e-QIJfrqYZe`)0mZe6*??co>L>mRo!9?a@NbPvAwmA@?woL*Jaq}{syP&y!6tvxMp4Ft$}J{ zYat%L+`0>1^d})R1&`WWPfb%_;5e`(tij$8n`Eh?^6(^BNw_^@N0yh6asx8`H72O% zh6K9D2VBG@dZ{C#ZbJ<$&!qg1uv>~1t<)@j)dV+4d>Lzc@D!X;h*)=Z{C2E#CO(A? 
zU=Y@Y!Q!=*)V$Yqbqes>HDFLC0nmD=DM8u#+O>-bN-<4^E)H851$pZ$x3edpD3T(I zl~p2H6jx3U(_zod3ABezSRPV6Nk<8D+#58wFae?6*Cn2QoX<-kJcc|kiW5xXq$fTj zbTX!G>4xnY#(w9{oul8ghfP620f%3Db#;}<2=E+4#9N|f&Az?jN3^7N0DQu-;0%!& zn5j>nF5)d`N_Q#{IpiX+fU-C7UX$jP*X6T0sg2|{1R|jofloaiayyykKu&&wCBVQE zOX5U-e=LC@NY$+X%7i;3TE@jE=b&n*M7oR%h%EF4;9|66nS`jMd=YaDC^~JlAPqQp zwWeO1_HZTR6eU8+LjqNcwOZ1<2rUP`D_O!3yRWS{5L^U1G`VQpG$5A{;b*31W@NJ> z1|nVLksn&g>f_~PBnJPWg0*Nu+MXS1WUHSs3E?}Zh})$HK?d6(G+707_>J)U0-Tdv zQQg|AhV!J1<_rW23vQ8+n}3_NfkWu`?j=qh*0#@J?6bg9 zS-i_t^jMiiImwR(%~@%bkpRCX?kxzWl@y}y3GR?EB3lUYJeF)B;R`|6qjhF2;qC*b zLF!0E41EcF<|i{CBtW>6%_O0NB%v03LKoYY(}#t}`2jdH8-~4#m@?&W2M3ViOW3o9 zi0%A4e9v9a0XIeF&K5B%1N6l2v-jjV6eEpvLqdmR*{%Zml1fi52)*~N6FOQT$Qr@_ z2b$!k^u4QFSU8%d&PfQ?UASGa9aI8P(RF~n;zN0Z22P+Qs?~4pIaa9z)SC*EE*MwI z2bUjd!S`@!U4+w^W=)_a8A-!dn}S^-)sui&Ks!RjW6P9)8u)(wlkUq{#B=PE42@v0 zUgkGa4ia?~1(A@mZFF35QiaK4NyRgIC}<-;g+ z2_$(vq9zc^IGNHwNtLz9Y$B_ITLMJLu>JJ@7ZN3?Jt0m~Oh@s{YG~ifeHlp$LU=eG<~$o?fN)Ob3B3>)>U1ztqgv$ZWRuB`zI%48bXphQIw{Np)Uvc%TGzShjzT0>24HwGl{f^-lBG2(B19D9>t((l5vEoN?2|5m7fmy?xsuc z0tsj%`1O4r-2O_F(9zMxy*aWVrVyIGj0c^T^2Lh};3Q2y$@AqhF@@;*&&Vsa0o$Bu z-o`^t`#@b388$S?=^y}!gap*t7BI=oY;1AiTX+5a>hZ8L?#;|`H~;?Y*KapQ4gPoB&h&N5@=OH#Zx>Zw7o1DdP-C!1kGs`4*98nP3qQNU3SoA&<_*1DLuN@4X z2Yi3n*a>h#>%(YAi9aIA`+vQTj!yW6qc3xkN}|e8(eSDND^te@tOdVW(r7=uy*yax zlFFZ9Ga1x9#DY1h-S=5KvL(Oy{6D|v#MC142br&U`s|sFVEE?y_wNT#+RU?YXWPDq zYXVgv3Cxa$`|EEjEv8b8i_QIZdM~y_`?7C z$YE4BFK_RMwMNS596t8Ah}T!@og$Ntxi(MdnzerY0CDYqkX;-6^$`+d%`9{K+|M=s0*-5x}lXZTiT zWrL4JvJrx+*bOj$)wItCfBg0X-v4GZ+{6w(P5gv|g9AFy1n86d;K_$Yz(!D3xb%PT z^dS~fEuc+Ps5w^M#ITd;zT!-@(Sxp^ev5^_3Z{^dj~)vj_mq{E_NnAL-uw1$Z)~`P zbIykjQoLIK$`!urJ>rFv<@*T%Km<4#VzK zs*X0eNBPpYuCnrFb`^*{bN3||nY1kLy`v18$lM=ijjW~_F4jkZw8$H9d*nNc;+8G(h zg(ltr_L)p}63{T`wtUe3+gR;WLUAVo9Y&Kw&U_pm=0L~_oDrTkWx)|62$`sq&~-?~ zr@lIXnJfD zS9>#R-+%s_2~neD_lPy9KyJa=GR$a0*A$1@ste-cM>#n;+Y%<2R5tFUeHm#WPeJe# zTUCeUOARhRdg!w}hOa0bP}n)C#Cn#WefmoI<`o!qfqJ9hY&%@SU1K-d_=!aY&H_rs zGLy|=8^h} 
z%;FO-V{4IU;S1{Ug|)0*|6igR-^A1q?gIa9E|KD4497j_?KKM7&<0`+X@Uzm2P?!}W(_pkfSrg(KV<^= zx!Aq78V5%e8KoyHPhp3`L5rRqsEf<5MIYg|I21>`zJ%ij-97?|yM=gtP?_mRSWX`a zx_kF7h1i3uTJq4bJ?-x7>|~+Bqx1yVF^Q3R<7|)K@-1j$ z@n4$y4!QMgROs%QLMIoEu?%`9rj*^sHKVU;{*u(32o`df3^o$nkE>b%u|oS&Q>5)E zY~+wA0(hX2#yoke_z32J!k^4n2OeWXss?d!oJvFMJz$HG0`Y3FL-@cK_F?{ zaPOVG)wgL^63YD#;F5`x0m`l)%5JDhC%AwpO2TtIkAqk+VHBG|{$epb0cnrS{dr8l zzDqHM@Q+T2FBpx44#K?hVzS?nyWv~^bu)ByZFMMX5Y)Zy-9!I)YjX)1j1GwHiMw5G zO5D*z;-L~BBrG?$syIYJHHO;B6e*?wL-bc5A`y3eA|~)KT=5u1bC{SbkZ^&(n(_F6 zj8Tx)Xa6MmwF#_cB%gtlm zkXIAI&MFhN9fymUbV++adN1G<7)olzKYGl6*uW_2s*as(hPp@icl6INMoI${DI=}c zDrRbCn-kr^6s`>~@=r0FsJE!8Tc*pIk&(%=QH+#AqXuAR?_y2&ic+*DG+e3u#7m7&b$0Nstd zd+M&!(J|==SFEGU^M_Q6t7f6%Evg$#r@v<1xk zLbiW-6bCE}WWfnRBe2$2NCxuASOSSP%xE@`wq@9X=-2{3CswU-zts!>=-1%LiPRFv zt}H=FFd{<|o(I;4e7B`+G9G1;B1IK+o{8^rot7gvg(%R3K26dlIjQKjoL6RB?GP<4 zUBsvK&)1EaKQy0%Qmt(0<9z?ibY=J$h?{JprlS@81y4Mgd-obkN+2AHjVUxi&pQ!I zKu>)TQNdrRf;5v?v8s7=k`|3Br9~+0UG(1huehy6B&a`SM+=i#gy-pauyq0A%v)6~*r!QpnRz(6^BO5V}jvM;!kq ztYqKtFzH5EOf!&+Xwi=!KNhb>vlnXO<~;n(yXJaOtZpbPVjuz!$|oFiIYC_TR4u0#f1mGOj$z7&oD5A1q!* znhQ~4gD8e^1{nvD|NbKR!(1I01jje;0PJX>z#8`jA>^w!p=>nl-+g1qCiaEz|@Z zGdPX9DB(gs1q*H~d3jRLUXFF>6byGRo*|trXn$`@Gw_2K_X3 zJc0xBJcPCA$@3^~EiG}oV7Lwq1+@Vcmn_x0y1Em}!&=4eB4tku1CloUd#T>9`k%22 zMqQ-Rz{f_6F3x`>imK88o)qQf=}A>%1Pg!4$qe=v>TtM-%O&(BFc$QlVEgyG+>phn zzc%TXfO1HgAmN$G1s5;C8Rja;({l1&5VjqX0bm_T#T1LzAg#$r(OvQC4y6A7m*C0c zzdlT=6xe}TD2uF`I?+$DHvH5OivBw$to<#BI>J|zj{ zO1U>x$rv3g3S!7^F0FVJRnkrYIgxP{K%7K!uD5RW+~xqM;!N2d-9g;PMPgOK=n%;Q ze`icUGh(Zg6%hUAVpr>!PxUl`x&yGXp9Y?xuxQ%jCAL`~fy0p(Owd4i`TDk?$+QC? 
zEQGy+_PUKW_LoWiCaa07y^5Qk-y>Uc(=6f-%yzD!IAshqx5b`UV`11LPf*z>VwpSHz*G*RH$|W2R4F$Jo^6# zaUX=ZFOvwz(Cg$&V5YtOY!+usi65MkQx?xr!IB0?Md`+79mIj8BCZ$`9YC_cyQxI{ zF<>!wJim_?1=TYPMivqf*!J3MOG98qzOcaP8&uDpJsV{Hy=TnayW;NfN0LT~->f4c z+%)s7V~P%}W$~w6K2eugW>_^s>AjSBZZ1l-_eSi#)#3eQjsw$I@^`~3f$qt)A+hW3 zJ9F(Zbn8ckR71ziZy#Iu;C?AmaoJxtd5tH(%bQDl+Fhb8dhaiY(U<2%&kmP+p=pdreL?_>iH8}nJ6XP)<=8RY_RiL=50#*`OV+JGnRzH z%&OSXgvkhyJSRwl1QG{k2hw8D@7fg@9{~~vyo?DL`A`g-V&LcqKsH%&VgbN@Y49I9 zboQB~Ul0ZGQuYfwc>zycYfG=I)ciINK9GTTO4?@kiHnoE`PF|l@-?S#jZB$Z$dBk3 z7E%)tbWlbIpU(a&8evBs>}E>3BRoq9k07cPhq$4eisOZA8BB$VN5#y$R7vm=N-mAsXoQ+2k%mgYUn68l<)7 ziEMpArAv{L+KBN8H79!P<$2w`xC*=zFM`Zt(3uJ-27zUnSIcW2lsJN5CFtF1#|Jv8VwhM`A1BW`)j z7*atfupd1tMQ)wAAjPRr&p*HTXz=N=_Yy zpm{;km(n=^R+3q9iadmn45+~JiE>)4rStggI%)&_6=CaLZiV+fX#6IbA9C63F}uZX zr2BHD#Vc}fYp2qlY~K`Sch*Wi#D7bDi~{i*nbaR4Jtqcz-@t}V?5hdrW{5(L$899R z(!h=^j;GJGA_!*x+Hf!)REuZ3DUtS}zoSP?09+>aC_;}CAdZxJs+v5SkVZ_4Luft7 zGtwYF#{x1LjJPvTLu>fg={U;;bCAsJ;M)s(bafNozPYDTU4BPmuxCW3IYU_uS&t{| zh-?%|EO^2Kzzrd)Klb%S6XzF>3?Y0m#J!(zN??%Q5Hnv_p$nVoGjQC5lZ18i33ymy zLxfA$lMH-|eYVpqnROSx4d;dW{$g_|k;SGxzuPhr?A+nnXI;o!H=xXrs)Yt*Q))BU2~>rRrqvYwRCy_jDB*Ve^gj>2J-Zux5YS;D5>yOQ&oQ6@ zJbx6=msG}&+iZOuuAu~tUOKn(8@Pe zrKa+#Lm|cY_2MVL310h7m9{Mf?J8!Wje7rc({wi^e<;bJtGt99P_R5v9hdp}Ex%oS zgOcKN-lWt`%%~XK>w?jm}VUCBb8DzY* z9WuJtvQnt+zMe^WeU#FxfnhG{l=ZZVNvYg%u}qzx=29EFZ0;U2$1AZ<8slOLi5o}ibd=P+yDHp z&k|PJi#-+*vKf2f)pv{{8Ftf*J~e)Iwje>r)66<^rTe*ISESfXmDj{;2@#*HzNk=4;ZnB!st71dc!ldPzv&sVmug@lnYAoEuQDvH&YK{q0%bbk*_4$&^ zT}@S!-AKx?m3xC~^T#ud3Wjp3)uGDQ>!h5xKhfUl2*t&Slv`zVpj88i1x6`8k z4|{JNlx5q#i#~{o0*Z>Hgd&K5gmj|_C@n25Al=;|3W!L9NFyTM-Jpn+fYKdO(%rSr z+qZDf?3wk)-m~U6vz~ds??qpQJFe@D<2cUadMzy0GMba#re2fUEZjnqQxMHXp^$;; zKe?==pqUd(A-cAmm1(b1ohl~WY_O5PO1v^Zxc79oL}h#X;swLKHzTH-A!}vj`p0c2 z)>sf4RJP&u+CQLBwBo)B8DQ?Y@6>&;!uD{MO?zaxi{~I=90-D-XeCNfz~?{dUrcX( zIa7Gp#V;x=9N&R?TkfHyNQ{&^9~9CD$Lz3d65F~}Xjy+I!y}Olk^kTz?AmGj@~bU* znvAJtw<^8H_t#3dGUU4P`>O;0djTFXmjBS40Gsjwe1KHJFfb2+fdU=s0ct2YK+rK| 
zbK4iP=Jh(LS9Rj}jWj<-wvCo%a#ci#hCtqGcGz!fnoj+Q(HnJMfxekT{zV8|(d3v) zY=xW5MRpA9t~73{^x3A@lB})`6zUbO4?-p<$5P9w(!mPis}X2ZOcYhy7HFReueGV(TcqU(t*|EZS7uQTnKZBtqQq+qm0qM{ z3vD&(d{wV0dc_y)+W-BC%0eCUy9Xg=6GQq$MAZaD$vm`{hzF1rcjZ-N1_u781WXgW z&?HZYsk3^}#es+R?KU!Mf`|IH86zq`Ebm4Y=tnANOV%f+Yh)(soyC~_wz(dX^!4LG zccmJ7*0FO%aqJMC=<5LgU`hqY%*wc|{jgKbU9BUeCzcPdJUn#7He2+!WNm6yZl8(+ zPwjx!5|)*uEI$a?p=XnY(gYC(Arce-E+0cpmT&q7ZUNMOi0~Fs7a=?>$gvQtK;8|S z1jrX4?i%JRxgm;(q!6VM)E0>J07PI&iiMyK=s&Z|0ixsv3JR&cz+K`yyqg%4GDMdS z-wu&Tf!=;Y#?9PkJ1o;bEGW)4Z=@% z`9a=nv^a=t1r!LiGUiv%(4<#HRX(d?Gc%K{J7+XJ%2PPe#hGNQ3U`XN9K65*-tLVBCo?Sqa;HEZS*ZXGNr~>E61J{gH zr##Gb<;oC9!s+DfZCzcaSXZCq`1P~@QAoJK^JV4TX#nMgq~=ad0|F`GLD$Az1FPMA9B{YJjdT zQajTDN#b zLx9^xjEg~Ci74EDFI5)3ZwSo-whVEpN62>tW-8g9KoXE=me6F%CIEGj`Fw;SVNkm* zij2k}R0$|Zsl}o~l0dB43?vG$O5%k>UBHIR%wO%grt_YPzEU zMVEole1t;szf(?u6U^IZ%0^S6p>e>!l)eX9CNeMtt%-SJKFDJbt`_kO%U-M(H+z}= zPshtBih1$kTVO7rXAUJLn){O`hv$F{Ux2sIYoHZA0Mme&(Siw+w6}H(4ZtY(I%3DM zaUsxzY;1?i?XCaPE6Ny>Sft~S+x;AvA@oVxX(*#K`=xq!TE;Og#VxTBN32wx@ab}V zvh*_Nx6z9mun&=vnQI}c|JJ{G35A;d0d^cj-Zwz~4U(DPVhjN2K^XgvX3-g(yp^ZN zEc8hd0r1{uaL*aU#=;7PQI%1us{&aXB9y&~Efy@A!`otU(ndhDgIb(9k<#AH5BQ(yeg{IMCtT7QG_T5QdS;L{GEbDT4}Y zXHK`NLN!wL4U^auz_$y3u88=nb10O2xXDJD5D&E&rT$>-jcsmuN5SZ$^F2MG&PYJ- zrje0!j8S8oDOz+^)w%Nev7o*YRUPDWBlv%8S{eXV*#zV2uIM*oj09 z1Xls$L}Z8tChl&C=Bq5fAqB1^I?2pM_In+r8*3I2_LLPzfZGz8v6L169Q2i*em%N` z$cHZ96iLb$23{{6T15oWg{q7dyjZ{oLl}}_2j~@G=MzZO}BL@8ULMFpGYRrU2I@apu@ob*@L-vt}BDia?<^>W;3tog$Lj5ZSDQHfu zDtts`C-R85T-a7@h)5$S>BNC2jbM%}ycB@bU}F%u*uq$Y(6?;h&V!)1FzyWO=5KWY zHBAj%tVzJi5mI@8zz^}>`CL~wHNEA2xCA4%AV$#i9vp)F3bSU>2K^9E!Al|kamauC zJ$A`5V9UrX9*FRfK~$LYpoJDM@5#3>#orz!wn#{HW85q7j7S|!`m{IGOvEQs9QS&1 zzh-=QC}u77l~0MtSy%cBtzQ*n%f?UE(P-sTUq-UBL5+84yGkx9Ti0L=@X|2(Q;xpb#a{R&dIFx4a)|t=`|?g3T(+wY|pY5$TJ8F=nCeR z5U3+y^#B*14v0IVszWTA5U>>JzjUZ?0X&hB@`=$}u3Qh8Zi~$T`xH$;nv`JQAv6RK zwo>3z(*yMbbC(A2$)J`Y1^7oC8IXl_4;B}{K}|r>m|G54>Tr;qjLaFrF$Spsn*iVr z_)-|GgA`cbbD&`fqD1DYf$UKP;!^=ISdeHSmLCYq3jWTz#mXQ6$74`?f@s%4(SukC 
z0iOhaiBm41y9(J4XJG$p5yG_KG?T+B`ypwfp#%6j%ru!F>?(yOe@1Ase7~q%%N9ByfnT; zau{i9yznbUr8?Yp=clE=I*+7tJL%h2BL%XDN}?SJV3SqkG=FCpI6^Yk{!Ji5QIUcF z=%!H~Lycx7h?xi2jYxwo$C@+I86+6LsTjjBA*a4YJJ5!R!F&gZee|V#La92OmqyK{ zuwh#SmhL2_K?`r(FnicM@hh>MDr3G0Q*v_YXL5&zn^V3vu4|cI2zG_K*AP}SojIE0 zRBKL6PE3c7yGx2R&jlsuS@)6$-3yOLZ0#X{LYW!v-V^n>ICJBzfbH@PrfOR9Y~2y*_vN(GtYR$ zZ+@6_IFET-ReE=Q66x;vrj?&K#ts)Q4_&^nvq|bDGPFxaru0z1n_h-b-~NOS&qEEF zr$SEFk%zGg;6M>E2B5n#Ffp|;V4O2$4m6C!WKG zCI68^cXB*LiN%hgM=68auzuVhX=$NDgN6Mr)lA4H$u6FcQvV?}7Z0cYRQ8EG_IS1d zek_Qw0~BnDu-7wS;0s|!H$lU};$?XY5HN>EcrTDWCADP6_spX|K@nlO#c3^8{W4Me zgY8WU33FC=siW#}Dp4gf_0D3seM-?$^Qiamf|@!vkIj;y zPOG`Iy3o&c;yRup<1`3GJ8eT4cn7)DH^aH}vQJ(qlJ+OXu??oHjW@I=rQ!K-%;|N} zhi@{x$K?O0Q2bc*uy6mtl&xJTrDQ+1w17$6#|A9?-t`O4273X*d$tYB#6ufI^teG`*du#_RiR*9=seeIEqmtA^Jna&8q@XAsI6u231|D5f?Q$lXH5detbze zPSJ^wo~seB{vmKfUl;ZWp|?tyaRPLG%?)*`Kl1KI<*stj50%KacQ0o%bgR;^Gf1gQ zFLf^ri+5*Ru>ja70d<9ZOG*Bn{9mB~=IMncno`RQCmz7Px5$$|hrllC8AUxM^C`yj zO?=!?#jJ6b&H1&Nx3tm(7RU5uX%;g+mQc&|X2+5zqgJBlDpizLG{_`l8>EfTsGw}B ztCuYKK(Vp7H@lbKQjso`nMk`AruXthdUrs<83XJ3AUyz?{)-=|{syy>XaVZVi_F)2 z(ST*br9s>e{w*wew=u$riLD(Mz)o|ufPbN;V_LzPBXR8%0d1W;>;h71i2 zk(CFh{CtSh+-b+xSIX2Ks+PROh!l|41^O!@M0BV zb?_s{$HQ4#wPARI375;r*dP6DLWrV8?IpyH*kB9u#J5L_N zT_V{72!0r^aE7Q%c_k(2E=XWOjP>-b*^4hUV>hPfeq}DP4HyhI@kLb`r==5QTn7`~ zFv?vAhnCzy+r5FBIkl|th~ ze8RL=Z%X~YjdJi#rt>RG0XS`-XAu;a+4tlEc@7oEV66ClUwCLplBXFId=0#=8}Cx2 zrMym@omYwhS_nw?ve3MOZkfxov+gG($`DF0WJYF2xc%xwvTlY#gU>r@q8RPeW0Yc= zvNZ=SSE{N%b@$4u3b*@22>z0Kn(T-ht!Ambln>DQ|9l!x_s2d7Ap%)eqWg1MMJOJ93Z7IX2$3qp-O2Wjids}-Bg$1_G*^35$uXVx8>dCZ@~`uyjT z;NN4M zujXvfsP->z{dN4??{9g1kZaQB_gDE!Am4^L7mzwb7Y3V}7L;>txo)4J zbh;4@bWuHk$k@#H&;w-l2qt4eivy?7!Kz`n1iG%Epi7AM8;({2?g4N^5a0ke4Pr(L z$A91ZB<98|RR0$yY-AEtGvHX|T5pz8HEq3a>-_DTydv-S+tV%Y$z1@#577z0s8LEC zSTO~r?gS@;WFC>?g0&P#p`g^GrJxW5HVB!TIPvO(rXUFC-v<{{4HDi*K=%g$l}>x8 z=iWc0{%!M&xyal~$IZE(ni7QSBCkHwfJtnOn;`ZIqySgz09L$sRw_|gnXs?^mdQc` z1Ny6|y8lR)XY@`FJnjHIiu?#H_T%?IYiuHva)=Qfebc8pVT~%I)E&m*B8zo$C(D)V 
zAlrIy86BFy@L@AJ?yy|lF^JBBL z@i~KrZ9mTd38ug<3@^nB3tkia1K2in#zOOO+zit5F#r3#0=ePDxBNbWiLZ@SO5v=a z@a1wSZ$6x*-7{H;NFU9V!G9RRwZ>W6sQ3N_9?AnF7LIWQ0V{BPFoQ({0t?X5f_RSx z6vjXtgYz5Je|c^a6Q(zme}VpOpwz5NtfpV!!6xsZ()T^z(2b6e_Q|DC0l&l+UPqI* zIT_}Ao0W8vHUNxYzW@RnM86NrFDS5Jze6h#$vU;P6bBUEKNtQC>nMz*Q4r2iS<6&W zW4DjM?zUdBkQr$fwwY>-yz}~w^>Bb+M@;DaRsFu-Q+YvWVYm}y`N&~T(19QaNFqvF zDDsit{Pesg>z{9pDnqZLxQ7oREtD#!ldrp|oQr1KP98{pTeAk?Sv!-YA=34~FBN3D z$iNWt-hXe2<_(vBzRmCRKN|(%W|+-{N9EL=9STX-IrXiC;13-8{Ff+$q9JZQy8%;A zz;)8)1} z!OPEjq~D_-Xte)qJo$&M7@4}Y4Wx>}x($5sI}*ab9#C;o=V5FBWF z-4D3>!7gjbPZC=A?rBC=C2{i-&>zsLI&CTw$@>mJioWTXaTB3x7Vko+MJ_7ae^rfpuGxNub2Szoz(7gyHT0R)A)jNi(ylj{C^ zOFI9b42=E{Jn;3O$Mk>vaQAp78>f-K&G5`y>uHh7 zp?it4=}^Zs?Wsj6jrjfZTF*bJDdu2FRebQ&hcfZRr^bs`%J zY8qY&S*cbS8-&S6*yFo=>fke`HHtd%cPJ>M6T zeTpM^8m6!(gX2o9IhP4>d)g))#(goPz^(Sg=YS0J_m4ez(`t0HA-3hg=#ft}zvC-Q z<75`Q?yI~SfybbIdK4fwIWT<~S!H^69>*-JmdNkKw|LcPuAB4gJn}ng1{bHurrw($ zKX4Phv29!Z+{t$~sUbPccu+s@32H~K<+)3N9yi@qW{PX89^9qCtv~LDfI%S!UTv^g z3u~)fb+z`g6FYhTCtG@knYFXSskref46J7a&ZZ=KQv3=TiK4kS6XvXrJnVlz3?{$t z=F!K=RVS%yR@`CES1Y=u4!;canc7{9<7|#G72k1N7o0Xx74%K%a)%=u-pk0IrP9D@ z+0NwA>?HViO(Dxk;8E5deCH(D{K18*Z%cPgc2BDX+)Ed})sd|yhvH5ynq1t~WU^Z5 ze0lbz1t%kx#|Os-6yf(=9(&u;x2a?2P_l{X+EHhhI(}j*|M9$D9oYSSnXK*&R%Arm zr*7R^>Y%99U_88Lqw{3_sOxoBo7~~jOf*~PT?xi4BO9u_ym4=7F6`ROI#jE%`Bq29 z9(+pk79~2}7rW5TbvmpFCEvpT$Hiks|K9wvX?KRJ@KajbO4(Qpq6c1PL~YfZ6|m?J z%8tYxbM<>sJGgy+V3Rf+#hxa4&gS&vxy`ZM@WX*FxI=#BJ$j8gtT3;n*J6IYIR5V& zi?YQv3RVxQ+p!&8U+u6| z8@0MF6^)MaX_b;Oyvq}~I5F{YB2}}9z38W`M}AOP3(=$S70xK798DQ<+&^w+v{B^; z?j`swu0dXnb{aSt341E8t&U40sP6sH2TJb-w8!!)*tNOS=#O5fexAHg&}2EIU8K@P zNBb0nLvK`g-#%=psr~w3u(aGzAkp(P8WT3UN6LO+l9^!EdM@|LpLLn*+ST7{P~6)v z>Rqz$p--_B-Bz}K>yhNMDgAv-bM+5*Njp`vwexpZE}~A6qP)d&@KHotMH-zPDFueSh7g-&-w0un4z8 zOV)MnS?zmCAR3tQsi+lWtdQ$njSiRjx5F?drE?J+zoZH#_cpQzgUx%Ts0+l1`R0qM zYnQ&jtu8!4yXP@{{+`9XOPg0ZpOe$6^k){Y*F5?2nP&X`P`0uyOdcmXUP$Vl7u&YPj*!LC(z~H_`$h&5fx*H5!9Z`(gIA2c zQm%jPKBl10=DsgQ0RLRhW)h(%_8T_(;2c}5Hk?Ubq@98L)c#a1$Nu$boaH3dSoSYI 
z^%k}TEeUy^qgX=L@?Lm`1NAO2G29Np{ZjQ zvN|EJEWBiX(wrHaW>@kw?BJ@x+qN$Y{zhwf^u4R+W`%>^+%=cdNn^<9BR{kV^{&P){n2@yx$nYyE`#&`_y>MsAt7KHPsl z*QA|W&rBJ6%=}aDt)la!9V$!^RljD%qSZdfeTXdMoYupa@ZOm+mEAK1UgmvPe}9lR zbuR#Ka-}<+W|DwFzSaofq5nRUE^=IFkau|ZvSgdNjE+}|| z&OA8!bIFcxRC!+9>Sh-8Pn*u6*mQ^&SnoBDo0w82UmO3KmMtg)llBLWi{c?k?($d zG|Jlpc4>r6Pf_50a{BDqkhr+En}NJ=P|zpC94@y5P8{A96fnOO*ROj3T<48Vg!r~e zIUWIF3?P2WA>)l3;~ed(aOK)FSe%<#y^)I#?bl}*kDAz)M;qd6cBqdJmUPL;w#m)* z7nUn4EU@e2u0$!B*LF+p=k4#H#YG(uW7%bnc`H6J2n&-j8;VzY_vfZOv_2A6@q)Pe~(@SF4lYC0Ym*jyMIkd>2bH^>C1)0Z$tLYC7j4w5%8jNpbb@fo02 z^?S1(?1qa9!wSm+Jx6C-uZFPbIMQ((VpFa*H z^06CE4h}o!$0G1d)oLrfdq?$fZubhdH*DMbsI)q!&MZod^2R^azZBb{D$AEo>z~m* zQ4Zr?1cWe?&N1R}69y&rx-CKc(*QnbZ+hpa;QWrM zuh+~^CnP1kgefc*82To27(;`B;#R9(P?vdv7U4V0^?!Xs^uWS`2?grW3t*d~2O>$h z?H3?P3SCkL9v|rT&4y|;2-O5kaDh+m9DSD$;E6b=)R&|9oVcTu0Fs;cuwKK&=FZ?k zQR+yL7mwo4o$+~8Md+N2LK>L@Ucyir0Z(AV0}Tx_vywsb7^^ZhI38EKM3dMUu<8Fk zY3Cw@$~;3tu4ur)-zen36n6v6G2k0hV6^EQD1Sjuay<70Qu|R(=_v>TW7>bZt^N3UFHuDt?u<3lN0vDbH8Ovq*;CbZj6j zGPtvRhpZiAN7WlumRRGcZJnnt$S8$xp;jXD+E=~~ zNuRg(>Zoz+l`CGNTMJtm-s^WMb-1II{n$y*W^75yeXpC{oq(r~39dBk?c1MLWx4&$ z10(|`>>&NMtQamGHUp;aaMb+2B?Vt`Sw{$2AY;ZTN-C<@wbJEHFerd!1kOY?4ol!$ zD9!Db+)EaL`A{TmZDc3my2q`N_LgICMK=8R8{Y=1rNs9X>CTnbP>Op9@yYBwmtn|S;hR%TzH$IgaxOdjF{D+L9% zkljm|ecBw=U&(zemyelGG?ai*HCIT@PdRYx=thfwor?9c*o_oQ46L_eGM4Fug;#TW z{RS5q2m+X~(IHS98q$CV5T=xZhu@*K8g!7%oimWY8tcX5Cy@8R=o-*cw)W5u`xnh% zI~=z9CMk&$oRuHInI_65W~9InfQ4E(tU{fV2;Ek}?EQi`Hty%3q!>laM)$iZN7S(2UOBs(BvT!e2SYl zThzC~Ix80fB68>qsBkCUhi7fpK!hB@vs{Vm0SSynr4tUj`v!NXvUF6V2fEr6mtiGe z+Bo52u}i0-<@nQ7<#OaJTinM&>%9S!1x+f7=&Gp>9D>0mIM zpP&Etc3yRWV*@COBY#Fx-w*rO=O zdiKo|)xGV7Q|h3z>aTQUf%A#afLBiK{3bYL<3j9j1~(M+m?3;ds9He7!vsgO!yfqt z^Rj7*S;U~x-4u9|t&o8a#&PsEcrns>lrB4ly{@=??`C&wL?B6oc;Wdve5T zn9V7zCd>hJtRn?DiJ~yK1bF1Y{zSF44*X$@UiAPs2Mt>0O zv3;LExcKGEm!OCUpTWhg2x8mcnUI$CkM3&?L+y(6AlSK`s|BH%l;hC06FBI$Iedes z1v2P1@Jny0ss;yS6p}fb=Rr=19KY34wFN_&M?cw8Lf{nodlnWeM4CvlU^pxkSOMJc zPG6qqPA=hQM)u*Y}ad1xpMkAb`$Ly zjwvMP&oMn&qLL$ 
zZ0gN2;unC1B%D!x832;3YmJIz=QdYHBR>!pSY3Jl{Khka$x9@czPu`aP?qVYpZMZt zS*4bJvHqe~S7MIC&wS_exjc|0lNwKx%{uZ-^tL{O9Nu?F5nfeq3CXt=#TRa_r@~`S z$)ywv%rHTY|EMeW;L7K#l_z17U>18(UhoKw9kT6JM-4g7#9qC6btkZD8i@Z@hSvPK~j(s2nKxKN>5xQ(EtC%~|={ zExmSB)V}vXPNPosu3w1X0D|BdG;s`V)(a@xKNT02e)w<3L4G1dgit+=$3t@0Fc8QV zz$)wL=rDj&%fOdU4{UC}eEqrxg;qF^03DqlxGq?O@GjlL_a7@gb!9_iol|`~HWSupL%JDX>kcU3QgE9D? zfHpfjOX&Rx6bT1f=@?`V9&p}5+{fi0NCA;|C2{@gtvcq<)O$(vb4CoTreT}25&C2; z^uE$-+AN#ue7IxRY6A^elA#r!H|tV8ASOFcSD&c#8oa5~+YU4)JjcynouGW{f zENBLQ;cod;D^mQLcQ4<@Do0TlMHeWWrc|Xg&hBiz(IR|##(Tx)xaK?a$IFauB=tK* zqZ17D*<(bYmiYJ6=+&mZ7pL7!MaRN|>j9n!N-GkRDPWa@b`A?GV=VH3-|?dtZ|*{t zRnB2W|2aX@`*^H3FLS9%bWVnToXAx*Wfg6sgx2~oe)`<@*K;aKtdsGs%d7G8Sh?8O zYy}kk!rgIPf?no6u_GI7cqL7KgO<}1C)+vT-otxdS)vSlDd}(Cs=sTo?i|tn)Q0zF z!3p*=m)4rHs{%nbH_KcUD~MCi&l> zGTY-Fb$*3nN;WA^0DO~@DT_~Z^{c?%h-7YOeS>(lF5sLq&Q5c zvm)@RX50O*Ifox=yAKT--BqRFMA8p|z&3w&^i$3Gr z!nQP;eZrI5YYOiMY#cC-qc&(Uq)2X2+p&s4W&z)s9qv~zL~iCh3x z{3ul!ejQW(?=pig)DnJUo@GL=6?)tify*;XQoZq4qfQ}d3&(Ar#4f}CK$`>LTQob2 zl9GzA?oxG0*vN*Tw-(Ud>Zu2OrRhS}*@;yr!YUKqPWHB&LwI8MeWT}9)Y|a;o#Wf7 z#Hb1&Lz6}CXhK$W$XtwvZZ(|vhv-z>DzOHPoR^f;c+ZRYKkFP{WEE@X)HN13*;%9w z{dIk(@xt*DJ|JCK5v_S3X4ms305qSB3BUDKdhpxwFz3ohZ|Rc%obQ%XE;2`l8=>aE za~1`y0mE!oWSQ>eE$PF-K~3untyCo(Z4S73t;RCH&M~` z&2bYL&7^d+^wW9gkLwNXFX56jbsjzpxe_ZByCYBG`I(Sw%kY+YjQHY?JfM6i`9QJB z=3A}~M$N_&{Y(*b(Hoz%^ByzajRjijD6h(!AfrCr;`M=9vY~d=wBh#mwoF%{GY^9< zDdKmiR=Bgki@%Xm>XG#7G;>DMWn#hw@(j2s{wEuF4Vd}97Xx_B=MhrkpHHygi{DUM z1fiyG8u4AxZzJU|C(dxecYI2C_IUt`Y|)k)$8G(Nw_yk&*>X+xeQi@DNJ~ffe~?oV z=k7AP7^bjd7~b)!hmClBnP~ZYwjN8mNSOLe#xvchTkIK&2aJjgH28jJ{OUg!f2bg; zFv48b%6s-DrQ=Q+zhXu>UDU>S@sGf@nY+h;%wviTIaHECHJ>}Uu#qzw{?KW797m(x zM`Xh4wAIQNnLpdLNy^{AqgtXMx*+Fx+r-l)nMBW<2U;QUmVdqL38AX!&9xeJg1mI9 z*3jO<@x>CdKHA?(U*}_}*X)Ni;r+?qDVR7`!o{HQn5;VJ?Xq;RCQ|eOu9tQ>_KU~a z;T7T$tLHp`wQEn=A2{#vz>LzQbI1WKDX%z0ZJKm2--zz!#o=Kz;rRc zxeE>kL+;470f-ohy(oG);?*ag#2qf=xVtPW{PXycf3N-yRg)C!>$y$syPt2wcB-y* zWXhO8r4I-+q4)D{l^(I>uRRam6}Zws9D6V2BI&ML{bhwS+t`xpm}5~Ae^E_%)G5}o 
zuh~uAK(ipS1~a^IIS;5E>k@k1rQ(kp-wdP^Zr0W?QfEes2HgxYyf$MA)y-_wNK|g^ zr4dz5sCQ~oa+6-^pFQw`d3yi^l-xhEXkMbd`TmxlT{cDS(&<_z>^u06UO#$j-gbld zmhSC83+SmQbWW!r0Iv=9WwP5nThtvIAabaT3k}%+d;>~H)-1)qyL7kEcIc$gy2K^) zAJ>sdsBwfU?6dD^mZ)UeNSRAk(as*2_h3LwbLuGd)`|(WZTgraVsqs|wE9=yL>CeI zJ?dg5gvqaPmIlgu7)sqyoMdP zVffWM*zxdCFOw6ahb`)+wB83^s4(qE>E|9mNdEf}F#KL#49w02I9ByM_IZcD%od6} z2?DV%oJ%$}!eu8P`BW&mj@Eo8%go6248%h7-fv@LzU1h#!S}V^81RHe52}e4HPTZ1 z?{}i!Hb$U-_wx(R zL6`7bDzIu$&7WUX1T0Q$mc_5^#Yj8YeD)(uZpxiLJa2P)XPIEqX4Rs75J39WAhweP zZKIc@G(TixhR4ZS-aS%X%=I$%VH4H#^5qWZ&G zCU5gK6|MoY9{H|qWZ{8H`v7_^Jm?+mdi%`2lIL2jd#k(t$5aJkDrNh-sbWzlb4<+Z z-vNi0M`OQS>IozB(Ls497;C;d@wrp?lXK-qgHy-)75K#g-G zMztIMZDZ-P$m5El?gbL@pnl#}!LGd5umRr7u1WuCXgLM7e6QWcDmu-5%Av3~_MmJc z{?Tg*PD5|Xd8>CA7V z)o(sl^mbG_xfYWSw?2=l1JGo<}TrA{UL46fBs*-+merhDo~1%Htl zDg~pi#neI2EHt`UWj{3LB26{}IQ-yULtw;1HORa&VmGh0c zP<>cwvrU|R{3~anmFrO;_o6|>Kv=jl$I2IbMi%#n%4}(5M2o4RLCq=R4Z%wCsCyrh zu(Qp20)KW~5n;U69zjRV;x>$(ZP*jPUq19iYyKq15iB0~du8H*o*Mcnd_OPshOIo{ zwvi`!-af8ISY!C5irPp54^WR!=f0Mi@a6ldSkgcQiea{}T)GbjdtYdj2?zispUd-qg+gE9QSE56NFhNk^YSbb3@Qku6QRNxlv4d{b zq115xw{0gN#a-uZll4q&K_7vOYh^Xa5-iv6^o#&j>&vre(RIT=Vt3Z+6#;%Kz;q^e zW(Q4iLA@RJL0ZJSi?s_`evXb?51~~4ktM~0DLxG3aCuURUQ;%k!~j#Y+h)aMx?*nM zaZ9y9ZIpTYaf~7xad7Zs+?^>tz?=U2tf~Ii2+7{>Ss49N&^eO$MYS1_hqNnXE#)s% zcP&y-n-lB*`t7yRkmihd3zPjIKoE#p1$@{xG9pg@gAY z@fgZ(*-|cZWts_U%FZ7)=^o?_E(ym@|HZ@_L_pb*m8- z-7%+s2s5X}&vVjJPF_$?eE%TDlWpwC)qNB9{Ka#hnXsW^+h|*896*nDv>Q;ZbZwPG zVH@|vi_Yt>x6T@?|L{kI>HjbW{B$GNv~k*Y=XE(P-{>DLw}Rx}wcM;) zN)RoQ0Eq^0R3y+K46gI*OpRP%%~mAv05OennLXd=blceTY>o-G3wkCRwfTI;j*dMT zmAv!lQ0W_l4!qp+on=i`@p+lshpC*S=5*z=Hc?k9^+ZqGSa}`#;irm{8TYm?n+4~v zu;0t6U?F?q-a{0_!|P2lzP!9gjser8DNtc(kQ|(oOz+;r$mG?)8O+~$OwAu&oF-|Y znZY&w{08U+45Lb8o)3+<5hCSbD95+)b>8lIA zKkJuAF~<50%~z+Io01y3K^3slbI^{wCKNE@g%XBeepF7V{V3%y*Id79YQ(zblf0!z zKG_DO>4xOGWyDpjf`QVU^$Up51LA?Aaeq$g@Lb`04Q*4P;SaRWu$`>pf=3_VAqu^= z8`0e&@IpCe{HGs{SnhSr3++d>zB=#w3}&Jc)sL=1nv3aHy2uwDLJf)+pHYB8!`_5A zb(Vdh%^B+}6ZADuS4%-(`Xb%d;BnE#l48!T 
z6Q1(u)%og~r%RWinjh2@ZZ@>v$&lRjltbmHZ{m2Od(s;1{Fz`CepqZQ5M3&Dfeh8Jg## zd3gWP-P1Udj+JB!?5^Kh$U$DmB#E0ce1B#`@lJo0_e^q?aOk8HwpmxF`$+9szCH_! zYsb->XrNQ-0DJ^_3=j!^mg5`=I=Ni%jJ=PP$euq1mu7im%xni@Y;xk2-pc-o;kV%x z%|W>2dWc9O8pLZpKFGpHHasFxnJ4y_9<=(#Dv4qz8xeVCaXR?^_>oSv@!XdbE62Je zB)o4ExXubJWhhR4_#PG`S$(Jk!HbV&0Cx$q-mD1V+treIiPsWi(61pu0+7SA9wU*Y zj+^Q3P<*M1@sX{)#)68RXu{mt5A8xZn!u9tAB$#PQd$9uwl-!>Gx>mwN^ey4 zYM!Sp0eqm|shO6Wtj@1aPrvSyseGUWqyY|pL!ktCyNVZwI!d26AhshElyCiRdb@7-Cv?ENvuq4lXykR4&ca>8gn zC4+*%X)k(tFkmTJOO_Niino>pCx@T1DuMObm&>U1hYC}x{O-x>t;sW5;j%;yZmGnZ z;x1?@fH&V=6%Stk>Oy`y8Gm&nvmNb;cUvabX`pE`m0f@T;go$H{6! zsEy-ysqS{Oll8FMUaH>M0qtBNM(*(!ErbagpTJNq)~COa-jm!X*F0~{KmN%)?!yr@Y%JB@T1qgQ_vaUD7Q)Sd*`H?}cj1a05ZwOzZmqY{FKh(`cd9?R|b#eAJ$yx0SZ$)%j639GXqs$uQNdafRI(_{T;?o@2a z!~mDt6TBI3-TmOG|A}Deht|6q)BdeYzN^k+E5sTg7v}q#86%-$Vmnf`b^Keg!GIax zxV*W}?x>j)*Q~nE|DkLC1{ZB`u(~-#f!Kg(?`|8;`E$woBF^{=YE9xSqxxO8?csyl zby3?h$?Qs(@CmJs@c-mQuFd*pK$0?t61s`Ld1lr3c&e?P95h9-cSee$N`MJpx+|SK z{+<1}Ox?IdYk3u2VvrS)tU*DvQ%41|(2tN^-)Qj1Eqqoxo`**G%lzirMp$oTsT+Ra zMrEwDxkIpsC=sEz<-nE)MU9tv4Fc;}w@{2|Zs0*jlI)r_O+wcQZ!ca4luR|8B6{Jh z5G6kipsVgUZOV@|&>8@21`z5_#^aOu^_AMI-yqEH1*mf&Gd}1c>-z>B;$-9z&Pwa6 zUdQrd&D*O^4vsxON7g(ai;`qy)iGaV0fB*bd(#O8$s^I!Klv(tMP!lfWfUl7GV$r~ zGs3l6F_`v8q!HQ!1xZmcW3zA(P1wzU#-|EH7cE6Ux%$(KQT~Rc3)Tj*^r00z>g^M* zN8f5N_#ep_o?oUv2+2HHkP84<#4bAaVo{VFlzY$=kF!-$E8Jf{Q~1XUXAlBxK+I*4VA0VK$WWF+(UM}TT)a*>~Vfx55mZ$ zi_x)2cU3fDjJjligSuY3^z7aXGl`0MbVPo&b9X_zB&{O*;BGZvm}*!c)Q;Soo{ocW z^Q@Mg%nYrmprb`W(i{ue>SeO+G!eYHICnSbsL9;Qp5a#LXyjPGC&)XcvHA|SJA>0rVKTz z(3IF0{j?k$$M?6N=X-?h``{Y_o+5hr{L!j$y&{le*dPpL!akEcX}|BN+K_o*;#B2y zlcgj~fpcE7hAvC)nsw!MAHIH!mRKH$BM|LYXY*GVT4~%DW+0%Snxa$rbBSQX5(yS- znph1A4;Pk_x(Mtya7{t+@zU@MWo2bd=_W%e?}i_Dv(9+bTnS_xvSAgtb_c)GyJU!M z+on9ut|;oRaSO%L;hJXUU2|4)hNCFyxBe-RU%Fn4bItMt_LWquPodvz>>zD4Pe@Ag zeAq|_kZP3B8DXYWE$KUobkOubFebc>r3T&C(wqhTJdTN0PY=~pKx}}Z+aC`%?+32( z<+L!+004FCz)PmaKmbSIl&UfClHtJ2cx7ef>$pU<+Eb*uS(`4OZfSmu9d6i`<6{nf 
z7xXA0Gd8k!<;t&fV@Pw-9((lVb=5r5weoB7KQ}8HT$$9}9^cdA4KAqHD+<mV;R3J+D+J1fMP|do_v)>a=q~VWw_=G zKE*$v!cH)W2N9!5PyV3-8^!bSth3gTjzP(ci%%%1X2AMbk+ji1u5o#FoiA*nZR&YW zyl#D*?FNY6e4K{7L6N=rD;Pzo4IqenI@zcHuF5v= zxM<0CKt^8W8|h+>)=?WC0@anqFqHS0)n(HA9Xc~YczIg41Y+EkA0g(PFv75tN3U5* z*G0|_9y|Yi%EEpbX1$WSAvLv;JZ{aY3jE2(EjU3PgITDS=yIG5@^uVO z(|x{pVAKGq9~zn#d(B(V%d;tIcD!)6hkQt7sZwE|LzapcKjLOkzbe7{j{Wh}D9Z>C z>Xd!6+#G(ew+VXo(?CNT6dvD-8>*o-yHrHzeZO5I3J44}Lc$H4#y2Px^h3w=l` zUufcQ^nNgS&`T{$iD1Qun%MKow&=wE=j1<@N5qwnr(nPW4mz!$veGIU1Q`^FxW?@& z@&*@SLxPHh&gr#P0Du-{c3qE*VyWbD;Ej2Tl>f$ zF5SH$!^?3gNChO0HAs(s_KpCTZgNIzDt~ zi$vfa@IVdD$C1^W+~@)?)tRIn$%Ey94#xrsJ^-UrTe&$Z13%R>b}UvoO2z%ofi8`s zfCX^dE4Wt>!es5&4uON&z+l&#G^wn2bBOK(ly;?+_w5mFwCjN%Y9UM3@ADOo`vUSp z>xZ+ePPu~{WPeBlBluxd2L^S_N^psYLT1_yZ|PdBzyt%$PtC(6{b#+`xHR595D}EjRHOsMr(PStf>+22?R4Y} zU3O_Fj{>X@>|O-rtiHZoP-)#H=dVo_)`ZGmw38JPF|94&?5@9~zRkvanhU+(#()RM ze;FI96}tRQ-@Rp58n-%oD%WM18%BU+>GNOLkVvqSfDk;~2FysOq1J;lKtnG#@ZI zgbSH(mEYc4wIbuNcSq138kSDhyOMHv&Xe}y=lrJf_hRo#yg2^mfPt#Jekm`r;+B4ib6X1#In~d&Uaa{-gaJK{>LV z8QvV^IO%^slb_!M*rS#UiMqePpM=|{@mLcIB(gxVjg~-{=QMuxKh2F57iiGNV13n@ z72P#k#J2KV>yNj7x8O++XAuF6}`p`a-3#y1qggS21px@T~2q!==rOHS{l9PN%w@|o!dH3cJl`v#7xV!&F*L#3t+4u3o zR~i~dDl4l9AuA(7+OlOUBPl{rWM!6;q9n3ccOh9>Av+lniX>ZR$j)Bx=j{IfpEvjM zzK-KLj=GPYx~}v5jqmrfzALk0`&*6RS^ytOv~AO`PIi}GbVPyZIP%(^?8%O2-sTKR z9YqfUf9@m7aYemNg$|~2hEdBj_b1aysN}(W-%M?I3^{@PV}+!UjeqkcP>~beuD}^6 z7olRs+Yt!%f!q#-6{{aPz95pXPN|+1*z}*UgY~Nb@<5e}?hOMIhkW!g#CS4(OB3_{{HDB za8Y{L8<(ENL4%vN{MoR|G=qkL;SAo0RW?(p5U*pLS*;L-G;7@{u>qgksTTvM?~@)H z@5Z69Y4j;Ovv^+78U{?&whcVH)3b{ zV0n_6^l2wpQyjG(Y_sFIARR4N25u%cc*!|;e&jAS>guR1-Ev*R$qKL<8ip7gQ&6AI z7bRFfIUh(JcrPx_+R&rj6l`&ILx<6|`M(#{$*P8(qBOZ(e=4m%{?U1a8YngVj_yf( zYx8{4QF>)auW){W_ew%z7u!JMca2`2{zy-##c2$U_S1e67YSyRa&3Rui* zD+X+S0(cPP=a_JJ>bmB48yp^lGo3d&_UPw6l7RL1NZc!Rb&kQ={qE&}p$E0|C_$&s zU-+W?jipTi*D;EdPv0{=Y6p{FF$UefROpmc8>oMRJIFDnwdSsbCt*AR*yzje7!@|6-t1}xt{d0q4)BNmlMSv>a=-;4I6iOdfCJ|Vj9r@eq 
zX7?@Ub&3|ZP*P~G1>Jfn651Yk>tA=#3dOWB9$wzb>|qqB|EuxnU9MN@m{;{oblK!q zC6|oY2LuPpUAlDXl&KC)jbBpfzZAZ$QW zO!$1@Xc_^Rkz(Ev4TYK+$cFCYFP3H5Py^IhEXJDuT~2vdQVab0dfQv`+`l~<@~mVp z_5LXkSF|1I5B^^{{eq{F+BXX!A)zPr_3G}6M|kR*Og~v<5#(@G$nULws!M{ZO1JJY zhSQVg)@u}wMH@3P3regjjy^~6@|LDnoK;u_HY5pxg!LCLHH8Za&s!}YfSGw?)p0%2 zmlA&*enDSDEF2$AKC9&wDHR%blx|5Z@=uNYsoH$a9@aaMf0^>v-Zv%2r<<;Fkv56_ zy4@ril5vfvVq5i0dg_p7k3}d6W~zh3R9Dxw9(rU2Z(#!e+0BP{X{qYN21Eb z<>l?Y8D5#qR8(C1_T9yAE?&I&rd9CQ9jC(2cg@3AJ_q{Mn3lpVy@~H`I^83$V7Mww zzmsz`+cgK)v}5@<9RglFyRGOVdXu?J z%@V+P2q8~$yBMLj1^hu4n0RU9W<6TLfC1Oi*tiTmd*az z{fZ8$#*K zI`XWtYcr_mO{Dntv!hWh8GoP6>_zSOBBimo;fPayOf>eN?MMx44;j6XnzdDTpID}!zK?4)~!5&5Cc8?cs8eqlBdZ= z$qkYQq?@lABSN0+H@$XV$y>yG?;~fL3w)NY;`xnx;6V}dGM}4mRGq)W|19*+HC=5a zR->~q3AZycjeOYib^Rf*0@FntWOz&$vs?C02i5P(WhKBkD8-=2%6X?JTb>ei90Ha> z?RZMU1zDD-J=LlALIPFqNRB3|JCl*yHAMA*T?Q`i)+xa?g+lMs=kZbOHYIrs>remb z@=o2jA$;o8sTF%y!_?q^{$XdPA{PiA{^xvCv;FHXb*r;SGTt8G65|Sm6qMsV?WUs& zH+Q&-NTtC?4|?k;WG!juTxjE!up9UD@)Ip`e)e^W`LcVsDC>JH)wm0}1kCmTqr;Xh zZ(ralS$Ax}wSaz`k)@Jfg=cjiZ?=PLvIqwU2j8tQ`XqF}sxGq!741`hsPYSP^RVB} zB7Zv$rfz9Ee8BO|;%4ARH{64nLUFi)F^jQxp@hclhf#2Q1=w#VvlaUB6W(L#w;~4t z!+|#QkQG|fVX(XV5*+ftnJZWJBl8J`MVnQ2k%gb2DDCdu=e}qp2^DtTZ?w2rHajYfR|!*rU|&L9|AJ&2UljPi;VuGM2)O3=BUz>yUM~ z&=FZ)YqA)Uc6l$5hVF%+%Zn*ka+wO|m7QQq0U9d+SwZpF);_2zEjBL(`u)9@zbi=H z+MJ0z+&Mf!VP$XEq~w>h9gU z_{~}K^O0z;0<#Xn9yMT=U)^WjQ4ZneG}|`tME~46fnd4Zsdc!(md5RxD}n_@8{b&4 zFpHB@g?kQ=%Y0ZiuqMtst80e>b3R`IX{Hk9lWOo!pc%tuehTG=15FoqRK3^0wNaPw z30`~o)X=ys#iJ&RJkx>oiB{xT8TqI6oNFJO^E1Lc2LsV%xo@mE?S|x`xhqedj{Mls)%o7OTU|^Rf4XLCo9Xm9TL`k-uwtSnTpmEKX0u}xKN+)hwp65qy>~Zr zg{AzXOyvEB=h{;?o%BYE>Crk7Ew)P}syxrYSNpmuwA~*)9hw}gX4}WL?;#qcKU4`J zSn*+69bi*+i?sIp`2P8r06j#eg88+pX&2Ny@ zml%26(2Ho#av$}{zcw~(pNna8Cp2(l)5?@>o&H#VBmc;(1~fj3kZBUW>#~Pq zx&`hWsfP2^L!5C9Tv@_!!y7~BhKz%1qvus7j<|?9K;EcnE7w?M*eXx0kXFXB{{HCR z5zd5UvC@eM33yUoplf?!XV14!U4;UN#@jNpLvZ1cyv%KsGggpWa2x%$`*~51(p)`e zi5nqE!PwY!W#vZDm|g`vU!jR(VhOH;{P*AN68_zF|HtC8Dup)G-nnmXx}=G{zJd@{ 
zZk-2PQs1MVlhv*UIlwv)MkTAt&H?mA?$N=iF23hm^vm9@Kkw)?bAR96vZww}6dp_Y zeW@-7%^9owh>Nsx^qIBGf4tJQ(Nz<@8fXk71b5XF?AVZfZx3fD9E5rj*Qgen#vayK zm^7s==Zjg3B4MKUWA!LdjNjQ3tuB|y|ENhTfI>4uCj76Ykr%leTE8a+B<=Ge0m=}r z!jgn4_1tRukjZ;-xSMI%=sbb86y-dL)N`J<^rOK?U6?hS8luceYup|cvBcXTUSkH1 zqpkVJIivCIj~bckKYYJnK7iRpwpz zg?Jw+DMLAnn_}*X1@ZCuwIA$6h%6Og93R*Wo1?PG6U*L13>aEqCyW~>Uue#_rRl1+ zvE^$DUX2S?71;xE*TU-P`MA{8e)Y4tjuFoGifyXfcQd?y{KDCTjqkLi+h*0+GVOu9 z=?pX-ObAc-axiwUQA&i8wwpLfM{Gn9Nl9k1W730pX`@sCzuWxEl>*759{QY=t?n1?|Z$ME%)S*{7luN|s9 zxM@TafmC^I?=@pM9c#I| z=8C50BDmw;L#=j<)NKBFwMaRvvRVBX&kxTs6L~+yA80j~oJZqt$76H$;Y|`t1W_%y|HxZ|9XFeK%lWHX z@`FsI*WzrtpLMiOf6Nn9W`cs$*{vhgD!G5a;D0omQ)CBss3$OLM20*`S8E(3(<$_5mkOD!-+Ha)oy3|Sh zWZ8#rHT-z5;!j(8uXQ9YGxO`Jt%1J25^6d8Y*qame5`be=hlQoq};{$K2+egrvi@n z>P@gL4>`mdUOY@>Ur22$EH=;R5zZG)3M=vPWaHYmXYOT{x-4b~RU+;NStBTODhK$u zcF-~(>-?m$(j%)C$~RM)i$Cz@+yPyFZK{FGH*j0M>hiX^Hsi9c5Edk9*`Is4lA~+T zx)SF;=$xPGd3GC9yWjUFiuS=?%*Ya}~S4w)0k-g6@UN`Ex<~-EM}{ziu8olKO*l-<}ZY(dTU%_XUj^sN$bku1MDA z^D-7PTU0+Bo3-zN5D{>++_Z@xOA z#i{isY$$`&;$KyhYM@kiG#QO=n{6yY!+!x%BjkR3O?-M9HHlAWQN?UHebn2n7>5f% zsV$;pB*NSy5Xf}KM{(V*BmT00i;y7d&c*^NXzugR1~1#Bp!B9x2;Jc0BjSi4Tq49H z`?t+MGFp^*5&Vc72O7{(?)Ey1EQ`(+IGF(A{J*`O@~Y$?ZT|l*zsmWSgye0B_fHx~ z#l^HP0iO3llu#O7(aB&;oVj)hh8FaqcdBVTj2Y0N0}`xRVySk5(i{Kzcnn8ZW)HPx zN9U?TsSt_y%%AArL*>De`f24!(w(bd=nD=W7K%BhgAz3`@d#Fqka7TI!>8>FLfzeF z4-)p3CUK^tY1x6mQia_YZHTK~_rkdO7({Lw_@UCr}M`%ujHFrlJ&COIe>2VrN~qG^$->gSODg(&Y$$k z-hHz^N0wUQRk!cb*}^@5X-PmESQ*qtfqba_Uu*iXxu0JuA1 zn94fT4MQe6ob>%Q=?cF+E*x*j$r5qA39jhs;rvKYg9l$3Q@dXoYKZM&C~%RsOMD5) zlPfBEWH$QoMxfU_=y#7mutq5uqy4l9dBfnw{=dPEXMKO`e*C9YRTEGe<${BsLU}H# zQbjn6n8l^wpmd}V{E?zYgx=utb4u@Y6;16Zc8?@7t?1Aaz-Dwqub|e~P&^9#JxZ2R zBY$GEakNlz9^9Wqro$8bI@&7&)V)6>hHR(~e>cX^R;hS;;3Hu!kusgWg+9KK?Su0pGM z@W%3i(J>be1B+~E8ikP&7eE^S1EaZj;dVg>J7^fsKjm;>fXEOU&8nMQPc-GQ{pt$) z1SlFNG!gXR`uKL@mO{YK0IUW;bT9&EoyXp>jxtyXZYH;f#h;^k37`&G9O=Go#yA7( z>y_&%sF;=3+&1@)t0v>w3O9%%-QkAnzz?XAyb?R|?h9(W$l>w;#7kDZa)M;hl;(Qb 
zX!YfBKO9RBr9-vqbMlR;iGErqF)m`#klp=mDu1VHA6?%Kn6HfvL|1}*%z16j2GyxY zy4r;Ri%^0Rwhdl==Kec3{N0wUrRNFo7~H4}R!tu*{xhFg~lPg59^6C!|?F%*n zxDiZ3P$7(txU469LeIu^Nsx6OgUk?Z!-P&W*x;4nS!UT{Jm<95U+BcobS^~7KT5H$ za(=MHio4)^-m*R_ll4q9F;HWpLFt+9hX0W;7*%A_!LbNFLSc}4amMi&72>SSeoszF zT&1%3urIP{`tPI5yYk*KLL<&gV-#KxjbK0ut2Q6pp!r4PiUbPrb-2Vh!-Qee-gUohK>9|L3m&`G-p5W7Cl4qeJQ;&Q6=oO|RuR$==-RG z$k`gR_kV*==GAxv{MdAUCHy&Ut;Yoe}ONd?c=|e%FcQC!Pp_eeTjRR zU}nRAxQNt4+5+h{t03X%pFKoYxV-@?e8(I8icU;DwCf4@{a`EtkgV7jS}_re6|i=*fZB*hy!tqS#I9)Wirk3L3C zL%T}_T|sFW{SeNFPjX|RRtOTYR-QaW z;AR+CvAi>eA|mJP*h@ZNRQoi&H`iTvUNjEjB4THDHr2d@8=Qcom+<<7bVA>KlPtx>BI(|!^67evWj%nwT(SVHg zi{-C$MG7A6b1sPaPy@LJ*kD8;{o4~A31yM@OD%Dh{nZHyl4yA=#wS4)+-@LG&ouk{ z@;!MHeVf0}SLkv65M?mQNgJx>e%`Xgup2Ko5`w;iFK<{sW#371wEn@+5w^;x5+1N+ zxX=vH4+&t#4WnQt-9l%6YQIJ5S=aep?u-4BSp4o)9+>wd6v8cuM<$*O55Nqkn)_BjFov!D{P0dbJh3qNb*sX?w_4TV21v5 zhm1?13t4AUXe~!i)IeV|n+*#V_qp(9)oOgoS{hBA~%|sHo_@bu0NEL}s+M z7?lR*bwDWiYe`^jcCG;L%)=<;t}KcFA!~XN@;BN0Qil;CO>kxYfU+a)f#Ae)&?x{H zxXYlhe0cv>$7t%i0Jz>QZDmW$ZO6O2BiI(96f**OWCxY#E_6wGVe|{NI8ne&Mgt&~Oq9%2)6v zp^Qf&p-J~bs4q%$WcB_!To}E0xbr9y7=efkR6h&g5zB9D{BaKE*zc|`vfJJY4)_~7 zd`6|CMklbv=H08^p~Zy72GT_dx6R>zk7OzHyqCf>HU>co4H-5N32VVQ=EO1u=*8gB zROtLvOYH5VS(O{$&-o;$Sh!cFa1)w%Vx8uUWK_&mIIQ-}*f(wOrMNUj2!;#762pFT z3WzSebo!5Vl)dPrD)O%cuEX@H+4pg_1!-A?UqJS+H6rtIz}FEzF9O;x;V%I9Ra;T#{x)ZP`iOMQqR0{!_!&2`wtjNzGX z;y``-V)yzjl&X8I)+Ni?-#`Gtk2lRYsHVy$ACQPhWfJlo@&0o&dhFU31~~}#HEh&j zTunV`9S*{`njKkLR>^MH9#v}Sdd(Gl!+Ft?*EDSnNNjVKD6)IWZh~L(d+dI@VzEt` zFobHkOm0j-;L8`j7we6Z=BRcCP?__?qza=YV9#nMdy|sKv_yU=jDQ^cDVywes5?2Y z3|y7YWbr`8$n_J6dy#$_T~r@fIrgllHa6zv=g$5w^ig4uswB={Ba2b4;X+dFc5Jl= z!={rf?*;YpoyDdTzm?-+596wNI{XDPO%!#2S6Bq<|*LB(QDT8{94<$(cLRwZDM3a`RUwXsECF_IhTSTY&(q|9SD^!J|i< z92}mRnVGhBme@-{f>-LnI#^lRauSUh9((<}(tSlZo-6>Ba(C7oVao#6PQkiL6OV$ zVzq8|z5%A(Lsku<+`_h|tx6x+ezf|~9RQ?|alJ_aacfX3Vnw5*5wQO3Nra6f0SFXT z2+Bwg6?1;`a|K6sesfiYP%$&*%Xit1teVB1H!XwedDG@&YroE6=3FJU;{=FOXt=7y9L1iHAa%nwj72zg-8(iFxY`$tE+i^p-W 
z+nk-ccMP$z(1}pbu*w$07P?OMZP(&pZedf3MN2ROBkPo0J)K&EGhn>|%ME1DXeS%p ziR7Fi61UdRvt_LFVlY(S*j53JA66a_&;+IL+>Oo^#If$+=nhSDS_jn$5Dhg#!)4uW z-G_2KK>QO|MdUCwjn{?^wi@AlD7l!|)V7FcSy8)gfuYPo+5P-!?qAs~8iPYaA5`=_ zM_LaNx})zp-T0xbY`I$uKnA2BPyGE$kh+D4s^$!{DWq&oB7o1Pg2mxF257V1`uSY; zpcpnSa_4a~xXDlG%P&j(gUZ?0K;{|r zMghAuC=mg2FMaC1c%H93Q@@Z&HCapJRR#QA46=9Mu>Lx7=kP=V+|o|<-`8q1y(^n% z?(?d|#>W%6px;sqs%2)|rV^hgXh08ibLi^>1UmU4!Ry9wa`wfnsAp7nkh&+@Ac~KU zuZVc=j|2}aCA&&K>pm@typ!qZCS2i&;^`xVegq}acuV~rMj1gWiG)X>JG3~8UY(Dn z1F5-V$9CVQ@z*t>5i~}M2AyUTpkHH$S5r*ty_x|9bqVm3siZue(P(^;SPz~V?xGKH z*?6SOoLe}-nHAn@_;HH^YiCO2UHfVW>e&?bCmZAiB_i-J zN$d6*1%>XcCc~aoWU1;gs-EmUKGh%u|I}VoI`LdZ<&|lwCSjLpBMdoChJzt6EH zbg_tAzWp>^3~jE{^2Trj&`+*o;}nnIAMBnN9kh7v+4Iasb7_1j@s? zOLN;^p5~sKxl`91ltu0VgA2vg@1D>^Pc8S~BfK(FVnY%F8}!o z^)FPMqZr3Y?jvs75EQ+qa*!-W&qju-pLKdH*{H=GjRMr@*Uc&{^)hW~Rh6AsfQKVX zWRK)Qe;A-vha}cKa`u5LJ!%ysocST3?JUxDGh{N68OtrY$Gc@h42~~RBC2HXSX&v< z8xTMgkkwDfvFNhdK=d=43hkuW4y=UHT)SVNrE;EF!;m;46Fu?+T7N?7xLUhJQoDCV z@(?;Lc=sM7sK6%m{zfk~P4!;u)LF&FI(n~n&l|Uoj@fX)i}U08jk@T`b0a2eitmKY zW-=1AY;1lFgVL>~554sR(Gx+ZZjzv#?uu_ z7KkT%wcwGapS|d7>L`XeULa ztDKOrl1jCFs_5A?9%R}1bDLP?mQE+0FIw`OeJ)p|!FduCP%hnWN1_ZkIOlNjFx3B( zAjg=R%Q^&c zDw}pPZ@*hHTw;~cAeF&Ebm)dr)9cJ{@DNpY7JNT`g|p+qR@qpsfqBs#s(smEh34y4 zBgGyuXzcX(v=QjR91CAeAnU$waFQ*5$VltNk`Hg zCoUood@`{KX9G&U=BS6>m3lY0_-oM45aV0e_~(Nx{I>x6u?1p}Aq$4PC-(ymy%M1r z++im*K0TCxuXGYYMro~89bS2jR;60Xix@+|^F2u$!$YjBWqlFzw=_HF#B|hOM9g~8 zCMs%jH!|zhWYfgC;jk?F$@h)r`}=_4cDaHNzm3`l3YVK82>_h)!QhGI-LVm|i6z(XWg=SJO^7AMK& zAE>+186e)Ld8~a{HkN`Sg!cjZ{KfOt%IQq9;w>O<(9jVDnPHrv4cdU7eF3> zP<&ZphKz%KP7wHY3d2<98vVcq4MRr&-p3()|wA^sndjR1U5S3j#(-ZaII_Z@1 zvK{!%qmXv|M0&w|Yq0R|W#=?qLgOkw|9nIb4A4^daeR`E9*ycVGrM_ ztIQ-XW76dxMYtH#4lTy4T6zF)p1oNZg|tA6I=(dzl|IXsH3A4}QqlpWAls%`bF$m0 zKqT;(zE{X{_@cB|Fce;+!!{g9jMyu&XR}9?os^oIe&r|rZ5VV={wG`YLYnTY9dz5k z$*-^Kpf5Fe4n5BufaJ@w+a+6F_P#>&+unGg;Eht+YP#L!piBY&e|@MNhsIs0Rl@JZ zJj&W?H!eOkpOnJcQla9djUC?$cNp@#-?gvVUgPd 
zMQq!x;p8^^)@wQKqqbs{7$5Wrb{SgclAeMwQ%GsBukGNzSU#`6HJJOAw2GWDynFA+JpH4@rt!Zt9 zJcbW49KQK8Hw32~n<>L4WXK`=0kLx$Fa=gQ44ZvH0v!y$P-*Nf9_l6fF(i7F_=t24 z=pLg27zZDz2)qHmKmIzI<;_c5uq$B$F=Yud0$>OjSW#+pZ0MW)3BvkqqNBVQr3xZ# zQFCEgT?ws!g7FW>-Co6Lck>|2PiQ7c2LKwcjpUle!9MT_mtB@|=_VxW(kUn^<}Qxr z>Ak(m2<=3?^em!RAK}YNuLMQ|45`u#W0W=BIlCZ1@ z;|o%y2thX}(30v5*c z`~b)U@8Jkx{4!&KzWxKbPX!s0T9D{wYHe_*#*}03{*yrK)J>8GA#qOib!$#NT+v-g zIuW-p_Fz8`&yQNEvEJBVL$YGijL6A0OrLxdFFLK1(uH%q0$Rad9F3`a7azo1?tE zD^n-6o}SBa-#I4u2Yv+IL`<5R5etu;+-)>B=%wxHG#);HcuTUZ9`%oW0v8tgH@+;> zj>q9lh$I+T2a3uq(jYP`sv@w7-(xk_O-g1BKUmtnxG}(a9AAJq)O3#qYUiuK{PKlo zHP{V|nFTAKgK}H39UBpifL+PyT5ZLr&a-s-R=CHCX5VROeOXhvyi}JQE=gEVPPLtx zSxH^Jo-vYb*DmC%bJxy@BAwRXJPc5UbS9#oUzq+vpQ{Et8f3V z`K1Kt3>K!-Zp##4IsBk{#^oY2Sn!EnU>;sVqFgW=#uySUlJ{B0%3WD8v1gdrka4-1&Y~%0*=Il{^dQtE zil(Oh#IiagS$MP(hDA7!*9n=H?Yk<@1ozDVbsRB@2X$%yv);YW9$gwcz`s^H)UTJaN&@R$K!^&XOA{0<#Q7Woz7&#?Ov_8F+ zywdfi3WRS3%bv5zX6bYJ#ICC=mqeUPhkt+IG<^>DFqVRuRZj2#7jlzf%`{cCu zLWz%CHYDyN9+Lj@N1!#2S6f_%coUdPh5w%j3xG{Ro>2$`)YamoUnV-Lyb)Q%bCrP%4{7&N_ox0rEZ?4q^D{>=&j2<+y+d1lYvd-9>Vv$N#2 zXXxGt`_DMZsIrDS*U1dDNp$!}s21MeBe#cikF)IV#>jV*Umv(BA}jD_{#feyR!ZZJ znac#n^kH20i2j_|@T_&2v4h$uh8odI|1^ihd$MFl8zHt(M1(R~$z5Dik z=xo^T?^B$@vB)s_qd;1AM5b%*+hs3f%_}|08#byTkKxW!c5&7=s-puf$=?IAqYdO{ zCZgB54-6L0o)t{<5)>6fth1NfO*VY5t*7N<0i8QiP$TJ!;#!9vZC=w<>Vwqnw9Fq@ z4QBR?Q95}o=5?G&$n&k>D*W)efC*TbepIt(Ql`>>^u{MwJQSL<`Cqm?b<~M=-(;+58{wY}|IG{hi%qrDG=r$Qu7iR1*<@R&(E8$a?W$mm}%& z%1obM_)pS_aCyT;;0%uDak z^K=t6uX!{dohjBbwD~rAw$_?^)2zz$!b$Dw3BTUN@Y&e2jqh%zwgw@jPa*yszqwkh z_=$7Bdz#&-->hDtf;8fvoZlg-Pg?}1*Vd5i@%^*O3;7s3Q0XVTat5Yg0Ny4*!I1B< zW9Lru*0=m(mfw<+o}cPv?`jXSzT)F1JyDwIYq+vrFCkRos=ZL~M>o4>eYCcjb7NUI0-mRNqz0)&XpX}&4rGu)_-Aq`zU3%X#D8X`U zgCtq*J0~M63+KM({8N&35be$Mv-O64v`*(V;|BEwmjw|8c590mu^F^AigF7}KZXXGv!1b5&Zp3{r(`7m`h4lj zvVA-0cbAsjDGki6PSZ<9A9V1kg}=Y1kMm)pI{$8Is6K9N+OSSm=PeSasCM;UuWM4> zd5OB~T~f8gIrYk3N?XVA*43VJanIZX85Qy+39EJ&FI2T!IgeWHcyIFHrWeMptSvfi zj-@Z;l~%C1M+`)}uN=x|6TY}XReIuX5w25u2Kc@iqPFhc33%9;LGyukyy|Ig{Idsf 
z8Ig#_IJRA@t!;)^ob`Kofst&@w}`7yV3visG6^faIKHP!h#~@GL{D>dQ*3^3;wYr7 z@tkMA6dhPuRQ~$5uQR+UABXB#SWn!UEXUFW4zlj`UK{t8O zn4w(J>Gm@eev2Yj|2e4c^vx3b&?2Yx8sOMD%H}_{zx7R)Y$g;dS*-59l$F0!l5;do zuNqdixQ4qn8MfyO*3GzkWG%iO|9&_4yUP{wFMItDwB@G1XXtRtzpC9Ts7zcC^({X|O+4WY^+_`gysIz*R`;!-b4<-2x&dwxDFSPsU&rYnDo>)%o-NmwQ z4{pg~u7dq`!NL}0vxSx;+D!ZnyQdt-igxVY^*%c>E?stYJ5ESc+}fjZ0#vM}>*=;J zQCpE0FKBE}wVN)duxbexb3QcD9yTbOEpxA6o}4lF0)_Rb`G~AV@u>^5=N;|FO|UCh zBoFUpZKWRhgL}#xAjnd*ifg%SNJ4H~Jq4Zh7oQGlCUTB_ zT6~IhxB3@`QY0iY7_{piM z=3HB|dnT-Y120YD8?r2QSe<{Ri9~vNN61yR^+`+h@vFZ`Jec2z%Idc9?(f%V+2sEW zU^aiE8D1Q;?sx6a4aL}B#2*YAJ$K@+h;Ngr>+#{=T=>Qb(YgUCcIQOEvlSW&Kd?++ z0RHTH``J-v(zq?@)XQ|V;a;L+)wZ&Q74^Twe!ps>Li(cfSXN5MU55d`i#i=gI>pkS zpA~(j9=o-yC;peB6Ff8cvVzS!9&VDD@@YY~v6aE3X zbj&e2M9ku=L<0|OO(T7C)W^z)NU_XyLnEat?uY~Ob&rDk_AM8Ic!5~@1$I#0?w@fZn&BivnT7;((f6 zFR`s2>KZFF!F$Piwa=clxmqNkv#Wh?9qIGCTRXc3-uP6q^|?|zb;^l|3--RiHGSc^ z#)vji=J3)?#3!F@Oge`ox$BhJBzLfWa?)z{RLT62G+w>3i^cM-L|@K-a6NBLfuhtZ z`@#Bq2n#vw$GRNeVg%L~i74J|?n_@fH86BY_oYZg6ut<`wNd8f4PQ!V+8QSg8l&8Ch=Cyb3_2O^~J(WaQpdT;F5Yj`&|Tvz#h?^6LVjkSw&_D z+LZAtsjc!p@<*7m*Y0eaFH_EcKwu2-TsA5}I!M~>sCUYrZU6nMl}g8H%a3Em`A&@i zw8vqzjY@9y-&XnH{Ak}U7USiWWS6WZ=l*L(r&mw6doLy|G<%$G|A_ z0HRGe$+vFZ`aR1scx>Ba@_+2dIt+fyJum`znshGf;o?|PG{PH(z9l=CAA<2a^;={3 z=LAzawgXFH>o-cG>@#YN;^LVe)GLw^tebn{xiPnR!s4j$v5NJ+}$ZK=HmSLKcHbvJjzy{t&#_rkhV;6QVh*P(uw z@&MP-iH*KzE$n(987h-OrJzBhmv__Y#(%~XLce>B=xMM+=8Q_EQ#NIC2Pq_DV4 z<2XxyP;5S2oe{Y0w!?Y3)O?eTnM^l054kLC$4N!xG@LP$s8K(N9$scNy5oM$Yq1!I z({kc0ZM7k^0Vr&S60MFOmKnCYPTX(4nHi zM6SN%BYnvo9FuZR>kGLbZl#L1I!>uQ(p(Ic@^|;&+tDb#a4wkf)f0YDL+nOj_nlAsUd?voFC(d?6n_1F6aKD*EPty`YQlypDhcq|@o^7#=U%aEyh^Vs6 zWAvv#&3WjUx_!W1<2xO{N83f0f-1XF$DnHP=EaO=1dap-1|sEU|A7PhczK5oxRI}| zsXtVY@!Sm+F*|7biNW20kDY!UnB6SB<=leO&lxhbxknKWTc370@7v_iCqD+^<@E|x zikKOXds7`3wQ1w-&s+sOeku$lwWZD=qkpJ z@hN!k#*u)$9XoamkFUojS4EP0?IAforFCmQ2o-3MfI-!-<_tdP{irz|PwdB$*B4qj z|4GPq>v^fDIk%y6@s!lF?;@||<~jD}(eu~^e>un>5P5$VD|OB?-dOat`u)0K-->IF 
z-|a53$tgZ`Sw2lJH{SHPNqFL|1G=|p(Kz$4!q0)hL=uX!5!2ewA-TxQFCpf(ihk1+nlWzw`H+{|j`HQrUM3O&wOtb6h0*hnJ@FQa0&uaJr6^68ADp`J- zYiuAn=Fb{Ibo5KGa&P>&kmc;PO~pcf_oH@eCMe_WvXW=WHDnzBSua z59^HHn5z2ZmoxK%9BLO9V6WY)arveEM$%R2_qI@7ugA_J>J36Qf$!B&V0GTUZ^Ufi zhYXq^)X+!M)tS}_LCVlIXaAIYQu~Z&PYF+a!vkbYzI^$CJ_=V0u@sRpc{p;dH4!H# z$5P>~JdDpH;KE<_QXHp!_Z&JDjPMGLw*~!SmT(f{V=oXOsLk!_Gq3NpUKcT`KK}UK zR=`z-Pe$J@pC`hnPMrVUQ-;x_i>;jl9~H;;N)&(GDVtyQw#|Sxs=u zH>x|Bc(|{<`^K-TkB&;7CCaz_=>jN@&b}MT$*!LMRU=z`S4`ykvtY98S{a2)u{E?w zt=XZE$ls=|{@mNfdWcl(vBxX(r`!{cH}BuN#G7Qd5+Yj6q$I|aBJhWjlK2gA9N90X z&a7=q+1;s%$m9EK!pVyx9XjR{ubG&bY}vBK7;qO+J|T{+>QPTgTv2B3ozKM=xUPlN zq;SZ(2Ze;3+;C4_>>v#(y8>F_V`w5!gFYVnrc$A0NOn~7xgqBOQ;8E?FI?Dj`SRs| zKZ-~G`H77!>+v8iX6gRyd?l6l_*c*3bcV~wonNgBnmb6Rm`kuUQd;hZ;qs>u4`o$`3 z562!!MhEO-l`}RMcBSY~Z_d_Td>^4=O-4N?YV=L;xKM;^ArA&z;93p%ZCizC^PBlna) zS_a{lun#ggr^!nTvtxBSO)J4=J*ICgN)FQbg&%7uSN`c{E*Pg2+MP(TuiIKf{L_zn zY@P9dHGmZS7YsREXeb#`f(}jHfstRGsXoQjmMbazV-8Lr)Zn{Ap{~I@4a%>CJzj(xJmZ?ft?7CWf2K+f#lzNLm-o^rvb^?UY11 zB;SUx{8ai;{ZlbAe}WO!KTHgrH5ktPas9@ThC}=@<_eoo>-0Sz&9-U!$;}|#v!056 zU?DD1H^l<6d?*(=e{=HYX;N0cG~So{|rS?JtjC*;>rX zdh%DJ@|k%@q0Ud?ANUyB=jKw9g3F9v$0Fmo*QmdzT zJ!3JMTxdp%>A#DnqEyEEga zo|o4oX5D|uFjGCU{9EJ5;xCcT+?bf?6LfLr>ahy6h08PNnW^{XdHWPLj zRuAJcQv*<4Dug1NLwITKtsBuAT+~{ds&}%NU5BTH#{*|thx@8)hhq;VF@_wfe(X^6 zqxJus+IGj&lh5)B%|ynpUswA-MVzW3Pgyl+I%RmyPtUrbE_vsQqV7da&Keb!U01jD zJ{*d?LaQ@&RkiQX4d*t%PqGicjkbR9?{)zm!v>I4$ZA^jG=KP+7rb^k`(AK73K(v9 zWns_}u_%dOYKR?NSE1^f)RN)<+2aBxeW(NkE_{8cT56HZuutOx5(@tQiP_<)VF+n= zrg=kL`h6OIh0L3S>g-Ih7q)F%m@emjbDx~1We)~C${bqh8Pb0v`^zc#Y&B60$^XawMV&U=060fdUNmyE@rdg-eT$ zN_wHU|N8TFQgil$cBe(hsn^`ij@PAJ4<3#zlUggs_qR2OC67PZyQHJ5>&+?JMQvb7 zxg};vHzC?=Ur}#Je4xqbj`z$1rk7={uWY?wy>e`nZIL2xin>c!8a*D+yrsOcg2EZ1 zD*k7XbID|B$|db4*{pqT6x1mg`QS8kziDpq&?=|s{=5G?y?E=ZrK*DFj;!tm%zx>5 z`S3s26T8~Sl7!3EK@)w!M(KD^xT!WAb#ak~Hg=$jFZHFW`!~AoA@#8%DT3Cex$E`+l)6RvMxcJLB`!i?&?r%FxmU@I<|F z?q+}ArRa2@OKXZiph3EwjuLie&GV>|3&VKbPnK`#t)<$MGJok`@YlS&wJ1y{AiOlD4Dw}yn?ns;i#GnMtdiVY{qsv=+M 
zzEJ3uSE$lh%8_qVt6(92d;U(l_PK|$S z$kwnD+F--9R@y%@vJyL^FR9hy6~qtxTn?K^EYT0bmokV$ zjr?&F&oo-!E+db*LARam?Kg)M4( zc9`{R(S`-X_%p>z15^7K_37^>*IYz0wNUdoyB;iDuyFreQQ_7+^*2s)krusYhGn~N ztA$a+TRp_`;-9eWupesZ95wd#jt1?e&3#y&+qC(9YyCr(sR^2_O)~6h)^v{_&CO8S z>k5ZeH2vvHRBJzK?s$Ql6BF6zdIstASJWY&Kitwj_QUKX+G3m_FkM=IG&{Yu?oDHk zPooyC@-Oa*OS;GNkF5#+-4!0Bj8*G|bT`|1TX#^zi+FkEYw5zsDUD_r*Z$W?cR^!l zafBCc@Cf%nPIhv}W-6+K0s_Bd&MH%j2sfGuwYhbhCuNy!t1dWptISK@ofK55}t!2A-{KePg`{uYUWP$vIXxsQKw}^lRzrK*#>iqlX zN!}I5!_UbSQs#7B1BI3GRgmg}CJ`!hdVhh1c`>`<>d|rdC-kEle$$G$W&(gyB0! z@=O&Ew(W$Bth30A->TyT4DFBL$@my>B`S2D#bfuvL8(ig^s!!{;dIv3PvRPbb2i<8 zizv5c%bFXgk!w0O$*&^TK>3?+U zR8LRO$mQO@uMI!5xd~uA9?w)RpvDKgj|YPrctpRHig*C}TOn#R{PGms4cCcEVd6XN zhMVd@GyzD(Pxa#CU;z0->6_`BK`ug84sm*-*9Y=)kM}UN?Iv5oWY@`{Pl}0Ml#0Lu zkz{`PNAOwBrtIw`zs~W=>$c4aSqZhg;BJT+YvYZ}!9ETHDB;kEX%zOe7)|jD%&8?7 zQ!Yw!f887YlvUSMj);jZr1isst$U|=aa)nY5UG(Tb1oG~mH zi#HndCC9u8kOaZ<05oUM)Ofnud3d1x6(fdY63ng(+g_|E_CmI%+`Tmvmq6{NZ~47l zh^gT->-~b42AcZVJuGkee6}L*CZS@L%9Hm`@VP#J{v5-?SRP%P*xw|ld5x;WN7HTl ztJ=R4Q^LaBRJ2|0vZ?D8u1EL1BW}ypu4Pwsb#`b|bA9*wcw5#lX#!VJ>>otjGs$;- z{G#Bv!4{g`hUX^o%|x>zx6(a)sgX_*BdPZC`Xq~8eCs`>ih}-$L){ED&)cQlQ`#C3bMW^sY^~-&sq+|5QmBXsp78GxL_r$_jnk8)KL)$TkUT5mnr|lx z4#bhYsmJv#-ET8$ZcRFGfUW()z?9=YUaBHBWA?*`H-ryQPkn=n{LYP=fwZI z?9?*`O%+mS&XA_9I@R4<1pLIX#D*X&RCPxc^|13~p-J79Gaf7|J_vd(hKj+^=K zJlenh{uu*J6=2(>BMCnY<~qf4#pjgqC;HY>pIe9t`_h_ENay5n`E#ec&)GA#AxQlf zrjk|5LP%o6nk+lgRolr!9?hTP8~Edou%)dxEH|B4=TsF@XELd>e!`L6!r|w*BLw;R zLMv`O{FGdgmcqV&KS@CPb+vxp$icXu8x&oaC@uWLSB17VhIl*z08CJ4w1XROdzFfL z0T{&U(wFJDvuLrtBWOt9W#60(IhF|CKqsH*lUNPC`{E+}%T@pX3z?j!(A2Ho$%O5`a z7P*kJQoGab;)6WFP-ok}_}J|*uZk01JMcQ_0k-ejB?FIPkhO?kFf!h6wUB65{(Gns z;Zc{X6{!AmaQ<0L%$=&p9 z@vmvd$`_QC3C@0w{qS?87m{B>`Mt)*#whED+6$uXhnwC(k3f+08dDBoeRLUr6prou zxsl^&A*^fEH_>uFHLXv=jFFO(a!yuOAEOY{QL7stZm*xHQ`3#KCha?LV9inHuK~7F zCr_>_lpa0g-=6Sxp58-iR>4!hTCgCLS88m}uj-td667a&@t}l zh?ax+EMO#nsdQPa4M;GoU}4yj^^yb5^i+Hr+}SYZU^vd!HsyWtt!1=Qv0x94YFC+pPA8dE%fBx(_gXz|&f2MSz0KwnSZ$2DuM 
zVYn3(xpMJbLjN*bb#}efw(ax}%Eqj2_E=WS;t9%WP5NJ659NETR8=)WT|Vmk`&Iia z2M0$tq{nMIChK)s1q8l3jNtOTATDK?;C;M}1ryyp?$i6waG^8JJJrCxckhjQ9hZkV zf+T&JKB1X%>&LdY^3KK`Z<~j6G1y`uFkQ77!z{$j_s}EY+cfmvSmw{%g}UldXy_*} zkq3`6@7nb%Cz?*jvD3j_7?RD-bGd$qbI}#IV7PRH9}& zF}VpwBOv=t2fSI&=1HB-vF?PR^5W>8IdgRHpYi10rRsuP?x*hZlaH@D5bI zc<~Ilnc3Odf4h;wsnO}v$mo_h`EZKDAT=G0&S|MVZp9<5IX*rio9zQ;(HV0?uxsr_I_Cv^mvdrs)WEJ@pGa1`| z_vfxUa^#4DlWTK^`Nd?Ng$HX)x^&wLBg;eCZE_SSd1z>9uQV;}xW>-O8IzSokC6%J zB5v3sUMOgu<>oTschuFN=$3A$7Sa9r-)9rdq=}IKKDZ&DuRz;0xF?yhcj}&Un|Uu7 z23nod4e!90|M>1qWSB;4m29%@{?xb}pV)fii~*PT10GP)%X zdE_vXfWYt^=HN)MvKXj&`9>qPFD4&F2_bwv9UpG9nk!ukW5I*)Y;@A6e#W*9E$#&_d9hTOZCgg_h(4F*{B^oIU5i^Ek(+&%edP<%kREC>)qRky&M=&WO6V^c1FSZ(osfCa_f zA~WZdCKotn&6Wj`)CVMa%^G;yoCmqy+YNlYxn{lN(7Q5B%eRwb=p|GUWBd76sxM7%PPd}!VJ0c3ZKhKMym&Z->I4DTPF)7-ytFeLYUB91yR1f2g2N#h=vYWI*h;W-eN!8Cd8QDP(LBbz^JOI2jRZgPJFlkMG`1eYoe5QD` z_aqLr@~SGsGT+^}BX{3`0nD1OvGU=8O>{Ig)Vp`PN$RU6sBXT2-ADvEcghh&0Lx!I z{QQA<+WAbr9}ecx1(*IAuvzWvbt;FI^k5ot^ONZ_*2a&ujhfq5lW|rp`oE0 z3(36|#FILIu`{N2LHE;0 zH=(Hr;fZq?ZF3JrVJkpCr1VcPC6fHYSdhj;7j{AAZ%03Bkv%hcXUehf*j&gO?Rx6! 
z)q{9rSB;F6^PI19=l!msGzTn5d7?H}zWUmi4D)6*>>uzipoYED{bdgr)||kOv-(;D z>Kni54^G@c*Dy?4z6VgNzXqy<4)eDhq-?iV&1oQ6ctsl|vT2?< z0#Eamo^t;?ckf<6*~Ej4hRk57;T z;+&rRRcANU;E#(v<6D5i{d=NT2nyyN*2zqpUYQmP2Ty$h=OxuuHGUvEUb~ob zYLDLB0~NEdtS7_#s_!^lLUiZgmyJWg|H+e|bBGvN&iL+G-P3x@0g^2=#S29XoUQ4> zdV7{0S|`NL3O-oD+Uw#8O6d|q8A;0OqJ-^=EG{4jt78jetqE`5JP5bU4-L0?{|TR) zoapS&7jof34{;MIC#Z7a?ok97gvMw;H@C0<0V&mM(c$40c&-`>JeH*02wKn^-GV4^ zNzvIhnptSZtPF9W%QAsf^M^RL{$qDwp0Lw1V8F8u(rU#_!WDzHXtE^N!CZ1 zTxSR3>;`{4#F6cnoIiW-`}d1DGm91%=7`7zu4RBwN%n5odS z%>i%kQe4>?RP#HOXW;8=*bNN2p^bspdRGWvqUxY@wJlov>@k>_d3ii=i4sn7zIA7; zLE^htb9Wz>m}L$^-d=hEIQ-IZ0%jzrZnTCfJ3s=^#zM1Xt#-{6&I?57%Fhe zWww9ZvfoB;+)=c}rr-R*E{Wi`35*w}Muo3z+C+|YpnLq*y^Dh4lEfX3*-KT;$$37N zhR5&oW0Lf@_1B}PIrXirzQlkVwl&|S&n;EDCAQ`G7nRJ|CACuC0FP*F{Cf35lK6_k zv?xbgD*y|Ujjy#C7#K)zv@%CHtIsF^;3b_vu#ZNRPlP4Br(&Be^@eh#4?g^PTdc;Z zDPzQyZv9APxq-RM_z0sV#{;(gU97WAA)5&5$F)Zn?_&`|A_QHdReMT=h4W6js((q= ziFSB%Lo46$vAIa2T>ixAE2m$uM@-Sfao#iQY){M6YU}^q9N5H$(hp}I1p6oS*md?7 zrG5)_II6{Jq`SPuiH`;qa8a6n=S(+9q909rbIzWbAM}V$z}!lZNb~fg`r@~abVfSq z+VnIi$+Q?iUIS@jWGw4pHlE1^5%!}I+}AY&gMxiZY{*Af@_5U$e?G|gC95cr-+QVu zOfd9_fsChqffr7DbVEi*XkeI8YMY#UuAy$ThoC^OfuG%WdzI6RbL5G%k(Q(Yqy9Y~ z{_HiQHE`i!XelORd-g5Y zBaV27=PKHHprF0J+E(~@YH3~Y)nvV%Ti?K%fxhUcb`H(Mz)A6K)FL~UQxFk?igntI z#jqWu25^8Wm#H}TmHEVr`0K_O)44;wkCy+g+%)a_$&pg#zN6*h;iQW94~oYcDBX~F zo_cwXa<$niyEmA0O(r~J)3{dPe-S(Y|0X}I-$MdeYF)!eO+8-lQZ{6JR*$E z#czs_BW#ku>kvV@`s=}$;-LQV z!`1+MPGYh48lnj7agr+Ef2mJ8UCFuFwqWRNWOt11;LDXG8D9IfbvDF`W8jIm_p}uF zOu=7oNZ2I`9=t);f9W2j)GN}**83#q4n|$rmRwA6=yxlLnqLFZ#>QBZHObV8LFK>> zjexu`f@0_qmtS;1%<=#S*Mym(%9);AL8ImiZIM$n1TO<1)W*$JhQo_o^A5k7@g}-E znKP+-HpGC)m^DNXb(p!}SJt;k)ye&+?r zj)H--uOCiJRX1lX2CkoRU*N>9lC_4xJtLMIHXKJ_#8da#!8CS9&xW46KW@L?PQCTS zwaXhwj9bl&<~mzp=O z!d8`Wi%s~PU0mHVvEHWB2X@|K(s-mqe3VqvrrtS-Im2l(09UU7ia)Q&O`!> zOiEdXv!AkroCgULz>`kY%sz-Q&Z?RmKtPK3?}>vn^-Xhur||RbgzCk9pi(`%k?FP`X&ZgW>{V$RQ4b zq<;r9{XK-a#^gSR4p;ByzMWb3UTqJx`n>p?>R$xXf0O5AeK^L->>f*xJim`TKRT)$ 
z7GnRP(0l48b>7SwN$d&rnLko10Cvsb=EsZ{EXmRG`3YFgoJyYE{fZ{Kl+jkaY}i^q zv(fGTg#F-n#FW4O%#C>bne)tTMuv&g0)}Q99S)m~r9c%F=;o(dF zkQ39>q~z%DA3){E7>5=IzQH^mAc_{@M9@m<>O&f8vpQ+cOh z4xQZ_9IkP!c~t>Jb?RKS@9WJg=;{m3*j#*m>%zaU+va4gsv9Wt46tIrdA-r0t@PEP zchz6t8PfVqWi!Sd z_&O!d)b`7UNCRmI3Y5m84UaUcf?;P>kZazR?aem!N)oSG&b}<0c5jskylWAQ|??`8Y;t=?vjZyh>FO3ivOUzaoRHG9Wa*D(=X zC$+C<*y{#pNAyoW&w}w!E?>X0>X42U#B7D*zgThYEsPy#mZIiaB26-5K+l4;#J3!G zDMDBmd;0jbz_!QGM$V_#@?yO{Wz-(*T`m#_>br-cb0CN=Gj`LP@{LA$JY`^;y}WD6 zM#3E6v}wn2Vb?sHBY3+o$*tjhxoTQ;N{A5qe)yRDIz&AX z>YDaju6Pz^Tn8LnQ%%^@yAzc8xr2PuQ`95!!p0NgzJd2{VaDGe?8_eQU0%X1z6~ z2~G{{q@EY9$llTxz!=n&Zc0ZQA0J0MupncKqtpxCHelZBKLS1bI}^>i5-+C=bl4O7 zvJ9rCjVBCk#E3H}(j@&L%fWy_t*t$y?TdRLGw4Y9!klkhZ;=@$(yE%CDo0Mxk7{bIuYUlMzgJoXU@}m7`q(GbR9ozSv3;@ z&~(4(;m5?b%P@U>@$1rCt1?RLIOj?SR0`H?a!Bg=v`6QK+QOE#O>3&EHe)AxI#=Zx zYLfvdun)Ay1UpaeYe7P8Q~foCD~>IeBNdF#YgXne+wfs~&CbR35EM+n-ESLzYgOO_ z?mrHmG3ma2Qcv^_ovv32+rUANykm!~8C2rACfy4KYe0`Tod0D^oYApt3{Xz=FD40- zaZxqi>Wd(Pl?k=#hrL=LbNDG|Ga1i!HTudwRg%ThyDsp_rG5kiRi)O(rLc4uZdjki z7>Qjb_>MAX&L4gl|HA;)dH$&Ee?C;q0QbEBtwN94(Y~}O-0ra?25ZNGV7Q{ z>s7e6s&l@tw+lpHJ?ViN(0XVae?D+2i zu%C{zVCmuln%f>A2VF3=Jp>mO$%ngUTsGiMzdpmt-L~gC4KbpOFiZ%L;OQw^GhuI1 zvRp5)M(3HIX4d7N^L5E-=@@)**5m-BGlj&q1e@7e;a_+EX$iInYE3sGeiBILT}CSd#0k_c!17JIN6FWPIn7fwngaVIe*JA*Xru-_OKqzA56-r zg2EBku>FJ9>*5?oi7T_`kiT9U3DG=yK}YjHtUE)p39|F0+H6%>LLx+0!D59q2v}`y z3)%tS{1HAf{yRqaXXLkyWr~|-+b#!}jU8sfDdvf0v@KfV?kMsTOe24!oio2v)YnmA z&_2F6uXOcbU5tKWa9rZCUOrJmjruKpNy_R*ZGEhynV|Cz(`use2$5yAQkjrl8q(VF z@9id%41Iy1>$Fw-RGqHo=M**V z&d)8w6%C+3WS^BfH^U}MvwOSr=mQ0a^8-SR+>5<$`b~d4FL)*48&<;?o(Di952>T8 z*3wOO8K3eJ{mT4G*KD)ZxaW$X1oGMFvwE*MXBgVdd~G3r-*nBWj9-tZoEWy#4;}Ao z{gGCp*c=x{ZvqtTKVSasrK2BS<9u59CA5R6lNRr7zjycS>;=RJ;|xOAaA%z->1*am zy2Z(b3c^bbwViw^5at#H2d48@GC|`tuL=K$G@t#x@nzF7WW`gq?X7*iwt)%T-bk(H zOY)Bne031zIu;t^?P$-`cKW0=HS}`@PNJjgI8vR!{WTJD`p=6Zp5DjpYDXMKS_VeB zv7RfgUUazGo@1_)kjC=hiFm-PN9U3(1s`F--~Ph|g!pgi_Kd`^qL8iy@h}4^?!cpw z>&<*{-e3+N+g`gk`_58J-13@CGufxy^NWCdJAWM`lb8~^xL=2Uz<%JT%i?=6i`7G0 
zP_7iDIz}nwBQ3VfuKTyK#jV)?y6hX{jAjo_ zf2#eGA*ub8D@46(j^yY)p@7^O5YnGXE#ZBU75pw=<4&?t6&)qGH}LjF-lBKCHkuk)5oD*R)cbrKEbk`9qT20-JG(@EclHNjWGJ~JVlZ~`-vjmxyW-0v zv%bE*BUnX7c`Q0bxspG9W+Jd^cfj3xOW&AA7G~Q;kB*24^$%U`3qA`g77kJVna#R6 zDE!1I$q-JL;@B_K(SJ_~h{o?X?NNt~))FNuiF`pfXon>u*ZlqRlB+QA(21VhND@xZ zxd*X693AHAlE*0&9CkNW<5->CZc36+dqKXdvcg&=#-@?ug>q(dCAc@`jF!14H8es; zr}h2B`sQo_zYh!yV9_jO(Q-~xQ`0~W|6|W{h{oNtPOid4r&s*jAALNz8krBEZ{vxZ zMk|mjHf1ZW^LsN~AgFE>I28DZTh4oe!@&woFl+&!=Y$Uj^y^fXVmS65)>1pxWW^)k zh;A3AL%Ht9&Ayt6s=!?;X%Hm3>k>_^fTLyGVcH?}lY_tH6&j<3t==Qt=hm9Y@AqWa z6so9xgdxPIoKXXCD*5uCSR|^a;V`Oi2Mie&lVlVwDkgHhfphw*kDS z7c{wOAP38kp67j=<%f?uZ@7Vv4#@}4VJSff?Ln#|f&717Q>RqRRwC|c%mx@I9Yymd zf^O|{D3`f5zLL z^#8=|3}HTiZpSZaL4r8MS`Ce=GG6|6UZ8-6pu*sx3nYdJk;%y)932sT(D3z|xYm&< z&}mmos7Z^oVl1Jf9L;zkl(1P%W22wP66I;}tPn1d;~UZHz^e=J0J zTo!R|SFQfyNE#*6df`W)AWnp%5Sw&TTwuJXVU`DxZ!w^n|6fp5MuRl zPHw?wV69H*BsJeYrTM(F%Cy9`2R*Nf1DmusX_pBv--Pnm%uNKk^PFD5dzA~8FyP#D zhGq*d?LEF_R8LDmXj4Y=Q=^3~qAe4u;GU9Pn_sNhXP)aFcPV*I!=XyzwdyF;HD`pq z9;jbf)R74--5G9n>~U)aW=tB-;7;#6-j=SwNGOYcp$KjIbJn1Yz$mq@M89O@rL7rRh*QGru<}=lK`v6N9>PktJz<2JY3-mYkB?Uj5pH{Ge2G!PY z274w7fnTNXj?jU}_1bErIAdcc-&G`$R50rFg}P7e*hPN}Re0S>ljRs29M{)D20%dv z&8FaWo*sR?DpYvn<4#nHp7p)V!YDQm2({;dSdT)2VFSI%ArW?;J;Ie!>0I2`-{B&m zwBmP*u(HM2YiOw2SBDYI8!3!1x==EJeKmU7G-7_5KGL83mX2O{AiNF7IV z7hDT0SkP0ZE?ujVu6NEHG7ShgiQ$bf`T#HMcG=k5gD!&jhBZ*&mkg?jR+XDBo(B8@ z<_a$ra<&%T5B}R7l3B;+RNa)6l(2V%Naxh~^Iv%~S_?cR)~#EIo$<07b3a|$QsS8| z=-M{iKg1$%4}B0b{T!#I?t%dNXTp-{PfIHyVumRQC+#a_nF}Yxeg*QGMBW~2sd)l_ zx5X&$W5R+==Hdm!o;~Dmmbm_}~$nFCW-LC7C>Z6Eep9+{Hr#!?~G^Nlk2}xk}aQ z0<{Q}rZhGZn6cA!8CVOgeTuEzKabA3#P63%2@N@8s>Fnfq%r1weS6)%y7qXvX@g=jmw6#bz|RS>YwD8Yj_&P3Omx!?mPu(hqEy| z%%RU6+Dj_QEpEYkr7nBE`%8=`a(sSq$1IpWM_{?PI8*IWgWW53m5PlP*c*#Zepd$L z`|f7OK7!uv9pNBXE2>k{6X&rQ&~=0ko@Apu^z!QGH!SS3e<^cDbaHzwtb2AOUmEAZ zM6q9Sag4o4>ymFRb9wG?1>UtH3C>h!(dVd+HlNEblP|b?iQ~a*^XHhdjZzU!a~r&T zg5#H($<_Hff9k?^dHQP)b*rc89|GgQbF|eG zh8VO~qK_vFg4V1!eS50=!NGt&>>0)_PTl+nSS5}(nI5lj)nTAMc@K^+ 
zHfTt-od;fGZC$%u-9<(ni)0E8rS06!^x<}^Wvp&dl55&e#(eeJ-6TSzY5m6}!gue; zFT34`dS8kP4qU%->IH~!)}G=(e+e*Q@!@+&8LeDe77KvLY$_CsOz!t-b5Mq}h<$jg z+NF3yGeHW$k&=`lLCl(~##B~`YDZKP{sC{MF1gyZF2QqbQxWgq*UM`wP-k5w==K z&p*1|Y64U8)nm<>*Wq`Kwo7p;&(A0>_pE)J4l$+1i!nk%w!TB_YrU${ejC2ocJmnm ziG`mKk!@U5Mkf3Rh^phZ%Z}wMvsKd=0&tnQ*sfOncsay#v^Yzp7N`Z3Pvb$QG%KhMiMt~!P(UUch$yYmCtjRX?PIp5c$WBE^Q%0<*o8r{6jGrKt9V$z@vtz z3an||#K5(LbCzs0MI_r))=c&HOeaw5i@i!}k|a}WRU_CBvvmz;>tZ@F0|s&FS6W{_ zxA3d56-&Evr_ZAaafshIgta+}#W&c19dj6wOHfT*CiiRGDX6@=D*a~{;XEhd+#(4$ z8ctZB68I(hzR8Z@Fa%o=MmK9a60>y)QzuyKZnRlTc%MYtZ;}{2igMNGBkr(cHwcl;G+fd7nmiY~OxLT6!a750(9GQv={gIYz;1uOQA1KDH})$z0vcc!o)yua^%ooN;2a<3Gbb$Z3h-3-x^61hW`M^380~npb=SQ9p{^CioZg6iC zkJqo{6u%_uYbu@_Ju=zEhbIJwTYOo}w%trO^};ZOZe!jl32~Yirj7h9UQAmR^M#aMdjPAfT6-2WGGh*)A@sDH6$_3EGB zU+;iJ=2{S45ApE$S$CIyOEHjbSCz!hBXNl6RW7oR3ds|5VZLQsF(lqjvF&SG8pkzZ zRO&hmNjj`PC4kxW+<)2bPtRO7s3s0w3)7r0^T*X#V#5!Y0#E>9N?2i^_!4Haz}*c- zY))+Yv){&=osFj#t_b==pV@{)T}97@ZKIV%W}|FM{acJ;u+Trm4qHEp^Xi&+Aa$@i zcM$Z5BHn8O7ovd!y*uh6DDftIzsvU{G#Mj|b5sL-Y-Y#OBJn^^nYBbA zhoqlRl<;kho0!Z!GI0R59z}`6Nysv=0;tQpFmg8Fzz!$94hcfFt6}H>53$ncyEzEw zMwB>l9gWYs3M240brQ@@WNyKK#RZ9p$Z$@GPO9D)(oS5mKATgN0DF%t7%)W zBPuf(Sx;g&o`9Rl?qhaOp<}cgX$gf*K$q3gd88@figa#1eF@z$mih)*kILytczr^Q znQ6s-JZji86R+VJb0+LQX}5&K3FDonM-^|GSLyjKZUe~PbzcS|$%x|YBsqQDJ;NkH z9NirKU8T-!2juK#^$*6)=(TbZ-qhvP=bCp_Ueih3M_8djzA1gr`-N=ek1q`=j~cZ? 
zcwrI^35(Zwt?h4c3m72LYEKIL{E{J3+E?a#>WbA@&;IVIj_*(S^wA_By}}l}&{|j^ zX=Yl;pFjVozzcNC8Rgxa<%jl((iZWuuZ==EhWHg{NoHLNyPotvr>}M2S;Pn2f0wAXe82zaAn!Q4qs!@3^?fOQXC$P%0?`Y@ctx zv3{fd(HX8mR06Vj=N4HBS*!K&*GMWuC@wk%lD1Qd4V=KnqD zIOYH||21RF@fd%P%?q273K61Zo53w+ejGS;$4FHX;h6N@JMr$ai;Z0EK@oO;7;zhX zN~&Yx;1My{W4YBSEZF$r(wG?Xxvt@kfZu^@jtM)~>0R$22V0_;k96=7J>4#O2{C63 zNNZnT$gr&#SSDOlXRZvag&XNA-dEB*Sm`q4m{7v@F8}SIkN&@h2-ehP*fJ2K;8@XC z{_DRkLJ#po#Fhna(ey-+6iDF@_DDpzjAVO(APT8a_OJK^M~sSuc@6@EKuU*rfv~s* zCD+=T4`?TT5f~osf(Ds~s?L}mfQ`hP=-vb~asf?3?xt~U?Y}>eo!l*9eNr4O9lx?7 zDDD+^allpaVS@=N-W{usTa)WEtl;+$HQD69J!mbHN0fL=w-Ao2X}twb3}S1}bH!8E zU)_9}e7EsUYXQTu=ScS+F^k^3ebqRforNq{)Rr?<`-0y0D}uX@YSMwO&j^!ZPj4?D ze18zaxNn!BNioKLz_U9uTkW+B6adlQ`S|e^q-GYGj8HbipF$ySM`rWGir(jM-;Sl# zVX=1GfzC+pf%@a4;GpQM1mSkWE+54 zs-Ek}7cOl1hl?^H9+p!VS)jqm)`l0{j-5O2RNA|)YWt4{QiMBXFoyC1xD%;OAT$up zShzQKFfdiUNxa|add(}jl4sog2iSZ3`V*&0lV|*yuQw~P?#tUga`8;h$Hyo)=&FI4 zjftt{sVq8wHI43fW8sU>ND zLcbX*?sg0W{AaITZI~80OtWp<2?($TTC#1IKWuIryhM5m;5(<3bqpj$mO|&4hik)a z&F6b*;?H4K3ybfu?^}~oL2DpA z%l{y)9`cpR(M4krSvD52^YRh-iL|;uA8(zyruITaUv=@s(iaOne|@Im{p^K$smd3f zA7IP!ZSQL#fV|+;#@CFlH2AKv1jj!Q?jS_-p^<864HxZ>{-=*yE#mMi;)vqD^!_Wh zrO>?>#KB_t!p)V^hmXpb-**Pn@|#p1q{#er`c3ZIUpwuOV5 zOQ*#z9gD(Faab#@UH4HD;uo`$dV8tLhqe$k{xywm zw`}~vv84)~eoJrPGtyU?9ZfK2vLU?%Vl&s|5}k~WZC2ptgIelhh^|v$k@{Jzm&*K` zAI|2Prf3t!6D#Y2^7!26;V?l{!amu%oFTP&7?M`#6unWWT82yYGP~`LZM38KCXvc` z-yyLtaz>YYHQI#)2bL`-^rZQ$T*upZHv>@=c^Y-4*zIrYpRa~$2CKp&QXhjLDR4im` zVEzV36^$sL$;{Mm~DjsSI z@XlBI6>({%5gqs1wQDc-jp6&_uBL0US%;82gsb0~R8u5fYhnzqleu<+x6un?ydgzL zhjRXdcS;BsRh{{9H!S>ba)kY1`RYEOYC7?3ch4SK^c}D~-We{j%z_J3S2-8J6ngHfB2A3@>!ls9 zSCW3c=aqtJBb@sq9j4)4DQ#gf!d6{YT1x(1t7uy*FRb!oC#ti8sa6#?0rg+kSd)c_ z8k7zA4j|l}Jkr8L^jq8&Lnq6^Gzc1wnvvTV|E?F>O>grNvgXI? 
zgrFtxgvb|Ox!G~!GIo?QaNeSsrCA;YR-M-i&m|I95X6*ZDdzpQaGSfj@O^>}ns>?K zDZGcNjN3vK>HH1|3Eay$rXwYJfvY_o(-he8AO|{An#T*P9A44?pJ5LI|8`y+>X9xi z8{w8pv4yF8c5<-sAHmJ#54YA4MogO-g?J%;g(L&trC${xvb;*sE{5^{w9{NHPB|=a zZ$@W29#b9pcqKh$u~3!tMq+pJuFw<7v%?}69iQ+7%aitEXQUj`R$;0pT8ij`KIA}? zi-t#Qh4MlziAb8BE4YQ>j%rr<$q;KrB%8HIt-O*Qgi9b)%j-KZ z81z7SCT0+hqRR`V9w8_q+~0Iuh6SNd8XbL&`gQg*M^4oQU;>Hcs6ytV9sQ_5DhoLEAgec0)=GQp`vA7mz;b{oX8Lj8~yX-G>zd`lhg zuqV9?Y&aH|4K zTI8QJFHH=S0WT4b?pJVdpyJ4~N-!Fg4`TSkfw6v07{X!;>)k&`nkwky(+AG2|X+IrX zt$_8h)!Ep|p;nv%a(q{B>_-cadWz}h^_y%%af1^`pWu<3F7LIW2=3&GUbvkgk>%x5 zuqaHpbz>Vwb?tj3NBE>jGofZu#h#^XY7&E}O*^W6wms9r@C$j*G+Z#@>~lOdgZqzpb$zm) zT3h}vwj^^W2H;#VWiexIkBlG%3QCTDEX=!ifB*VrO@Gs`VJgsPh(oDP68p*5IaJfx zO*(M7xE7!t{V`y_%{^bzd5f%7S)!@yfuDuGHKb^^b|J~`UMsdRE$Djme;L%=wOD#oF5IQa38=f zmxQ};<3_6#&_DE5v8-!j+=;&!d!E(G`}?VZH&|G3!%Q8*ekE;fIVB}(`~ofmaGtCF zv$L8be2Yatpph=<{DcXtH@8gRc?WKSEn za1wxn7g6E4fSwvJ~wG#KQOOR>f>g`$c@t*#YirOe?Eq9GGZm=()Q%b zA%ALS<|+Vn#F$MuiU57=g%FrjTvk?_>tvhs{ueuBrAiD%Kzm4Ela+)elb;mZO5{sn z_Gs`B@DkAq83V9u91!p<@%b6OH+t8cV73({MfWH1t)BWZRC!i{JTX2#6YvaQoRbAShY%m|TX9gT3@Y-5h zosMc4hvp02)jND{pH0;N?V)GOw>aevECxls-PCeY%`!a?stBkWB#+g7ettGxl7KI! 
zMue$%(XTEf3?a#1w!Yr(UkK02uGSe{KEvLtCdIKp4$q->s6z$E6YMU9hz&@JRP|i_ zI+gyiY!+dC{hjd9{z@dkbg+cHT^O@~1^$e$3rB+XgMF0#n;!l>Q#1ROO|{RK{Ath8 zqZgfT-2~IR&R~1BnOpj8l*$+zdj_N}A8Fa$0lT&4c|w3jI`hdNg_O@;V-58oNS?>w za(5K^9&m=}UIZ@{)?6}0Bn!ru2&59hY4AWf7=xmJ(GL7=g9uYQn-8`WYiCX=FXadZ zc}FZkYQl((GIb3U+-k+KmudWTs|R0zwPPelvcl6K>pLw-bwrL$V*Oh zAa~v85lF{ohgkZ4rCpf0(zw<)D4Sh7&U~d$agVEwWI`Es6H~G!TOeW~Qqcg8>iy$u z_m2x-zWG4zGq#+CE=u{ZSf5ryR_xhJ)~iAxGkr>#1XT%|GM`%NwS`EjYeWHYRof@s z`R=M}q7=w?&_(INJQ+$IZy3;l$chZp5OrY_ptf4_*=!*Q-IUZqx5 ziYQ_>3NXWg79Sr`=O%?qa?KNwGOC&^x||EX*i@jcjQes)0WMz#p<>halL7>BUnn8y zh=T|V%S_TKAhC$dGb%2W%8Hw?g2?7W`8ojmdqs};5K&|BM?C-cOK_cGpKVmtrR_#Z zaXSCc+NTpd6x_QG);))@HpkQ(qJH8S=aXB*p?d85m*=M|-vlN}`^r5nz9gf5nb%6x zm8O$wBGQ4dSe`Y#gIp#1ob<$3%WsGHN4dx;iJ;|LD&hsNW5hz-_>SN!gc=zk0Y+B) zngCH|&e!>1lF6x5Xuo^O=}H&Je3tPl{H=m=t8CzEHq#>3ID8>mO1l2zjtU5EuX*bvCcB1~i&!U`Y5Xq|&wI9hI^?jf%e$$81 z(4Qt+rZ(mcd+!3emJczTZ4V#jxH6LQ@(jftYpwgM)bm`q&-0vJJk}B#3!fP9Fatl` zLAXO&+RpGwqCv$MT~6q~=d1^VQPSY}`rO6Y!3MJo+d5hjTu!R6&aN5|KYON!eBN;nmk=ZkONiO(MxvE)}it=?07jhEb{qDc1RcIY1MmZ};* zeZs*0b?v+3LnP5}Gu)JBXI-L*-L8n8`AqY8#;6}#j9D1>S~Yq=p*J! 
z6Cdr)_wW=C*f}4OpP8P%d_;vpI82Vk)zuXqGGmWN|J7XhZ|Z)tIu6`(X?PJi4JKxm zS653U-2Debnm1ak<8ZN)5*xeVq(BSMy$D=qyUl?c|?-03#DtpmDJm>##_10lkZQu7Wh=M2~ zqI4(-NOzY7N~<79cS?hVw4i{Lbcdp#(jC%$NCD}N10oI5^_$!4=led-%Rkq+DrfJt z<{ER1F~>T+o@+CXBSd7&K(`p$zXTr(R+!jbWoyPr+}IAZIwS>h(1^SJ+egPGNzcC4 zKuecUHu);3mBuX?`EiQ4oBe9ZP#;*eAf408VeH+zOR9M~UFCLjWD@Q|a2aIfF_$Ya1{eWR3hU{ZEk8isdWU0u-l)$pK3w7uP8wexo4YgAL!0I@^< zPVn;RwiT7T3*Cu^lVO{Gs!J|Ev~C?0RiUY3%w?SG7>zNq@89 zZ=_cj1J7l0C{%K_=}v$PbIEJ@rOJ^-oej1^#mKhR%J(HVX)o?15Eit&N!|q{jZ~C} ziY6;6nUMSU*HiLJEPlUE{FPWOu~l?JMq0kz##++;;Ji+%gdeG!5A*!RE{ex#|nb5NsVIkj8S?iEJoG8IcbPhKr_CwG88 z1+g-c(40cxR1}2J!T`gJ7jfg> zEHF(7+%-DIj;8_&W^qljc$CYr&ZtF6*N;II$ zWZOM8N0k*aVJ;_hZS(jR+>eF#C8++4OId#ScRDB8yb=ZLDd{)_PT}Ti;wyXiq>gq|{qprP!FwpMDz3yH z$@(lXJ$T)zt7XPsXC)&5gGpA+IeQ}M#tRH-U!S3^HSFZQZ%=Bnyufq zk&}l=Qz_znsJS~nyw4(?lhhWOy#DcSi9B0I``Dj9zY@fbw!gdA+FuR1`ujb(=4GK0 z$EmkiV_&GF-CH95(oyFs7+R5y@&HHL_WoSWtvAiY#AGs#dO}$a0=>LN;*%zv03f8( z!-M0lCEOY`KCUDm|4~k85~=g~-%+Y%{-+pjO(fNV8^LnMN{r|H6#R5G&j@W|CH0A| z#>(icrB}Cygy0}*Gy5WsA^JsM$wEps)3d~}hX<~cde85jn5fgWIwjGZYdB)mGS~oj zv~iJfk>M39m^akANjypwq2^R2MX*1I%-VEjq?JUuFSu-pRwy5tOOY!%pHSj{C{Vc^ zsNY{tJs*4R%5}1}5e^ovq~KwV6_J>((GyLx$LwDo71(AeQYm{BuR!zHy z&AsmNsTBR4pP(%Ze1X%QrBkhVHm_zd*O4$%3;!M#V**2 zxOg01v8LK3gL(8n?vo!>9b{LH$A=cM5!Y+$_Cq~^c$q+XgU;WmJ7|v>*y+Hf@`{D~ zy(fd+wWtnf!{0A8g05VI(%0uA+n+xP`!!TZd1QTwSmH@U)!L`+vA@7t}-W1?U%C>b`p)~%bqwNG7a zUqNSgjdU_?KYT385>GNfsiMvSPlS;v_GK3w(d9Ys``+>N@PxC5Oz2CZc_Vutw2rcy z=~PRcrR;|?Z7{P}V(Tzot{=oW9XRm>H&o{*{G!0ip(eVMcL_*owvG1q$nc^@I^dUf zqn`g8HVtPDIxr9f*ZXOuJR|XaiDN&Xlf&&6HyiG&F~PV zIWiD}R8rM^?*J_xM$4q;AD<0f3ijyvvQnO*kLMjl4jk(rz2%@$_~b>_jApnt{O${R ze3M)??FW+wv*dfrkVC}6sXw}K0hTD2{)v3(*d^C*%Tock@5--e6NgKp#0xm=nYk%K zu0>zp-p%Gzk1UF}ce_xdBT@BZd1k7LAW(|{wmM@vu8ac-oVr*61pihiMtm90pB9fDKV&srp20hgCgmim<(mAxm4?(~`S z55&8;eO^hu+8x5RX71F_1dUkzSBZ#(m9D`^B^_yWz20B19^8$KsCDiK_R#3VNJAP} zQg7Y5rM1Ec0EUjAC*H96V1De9qIxNh8+~JJnCWBRls{kl!$0M_cs>_@qeL1!uS+~R 
z8~H-_){oY+;-ehf(fBGm4%)6Gk|5N*pm}VuzL`?qTuwUDuPnIO8Wq1ao%s#z@g2&ozx|usrC5EP#pgBILW75%V{jn{T$N7jS!#bfA zK;Y(ig?g9@kA#Gv9Ch8t)_G~48l(gG12hx08=vin5L1)YcX-gC<8f`a8$J>4FT|W( zC8g?u(4=4Cy`d5>MXz4J(w(rPU@4ZLoHXbEg*0_xq!to_*Hnj|&{vI0Eq(vrIjl?i z$7&6%Mo7w@r>7@`Q&R>>asCEdg&*99bd`ALGTgvfESyAO@s^2atECJkD(0flbAt-4 zZ-NpNcIg`=ufZ5ar2%*RK~J&Lfi=2kyw_mbyyz#KS!TLfLK(y}ByAfQs}3 zdu~N-B(|^5&QaM``_HfucUv4n=W=}i{ttD`6~Ah9`2!zM-Jn@@5jF4S=OR})E;nJx zQ#|oIX(bH5M&ebFczY|8ZMfn!DsiDV!-Tl{xl`e_+VSIox%28+E?H-1y;+KG05V9wyKa%r)X?+?Kt0+ zbtbbtf9PKA$R@X!jhW{9)FWm6z`OGW@qd??&O>%L zIwR3qgj4~{FK{ApmeP(ieu*abY1hNumWek)lAPR%Y7KgG;f6qYld3MAsHm~$$oAI3xyn?$6+qT8=cp@IVFRXG8pO$B4jRbTFSZ zW@p5CmMnY?3e(x)1X+zGm7$u_T+v-lyZezf!J2};v_tX?;*4*8elg1J{@w6XVf%UU z3x<;Physl&<}$Tuo?-NY@ z&hbhzFHYUp9$2pWeMisPC8~ZdKA~vJeBs{C&~Y*+w@0@6_`7gcq2N~IZ{|PkmP(F# z=NYaGe8i$3j&;Tm+B|{zTlg+|dDE#oc5|Ec>XaEPWS)wznR`&ba1h`HV5a2y%$e(> zI$HG~Ryuv!eW*|eoe^Sz6cbBA$Py4B5Ke?XOsoNJbr$kG7Ru2qGl3Y6{i7qdQn!V8 zQzT9k2FwkC3Alt@6am2esjnX%z(+KcV=nvV^90wZKB(UlCm=;%B_nH_OJ18!Av!Yv z=fbTwExkeo8~~&?PTFPQXZ_;o>^dfwrHq@APoEVVZCg7XG{H`EK)v?#vFYLQseqt; zhoDlS!MaEi2hMueX@UYy8I~ST{IBO(7nELB``@8GQ{KI4FR+WIo<3`-1(10Zut8{i zw%G{&3wzTJA3Td6NXvFQcqo!hLJK#dwDf@Uyj))i`{%l%(H%0;&=qur%=+d+aF1(7 zE7m7z2Hh2oz@(UZJz!Y!S2wIBw7DY6r_DmIFDgIQ=p+r2-c}i^omofA<+R=M)DjJ0 z_^#H^$DCCtzs0$@K0JM!F*mij{&o7#;75PX7wxv2DFMLsA;x_6f2-MX&_}sHQ>j&t zYXeefxb*5UFo0Ou*xfaOKwC0AY?y2sVY#b}F*!L|b-dLPOeq)&qcKTVynp%e?jSe= z+E>w0F|kj9A}}4Q4zBI>zmbXo8dV7Kav$SoT*=&cS-=Z?irJ0I+Gj2Jx)Kdp{Sr6J z=7nZFZyGJq8nw5${^_)QSn@vZzh@1oLsN=;ij6vG!kkIq_B$H#Rk3&(9>W*CdDf4rfI{|0Dd|)-U!@Ojh;* z9mZM01_^BsrJ}l&ups~H!!NpAVUATH)DQQz4jR_Uf2L(J(LYNr9~B$jbXud`VM`=j zdpsSb)dxp>4=?c)BQN6{uG%XNC-;V@hg+1EO@6Rfv+rbm?*C?DxL~Q<0!XrtkGaDk z1$)@lkrpc4`QwPy{^aoA^PMW~mH%Mv){qlCYPfUZe45Qv%`=Jh{>-q&^$64w1+2NWc8QiZ&Mf5ej7hgA>4%haK=_dJ+X*92qpjC8G|4b%fH`e#ApH4?cBTF zC>mF6M}V5yM9|p1b^ilT^sULHO<@Tg2{Nu9c{-*v)?QhRzaDU4fquX6MrM6(*%)k! 
zcKOp6C3BLybGnZDE2Zf%tZlfxp^pBjdgZZZEgYx!;s*3!3j@o_P+0d&yqdxAyA(4i zO|CGnB;4=9TojA6%@hnquUc-_6^YZ#qnT;9>&I=(68rp`eh6WdQcre4^8$S6+OpPD zGqIaBceC3k+W-vd#wi9K%NXz4GiIksLBR-nS=moKF9?ZrlL^96N+d9xT+jIfzNN~{ zGBmIp8=XyM6*W(2kE=b@Yn;WiiUj)@O0L5L&J=&UhWfh#0sa7A28qaU^=@-V)^AR; z>Z&T_8eUG$^(aearpu7&3{tx37#?z1g`dUuFP+~wuYiuBV)`j~(VJoh_qb@enr-TP zs3}uT$BTad4e1^xfHQFlCV+r@eTc??{;_~&_fFBLSB^3-6guYL@+p1m$sM;0B|$$G z&!z~+)K8f+|EM_M2c!{sfx$U``bd}Txz+QUSR=8{-kGf@u|pXivVLB-zdk`h=er@l zhx3JmO5qnL0{{gkRZiN`&wR%18Lq)v+&g^M4Xj%K)zB&?(Hpxi6KR6OKK z1sOEd99o&|AsSlNhI2kvfOD&4O^PR9isZxLCY_hi3Dj7BnKctO|B6r9NW+ijbObIJ z+6?xv-W?@c8U;P%nN(XSr`|ur&P`!Me$ADTAd*IhLj?^JC%CY!Vy5 zB=GU#8sD^XQG}F#p>hMt;s1v$@$sx!TzBo()ctb~apEPV? znL#pwD&>yl<6FW9{w{DB!_tJa+Gk)^A+}hRZL}m}kJSBsB1K5Sal~eNgMO<4kAqht zCGvk3e;x7LfB#E4^kt>xB|7`1ulZ^&Jck%jW2_rIn!g@r2_J>gF097ua=DK{Qp|X( z@?obQS5idD;&VF*eb4%Rwo(&SJ+8-Estv5lk|u{SZ}!~|B1~D+)Sm(YSrZ8x z9>{WP3qjLgAJ<1bjR;{s#04|LvI90}^rvMfpcb;qUv3(=1k;{{yoE~N6M&vu+MHPh z7NuVGiL6X~NM_|}jw}gn_;&qTeq^%Od+6m>2t-XHN49l+>%AM7rgxT7xveK0tK@ZV z`K!&n(IEuh=&ttlzsojDdp0lAOSeRzd(r@Ni z@MZVC?TwFiIA*Y-@7#w{h1+5+r?26(+{a-9~k|&m(Fj$ znL=V=F#0~e_yOAl=oG{Kc3cc-L7&$+Q)8$NEO%n9T4$)499>2*SZo#hWLyX%+{o$E zxK!6+SwG{b$3=!b>aNJ;?WNaGRD@RIf-{s070}VwHU=VSks5)C|7zM;RlwxHutsN> zm~}UIu{X4Adnzk+_JfmP(e^JLnv zzMt}9h?*UH<5jH}oK5)rjl@f-10i4cykr)C#$dEJ(Jw005}-55BT zs){%#otM(tC;p=R!{#$bIh*^6r#DaeXV^<=C$Q>3`5AGwq=j=M<4ZQEUlsjEd=XbD zI^JJMAAZR;=Ag9~clwgq1SzWU72Yq*M4kyAKd}2ZnoHs+eqR|{{@aozDXm>~T|<>t zjKvn34O!4E*;2YPTqu^eZcBSxeQ^i>oF0@3r2HO7oXfCGl6z`KHFJ6s=(zTVRn~o@ z-be3N80KTBOxs&3j&=8reyQbMF5~-7F&)*(3mxi7{JN2a3J-I*4BJR`Ld{FjC+xSd z(?4vD69E$*TRaGVaf(fB$hP|;rW4IXD_p_}jYuGK;zD1giJ``jnY+#*y;@2`p8;Y< ze$fvbnvOfg&g@fHmrgnNO1)cQPF2aUAP|>|S8Z*17 zc_USi%ii|ZXJ}_(yA4M!;e3hMc{uNWu@pOk;u+R9=jZ(fcVFOIpbIK@YdRyMyw;pD zPIdTmsp)0Nt2yJ+9}GFyojs5E<_5c`ZF2I`k-sQw#P6TXG%Z~X&5lX)&&YrnCvV%W)$?Z2mVO5Qt?kV>!KSGd0l_QB9De{PH zszf-_=E;`;c(Jl#fwRwBm>AKuo8H$4NznCUzqZ!1yzMt@xmqm{2QH-L(ivIuLwxc# 
z_~C5*$6TCdMU9$1*_w9T1wArxWGsfqv3KvmzJJ8^)VF0LmoaRDNS*wXp+4S;wzC$9 z7aws^*zas6%#V!$Mw5FlavDyb{K@3H{8wC3x6;zv{zY zWCHRhuu@Lg>apIlTinQW1?JLILok`ERWW4`Sq(?mIAFt4MHk&;aQDWXN z9eMBXnTK+y(5ezWVzs>f8Bj97_xh5MS47lgGwMvD>o?B!mtJ;C^RE9nBZZ}euom!7 za>H7&kiHc`M)6SE@luboTflu|SAh6#rOrxMf1f^|@0%frqJUZn0O~&A1?_%KWsqHI zgPf!FScHg-Ms5{w;1e%%xgWUj7(4}D9XP6nwR~Wh?#e#63bOmA+m@vQa==MBVvck3r-9H7Zewq@;1EW;sr!|_UTzAT;8?0vqJ6?f9T;A2wS&XWW$ zr|MW(>jWfJ`VE!wKSO01*(+Pj!_r?wX7mPU>eKkrc!ivsStv=dlzOE3{;Ey=*vex%r z4qM#Thp8j#NGr>Y>%U@5bfCMG^BP&WTAP5CZr1#5JLBpebxIR|@VqOOL@@neH^leC z!wnwSaH7a_cl6D<4ONvs$QWpU^M7VISHCKx-L1TvXaA5cHg`#rju}S+9M`j{E8U8lo~1f zAek!dqxaWZf!K~`#+J!q0^{g*&#vESghvK2^OpUfz9<>iGTJ_zMmur=wG;o{Bi_^t zit%FqvW|gmW6FaIe#Dg5Q2CU)2;ZAYr8a+01c;s55cjt*XmNFV4tWmHYNO{|8^xFZ z6H+p^CQ-^Y_P9noDT+Y3P@@IOrxW zyBc+NbdFZ~`MrM*)a8HZ(dElYmzePGPCtz@`Txvv^IF2DHsphOx;>Dj^bjs%d!(T7 zSt^J`Y^#kPuG*Q?qP~Yb-1#UOz3+{u{-v;eT;kdPbecUZ`SDyL?g;;JJbQcc2|YdI zo1?MGw$7X&*B1+qYcvk&#d*hi0eva`h!TYh0JTP8Sxb=EoU8KSK8~pW1We!^IC_To ze=BI&^67HWG=6AE&z$f-IyyCL72VgHb}0VVd&Q=Rl-h#d@XzYHvLMG!gQGMDE4D9Y zY)5nJ_FIRY_eW$k24Y}cA=wLce+x3}Qx#P_TUluq7~ut?P|ft^h3VuG-E&NZ6bZqM z&_Fz|&K_ow_4!s$zg8=nL=Y^U{yJm+wOv>*@a3;0Uc{)RUa7qS)aZSg3Vd(q>mK2S z%yl#yip|wp$tsV2;?rMA_-ZMXkB`AwbOEGE=qo!9j>=N{+WuD*7aN69Lj2%%U#nQ8 zp$ct}ZC}%s>Bm;@MT?y$wfwlay#>p-D?C*3(tozC+z-(0{-|2u}pa z7rXl2?2eI!nFd3!Mgji-{V;HOc5?k$t|qgh2&mGDU(K?#VdZT8jmGkhMaiF7Ed9)j zSws1qbJ=CsdcnG9rT6DI^Z4*QOXcUX4_~tV3SK~hWU-R4mSeD+hPFRCwocpGJMySD zSy{7iXM^J{$8iebqGv}=S8hEN1^}zqj|8rOer_pn|X08ugHrehNS{ir z6q-_c?=E-y%{q!)Pop#Cb3S?syxaKb3Izfw;D!YtVm`sjYRQdn z87}GfCN?BcG)g}A3)+Ch1f8zM%e6>PJ~uJa=wdp^wXptNIg z@qxxUOc&=}wUliuI9{nZ+Ko|b11T@Re=7Y5PkJgZq3Ofxc1te++t4g@tQn4Eif6q3 zzrF7+vOJPXD6#JiJ~oIPi5IY<5Ed3@gY^m`!X`{`5s{HSg82ftVRWRz41$Be(%am8 z;USO@rU-KY#9;esV=Cj1R$58U<#p?20#DhCA)lR-Lx+tZLAG>5>(b6&GXpW zV_ZtQT{Kkd>FnOABR^I6ex=IyS|WrG;7*bTq{2__z;(f@rZz^M-%y~^D4+X; zhwb~C866DO@Zr0y0FWe&M(tt)-HYd}{Eqy2!21M>NpP=O_SJHLxxg}s{X8|HS$4!x zj12+|nh$W~?3X>Epx|oea)cHibm*WH12yyu*Rk+fRByfpqe7PHjMeU}KfiK(jqA;L 
z?vh6r1NA3E(J8Cx=Ngw%CP~tJ+dv0*k?{K1pNw>vN*PN3&g}5Z=nCSB_58F`@oiG?)rWfpBTsT$>TLI8vKiWYcg>0XgF-Q*~ zv5=FKXpQzv_F`szqsDcO6Q>Ugb&^v04+-h@gSCqk4c|Hud&nRf9RoBKa zRH`L*a>fFI7>QZY(f-UApHme@)X~c8uxy~T5#V;*j4h>lQ1;pbZ2-5)!@>04Uv#`> zG^wkKYsmuxh)?}SA1h!`Xx3~gLF#ceZ}*4QnGZD*T8%r#dQk(jT~Hu}v64NYiS*`9eU)2&+=kF~ViOOc2eC|jU#1@xL30ec8XH$M&e z_Em%z2G^TQ{)7UEI6c1xB1gdRT9?Y99R*MV;ph=<1H!Bqydz$yPJJGT3& zcjmR9>D22$aLf#Rr7Z$GEm(U-JlGOvI$9%Xlp_;}qI+^{2_7B1jF$-46%{^;JQBGE zULOE_c$!=E*9pF7R;2?$(mJcKEZ)(y$1A!yMsU5STS|_^`x>$@h;lq8{05p3-A(Tb;1e!-(y0}4Y=&!f*;-xXA{D79W4sSezckBb{FwF z5ApUNtYrHot89Ri%c;5sO9@)>%>J(^(eR))a2eP5X6;(tVwLD3o_EQxw1?0W-}>qJ z0NM)obF^Kwp=BdMDdem1h2|U}{_j~%@l@R0nd?6=Uv>Ojb@k?37}dUNWhO-fLs2DR zbrI$Zk9*H^Qg!E;1BD0&d$MFKQ$(sl2cbitINqc#J}cH zP~!%Z)Q(iI(}N1Lziu=w6HnIL20z~g zl5rtSh2J4|a$yae=l&X9ab+Ilv|W^nk)PNwN@*uUMvRa~nmkO@dIB#cai2p_V6}B> zZ0chgb9fLit_T&clW=_VMWNtk-rAnSv+C|l@rUJFxhy`a2y+7TXE=P{fVq}*YekXa zUFax&2Iingy1{wA{MXZ(wbBDrBOlqBV}(*h;qlAOA?=^FmXEO+>MG zo5COh{F_n+X5;2%4novWqSh>h1E7-XvQdCjDpUU+b&m+L<_F#HMDP6Ts2;#G1ZE2C zy!wrAlkF<%^@Qd`)<`4&(`pZvVTvG5Dbk}39Oi3~NIS!A@S0AEk`I0A))fHx&%e4m zlqF_so}6X8dM-vh*z*x^)gq_MKV zbD1YXG`=>LmyK?Z6VrO5hYqON32m_~L%4e-OM9=y#xdOiaq6sX-|pDWp6jL6G1)7y z1if zc*zS&o|o9rdrsCMA|{9%pbkx2?TQ$S?(Nb46bx9V+^Ju3L=;MxCg$HOt}Jb@WC>yW z*B``wD?DmOvoHK`M08z?x5S&?>!2e+@hqz|fYVj(1_bl%yPBd0Rns-Q?z5 zi%Mhuf|rI6(#^&U!8+pc&EHiWUTmK}V_bpd4N}s^+Dcwrq1|6ud?sxX6)YocH60V6 z_53=4DEJ2Sj(Ww#hURWMQB8CNT%QBVkjJ^0%tcmO2s$Z5Avms#Z@TahS;zg^uSh^Z zOL_Uq)M*-a_Y73NQNzlfo9zNjkA-Q#j!`AyagiW4(zJ5wEb%VQS{e&jdI0(hn&KJH z9;)*_YSTm4g!~N64*lyDglo5h2(iUm;T}Qo4YPtQGv1di32@9A)9tps2_>PmDdHFf z`|e3mB@yk_uJKhl@8gsKfYSx}t+pZzgI03Q%0V2~>7VWu+jjeE(SJ9^hRY48 z_gr0_tUjXR`y1SI`VNtQVG06XZj&XS9n^No?34!fuG|lXt0aGN`+Z26JnG=UX}TZ< z`z*^SCBD}10%#4l>RQ70;Fn7KyGvx{Zc>-?9b{PIKi|va_APRg|bM(fz8R)A7z*isDOmME=GZ4G` zeyy$@m^yFe!e@a=FoI&V4?f5ISC@QLy#PXdurruYE>x^&jXWyqA@%H84630A2JoTC zq+-c?GXN=%OyrFNAMLrwhsZMWv%~|A1+!9&ZcCPIEb0xk7~&&`o>spz4$5tN#jEk* zVPO=@T0clr!f+1TmxKtz`|h^aPA5H0JKryyT7&(LNXS9;39<~~D_kyQk_O3|Y?CU0 
z(TY^|<4|ebmjF>E$?V3)+})NnK?A80*&zm3;*!dD#LseIvZR#VJ%XoV;$ILQ%LCys zI&ps_-0yT-5KDWwjIEWy-yJgF^2Psp@@|9#5qOP-M5{ z$nQjRL|$%2>Mn&id7meHT`M8547eZc>H-T=peF;#o7Zy!56)riBw>f2d99WIU%zDk zp^WS=ldO}>%v%p@x~*HXj@5$}ru5HYv|7En7CY7kRrlqt^k@bj8t6p(y|%MCaFw8P zy7KBfjP$`(a$Mypz)yb_oTd`SQ%p(^Oo2ZieRg(Zm?Djbn~roizUGYSXvf}}F$JU% zZmVtgh1KWXz{A3vOTMgAitX9^?S8X^4!bbrtmZ_;uk7GGH-@)3|aL5_g@d1~L0gqct_EC~K zPr_|B_620x1ujEjCPF&K`(VDr06yV~9;*S|Squ*RpdjM<*X)s$Jg?BV7;Tas!qKuG z`aff6ct?omtG}7oi*LTmPv-g30qFx{vN`^c{4L=N!u_=HrDs_ z0{sp6>u{jYwP2_bB#4=kuc=!t!9C>F7FUz|H8$(hOyOZ2PLx9b!25%70^cxi z8Ha(wu1U{<_CG*7FKbuNqv!#FkZ|{R62vXfEG|Awmkx0OGmngtQtjiv9zAfwi>nKo zj$PfIUjrOZhyTqOECsopD>LZEm`2e9`%OaknqQ(YkKvMWbb^S+H)y>;I)}3gSkfgZ zPP-)uk&#~Sp~Ja)-w=1_GWM4UAL$1i_-ispZ$#A!^%gJ9zW(?qC|BmhBD%+`Ha4*R zB-t8hVJBvV_yS|1xuKrtpxpiifUnXJ09jutzc#AR+WJmgH-+bz5DYffog=BSq}Du|usM$$?Z7A#Oi zu1P7V01t#s&tU{$Y@!+37OSqKNZ5kF1h^Wvw^nhV@B;jiHnC&}`L4UH!)vx$Sa;jj zUlW|-Am+x^xJpd)jlsk?4Tz$MyZ|mk4*M{vTzAFHeq{k=WB>mUO?RHdRiWtA)E;Fe z@n#4(WM*Oc=(IX2w%J6CMR}4@eBZ<6nwWIts zIq**01=%m1hXYC7qv1v;iAt6I%|JG@KrBcagm!f&7g zWQ13_A8>pEd13$6(sOM7Uo!X1*ub0-T(OM>JVRD?88jLK)T8wXvTMmWfjLX88*m0K zQ|v6j20-V;-*+uK&?vEHO-y|ZC2N+{$vovy`hsm5uV!U>Pr=R>Jservi8imwh5&bp?pDwtMr1Zv&h zcFxx8{LKtL284vbfR_j34CH0j*zDhin81Mq#K*9$9;LEi7d<1_xw^=%BJ-VB$V8Q( zoENAylTGKPY;_YU18{?l{(O@G=t2?H+o_;%)GJ)V0Js?oP#r!v5>R7}+W^Q`xIhB@*{m-A5w!DSLd#;cz#$sLjp>j;AmX&rmL4V{o2i%#ZE)4e z?wC8Ill%GkaY3300gYH2OcA!BQ2Gm<@kpTE)29uPYv9gt_!l;l#`P~{Z`lCFZPU3Q zEL!97Ze8_Q9tO)0YnQ4!#gNfUbOjAz!S=93jm8$ezUNImi(z7Kuj4-=;6vA2TI2s~ zbC*Aev+{TD&(k3Vot#s8=)pW&u(oKjZS9rAfmL~sBH#~-P3+h>Dq5isH3eq5Y<~;S z4*|k{_|6`2@3OF)0Bx+HlGpR;NFq~xC;Rdb;)3E&IgsJRy#L^29o+DMRym>W;EO6__ zw?a6-R`(&k4c;rHWS&?iLckJ?hP;P&_B3X_JZFmm>96Q8sH|VclZ90pPYM*h$ z_`zIq0j@kktXqfzE5J{=Et}eETd5@`xT#HZwE1i@b(cCA%Wk`;5R`gb$SkKbX3n( zodC-@8zhxZe%yRNN;PV3-HM@4GPw8`MM@fRcu+l!42vRI+lf@#T-qN@8j z3w%nsJkGfoMoKD>B-%=qZE-=XcXU zOV3y=oYo|(8qERMiQ58lK%M_|dDJw#_EKj55{eH!H|L8|iI49$;+dJ7yDug6Ce!_2 
z{!5akDYa0EwYKrMHJDY;ZeL_NI~s!7cDS>*)l#4cf_?v)RJCq!#9sc7QC#fxIG)-U z1hVoW%zT{l27(5TSpsP0Fu<;gGTS;YJl=OqaY-&{7(DNQ;uvLi_Wx9pk~{S;gWF05 zG=4E!e9siHqJuoi_(h6?9+W5`n#xA*!GV@Y0C8r=a_Ti2;RDFyfr&S6OB5l7kTw$| zhBgN&tmvLv1!;O8n8}*hzFSkZ$^SGxLfj|OBvN+Zct99to-6Ws+zyyK&W0-O7cDjgR#JxjRO{V=tw^-(y2-8oxn2P zIFCUE#n)uIB1@~~jgnb3k$uM(mU|{ggoran^Brh*-aXoVmjh1@!D4-7yX~49k z$45Xq5sm_Kvms^tnAoQzKo7`Y*+9L{%)I8WT90K!nlCeFB(W8V}@ zUKn*j{MLNTLGU8ec;upc&3>LQX=9iU!{(|i>evCM7>2q=;0yrZ0W6>TAYTCu91f;` zKx%21<0fZi!V0^-xHje9P&SjM`FfW5=_9MBnpyZi|D#t}&BB1qW&bcjW9Pf5qBkD#*+kFy44{lE~JQ%Bp zgz||2pU+OAnghPQtC|F>=Md8u{38WN&jkQ?Ui99M-F^jrmgBvI%mbH=%e5B%k>C~; z0!z6M{wuD{$sV%^m34O~?Rm~F53j$Yzf{j+gsGzUo>t-@TszFgNOm8}WC4tTSb}~I zupwi39u39SswLb-Y9og@_~P4PI;T9n`A*8}!3`vi0#?(j7&XEyX_y94_tIkpx@EpR z=ZAw*Ib^wRz1s%heXiR*?-xNaq#UddO({ELSLjN1RMeIMqJIjd3o&({&iQAt1zp0+ zd}5&eu0t;FOfEyj`ObnfE92H`VJdurPwk!)t-Y>yr<+{B!YGa<>Gd9t55270#-O5( z?@bg{@h@NQsf4Eyu4$hH!>`GO9zmb5Z~3blqPhpNM`}_mCc~?Sch<%FP6faP4(1~|2d`}XPji9&X($hOg zDS=M2(zF`-`KBfNt^Ud-D)x4)G&v2{gj)lK!?WA~%=6ZB!+GlJZYiPWKi*77RSZx1?AbvzbdH-H%-goiDo>=vh=Y^H<|XKcuy_R+;RI;LyMx&;1Br3YrJLC5g+NB} z$8lwyt{KNGFbC*RecH|HK|?RX^|Adl%2iQufrb#y-;mle2&Zuk-orUg+}v}Q&FkF7 z(-jD{i8*}$eE|$~_XDDXQAc{Fvm1|vg|$_6q+C(=K4lpJo$^b4)BwU!@}dq zWbt5RUc+l*ecMRzO1PX7NRr9|-E-wRh1suUDnf+EVz=4|IWhHEInmuSXUJ?`-4a%! 
z27}-yISW<<&0TiRIS-1}CYvBx+OOEYr)#Jqpwz#Trehp1}!h{)n0gBtUY8jQ-o+JdpH4{p+$V5tE@ z4Dj&$q`*)lY`Jx{(-rkvuM=w+kmr3hU{vUZOR*9$`mQI}ON#u{1CA$Yf6nMwt~PcsK%w`=}H7o2urN>mh13DD9&JXY@y=w03FYR$CMi9UGN7v~KG ze@$F;*j7_Ql+1=>Ur42poRdIeX<+8Ynr!zrlxSo^H!&du#V*{vzr&0B2RF?)q~A29 z-}SDi(tCakQVy`)U_dLfDd2{z+3K7;Q&cX0um*QHV!C4*L{~~z9~f+sMKpfv6)nQr z-MYlW!ZPe)$t3Mdq#J~!C543%LN*|zCBeNJuRqUM*-c@A#5D_}CR`57bidU)U|~b- z4)@D`_qS|R&6-2Uk#M#^yM|&F4vj@Me0ZG>6920jhtXY zmICO?k3bs2p$*2vx;4iQAjP;LAbHK2KMsb?fPpfBQE=ojExMghf9W(c7KndId)q#+ zGWUrM!F4JI2D;wu_z^Skr4@Ldkzfm~pjNwZrq@^9T0G!e2g8|5&EkDTwVwx}p|4?eD=%C5t^xCxv^ZAncE$i(ImDuhL-V2I2z zWH>mlaU#x80~?(eSegv$l%9rURt&}XD&lZCIbfpC);2K^QKZiV{1m+`6*octL7oP@ zD0fd(u)W9{ss_NZi`R7Rfo9^e%%zY$sY2WbgiFi#ND(cdG zGq&KM;L~R(^>fRf0{TFGb(e-Tz`vvP3W92R_EBAj@z_9?=njGy+7pVybk`u~NsCe^F=JYO9xS=K84E2VTtN+xz8 zV~}wnCe{}A*MU*MfcaWOz#K!p9_|HZd@xy_Bg+^HLd5R;0+&szec?8NtwVnnZXq^? zUWrS8fzxG#{DDb37PCDs%f_D#R~Hb&0%%R(9D}ZJ9^S%_T2IK?7?ykT@=8J*M2P?GGeVC5hoJbq210b83(7UOA_KJ$ zoW=_6-yCgF|Hx z&DrF!=8$T$x3I=^laJ(YmJ5s*AyGL<<_w9pFO0@oTa1RCbyLZf1(zZONIkw=!S*i+ ziDb#wUlSL?uf{(BxB8aV8<_LazvtnrC|m|Q0P+~&YrgYOj0W=GOOEy?K0s{q0Eb4h zriH41O<(&xCl>V@Obl49VYYh#UZAF&qgi*vN!8xt0lCp`T(ylfs1+mna|F~OnEH8I zt07{zei--vxOxwGEc^F;91S56*{dj&Y{^y;C0q8&-aC6G30alBi;x|%SF-meJ8siK-0@8|dTdOh`s?%db)zTWTSJkH}dPInDm>;&fW^DjWH0*ngpx%m+8{2ug-=lAas?NVRj;;Z}j{VsEv)}6mI))H{k#tl7DE)=a znin`iuwOMAg3j_;U#i7cV@faBbC{sz387&fK+YnKM(EL81?V<*a)#_{a-{!`QamvD z!-!0Pn#DEbApaE}U+*AV+YA)_MzLCJ5eZ`C_jard>-ePs9;yB)Ydbl<{MM0%lKM&) z;75~kxMZ@28GbY;Ul3H?0I~F+7Sm>JR&10l|D_TY;}7XiPt#GCY;dN z0r2w~)Z4J2%c@+@ zEhNO7T$-0Q@%1~hZX>2i*Vic^4`fVzdP{G%(BvOx_x~R*O)czpt#ZPvAb#IJ1R#lj zbCq~a?x!~NTHp?Fst!QBAP5mjmE^KH&n|uXs=6HbMx1xQql{I#!9fGru~-j0p6~z{ z{!_ou_NY{Db21^{W!69XV64T2IhR?fC*oKSC?*+yrx(ThJ$WihfUp5d#4#Fe0)#ez zCUUR_{|7i~yqz(a|%jaO&N!C_d(ERa#Ge>5$8ce41Mq`)}hGle)7NKR*5nU$LE z;xQ^6gHa7J(nxe9B=|zHg2d|J;FSOBJQhn}V}c@9A0F!0C@kUFdGOygr%Yr66_gHP zCWuZHatg^SjVY7a$Xh|Tx?!Izi3#E`GEm~^540s`Qhayl_h@)DnN8x95T$I|$&z>z;T$#k_j@ImzPbSVTIwK%0+O+hgN6l0_T^k_xNc 
zg^ypYBb82k!OT6Cb@BsWsqRPnbSg3jC7-{>oPde!UByss~AEO;v2e9 zD$7RqJILT}<9!CF>Ye5umxyOR?l~C}w-8w>m_GcIOCv`#$sEdRdwI0Q+A|jS(W(dwMD4mT(1Q_&U{6>wmyA>>%i-w6mzc3fkhXY zyzMp`lG$0hhOrr-Pe^Xmp8+oR)%>!I{=h3;-P9S!6q~(1J zsahgiLD}|F02H#&N52o18_=LR4*VOuu2+q(7@ma69Dg$;axdKb7#-c*DRX#ro=n=V zJS3uJ38@b`<2&GJfJ6z?w`_-#MackJOx07b>axUXmE#sReB8dF~oqYE3_=^TWe=h~VIQPZG*d0p$b~u7Cfjy+=^Liuc?c%4J;q}N) zy_I6@^CmOd&-D=3J+)de%7fx=YoP3YnfE{uBp!$>sN<+_ng#2MhC>3)jZ(g32+IJc zVAoF?76@`7u}!&~+&*_$KgEeu9mvOl=Rdl*axA~Y3;i57!f)V76fCQiF#Rw9%`Vv6 z%kvcIEa!Lu3SUXk(C>c1fkC)N_Xs>d5ao0otecoFTs{F>nd3fhfj;+7U2<{eqXyEY zTOeAXa-yp@Rgym{zJ0j8d21#oY{bcy3^wNm7r)*D6}&quBtVUxgN&ZK_ppO3M2x-z zE*Rlux=+_EIp4!4fY=l5@9v$E9W2}HTEKd~l7S+?CQ~5@%FvclLU2-{um!a0Ax5!# z3mm12(_dcz=4T3>v)5-~Hv$VQ@P4wDx_iaavmSO+#3yb!iMPLHYCO5TPT&!BrgK8@ zcIuEcf7dhN#&C55L~H3iI9baXtG(c#z2)xUi4qa&M`1!vfw~dhMc;=8nt&sak6G$I zIdG#(?OqGpiRh~dxwArf^+A3pkr+|rU|^5hmQSlS>>U(*ne@aDrv z{1{*0vS*HHqSiZhL8YMd7iE05&|m?5;tvp1Lv+Slpw*x$V?5+LMF^RqAKV$EiS~Bt zP7=wB?djW{@*CS%9)sinG*(Z(>TZ~|S5{~tT*pf(t5#6!>_c1^g=aJ3`rhPy-kQoxh$Umf|@=z`eG4Ja2XL7N*2T~>c5dtg?#8h#BUSGn9^ zzhCGSl?_(w4gX!cFU-+K?n4m0#fhY|K{1`c%b^S@rT_UrhjGaw6|Ytbm@5EF4*zLe zd7$DEXQKb>m`eTfC-YZI)%!e1Y}`NPqpCXeFatn`yCRFN6ZaklZ&~sZ)6%Zt#Mql0 z*<#DYc1CxlMhK2|=ffpSkTo$u?x{@Gp>a||M zY|nNqU@%%!b*_pJv`ji8$Cb_JK>2J3K0`E9ZFfAk|JH-Cc#yj~je3$qM{+;`1+jc2 zuK;b)ntKN^)&I@%|I1JEp?6kOK)EV+rK!M*U_H}KwVCcOZpfZdg% zGIVYOQr3|Sp6>EJNDV1~w-0-jL(`Ajz~+r9;BNvbgN)I zfX7d3>MH@*x-twO@x{7hSpy(+q8V%c8YxN)=PG=iEJu_+Iy;DlL)ki?@79lAh;gr3 zZ{N!^v9X5qAspbYe@u-LfS{}Oy|Rjm1jJv9h=>3tDojx_qmHlp#ma4_U5NvJXJ8Dt zjZj)3a%k_Hut0if5qDKUVBL=dwt-y{dfRZeHNj~M3WG0yvrG(jw(4H+5Uo%Jw3;4v zT0{zNtgo-ye732UwG^z1|5>PC@uh^m#ceUVIf5k!P(s2pOwtvtI*b0dJb1kbFK23AW@POn4Gd7i@r-tcSUvi(=1;H`Xk2{5_yMA+&QP67M%u{@=NSvsi*? 
zA0=@9WtjD+uT0g_0ZHgza*(==Jom@SWlRZ2CXNPR-wsyeOo9ecOqCfM(xW$16ox$r ziB;KphU-#!b_OQ#F113u-(yB7keXo%hOIL2GJiz7pCZ{3-S*Dv`9U-{>FFaaNsNRz zcSV7-M1x8{>wG*wD8R$ukqT3*^~Qs`mCK!9hG=;!xkm0cdKK>h45o!+3t*=uuci^< ztGWlZe`KpT&<67>>arnaH(|3I@3kj9NBQSEi^!_JqdJ>HZ6s#%)E{69-qj>&eE)<8I%B!>Cw;$1l?gH5WM0uwC$_U0cr zO+JJ?d1)=tc!09tIs-%mA&D69<9-7{;AG+n$_t%H)hl9%?ifLFk{sYOVNh@BT zK8DjrQwa5Q|-naDzaeo@LwxC%}N zTad1NJLYnY0x@`=SuRDnJ{CcUmk+LgqNL!sEYyVdPtzU%*eq0o#qgfASWM%{7ha>@erK?Z;ES zHBQ{X&&r>CIXP1yOh4X%TL}UP0H@%8Q;sSxdg6EFZ!5Wtd0Cy~9sE;>_x()z=#BCb zFBXUWoHzsq)B{%tyz)*)6Fgf222poDY@2~6Q%Gq3mcDNaxOuCb2E*a$JAeJBXawkT zTsMx$7St*si>}-sS|T1^2cZDkwzr!`{n82Z(Br-~*W*pSN)yFCrf>H#qYZR;!8g>xHU-N96{hP@>mNp#Jl8~+Is zIl0Pl)2|-IwsOnM(0xPXdiM3-BsaO-H=73KHlZJi#OAb8p+Rfy%?W4MmiKM`qh*Vs zMN^@+19$>tkJd*OT458PHC3Q>L@7cA95A8aYBK=o)o>ck_@+Yf<_ID@*VpObI^2S6 zUGkEB?9gaZonKu6Ev^RtbKx%tz}5m;_jQ)M4ui_7iObr5H5&yfSU({yckW0zd>^z! z0yXXokTm$t*H1DK@(r>o5}8@qjX?Z_WNU&Vn)Bh+m(MIhCKiA9Ur2p5mHa}a{ATdc zaGz7<#P&0@?7?>v*3FR8ulBW71T;q6W_0R_pkUZQ#hFU(0%k!LV}yxK$&H3N!TlZ zAZO-`9UBBi?oH-)$wf%k}hkOUn~nT-=B_HKb6G`{s7gQ=`Id zY^~n#{=yUlS&;Zr?a~8HDjz%eqH8xT=U1iAocnM;^Nnx_mjN)kYH`nCWYV$YSJiu{ z=RaNze+>x`mI5ayF@oRP{!0J=@^veG6Zij806Yb(_Lzx6fx3#;sm{idz)>_n?$y)F zL5Q;_sudp-QhnxYx!-bmok)W$rlnKnsNmqYY!dC6E|{~s1~Ik0?{i2Q9n9|CeA{3T zIt65u0?4@@BW^Y=gYmO9c+O!3BC))JVMfGDWGm&2MvWOTL#9cSyY_c{nn8hv$G&qI z_O&V|vJij+h8&0;1JN}Ae1q->(l8x;TO9yW5kLfxW7rlUvmH}lbc4pC^vCo|uveBr ze=zvsxq8yGAHUgY=^H4;{Zg@DItVD-5kD2X6WQZg>fWO8zeuDdaydb?(Q#=Y5NOvD zt1+iMXI5l?Y20^w8UuEDh=6%l8%9SO9tbiQu=3!6tHa5k2Cxl4u6h`#p$ZK5ouV#? 
zC-JDx`SZjk2)-LEYUh6kKU89J_#3OM#Ws99EmDS&m@T@!C^SHeZ|!5ejr3Wtp#Rz5 ztrJdx*fCNBILZz2y(;F{jDg|9Va`>)V1bkdi3;ed$nho8ZPZ%;VLBXn6bINjcR2|78V|EkjoDk;dSoz9RMPS;5~)Db>ehBrNDPzNX2aPoDyz_-3S93`!b2YG!Tsf#8hFZdBu8U zlc*|izILEp}p`EL^kB+zXtaNAttfngsPTVj&r++!7x@7M zgxJD@2S%hs&WxaKQZ4j4Q+e0+PB?s(?T0B(!O4Qo?}nQmpmeC_8dK8J`59)BuBd1Y znzz@(M<>2Oq7bf7i6>Xk9ITZ%!!(F3YTfXCn7x246bbF7ldPvnTm>db47?A%JC{+v zkjmNwMgnth>EiDzB9IUy7kok>7`BFj^z$v0HXQ`6*-akNgKEM(WpCc2aZqdxm3Q^5 zS+MA>Ym*n)s#>q&fU$74^%pqOK!64!l@FB4zKD~nJEeO0B@{)8l+%G^nUXN9esSb) zKV5vb?PXW;JY=3b<~iR+jxcqoPBYH4)36CZT(hwfsI<8#k;4EaK3GVg7-@Jc+RG~p z`g0Ce^*TIqZ7G@>7B!ljS4G)bpkfb(U{@HAGCs8{0i$)QnD7Qixc;Rl%qpd%0}=!w zgc^y%Knmmk1hOI^1xj?3sU0mFnPJ1gEG#*sgA5GMo1+WdVCHlCQVQ`qVxU!TG-Tvu z#(f2Ck6wT`lq&6CKo5kp9_`)zn6?w@7v-bq_!*qv-rggJ?40Jf1eq#^ZaG%6T0V))$8IsIgK&eF{Bl$|hQmrTmu>kYO|^2HbBLgfj13eB5N^@=V6I`OHQX2d zTIVn*!F>&<7{p6zH6}E$&3}Q$X;XeO7kBN*Etyr;M3ej70atK%T{IT%hrI87%7({&_#eKg~=@tnN>|iUb2Ec zn$CNRHS6MDY4wLQsxwA{P%j?{BR+p{{+ngD2K{k8`0FX;caF3EkEM%5y*@9?PkjY4 zQZbRJYQsL5%L~wB>klX>sP783DQU0O`L%DCyJM~PUtpl9Fy(jw=b|rr!Xf4Z8K@?doGtw&fgQ4Al4AFHLHdRm0pwmJ9n~zrAq2^bhDd$`OmKl9zi*JV z3z8skuZ914_Q_oT$z9dK#Riss(dp)UO66%I_r5=|{;MikSC!Fk{!opCI0JKn7(?Bp zKK*A#6rhd8s?LhYh=`5NW;$;DhD$?v+CdZ6`9hyYq`_xYwpOuRwwzbFv9lwr`_~`K zp8@PF$e-f@?c2c)elzjf9}Es7ln@f8g=D2cAZp6FdI-j*O=dq1g%xw^Fh~p;wNhgd zX-LXoest&@1RUt`P$JsZl*DWZmtqj%TMD+42aj7?C#0iv;egx<gB429u1Tl9kp?NP99WR3*|1?GSsd_eWVzn< z2YlN6s=x^0@Q%2dBGmBHpuA}cU%0kNXsPw#k{z<2LF1-w=m&R{1mrVD!+nC-|JP9P zY&Q4GPv8D{-@84E)zHXj za~~B}q~C~WalzOH@Du(=t;T~NV(ge{Yj1~Ud>MhShxeXxVmZ@`raHJh?77KN>Tz-&vI0@+lVCQ6cT_mnBMU_; zjC)Dll4JxnhyJ7rvoJb?Bhbl!fQv^%!G;BaYr4$qw1@Zyi}kXw;h-CA^93K!wRZky|ztpEmK zK7+b05{#r5=^V9$U?-%gb-Ji`dIOQVK2K7@9$1$E<0lfnR`)YC>BR5gLO)Bj@I~Jb z?9s=+`3cT}7J_I274~-s>HjF%aGkr(Wq{NJ=8fQumD)5La&f{?^|zNPe!}?I7-0Lv z-d@)0`ScJ-dZ^VUhV!(6i4z#^L`J&sXN(iK!4{#VTuR)&mt(Zc{t$8jeW3g>MALi% zfO`ILzX&30PIu$g6E{qO+Bq5+=4O1LBqJ!&|X<)3SGM1*X&w2JjwWfV zS!$8hfgn-7w2?{4=eQ+abLmIV<5btBjA3N5sXV{Lc-<`+8;VrVkh$F+uDv!7^4zD8 
zK!pHdNY=#a$&z6B_y~pg!+{XbHVnRVm-Pmm&lH+uh4xby6J*hT>z`VljvH{m@Ed5+ zegxGq+ztzgo8Wwx3nfJ(GWBjJSd)M>`u;J;n8;a&n#98Xfy_CYJvvRUuDoKi-HU!kD-^?~L)!3!E5e8IPg(~swCOxQXmVlat+*^*l~PJAvSVYSLEHKTMtUHI6G zc}~KoU?P8chEniQhF_S8A02)~EAy&u?@kTnjW>jkF8%#wQ%(`>JORT=-#+^MV&JR7 zm-O@`=kkb%Yc;2b7BC-GDP1}<+!N*^=y+^CUK-91lg(h@+Echh_+V!>3bFt8JSpo z9K&fK&wLGi;(!Xdud`L=2S1X+9`!@N3ze!{Kl0f*-F6J`^5QW)+e#q#)Giar6DFzUMAI#j6Lt?Db=p zKey@Yw+$U!Q@M7IgE?1F!Aq}WtT@=cNwnL-LDXROINy|zu#vU%P%9{!F%O9w5;*gc z%*OolwDtNZdk24Ro8?S7#k#}xf~2ce7y6}n&pV&Y2~(QHCKSw?lzB-kqRthCt8{DA zr+2*PD}#<%tBFB1mUk`cxDi-e^d|R{G)j*MOsS{gUvH_Wt8SXhS@QYM zt%mzOQ=8i5i*y2;8yo##cC(J{$l63@YX9P|TJJNX_0p`Yce05Bo1+%_3OYKn$q;OW zhiC9-p_j*cJjNHBK<7`1*Xe=DXo*!jj3tBO<`c9*VDp{y*p|^KwT3}bi|WfOW2IpO z1J6ayjvYXa0#JosSU3?{BU?+^`F({(&!nWJx(Z6;2uD^+r@V%euh0k_uO|B=>v1V` z!WuoMOzdJjD(JFw*`?f#a87gk(_#26gWI_v9=Ey9dKZUIRc;AeypO3I&xh8FIZ`UC zgHeTUw4)NQ_D22+PurBQLN#T2KOawEBsOp1+J?$=DH7hwH-@LnFtYyfE7qbu*XA z!I{nAC{S3CzQQ2n&6RqOBhC|7%mrd1GV|!*bl}0yj==cyRUM&Xy)THhon!U3v z`bxP$kCGk7?fJ$vh7HH<^ai!JoyKArcAw2$#OOW5_!R8l4Jnwx+ki(6{`UC$@e9RL zonv^l7eCoZU1S)YYKtaYg);>1)aZM@=&$&-*Hb; zx0Cs23qbaPipphH3w6xSdxeievU2*p;Ob4?_w$a_q6B9JA zkbG2#BS=6t0T`pCWh)3VpQyFUbs(^|AS2Zdo#>1uudUq)bFIlmy@cdM4sJr6OIGn@ zznrLymzM~E&;~}b#}>{V<<4`xA3qXRI4(EAzk#^sUFITHyGBi>YCK&J=Azlxv9H5V z7r>JKORZ^TuKy36bZ>nar!+qMA0%KnrgdJ-4xaxLC!D=61Wc zB3@h^H<59L({l`KTw7}~>KAQw(*@w!G}4Tpgwq-(k2g!I8<0CU`zu;4iFWB1$ji&W z`r9Fb_Lc(kHCWb=RpaI5^>uK|c27gI#9LTS#yx(Qv$N&4P?w-*GUJfw>wc2QRUF$4 zSCU1V)|C8BDJKarLZoEYB@)%Yx!*$ZcHA5i-P$93W&$fz`skOj_`zGEIV#+N=kgyM zd~9tKuwf;CD0aqID(OI#=j{oi!!$PWp6%t%6;Fl{p~z&7A8D4*PIY#y2HoZzdM?7N z$!a=C+4qQevHvTcXnt~6r;N>~hc9^U-^)H&KV%r=*Km3LofMzq{U`~>1>EoEtgc2G zIJ=k281r=lxLp^8O{w}Lay%^#JVv^O61)VRPA|pn$}P6pjFEC1wRF~!;A$+ zhIj7xeKf)z>d)2_C^)_PK<2`yt2EZPYO4a^u#&etM%8(IGr=e=UFl9w9Wh+h_U*O& z`E0#T^4wY2XBqe5(V92u4ds8V2Lfl`yQt8NHoQCM?NJg!5Uk&w>6;_`{X*)5BVh z_LbK#@Z7RG_!kD;Xoy`B;u|b?FoBI|Z^~O#Sy>saod@RuHmwVW-=l}oY}#xV$t!Zkpof z{zd4clZP~IzjbYxa>(=Ffh_Fo&ZWzIaosL0*2U_*S@(p7cf2}CxhowAhBVkYnWKC{ 
zDmg8hiFj#Row}5muYC_Plkk1TTtYJyg1SHPI2Y!_KQ&cMh@P;{7}dY{;)w>q_dEE+ z_q&~BmkEbo2frx^9^0V8-g0;qCvUy?<;sdsdD!9@Db|aVY7$GuYQBqKoDBLuSQ?wK zXBM;jJ*umzs_&R8e$5_z){#`@ivR$(5ZO=#yT>4Zfx`;d?=`A1gE{w(vkv;yquQZL za?4@Q_yub1jmw3tr3ATFcJ`NDrDLn#gQe0Q8{$d^*>{)AnYAvL=j;_mOO7xlLC?b|tO1z5Q%?Zk3IcIrpt6^<+z zFbfi$upUzb0U3DKVEo6zu(4Tp=yHCOOnvmSrK98g@8V&ddh-*MkLlB=IO|p0S03cz zs+f$H*)bs7{NAKHJA3sXqG!=;)xY@=NN2b6{X1cVBa~-cnLnQR%fWK>MED66qla3tu zcSr0kqaCy%^$h<}#t@h1(IeT2S^hors)mx@Qlgh!YX9+OBVL-~?axnrTJ4?i+~Jf^ z%oNWavaQ;`_IdK*P5*{)C70KWV`Ku+m9RE(S%PAV95Rdg)Jq$7Z=P@DGH-Or;bozW zqzoE-t(f`6@g=1&RT{DBMs{&++R+e~{i$lzy)zcDR83EKb=w;B>(~ds6F~E!??g(y zyYtLdzwL=N^$ADm$;Qp~Qyu)yt2)BrD4{W)svBYHQ+1P7Vjg8KJlMFQ1EXt|p2_p! z57~du&$!*w3H>jKzwxFlbnsw!1rp0C1fA}}%t@A`-BnbQ`x-ym9@c+|L_LIj1%`&! zbIzAW&Xd}azIrPXZ3OJ!U{0YVT6HsAU2%eY9dSc5aZGa@=V zd0VfrjHmgT?B<`2y$$ER>3WgwqlXfq1m)yOnw(c*BdfjO)mMw4hY!u(4@EDu&Vgo008kN-J&e~W~67ZV<4`gkYX&_Z64vC48B)RJ z+U`M#eXB=EDE4|Hd!oL^)?AjI$9G$vdd(|M`b8-!SsQ~c8{(C5?7ImE{G4@6ab8cg z>qTw9yNxTZqN zZFTghO(ycP?4oS)+eA+Ftb&p52@Q#W+LY&add-W!4w`3WXIm8C$;!)<$W=)N%{Gs+ z4+Md zS5b_}lxpzJfxF7L>4hs8CJ(Yy^(C?Jy{kq4M zGg*QqZl!TNVkmx^6?;=-Ptue!u3@dpx8qkqOZoWw0a|h`t=Jla8-Tdz`~LXAEv>(-=SDifrCZ|K1!Jqlp``|m<>P<)QPRJ-{5>!DmlriYShEq1 z7_JcQu#!3AGFuiJqMLB4BmJO_8rytwG?w|5cWa=-IkwPBWTTmXSL^fbfAQL-8*mbk zWKj3@^~G?TT)B1YmgVU8z^bY^QW8u|%r9wa(~FC)X-=N%)-?ylNTO9JHUCfi`{RFu z`^Cl2XRGEL?{7@Q^}S2lu1=7HP;A{v!U~zsA=fH#cXdojL7@(aIG5cK6ZJwvoNSfc zl!v|-yBsT=R+lbEW4gGw^i(?A^!4{68wnM!<%^$ha4ypb{7Gp_N=)?ZS`3wXPD@K0 zeTX7d^*OGDg&O&pBU>TcqIM9tEpi_3TBG)O&*a~WX6qZ1AJ=2zstKLRRCf~uTC7+P z9W;ZyiZyHk&)!zlgjQBmDE^Dy3)FyBSa>M_U=Gl`3C-|KAAgM}*Qck>yQH%m*6q?2 zx14ReL{+@r$tPc5vlI-#5R=Kae)c(1QHdYOhL{B5WI+x5^S{sBgr0=4uN!aH-Qx+9 zY3!b$%It<6-7wa z{9dhbV|uR*!3TYiN)Tx zp$(zD_Y>X)uzx9o=B~$^&6qH0*C`L61qGk&b%gYRx#i=w_4qEXu2+eOe%2F+cEAxK ztEYDZk|bvy*CzSXx?OtZx;?KwAn^C8-_k!}*f;D666!f6U2;7%C>nFQgQ8uta!=*v zF%pzCqPW2prIFhN6cal;`}o9!or6O?RHE<+j?CbVX8*5W-0WTk+I04&5C?0D*7Kiv z(^#X5>ez{2Y&5EAH!3Mj&HL!;&_l&${GrvOSYN2IDQ?#Y2!!!Lb3%3ZVGa|t;_omi 
zrqG^d;FUu%vc0kQNK3c`@ zM{-Mga;w>-Dd{QAQ@m%h{>4iCYV!SkHIn++BIFy?4{1+{;)qleAAPo^bq?CinTg^I zE13HwJ0L~kTuO_Dd%-B(UMjmSaT#6@dHW^QW37cJ%U?@kx+|@|x~|V<-rsm~$N_0! z%2j`8?Q`({eZB*NcJmjuVNAl69|Z=@UHns=!u0erDJ9V#KmG)6hg{f==j>?e%;mQa z_Wte|Qd|XX>qVx%BmM!k_ODVKIt2v<*aZ@ctKq4|{AM{)7~~~mXJ?0zAmqB0GJF*| zIwHwtNLsXWu{R%{p~)SV1^+xY4!*Q1} z<9CKk9cmQssf3;p&W7a1;>M!E_`x>!3VVy5VKS3T=@j>$Crbg1=F>%Kq5`Em502vP z9PET`p4|oz+XYyA5I3>`Tf#vsRW^8Pg9;`}os)-hpCD0iE*;lIt^4YyHfhsmUsVaW zo%3HT))YVJEa0u$A-qCHo7!fHby~*u^p%{JRj9+v8=0a5w>kE^_hu+up&>gF96(LL;02C;d|r8VNtiF6t-Ko1 zpN@~L#rkx5vgh9UnRPa5ej5rNGqN#ipcgbuiS9(c%ha4N+WEzX-klfPpeZe0aNf}k zD6{4dt-Gx`Z`Z#dc~BcmXa8m4JhG*gI`BshN?>pijTWA^I6*?(Mf2CU7@yt402=8< zDYx0S)*b14Br)-l>EC9jlb_T?tFn2ndBT#N50Y6nm+BM6OMZ(}I8rbcCMx%gR!zWFb62|} z;>(wtquio~uwl4%aamw^sXmO~hZq`?1HthSMQ1guo z_rep$>>ccoimy`E?rDv#@P`lEk{<}Ki7kKS(GX8h8D2VY?7s_V(D?_=Wu)iHJr1mQ z#O=NE#phCn6rZcN#no$W+}|%;H05QvV)@c~SUEd7OK>KuP>t23xVdP=5_!mb53FS) zNihgfs!E*Wb{>ou8~g6_` z&C~V%m_3UxO=K#sdDoOXWdfJNAQAjK*GWRdE7-qY8nfw*-uRzh|7JI;1e(BPs>+I}d zR~aNWc7}CPGm1*|rwxI`LWc{f7+@j&Hsr=28EOasVgKNOKO?r&7-4}KWfKB)^3kD! 
zYh^jfA^-f`XU33Ll%{x4Dx$G@rzfvL5VfM& z{zk?jU?=RyaOW8RSwWZcbjpy<&hV}OtG`bL*;x4Qx&1Ny({5^a1>~#H1t<3fY zTvU2CwonLK{tzBsKU!=7$yaD_D&!Y?JbZ}hzPmC#+Zt|SwfsSnRuA2k2u#cG_4RJ> zyQ){a-2-s0kSg&ZDM`n|6{@kX3aR8gri7x$8|Q*ZSyRB+1I7%Md>t9!=Aecb+V0>k z9&(4FYY3rgI{N*#o}M1qLZL-RDx1LPBb$NBNcN(?dGpJfx7t5X>G3nZpHfPtT_!tq zov^UYNG4(i-k%@W(kd9u@us+LcRo);BCYR6$pxj7E`!OgxtB7QlK#JpsYN`4<)VWh z=MO3warRc~F$pvj+lsW*_Utx#kClfK0Ag0mY%t7a%-Gd-*w0=!G)bejpeHSnV=k<>NuDM_06oG5y-dCpFfYonW2KePyqp8ZJq@qzD@*qzWOL^ACfXZHJ6Mmp)#JRYm zNi;jHYmtm#oVZ5whiT6T1%{wS)m@s+`2OnDTkF78J7+tiu|VOf$_=VJSs_C4u5=;w zM6Cff?K8sJz@ywgQL0RNK5$-FS&Af)>ihh#Oi`;yc^I%a2ivjAkCguB21zMEYvrhz z_Dxyg$B!Q|AoIGWiQ%fr9ah#0KkDmIlwsnRZk#5zbwmo4PcLiqPg=PVTnt$Tq5l5; z56k<-CLm&FVKK1YlU2;a=!;e^HDl+Z2jpnT$!Rxo zUKBS5tK`<17jb!vZ(9nN1Tj@!&d@cV=MOW0DnfB{PeU2kjuS|uW&08ZEp;j24WS;| z_zFpPnPxT)UlwlC5qS8xCEe4jF@RaeNAs8|CqG?2kNF~Z!P-p3`FNjaN9deO^`60b z_lpBrZBeRDaTvbq%C6e>G-Pne@^t^_ch$BzWBsLL>$61X$yS3fVW-}9(VYRMamQh8 zN8!u=9MKK65smtvKYe;NGowGhrvA1x$meFVDaQyiGxP6s#_ICy!579x?0T?KcBr&p zC|$fH?igqz7NaVhZl(ReFSBrnE0oj%Rsop-aN0t`Cp6H1kaH@Jk9HRlkB?zI{_b^A zH)MY`S1`pM6q$A0I$jVzDPAtEkM;-B^knG1u$w zwLD!fC1B3Ei}|}TmTq%&yc?4vF8dcaxF$mc6uSG9kGCEn-?6a0_>(?XoM4y_q1 zh{t>HFDe%I#}HU=?h@AXUzLlyuf`RQ=BOSh5lzk_@c>v9d!aQUqBu&jI_Po4>rWcs)1`o8M?nwG4Q zBMAub0^io{^nM-Sxg2UR_*q7JR4AGaU!$M3pt{IW{hP^kUrOiHQFCKBi~wKFhfIbQ zo;2wEtX_s|#ZTK9X$-WsnU*OK|7MzGw%uju+d0e?>3Js>=?FP3WVX#zsWl&OVc zY&J0n?wdFAL8m#Tm2X8@HMy{m$BAN?-7y}@3JGb0c5*$ok^K4 zB{xDF6!_U*)0ZNb2A6k0!8EYMa@M;WQl6}2b&q(NOjgSD_4YoWvc{`kEx*!irX;s> z^284mm?N04;SSMG{%lq^;D32Yexk%pF;FH5wQrX>IULMYecO6*P+c4iJj89oqv$;) zMNq=pn4K1`-L2L3_URB;U0a7DDsuQsk0a9n<3;pk;c7g-XAF-&;K3W9W%z1_LZ=M% z@uuI-Ca_{0UD&*L3Z&7}eFY0qw7s1<)SP%t!=HR`q|=+~?eXdm4PW^6dE? 
z9EnSI!u*OJA2k_;8f8Mx~?I_~?*(GY01C8`J zPyC1N+7vl^GU#R=Z4q$0ZVPp&;8jyOK03=7Qn%N_yTLXXJD55|J=(!eWzq}n)%*9C z#h{mpp&;@Z5$3?1YdUjum+!jw-_4qBq0oY)cB_r$>+?Hkx>rgWvV(C>d7|Le7lJbl z)*%!!wj=BS1>O*zSwVoByX1IxM5F`GkshI!7}YSY4CYtim;3lu(!U6NV=C;~_Cm{a zM=#=S5+E^CBbFv7)uZN3Z$n(pOrcv1VTj`+h8WOa;PhU{&B3Lz&a+mtRKbA7W7}(b zJL~R+uCq^*oF~H}MGL!)iJ-kXV~-|NjZW=@0$sAHovat%br)R z{>7A2XXUWrkN*tIm&48Id7H!AfA`m_#@r9qQWH^I$AZEV3Q&+ByGKkC+&;4V|Gv$R zKc7BB#tBEL#+$-#=)rMhOM1n|DX)Py_{DOMZL6L>Ha6BpD-i(Jm$KLWB3#&0N8}hU zE>1L5J*&R9^>QIf&d7)%*IyC!9G$l}pq_jH2`}cK^7mc=ch2Zg=y1ptuO(*=c!_5e zPfUs=6GhZeJSbkW$AU6S92 z`T^$DedGP*TGYAQpw!VbXQX96N_Y`#3PewU5v^)geD0mBV-r=c`2eD_pPoc1t%LuP;IXrNfx!bA@!aDEQt(knX9XTd>%>>5tD zwIA!*DXJ{1D=SIfhkHLaeGc_ieq&OG<@-&z6#l$l^i7-ZETS82AJLB;?TDbz#T7Lv zB*(lnWb{}B>pb(GUZb{Z^NQDxbfXaHM|Wotkut%<4!L6{5=S(4=y|x(<));o6nf$` zQBh@d@jI{DFl}RE>I?Vt124*wk%(o+$|MEQ%GFRM6nX(8-kJOcVK_nff_`-Pu6)4j z6e!~^=Omm7NL+%rcM3iu6OUd>Yx_v!>?Wj3yhgw5CuE8xx_W9 zHpgtA4v?p15XqLzazgm=-zQ_ebb(0(M9W}DO+ zC7rt3+tI%RAeHEMs@fWn3=kHiCIpDRAKSF;om<)1_%Ng+_n9#&b#wcc@#4jV_#+xX z#a(}i@GIK-3276i_tucoU`$hG4$P<%NTi-v@-`9?+c!UopWkJB{mGSweHYXBa!(Tl~ zq~n=8&v^ohI&QH<^m~YahzeKJ!SxKI1CPPjCPmi=jMhHMl#GKPZci0$3rH>upqC$W zCjzj5X{3w>B#+7xA{0X($l?Eb?{@dr=o>+hmcaJ`%~~gY8hvco;twAZJv2a;mO_=W za?*Lx&dVBpZv8?QJ}|0?>jYO3@C$tGCkm_?AC2V+;C*v`d^XeK6To1P%|xLQxw{{r z7h(4gaxXdR-F=f#8$i^*$@u%#9d!XBup)C#!l>h6m%BO92s?N4v|S?3Y}J|I^pR`J^5 z+cFRx0T-1DxSu0_31RnWb6(P#qShRvEY ziM~Oo2SMLdWBho2L$G#sujM_-mHWY#zBfe!@VG(8F72Ar_bIR}% zQSYtk;V$m-^Oe@RYqu8#AzYd>S{8t{V5yY#@y_!xU>NlA7zW@lcGO=c&0e%5KGzEp z`DmqseNNR!pgqgXPYj1GPj**xU?XA{Sc_0y{;CC4C&Fd{SM%nCT^e zO2KYR7ZcZ!(+*vns*oJNut?niE_hH3o``RFjxjEW^_w4GQ^nl&{c3tvkZ;LC=B6qQU;xzhuh{js$X3xq(KekfWN9R`{}kmuKj~E zKQE2@Q&Twsrdsb=?0$lTw>4SvF$-V;!;~nyQx-=fHTA2Ca%rkl81{(SY;yBLDlsv! 
zKN`F?9}Q{Kx#gl($D$dPPoFr|l&uOU7U!x#+~;WB5mSaDLS%I?Wb7I&Ui2R24x%&T zA$3b=D+w5G5Jmny-a5h6uM;h4dEKze>a>^(E!8QhM2Ex+JtP{ni4wA*4u)oGlt9@D zk8fg|OeEzE1vV+SHiZjU>h7{GZ~KrvL9}3Sv3sNV2;m(Ew8h_##Q=cD3K$|eNROeg zw3mt@GaYYi$a4?%wWcmweyzAh=vD+93a=a~^eZ*GWj>TuK?K0{4Yx3RCYAlecJ{P{ zCsLqHH{ED8nW>iZtlW?W5%JF6N4~f$mw#0NFMxIH{%U@^==2)}(aEn*Pm=4X17VS? zncbs^yq8su{sJ?Vm&KRsb28_`hc>ljGB}MBr&8?NiOFc3PfIf&Sv5DC)AZ=bN5uN% z+vmU5C_2(i9WEHkG*vYZ?p1T08!>}QsIHW@(9%9RPgEuJ*7YmB^Qmwkh%0YhB9y$`v%1ey8$?7z`_=*>+6n>yE;?c^+C!bg6NpvS$X2c#94kGgTuMMMZ z1o!Wo%HXZKTxJn&@Cd4uKr7lS48NBh;0{`)_MVOo_UN=Y-m%1d8lw0ysi3R~v41yG ztYk?rg(DCkW1K#rSgkZv$8E3^U z=LPJ3U9##SV0mdu_Ie?XhL)nW>@eY7z}7KcBuSLL?IjMW{1;8J7-Tq$k>b9J*}#9d zq#e|1ojRZ?l~hdxonnasCyYtR{0oY<1e06Krzb%9jL4W|o`*k8iw1N%<~b9|+-1kX z9F@QalO;J*JENM3`SFSU3ep)}uP$9s0N4!_$A-D6l`aoOL$>Kd$?;s*Wh%rbpdB;% z_2_^;vwg5(pit$0n8gd_v}>ZAG0WE8ZzNK~faJcL>Eq|%qbwqaXLr5LWz4pF4w)fd z6(qp7fVDC@hyR$d04?YE2GzoHkpN~o{5v-nTE)KqdU@4zv>c=?04ssY&VV~s)5Vq5 zAusX*hPJ8Q#q(#T?j~kV6}iljCwl63ypQt1Or%~Er@;vjgUH5kzkn5bSP(z^S(ELO zkaIB5A0*V&L5Yd9z;o09J-o^J`!LY1|Bwh%;`J7KXE-f+!{$1#@>Yv;!p?Y#hU;VT zz%Pc}8rT!Av$J?r%sx5XMul2AO(k`kji$Ue4QpD;^Cv9c(GGll(vvM5=_Z!qU)k`a zVU(Fm?K7sQ8b#z9E6oVln+Vcy0GNpdx?;+>K9UG!NhxvdJ@n%XbC>;kjUzblUS!C; zSL6@tryM3o(hMqGI6{7{KQg=L-e+ggbJWVS8JI3 zaxGGoHMNv2w18QWLNmgM$MjOcAd?r{N0mxMD?A`~C=mLWLfx>H?$6uMcQ(M}Q~O(7 z_4^Y}?A=YRzxk*kNs-xgv9SN=c$JuY#Zg%N^@vlrxDNY^#reQP)YQ}<;bmYH$(c7m zUe%NsBR#!uHQxr)cBdBkSp}_^AwNU~gXp|)UcG*87a_&$SE7^JEU$u6lLBnZLe52) z?M=GMl+LB6U&L^u;fsjQwW3R9eM!-0k0lra0OA?Mr2&=pQNKK0Re48gdE}U{^^41R zTW2RyGHKGm&ctLmpqLX zmBrJmuOdO>nmXS7-EGC=7ReAc|49|{FYG3XBbbqYvdqBacY;LnR%7LqPqeK|(R~|Y zKHr>NTrhCMr(|XZ1J6{J9e^Bzz+(!SfUMM%nW**_J(mmH?Bs?47-)`W)y`{|5b{VSF z|6b6<{l)NPNy*7!XW7cyn!S9tDR?04rt*h2jBf4UF+In|_VCN_t+^;}_e2Vcq0p=q5R+nOKOZoB!>mA2BxM7hOB7*YO z;lpu7u`A7`!V{KH86h<{e-LhA&deDsOJ{0SL=2pGAp3wAhu;nr`9ou&KJ-hf(Q*YL zW-=fv1{5b)$lEg}^<#yEozZpR**cg%V2frabRDVU*=c=O=L*`+rn|xR0$^~!pUZzb zA?QJ_8OK_S0>HbT>EA!1)0=V^FMMpOs 
zjL&{%^59Dp6AgdOCsvIK3wylWCi4B*>RfbmwDS48f%8!XcSy)nGSi=tn|Rles7QfC zLR@|l9IWv*f%rMLI~zA0smQ-D6fr^R(c^2KeSBXOi~|s=CySOEI+dEN^l3+UUYaGH zC4QK2mEhU{*F;>FuV0Ds5lh_og0zuZG?PJ_QrtRMEL@!JhCvPy5`W$Z+DQo3t>!zgG zXX$~o z9X$$w+FDq&>^i_w*E@lC^D-Mhf#SSDrTF`==K0Br%oK5bCw+W50D8sgQ5DZ0%KY?} zMc5Gs3s}yij}P@=s9Lh*3H9_&X-5uNPD=8Z zwHnrwz!3O$yC?Pk+%X(%57y<1PW@O}|3+o}M3R~+%l*wchrDacHesE1>`tc?V;{ffC3dNTP)>Vn*71 z3P1_*=j{KUQ(Qv{q+!Kmn;2OCOrAT|mBw?JBA341DEHLK6ojJy#T6Et*yn`uyl!S6 zq8Kc|He^9G)L;|UG&U0R@nXUt>kFR9Ri!xmP!m*hjP|#F(VEQKX&e zL4&dpZ+tO>8cOp56luf#RT0lZ&o~ENV3Z;d&970Clsr8%GXt^)6-C7> zlWAu@1Ll^lU{Z7wz6Mq8R!uA3sS{VRqskm8MJh{zUc}C>RD~9=d-mhvt*xB=n&dD& zYwYzYGy8Gh5`*SNUe)v%Vg&IM#j#13RfBmt4UaJ!{wmJul3ZZQv(ZpM3>mCa@fISm zUuffMGe@wE`cZRt_t<2Rk=T0g+Uoa_*OHCEv~lmm7iJfTk&5e_D;r%B#vNRA2Ki9K z919d88R+(%g`%>I0b%9%VmRIGOTDYVy3ICJ5x)0S$k%u^m(ag6Ah9l+3;=N1ZWRF_g@qm4(%Uo?8%S#CP_od;_FmwY}FkF97KSUh;k8u&K7W*7nXuC zAvv++fKRP}WU8dlZQNob9jFxbP>0vz?~{I`MEt&TdZRm33~EN6#pYM1`;Mime#nYf z__CXqHNJGv`eJiK!DyuR6{E;gvGnPOZH1a>RhR@2f`M(Y3?~cK0+Q0sR3-8Pb3(9L z6fIkeM1jT@fL1~{Ka4{h3pUHYF==Gx64Qq36Xw&)f{y$a?m9Y`z=ZY#>@3I%76k=` z3l}c@&<^?ghK?`6`S@+(gnn6(qzj1V-{=i}LPElYo?VhglW%g;`EHN9<^ds8wz`Q( zAGc;Owkq9YAbI)~(-F$??(i;vIeKT^o+FgJg-+ZsWxg>1>$yzKaIExHw!?JW$3rF5 z?VR5Jp~PNuN$CGGRPdDgk1jm{X#v7D0;GAGmX9OZQ{u%6J{=g+-F2~L$BSOUkF&^{ zp1>m;1It$nQ6>{+!*1{6;}z8g5zJ^JF?`XDyN?%!(lYr!V`s@od~zWMeK7RDMBYnd zjt?x`VV zcwek4URS%WhA@8|Z3ls1hrMSj^LO+*s?XkgINs5rh$=LfG&D^1`}n5=*okBj()w1N znY9pI80xE~EfDVT14Zfdkb%ME9ScO(&kN)J?KoB6 zKTIax%@!RaEEV9an0@VWJh;2^>PboF@VuJoan8GT2+a+e3U_S&< zL!2gl1E*yx=j0Wb#Xv`rZ@Vqihp{|)#E&_Qp7zciwU*B4;~!qto#WZ@9M*F(+=Vpwyy18{rxPnS?&8Z{yc6a zCOGKpv#lu#iJ}MY^TV033H5!w)LMKZK?OYxk2mHT4tCeu_pi)X)hSingRe?L@SO7G zPomHOv1831R6K<~=vT}@!eyo{MO7{Pu|)xK+-Q6& zv-8n4U_bsgREC2kWPd~|-jlUkrtwO}H7=EFX>xNcY^{57d0n^41f&7Xp6PvZ`hF~Apb*pxKQ>3S;01Bc6=eyM4htPn{usa;N0@_=E>Dwz zTJwBJe*QI*L<;)MQ|vjCNyNv_f4WA9rHc1?`SK;af_`_?KX-!8E6*X4zyHXa(c8R& zyJXZ6&zH~CDSnVy7PylvX0W_*Z7S%f?#LNeXj<-U`81=ynC-Hb!TVz>wDJqx9O;x! 
zMKv`B($F>uw5pOJ+asGXI!rbo_nPy7K?OV#lS@mXv9Z)kOH0TvFqi<; zLrnZqz0WK9v#yM>qf&4;d!xjB6Z_i%u)GIeNqG6riGtre4ii9cw>lEM{fUZ^Yfa&~a2(Qj8T?AF1ohUCw|E!pRf1xU zJ0iyi2Gw~1Mvt1&EL?|@75*yAxYM_5C-z{@(bquMrA6cFloH4TeVx(z`vg0fo&kQO z%?$>Vy!9WVQf>P$BTam2e5)$!9=#c#_}CeSDBlk9>=+f-Y3{yPB%Zw-49U2khkj4N~7o@>}po#yp+W^T7RNU1t7cCd>;}PYlWo71zYqMH^YcM2wU=SsS(TvU? z8ri4s7BDZs>`(WKB&3D4Jv+a>-b$Ww)V)|nh;tGY6Mu+a>*5qeX3*oL*`8u(xRnsM z#S0Q-gWhQpY=G!wF+03Sp8(P#5G~@67m__G0*g3&*D(Hptv^tn5Y-8QKIN(vGf%`D zJ@tD9dQn`$hJ{)dr|9xL~S#Emw4oTyx@ z^FRnEEZmC2h9PlsYPqy(R8Dy87kfhdDowk*4-r_Lz$%PZu#a5Fd4H+*rWP%)R?|O?3sp#h`6Ud zw?vn_J?Al=#W@>Ev}!nDiD0-0c{abk^(-ek&j~X4W6#e%BzhuMz1hSn6jIh#*w(;O zR~3??oKbIhC#OF}IW0~P&ra2Tb8D-zuND!8!DLy^K7Z~rM407^{Vmt?-Oo;+iQ;@j zuqnFW*4EJz-J|RNOmv~ik5WbRvwpQ2)uh-PRsQrv9W^H5O&oVdr#kC^n-+{hsbb-x zXP=xdQ&-ef3jW=b;7lAj^Fj87W{F8eevRK{Avhw?p~5KNHb;r^pvY8ZQOf8A4lB|U z4!}ZtyMV=)6dnwMB(0~Kl(-z6**Ki_7S-D#fi#yBD*#;od^eDw$NT;9P~U0SGI!!d z^0sg<3;b>TSrB+CTk#+YM$=Awlisv-yGt0jA*=HrE=#A{y+)wBXEBo3xIWwQIVX&? zA3S7zvaZ?!EoV9pNIC3a=1tEsHo4tnSqLs(8F!rr2uxbkr;}3a^$h7iAoWF6OV&xy z8ZaS%4iUZ|sjonm(w=ioAoqX?GTmOZjcQt0Obn!_4P#s9NS62BrhYk#Xy4&3mN6C= zl8+$eUsfAlbQZ6828M*snBcic@jx+P(|<34ql}lw9OPBVpn)3F#U#clgecV9i`_ND8Jrl z-Cuz{>jf{ZWvkxGLKCST8*y)35x(p-$p3H*wzy*~KAe9-xhq>%2mg5l12UBY5wvbS zo@(cimL@;tiX~>Zeo0hEIEHcZe%>VEp?YxIl=Qi(p65RMn(dKaI)cJ z1q}a99IPLjEw6s7O~iZ|Yu^v51iMj<+9ANlN<6TtUFk)xg}4y!j|KgW znEOCk5kx4fWh9eKoF(Uj_@cHpBVcwDLAVAPBk`Eaps$8R3lndnA`e4c&|9Ug=PtwR zj+GXZY5CL;$HF?1C8}^?o)-H_SS)#%tY^scGK~v!q<-L7mY+WHBttz6*ee8H7Ap_w z>9`9r)7x?s8K3j~uK3d4^qzDQHHMoOlvi*eA(GmF3lZtFBa@m^m2Z($70^njUEC66 z-v$@8^mIcA9FXq`BC%jQa9}TXz{tCA`~=mY5nvRMquW2jSx0&47-w*Bu;2CClq?#} zGw!{;vQh#9Z1_oa=bBp zr29b_ykL`oX3d9^eU&^=vNVf1&zX>9 z9{5;2UDoeCK~-mn;pQGK@waa*Y-~qWSqMOlTpL@)sKOCP^p6jL)%_^)%N07RiSer@#8&iTSH*dbqsXB^*sIGP3vwHq; zC-5I>RR2*flzo7xl_7}$k6QO1%YydLr5Lj#WHMgh2fWiS)I$k{-b*ANs^@)l_pbjrX)PrRTmrt3~REoxl215&)Z`;n2^fB)_Bpl`R3P`AQQR(X3 zfFr%Jmu0HS5xT6QkepQhO@IRiA8=6J>-TfMyXP+$)&hGq=(*lshIZU!^L=aEOyaz| 
zm>u@(PZ2DAG(*Z6HxlW&0i@oRfP|=L^ugSS|D%nn<>@ne05>_^T6ET$*js z^%8hQ7+Ljz+jp~UrWO|1yn+1QZ@YT1-hsj^yey8((=qMoHR)ceMt-7r(FWkFWZ#Lg zIF5C1No}A+c4s63O6kws-Y-w(T1LZB8XSB8;XA^ayY{>7!Z$aq0J{`_QXNTama3fx2zX3Q6QlNax@@32SC!F zo9^B~xqPAV|AR41v4BBAVlw+sd!A*qAc|GaN%-ojf}$CZl3Ci94^BFT?nn`wm;x=| z^wXg`pEgR!c1rhGj*@NS=fRpO{>eq8Av?b2=-|?me46HO5(;)`KuAo!wGa3DdJK(x z19l34;jzyRxN%hV>!=5G;T!*kTmgS0*6i&2b|I9_o#)1Df>EH9{lB>7|7xp>KVU z%AUq-Um(jk?c+kbWGe6kz=2#*6Ctc(?s+8aX%eixt8k9-*H#+M^JRXb0v|l)qyW7O zc-8+%En(`1w#>bP#<$uwD}ed;ZC9t4zFuYt_T<|c6+5_0=7lj3RM7W7N8%|&oa^d( zO`dMAMnnz0XwZ~oo3zra+{+mce-Vq}1nw0SM*6v0oh%VZsLAqD?MHef?g!D;V3qe` z?j!!-d0M*=X^x`|z@s5{=fWe(GtN1Hv|JJ8^iR{qbOSc6d5m|#*ASm`2eI;4oAyvv zs0TcYAw~p3CqxeTA1e8Qk>>E4fG1T#`x3xDjk;lA^QV7rSH~=CR+N{2YiW4|)df6= zKR6;sAtwfuJEw4P{?~)Q110AAnDfP|yz~d={U+D5YNEVehBb&tXu1xNUV2|b)!Ztj zJGl2K-ZDem(1Z7KtMNum6S(qx)wAr{);r2beoi(5xsbOS{R<1&=~u)TRhah4rj!Lv zV0Rba__2681@MErS*l7XHRN+OUev7dTJ%p=vp{B9J-)VN6J`_9iup0|$zy(}MBMr3>x!zg&oCsGGqjSkd7ELNNTtK2VhndZ(w z8F!`^{%K8CxoFkKPft}u>+`7^r-tMnNK zTbKE2@XDX;;4Bdh`#?1P>(?@(;si0jDKSMWl3Ci>WV7<~2cPMsm_YgI8i1*|&syJZ z`P7qf@Dd>1)39HO(s00Q?h5E)-+xFC=FGX`&MV6#aE<24$4NrUVs9J}?Y|+u=qm$j z9t)F{(h%6?4PU#__n{`oK#NfMY5UIJ4@WN{^vGNPB529!_J4?m&(#amsvustrZk(I zdF^j*%c-#_#0xP7y?9uP>uR*c^aBvN+vXZLyi15j3&a4Rr~?EXTso7LtH+H6S!V** z@I_de)ybz3FWYZ8U|A89c#fLe^lML=3cyttFJ4^Po6qTn#4=cQ`XVeW)dAC1ezZ6L z1A_rp7zqbnU0wYThp$r$fBm|8f_e`ybT+p7hAN#tWy@=zWjOnb>q&5Ma0a)yUBzk- zH3e8x5}L~cuw3+-(^(L}@2rKuSIQWB)pp`386ZDy%c03KT4f>zPxbhn$I|KHSwxEq zpTy?WO2-adJ!4|}MiD zcmdb|{l7uSgylCN7XkzehUR{G2@!p6j%(Bl-$%nBa?I^9%jU@x2;`!JdV7*TgS+(8 zLqzrrR4FY7geuYR(ROi4e%MDD0li3a_;IPHYrn<~-euI{sruD`n zTBZT6R~-K3HG$`fFZ`l@2?9c(p`J(7V{r$W))6*6gs;X+-84PBf#dhL4kP>c4D1|-lIhbjt;+8%U(-8?v zKHPVZyxy2Q8@1KqAI&zBe*vU?z)!ro5U!}=RBsLaC6~`O0a#F`lKW2ZG}?OJ=4NLh zobGG8a(hM+uFy_@;)I`$-AaO7D|EZ^xFN#KD4IG6PyOLDcRwEx!*eCBYu z__jwL)s4irK=Gw{LMSkN|Ln{9;og(LsDOYAdWRjW;j8|)LNm$KD(%t1a7ChHzA zzp1^di4nqf&E3aytGfR0X+O)pXG?GexH^bQd4~KCIJ?)8k+{fyL6>&%S@i(JoAt>|U;6O2>7p!=13D5|Q5LvJEVzs^h 
zzK|{OY6x`3$8eIP0v8K15q_u+nuU^L@}|B@L66=fYudO);(=T=^A1IzL%wUKCIGnH z4CJH{(_ZvF3g%p&fjk5dFoI!BAH=VoUw+^U_S!sGey@*sW`?bqvB+Xcf#&yJv@w7y z-%|KXty|@do9}Y@mWfIDTJL~^L+f##!#=_D8m-HUV;%&*uM;{y{zO(X*ifOW`qmHq z=lSnMf_n%&DB;o3VEsA=fkjD~nP*`o4kTjX5fN4ONnGr|i60Tj3ZaFt#vu}haj?S+ zpbBw7uBT*l9k2q%TwS*M2qVPfoRv(SqLl9D*i0=uDwtUC`5Jtzw?5msjA`u;Z=Cp8 zhXinSK@S2qz{v?nRw?J7e+0q7hDhVwD)c=k*7)^>3km?-3Xkow1LYT8>2-cse&J3= z*NRU6zS*UBc(z)1t|5cyX5*r1Bd_5>%`dR6g2JW^bbzL$y-~MR7_zQX3+egRn}b(B zpb@rK@nv|=jIDBjoCTp90n`eJ4Pu30MtKl57j9jD`UkLu=Sps`_Utcb58|i5Ks=B$ zWFrms)H7&oAwOmqwD%dLhC^VKIkTN<2-6U~L;$1U z4>TV%4%bdeAwf|<^};xcd{?{UqF>uvV-;{K?xjrNKv(d*k*VrFAPT2EPESqG-Ea5* z1&yTye1#A$QMk3~^6Vberav(e*2Vdc$5EhVgOoXRLaG?njjJj@RjWu-`emo|tCe&L z1&iVbpi*9~W9^_UnS;mr_LXac%}&T0^l4^N0z{Z)a03@QxxZf^@s#2JuyIff?uQ1< z11pBbHc$}`2^Zq!C(OvZ-;NOq-L&hrgd8%E&jLLIf*A-R=}PlvWweZG1u_kJf6yO% zfK=c_Hkb{ho%IVf@1K#;H#O%lF>OX-=F-663DBPC8Zl&#~?LP3w-#uE$r?qioV^xO_w!)+?{z@%kwRdKTFUHnK$Qs+u|Yc)IlMZ?i4tjL~=!N*BlTCBt)Ew(;kFr5i&9U ze92a_DpaTYnff@M0jQI(=q`rUOU4D~XeT8(;vWqOgy_o_0{Jvmp7FZiurMYep|ZY$ z2k*hGs>v3~b@AUb1QFjB=E_$91BAI2FC#ZC6gM#HazY{mpw|YgV>|~rwN~I}0HB4N zg$HDOxvSsKdF$|nWaBU5aqe`96|y3UKR z8p!~WXr}}zlN}ikK{`-4G%9dH9A=9;wUp`C-}mpOlOsn0e5td!O*U(GeH4{dGdEz= zg#ef8@d*hC1j9o37BDzKTqq2TwM8TzVAuYjE+jIM3{MeY9^Q)tl37^Y5dGw8QO(>n zjp`CD5^&EtBbFK8(9hbSV#K>@ySK=F?!gCmdPv|f&`nb?fr6(;)4`N$nPZRaO(~V0 zi;eF5CSEJOG8W*?ZHI?$BYztxuyL1U3OPc@w~Nu~W}gZBt7-u3t`~yzz{!Z1xu8IT zx^1K{Su8&_miUk8E2wCq6UX(U!*U1WbGAj-^Hix%&7DVSeJkXLa|283;;*}w-Fc+JE+5O*$w|cGpAjC>Dm(`;1)cejVIN|^5RU>(QFjS99yIAsCPW;3wx&AnU&IDI z{f1oAab6k~%gqQiLpByY-%v3ShsscPJRI ztM!2ZVux84+Zikhh`SMHDth=vYF3K)?DdA0?3xEb~6#Yax0^yHrPnTuN7FHGxj z4-?UM^b6KUdLTR*`A|VvGnw3(e7hRto4{~&l!`T$&M7Yf{G6j6JPo*Wr)UvnA{*rc zsBA<0sSEXZ9o`C}9`$AkiJ6 z?g`icc8DRCqp%O>Ae#4OaR#oJ%B@}fPM2U^kr#Z{t|Nm~&5f-=xWl93uTp)o0MPs$ z6&D5~8}RMq^CZ!8qtzcgfZ6hSDfq&`b_qciMsrNv%Q0fh6Pb25EMI7;0`w>h~t4t*qwO}lYY`cg}a`)=~aLma`!rnA)uLI zh$9$YBV3ZFZ8&yfATK2zv%rNY7ydk*yANNNIsQAt?An7(;%hFn^*Uza({nZFkLj<~ 
zH|Wh9;7`8Qj%h!`C3cOD^Bv^jT$m!gC(;gC%H@NemgEnq3hn^Hmv)DB_A$pynHmW+ zsSIBy9)QoAZM-U>P+(XS6g!uvQ2P19ipd|8?wy<%AnK`E7PwSB^k$eYc zIi=c2Cm2q~LAe1~O&rXh zsmL%uiGWE4d^B3+E<7>g!>6S7{OxVZ-v8wJ;^6;Ry8o`dloPo~muVSy2-G^O`8Lc=GEWB@;eO#y)p5K+fy6HW94 zrM6#Y>T^ym{ysp%>KP2&41PNg)MLQ`{8T0_8Niao#%=vv)K8%M`rSokK=Q{`%<(aA z=%>z@T1Kjuj>EyIC)~ci^~UwT^QUvg!~*nq+a80i6m&)L2I*Xfy>7n@+kwFge>o?E zl15K1&q{zCEDMEzVwJeaIHC~}3c@_VYEEH6ZH1u~l(!(nLG{wM6bG5fbg%fY(fs?c z>PVSLp*8sPOPyPwlc=CyGN}o^4u0UPM`>li=d|s<>tb$EN!f=J~kaBY{@{=lt z=pCLNV^F=l){Ui0bRR)sbb*$F@HWgAV|oXdApion*2!Lh#8t!X^Jirjn?b)*t7&yg zLjel{1v;TYfE@cfKz4yVzl-f^bn%|>{ftiy8cWa5&>eMO!erZM$m!34VfTwiH+}9& zKHf!1P(uK;%F(tL2!N#F-2iPRMHPm0SOwxzK=loHx#=R>geyGPu)#$Ka09=NYh0N& zVY0Mxe7P zam6KTt|*sTpAhl)6HV+cP=j-Uv}%D^u-oQ*(9yOHWf%PTD=xvaLqAlvP0zM=2b7zQ z>_NQcgTE)ZE&_rDN?l+e))I*POpEh=t1GcE7)Tdz5V<;qnari+>(?E6uGcfWVX?4( z`+wd<0KN+VD1pO&0KqX0#e5a_q0xty3alggn5IJ{VrDzU=PCTT!7+Ma?qOIOWITZG z01LcA*5=M9^PL~B)()i!zj5P$lLSo<7RDM04v(CN_Ek4-_k3zI0rE+Lqq*RP*;L#( zKili?I}mTq=?h30q)9m?gdvzsiHiuMRYD6Slm6^s_j5nJs-DP%zn+adA)Kh236fSs zC2&0_{mOe?Ho_ChZPao0P(o!u*B_nhMJGp3)y4S5_y9mjIx`U@1|IsTBn-LBG-gQx z1hdBHLrO{q34NxS82|eoE1?m*pZ54~x!76O`mB#lVk`>`*PJX6%y($F9@kirP{J?_ zoQj6o+bQ>%PuvilfiMDpMVE-`6g-A2TSxP_+o(c+KqCbp(ZIx{ym7w59Hqn18}ta2 z0e&91>vto|q6;743p{Uxzp{bNn1M{Nk?R~G7K8+@gIEu-4`)>T?o%~tG8fn&4ScuC zn&{Pqm?x2ADSe(mSwPi)46}`AsplmSg#!ote9#=Spt^)_LB8EJxbA{ySv*J0+af}3 zdtqSar5UV-7&IlZE8$0J57RJ{3Lk8KN@neY{E~8K6wlbY{c4+7IF@gY!`O8E(g#S> z;H7Q-2;5Xp$0?JNIc496|Bmt9hb5U4peR>=7y@kB@8?(NH1+>N$H()($v_3jyf5#o zEivVdZPhYshK!^^D|l=m6qZUk58|2B)YSavJ&6J|<;8)r3;-eR`*^*EwHwG%Bu0j7 z3CJ;27^8#UTTZg@HTa41=6miaKbsa%Q=d31Z>DPQ&;84dN3Rl0L|+XHlhk`F87%S# zKjlSzgP1P}k+|%gV+}Db%;{`)?8`8NA#Os@MS^Acdy}rr;a6tl`G6Z38EK(!1Pzjg zRV&Ysb+MlEKzApNQ}a8wPj6+Gtc2bIY>g>mzi0`FfuB3BoyB@3VpReB`R5>KJnZU# zthzW5!@yh|Z>4(qL-@arF3@5o@0kh+Oh0AV{e5hHCFg$p`0?4ZN!c*5WIDWWeJTd^Jcl=z6!*@-q+Ql z+dP?UZnf;5_QZa+ea2Iany#)8gLlUYq`B4gAt3#R*#Z^e4-b!Z%Vq)^V$K*juyKSu zTe}Jd+`vsD43hpa!W0#}!wbZ151WxW_l{t4l8az 
znKa;O$y8kYbNQ^?;TJnHfPG1pE+acj+U8VzM5P6fydeiI8ch6vau_r=c_FcX(36*s z%`5%(Q46(cgSzJLcW9TTqyC@Hw9cpH|H&nvo&1Mu?)aWl?WE+^e0~oU{4h6wgc-)A z@{$#j3kQ!%1kx}JxW3BbBnUV=_Hg5df~;2%ylNziQH64Q0z5;BKiFYsc=KMI6$nG) z3XjA@kmDb>NO&3h6s+@rt~u-7u&B6>6>yKl3zQR?x+akI0;xyqePD!ggMkZznZz^t zIIk2^AFA8}na@WJWkJx1jcKj$=w{RWVU->LK?mi%1(rZzVq?dyz@gM|7yS;&AIupn zsWvkHToaBr+s-B~0aYH~fbQEPi#;9GDpQ&k4lH1CuvZ-9>p+ID?2Y*L62dA7mZJkT zse7Ak8W84?PG#R7u;chE43R^YgAH_G7%;KmZLtg(+~2 zFiUOoK5SWNRL|`ifecEsP2a(xVt3^D_Uw;mwjx=UVkT- z(Ea#1+0QmnAH&bU_Ajk-D)Gn5agqGtMc4=h0P7?hwio;4^S>LF{mpMy_^`OI zP)#+(&M9sB{0j>-yLN4SDXfqGn*CbALvcZH5pS*fT1iY(1{I#pc zgXes7+3z@W+#MC>o&>a2s6R1-wp&rU5z#tb0{K;8fp zjYZ+v!lBGd*9Gr`Y43yIwH|W^48KpjR_9y}|1%{d$|kW-j){)>U8IMoZ2q^$7-2Ay z0>u5|jvd=T;R*1gRl@)QO&pXnU)-Vb?_kB5o@RLD2DubIlzsLtcF-NQ?xH?0{C9o& z#IQQ^+0K5C_Js6HB+e2lZPov2UPp>i&+CtJvC8F(%-%+7kv-Y%%Bbfz_b<|hc=&%_ z`OL~Mu4K)-*4#&%Gq5%Pg=Zw1=)lB;!(eUJ4HOyOSJ|SE@F`t?o#pQ0Nm8hVZ0K7- zSxhe7Ea|J`R34!F;eV+09dr+SA06$M`d2%S-3jCXR53wcmo_B;v>e$BLlpu|07MO= zzXTC$ytspDXc3{YPk3FQz!y@pr3_4 zI@&JK>_0uY1k>XrM9Dxr5I@G7qsVYhW`i9PVXXvO z@n*q&4)sf#3LqBt8<$W4-V_KgnEyK2izvaIK7DTz--IXa3?dx-^U($RB(vH}ucW0W zOdW26P>DdrgmrEyI1^tVggxMn#b)+r~s9G>& zArR5LgO~|aW*84%!`Ou+f^LS1A9`q~iDpM<3RU`o$wmh3xx&0wNMtEHz)BR3?+34NhsVXi?gVuTsjUim4al6a zB0X@uneQz)!-B!Hz6`i%^;WTdX7*I+_!tx9zd}TYpL_pw#Zh~>D#ub@!)$V=VZIim zrYAf2ssl)ijl^02)i^QbFR!9Pd8?V@Ed)s*d2NXQ2a(-#v#V-(nDo?-qgy`Xh3KUS- zN%zs9*8e|_5Qy<_{~zq^2i8uiHd9_CU!S6tQFrc$`1<=*80+H-b^H|YKs3A3Fv1a4 zY$0%x+9iYMSe%Yo+V$J{m@i`|B}4oCf$YoppQIe8E-57lsRKJGLX|=dM?!^FY?2^d zftASmaqHv_MEbe&)inKvF-RO?h&PaEb2hfrz6+*AWL$!Po5q*&%3_=^c{}Bv1fB6n zSROGh9!{eruK%ollA>yo50s_Y1Y_D8Hy~r=E?Sjjo>m_ITV&-B$YXKPP9}p#P6*Ch z@1c&M0|j$2yy?*P!eXVLJYCQJIu~GnNLJf&0)+k(fFkt_NBLJs5S0=aKcTp8)d+#! 
zu=ua_0Y^pMyBpe&C-ft`Y^kPRQdIw+`*9Prq5q8InlD3qN;A;|_m)!fDY{IsC@Cp5 z%>1Tg`15&S6Rl7NPu!rTbJ4O;q?ksm^T9D(_*w-nJbc`9vXHad*70`GJgu-COvGxc zR9TN=I3Z{g1ULOdq4cuw;rJ-!VBCo=3ZSl;b5s3EZo_K35-XJgdStzA$Vix10ueJ& zD$%3Pv0ey12nB*e0waI?Ru+b0;J>kNXY_%b5RjiFX=Pk)a~@E4;5>l_YNXRV3Dywg z4<6m(p5Z4K^v1wNa1R7fQ0@xfm4Vt!*a-*m2TMqciNn8!@XX`~zI_YrVl}M!q6YF? z%2;j*T=>7uN+S-~c2*&2!|B7uP35WRquXo&TwCFAg4?7yJ)=S-&kB>mV50o9| znD)Bhiy#?^^>1%c^Jc0%l6gTP=r*%W;X}%(p@^DhL(p(!^f?nW2O%7RG-E}Z%`kcY zV$g8_q~Sp2>M<|OpNjn7P7?P(@dJxX9HR9gO~srIINGgD1&N4+jzf$e-(%3l#vO^2 z_dvb^pwx(!^}$xwY1^VN7MDl;X3t+gMRfpZO8ow=!<9MDOPCr-5A~a{q*M5&(>32J zk3bI}x2HE6odqx+zeGx^4qJaj#?Lk|R_xe*T}O~$WVWRxbWQkpzR9!vFlJktvz<5dlCJ!37V<9n_zhfow6fL?e6n#_&I zvS(mNG!E=S$~b3zEp_D-b=X2um*q7c?$@wj!i+*E5qxC7qTjaFwK^bfK;rh>ytKnj z@JjMsm(9-#x_|&Od#@dX0z&q$E{p6TqRV=SDfaT)QGn41B2dX7f^6njum}F;xvQYq z059MNZNq&B-Dzk~M+WV$5XremfQ1JkQ2;6M?fQ~`SV!ZnD;AHW=6~w>L!~Gw%>Wx9K)DlL`sgM{>t2-0o~R@^iLfACNm)}#T*`AE zMVbPuP%sUQB5v+`LLhwoP8m$~Y6z*cmZC*(-k{weGa#Jo-H(Gooo1e~^PeNslmDHL zqw8}A=VAf^aLFDl+V6p}?@#acG&VNY;fpe|OThtjQ2$9PVR@wNZ@c<^V`D~$QL`NQ z{1lMkvp6SPV|eI=g%40fN zq`c~<5I{1k!&{Od?GOHG%L-Y$9*f@h`Vpvlc$gY|jQI3;cF&;$R~Db%bvtzYXc^y_ zo2)~&s8yeJW3Q9(3qDT#ut5I~Q^&hejbIdVV{0p%%@Avo$_1T5^ci)-ZYuQz=C>c3IM=Y!y}DithufjvO+d`j4`8n_)2 z?X^nn832Q6GumDpmUeS1eH;2$z!NyH1EMo%UHaWArU&fTQZ^D%qt z7>|^}I@WwK09@#3mRyBkHAN3mU^*!ttWBN$#lAg}+!qerLA^Nx9W##Sco)&bciS@jdUw22vP#lrF3_< zgeaxbEg&G>T_Pnd-QC?Akh-(=ob$W&-1~>mbB;dB-rx7WGi%nYHFISR+ags)0#(Mo zmaL|9Q3xri%=v90=pXq#4A*X}Lt7U3s2%PjFbt4D0^gSFerJgH$$ApH2LPz0q+E(% zC_T6s8yYAq#PSAu=qlJDD?iNwmfbu+a=0g4~Xd#*g|;5P96 zONDIictDF4EN&};QU06auayqZh`Bm&6@cS0V`)chp;kDgrC<`MA9QqdfTdBL`b)q6 zo$Z?F^FJb%-R}H72QP0lY~w3%+S3Q8=C%Ifc{SL$+%0%W4%?KkUc2@wF|ixc1QcK# zW~0Ub^S+Dt^Vw(Q%peH`Xe%LuBbd1?QDfn=LT3}0=sz;;9n(cO6!=*2uawA>e37H{ zfy0M!tVkmmax)`PvkHnzuy-gz`6A|5!k-E6XG<`Z-JhWW5A&f)rizOr!!c~Pf*z@o+m$63mQ45wJljV9l1DazcbamPT;DOw?GJ1h%qoLD$b%Bqul!}uhxXC~={CVLdx zavJ!futb5cyAuVhuAr2whjZO!2% 
z{^yS$QK)g9^DbY0bFJWPy)rjzEjKqSS8v12jGzf!B6QMZ8TayNu%0|>KqfcXfw+c~50xRZfG^f6*Ce>?ysjMVr$(wR# z`qxwMpEdJA<{50oql3v`59HH$C|%}1i2>r#1#v43YQVpFQv{}mw?{Btna=Y#TMIY; zN7D^)EBEqAuceol7sy^`x3)qeBauVTikbppq_t_)-jPr?7g?De!pbKNg@gV;lSpLixfi!jO~2Bps3q+MKKWq@&A&Zv`hxme{lxkfV)QI1pvmlBw46J3+9pCq{n-j#?A#QuMG<%dFSo2%mH|YZmsgtSUx3ph|K8D=uZ*Mm0nF7@g zFz~rLzM6<&lO(kgP2}1XsOAJq$RqA)UV?lnEiDZlM3ps26NpqpXhpKDBcR=<*;?$WSA+tj_d_ zz%wcie17fAZ-Y+3H2c|AQe56W8YfAQSEaET1K3DRX=Hx;2OCNYPC&1rp6U7X9e8`(dm^M&a4 z*P%0MFTTM?jepHDxpBgdmE!Au>9)G zI`*%L2cDF_Nr%w)vlr*&qDxYsR*;+0ZfqZk_wl?x9IkM}v_*5+zPG3le&-=fl%*?FvQrXd2)b39bgUlFgfdkh;6 z7&(HqXXO&*L2Jv8nzx?KzdlukG8~psG*@AW773(Xd(!K2mb*pmVz?@C)kocq+3*!< zuwufOZ<$e#9O{Zn3*J?f5^Vqn&KJQUxF?X})?kPq@1>cdBkPPKrk81Rt(DmqayN9# z{s=sze#jFBTmuv3u;_n$EG08P52x8Ehx^%q(&fvS%a;;dV2#aPPTLihE6W;TZw>q@$XUS`?=9nVg#?kKQcTkkiuIQqJv$#*vm*$e()W#sAn>JDZXE z3|QEU*qnIr@+F={%>g0u7a*Z=*lmDG^6;vG!4gA{^uH)rM4irn`N@EClP*huecmD> z`y8`t`F&Z@^a=`65|Giine(5(lAce_!VTi!(bg$AR!%BCn%CKvP$+dz+esS4DWi+dTq<< zO~kLT*x0dK!fF4WmmL=kdrRZP%?aebDHxBo4a+KJ)zsQQ++qL&!`w~_mBDcSYvg7Y zT>}HH_lW=G46~#K?;c9yNR3>trd#N~G##-xL7w!0hksZXIc3ClnUk;OrYvm$Yy;eh z@BwbaxI|7}9@VoffDZPaaE!Est3H(Q7rtCHA_@DmQN8PT_>FNAzYRpXWo_Yfo@>0p zks|5Dg4+ayMb4erKKeplv9s#SwD^8~T@j09~Q5A@*8WH-}x+V=61fYz@bs!Hxm z%0ExzorpWJ#$(;q^nBA`8rXJ!)m*ZT5JSE*t%U&&UyH$>&ABPOEE~oqhsxej>r%=w zsk;8Ij)~g19tOI?ym{6q51n^S%dU5go*Q4=1~ou!ZS8Kwe*x=B!%(I@@KR&Z%~5Jne6lri*KS-Puf z$8a6}bocoT_zBQ`K{v+JYRV-qC-?S`YKGTc><_-g)6&wqk)-Zw85s6OO6fX0Ze)g8}x=37q{t(3$`FX@7I#Xf_Uv29S51?=F14N$$~g?~*TW`Y;0m z&?|HrAg4jEz|9RdsC%uh8dq0W=Q^+;&mi4x?kKDa~>>MCX6-~(>Nq5z}2lP9*^Vnc-mwrc-SF5h|cq1A* z`kO|7yg+i^&rwlPStEhMCHkE)m-6k_2G_=KT2%sFj{@5$^K4!q-yA=1!{)Ul&G^6zv++YH`1v`*Rqh_c z5KDOke%t!?5${d$jctd$q)FZRwSmx=^aPT`2YA#8m$C)Rqqm06B*djiULBP5ldd^_ znJ~jN>pA&Qiz=f2v-7vZI7Al(g{M+dL13ALbw{kkn6OO*b~7KPW2HejP#7ATU0WT{ z6_>5z9gEnROfC@kWls!BfVqv?_akrgm!n!9WIR~P;y-@^nF^((nQxQig0b`MtYbwt zK6#En{h>)Zo6g#u>Nvrp?Lne$l^d&XRB{x>lZV_^Tv75=nQvgg+=xSwdLYK;u$xzn9`uP6FQ(#y4@r=7|4kC~*3{kv88m 
zz?9~Uk^)Hm_8R{l@*$P;S6t4|F~n*0D&UBcf`-aX4{j>d)I$_f?`Jx&FB&2YA6@a| z`iu{+_(}*#thKir-By#fzZAbx@*ZZg!K zExPX572tTcgNJ|0t(If^P8jQJH$G(_HD>_%Y~+n-j$De|&%^A0AB9zA7G)e?d|n=l zjNk!*jKI#-yy{(aHKI$7r|MpT{U3d3FQ!b{ zU}}c!gIBh}-HT_wIvLy~My#DNRI%=4bMC!8GYNytK$c` zrzt};#X((--2-Csu00*@m#EGAk)N|O!?n$ zU2de_$FK;C*I4yCh-;4*$zas=qT_gL>xwGZzv2lW+@&6EuGfa&S~he`2&z%A$IYi(Ra4;_%u0{Bw==!Gfl@6GYZCwJbH3~@w(X4|UeK;_rkcSQq z4tRKZk<;0g8T*HZVBisGQ#?ke2_B^%nOY~SZNHklimU>^+NC-a1qZLnzV~A5m@M@v zxy~)G@ahK~W8(hm>yIhjgK_&INc%WlgOHMun^%9R8@1YjE>A=jQ|Pf2Uz-Sc(Lzxs ztyeZkF^77TI}j@IK`Lf#2-}5sdO;P2c)OKRLvd-dR^S(c3$ScdoN(mEvy&sspTc>{ zdW?VbGc#DY8}y6LGVK0^brgxag{GW!p>k=LRdZ*%%5w| z{%_tw+u?^X=Raz^Rcb8+6Im|2#)^IySYL>iQjB#5&z(Hrsl=xX3g&uA0>_s4#-*NM zFBkAk!qziw6>6swE>@>Lfy%q3b_$NX@tBJkRy*V4C3p;{B` zC>5{r{=vmhF4(N`ZsZLu4P!JNf6(`Zc{vvpxOt3YMPSlu^La@%J2Ss}RMGlIe>_ub z*x8hu1>qhDkI3WD?X%E>_}u4e-3ReYUwd8}eWUHA>+eDRVF!yc2uB^dFug;qA!6?ZoNY-PZml^Z$v2AtpL7;EFe5=UN_>7)xAjWRT9b5|uM zo)Xt%>*hd`21wD8W@b4wj0-C(t*~7X`P6>?{8_I0?jLFlbi?;5Q}jGa$tPNm20}^d zoD1i67L&wnW!h59)54xrDans%PgiW>+?=8K@oD#tzNvKfONjg;%3PYl`9RHvT)|*Rpq7g-wC5Ud)1UJl>eT0EVAd}9W1&MLW@LMu`erS zA}{KOWY!EUzb5-9v~Cy?62M-rlpnIbW3Af}i@HbsVj^CEIP1;5!jdOJa#%su*xKml=DNqHz-P5vE=^+m9wG?84gQRZV9@b^_^k; zVaU6WJN5x>3l};(ScLeAOc!1|S#(7^+gx|5pYSN!y>;S`NEzZmB8t5UDiB%SMr>%bWS)`TI7HWfq^bm$2hH}dVQ3&ANURhl z`B(+k&SAC4B|)Md?zU}+eKx_&;PX2OVY^xmN0e!SCRkzSNO;f{F z_4eKPZ_IivJ8m6Q6>pKbJ#QX?DY5lfsu_OEtyx2tGSP^ ztvH}lo1;Zi#666bl#J&K2Ri1V!=`$+V%FT^BSXXcut8QEVC2(h&ou0H{$;(;tjvz8 zj(;5n-pgcE!~kVIU1~6*fQh6|AN`h}F-pw4yF0DFJTBo*Z(?v@|F~jJqu`%4-A!oD zE9g0*%>`l%ndlO;8@Z|m^jHHOBej8I{Nv98^7R{!{Mf%aIZVPb6nU<09m4=5<_B$8 z4o%;&_H&+;J=(&VZN*@v@aeBTJI$U3%eX_N<<;o&h&dEMG0Xj`Uv~HpQYS-pEKm5O z@?X>L*=K8aA)q{`hm42WU94n9 z{n63*Gqmd$Syfm1T;tzej_3~_dw2prk*)Zd#pUt3*W;Puu9f@zl;$2SoEAgj-)I?p zj&v&48K$PEwL#(F*~~&3B>Lpy{EURtm_X2Z|04_Ke@Bs4zb?a&>Svjn$2SyHE?12j zq#q;rJHhjC(4mI|IR$w%3LXu!ssR*dM-GW}?+*gLROf!6^KkdQL8gCmeiH;SR3k4} zir;rlXI0;Tb*ljYl98C~RK3P2t^Gdqhz1Tyg16YpPrV2riXvBPF9_#v3{wK_(;C!y 
zhdb?_xis@ph4Y@}CdGKdKq4Ch-R{|_k>n&tNP|OyD|{q=30#(L-qKJdelUDOy8ANvK=*z&>G;87y~wdQTU*hdr=>-G zyp+v|y(zbwJb#j621U`Jm@dwnncQ&3;fl}M+X`||ZvMoSdF`mE)jThyyRyj{Ht>oM zY~3q|eu7zRCXg3_jD~?|6z#i@=Wf1*3EttLs-ZOHj^n9A6WBKgTZme%iXcf%^LvBJ z$#;qqJleL|nZ)s;FQ&qz#|Mm2Fzj=6Ph%c*OclHp;~Q^<)s1`z3#ty@Gsnnw;}Sl4 zsTEL3Wi}6~r^?OD&-*};ugy9QMe}RrKbNk@N6??cWbLNAvM@!7+%ur=Ch*5TQG{bZ zDtBOI<}-l9{lv(OSBP0NxYjMug6B%C!xA9cR^h_RPi0$%S zsuX^D(a-#SfyD%LX7-531mPmjRLriONQB_7l6bxgC37~@`L@KOA-eMm=R&vd6UF%F zL9BSyHD!tY+BoWJp0g{9rdpy!UwEqH1P)sqduuYJdkgtx`GHY|FuJ!O zynR+_R2*WK{j|o}L#PjfaMyZ^?q^Wv5BV)=n4~8;&Jzlca0ODi#K>@We?|!q_cX)o zo)$yW_t*s%GAGpXRde9-KJ>lS;*>Eo2Wpl=NXpIGr_t#i$2c+$Bn!)RU;v2I^>Y62 zF{}UbEY1aa=NI|47|g}7C)iMN-y5!znbhJ4eh|9|%L(&e)6f)?bRRmRX%Vpiu<4=Y z^R9LyQ?{{WuV7R8CL@&RX37A+|GCWL3e#a)FLf@ba}K_DuL}onS1PI(zjVRk?F!IdvAa?Gy|IAOhm39Utp7?kgml?xVkuQfx;;6*4Y zD!rG^X)-S25&4$j?9lsUf+Z`yF7p42)aD11i?|eTQ5looBUSEA3w}&s=CH#kXU?9@ zIqFO?Elm4u4bY|quabGcQAVk=EYovR&@JGa$0`C5Jk8zo`mH_E^P zXUBczrz#rv00W~I-SB*8ZLwtOga;WNICQR&I=70d7g(2l9Z@Y^6t$o~wSTHh&{xJd zI~hmacp`QQckz}_tMTH13YAU!5>CPDBO$2USS@gz6V`v)3LI=g6(wQZ=GCkxd+YwT z)ehg%`tK|X<-a8r2PqbmRLYD&QyVb)^Y?FO_zmY8&9{GZlZPDQe6NE1KEH^S6bZcg z;Dvk|H<4qRi@NErgFAh{Nnj&083c7``shQmhjBQ1NO?mcBe~ubc3|Vfb9<&S;1t<^ zUb6v{oR-d<;9ucvRt3yRJ{HTQ|M`lJ)7P{3Y~f5H(hy8h4+mHZ@_5+{;wrV;^?Hj+g$1?c6CXM({Lc%?@x zcz^??O5Y=}T2xY&8e7o|5Ze(oW(Dq_7{^OKzfGxP&cNbM#_*l=!8Hh-{#Ou*tk`E` zSk*d$y|A7<5nQl)6&(BfT8sT_L)Kqx%wq*#;|KIpqb@U;!Bl5N)%w zvv0Gq7~p@7e*XsFSDxC~pCr+E0Q1R4w48lF#@sCSqV~1iIk_ZZ5sf9m3dM)W06H>k z3lsIw2x zOSCUkwJ@QT58G)sj>)tgY?|6(yuZ|Q zRL+6=VYM^p#d@?1SyTm_YMWA)xkn=}`QpaA=P0%of$4n+XBipJf>!%;q)$5b+Rc`V znJ`fN4@2&--Ht&Nc_g`TB~A#B72lV&w1}m~Y+bffF>|*$Ct@70_zatTemqMz==-z*3_p1m z_Wa)DG?Ar^b&{Q~4FJ}*sBmF+dIL1Y*a54Y9=2Rg;5oASi@~H1`$Pvd4~2fG7AE!) 
zF}Ujj>wY>>y)uwZ12p$?|0vn)?8EJI$O{QX^ z4mZf4AQ`hr@jQ8DC-T^V5286URDx7V(94HHVZOOocW09mN*=YsA7s~qp+N(AMPNdY z05CSPc!O5??AJk%N4J$%P2OpvD{fu0qQO#quKm}m3I%=JTILb22bWun-TWlilWm&= z{ni9d*L*|ri}EHlKtz@Bod1*5L^g`FD{1jhM|sqbH< zY*u1>qwDNkrqhH#e^2`_^!bZIa_LX7ep3y}q?guz#}K5VWe~W<=Q{~SDp#rCmt}e%nF~Ki})MN-F*iDkxfOins*CBGy3d?#l$WnZx7Ql zup&2r#Cf5rbn|jD1wIx0>u>_+iOkb;fi-##!g*KqYVrYnCaC$Fe6O4u_+?5b`pAxH zLxuX8`35hKk?r3)+|K}!(kt8PV3OX$#TVMKO#r03Xnlc3HP;FLB;~#UGUyl)eurTH zREp#!-x9-lj3xv0ky?JqcCTGYpq>=I8L!%6Vk4yY<2MORL9v!u^Ri~T)5!ai;Ld-@ z4+yenMK~W$HLIl8p|nda4roo5cP)kTtwfdLg!FSy08|>9v3tUUlVYx=pRn84*|Q8bHywsI13UXRh)+JLd(8rGxdopAFq$kW*#?+a<}jzqfQKyvdk% zX$~P&A)0#0_cC1&HVW|ip4bIBWNX-FpiMTemIeao4IF2nB#$0$ z(V{@TJP4MaU1Q=F~q+< zQ)WjhGZc&|jk)74byn>xe4tWs)9!OhWNA!{{=Y^YqD~>tmk!q7XUaI3eP7=bynDkC zlYZn-XG#e$Wk_jV=pzT=^R4vAa-H!$0rq1oT9`LK9hji&LmOW*6@Yao-sH)BawZv6?SYG_KQ|u_MeAlO4Fy_RElSZGgDNVn>4jI zYI1M!iHof7phUlZAQ0zr{astg?rCwEN~r?{KznwpImTe6$X^hWNyC4Cro|afWQU4z z^L#N+U}ulv)Mcmji!Rl>1Sz3#Q}&X|%!6+PL06u`1jz37 zjrOQInvjls0%X}A+%bJ{sT(PZK{+2dvHE;jhq)^LZ-P>1f)}#$d%U|~B)gxpe=d2T zk1iEIq__?rw#`;WF&5Kft>F;;V&{#7uP!Fc*TLXF`GCUFWRr+-qR0=B!1AXsXyYy* zD<11vhO6Z#ppK9R5_JE?hO4+4v+bM;Qt{5U44*>U3Ed-o*2!5!n0|r^Idw?IP9ZEgU^a2MPV$b!*xL! 
zWJ7tDPG21(U5h1WG_iZ~^AqG+i4A;dAl|@Ouc||ll;CB`TN+R}ie9ZDK(VFfqr`ex zOxajx;MURHFt+?D&LzJ4xj$LJ|L%^zFiLD^^LT}g)1G&kN~P|D+y{^EYuG891Qs84 zhZJuOd({b@b4*m)RZ&}920~V7Dm$Sg0sneb9eW6b&o^gxn9JQn9&zAJQfKokJgV=} z_B6KlWi-as7W51;>{3|R^Y zSaA$-5fAGJ_$N2%5F{smU-S6=;4q?{rVokFal~~U6@}qkAyMBLr@Uc)RC(3_(^Z!v zc7ZIG3pK=*HJn?&;0)0JnVtim$?5sc#|h^hqm^Osx}h6j`=Ohb`gY6&9RfOABEWKh zRoLr=nyOlotgM`T*SgsYl8=Ky&_rl}UIH?dYwrwIvu)XsGSWh?V()!jxv|Le8(+{;RopB)2?${kz7x(EGO?*6ago+!AIhM{N&r_N) zki^KtX^T?2jRYI-FR_8rKpf3q|G-NMR75cVpU6W+k-8h2*idrf_U_}N6?o%K5duWB zA=Aw#Isfb5JS$LU9KYE1#;iERwx^91k7N7bFLNCap-6Q{h~nvb9?eLE;r!sIGMkGA zoHqq=HIm`DB5?tNS8e%VPOJBbPm!VwFtJ=jo7$U*1!BqOz0DSIO3Ry6aHi1RD_L+Y zn5CQUFQfwCu!A`5fvVefuweZH_OP_q7Sw@$hVp2J3fl9Jq0TfUi*JP{Q9}GFZ5<(F zqb^$?B<8`FEAW>ZW4vQ8$tIFp8d8mY07}ojHL>A?8yWdY$-6X0#TTKe*Zb_erQDkj|nDn8;Ku$)b$*eLMU7sLGlBfd?~$kjvUB1r`aQ z6fk-umkr)a-KJeZ6_pZeK}U(vF7_p0S;(e&KxGjmfyvJg`p&^p3c zRjU}XLvF2q`@*<&t@=ef(R*}|0YazMRuL(0`znSF_zuNakep!lc-^Oy zIuEbzh>P!#vuDFc67zpQ5(50V%$Z)>u+F{5f=D}$_F_44n0y=0kdcwM0KL7P=bQ1> zzxH0AJB-!6-@mZ{-2hV_b(HTm6O>;8e098S0xlM2I6+GRrMzx~6+Dp1&y;tay8rG_ z@?Wc;+!CzOa(13N(srWWbXKaC_VZtV^%WW$*B{j1kQ^)YguZ(6(6VFooZvjwNHYk7 zSULK+M_^%N(xCD?fF%Z0!lD;BT5uOL)54P1)wM2li~2TzF;e zeT-tsB0DHQ3Hr}};c3o`{*g!5pao&3uu@_dqYQr?&+}N|bs@V}9@(d)q=aZSqCNo* zWZr_XizMTyaNKTqZVgK7fxc=yP94w_Q#LQM2N`iWXi2{?3Y;fTkA0Hl?mx(3s!K?x zX~Yuyhwynwslz!o}}a&Lxd;@qa+{a+lys z(`7YI`mb+I9V-kf$y+?UX=$DD*sxj=1KO1J_6@u*HQKnd=&rCrO}umIC5(~|xmTrh zJ4qZl*6FX}afX0weKg&eZOYXn}_Ft``(T*?b?np(;ZW7%{pEvvGAmYVZ?HinfE{ zoA@{ATulCY;q>QA`3qk^d3p)df{5fAWD&CdK0E7f ztz757Q}g5-`+&T%W+T9UBtOGMBL_F97*j?MR6YX32(EqA^yc^E9L!A)9%HWqe=-FkgO?1?o3q=tB18#R1A&daX>24EDj?th zk*6nv`o_}}KaIS;TeE(5>mU4E?#XQyFVbxLpxRo)#jq>MSIHTTj~ z%CMUa`KgksW2hijc=kEhLB5d_RvAM-L062+;5=9Wogo;1mc(Q%CDKsd z!iQtm+Y>0gwL#6xN(k)h+vdj~`afPpa_pe5NzCGgLEY*6Q|&XmIv~i2w-)BvpoC;1tS zOt&!BhL+#&bbIPvIs#AT8)ipNbEXm|3;-`J6>h@3O;Crou3kAt%C7C%RCN8)w{_F; zT*JA&UQP+i_=};ywsL&VRAd-zF}dSoXglY|{=X~kpi*W|kzn-t+jt!Fs*|w;ox1$n 
z`;+d=^d*v~F9%ctC5P*X12#@yp!vrF3;^;+V|xjZxpoK&v=f5$jsTp2k_`GD01i)vdQ0uIgTf#J`*LnmW5TO8VZm-K{ z&PPjq8-w;s z)ojywpjQ)EdWz~mr^MH!HV#hv(dHbwjEorGW<06B8CcN31Y{7xLI=m+YSx4`1TJW} z*PnI%buT-^s5B+N3Z2M zus7=QxrtXNXTIqmWdF|Vtq?yqCi_aOJz>`S(cDc3)KEO7_<%Xl8r9Ua$?E;PC&=+$kB}Wp0@PdIwV*A#|n}h{3>WWOeRcqT~A8CLtp$S=q z{T!?FikLCFd-Z8ziNcow`qz*DL=r2V-b~K_zh_hC^y z{rNWDDOkuO_8X=%t%LSX?yHWePfNogs;PiO0dK;fQC5`>gM)yLflr<`%-v=VwX)%7 z6{O*)1nAH(LxH**o>%E?SHur808NiX%x6qOR;RU8iPb^$+fr$mpgiV3ZPZnB^y2meo4*5t< zEH>m*3ZNr4zU7d&E2Dm#Cpy)@`Z^+#+o)&busM*G#`wK(&Mhgu6jZbc?+MMS2!H@cfp@!p^kk2heFD$ZR|D%4+OasH z@?W&{@yGmoNL@SRICw#T|5P)Q^JjTjx_|p0yhDf7mOMRd4ixl`!(PoiLSywIY14on z_#HtMzNK6$f?Qnjjr`&QI38%698L9ZMFGixr&+(w#0xG=J!Sp5b*I?Q!TD2*`H_`B zpL4dmebPb-NBpaj^>rcDQ%KbhhBo#B+MF|2);DoW3IaDE=-dvvv`CUBJ=q`{o;9UPb7S}_O8^P`WAOGJ4>th2x zbiUik$*udg@Vn#H0dXni(%Dub4Wqz1eW~mzUVjv3b+ysyfw^D#52P0f))5!&J`zF+ zu+K?;rE3Ekg09MpyjLb*P%vnD??#@*Nc*!P1jr3aXKtiM2WpW=$sQaQ*p3ll(>%?& zr_m4}?m-3t7A3M@=wxTfR815>Cr*vp+(Q>>=B07XB-vluT%cQ-GhDa<-vPLYIUXVa zVRAj?-Om$c&qwxNzHy})wzj1Mr<1Mt&lxZ*CJXbXFw)g~*zS}@=XA4vWLILCPNGhz_qvJgx7GZZ3k9__1aCl~WqcssXs@({gt{5YKKaKoO?k9+nXc zTW0PAsj)kARYaRp>4FmfO>bI-vRTjzb0ouo9$2rPnQkJY210b|pA$HMNOMSPKobv5 zXZKb!^%CL4vPF-_;Yp~W1Cmm1FetFdDZ4=finIiFVqkQUb4p4F+PN`tB@|r@4sY2G{=aAIMq6&^z{Q`1J3rG7Y&}gRS;3X@X~?UtTecCMs`vM z4F@buvsqC54$ z$ppJ5g6lkBlMt-B>t8h)nkp+ZS{!T&)MCE|M;G8%O|0Elr6Oz;;39AAVCs$)Oprfd zxzo?^3-8XJ&A}p&tjA*@QHR3-p&%6^9I{E~*y74NM zJX|92&CCBGB<=zvU04=eP0_9S{+r-IT}4gBk4Fa~YL5ErheKcNK+FN^X~jDT>1jlaci9De?FvqvhxXP_0P-l~k?dh$P`p}sGV_(UWZxiL z>nh8k(`@pnZsLPH+55^K-#np9EeZ;$GX4EV;7$jwbM0)t?LpFZy>$E4^-(aRFuKr) zQl7h;j-TFp|JPk)G908F|FP6ww^VOm27}|)9fLL3e8Aq^(NDyk*T)ckOOX)N`J!XX zl3>5gwQ2V$cQ+6F=QKp0y*_bpUGm`qlR@)XQ{N8z$-E~uNsXiNR@ORCEs_?6RwQWO z1x9C>fq!Z;-f40G%?37J2VEtG)_WK8wpBZ$c{E!Qi$O8Ds-R zeI(dTHWacgEO)QLPb)+7)jmfl zpEL{pjs8~nYp{Cb$qCMzU3qpi*Szj)4f2cVSDwjqY7cwX+pZ06ke14!S*5H*k5i(A zHU+|N05AKpo#wdis3NlmI17{RikzQO1KtPb>zgu<*MC`@fQ6VmCI`r?Tw-x@yK5_c 
z!4yYI#Y|BF{TAfPdYJO<*iuDt0#w7&gs%>(isaJ7JMjGH^&pifM_Z>X$7}Q4#LDO* z4ogO8bF@8-3WOr@2KZT^U12CKjk0ZzK#4&#ZM;x~W($NFu>Kir$mI1T4K0&D{)*w7 zCar~iqVBfrz2IBErm@2}Lg;cU7C%i?L#9AP=YR)YTfSjH^Z)TJ18%TiOq8dDF3(N@sK_qpTaIcly{MziZH`dn~~f(UX1?7k5@>wPVWdp!Kmo+>7U!fv*=!@0s#c-!deh&ff!L zXNE%xoWM%Iv?;c_L*=C+;1qGrk7SI=FoBpH0Y>$;X1$b%iqi=aRlXQo?x5{1fsXh^ zDb6>z*Na=QsCb+q%7HL&QP}3ZWjS0Je~GC6!u*6AXpy=sjoHOQm{NfBK+kB_SC1cx z3z6^MVL5P$n`pYOl5cbG*Z$s}#>D#QD=p^c=3sc~&@a-N2BS@5CxF)PNI8V{=~VgQ z*pj2kBoU*)$Wj3`Pzlh*JSHiVRX|;WYIEf=-Iax<)jS4*4;X&t>q!}Xi56QrBOycp z-AW3AjdgN#`1e}#2U93EfdGzy3P;xBVJ{7DBy^LaHS<4)M7Umg1Op3v`IQRm_F2Ke z8DGY1B}OM=Djh|o=b}n;OAi)cN4QkltM0ad5p%7ThY(NPE5| z8?mX;4*9MZI-0TQWqYl?77ucAre_j0f&XPk>bpVkq;gVNmH{9+dgcN717XpZb~j!p zcm>}cTwvtCW3JAQPAIRaR*wNEfQFF-1?pI6FW=9kM|JmzlJOUQ+jl*9V}NVA|9ioI zg4-+EpuCJ9zQRp3CI*+(ae2bgVH;ze*UHmF(j&xd&4JylT}0J~8|qbq3wM?rCc2BC z_GxA+kL(ptA>nFT$A(MXb9sTib7-{ni=jf0rtrE>0n97v51#XNMx9*&9ZKgO(pfT~ zZ^!t4;^Cd}xMf0QodTLtLqLb%lmoeIbmCyQHfXl4x?If%`~fY_;`hc@FE@%*bCh!i zn=@51)7;#{Gp&yKPq>t3fty6=)G@=yg?=KEuN|UBtmPq{D&ZKz0eC4Y(?#b_a~@3Lo~$RC29+%5DEo0PP$Fq!SZW4(Tq3o8$tHAHf;= z62x>+bTO%w-i7Hx&fGBM4NF6kXSx5jD5=KHu|n-731n#?X3Y$&E?xrq9y)CY`$r(T ztqig*DKYxY<~rO161ie55-B1=s{EvU$B>3rB$kBCz5ilOnnDH7;^#}Ce!dMtSltcF_moJjm4Ez7ic|lIO zM0zrNxPbZkDC!nDqp(EQ&qV-GPNi#S#X+sQ6ZK34uWt3h{RV8PWlIJJMk2M)>VZW- zYU|XafCYtTX$%KtFdGyuJKCJ$*;~a_YVc5=8)N({mz1% zY?i{VQf_h&B6JEoc^P}sdKZkA-JV$sIP28C{64*v=!z6S4HROj9ph4FLB6>w{@)A* zpCj-0{{FY`+?jReeu8F2kr4(KK#Z{f1XR2iv9Ix5eyzR_K4jJ1PwAMf4GYZIg}4j( z_A%hGNoGpW-1BH^hoMOLCHNW`|Qc`yATksQVDX|DqARa;kZKpG)6-*vT zflqMso_nqkQfQ3iV(Si%>h?A-I$G`#Z?}NtMI0;GU}O>oC~&^-$k!*&;zNEFIlb%! 
z3;D@ALbpG>8^xldzWv*oTgl&K_TN_wtQq=~TNm+%_o_bNe33U2#}c1DFKzgfbWv+J z&g?*={_+)@Mc2vO)T$vQ@kGtExdgQ0b9Db_NwPfjDirsbbb&wy+_DE#0e~PlC>*T@ zvx$(#Zzj?bKYmqgZTRC;hjS%3knd^7D)=jUA27!P5ZakK*4pX%vP2O|1bh(m)L|KA zKIqcHyZ=MK=#L7s^$s`fTHKNwTV9rCUd#X6NDD2*l&}k#$0(~e4q*^YZ&g9%22>!> zhis(T*!l{hIIzP2&9n`kS@Qb~cUJ@H%K^aZb|Lh#h1DOuD^~=HcBjl72NzCknAF~* zq(_$mjoHKSM!cD`I$;>h|5a|vUBP~x0N6gw_`MtLhuJza_*3Wlz7!{)kv{D4fmK4V zGWZ@61}}hb5vaMj950WQ00(R{orhL(YqDp-UZ~K4hlGSKQ*)y7nEuZ+9~sr>myrsN zJPkXfIR)AF9QBShBno|>J~_e0V`U;LL%Y!Zy1geJNQJqzRl5f)(d9QzCs$?R<#p|t zL1z?)is_sbBthYR#xT(2GrXlI3u4h#ZoJ*nP60$Nf)e_UZJdTRxI;rFq!v@*hu zq?*gn(!6pT%s?kEaFCEw4c%e)X*wQABg`=CMz6@XH5+?TF&+Eun55m@yw!*lw$W8y z1ACY0FqDp0rC+_Kh!}YnHth%|67s1-gx)N(Z>w>d-cwSzzxN;!LAy82Jf;Oou)C;F zk)fQP^hp&`W)?l}1u`>M4>3v&d>hYZ;};UXH^b!?l-s0jL|wp%FjqMvm)TTVQSq9C zl%lvi)b#pce1`Es5O_QwLKAY8W=|-TE9lbXbe1i{@H>5*csy z<1K#OOS1Z+ql?hqG#$Bcz|saRgJfR3g-=7vU{MJ)^yM;4d$CWxz=kYIAV>;p4D;r8 zYVN&@sGBJ4c;mYWM&?k^xa9{vs` zg#U|B7`V`q^#a=Ve+a_Kx&ta#j`?TU;ZzGA?X}3}_jLcmK4=*BY|P;yMHQb$yq)$U z43H8?kn8kI+*(O$<-n1;9D_)Txz>dAd!Mbmz>>Q36>NX5hwsq#&@aO@I(}GI;b^nD z6kIyWpvh}w3v_l+#3v)r9NIvQi^Cy(r0inI4jkdvYjkn9VhXF`EI)$md}xRW#@?xG zXjoZU)ej8Zg8y-Kb9A+0nH-^u0zNophz~T}F_MyaBI+u;vHuQz26ClQ z3T6T#$5H@|qzB$;vHN$aZ=-vfwb=8QR(%BC`n+tP|9of7#j(n9H}Sli^*o_W97hU< zw_FBQhR2Y001Qh27C_)&FAIE~(Vb!rU@B?g9mAeZwxfki^j4>eTo}Xpu;Im47ffPN z7#xBo6|_3Dg%!S6ddIc~ONylro(C&EA2(f34A6rZ3BEj}I$-YAJtEg&MZC80+%x{YXo>NRVpm5a50nLA(#!BRmDOkaGo*@DfM9uR}_d0U>HP!j&{sD z+8u?ZPe_^+;P;iF$0_OVPC=1rbuweigw_1Z*F^PkXn_QUeIWzJ_rN@Nh*~$TJo;Om zIn}x^`cGRWey+OkhdeM}zJR-;s@zC^4Q~}5F3g^#I9Be}IPz7&E}@IvVV_7Riy7s~ zxWj9pYAY6U+Ens}m)qVR@csLvmJ4vvGOt=Bo{=uQhE&DJHO?%~qkHy@qdWrl%w{)9 zn_q((KwdQPgx6-oLyxbq3R0P6@lH$&CFz*6EfENi!PMDe;-dekjZ)(OVC*f!vRb<} zPy_{)Qc-D8kr1RoIu%f)rKP*O8&Lrj5b0KtF6r(P5RmSC1*E&{jOp6n-s?L*&UJpR z^D;lbcL*5Ic1e+w$eP% zB`w=oiE5bd`Re0`yqKz1$brw7%XGY-Q+?E1ew2O7QR$DOdNAw}`aR<>*>f<#kEjvJ z#~Xk;A%8x;E4}!n``cd8JB#2b`#zig-2<6Y=9DyrOmO^1!W^@`){qZ&aBh}&_!=4>d+#z(YPC-Z9J?E>+voBXU4nT4yn7(b<=snD 
zqyv1zU~#`Ag2dQ8e*P+ z?<&QczRB@lL@#_ot9yID6hV*$aSCWVQeAU>DO0oGdh#9C)uEb$((Y;AZ#=+&=|TFO zqi#RxOX>qjY?CAhWJh6l3EYL36)^R`dWb&5e>Mxa2O{3zT)KgT#{djW^rngp zRHv~cV^LcGj$Qz+nkaH4j$nWq5dd>yaV0CzKg0Vl2N&zC>b)l?n^MCpVGZw4RlWNU zfL9+;v5Vd(3_*=Nu-iST&?f-iMm+kJ%ej=|vao^TXK zah>DCAjJ)FCB?Mm73xj7AtR&u(|ZU}0n25(YmGd7(hB)FTX-w(|F$viI2+mye{<0H z|7w-4Cztz18F|zJ4UYKCp&HmIESr#qi3R0Hmh{9RZ7f)(0(P+_U<;*ry(lR24mv%T zWtpl0du4LH)@W8d`_+EF_{`h(q0pvW<5{ii-^ z#uEI*pd zkSww$Lfqimd1r69&o6_QtBYrNes%U4h*UveN`*8~kgx)tGvMXKl{|4FoePbt#U~lT z;?ANW%Wk@PaXlXD3Px zzQFqlpiIBUSv_p!_5HCF08H)$wZSj`R<1CD|#aSTB0DvD5K?gex zV<`bPYeF14e_2{B`*!OV?QBR2Xe`?F>=a-4wRFpYofn)s;=0YYAV0cjGpG|cV0LMG z@Ixa1@Yp$IB z%-@#$>3pT>S|zy8j@H2M_b2vaBiNIMZ9S5bt@YN=@!=qvO7keaHJesUI#kcJWZ7t` z>L%J5Xgrw;VP@mCzGseaWm^RFu3oYF6`OrSV148Jd4Xfs^ithtU_Lp*qkKr=UvI#l z)orAWxh`IOCU-%|I$rv`hzW^Xs*ZI8D=A;%k_R&)1lQbsuRjLhap2~Aozcik!9y)( z<||>C?guE;?6c~&r;eUEC;^8ao-pD>(eB@S;5UVo^6#pQy+XHUd4ZG<{nz6 z0Vy94Am6vjP~}6hr(|IKda;NSKr})t zvW91di*0Jr{dr%ICuucZ@aSl4aP4?G;%b34^%?UBUgmB?MyZ>t<1aLwe z+}&uIx8sS+XLp)8Q;?bjN3qr2#JC_9XvXeC3N5Q*ZAf;w`+q_vq=JVHU~L_mp5pk> z6y~m;SG(++VR|02gvhwW@U;juBNEm5267;TVTBih!WuQ#?1I)AyMU?>V!uEle;ZI@ z00bZn`&Fm-ImX4kBqs2r{#l(}J^bwL05&8AR0)Xp--?Dd;-3QTEdo+iv&waN#90EgVO*VVGi^S@s~Lz=9>ck~lzmGRh~un)fs5VJOv%5Rvh_CU2E(V{42 z*{|8Tz4>{73a7ITg0yLwd##z*({x$#v16ELJcXdp_6g{MsZNkcqcWx1y0+~gkRGaK zIj?|DuE5M&5Q!^+_82HiLm0C*&T`J{x(AM{TFbAGbZ0m!+9Qfp_2QNuF9^?7&H-v) zukkGqP2wpnMuo`zhd44S9$%{B-==zJ-cAQ8f$-6hdMDV)n^@Wqx%2!@8Ed+?JHdb8 zT6jiH5k@S;hbEgY62<9IXOTRV)T3@vFv`21uy+1hFt`&gp0nTL3D&3syD5-}6xif^gpQp7J#Zb)2~t zd9SS|b*`l2q*^4lzQ$#D1?4A{Dv6J|pwsCQoI%j0fB0ws;gl)In_v%RlZn}#cG{iz zB*;xe{QF=?o{XB*DD#7M{>t{%`{26)t{y^HAY%89vvXfFVWR;D>|v0-0=wyAJwt>S zN#3ls`~)ChNH*p%)^3}W`7ea&*CU$uFJqli373+e;XgOqB7?jpFkoa4m-;S1a0@^; z4pqv^al5HY-Xq%j?h!29w|YypyU89h+~=a{*t9=3)`R&QuKPoaLwuaGg8ZZP(0BJB zc#8bF81`K&jelXZ@X<*C6^10vH7fyxDQJ`>HQESC4B@6xw%NVWzFzD&18d-xH*~R=U%4Kl^%miPj|1cqUp}pNqEynp?Tz%>k>gTTk z5I5nFo^qd)ACM&wjB;bDceR26N0=wcfx6f}6#A 
z;VQ^6JFN1`Hb;bHjl1e(@jj%y(0DYJlOPUq0$}RUL2L0v$^1`Zf3WlIU?+<_g-jFT z#1_pQEVes29k&jv{%+ccM-UV9Y}9Uf5)yX{O&9>1^^iP-e4A>z4?xn$f&C5z25ki5 zR;|5^P~pr8H^?L^|2%{^gN}d|5d?s(=%|JSZiGh%^(geqzcM*as$0*m&);%rTb*$A zpoB&VJ4jL*luJnSARFb?@wV|2|A6VY_s@`7L>V&|fhhvq&W++84f0`Yh77z!-bc|kC7*y(95)79$)_4rm|%e(4jRc9M^%^P+uxvh`E z6$$$EEr$j7z>TXl-<;mXb<9W$F3VTi~Ps9ln)=lWSxGEc3VF}YrN(|-aA?{5WdyoO;XcJJP&9kqzs@X<>9Hhb* z4Jpt)nVe)yLgJ8-0>7>?d8^i!Hj3y172^uE;m0S&dz9VT!4*KpiNAP^viq|zOmRnm zBXH+es!}=A0z0jxitmOGROtdolIetmxR>D#lp@oW{ko6x8S zRh^O&B@W<27{a}(S8(yo0%Y4t{~oOUmD7SC#aqU=o*OHEp_=bZ-idfzmObxdwV($eVWLz+=8vLMOT^h9D5X)p++vP*jNt-+icts)l&Iaqgk-^4SsXj!ur00RF? z;Dr3k18@?6{Rc@kJBMF|CGQS}sW&ZVSg0{=tq3F)?L1p;sw-LpgqAO}4s;A4N(TD6 zT<4`#NnN;Rb=!|W5d=M%9KWBG-f7_?qYYZi9^wSv4j6Fq8CLL+27j%Nen`XC5X(dJ zZg}K41tC6LobA2bk=pVm z7j($Hg1>XXi3c?TNKx_+|Hyz41`M{2uY-}s*ez(SV9aGc`wx&7vL`23A@vIMtDv4h zuqvPf;jlh5bi7qd91nRQ;C7GmGn6awU~kKHOZL}> z?t_q9z=q_+UXPS&J+L@RaRR*E9*pd9E8E?2Z7+OZ!TbrH6jQ+A`lvFLH$)i2X6{rz364WH9LQ|7D1Q(^VBr-`$`__Y^a2M z2loV+iN&hRef4kAMpv0KLEi(qS;Q|3rY6WrrvX;66!tH{vH4SH|bZlI6}{}Noe-5+~GSPi#5dLNT!-G!+Z zBPgj6h^~bRk?t^(Pd@8n8)BE*c`1mg}wQoVM?3cLNu zWpeLCnX+}#I}yH!^Tb_N?Kx?k{Xh)qT#SNbHkcln9cldZ-QB%U|Ht!klE4716o|-0 zN}HTuCCi+aEC76ML{VG(@Wvp@l|Ou1tz*r8jfN62Ri+d&Y0^(KGV^rdyzs}5tAbQn z_Uwcn(ZZhcG$NVvHa$Gr87~0L#!|5T&w6n9paJIT-~wyHEezOxKm8^#vIFp5Wv;?W0urPj2ZpgvCGqTb>fSU>?u%ojrS3D z2y%gUo^~06g$=DJ$g2L+g!$7e!Nx6v&0t$ON0K$YQw{VBLX7)W* zUYq*iTR;Vj7{^Er1T;a?+SiJj+Yqeq*_Nf-sPNeznZNVM>*`uwr@3yALL;_cs=7;& zMOLsqQ&iH1|8L7SU&YL==ybFDW2)yEXk=8Y%sCnMa?-#Oj&LdkeNSt&PO)WAw@7@g zYT!Mv`JDX`(S;^u?NgWTLq#}0*cAv?B%}Hg@A!3Gutn?80JgQUY^#FbLgqcA zVs$X?2Di2XS#R9oedkvvA+s^Qr~roT052cC%PnQH1s5gAq@zZH&%jOO;kp0PSaS-n zQ5ieqe;v~R22j;-5=V1SK<#%*9BxfsU57Lk5;RC;1W~MkcH3R#@U{*Qg3n|F0{_4? 
zDxsCIdj-N25wmbG${n1&pz~f+GEaNoMVum9-}<}-bUx3Zl{+YNxxPmJA*<_+aiw6L zK7lq3=t2n-D8McYkR(9Xr@5&1&kd9k5bXdu`BqP!$BQxjiE?=_o(~OU#jc!ZhSNhS zrDP=TJ}7<>u!A>)$aMh^1sW;UB32?>`&)8veM}Mo?1$Jvi~OghVbg7tLL>|jZX39D z1@%}^jwQ0|A0$RkDe+s45wn9k77kN{c?-Q0#;2{~V z3m+b7$8uF-}&V*MeJG(@oiR@loU7>CGTe%B4tgz@8~y1{62c^7BadV&&OpxY+dpR*J) ze;AgAO!O&`^%YPQFKeE?iKb zV7UpjXE0!J!E%Z=0JZu_aQTN$?}9t0{Et%t&ow>R9B`8!3MHskf+ao@`T~SPHGqr% z2`Ip+)bT}A6j_}Hm=V}ui3A6Sx4db4apoAX5f}SOLd1f)XPbi7mR zz^%E!Z;l2H83B;gu<|3Fix?uxfjmz_gfAgc!N!oj2q}BdAR-!aJI2S1eyFty4Sgb5 zfCy|hb#ac?j*;p%`<0on9#;Yo!NDRx-=r-31cCx$usnl^tx9m3Sp35|DZHhPwa0>> zZ(>58o~(0uRHZtuaNYnbk2e$NZgI9iQ4Cm6xK^bcZ8yz=9LeZ~5T!0a1Hfv`OCM z_p*ngMw9&wB86ut$51cE^a14*M>Hu!msf^IZC4c5CL))2c0u$)g}_q~AE7lJiESKi zQMwq<5JAO&Y(<=i0^5ckP(`;uN;-RT??5Gq)`Wb73<(!#$5lccZNL}$zF7Co>T13o z3vcC(=o_>6;c0Qi9wFo8#u6R_UNcKl-=7cq2IK$|uUWzq6PcKW=)pxg8ImNhYhvv_hk z@OBLwav+gsFcaot+pQC;AYQg$A*cz;Zw? zhs@@PL!fnI$g(kFY6eOI#OSYkN3Tw*vBU~cWBIn6?ZWjByIr^!ZVc|Bw9_fAt&!D{ z0JtD45`ZiQ=QkEf9uMr+(PE+?%cYwI2TJRGJRcBuBdC5V)fl4k0v#s+>Wi_%k&uSb z6&*pqBj@%SFa+pnDLP-F0{25IiU{{e@@u_~p_XHHgdkYuWoZ$88#Bc&IrIq+1XKc} zOlrNw>>KSV!6H{M%Cn&`H_T_ORQi6@&stc(9M2Eo@r z2+#`_?EdyE_mzCixh9hf5)tqP@eA!*f(OsI)3+0j^JSRX^@~RiyjD2`wKp+fF##ls zJYP2=N8BjLJlV=;)^_=M>ZE}|+Zrd4lsLSFtg)o&xp{z@V#(m*oFsk>^sGyHE{cah zbc>0_z#2lNQakJ8is?UANMrSQ)oNkz!(R=Wf>zV8k`RIsz`O{VV;ID`ZT!7wc#|)= zBMH;IeIWU_;_=HLfmXhdMu89w7oQN!5x6O7JKLgIIrz=GM=UA3_B|*#R8c%GjV^ws~o! z4c#$Q#*Hj^F<{Zq`Oia2Z08(Uc;yGz1yJrT`TP1!hhh-Q*~7*kfgk132=5I7|2(Wu zlg}`{6)J8g_YkEl{T1RokqVjBO;E*g>e-T)(C)mP`b9nyH1a2fL&?wyRTi1H2ZT znH>^xBzptoz%^>@F;sJ6@I6>-SV7W5ZUT=V6w;slG)$8XWbi%-=lz# zs{ZCgfAcCx6d}|N0z^e25(^Tg(bJ5cGLIJD$dLqd4Y|!gIoh>Z;=0G>r64uy4DZ~2 zkj;%JoE0!%HM(vJ#2$16h(Z0$mv&ozpzs9yG{-$G9*v2C7bb@QwiAFpBGp>$=?CjF z{gxUq9>DMXyU76T&U*AqnO^$;IJ{JlP~Ab*R=@;jzEGaE!OH?+n^^IgCkDljm|TZA zS^ZH1DGx^qF-@}949?Ynqy?d7OoeG8$`~50?U|2+i$qD%AbOgOwL|hfAlwZLlmKl? 
zoZge%@4?K4l;wlzLGKNP^;VWxQWvAGWrQ~IfxQDWpd=n&?ch;&MdwzEc?5VjOdMd4 z`Tzhp8mwl5CyzqvMdG3zC7{3cRjoCtutw>*8LAktY!vW-OAs0-5(0Ak=hrGuSybJ8-mN> zT@Hd9Ots9O+A>*vLvGEcHOS*wxU-p80Ig5pK@{K>fH|!YnLwfi>>$z2S0iUHS$&Rq zRPJW)-r)tQ9`lza;I#vM-=*gG@j;mzq~5=)?Apynjbc4$>TK4+6-iN)SCwz-@7z`c z3m8ny;IBLzS&14YrX-X_b1=hI&Kk%U0t~AAn+BxRekn`+Y5R9!M1Xk;Tu~>Z8HlSN zGWKBo`49MfxP_b#X^2-w0bcYSHpF7r(WnCB=9`F9FXd5Mkq*fbi+{`17-nHh(rR88_9=ho^*sc zvW2xi8G>73$LsBQ;p&m6)op)B~;m=QU>QRFf zPgc-EhR7ap!PWzeUz~TbKs|*-+{%r}PxtSB^bcbIqE@Fn5CcF&+_Ig0@yna7)c?iq zd11j8KO#jK?|_#0-Em836LmzaNR;_2z5m;r`@Pg(kSrJ;Mdi!!v2BUB5=HZj$fL`2 zZ-dVa>oW|xlT~wSHC3lV2pHA9e;|Y;l56OBuQe+?Pyzx5$uY0*;)n3(9=ZY8c7!65 z0`j53f)$iXUt9Da$dEI@f;h+{fua|8k_`%2uZsQ0iA(%jpT=mc`2Q{&Z^wFHV+7F7 z)(EIZey2qp)KY3g#^XTnstmrsgTEOiU`R#utY{#wNd8(=(dJ~9Lh?r8nVRMslr?SC0m=5Qvk7a)fXs$eu3 zGX!`Ed;nO0Vkm%UBp4R2#An`4RiqExyVBSg>oj_3pWJ&cV&@#r4gDCY1xz#-A-Iy*Nq;xkdbv)K39)%F_$;o$;&B z;nEEj{bEzpr^VC$cpa02Tw}w|snw{kO=?vr8hqFXjveUY#l#Jje%jLs_1>*hol%5d zo>9p6RIDE7AwI2m*$gmnH=0so?>1dn|M+=a?usykjd$$lP=O9H{PV%Gofh&O54H>Z zqanR?-phw0n}1A1u&}mBFFNH9}K2K?vrVcbU)0y7T?xkTr{ds)3*}f9JQ| zE97ifU}7M&E`**y0y$v#^}t^S?uL9*lUs2evroP_%B&db;JkSOs-S^H3I=$_O+g0; zrUJysqn#Ifv5uVam|&H#3u=^Fxwk&5WUz%r18^S*O(aY>TS^SLHk9lhX0^$2g+(nWGi9V7= zu!qBH8YJYGBI)2NBFG0fZW}~M?pO_82R)uzsw^^G4gqQQ8<=@W8gVG_K6h9Mg(Z>j zx`+a7O+LW2>KOqDqX@VvkxQ9b#yiP+NNNrsIYeJE5oQMsQSGiC{4Uz(EjkwPp{v%v`#B?`@S53qmRw(>_Qmg|e5=hF$tkbR> zmjIm_h|OdnM;ta{`G8;|!;*zN@+F~Y0DId6?!OLbSG{0iWqRiDrYbZr$gP_}hh8_) z7UA1jDX*#B2=CPeDC^MiyvxhtA{gwRhGcXbv(LZ5ea>?Fg&EwrW!ZZPS+ilDb-JfB z=%Dq66?BN85=3w?m2N}ZNtVOi)qNOrs{oYUNiTaVkljUAq{sI|WIf&>n*lCWZV?Sv zk%EUIYCzo99!B;D+5AYjfWA-LKZy*ml|&xa^(+Jx#^F319l^Q}yG~ucEHv`}vf_Re z7X(3D1!)I&n0Gv)0lt>VQJ7*;!bQ;P&4q9B8L&zh2m>|UWu z5>l+YAi?;}KyD(dGa+sJi?*3@;;P%ZVX^Md_CSwHr>lPKZW}slHf~(YbL+=bR zUZ`(9p%k$J>M8SKDO0?O#@cR7NC-14qnf)7J8Z{51E_cI{g^hYzN9pigiIDhKSCco zLUb9AG?ZWFJvx5!0i0IT)*w{@u?(^Sm`Em(P#9!$cq(Mc-%@{WlDP+1KqlZ02`$)v zL_x6xD9$#;5WO0lmN{4(0EbLCF8%h<*?ovie?#itqwue%n;Ql-VeS0v 
z5_m+Wf?cqH7lkPc^d(5HF65Y8_yyF$bEiDr0bdWeqGUl@54DlF$L$(&Mw%;Ckbq7O zT0d2IixSk|H=C7DD^L*H9oa9F-~`9~-qUICX-AcrCBW7!v4za$zuY_mlt3sXLbmX> zmVQx_Bf~GM41(Z;lkZ-2zm-c+vS))voWiJ-ad=8EI=Nz;LfqIR>FLbZ;VBuOKU|dHFNV&8Ii4@5jXa zXbPfx=5y`Nwfj$Poatn~pgUS;jF0AYI4`eVWlSk5tHgTsb~^F1Z&{aAON~$i+2!8b zQMvoBF0ozto?Y=$d-2tK>R!47!BlGP$opeUT_$-|`{~EM$CeVviZsZAmkdikYPuwT zA?=mxiGT-DPd4*u7_WWyeL`_23lCBg*o4gufQ*>~V8IFyoD^>>awYJp9V+Z`QKNR~f5GH>*w5g2 zPcrVqbF{~CI}Oswh6*hj*-oTu>Rh8OY1bABTY%>HKAK~^dcOWD6#xd|;(}YbNt|Z5 zhjVXEoag<=g=VN>VNpu2pbsV|z_Z~GMheC`)GeGyF`Wfq*NAQQpNrmBvDiL&^-ujr z84rvONIe913@v8qd|> z{*JgspBJUpe(2fwu!LeFr^?8ASRQEIU*QZ?J{P6`;%bt%5tU4jBF-#LA-yiB)9Ueh zDyn9%am3Dm5L?D5flVov=c9MD-SbOw10kFJuGc7U2G#oGJNc8R^&Li?#%>$F$!QP& zJuNrGIyxX(pT6Iqmj!!ml5@ugvnjVrn2jg*Zn-QAEXMT*URy_-j6W_fkR95`O6v=p zl$H>2WwoJyk!v24MNU#YUx6Eo{&cK16ywm}HDw{$gJr|@Hvbm;W0HM?o;41ni9!pl z(RIk}d$;)VHX=5h^U&}dn2|pIsIB~Tq-|d+Yw#mk%n#?%n&zfT!MTnvd0P_!rSO{E zow9_zd(>0P|GR|hZc}Zd`G&rxQ>p?H1PqEX-}r=STeR_J#5pitT6t z=AU(}tsdc@on3{zUlG8Y;V867c|E_9C>dn=K|RH&B~q;YFkMM$f#}boe@J~?r-Ty= zqy|s-qlH9u5u}scSIAJjv0~>}5iRm%w4O6?C$D~?!~0Su=CnqN9KRz4j@h_hr@ynD zs(iKuENruyRNKI%WIB{5*5PNrv!Pd*`IkV~2w3l>*=qX6Wy_l&IlXsSc0}vBIRN}M%yB+)TEKo{$D1(lDw8HVscoIq_`#`}&%GB{L?+J+ z;>T-cmV^5io1QN`+ixo2c=z=8SkV;gOGb|OlCyi^FNZzvy>}+`lvrz1aC~!%I$Nmh zjhz0qeBV&x;zq~pKlk{6;81Yxvppe^#%3}TNs`s&0 zIqf>FJ&h7#pP#kizPyJgyq@|v&2gc=gsP31c@}mVgp!2@OiH&-j$2qb#j&xlf3s!g zFvA5hGqp+Mt$5De2IiY(8^V#}1?YRfjq*VR0ng(HNKRe7d-flWN4zcV@+&8?oV9$7y;xm3qda~m8^`JQO7hp!m%_U5Q;QmfWi&!1&%cTpJgi|718WPRyx3 zG}AIc>9o}3+{VhZKLW$mKe|-^l38(VZlI;h_tmE~R?*Fm3AAT&T(!5lURkRaF$5*< zj-pbw55{m_4!vXexT4`}lxrbB=_VU5Lu*Bnvz5@^-T344@3s39_qQ6Cgm@Gj;xReR z>|aWHKcX0IkzEXXz|Hv4Th{6}b)=nWkZ-%KZ(%UQlr=0Euk9r>R2EP8NY8W$MrN18duq7`}Cfb@Mq@>c1mo|!4 zf@aI)n9s$t%eW}mTr}^9oG7L@gq-Kt#c_}}d$KmrO}Cg^)g`>zb;N`JPc6~7!)p>h zroI7!p-;;-wiCwMIYix*aq~I_vH7YDrsPxM@{E{0bYGdA{78sRP3@H#weqGv`cOaB|?405opvU)|;D17^WD=Qg~zYiTUc#d!xzo3SS- 
zkfK-}hbO$~w`)Bd!!_^G)$({OU=Lp+d5~^lfQ8@ph;xYrnA0O_$Cjx^f46Gx(`W+gWH~WR%1}dlNF;_2rNj_WQ2&a$jT@1#w7jQe*JrjNU za4zyH$Pv5X^(-?j+DRV@miBl4ZZ_p+f%mdlf7kL?U`7n~f#s>x(oni|kvI1`8HORcF@zyE+cI4-1o=XZ=bh&ySrd{;mTZT5W&3g ztzV;yt8q#=t0&RXyJGVE;IoO9zW6+)^Ihd&P$Xvdr<(>KI zy?8*0>XmyEY5($mfx>ZRL%C8MoCz&t|do)5+DXiu^h-_`c_nU_km^G;nNB*n^8UV$66 z8DuC|_VDhc-4D@W_J&MAG-;}3eVd^d%-OfuOPD|#?f@@APdcY$ZGl&idQq@tI}F<8 zh0SOlxZ?1ff;x5eCxdUnieq$mEuDi%yPe&Ap4NUh_Z)@rq3Hj90|q0LD-ZMz^TtQB z@e`Jc&~=*6b~%Ei@BKF5!Qxh`xZ6+ERGrbtqQ;0Uq@Xb4`()JfY=tLO%u4OTZiCsT zL0TnItruo1cYQ1danU==Y=cC*^6)#K`mls*G*}Y@*=3G8bKrSij4wC{qqU zajL&ArHqZqMJ|&@P^{kB2!uz?Ci8cfA+xuS7T-YcgZ#b?@AT{>x1QnzwQ4RqO`-xx zhg2B6QDYGVCL{?>1mqIe=Ll0TVpA};ft{2S7T&#s?=xZh%mG)SIMDp6=FA)7^}_WK zi`XYa5?O-IBP1(M14?7S&L7_sLT5vyi!fy)#K?pTZe$Bh*bxBXGDpl z*}+@6Mp~leNo@E~7?$E2EB($0=ImTd_Mu)gq**yoW?4qT1^vY%=JeYxb4%{!uTnKu zs}helVUONDxK0umJ(#7Fm!RE`qEtSw$L^b2sDDI7G+4ytN#gL^f57{@i3$n4s0y_A zZ~OQ8il@{M>3nn4Ac$OjW;}3)`>o~C1s$o@9vtgYf9UX7m16Ujf4+n#?*HFUT)E8p za%;y{bU14WLCT>=%?csBHiB0}s9x7?TWu>Rv5pJwnbH{lgfISWTBR?F8fR^^Nu4iH{;s>#Q%DChFkg3)&+~bq46ZH z%&0$4{^b=7Qxc^ltzABr2l^Ckp}QtGJAJXt~KWfM}(AIL8JS|(hTOAk7bS1-Okw3c7lla&kUpHUH&N=Ju_~c1s zmV@$N%#6$4(B5%ZP4hHbq;2BRI>i6V_LcdCoi={s5d$+^KXw?wHe^()+bYymROek8Sg*IRdMe|XEI!h+2IYB2pm4@c&O^PzRq zs@+6~XHN~i;1iB>lO|8TAB&rM1&>xNB@oKvtLl>(=2!j9sR=4!jWarXdxo!%-?Gq^ z^P#a`xDM6YuC>gVs8$#Ax|c4Lz=qQt>$$_lbyo9b1wD*pn?93>hkCZY@+O?>a4|%9 znsch(J8j4`jQ+IF1@>^q)3;y#Z*aP78=AeB$>#K><&N9X`j3hg#;V!_tkI@Nb8ou# z31&#$CM{R-_|$_U3Elahe0N5R0s5}Qn+k>_;ek4_)=@^6@&&4k?pSmfe1wf_-&Ky! z<3DL2(-)McCcUit@S1mB$>COX`5?sAc>$h_-jlu>Ma9_Y$nuXj?1^p6d=?Y?zmPEq z*2iY!SkUG*E*i+qY}s*>fzN>qua%RhMlz*g?K{Dowjaz8-yOQu|87f@S;iA?nOQw> zJ-9}A^U_Atezv(6*29(<>JFxG@tlm? 
z0XNKWzFL{^x4x65yC9E2;z$K!z?;h6Vf%a7P*pr^a9}V&GRL8{K+nYVfgTcS(TT$t z-}sbU8O}Wo5rDZG8bs~v?83d@ym>QhVT_ffnOTe7^+MheUvP3phJOMVj&nLryL5*J zpdf`0I3m73VhCG#Ma3PjgrT``^_AU3cdq^s$%iuYiSK+X+zDOFf{nfiy>5lKs774H zjlvME!a&aB;rt-iqG}p78mPnvZKYjOhLT;D`CaX{#z0Q@iMO7!jj@xqa{3471L_m0 z1^3Nf_ug7vLO{(Z6PAmT)>tmt0C92E;v-XE`fKX)Y@M*#kCc>S(sd)2+0sRG;OOK1Q zW29Pn8AoCYlK|Sl*icNXgaSO>NG${*fV@{3B)Or#wlz()RRuXU!kmFlV67wB|k<9iB}V1kT}huQiND- zQ5IT%&h9|vgbuyXL6*1HI$q2}?j~FSDFV0+%VOQBEP(|l~tmVqs%}p3F32DUv>HlYEN4i>UhM_ zLnsFo3!sc1Yy^|)jPGW|twg%_@H^@Dr%dA})VhBUsJ+eZ^HeLXfKJ_ov%$uQnjG`; z=VJ#qKQ$1z?1T&*+BN5unT~Jl?@(7x)AgLqL=|h|`9E%Z?MMS)2t2!&>uvrR+IJqY z+|c%uH_c1y@^dOyKyDaN6+AdSi}tw;nQ*kFDLW`tTP9sW5$Gcxv{q6aZ{}{F+wLZP z(B!+6-p(>0l!5I@>hRSoXcP@D=Q(feni-vXd5^}d+&2@t(1^2emt#G5PAfD!$E>*j zEwKtO`G5zE98zv!a_hqIIvCs{oB2{gb!BU-_7N>CjUM_~_u&7=U)#Q62NOQ!$_Ue# z_kX6oV+$-5)4~yhac+?Or$NgeHAir|gsRRDp{s6i{atiB+;icFbssg|}-mgiLM%s8^n_`wms zw-DW(Yo`I;Z_0T98=eMBg~uEruXWkd3Lc+X#8G_xm~v%M@krca{Kw3T(&gxxGTKu! zHq+)TTSiH&a5MY}$vVGSXI4tz!K~svT$}{0vUv57tKUyjiNUNvttg#5w5pv%gE`7C zKNP=SRK9>b#S1TTm+b^#7l>7SXIb$J;@&b^PKvhu*J=Fp&QF zuGWQ3djlA&80~2AZ1WX4>z=^oO-x zvIGRzY=gR$FK=4goO;dvyKr8B>RNU@v~W;+NxP7lEAD07Zi=a*r=ic>V5+D57rnvY z6oB^5l06f7bZj4B5P`DoWJ`3bLzbM6iRE2*^jmnw$ee^Uo=qSy?|uWpj5MuQpr7^hn9gqh?b&( za%sqyDw*!F#U-98=#kCXW2krgFF^z*(sEi@R%GBeYSuUj=%|j{RA76V!vCe(f2ZAS5UV4>#L_G?nrKt|=TjV!xL-m!nn^cbG zC};7gd&I|HxK1>_voO)+sF^DPWmPgR0)o)46I`p$kv{Gf$Y*peC26qNTyI@-*jS2} zg(36?>9uFwN2_Wz#yMMFbr}jhfV?WGYQZAe3-HE#kvAuTCm-Tm7IyAX9tF=eMWYBb zY0qwO|115)if)1An+QxaWgTDQvn*8Dj0NL(DQaFI9eJLwbz;DDJJd7b!z*ON7K=2rwln*$5b z4UfgIJDL&Q7-~m zKVI)VLZB?raR(j#zSFl@M8XJf=iBR!i*l8hps&}nRK`RzQyNIt(e~qlTYG!b$GAL2 zUgZ-NL4<`87aJ@Sf`#flJUs2SP=GB;;%Mm*FK<~-`OzF))C%@I=P^fkezo4=W37VC zR0P`k-GL9$?wyEU*}JNvJt+bKDrlc~YCqFo&geA~Sl%5|IO?Oj7pnJSk$QK%dnoVG zOU4z!P_dMWqmXu@O7zcm9aj&`=B#1Orv39qrawh%gcBuMGad`;SD)}HhCc@M-UDY+ z)gl6|C~F`dYjnJQbVUjrtRXNJTRDz>BvO8lPk3Gz`V|FDPRf)iSgHFeLfyBHgU3uy z<%c%O@md|ppVGW8bA9^nU9e?$xf5gzop|d&Gs~?I?w8S<%`ZjyE1khnRPO<6RH&-~ 
zt#GkOkX9ghz$W%Wl;aDvGWH;7rnQo`iJ=*-nk6=SRgG~V8+8o%Foa3W=WpP(`?hhB z!;G^sgi=Q;t*0M12nD70`t;26{ETqFOA;5tF9eFT36vEc3iA$`Iu4#03ePObWo1dG zr7O~}>@A6Sz}*=knUZZ^UL0Wh1iIm9*fff@g`Z?k?q9eh=#|=09j=1(x&eS#D$*+z zyYq9FTw!+d4gkoQ*_|&BWDVk4S`=bpV&M@H@>*IlYHGyr2PnMHHu|Nd*gjmj+Z5D% zi^KjaM-u3&ZPGW0@QcaoS39vjD%nIMbN*m*rElh)bbtd@J=k={2hRVZ^Q>W0O)!}* zMJ2>*$iv8A8NA*)?RAzd*Gnoe5^81`TgBYs@_4 zIN;DqdAVL+XFf`-c2x?Kcqc`jJz9Ff8=sOytY2W6D9w2BhG;;Rh#=tn|LJ#{aO;)q ztQ+fd#*a`0{M<8!;%6uKL!{jvKRAc+>UY;|2&JUcl~%?y%1TkM3rMQ}@i@oY5FoEA z-UR}R^KEIT1a4skmj*GRXy!)|3N8g7eoRHI{E6z1ftR@Oa03q+MbLnp(Az6X`BTiR zwYp2hi9vN}FrtbE@DgS`$;W^lY~VntHdtMrzs)dz@;6h<#@@aW1oQPT{;7?S>xk($ z^A2YRI1=KXY)a~RyHrCaz>U0|jup+Iyg?oQV8s&vHy^U6AZ67U@AU-E1=^I96u6f` z1dHi+!P?py2NMIt3t??fOidSyt-hB1yx$moO>{gOE)<7ji<=1VmT#WMQf7-U{%E=N zG{=8ieZ3WFp#ZRftxE_x$7n{wZndiDC6k0MEc1*B=;DPCX?way5q4D^vLmJ9IbVJ% zboI=lT$Zcp9%+tGc>|KIe5MP(tzx@TWx-3NNzKo=S(rn=y`a6DJV-g-0BG|Z%qTzT zxf{-R-D9KN1z$yVZGRngOeseP`OK~50(MvXHA2bHXNwDq<}5d6NZY?O=u3W#W3ZWm zan2-6D?GLXfuhl%)7#P*I)>en1pDWE+KF)KSpPhX2cYgx;_z+CYL7fHF}eTuolNxJ zDP?mf{G6(;FDmrG>7S(jh?Ns6e!g9DWP<6mN`_bQTN8&tOJws3e+?x!=gW~rhu-^N z1+}M+Xz)vj>tA{%860_PzhJ)uVb=PY9NT@dZ71=9F)&<0!F}~k&nba_ZBYXQ&o9h< z6zK;It#!R8?|ya@&!~;P!ko*mq7$r~ zrG#!TMMI#TqkLt&TrMpt=8(bhF^Xt7ofOaty^bEuffE1Bq}cZT_VUXnP8W%U48@-)a&G^!#;E#Eoe#?Gl^o-p$SV2LkWLvD*pJAcFsxA zHT^q*UpD#so$foIZBfDllTv$h$6;*}du$2i?BZU2Lb8(mmL>^hJ&cbL7`tnC@xxX5Q} z?k?agRrL62S~;laL_)9`wNAU^dD*MfqET$`l_fXX=bdwaBX#lleCw-V|17U^dsRk% zwq*o(dR1Q-@@2bnO zcvNk|>CQ=p$(*1gS(7FgKk~%^ErLj6z(wfytL!@pvpd@FZ~u{LXORA2G8hH#lfZHr zoC_#NiGR3b;l!PlB;>AR=5G0d#^P>mRlxr*#7iM3&zlC=z}uXp37(q$vD;Smj4?U4 zTXN!BUhlG-zGQd<)IZ{Yf6(*n{u^4w<>!u>eeYg>j79cI&$9+)dgg6$5K|>qG`QA#n@DC<5m>V`+bj!~vr7znG{PzUa`EWEs+{*MqW9%NLOsAgU=}dXw zB64GAauh|V|u~U*_VmPu89hhd=DGrT^ z#sdLcUX^F>usv1R+Yi3Uo0law3)2|>L;;faWoQXPFj(B~mz;L^-t;+E$4~CpXep>R z=TUbcqpXf9kOqPxZ`VyDW`ZeKNImDakdTmvhex>krsz6V+(atQ;Dry;Ne!Svc1`Xk zf&Y-4}pi+)1nc#j9#mAkf89NUkP z5}UW<5q`vJ_5y%2*jfW;OjGWB$qhH!uBUSGN4t2$i~~NQQA6tC5HE3 
zH_hZazF-S9N(M38rlaXZW0W<=95TZ5)FSs+JeqbB92RRF*W1k<1BHV!>dYj)*8eC$e)OC*yx;1!GK* z40QNGNZ??6pu7CnnT(qWmJ}H*ER|U7GWz$=u*-^T1gWXkQM|fr9n!N4h0oa5S0>1MY@!3P)h0U zMnI&yVIP<4d)Hd;xBu)h_K)?fG1kKn@B2EhdCqy%ywb?blfa6x<-}NlsJ3dn=*#-y z6Rd)dKista@}S!`Pl<;#s?6OWhT97`avr@Xx$*K0{fsTuiG7z=v#*!@7cpr(PfH12 zVf?=EqiSD|{!$(<>*&%#?>lV7$jxQ7va^GmhNj`V#+78@6c4Gd)rGFx^QA%AYuZ(* zfpYZmAH1bKc1bA!t5HAFg6Ib_qbpIGjnR6?Y}Rbj%><=dHlLjVoh7c~j#}TA{a4jD zbQkS^#S$tg@VB8-E|Q1m)ClPGK+&F`1JzN!E-bu$5IpT2+$V38ZLa-eFP|sQ`^@U> z0+3t`u&)Y-w?ulOW16`d=r6X%;teFqaDi``7-~ z85kS$D846?M3L#}ikG{qtE)N7t&j8XzQEzUqLbCvD(+VY>4u8Osow1MdLlD%^-c5> z`jGBoEB$@=IFSmbCHJvqK7VltDxhMgx z0Lg&E;d-Cs+l^vBDa6JdTiZryq+8x31Fk8Bw}=nX zk)ms#+ny|K`B0x_-u>i|f9As-sD|!nXidtkGJHk4daIHXJTjJUBD8O>95sP`JnP*7 zFoNjX*cIJ%v4|H5O&Y7{k0ODp5rdd^QE8^K2xZ;7p`c&g1u=o~(@4ewB37C5CZ?BaR%rcHQg4i=1 zpyo;yb5IZw3zxl{+I~AA*S~539u2UD0B5PrY{k z;)7yqVTR?MxaIB*k6RF$#3v>!$Fb()UZR%6yaF#n>kZE$%PrPwwwBQ|OV&TCaa~Fj zKF_+c{Op09n3G@!1qmp>fG#L@gv^hfHst+iBN3KQ;mq%lX{eLEMY2;x#lNgdURtdP zNjv(7)%?Gn4joq2(+?Y?ytd~*&eMN|#xx)te5qYiOertm(3#+yX! z^uU$nMh%0(O}36^hrDK4g6 zwo2OXA`+S5o1&J-e@4$76=tD9!-Yl+(${58aO=S|-R zu6SQ4>qyj2Xa3h#&x^AaG}Dy53;uePvUY?T`X`7Dr?W<~FXet#9_7ztaLHazN?9^k zK4bJ3*qwcm^)^jfZ@4lyHIp$~F>{;>#+wOM6uo*pZFRlex+j!L@^>a?$FwfNlOp=p z|Dt4Hm{;=B=Nt3q=4HIZ-IlD^Y_OB<1?h9D7ZK{Rhfrwn__kgMi)mv5Cy?;)j@)gT z3MlVLF$0}VxxubBGIwqQe$hV@&(?UAQ_Yt*;BMjixpK{Ba=|atYIT=;uj{!zBqZ(n zH?CZi7iwjC1TDpGK0>95B4=M%xKDkcQ7Oa}V9}3LCyXh9y<YSZa&H%XPFQo8EoiW0Bs|O$;!@9w#!Tx!k%ZL2;D{4xx|k^ zs)ElIilPkq0<2;qJ5HxuiV>mC_loTJWgFA)%2}qO3ZKg-DEKL;w@ttZ}VM_|<-TT`e&uMa`0gKk#@2~P5J7^OPiE$OS#exoST+Cn@%M8w&WS3enKh&Sm_Z4BQoPJ^FQw~d1{XfJyHlsS zlYPm0A7+H#y~S$cT6odm{=TdRqe^@kdbBd0=sO7^0H83{Jf)>0`6ln&a7~_Pv}u^! 
zF^K+o@BamirSs%(H3=FT$msA38nF$0iLukl!`G;EzUD;CdiQuNkkm4VBQQ?wkaTrX zfXX1LUoYImb~zV(Js;bSf7dnQ$g4z$LoA^{$a@yjsb4iyyO84pz~z9Kak96MVNt*G zMAt7aGH`kLG%#X7hJOrizzZ=hX$P`d5u4jic}mTZ6UVbBSqR1l-P-tl4ifwUpbYV) z2dOvRBzh21itJjzz-V#K>dy$i#W2hAF@f`3uLVDbfg!-nDwSOHIpT|KwGl~!myfD=Z`c7vdETu}LyVy+@^n90lpDT}3 z(%=QMV16oiQG}^iId4jlji`v_myCZ7lQFD`;udSYV3n#oY?$BBzb6Z^1YI32w5QIA zc{@>{i~{Lch@d>ac7Y5g;?TQSHsRLj${bK?($;=!W5GP&!z9W*61znuNHVPH^&N!* zsQXSMUP434wndQv&OHa9-IH?|;;r2YV7-;z%eD(G9IMI!WJW8*q7C zU7w&}9{O8I+Dw0+BC?tbqH-kaQ(KWs(Eon_U?cgCZvnxAID0H?8DAsakk?uJ5r6q{6AqvQxK!0;JV4-k} zs7`07thl!)7TTo??kaGr+r;>Kb(SRlGTQoTl+C?M`%}S4e&It z5rtrv&AsFQ0i!bbk9}#_`9f#3jkV+aIJK>V8J|hjnsa2ts;8o^!?%8?JmO*oeTZAR z^s;Wx%s@XV>9Wg?&6gb%cbADwnJ@3}dN*@kvp!PZXvBhzYH4AJ?yvlu&uz&g3<*5!Mm>1L4WLJQp|`qT6rL?wqBboXu; zC#+8q*JA_wMsG1SgGOq5i(kM^ybQ>XfMA+{p`hR$G*Tyh6d)R_#VPhm;!}%C8KBVW zwsfH4jgeUm>r+5&#;G?piNvjYUGJ98v7o;%nBq~oZtpT1S4l!#g-0hyh}~mEi!Mj4 zH*ufiH5@H+oQFQoEUp`@C_qa-Q8oPax~}A_4EhY`8kI2>5F&(ItH8-luwt{(wa?21 zq6ANc*^jh9eB!ZRl?$Np3Wf6SarM|*wu^Gm+g)&uz;mXvG5GIuet~AY3Z0BhYy)v* z!8XpU{PJAV`Y!IRROX-Sz)Hf(L7{)p=Lu|KAEJN8k;`mog%+pYT*xxgJ1pae4)Ve0 z9u~;g#!43+l(y(*j5O<_n|n!|KG?WOpxA6o}b6+X$lv7r@5IS-M4U36YXk(_eZ7{rc zUwS|0*{vkFh48uXzjOxasFkW+cNT+^s37u2hk-Y5b3yMnT1RCrf1%7)7_L_1rhSPj z?++pq!oHtBf3j@s+_ek(u;wiNK7Qy?tc|L4i3$GYJ1ylR?!OqbjX9k6kPW^|mEp}V6Dp?thgsp*e8s;3Nd_!divo&;SuDyIUgYvQ{ z)x5H3@x9!gVeS|X=n|T7I7~>-M)b2-4vuPKw8EGRsRR@+2QAW#-+4LhsbHas|| zE{4l~kd1CJH9hH)XP3biHax9PWCM~|aWfROJA^RxPg)YN^&QXg66l{Lub*0(q55D| zzoyOG#pO~|Rbc?#2;Z(Hi+%q?RO61n3TPeVyG_%h8$j2MY4{r0QaLr(hO4aldmX2! 
zEmgRRFS|b`s2(Zb^@{3YFPl?ymcnYn2i2vHqbt*??;V4-*6!88kRI^OziwwU=Entq zRT##cdKE97SSF>E)n+ixPEsPF29H>wnpZ4T%l|a;SAt1``{o4Fl)HdX!D$l#ENIOZ zZ4Teq?iP+QB0{So@~Xt~_g9$-u0Qv8>JS0Kpr^xaal2JlMPolf#N{ukCqs$YR>CJY zpWoY~aBxhl|HArRUfMRXA^vD`qmGf^AsG<-9C=ZxbDW)>%In9IMuS_+><3?NeOY`- zL*uA&Jo&B{$>)P4CA|ld;Sp~u<{KJUaDm^DG2fu@PLIC+^)U~-dzig0OqTb~_Rs}%J}XxEF?)KKK6QWum_!oVlT`x& zxAsNB$;o^|VaK@%<#Y-xnz;u$@Y5A$7=w1q{aBk5|1ANLqhEYbvlu|@#ERu7Ed?y} zBc{xEOO9&Gx?=4&E>^YSITn$G;36-t?!h0DRx->POeg0Pj-&2DLfE`Wj*``u&GAk9 zzw~2oMQAiI_0=>-GLm=$rjy!BKxGfzxmUt-dwWf`<29PACz8PMuct5G0Z1j-xMU0( z?L~;_x~KQAXA<2>#NB+?w3#dJh6y@itSwvcepfCX+lU)db9I?|(Pp{sL=wHU15?(T z7B#O~e_KF6B`m$Vxd{rf@O%;t8`}G7*gMS5(b2NJEwd#cOjr`j2Dkq7JwQ@} zLBjjj2}cz!H_+RR<)jyep2#zGt+5*Q2iy`+13n!1RuB|M2&JPNFe^9DuPZx)K@>d? zxTvqs%H*{J3#_r#v%UR@%hyMUTh-!}QC7B%4`4~)Q1nl&&s?rp8o28Ht_(@_FCwch z4=1L0n3TYWBT@CPbbOx@@U7oZir7ed;isgffx4|24UQbgs+nk36dk-yuweZw&h-cW{O>LwXC7y>Hop2->0ZzMC045 z{Ku2?_nelH1;Gqjz3%`OlyDg z@7@<>NLI_0TJKe#nwghsw~6q-b11og(NVeK{*~C45^v=)x7paQ4Ha8~qthife(Q{X zRuk)J=bW?Z<7flHi3rv4RA)^BJY@7uGFY)%v(H`Vhp_;<&^W^|+33%0Msby0r~vYe z<$fdl!FhSmAVQK|DXdB8co`Hoc06h{zU5o4>$1DATSA!hKQHYq?Ju{c=VEpMINF6@ zT?&KQk};KvtiJHM^HHXp0ySTq!-#7Yeil+aNb?3cC|FbFHPUWvlD2seN?e?{1~qi6 z98p)d6d19t*=+a@!Fm^&le8(euw-R(a6ifiZUO12irC4yNKnt8qzXJ9HrU{KFP#K+fQSM%G8i9bb1)ad3xF^Dt0aL7pWe zqBBAQ$79^Hn|Aje5raGn`XjG93Xh-ys#9oGq+Fo*syb5T92DDPZ5pswYo7u6J&iD9 zn6%jG$Df(&q-4Fb%*uG>q!@c_ktW9<9E%U{$%|_14U|~w({&62f>qw%$Bm8JT;Z{K z>+$=4Ft<24lLfmMiO+Ih$xJ-FVU*JbD*#O@8B79D8qE*t$jRu-`ZBQpxD3=qUgl$t zrB1>A!1wZd|37NJ`U_)} zcK=r+cWlpv=&&b#;RGHudNPbt92p}%{omb31LR)5ry#T{Qie=kdxNfIn6MD6Osk(? 
zKLU(ud5F!4LRX(cve4RbF;-u2azaZ=czb zr4Y%Ci-l=$C+&YI{68YWA|pfN)}FTX-k#37%xlQ$->Ih`_S=0+gX49 zn2#9%8S{EtdOfa7tRLcK^a;wnPT>bt?8Z^n97=-zdNl;eh`Yj}LLbIqztsBeAOZn` zbx4jULhmB08F1FAbZ3NCm9AdD8ZqB7+7UDF1tsmR$4V|>m>vAARZYW5X%cy-Gfj+_ zb?%3I2ygb-+culjjJ9amXciv7nUaobr5rm7V;YKwm;11v*P&tOD`E-+kv=~_rv*B> zfil6Zuvgl7y3v3K2WMP4-5{|W@Nsx{J)rU6In`^=NBl>U`(`D~hRPL~i=MuwfBTQ_ z52|ng&_1Ej^$dOLF9074p6$&i9O?Kbr(vEU@B&~yBygwCMz#COil5I<2%rr0zWH*O z`$v3K*eM7Lk61PdW1+X9LM%Cd7bv0afWn9Y8G-;9Y3 z=$lNJ?IN701F5i$KvY$sTV7)JiDla(%$z`{$}045c_==?DnpI4Jy^2YdcVXtaoJ8Y zy`3YVG6Ne+&mR6nW$yJwU{vc6E$O+F0q^jm_fuI~-Oz(EL&Iiwhbl~Qjgl~|LR?c5 z)wr6ee+F)QB4`0ZRYGY%nA}1?zC)V<=zNb!?je(ThqowB(95(cIlO;4TpFU3a1P** zl%Iz-qKe=VR1~WvB||4^HGR(4J~Uy65GUKyn6ZD7l907l?>lzp+3f58yu>^Y zGs&jF^ZfuyBiOV~2{P&C4-$*4E;l+iisxEQO^l3m-N z(7F`cc<=1t$@%%brknfyYC(&otmtj&j$HeYV`{i(!*l09VS{ zM$`9Pb{luk17S>DwmjY`Q*w-CrCbo8`d!qIt-%swGwK za7V_r;{kL1aMswwFZ6@p2*EB{5Fp6ue|C_^i#U@aky51a3j5&(nJJAp4~+c0+WW}S zJO3AOp4h0rbIP-YE|QbkSRl>yj+4E!E|3jQ^Yi#}Hk~=@G5pn>$nf~_<6m>O_<_n@>^T=~Y?f@&&)d$jP>HOb zotm2ZWx+2fD0p2=tZ%?iJtAdx)yYDq;@!n;%dxXG9)~?d+tN*{tUa6D)(}7sGo~f6rylM01skyo0 z^736+dy6HhlXG*7XV31AS6Jrdd-+tLeo0Iv6@Xg95aq z?^aI@N@lAu&G%6K-rs>|w*4LrP%wgz`SXC%<;8|BayF)UDuv zd3vJV#!4OUy$vKp6u{W+9|-EvQ{tiqWZx)acI*t%=CsI{nb&vq&xeG3YcBkL#Gyok z+xm^V+Lpkehf>dXJ*=O7TcN{6Ct#^n`JU&=)2FqG>LN)=NsCjZ4F2cMnGBoaj`8x& zGmMQit2+(65*RNqPOr}})BG3~*82JGW|Q2xgIu~*TE#C89QG(NXdi7&c5P^oe_(Fz z=H~XnYN87&kGi@#>#@}4#hGf68{*=HrKOQ=mJANFd!z)Xwm>sl6(kT=Km1O_NjTST zVX$6GWT4lTA@6?k^?wvGeLB80?N5gNcfmRu328ynN)ag=TjfvRK}~M!de(%k$A8a# zv@vyP+M@f}LXm&wo+4!69!Z{naYGF>u5n5Lp0PXvgv@1oxAZzZK2RIt)vb;!FM-(P z9)^9wosyE+7znH5lEHwr1%i(n4J7@;0|7(#TU@G-B7;StDoZy|a_A~m3szH3Pf!1? 
zS=LtnGy4DK(nFNZkBtUXGc)*z*LFm_nYQN(Ar{}7@@FK@nd0GkH|MKNodo$FrlWBJ zL^XsZu1Ote)R4atDib?=D^bYh$ifNUqk926ksyW#DhBa@j0m_#z&Yo)ai}`<6Yx0D zlk@Gq@ZQ`#r&PBbVmjxQr@VxK&QJYAy)K9YQ8(r#G9%z5N7xJ~lGo{j4nK0nPF~;c zR(vxoyiNz`ujm79_49tQSFDB3BG?*+&`Rdu>L>C8#5iZB7zL~ev-sY9xV z09Atq8e+!(*smuVF`FGTbgHPBh`9ooMu(DZ^I^pW9t!sWQE|jmKt%tz+^<-9)vlA< zs^*AE&Q}ydz1AegY^#aEg>ENiA)z4rE2N`CMbwji&2!#-aq{kX-iq<4r9ijShR_k- z`x?_fe-KBox6swg-~R^=lS7l`#!|J@_sYtFuiF^3Cc1JZ-gCPtCF{KL@wxi);OUk; z2LT5A$&0B*mSb(ja#BK1B_lPLrUxqgxm=^=BHzAwbMe9j65^=ZEsW=$YQFZLQ`roO z637|1=sF1iwLq3Ql9WUoX_hECgn8b9poF^N+r2L}{DRaVLK6yC0Q~9->7k(ucxgyB z?LPoqud{ew({31d3@AIK;_QaSN^@-D=&?^sB5}c2?q1!2k}24^L=R<*we9Z>YmIN> zIs7YfUFxfw@x@zzXy2Suzd?;ap_s7^S+Y?TMqgx(%HI2-aTc2}z4uDsP!B~*lI2IkK0U6D`iWPPgpshDOH#&R$eyJwi_=#fx znBKQRKE^lX_!Zn&?3|B^NQm%<3$S{|6kpI0TBHLTbg01+AJNcUSo8ie;LQ{De?{zJ zt)-BOZX{~ICSxXvMSS~v7>RtxZtB8$4a$2Ih{ApTlpr+Ah^#C3J)Z5&8#FFDT$ z9u$U^r&I8XqurZBy7m_^GC>LQw^a8q=yb@HH{WW8P(!Ye`5mc8)*Bep#jO-okN>UF z?S;U9@`2|y?@-o&HTekHKM7r>4$*}t^u$!st5&NNj5#>61~j^kOwuCo;jf0nR~|(& zsPR+5s|X#gy@xWJF%#ca&*-G2x7ETc%ugSt3 z4%l7ZfZ@7u@l=hIR#sNRE2CUFxw%WIVBf2&6Vp1U2g*qoC!983ikknXO7irisur9f zCmCsqk4>vAWpHxuG#*-CnTXUb_bnSqNO}Z&xTnbaVu%{qKRh~8+Yr7pJvZ6BWYlm? zu=ei$<8sS;dZ{J)YqH+m*J)6uAai+)nad_|L~b^^QF4=X!k#~!T=qxp11jNCReYwS z31*kW6~IG~71$^Jlt5k*c)rpm&1_teTj4iFRKudyq&T@jLlFF=b`O%lHJy~Q%G);f zqwZBgzbcz+;Rmhvbt7|SGol`%E^3LT2B|Q|enM0s1aRztCiM2yzlP_)YHgWzOgPKO zL}9J!>o-l}0(>Oec~BeT#UqC8vemwH|z7#(;sgd?vm!T|QNFb~=5KeQ% z-AFXJ*qsnkO@3t>2~nomL6XeOWz#Td@IYf|ZWa3#0Tc&`fSiEbQUbO5>23olJPFaQ zlK@0T{7qUi030Ucz?%q+%ITb76{eq6hmJF|$5KTKfkNm*4*~NH0Xx?Rwg(I^)Hz2! 
z=^BysbT!=FVzce6VY@70^$Xm|hMboRnC~TisDyg+SRfHiOhm>`dB=lhJKJ2{{=FN* zkxrfuIztld9JG@2I%-!BqPZq=B{T|jYAING{rb)wuMvCx(oAlPWV~|fi@v^leSLk` zB_)LomqIVPkl-wpbdQ%GP-)C_SV{I>T3PY(_I^^SquCUv)aW~G;S}nfTGvs+EI1X4HMiyA&eDZe%J%A%``lF|*EtC?Fu zRj%Sh{jb1*wEXVHO_^lNI>xHs-rX>n>T{=h_t}vNZq=B!zR&$(3-@XhxI=#jgQ`Z^ zFxb|7qNd)HHDCy;@xvr@h7E)MXBMW5KPyr@4Q&&wi7=3^o(tW0pncAiFm6%?O6sQG zcrX2Mf*%4uI^r+9Dv;s49Vs+SvHqjZVGX-ti>Z!xLC3T6k!zdBm9W~+yoOxB6Ns<( z_19P-hT}{}ox!4B845RN~sIZuIZAjf}lwo(EoM&+Km6xyzWIpOTF3lbZdvRO2Rf1gt}HPDXi%Uej_= z_La}zv}ySs>x?7b+hW8nyh#52IG+5BiVcoYPL3dBz+1zkmG{1Ae7U`yocu~`uD$0e zbFl|D4pl?<`E!Umavq^#NbKb>D!+tSK^mkxzTLg|8e~hKbhT>)BlaA;!w6Lr5DWC^ z7&?ma1{0p>fycRtAATzZ74}{TmsOpAdlob+Vhcdw8mBAn3QlSt0~`F$flB&J#FB({ z9RV(|ZI|cX0rkx!z-l1`AuuM0G?ntK2vyCc*jlI>fuQ$=U1TS869S$Sg^&UM;eV`1 zB)8J3Nbvrp7*chh=JUU}FXP&&G_l!nTalWCX&Oz6xN z;+3DOB@X$iJASzAN~xRLT3t;gB~?9>Ccw__IXZfuhFfp)f1M%~G!QdG)k97U`K8m^ zX&)URY!zwQIr!&>0IS2$hT9B(|S!AAJkX{F-!@$1erlEP1$KjjJ$EC3=Tbp2+ zXC3n)@s3SQN!btX49tqg+0a_O&+TLQ>%c(>H|S7MjWq`>YhNF)Ue2kTkpWD=TvP7Z zxQ&-Xr|wQpa?hQv-zz6@2~LfsSS{HOY@vD9e02h15L1H4MSuz+hzF5&xYNKi)xXMg zKxaC((Ez?uH0sH%ao!{5swesQ_{hmzI?{>uDF6A%nhm#CVfgvopdF@$lfDpgO{9ig zyn2ga0SXF03ZDA@yKaDgNAb{R01Bx(k``gu+0;01l-wTUW05VLus#RPLQeAJ5p)p{ zcS7ASvGGhb13K6}c^?onK_sqi_R8M(b3p3v@H5ly-5sgpM6kiD)H`)wEO&wPhad;} z5tglOHt_^O1fA8{hm_)SPux|*#pbLPy`gS`}ANi(Kqps+C8_a?^i&}8xh{> z-Y#|Zql|1jaurIy3F#MYy)q^JJ06u(x?NszZRtIV*|3FzoiMIdpRb#2x|p9VroWl! 
z@6#FD`D7^@*@*|D-~5P+`|^sRZvB&7ChFW*conon`l|Mui&@}l;f z{J4Seb`mZowo?O#JB0x7FXisdVCL0}~^gTh8>Q zZ#mA3=KZuK;Zk4k$7*Ty=UK$ozbBDW=eDH4B@vuKug4aUq5LDg=b}K zj6aGzhAZ;>1t=%(8a3&R>$QckWs)Q%dI`Hhry}yaN#_larGd!W#R= z2cLBsMF{Ewzhr!{pOupPZC)x zeRVCg^G^E@|5RpfDGxDz*cK}L)la{l)KtLVBicb_=a1fp{%H@74R7O$HL}Xe-^GpG zC|WjtGWo4yL7DNz3OBBXQ$O-=_vh%8#4UfVH6`E%ui2}jgjK7b+!NkI%oPV(ZNJc%QTvlrFJ zm(McH+fkZyW}c+pVV3*A!7^~fPVIs9Uz%+mADvvzh}G=B01YolIPRDHw|qt_#)LX4 zwpcI98nEhG88`Xp=PWj7uyRV;*w~Ot%gC5j4J3TOYGN{Ben!Mx9az;Tpi&h9e4aKl zgRJHF8f~V3FfVm)tSg?@&VaR|CiEh$kgfN_k)~R}TqO1m4mP7-N^wC6F8%b%nXMCF z#lW@^Re7k*_>ob2TF}fxMJ6_MTb}dB6VBMEqvd^m#BY@jt(O_sdw^PUgH=Z#jB2Fh zJQPAA!%pqeOql07gzoe%fk{sRXfQS~{xA1Mgh9|JRzXenJ~ zBcx&*6?UzDX&(bJ3cHlBu<%eIs{uES+a%TTH4UchUCH_f?#z)JgKUBzg9!>qq(#<# z)VU0r=bgMys56f%-%P1*CXpW3QzAt8dvVEVYt*6-D%8``kUmVePmU;gI_WZqJIt7G zrC42EMK@nYuFNWe(>3j&(xD<*{q}B}G9h(?yb@7CgmWb%s zYfx<+LSaNm8&~Ie|NIxJ-jOkLnS3r>tJCA(CLrh8nmFf3i}4ixtmUg-oP!*WofcR! zLSZ&9wk_-&)uCE{C(q~lS{sC zqamR~2aEBk$ylt~Wo*g3TrjJ_Q2WTY^E`JZ$e9>3<2~-ROwuX+@Ux zpsLj~17%Tm%eu0OqI9wG8%Avt05=W*_n3~G+pjD>teUu)Y(Z{cmdXHYdz0kd@u~r9 zYikW|n#3h5KC>~E-wQtj`s8@nVt#uLqFhyHcFfsYSj>vFg`6~RD017^nqhX_GCQcm z;|SONma|=sD@LKVN;y1+O`mnE=`lKbg9xOc7ktMYV*hra+%KeY5QAM=dAU!LyqOHiR8Led(0dr>BHcY`GPW7UzN4sjil?k+#e0 zp*0)8o%BBqlDk^swXj-^|B!iAOL-Y&&((&PST^T7Jcq$ij*mN-2#t9^w}+HI&-<3b zSzZTD(8e~1IYf>y0BC>zI|u8}#n7W;y$oazT!S>v^`-HT6D-NzGqw}VB$G2|`-aSZ zAIlqgyqd~HN(A&3RhG~3t)qKEZjLp#!D$dQm)uDO>*g)tyC#jMPu=!OF|{WFYDG!!K`yva z5K1vQGuf**wFAf~JW+(s7Kyl5o;DT%_5uLA8R|Z<8e4#)6Rtwl2;lp1=G>*L;r@!Y z!eWRNB>_9YNC^McxcsplF?6;w;hJt9bEfuIq*tD*{CEVh?bne2`H$Z2%T$d5!A52; z?uWNFGkainp6Z+cm87*l@6=6rybub06}d7{JikH05leIBL)snh_wwz9A{CMRtGaCh zgc$lRg>A4;ZWWN*0Vu$y9oiTO#u^#C1jP$F1ifM)N6`3R9NhF5mR=p~x#cse)41+I zB!YsDyqe7_tORg$djZbYpJOL0TPM;L>FT--QzSF~(M@fxud5@`>(0xaet)vyM``I$ z;K~Ce1l*3wfyqo6 z=A9^#xY2^Y1r7MB@^09*d)dj$i3uc+k-zXuDPKtB)7n?$V4ppg=~|ZvRRcTQZu^lh zoyd|NHxvqlgJWQh5Ltb?ITJ;F*e$)w>sovFhf@o6Hj{H@JMoAob`Vz~tZ%D>={VrW|ZfmsB}AZLH)0@y3(_J~QO 
zm2$pVpA?Bd_a3Au6?K*iutvy4Y-LhBy%EQZ= zHhO)rr#doUWE8MiC|eCI`~~_R@?rQ)n)~(_-~1V2eaZX%`xgK3%Ec@5b0`1_m+Hwt9LjBp@geqBk2ihQ4mw1qlg>)MmT6-?!o& z6hHo#sccxD8=1QK;!1z~#)^+r7|(Canxf;cT>Lto?aY4aw$1mRNcAasGtGK~R0J;e z{lA@%Jxbd9f+QoSlb5xal1c?S+yX?)@jjndv36g-p$ap_C9H>dHJEOCXXZ}J>)fXp z@Q^-Ez<_Z-+A7TJyJ1@rguvI0>flBJc0Ms87W9~hU0H<#rjm;oAiz3My3Xz++FiS5 zVA05LoE}^vEDY^(#vhFAx%in=KAOlfOZYGmVS|oWYt_dS<$@Ga+)OO1>KqTza1U7U zPt8nzD3XuRihU1h5%^eSOp77Gu-$zA!QT|41{|Shvj&dRQVIy|ZJ$CW8}zKVoFm}i z??=4qS;x*d0+Y`VBOqsfWMEZObq0u3vC&a}0-k4;n{cDVQBEUga>U#KNQ@ zr>klD8UKg0`j0EkFp+xSnb0Jv9?e2oncviOrP$14zPM+j40{rJiYPqMu4>FIel zkQ@VHW4PeQ-FzQ0noc5@&pPW!M(z>`iLesKJbOC6KqM|c*~<;JS>3rL;kiqOD1f{j zJjSqK*AIWvxv+(cVbPb@>}KNP(({1!n|sa;jE`3pQi+fkT&5DbVq=q;H0nYne3XmJ z3v^8FryKhQ<^aLFKY6kx*ls*CdEW2M8;BHQ=i6S<2^>CWdKB&rDUr35wj|%vclOfK zO4!((C;0J$FRvR>Zfv z4FCaqOVj={a71-Gu+x|Co}r7&9BCfXTd0k4%jtgXPy}}yN8=TH7nupJv|F>lW^`VfNUZ7BJ=U&pzz+1(RLA{~*9Zc{}azMv+$bgx!d@e1vC@xBHF98w#J zyZ!ni{Ke+_Ua}{d_Fe)=5p^r}pix`OF#&;q*;xx`XXiyQGcoZYetxA1s@dl(M_ZIu zY*(jBTfqWS9N|5|!txwI*caP51H3otPHQ}L{1#DZdBDg<67zK?d%n8U3HX9GtWD9E z16mcUm>7|g;*D!{W;D54p*=w|h(Bj#Jlma`>lv?6TTQt5p^nZ@e@p>PUT#vtOHUT9 zZ=8hm?azIQn72ZxY;=_dt{RQ7Bo4@~_M} z1`P2_RfPUVal=N2ELTgv73>wv5g^N zMs ze;By2lioWK znoK_($15MAw}2Gse0>fC+Rgs&wj}Pu=ib|u<`;ZZjnz9G{OF(PeEbORHn`)pH_)co z@& ^7SDtVPkwga_1j)yksP931ksK^N0LCF>!GSAF`V&_xI)C)q!?_AO+y*zXH`6 z;?uN!ccSA)3yf_F_9?T>XeP#ud7dQfFoGUbc9|7Ezk=?1*P2)jHs*!)YPDF1O8t4)-NE&3fHiMBMO zb{=E5bE-FP>>v^C&#aIA1LiCi7*TxjDO+TNENqX=S*Pu!R zhj{#f4w3$i@0~W=4WDl1qoQM=T%wxmW8ZMMSQ2nt19f*U@Ky4rcWOavth}xsNC-g? z=97u~^@5%V-8Khx))7ID;hPF$-34+%<2u*-gGo@#iOYysGCgJGcp<~&*DWCi^z?q7 za7{5YyCx-Z0xy?+FBMsT7n7mC1Og}a(9p0k#uq(|g}}&3*YOGjmsIITZjAN3e?k*0 zmXx`vG#{#z?u5XC@80WzwPxN)Mp%LpQP^&6R^kdPXI@*<$*+y{IFrbpLf$iBjUG1< z3Iy;jQ&vejyV`dKxkiLRCDp}n-RNMo`PSh&Kian)insym$Ly^`CzPd!j`JZv? 
zpj^0rqTi*uijYgB$wLk#Z?T9m82xV1lR~=JvGCgS;WQrZoEyca+gA^jKMLC< zK#Pa2$v-%gSXGMM2AiSouw_aMLwq!j?(<@U5AX|EXDDvGV(d0As_aV~*KXDouY9!K zw!x^pwA^VBssihbPNT+6XF9o+;Y8wgk9rxPT>fZX)osV(kWcZGjN8o%>MZ{tC)>?# zJshrkddy+l*#S4DCyp&6>@Jb_uQELdepg)MxEU&s@~w2G#_8tWg;ZzFmbG$R`HuGb zXUF(#OYwQvDr~q72cYbA-sZ*SWcFG;WB0cPkyJfpN5@5ouDa{p%6T@Q_XzFKV0W?? za=#MjdpZVXL5sGl3IS*J$W@nF=Yf*SOg*62KDUn#o=xWvS1QZ|9c?L|dsSez!}gBl zJ25bVw2w9Vf)|lz^I6S3&>UFUOH!rIR6QQW#$f+>l7$4C`+GBQf`0lPkX6(FXm{@j zw)jmFZb6V`YW{N^#KJ(p_z0C^M$&GzWv-{j#3OOz5s}K}pF1CI=OC-gcHBm2ldg$7 z{`3&WTp+I<_!}EY&7O*m4gz|D%7XlU!_D(m9&c^LpYOV5 za7MTALwwPqYsi67q^$eM2E`ll9_w$BBL4=`jLFT9zrASRTwkMkc=q}; zR?B9s1oE2Y61=dF*5TQ8C_b**pfDx~RY&pxB9ITsr7(O-Zi=ri%CCmSQxRca*k zBkO-SP>B@$yk+i2E3g>&RY^H4xS;pP;{0`uvVF-tz1tS&UnVAwP`9Fo`a98T?>X3s zpobQX^(i$SBK8-ol8Zuv`L*ZgpS>k`S0q}_dO=YGs~B|iB)fZtuT|TQDS4&(6K}8G z=HSi5xJh*1SSnv_l^cRoIr8ya3nSe=?t6eAJD9JX{yQj(@p_qYx+ZUc88UY@&UT!A zOx-tVZtEMZTDricJ+q12*mm!wd-@0351ZsN^Vna~qigm?L^0u{F*`T5Xt(IR^Hh@< z*4GRTwReY`X$e6V^A}9wZ|=PJ{9*6`sfyTKW!3cMOj?k#$3ggmV%CnpP{CMq_&P;M zkGXMwSwTadf_YssFPC}wJu>D67-Bo?lO#O`f$_3_mFIiUl{idnnA5BpLfs8jE#Hq- zU{Ln-hcSvE3a%E`u%Lai)PBNGvEiimG4FBjx~DWgWO$IZ_w2b%1 z9=F-(?l)hCy>3*Og`%R9bmb2y*^`TE>m(QUMzN8y{0aOioSC05fz*Q2A2cNi(0@j~ zvi9~Bnsn*a00f#qQbVxo7!daK(Ot(%oG-(Sz`4uif+m>2mNWCeVuF_kC$E8gJ3JSj z&}OvQTJU+UPZ1~*R?g5My%L>)yudSrs`Of@$mW+Epq+&`-ahq#AVTJFZjY6a(Cga? zU}YLh>4Uj3^!dmFpt9PGc}&w`{-CfDzb`yo9?&6G8XrG)!uNob#D&O*`M=Us^(|e4 z3a_3=N?gbRmT-kwELa3)h8isK@fMie2!A z4y73Z$56Ly1TF2EVOZZmB$aKBq@NWZ+yl(O2FC;vAkd{-V}wWMY;(09?r(ky0z2f? 
z5plrGFqufoe)Pif$wHG6UErrXyCZM@gj}-c|6%P-z^Pu}_fZW>8Z@9%XpmSkL{fyx zM##J@GbzLpGAy!8P1q62n0aOmmX$ekDhZh_D`ZNhC6qD4;yj-{e1G3_{^y+QIz!jK zuG;pF_4&N-`#kq^-}m#p<5pKhPOztbF$?aoy)0t%mX_nfx@w1x<=eA?Yzgo~Bro=M zf`JFRm|*tG_ru$0M=kUc>?G|ByxI7w9)Ph|c9T3@VTCk{%&76I!+rqV%uio$fL5~uhmX`j zcTPJ=8X0h$*+1=5gFgj5%tv4>39-N$KMVD=r>(}z*`_a%m2HoCKvoFDU1m950H~TVguH2gd;l9HKC%tzg7j@i> z^#*3Giv6z8hZQwvz5m$~>sb>Mc3^iGZ>laREHut{Kl)2Mv^;g%$D3}`V>sxx`FV4g zm*Lr7U)pnzCD!u=ZI^Ax(Te6Um5jhnej&M}FVp)jnC!(T@j0ccjq&mDDq%}Ni)JB$i+#t1AB}J@kdAAxWvcdQ zgWiGMK2(}Jl)Lk{WQZl3OBsv-wK<{tSdz+1!Gxrwq(p3~2OIhThl}zwG!aSet0mrx zxw|9{RTxYYP=={Mt7mbT0un(-_nw#y^bLXGR}}^TLm@vN!}VHm_5^SZaJQ8T+}BH* zrsEPr%351LldL_zfVm~E(o114wrd#fFafl%6{gqa!^Lk1%L(@yK%Q@{r~%qNfBcW` z%NlFu;n(zcduIP?=t!BwluCmyi{xPrrtnL;@vUJ0g0==2PZ9)|i6ERqag>yQy*e0G z4sl@)Wti^EHmuqQSAYoDo@{vetdf7}3b>Y$sT^Fg;(*K&21HEa(aC^;3n1;hURb&mF zN$bwFGO!$I(v!i_wDmr-e?j7cbA4I9TRdTy>qhq_9*MZ?p!zBxE!->e&D?yhHf-{ z3*&1bFTn*j$S6{{8uibgdu{HIBv;B#9fEWV2IuWaF94$HLJoxyV4BMxJOoq)Q5=gw z(vvvzeQmo=I`RDnlSt}3$dWv+cE4M3yj;OG=|l7aXhp~!lM2TdPC@HCh`4Pjdwec4 z?-JOLtlk=$2f2>r3v5~-2tI67D!~Xc#0g$)v2rYBpVv`}N)vo>y&CRo*d2y+T_9t$ zzA&$yd9bec=J64;8h)D?SNIx?Ze$k;?t$-fxI+F0be<#k-Bo@+J2q{m_AeD#`=Etc zz7z}Bfx+#TmES!qM4>tDK8H^gC8fsn5 zz;u!=ym!dVsNK7o*`e4E0|D|Q%wX84GPq-4z9KDb63>BLqxl~lMc|SCxAL*ofh8Cf z1JiUL+12oX9D{Ua!hNG(`>|}GG#hjqN)8hd_kmB*_p@*{tn&1W_{xr{^LlrV6)t{p z0@?y}4q8~6fx`qQN7|FA=)KI#XOL9lvYs6CSs-rV>WQl-_Om^03t5Oiq5N8>t1fTK z1Vb;G$*U-z!kAlfdN5b|t;=Zp37c zx}IQzL>TE(R!TNKvkk5DY><2~MODkR>!_$AuHQ^vfoWsj^}4v+rYLrh&F*8M^WyKK zSWbs<-I{FkuCw!}H0>8cKZ+-1dMp-38otg zs?rLyvNslbHYca{&Mz(ot}y7i2(v#MnFHZ);6)feII%PiV zWA6peOu9Y!QF4P9C5%>gf870}CzGp!J{fR%R!I7ya`?V#Q*)_a;fs=EQ8oIsr?ZxU zRzfFHS^+bslueray>A*V1+2D4IcJ4KVv@D*Y^xc>EcPw(g3B@=-md2LBmdBpXk zom!EtvzmFRnV>Dw0}0lpyo>-av(RPDbgX z@hVL_f;DVHNP(LseFDbkMk%}3A3Z!bmeRX;G!XuD(>C%Pw?-(o(-VHw3-KX(y-1B7 z{T~{@yKwxW`t4DXnLN-k%JqJ5@6%9$tUCv`;j)DeLLeLb9Kc@$fm8 z9I+!$C)?jV8N2JzojGee@qSd_-FV8(e16Q=)hjrflb4z$hnad}V>uBsW5^g2J8fNT 
zG|KUNu`b?NUHfL|ld@8h*1h0dZn8<1&0qVXaeWsfrrn|kC%JfQfD-)dPj-XzvIPNy zJjNX&{L_wcT$+(;41S`NP3AqUaD;Z0Nd83+d>vx)jc4b?*nGe`L zRIvCJUENoxi|ng+dK(tsrDxMP41KbC61YkBl)!S*`!l`fuMHFCJtvr1mV`F_%qj8m z^;eX1f8)KNZacSrV&VRez?|m9pb&b_^~a z>)*bm>e8}&mF(s=+B41ex7=0e@1ot=%swP$@Gv>K2ghYo8ysv=6;Aq;O_#!I6B*U_ zCX0r*(Pnn0f{~NCO8OD*U#2^x3I`%8K4z+>N7%MU?dKw&yzIL;_w^SK z&FQt%56M$S_lVS+O`eqY!RiMpa7xLrKSlzxv5$wA?|a4>&PayCw$Q6zILohms+vdHolp2HoGR>aNIxjH^FFH^s}Lfk0ZR# zQfzdLL7g{S;$^TT-*$es&pQ|`lNTc(EG&C1wxrR3`dKUOr~d25CP6zyM@2;pqMCKB zIH$>{Y-o>{o0blL2ZsB#H?*}4!w0I0JUV30 zSkjiFVntpRX;rN*U_w4Nw_w)-X=l~$e#OtcxfdZN9UP;f`bGbZwwCwveVqIq-)C*H zpL1onB=+8`N4dl#D|_DHLfa%y|37B@N?@O~|FJn@~{ z(~P?s5gp(3ajLEDy{2z0okbG~C5t9PX9RT@zv&m>rptCl?@xGKKX>lq*4g6ZPDj>%VjWyO8bNDiwulQY%S>!4AM4>RvS zNI~+#a;z2x62@G-r{eT^1DV>{5UGnkWTOgwcyN#7tJn4(oKladtk$cOyS+H&WXK3` z9%z>t6z_nnqZIv~@gZwsa@fE7F-4?kqrMpqMzr}^{U@eNFVP!G3!kQ0q_6!f?OM|~ zI~8Z_jD67eXcbqgo7QMomEeKNls@Giy36A1GWVE<9^rbjK;0iI4p-;-a=2FaY$%8b z3Bek|d01CaDt?!<|L--7W<08xq5V9ThgpAn&Ul3PY|^0NP>jPmwtsWAB3hzc>Ta<~ z^otj%Z{A!Y9aqDekY7Tw`1l#GZyTxh-ei;!d~sV;@+-kU?ak9`@$w01J%%o9SXziS zt;*L~PyOaQ-8A9JE`F`{4feM^Y!(%of|2Wf{sBLu24`a%tYxRByy0BlA0uEMSjkH6 zxpgFP!0ghm9E@Ibc)ZN-{^5ddulO2VS&WksxBR9oL$^wD3e=vX9bK z?7EES6_U;TrdKNF)UG6-;NFoUUad4T7Dl%`8{U4%vL-!q;=#1oZ{2>$-YE{R(m)e; znsx0RAML?o?sw?JtL`*9kKCGQl_dPs8CkvTl-0X~5b}|ztza%~?l@JdgM`qjKa1;y zs9wt%kFCekDW`O}9@k{q`q$h@mZWk!TV~}Qb)=P99aGcGl)m24N`EzklcvX)1O&m}65xGY{lhPOJB zHgEr}k;o%8ugUuS;l}KoBJX@!P0u~diTyfoiVisL#Mb2%#}{~%1eR7Ad6-(;#R&>e zs)uTJKi@m}*TzYpX~N+tez1m$j>2EawBbKfFL1ngrW0rV1^YhS^n%Oih0FImY-lYy z^yeO~y%`2Fe`7ltZlB!cSiiJ~!w`Bats^RJvf``k(*`P+YOscirgs82jRCgN{pefD znkvg-{;7^MCH~--E4oG_pMKTL-Nw1h^O?MH3>Yx_nJ?xTP5%DssSeA`K()+dtu|k0 z(c*m9rLt>6M$1jOo#DIFguBpzR)j9g>`N8;Ud`Ti-Pr;0m9{aG`aG!fvKw#4yT0*~ z(Te$bSh>h>FW*bi{$E|c&lZ0A^l8qm0Xw>Ul*E-c^Y^L|(1MX_Bgwwk%I7a?zFFFK znfh6G{l!?p$6lrR-|X@z2%dSr$kMJ8qj;Ae^F{9okHqYI4Ee=U(vKdihLpS)XFPRf z`bv@Nv1Lcv@2p6h8|CSm1DE^wkVV@K>beD0#=V(M;pU#eZ0I02rqoEjGrlB`7O9kA 
z9aN2M0L`xzW=JTToc=Bb=Py(zBNLBZ(8r;Ol?S7GtQE+!`L>0|Gm_MgpM8}0sq5;+ zNew7r+!Q4sfTt}zXJ*#FFyhWZcQ`mSqLl-$kQjV}3ulzi3RP%jSFc%R%&ZRSYMRR! z!cMR$RmOhOb{r48g4uHV{=18Tv^pZjmz{6EH@|_WqaYmFwA!sc=9_Q6TPPbf_Ju9B zGDu64msfA~#5NN9Zo)CXGUiZ6b%;FprNI&8}K&ml{yyWQT@8s7P-DRAmpzV>8- zhJ~RIc~w$b^^=g8K`kkX(UVg;DZWfP0{#x;$C1@z4?w=IOq@9l_=f(zavAu$GE)=p>urlIY%9S;93@e1lcU6)bO{ zRbu1`z5LQYT0i?~kUgmM{wk_r&H|<}m!>l-x?+YTK*MqEwrxyj-RtNdx{rj^WPi9c zjLh>%e;mS{?fTe;u2Rxh#rQ({k#=qZ>{>?#;K=w$lO2<+_5G35W4ybR(v?Y*x&S3d zaUOmBPm;b2qqlU1BY^mDx30wTI8I)1HJ*r0)!B0!=#<>}>@i{}?L@v)07?JR)vG(z z*^B1e`1XFD5ml2#>k*(;V{(y|fAXW)>8?Vx`bZ~aIUd&vKO4MDuwc{r8*`?r!|NFx z9TgTA*X}EIrbnF7`0iQ;vJEZ&j;gd>TPi8&Pq$PT*KP80bJX|>uV)v>XJ6!eCYzRr z>N#O#)J8W?SG!{!`LqI;3|U&Z$TSj|z3S@f0{F&~njBP-G=zmTG<*p|Ca##q*mr+q z{cHbDTsE=)RV;E-d?|)-Zb;5{G3wR?QL}2S{Ef19RoT#;e&(=Z*OlUY?B&P{zRWAi zswzYWm5MuIecXS`NXvxF$QQhUucAyES@c)uzKLTqA{= zoc2f*PqgXJ<|3V+7MCT6uE;I?3MP^cTdMlTTeyGzMT=_1-A_bn-oxY7A|XAb_fE0c zdcnz3c`EH8x4eMC`f+$cM+4KKj@n_f+z~NNUi_R}y1SwR64I`(s|y6$o! zkI!>-6+O;`0bjm6JDGK;-y zV(Z5#5{~Z*#23DFx$yK)>h*Un2j0n(`(4f9ETj57b|a zMfK~9b&iE~naxi9_nQbg5J=)zBmErI^LjYdAhMC!^@UTdV{Dxl|DdC@H=;%#vb+8C zz^t^>j5m+m+yP@zjAO$faXD7AJM4*~-AFZ*e$TrB8JO=4w>xR1k379aoZ>YEth>Fx zh+2%c?_DihU5gt3zWeuu*UHokg`Ktg4aNf?y4 zoAq`DGbGmKpHr0^Qs_-o9octweb2GLlm2h1A0{3suXDCk3*@9A!~;Jykpv|U>edk` zQP8e8y#nf_y)PmGdGgrURG(Az8YClW#7jGoL4sL~)4<6TJrt|>`v-^n znKc`ocK)ADQzEo_ksMTxlazViq$Dj&Ge+R@sZ-K$9Gm+q(Mdcd81GUfKlT9-%yT|WVY0Rt%Sw3Kmmf^C~_;bu1%s^!0wx^g7gPpQ`2PL$Bg&8p?WKSFoyf{0U<{)cH3Y)x)BtpS+o7a)J4rzc^YJ6DU=T{OIBd2r# z)3>YiK&3?(R_4ahhDC^_Ic-Iq6g|#87Z&WkUYnPabLjAtS{R-aF3-i~<$293lwGM) z4-TVu@RAp63&hZRqoDwsv4l?Z^spbqS)n?S3Ra&F>gQ zaA~FJyL_LOBCf|{OZ!%QA{5!2b+>HdH4A%Egft%PDlyP0+!?Z`!sH^bFlWC-& zUFcn~4rb2ctAB`p_EHr-uU1~Myx5o*Nj&BJY`}c3Y{2rOT4=_Vd^kGytI z?~4rTlUa-trj&j9r&d?+c_Uoo0V|Gx?3Qf2?fstr*Nj7#hjYr*LqTdB@khC4G1P;r z*#6^t9cY%%bY0RMSw))Bo`F_R!^X$SckNaKy#QOc%lkUL=;tOXoe5BXrqu=|S<&rf z*(!-~-mH(=6YaI0WO(&;DjyT)mgP;H)3bL6o}7>Oq^D1l7iJ<_rBl5)AY#`uefk_< 
zqUK^g2}D+}_~ma%Eh=-xmgBq6CJ7^k%v|Mo@g%jxTY0=%bxc-W@g97 zPmrP+{;}KtdDT)^Z?Ov!}LC!8L$N&z z^Ek$X=s3TgRmLCm;z)xu_8s{=XK0;6KL+srlh*Qz6jnI$>BMJClM3a1nByK0ulR?dSUZydqmf7bxYo$}O zmI^tPogSn+c#$5;=rxWCQ9byc1+PT$`0ir}k2{ukIy#o7g!`sw1LZ5QljVLPfZQB=aomx6KXs$?z?^&5F&VE(>0x`md!~OA3pVD^!a4 zbK$x_^*@6Yt)XLnbPxw5s(y>_a*uyC-OBWSq+5{A2Vl%TJSQfzU9K2$CB zd+y8Ld&!7z*Mhi;bN*_<%k%xI$#Uq-C3ODF_Ov$6{Z7~c^Fx1cT^5$af?F&ftR)*} zmknQyEw9-~#%tf*+o%F2pDq4Zo_@}a#Ot)r8=zV5T)Q(b7HsB`qQZDB6P zA#_jj(}V}AxxfP%iry`-NM5dnbCb60sIjq+%Uh#Mq)=bzs(w>krh}Yi_UzKVuFsmg z;YbLmdGB6!n2R?CLe_ubOHnLZZ%l#Wvi*Hdt)sTtWdq-j&$}H4{Zb`sv;(b<7eD{+ za=Y<%zFlK)Pefy%J!`CeL_}WK7whIPKPdvjGUS8}`5#;4UwM-ArhEbvDi+U#&RqCG zz!~1H9of*RU|+%xjGb9Es854ztuJ)p46{A!SGXI($gughMsc7jF^s$+jTBF66akkn|S*`t7uldgmklN_{b4~`LclENS z59x4WW-X{Ta`cIr0lNKzK5&B!BUR5%1+SZfJ~cNAWkk``&n;XeE!NhG8xBw5^Ko?> z_EUMU+MKw=`p;{B%i1#O4mw*SywYipAikxYO8Qzk^SRypY}>zkR2X0|-0YfaqZF2D z4{y5)7Z(>=H4pnkMxDeS*5_|^xzL$&`>&QhUy5%ZGG#h2Lh#?F48IG{Gi3Uy-Jd+R z!6zc@e*^RlPBTXX*Q&bN^*%DWqc-9__SLIwnLIDE%Wxl0bm?@ViPrQ161=&WrAFa5 z{ZtAsis_Vo8}X2xtM6|Qg#4-zIT&k&b$|TbmK07Fv~UboxprB(p4P0^8A)``>ro~SSQ>XC=3q6 z@v{4Q_Tg{ES|2G-Z)Q0%%6rJgj`9sGeGMB{1{jks)=+?x<<0N)2A4C;3rrceAbK>{U&VS?kQxoG~ zSC74&WK&oueRU3jRL?|mgi-I{taB~`#`7@E?D*#3@JHVx&<>k|!=87%8ur&!ab;$p zDBGpUNokLIsum_m)Wx=keK-(P)D#W>kkz$g7@d6je_izISB^}mV`|TrK+r#2Izf~< z?2}uexE>Ura}gtYHzJ#wk~8s12-j@q`T18@)YYdw_hLYhMX(;C0Xp6-fZ&05uu9ue zG(lWj8_fOwYCF_(Dmd&Hlbyl4yM%-_)!KI<5Np=&3!y^c0a~BlY!Dgcj4W^IyD(^|4#(`&b3iZ0WbI1@S;FR; zBu2%bsMT)z-KnkTky)0nlS@Q+6;li;Y_I90r>~O^l;FkcO?sstr=`6+)8|rYyWUB` zk^zOR&aqH)W}&x%bn|H7$@darUV9w|Q?u~49v!_^gH0pzm{%784dd@IE``px6K5v9 z-|q^q{aLaNVNa*scy*rE2~w$O+&Apxrr2f%K#Xv?lK9jwrs;QWYqEe~nS%I0*2jwg z-$z)E@yP=O)J2{@JMn0v6w1FP!qq4N-laF+e|d$I%DL7nw}+qH@{-Z|SGH3`XG#9% z!&g~&Rau+?lRoYN>Op8+naiV(Eq)pS$+x3d)uREOfNZFrfN!Q{sO_vO{H;&vt1uRw zx?u2>maS(QcPjVdqC>aArs)E4z<50lQNuGQp0bOsu^c~Eqx1KIJd}Vmi))%L#eN8_ z=?LUju+gkmn{2hrTMg&Htj@=h8^&eq9AgaMJHIh0{pv>sA;Ey-=v!mSgysSh%J_v3 
zoVnaqkSTsWxK3yPODH~s-F4$W`@>8fuJR#itEorks5$PY!#@pQsYb-i{MK?l_xpyD z+bW|$gs)?~#!Ibc%KHf-;XuSat%iVU(BrM8O&7Y;p8?UJ{f7CJFvrkVsL}3tM!vbS zO)*HZlaW&KWNW_cvO+tJ?M?Pph;GIMB^9(Mo=-894ASl(&pm z1L~A1Y5Ip~{bKvgi)wmLX@bBO3hTkaH%mSRxz<4K)CJbW$3H_Iw*I}2RxKdv;l~|U z9}IBP{2iX=!j$>3{ExlSJ_e?B9G^QrcnfNH4rIrs@T-mrv#7erCe7ii{?=;KPzOQ1 z?VQYIoZL#-@tCF!g#~6B+TyCcROL9BCt!Kzyy1@-qU(-qQd`ns*Co!(tmfDg2AKIm_GhY^(mY|1Y)nX-@2= z9?#U@FAxQaSy;{hGYh)^ zjY)(66gK&V108R?W+$mnW;!VS)UKpMe6*(d0eZi0o*`i4UN5`w>fcT=V1N*A8AfBK zpQ8qjrH=gvBRnO-037>waLusH1)P9Gr4mtlZ}(FN#Wvu^Ma0Fa5h}YA(G-Nhn$o}{F+@HO%_BqnkBHFE^4gErT;N*DpW&FL*hZr3>YhHZ3591qo z4iP7WOamGPoHEcTIhUz(L)O7QPHGXv=k=zvYf&_@kjVx9&*DA5lAL^(PeR!~Iee`K zXirKsPr|9#9HWu3>KqQko4VjGz`igtgbW9GAM6FRkIcR(+#ieCMZbHoklwgwjVJ&+ zh)~~l2@$Jz*e-W;N?Fi>dnEu9ez!Qh$up0(+igG>Dk#2VOAjR@j(u9vk0ue=DnL4V z5;JQ?PP@!Riz^t!mL5$#d$&>$%3F5Lm2G};nqxYD){Zap7Ix6Y$zGVCwkNIQ`p`yA zP}h+df;)CdxnJA?FzH(6CS`HNYPZ`^+;Gha%MGK#7(MsOm>DAqM~rWpX}Ki==3dZl zM=K7|4?EZj%b(Ze7#kqT<_iN$2Ih(^DkzO2^JkIJ3IGzq{!pu}5jk>HX}QOruv+la@^O@r|^I>nouTA$ctHFsO@Pe{44J z|C}n}tLb<+)*sE8wBN+fJ;$msH#$$)VcgEJmJftd2E6jD4$k{K-c*~pRSYe6_D4|;kF4K+FI1IZOq)nVJhaUZ9P{kG z16+CW^jk5&IY6NNpKuQ+cmA*3gGoIFwOvFLYHS?`>6$_6LC6WLQyFMSkThUoaXCZbLU8&b>bQZylgyJoUt~4>PTx0a8trn-9tN3xxSPwQ@=kH)9O-6+d+M93l5yy!IL`PQFYFOei`g2341wib!5<(;f40!biUE zK(zYyNvfOx*ZID4(7>OOw|&3)6R&z z!S(TAuFlgG)~#D$leCSK1p!yMUOEoQ5THfc{bU+;Z}C#qe%3*951^a!Qa&WEc4fGz zBB%C;X2`;)?>WVwa6vQ!StblA#z9|+otgHGjL*yrx~}hbQM{dBfl0+MU#GoW-KDwZ zZqN|tKM^(Xf3eS=VqwwgjQU-L`sJnNa1=H=un|P%q?=V2JS+EAm;-6p>v1bqFfWnB z7NTjhA<27A?1eyG?;WJvMaoU!8p%jpN^I6xXrB{~FY!wTr;0v!JHvamUUzhgv-Y)e zadHv}gc~4YB4pIHYmb(u2R+dbKy0oC#xIinfILEIRARm(? 
z{9{!dX=e)yA+@PBc~EGTq8fjh8Z#`P8`!EV`FgE6fd6r$D9qSqSD@Uvu{qS$Ll|Eo zZmkq;M-+?>%Gs*oRh(xfx<>T7ZH~}cKXXue$pv$>&j8u*^t-(BINqB9?wqCrR`e{2 z+h~#M)Y7l;i5ZT~ywbzI>5#X&P8F*Wif?P~+0L5Os?$l_u|bjoO%|k(y83z%K|z)F z_V(as&vbo!DkCBz#a{(K22bd3Z;AUWwR7T#l^8K&_0!f&n2taDSL@F#V|&7+@ytFB zE~|WDs+VV-Y8``&b1NGEsi20aSJHRhm*N2`1c%)x;7l-CvxgdK--k|x&Axjx_wUgY z1q+3W?&ML?V?tulqNus!;cP4{PeWFB;VzJvx8x^ zaL$$UHqSupJeU3w`o)khZ!l@QLGcBP8z|jbgUdCeMW}^2mEfC#9&r5$$d-{DzCjE+E1&h`Ob|`t zyr#3+{2SMfcr}!BpJ9?o>tBWA^)(5XQFLhBN}zgEqq~3X)ad*QYP$*4N?sDp0%!iH zw+C(jcr8kHZW3kKQ9K4v&3g%J$+#|oGv>tnhWPwpQ}cG)hk%g4hE$HYlp^%P1B-rS ze18?}$mMXG3WFc{pZV=5&gqNECJ#OZZ{vZXKVLE_yc@~>H2@oh)}DN3AAvsk#{HCP zNy_fPY_LSZ4UO*TC=dRxWDdHf!E~kx>5!p@JdY*G_P1r! zOEX2zu-~I43AC0r@WdmYGQ8c}>2iS_XS=lslDS4xHV^=bU>_GGig zdu(Ty~k80wFuCw1%8%?qw~^R6^O%2;F8SlT>e@Qt>T>ncJ=>H$BYG{v^#r50!PB}p{!?Z7YF98n=K%_MT? zfAvn{y?f3EyL}*i7}q6I(Y(h>I%ZeDrsnn@MUb9TXrIGUw({u7^pT>iQH^~E2EO8= z_$K@{CW;vt8x-il#80NVs)C9c#>Wm5Oh@*WSG3{~Uy?4yTj<=7M}?BrmV{5Vyfj;IQ?TD|7F3kfD zN@oEl@WV*k0m4v#6&2C4a5tR|>_@<&a@dUZwcF_RJ!0vn6n_036tBL*59N7};+>0q zSZDO(|6>mA^KO~{#KLm*l3jH7O~7y$96GN%{&vxrz@^k&5%BbGZRe3%7Y}~jYh4-> zw0`DYs@6-Sb;a{Vv^nuBEKvq|{T8LHs*0o(xXmbyU8MH)4cG>V0@v&IAZ2FbCqHt!CTPxzZJknRwVde6Pn``OTJ>#VHgY}WB>v|mwPi{q^+UG*Z> z9Z;h(QewZ7WAird3Q4@;P9fPAOBIz$SRyKzNX=?#FRgW7KsHj@FFJ2pxXFkUyOuk~ z=O^3JlzJ;XohMph+}(GG){c8<#x{u~1pL3gSXb;GyqmJ$X8zhjei<-eG<39e1X3tj6kPJGb<=p`e^LSV#2S z>jrfXH|6c$&O57^-egg&xfwSf3^P$1Mt(izbN6R5w??n{lo!x$jdnHZ1xu;X-OJH3 zxT9&BYbBG5&Wce25R-3!y4D%%P~8q@1Z{hMvi>d9ezxQDZ;GAU^yH=F5PQw&FE5ds zYY%AXy}rzgzWKO3J$?P?_;@g2J!N+!KCX8GA2U`r=h;%(KD+r1UWAKae4uqC(63}; z?2X4gcam?|JdJ<6rS{g)>Ml1@PlH$D z<={_pn|UUv5M7F7GVqmcUwWMFzdLvepC(oxzgU5geFYD-mzSy#=e1$)EBkAnuF)=G z8H@vo|Imx?Uj09^Wy0SYM}rs#RSTH4yzoA&8jLTm4FXW2*K+1{@C{+J?o$;U#26_x z4vtj{gii`cDd3Ai$T-BkwfJd*pwh6&#lgefR0=q>u)o4+ z@ijJas~4(nKvH$Ms8nYQEGzI zBC>?5Z>iMo=5}AEQ-=Ys80eWQ@vq$r{M>n8G@0ds(~7Z_Tob{}De;RQ{>1GLbnJrL z?((6#qGDoVxk}fzAAY~BR`~DZapE%|kpFv2CID!$TlOx+i{!9ayQxS0^?Njv#NM+( 
zUz8}2e&mhja+Iv3dIa=&kev&~z=1H{s~Ow;uXHN4X%q0Fa6dhPe;-Ra@_H!R4 z$wDeCdw%umF2zuhci7IM7~cy*6|oWN4k~A2kjmolSY<}kD*>Rx@PJLwFH8TJ6kCk1g{A%A5x<`E-hcrZ~a~LJ;cs=xe&zD%^^6 zUXW6BRC#%d(9m3CFt*$}xhP=%&Hr@dg~* zpz1gP{W@QBXU202Y_$4qc?LXE5u6(0nTwhI+>%C81>sB}Qmr^qEp17?2PZ|NxatHB|Zu;^eJ2Jtl&CpO^kec9G{!|6mq z*7^Y#^i%Ue=Id(+-`1AsM>?EBYURvFcwu+wA~n8 zJ1k}u(#ZbT8xjx@g3VDBdjIlAgW*r}Yp?sx1U2)KD!KQxtn1BpeZ+q&P{goVsT*FPvap|!KNZa065>p$@peltKFxh}D1B$o~z5|XFw*HLf{B81XLoC7 zohTl4nz*6cN1Ooz5<&-O^zLm}Fsz3Ezvo=H4i|5Zyh`;$OZdhSR!5**L!_)`2bpef z4MBTgsRg2Eb0AkjmnGOnSJx|!npC?v8q8pd5ABi8%FYhl5D*YZjJ+BFnkURsZF5Zq zN;`VB5wUK#Gy7`jPf`@}fN)Sp)mSj+Ot>loS+@lN$pM{p9(=tSzckJgfnmm9m@%>% zAl-WcV-R2*qt_L_6{eqBI%~fRdhBvoV|@i2qkn#-np`rx_W1v|k~B+w+rGP}!Teh} z)(urq(9qq3iO zkOnBkh>IC@GB8C?5X-`HB_MzJY^mW(UN7pNVOmV?!WFwhw-xL`&jlU|xcpGJwp*X) zmAhg*wrmg3y4T>f>U^mfy;pv43hG^wu3lB+*Q`i*5tAdwYb5@Y0^nWTzXn;95jCLh zW!x_fxhXh2n8quwO13P_Q^yH9v@aRp@BYeJojTk~K!jj=+ZrhGg@EknNSo4-7#c(m z=>3e$)N#mW%ovS)zL7`Oo!&9|Y?5l%ur${lAl{miN+i2; zW<4VjS4TgqA_KZi=t#CMbWsJbs)AERi*lbKqZj~aTEu&GZS_tK!%ZcyG@+NAk~l>t z9=ZySm~~bBQ5=($B&udoXsgK;zFie{?p%~W-N(7QgDel@1J|HBl0Llb!oq?Tn;9`^ zHNd3rjHyHq7jJs}E>0{Vpm^FF5HIZJtGPs^rL$&Vxk#6a$OQ2ybu!YRbM*Q1?iY#G z2#JZh$x%{FhQsn+qETe%c#iK7(D4(q`pGmf48yr=)E}c2CyCSlc6MAs-A9(Rf4*H2 z&MV_qgO&8no1IGAoD8*0q}1TpMAEJkwo3QCtj+Pw+qT-;<1NzTEimWM`~&6pliC(EX7s{K_*bt9sx@QpqPu4jqoD`Bd|x7Uig(#1 zQT%SgGva{fgMUv2FxyI$j(EiAWb0@`0N2N^s+svU0kzWQZhiaQxFdifJMY5Sd27MH%M^u9?fzwWGd0!d9uM%pPBK* z3OWyMPl%;t^2iZ26ov@Q#Vpi=%RM>>MKw%!-nG zl(MQN)%0ew4-=g`#o|kx@?xgVPDeIymd6}#IACQ6dK=H7_=ra877V$#*K+pZ{3AJ@ z|2*8o-Fkn6dj#)JMT4d3cNPa?FlF_>XG=zw^OwwXT|}5oW0$Na3FveA8SiDp8aO@P zWPW@cj=0-{Jul(Dj6{^?{$CWeX{oedUmRbSmrV)Dw{6C>IiTa-1=s?1UmQ9cK2^|N z<{Im@aCOrZdO~+Y>uboy04TX34K=sV7mR&LWhFy<2DssM2AxdPoU1L(tV|?cK&ldkwq25@!A@ZK*fbsGUu!52++Nwb z!9IVZql0&FGL)4c+_skTUUf>V>oVeCq;N)ztqb&qI|7K3ia4TkZ9|Y=cMHqOU8O1e z4F02nm^M20-bp;1fUJ4&44$M@X0itKIU&^6}Jyy+mkM= z$WqqG_4lj!xaKjez76QmO+et>yXZihSu9_ys+kB|=5K%PQq#We&}IhjPX8ARZpFm^ 
z60jjtst0SeNhgWXG|g4)I;$Ol&z$?I3XO#B#$dfI8#@Nja4G8J^;_)Had5OCFF_9_ z>IyB_BM@35As(Ode=80}4)oYWxa($=fJM)IRx~LSnn}Qqef{LS$}#yUo`hVE7=Pl` z6ZhXS99c+;kM)t~`}ISAh;uNFsVT_M$vL)y2!= zu*IbA1&x1EopnX6`tx8iV>mIY?w;q~TAThT9E7J}9&js7=gtVVg z;7#M-J&&~HXA&NO`>3O#nUSZp*AzK}pQc2R{#yhTN}Vx>W=zAh90JMXz@?mh+=p*; z=+e_}{haNuOPN&c{5V;6c@_&J62O=NTaToK59{q}>X=J%7@gw|LRA7gYp>%Wj{F8) z#sb2eFAxyCS^ixiv}Smc-SNVfuA7N<|2{S_OJMVyis^Hv{dvqkYpTDV;QeB!GA3q)Nh`F82J@v8#as1M^w(BA*3wVx0>t_|#+H`<064V`=)u}+_cRa2-NvB(x3 zzg2AizbpgtGLD0%@4u}Z5mc;5NT-DrpF^%WP;v&$QWd*zmBQtVEqx@%QQ|Gr3WjX; z>7y!F!FWW}b@fGRQN-IIkRxvbC5&zy_2%GgA7A`tM>jLt(p_hFNglWpMJC5d`jpQ2 zCTNTQsh>rG;0ztk?OlV_Y0z|ynM>X>9%gj}APB*JS|uKgMi?fi;GX0u5c! z@g|=~jl~+RW|5ZUj3}>~MzY2!dGTT)J?Z-QK1~S0Es2Vv!%#^+} zQX2eP<&%B*zUGu8TsdHRYLk5wFLW!)&>wHzW-hB^hD;Qgywxz=wlE9B91=7`pvh(n ztKfK9Vr-9M!O4rJ^ zh?lbn>K6kD3*K-IWLC%1x@?dvk>eKu!~#NS!b2fa5e?kFb}e8Uwp-!UG*#3_e1Xq1 zS5+N@Fn0)(jq3_<^d__OPA}BO_n5rFDci+2>7P>rloq|<##QpMVJ|{uBjqS``$q^M z&P1f(K&Ao!bmJ;%7dXRQpJjW9x9lE45C_HCn+6H$F{`KXW8a(2q+#2|oVRCJwwChiG)3-!GcA(r9+~~#GQJrQa zuOEuh>NcDjL-r{WUvv+QQk3bl{khN{Bm{yh@N5tgJ(2`T{3#?vcI?%PMFH4GlyyK| z;N8RG?7L#zcCa1#?^B3=ZQXFNu9PGm(&=Z?WPE2j*xp!mjXr&8Y6uMjcy^iICEjtB zGQK%Xzfpg?gaEF-Q5zini?K6G0R_nVI0dSB6fG_nP~~J&T?&J0c_pI-Lae3ItUj25 z!2xgKuyjukLN)Leh@(NztL?+ReKjM0?v4af5X|$FDf-FuKwndutf+>Ce;k0746lS( zlzduV!IU-0k;xrjVMAQKi$e^5fbj){{~mtQ{!$m94$w| z6@_h{G7}|bN`{P4uVkhSnZmgqdQ0bjzVDpxtZ#j5oprv}e&4n2;n~k`xbOSAuY35N z)3CO=QX-qvp}(EgH9^)n)H*pnC<=ZClz|f1MuzH>M6imwOfDu zi7@Rg*ER{FSx3WeHKG;)(N!)C;2 z(j?Nug^|@9;Zeh00joC#mTaPP@IJCJPxFa@3MxzqvIrEor)C|OxpwE!Bznx)f(P%e zPW5D*vWo3}KiI|?`%rK5(u*85=ZaTOnU=Rc@ke52Kzz6jiX(g@;(u3L!6>)JCT>pao@oxfRJSR4fBy>xo;Pc}?=qB_`Pw%2 zWGTLE)Z^ci9Y~OtM46+H?!?3G>+LkS+kG5OzYNl-w3%-XhqCrXxRCRQ+ry`OiA0ti zo()!IU7~BIb|5c9&8S>zXxr6C7>f-U_6g^aI%D-YepB4VToI`?)1xXTau150%8*Bf zKhv${h_RC~=-=dSeEpwbIyQPxj3kdO+B36?NXOeP}{ zm9rYhHW5i7*dV*Oc_`dH`_UQ6l&t*XB|lmD;0-xnBw9)DjCqjdT>}bFz;sFsnY0mL zfo!A$KcUiD9eOiUPP~LHnrpJredQn6@rVOrC;~sO#?VRbk$ZoknA|Z+m(jj8Uvo5n 
zy5GpO7|`RrsCo63w-NzE)a?dS(@$wA@mRUIkQ{j;*_HX4PQ2yYyd9qH^^|iws%S4X zQqmh?)YH-Rt&2BW?}Zxhm!E1&$?p0JuV>CtZ!U4OT_%#&6v~~o)lw*6VUfYm!zA5L z6famEs;}J>9olkY><)=iW$kNi0B^0Dd?-Ggl#%+J4@(N*3t5I-ag+9S-yyP_#6Rgq)sN;**zbI!uEs`j%Pg+z%hyS#EGE?!;>9sVb4oS14n6 z6xdUg^fpZGLB`KK$e8Jq%<`JGCI3K$#G#tVIq$sjG%acbXhgl635vW)gc}=f3jCy7p zmAdt)Hs_7|Fq9h_{4EH~6;Zu}4VRl{@p$R8&L=!uOo`?_C<&nqK$N_)S%0mUyB><} z9LL8kw>mUfi0}Q*;bQ%!=cYMD_i5+yEk#c_Po$}cPY%DIO306CZf>qdokx{hn1gaf zjFo&e-9scK64ODX$n&^lg%vPT@^-~&Z>r8TTD(y#&%?t5xiy31&P1NgiQPzZ8@wP! zB=Tlz$sRo_#NM>HVi_C~=!crt2N4k_7V=C)v#-YYCXX&0IUhx5&><5iuq!*gMQbc+ zX?kxQRDz9e{)1sRKAmbxZ{w?RqjUtf7lON5yS8)mj5JQ2y7Nd+oBD&C>BeZmm>YsUi;?RVktE z52))`9bF==f>JFmf&Hw*qfi4iXm*F9Baxajyqh8g{xzb7;)F<(-F#bi4l^2Ga1c?B6je60AN3K60LaKX-_Na^AUlp|sSS~%Yi z$LVsf=F-@MBXXylbCW(Y!_&YHv$ee@NAz6*;3L9yQ$e;wHB@zShzUwv>~>kqI+m#n z@H_GoN>6q~+va{D)JJ63;qL0;$DA#qB!TtzvcK=$zoOmh%|=>FnbM$eW>vMn#fzCm z+>o*6KPJ|puU%kF1~hQZ&oAUhcD~=6Lx-O*k2y=UqkQ$@!vlv8ALiuYQBzX7LqkJD z^Z*Qdt6nR;itCyz%}6@$tL{6zuLx{_nmyDjsJVp=1`}z{CStj_rSf&L z0smIv*>d?QmbvtK&23A4{G0j$O52sCq(&07)MyzrMq?P@%x2P1AuypG)MIS^70^UDWFyOtIdz*@A|#n!v)Zn9@7LqzPW3 zs@-(yxfAkk$m*O;9~%(oZpxIA+3~Qer-LiT?kX}>QC)tdyeV0g7bR*rvHjQ=NT&ei z5y&tI<7|;5(bN0^OSBywvmA%}pQQP{=NP9xi_Fm~h_LOhjZD|%H?x~ws$Tvx)I#N0 z!Zo>AT1Ab^=i-yWkEoy^It?LDx#-LtJaJeqA0k@t%+YtaD(72JVy_(faVY`rt*+?j z&W_8`Y$nPduw6>f59}XKE=Pe> zA>4oO_a>&kTx9{7J~`;p3oYdydmMd z(w)XT$cjbp^Pu~4A-jb8r3JB zcAN<+PVKC|uQ}eQOb|VdX+|;4JLyfjYohn9Y6~{4Mp)N4ZNs$i)l+mlNL)cbD9CNJ z7(Xg9rQ%i#+jYE0>k=K^Xtv!Uv9~6l86RpoD0`iA?QsAra#eG=Txvq+LmB%lRgCEM z`Ta4KDl89*s2)vf#%P*>ZADbrOih`q!6l(&${|ju`cH@#15#xDH>`5nwJ+c3998!i zg&?EdqsM4dvZ1I0Vp)y;2r#m&hiyFroF`Jaq$ZFkIs zp)XYGYb$KH!#z}Ca2tX3UxknJWhTY+krvTt49bsb&43?^gR{=zW!vdmn(&v(dHq!7 zGaFdRlUmD$krG8TQvkWc2IB2%h4%Ax^m#+Js|!4Q<^x(zy`&SvgF|fuNN_yUt1|J$ zw~nrrns@Bo`h7R$D2c|Aj^DfDCUxBi*jl@fD3LYG9op|AW`46fU*PY1rpw}z?9gLR z?c1Uh{%@ggszTDc+B=FVrBl(zeyRG6j?0$@#lBB*3pSlCBXrhDzZg#J7^2O|4_|U) zj@vcmt*j_jK&l{uH|U(uokFP|Y3<^TvDFN)W13d7&eM%8=9$Xv?{^U|pbt(Sav?^d 
zu;QOBMsnxGTodLtCr8Y*HBVbEVE3KUcHinQ)nadkTH*vW#=lqKf^w; zb)2d9zsh-}R{E>>*M9>yMRlKk9MtCZ+g!QxrMgVomERXQdAvXWQ)+-GGEh=Tg~grjTCRH4zjg#ml16=Tc&Hx(oqwP2Nw^IxA7^Voe~Vc z_5#V`D$cBWJf=2cVA-f(A`FPXm%HcOOcq0jk$&xioznYCW@4ny$xd_)yi%AkR4wRK z{Cm{6@V&<1wu@kAUPL&8$qHTLq(yXw@hw@T;w$AJX&j`hCJY1zObKN9i1Uch%q7ze zt6wblczS8~CQqc2l`jfjKA++ek_OUwdha@)=KJjokCHbG5a#^U&*sHrhlNV^l;{dh z@iS2m&2}&Dh^@0$da<9#HutM5Q%g#Zc(wFt#7&BKuic~)#k-`WM5=Gn!u5XnS>kbw zZG4K($$VnU7521J?D-8vrZvAOpyKu)y>V}fjeF7mrQ%S}vTpfnrsc2COh;>nohF{? z@Ato^Y;5*wsU+E(TsbvmMie0}pjwVjf*7O1uGi3IzbfA^ zFj*+iCx||-vhA!qxFp7ov!Pu$&dw!I%BR`HwWfMt`ryd68c84K*gSMITeDqgmrrEQ zC7;uqQLUQXa6Y+-K~`4wZfyy3Uy1V1{;N9gKkSKaanLX~m^APYXMUlgDok~rms65m zD8N!LvtDRR|GwYDw)o$qYG!m-unDV#qd%8aU->19KH%pg4pmWHS_cPOAfG$)>c`lx z9KVS@_mr>CwMUJ|R;zQlo(e$0pkKuRmqjUZtI%HUgY0JQk+c8wq0CV1yjZz{%17N! z;nkA1cs7>$`@WuQy#9BcQ#00Ei4%OY<)Vuj75kRNZ2w;+o4%kL=>S_XCb?|4XxwbF|f7tNN|NT4)S zWA`Sc9v}f>wm_7Jm4!uhWY5Kw3;&R};<*N2I<+rI@}4Gd5BBNis~E8AG(7$*joIK@ z@&kR!NMfPvT*=4PrzlIYv&+^7{KNyz13I2G(yja5sEWANb;zEstcg|`slUfQT;6NG z5q?hD-a9lK9cue^5hu6j6}PC%O}!UO;8;A+^hOHxo1Ta9CBfqJu_o!tPB2j? 
z2XY7-bON62-I2)$`j(B-)h_?SsaUAqTh`B3QjWn4h0c|Car02LtcW&670_j9cc!kdBEd;oiN?ztdw;zKILQ zq5AfJzSJGUy{?FDklkGMW6lJ%mIXgy5VZ@AytDH)dKvEAc`!OU`jI4x=jyljJ-A^{>R4pBY2KWie)-Fve9R1O zQt|CN>7ntuc`%W*!wW~nZaMq}o$HmfI3ugU$Sx*PF8}%K5jZH3U^-j_J5mFNfF6Oj zMC^Jmgvu->`FsBPSr&0|ZE?rZsz>A=d6Udb-jvkXRx=^D2@jcRcO==Rk z?ml>MlFYo=eP}*p#H78NOFr#PHlsJ`NMqVH^Pzo^&XKdMjcb^A-ssS`EGxFDh-el{ z=otPpB%#q*iuk+O zU|=Pf%Qb(F@Gcs1=;(w(gAgEuTO;`pj4kW~R$le;($!SQ>m-YoKURbU^5UO7HqKj_ z)MfMUheYzf^~ZdZ+!LiOev@(d?16en^!)Ga=>7Bhq;+<`T4~IEZn@}u>xkE`F#I71 zBvRp@!@}hLxj6rQ8$V0g<|g#N7iIni{O{TL|93Vjme2)*(ssmw&b2s<8K)uE#+_6b zY`G9_H_+nJ*rSf+Va+n%Cu5$_>gKy|J7@NKk-@e$5Pa!yxRp39`#iL>uxHQ0k^ z9((eLgtyI~Owz}f(Ae(QMQz`r%vw|185%tT#2KR#D}pYKRgQW9N2W`9HaR3U)RelN*(KuvU zff(%B@*vx)k>18MIl)%v6#Tl5%LwJeg|=J15jdq#+ihyZaUgee`1zu>=bAGt=>nYE zZV7s4=3Tn9@D`J#jxWFCaHn2Hb*|HB8_(ML~OEYcy9@X{Y+S))C zm+A2ic5U@NTD95s1L03u+1RL*7&W+`L1GSr9Jl)1W+ubXTf`(@_{sQhrv;Y|cB4NO zljqN+F6)csurKQ>s8f&Aw5_tGqon6DL}$3qxq&~daPTtv zLl#t&4*PDTqq8mRLanVYOJFJPd!)BP1xG{2w>S-mi{qP^=;g;xA`YRJpyN`~Bckyj z4@Uure|^3v>+hRfQVXK#xYRlyU1O}ev}7g03gJ)Wg7-%>`9}2i_Uh&3ISic@Fs?a@ zP*G$mqpJ5QTRx0v$bxAZ>EV~ZCDxD*loz>dM+U3sq)}0Mk)}=0 zh;!C!Gp!A=iky4*D!EL4uWin>t_?r37H=MZD>?H+xb?{V3Et7Hy^b#O%$YMRWL8#O zgz*@UNJxc>W?cl?k6-`wIwB!%qB|x*syc|@=rJJ?KH}f+VR7X%LQkbduVQbFcbxLS$rQSk;7Px>+0!k3Uy!)ykJHHw3=LyWrkfRq@YX*y zAMShy3~QJg=OZz3)VC&HJDX4QSBp#H7oLC9h8SQwesJgr@r@Ae&yYsp`y(S9cOQY< zf1;D?7>%`lR2Sy*T?4_^+1Xd>Ihl0E10iIOy7zG>Be~);q)=noTc1?SAV((OV3QBt zMb7#XWFjxr9~tJVDQe#z`!(qF=B5o28&`WWJKgP$$8xv1o#^hPoWVw!Jw;EwhjXz|i zDJv_dT6f61O!lQ?+{6z~40l;KTRIQEVJE8j2{tuWBZby`90$C=pBimVuDE>OSx7!c=y?3r{;$a7mGld;YJJIs)G8SrtxQc5#SHRP>~8 zDt137;|g!p6DM|&Z{E5U_xBy-cU1op`uS<=Lb?_EX{`(9hXHVDca_8c?I8S`;{O~h z%JyQ97nN~w1uqOJ&RiETYtEovyTrU0fg9h3W3KD}wWNSVs?lmLw#+-CG~fwLeR*e$Fl@EnVcV zi6)->*SV4YWqo!%(2;5&feg}{DCuU}1iESjDxj=g4ewM8+H<12t8>&h7MmV(B5dvlaQL*9_5Vqq% z!4^9Gz@e&4lcNK5gmQtDzWHRa3-%KFsd~{`F^jTsSW4@W?piq@N(e4xpe0wpVeqpo zqX3C?BIiPqjQ95FEZd%vvnd=B5<0+`l|2dhJjk~XYDGd*bY!Tqyu3FEZD=}vtItQV 
zJK||%d@Kz2d5>@`vCz37#yHYdeFSGAD~5>J*8DWjo}0LpWo2(}+I`}omlw7u8$9(c ze|4RAP)C1}jazauVfvyA3#9;WbiqU*RU=5;=@~{wIi1-$)o;I*@2nSo6nq8x01l7T z=@J?30Pk7DRdO+>%t@r>=5@Mmxh;-8+`vn~#66P(`I29pn~N8aq9g)%w5bagE$w`M zaZyP{#hNJ|-E6x!C|*`1Bb=TlOauU`56H8L(Sd~5ua6!N5ut$9)GPL`4Y6<0IPA2L zbl}1`02jY;%`VvDG?O~{<3arIrl%$a3@ZrsR~u-;u_MG@YG~QQ1*@4Pv}oDoRng>R zVx$Lc;hkNE$^{Ym2okn_Dq`EM>fv<70`y4KA-CzFu#vZJ(<<6oHt)cc($Uf~lO16z zY+GGMqtz31uqV`N*Ps??&&Zbt=El=M#wR`uxJG^{e{fBwHO0_p=~~9v_R_!}YD|1p z_CVgYp#3frHaNG0lY@iY>N**>kB^Ua_KD+Ye_T}5F4GTL6x7T6=ZZoM;u7+wHK!&= z>k{0iOmZ=k#jE9KC3~Vi-z{G=_1q(0-wnB=ElyiSY|eRfVG*fcNwzPpuEr#SPpmDw z&sY0Ij-FhF`L0bfQD=~FJ^@W3YSd1Ks<1<>qN=JY)x1?Acc?-%3s_6KnlRV>AF_Ks z7F-V6t9KQ~k-4kG?Pc)&`_aPIU!t%k2pF9JdRO;jJDzIxK|~|&J!b>^6(-G*1^yAK zsi3tcV_CW~J3?1_JZ0$uo&}3ZDyQ%bGVBR z0Xd%+oJ~8=}??zTE_U72R@$G4C<6b z3t1`)(3l%31Dl7dm)E2<@wD`lb{4X){9AQhmn8=d9a`KG<|bj$d%_*c!ExVqN1{pU~p{qn}D?LxPE;0qE*jz!HX~y4PZ8P5ksb+NS z+zAc(5(u@VsN>6*WYD7SH2D#5d-ZrSJ}$)B%??`2WZo#5bA&NKJN+XHoVKGBCdOU6@AeY&ekE@^p989wm+T3VlU<4IE2T;%uUISgl>|jChDD3?vS$cn ziJqv>uw*8U^wdQ{Jv7@?9~Xn}Iu;`0e~0;q)wd#gKEsOOQ_c+{jP&$JY;0^itS54` zeIrWNZ`$+<@_OUettTKPQjMywO1Ml^LTE7uhr?xVT>xUetb6o3pEc#kCV8^|XGkdN z(BZ?Bdfo3!-!4nW%<&?FEeBfS(4j+LKl>>xZBj^kcDaXG_;x4|u7Ug+iMC>&wPrJJ z`{rgEUMgv~PR>1W3w|6bPZO~xpP4qjda+jW*ujGfpm+>%46=Jq1`E0NI2x6m$h2Z5 zlgZ9d>pzCVW>}b+FK-nvu8cb==L_EaWPm^R0Oyl2cs@HvP@_q48C1MeuQUtk;5vQBl{~LoZyq#^`t*jwO7)!m)2{ znb;XZj>Effm4v^Kv67V!e<`~!4UWdBIb*L$U3@#H0q#5fs?B@cZ_u(=VZvxuu8a{m z-gRYnurDVWW_5v(O{b#A6chsq8a%Sf%9n-A{tQlle}JNND_e-2})ecDaK30jnso1N^d zngl269bgCc@fIPA9kgu6gm}KFnmD<-N+2}_8+V*d!8gMCTvyc-?8ALFot0L7*VZyE z^`?`y@2Xnjo@w1t1>u{O=sU@Ve=8{|y(}(fs%r&%MuH!-$a%(93^nyQ1Lfl#3CYhx zc4S}9VtDAnm|bzF@4MAkX@3#SA73pO$YTJrE;jb@GFH_14WV=HGV~1*D5+leeG9xu z*-|Vr!vLoK2Ntp%vc;ilFZT7qQ&5j^9fa%i&X#Y5b-jM;R{QkifMlct%mv@v*52zl ze2&EEI(B+`vUO%@Q(c1Y+m|;R0?lVDro}J}v#;|Dm+RpG$Zk zX9y=3KGe>(qu7el93&Kc?8ArdK)yi@DTXO}#cL`gXPlw1-hdq@V`Zr#;;=w!va&P) z!QuwZ*#DQCXaeu=)mzG&^K~KY-Cz6m`RYz}tBZZF*8|3z-fqAJv)pb&6Zza$3s~WC 
zYU*8jUFSQgsr-at-?8o?klLy(`>sHrsSBM&;Mj{vG8B@xT*xlC6|6&Md%#IfK3n{3 z3%C~Y@#F26*GlZ*<@Et#wy343?>$$!qaVPp?Qc&{&--Ey`*`zz?9+7}XNBq10#oe; z0WRgqj-}~8@ThFoPC$$NeC&*rp|J1x{Wsq5FaMuEQ~dwl3tcQ{%kCY-+!BcdAvfoS zTk+S*$hl`yT3En~2R^3i(lEdh{Kd-Z0pJkkH-5xxGrslFgI;I(GX_{9cm{}1NGmD5 z6V8mE{qBYX7yg;Ci0<%%mzPpPq|#L2zXDYrl#-I7L@IXyFCDV8+YeRI4*x@>XDC4{ zbAvmNcGlu1(6@5XH_&rC#F~o(XBVQY`P&T^A2v%V#Fu>i~d=` zkfdcy&bJTjH~<|FV;-~TEQSXf1gb?N&6twtf*0}U!iyw4(L?lrufXuFzTv6*UWsg!*QwuFIde(Kz_vpP^wT4mb z^}W4%8DP+(psn_>yKmt;yKed7bB(E8mevsw5qN&-NI5t3)Hbt>i-5iBX=$ZF-Z_As zExk2fEIEA;FzC9cXE^*Io9>z$Fc*^%qstRP?Dgji$%C6F3?Gthh&O>XtUkZKLsM+i zRpo)9VyU6@2rG$i6IX`+o$CYY20VjL-!q})!u*CRhTFlsz z=ixpQvXSy3oWX_wL#wv(UkA!p%(gRyX0L}ig{dlgAm{@p)f%n=lb~5^o(oiLAlGZ^ z$#fnVTZ8ez_T9R9!9W=M%v;3%VrJ&b`HsNA5h$6lzdd=cdwH>Na&~)7Z}O3x7E%!V zwiuCzhag0@3t711?r#SNA2T-Qyu57f*^kdJmIVv@ef??y=HM`(&HelL!9ZVzBWDQ@ zoJr6edlIxK6IA!wXGJex-VO{ra`r46KK5g3z>Qs8JR~C{gT@+3k=T{@@wN4gjPk0g z8xe`x!Nuhn8@qeIc}p3Fb@>}OfTs`(XW-_$ckiyle8YFC=p0!173@THl3uZuDO`gE zA3@OSgW4w{B%^fV#8G%GfN$hI&!DIaHqT9WN)f|Pfwg&s8!rNCil)SDf_DG(t(a-; zcl0!?j8tGEOd0maX1=p!D>}C9(_da5B%p^mC*}bh1HpY@?_Mra!rkr?dVDT;FAvB(UBMMUwyhc6Y!!lb}S}#kvm-CD|hd1B*KR1{`RJ{RBvW_iU7=7*5v}o znIEc@UaO?6Y}MRMtPki57FO18fa}D>>*z3IIA`8HK8zRdz_OdWO*YYf5i}^-s1|#o zz-tR1Xb4GU7Q?N)e9}CZ&fT)@Rg%Ndms3CBLX-jbNCO%!xCc|v>O6Ejvra)tRTT=! 
zE>j)LY!T~DjWzA42qA43cTCmlMHghZs*yPbB7e<8Wlm+#7}qC$BI|;1kTffU&sKr( z zSVN>z`2_`Onw9l7q-emG$sa4?J&|-~2^e=di}Wb)di~i05=gRJv3TK+K6AH~{T9P| zNJx%|jI54P=l$!izewlczA7|2aKh?g2Mt0a+yNK2@b>N7&p}Vf0{dfaKi*&uJq6|j z#*{^9lr>PR5F2JeeV&VGjCul(pgA^l%GvVAu2U1SCz389?pFg=#75I^eJI6A2UUgIv{-AAGl zi!=-fqvh0)_m`B*h$6k zJ&(%CEh0w1!p0UloQ0TEoW-?BIo{m6SfoXZS9+BYZYGIv5em*cVKS+Hx*9A6bRBcS zrFf>Eh7@+NjuJ+$kyeP0?mhGJdRwF3nKOj0TX6K)vCl-1ljkW$p@oe|I$xjoNK3at z9PmppW3t+Z7$F!5F9p5O~+Syy3XGY0~!_M;)Fnw;I1D?{Mw}tJnvYQM?kC7FP3^U zipxUm5!TW-Fj!FR-8c#F*17N-H2=7IsY}E(Wg$LW(815E(j;qGOrW^f9fOPEJov>UBT4zEyUmz++DxtF`NIOEg z6Lto`Pqy^%>~{Cy`3D);sKMpi3rRD}9Z@5}zlIbI8r?dM`aT0)C8hPaH^S)mHl_Q3 zSaEN=c}xB?8d}mlQ%|j~RiHrM(|-53b}h`V>|OYg(zJobw3Y5~=u_Ngrd7UJK6VoG zGYR8@qJSe53PWdEM5n&E2{H5~2OyDLtZV12%dfk653?mcHdGNJ+K_Cp5{eXi6M@Dx z(?3AZRHhidj-Qm?jdKb7p7&gx+Ww6-H+cf(?CBQ868sR}2N0 z40G1j=y~%d=lQp#r7Ic5ms?=hS_g&A{v3_A096E$7~+qqfyuEpABWF(j*y_wSAki9 z*PU!s%?A42q2wEp71C=`OQl=Cemx0%OC_Ogkd!}YU7heu#HN$`;0r}f&8^P_xyV{NBSLJg2WD|67F)YO!P3*`FU<>>5hQ(pcfrBl11eulfM zuR;I8WF2928CD{Q5r`=1-hR`q3Ax`J$sO>|4B&4Sr@0}YqAX03$sJ5{wpJ_b$bzE^ z3b2|}5kmt_=>w?)ks=(~#fWEI0XPZe-!CaS6FX3`k(PGR0hADH+&3qT1JXIb98@P8 zl%AR-wreM%;ToCVvr|;n1q!IWsl7Xb*e=yWV=s zLSaI<(#<|xz^fPaM7K}D$9xK>aO0*;RbcL}u3^wLJC^S{l>wfQgPWTbR-G^+V0KQq z0UuzVS8dzxg}F;{Y4U$;-BAG&W*y-V??r4Q53J)UH@NKxM|d%c2LbPb!wOAtoGRtb z-|i0g-p|i(D5c{h+*3$50m@Juaz6d%5GWpBsqzQrMpJtP8%1GD%G_3D`XS8Cz1qF2 zI@0gny%Rj&!6SBjuy1XL@d*bzr?0QScx8<1E^h8ykimCYd*izJ<6r|}E*I{AD+wQb zvArZGcI_*Zgj^XQDh(_&(ib)p|Lxtow= zf_yB9{|53}XxFbl2zdd-EvRSO#2m7kFwxL0u{yayaE`m{5)}0HSqOwY`DTcq@Ja37 zt)f<+mjjsW6cG`*;5E0el?^P430e-1S+##RA-U}%R;K{*Cem|+p%k!td>-(^fd2onGZVhub)j0GY< zh#FYo9Xoe^iq**mM&Et%nHR!qck1g;An)rqoEeYkJ+{#i9VS>UnQqGdjf5`F&Z=z-@HTMn;N52s3+`P>FpX2k1 z|M&}wW##1LVnkNVzYv*v>_6ku{}*@jU-Ok8O!hXk{;FJ2f<(BMd|3Wayws^n{{yUY Bi~;}v From 43709612fa2ec6a37a1dc38c2f5d57458986090f Mon Sep 17 00:00:00 2001 From: Laurence Date: Thu, 12 Mar 2026 15:16:20 +0800 Subject: [PATCH 07/24] =?UTF-8?q?Replace=20ReLU=C2=B2=20with=20SwiGLU=20ac?= 
=?UTF-8?q?tivation=20in=20MLP?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit SwiGLU (SiLU Gated Linear Unit) is a proven improvement over ReLU² from 'GLU Variants Improve Transformer' paper. Uses gated mechanism with SiLU activation for better model capacity. Changes: - Replace c_fc/c_proj with gate_proj/up_proj/down_proj - Use F.silu(up) * gate instead of ReLU² - Adjust hidden_dim to maintain comparable parameters - Update weight initialization for new layers --- component_system/components/model.py | 88 +++++++++++++++++++++++----- 1 file changed, 72 insertions(+), 16 deletions(-) diff --git a/component_system/components/model.py b/component_system/components/model.py index f74d89386..59c5be10c 100644 --- a/component_system/components/model.py +++ b/component_system/components/model.py @@ -13,12 +13,18 @@ def _get_fa3(): if torch.cuda.is_available(): cap = torch.cuda.get_device_capability() - repo = "varunneal/flash-attention-3" if cap == (9, 0) else "kernels-community/flash-attn3" + repo = ( + "varunneal/flash-attention-3" + if cap == (9, 0) + else "kernels-community/flash-attn3" + ) return get_kernel(repo).flash_attn_interface return None + _fa3 = None + def get_fa3(): global _fa3 if _fa3 is None: @@ -45,7 +51,9 @@ def has_ve(layer_idx: int, n_layer: int) -> bool: return layer_idx % 2 == (n_layer - 1) % 2 -def apply_rotary_emb(x: torch.Tensor, cos: torch.Tensor, sin: torch.Tensor) -> torch.Tensor: +def apply_rotary_emb( + x: torch.Tensor, cos: torch.Tensor, sin: torch.Tensor +) -> torch.Tensor: assert x.ndim == 4 d = x.shape[3] // 2 x1, x2 = x[..., :d], x[..., d:] @@ -98,7 +106,9 @@ def forward( fa3 = get_fa3() if fa3 is None: - raise RuntimeError("Flash Attention 3 is unavailable; component_system model should match train.py and requires the same kernel path.") + raise RuntimeError( + "Flash Attention 3 is unavailable; component_system model should match train.py and requires the same kernel path." 
+ ) y = fa3.flash_attn_func(q, k, v, causal=True, window_size=window_size) y = y.contiguous().view(batch_size, seq_len, -1) return self.c_proj(y) @@ -107,13 +117,19 @@ def forward( class MLP(nn.Module): def __init__(self, config: GPTConfig) -> None: super().__init__() - self.c_fc = nn.Linear(config.n_embd, 4 * config.n_embd, bias=False) - self.c_proj = nn.Linear(4 * config.n_embd, config.n_embd, bias=False) + # SwiGLU: 2/3 * 4x = ~2.67x expansion for comparable parameters to ReLU^2 + hidden_dim = int(4 * config.n_embd * 2 / 3) + hidden_dim = ((hidden_dim + 127) // 128) * 128 # Round to multiple of 128 + self.gate_proj = nn.Linear(config.n_embd, hidden_dim, bias=False) + self.up_proj = nn.Linear(config.n_embd, hidden_dim, bias=False) + self.down_proj = nn.Linear(hidden_dim, config.n_embd, bias=False) def forward(self, x: torch.Tensor) -> torch.Tensor: - x = self.c_fc(x) - x = F.relu(x).square() - x = self.c_proj(x) + # SwiGLU: gate * SiLU(up_proj) + gate = self.gate_proj(x) + up = self.up_proj(x) + x = gate * F.silu(up) + x = self.down_proj(x) return x @@ -174,8 +190,9 @@ def init_weights(self) -> None: torch.nn.init.uniform_(block.attn.c_k.weight, -scale, scale) torch.nn.init.uniform_(block.attn.c_v.weight, -scale, scale) torch.nn.init.zeros_(block.attn.c_proj.weight) - torch.nn.init.uniform_(block.mlp.c_fc.weight, -scale, scale) - torch.nn.init.zeros_(block.mlp.c_proj.weight) + torch.nn.init.uniform_(block.mlp.gate_proj.weight, -scale, scale) + torch.nn.init.uniform_(block.mlp.up_proj.weight, -scale, scale) + torch.nn.init.zeros_(block.mlp.down_proj.weight) self.resid_lambdas.fill_(1.0) self.x0_lambdas.fill_(0.1) for ve in self.value_embeds.values(): @@ -285,11 +302,46 @@ def setup_optimizer( dmodel_lr_scale = (model_dim / 768) ** -0.5 print(f"Scaling AdamW LRs by 1/sqrt({model_dim}/768) = {dmodel_lr_scale:.6f}") param_groups = [ - dict(kind="adamw", params=lm_head_params, lr=unembedding_lr * dmodel_lr_scale, betas=adam_betas, eps=1e-10, weight_decay=0.0), - 
dict(kind="adamw", params=embedding_params, lr=embedding_lr * dmodel_lr_scale, betas=adam_betas, eps=1e-10, weight_decay=0.0), - dict(kind="adamw", params=value_embeds_params, lr=embedding_lr * dmodel_lr_scale, betas=adam_betas, eps=1e-10, weight_decay=0.0), - dict(kind="adamw", params=resid_params, lr=scalar_lr * 0.01, betas=adam_betas, eps=1e-10, weight_decay=0.0), - dict(kind="adamw", params=x0_params, lr=scalar_lr, betas=(0.96, 0.95), eps=1e-10, weight_decay=0.0), + dict( + kind="adamw", + params=lm_head_params, + lr=unembedding_lr * dmodel_lr_scale, + betas=adam_betas, + eps=1e-10, + weight_decay=0.0, + ), + dict( + kind="adamw", + params=embedding_params, + lr=embedding_lr * dmodel_lr_scale, + betas=adam_betas, + eps=1e-10, + weight_decay=0.0, + ), + dict( + kind="adamw", + params=value_embeds_params, + lr=embedding_lr * dmodel_lr_scale, + betas=adam_betas, + eps=1e-10, + weight_decay=0.0, + ), + dict( + kind="adamw", + params=resid_params, + lr=scalar_lr * 0.01, + betas=adam_betas, + eps=1e-10, + weight_decay=0.0, + ), + dict( + kind="adamw", + params=x0_params, + lr=scalar_lr, + betas=(0.96, 0.95), + eps=1e-10, + weight_decay=0.0, + ), ] for shape in sorted({p.shape for p in matrix_params}): group_params = [p for p in matrix_params if p.shape == shape] @@ -323,7 +375,11 @@ def forward( x0 = x for layer_idx, block in enumerate(self.transformer.h): x = self.resid_lambdas[layer_idx] * x + self.x0_lambdas[layer_idx] * x0 - ve = self.value_embeds[str(layer_idx)](idx) if str(layer_idx) in self.value_embeds else None + ve = ( + self.value_embeds[str(layer_idx)](idx) + if str(layer_idx) in self.value_embeds + else None + ) x = block(x, ve, cos_sin, self.window_sizes[layer_idx]) x = norm(x) logits = self.lm_head(x).float() From ad41c96b672b0fddd3f99ad4a6e3b8289a537e70 Mon Sep 17 00:00:00 2001 From: Laurence Date: Thu, 12 Mar 2026 18:09:56 +0800 Subject: [PATCH 08/24] Update component-system: extend DCA timeout to 900s, add baseline-first sync, track former_val_bpb, 
improve workflow merge order --- component_system/PDCA-DO-CHECK-ACTION.md | 8 +- component_system/PDCA-PLAN.md | 3 + component_system/config.py | 2 +- component_system/domain/models.py | 2 + component_system/protocol.md | 4 +- component_system/repositories/state.py | 46 ++- component_system/run.py | 172 +++++++---- component_system/services/workflow.py | 274 ++++++++++++++---- component_system/task.py | 21 +- component_system/web/app.py | 11 + component_system/web/templates/dashboard.html | 4 +- .../templates/partials/seed_runs_inner.html | 2 +- .../partials/seed_timeline_inner.html | 8 +- 13 files changed, 416 insertions(+), 141 deletions(-) diff --git a/component_system/PDCA-DO-CHECK-ACTION.md b/component_system/PDCA-DO-CHECK-ACTION.md index be1e30780..d90418c9c 100644 --- a/component_system/PDCA-DO-CHECK-ACTION.md +++ b/component_system/PDCA-DO-CHECK-ACTION.md @@ -23,7 +23,7 @@ Retry until the run succeeds and you report real metrics. No empty metrics. ## Workflow 1. Work in the seed worktree (one branch per seed). 2. Adapt/fix until it runs (runtime only: bugs, OOM, imports, config; no model/hyperparameter/training-logic changes for better metrics). -3. Run canonical command (**≥600s**): `timeout 600 uv run --active component_system/entrypoint.py`. **Must set command/tool timeout ≥600s running this command** when invoking this run (so the process is not killed early). +3. Run canonical command (**≥900s**): `timeout 900 uv run --active component_system/entrypoint.py > training.log 2>&1` (or `... 2>&1 | tee training.log` to also see output). **Must set command/tool timeout ≥900s**. After the run, inspect `training.log` to confirm completion and recover or verify metrics. 4. On bug/OOM: fix and rerun; for baseline, retry until success. 5. Commit on seed branch before reporting. 6. Print DCA summary block with `commit_sha` in JSON. @@ -47,17 +47,17 @@ If no final metrics, use `"metrics": {}`. 
Runner extracts from stdout/stderr: `v | `val_bpb` drops >= 0.001 vs baseline | `positive_signal` | | `val_bpb` rises >= 0.001 vs baseline | `negative_signal` | | difference < 0.001 | `neutral` | -| no historical baseline `last_val_bpb` | `positive_signal` (first recording) | +| no historical baseline (best_val_bpb) | `positive_signal` (first recording) | | metrics missing or training error | `error` | The threshold is defined in `component_system/config.py` (`PROMOTION_THRESHOLD`). ## Action: Promotion Rules -Only DCA may trigger a merge into baseline; P must not. Runner records `commit_sha`; on positive signal the workflow merges seed → baseline. Merge conflict → system queues merge-resolution DCA. +Only DCA may trigger a merge into baseline; P must not. Runner records `commit_sha`; on positive signal the workflow merges seed into baseline first, then updates metrics/state. Merge conflict → system queues merge-resolution DCA. ### Promotion (`positive_signal`) -1. System merges seed into baseline (you do not run merge). +1. System merges seed into baseline first (you do not run merge). 2. Workflow updates `baseline_metrics.json` / `baseline_branches.json`. 3. Metadata in seed/run state. diff --git a/component_system/PDCA-PLAN.md b/component_system/PDCA-PLAN.md index bc3393a5b..7564ffeb8 100644 --- a/component_system/PDCA-PLAN.md +++ b/component_system/PDCA-PLAN.md @@ -46,6 +46,9 @@ AUTORESEARCH_P_SUMMARY_BEGIN AUTORESEARCH_P_SUMMARY_END ``` +## Runner / worktree +Before each P run, the runner syncs the seed worktree with its baseline branch (merge baseline into seed) so P always starts from the latest baseline. + ## Steps 1. Read `results.tsv` if present. 2. Refine prompt ? one concrete idea ? one isolated improvement; name target component. 
diff --git a/component_system/config.py b/component_system/config.py index 9975ab2d2..ffe3ef7cb 100644 --- a/component_system/config.py +++ b/component_system/config.py @@ -13,7 +13,7 @@ # Promotion threshold: improve val_bpb by at least this much to promote PROMOTION_THRESHOLD = 0.001 -# Worktree root relative to project (string for display/config compatibility) +# Worktree root relative to project WORKTREE_ROOT = "component_system/history/worktrees" # Default branch name suggested in UI when no branches exist (not a global baseline) diff --git a/component_system/domain/models.py b/component_system/domain/models.py index f03c9a121..d105569e1 100644 --- a/component_system/domain/models.py +++ b/component_system/domain/models.py @@ -71,6 +71,8 @@ class SeedRecord(BaseModel): latest_metrics: dict[str, Any] = Field(default_factory=dict) plan: PlanIdea | None = None last_error: str | None = None + """Baseline val_bpb at sync-before-P time; used for positive/negative/neutral judgement in DCA.""" + former_val_bpb: float | None = None class DashboardColumn(BaseModel): diff --git a/component_system/protocol.md b/component_system/protocol.md index 7c27b126e..56d43941e 100644 --- a/component_system/protocol.md +++ b/component_system/protocol.md @@ -107,7 +107,7 @@ is the seed worktree. In that mode: ## Baseline-First Rule -Establish baseline before evaluating seeds: if `baseline_metrics.json` has no `last_val_bpb`, run the baseline (no-changes) measurement first. Use that result as the reference for promotion. +Establish baseline before evaluating seeds: if `baseline_metrics.json` has no baseline result for the branch (no records), run the baseline (no-changes) measurement first. Use that result as the reference for promotion. ```mermaid flowchart TD @@ -173,7 +173,7 @@ The canonical component-system execution path is: uv run component_system/entrypoint.py ``` -Allow **at least 600 seconds** when DCA runs this (e.g. `timeout 600 uv run ...`). 
+Allow **at least 900 seconds** when DCA runs this (e.g. `timeout 900 uv run ...`). DCA must report a structured JSON summary (including `metrics`). Runner uses it first; falls back to stdout/stderr parsing if missing. No metrics → recovery DCA inspects logs. Canonical metrics: diff --git a/component_system/repositories/state.py b/component_system/repositories/state.py index 7d60ae92a..ef6beec5e 100644 --- a/component_system/repositories/state.py +++ b/component_system/repositories/state.py @@ -28,18 +28,54 @@ def set_branch_for_seed(self, seed_id: str, branch: str) -> None: save_baseline_branch_map(m) +def _branch_metrics_view(history: list[dict[str, Any]]) -> dict[str, Any]: + """Build view with best_val_bpb (min over history) and promoted_* from the record that achieved it.""" + if not history: + return {"best_val_bpb": None, "history": []} + vals = [r["val_bpb"] for r in history if r.get("val_bpb") is not None] + best_val_bpb = min(vals) if vals else None + best_record = next((r for r in history if r.get("val_bpb") == best_val_bpb), history[-1]) + view: dict[str, Any] = { + "best_val_bpb": best_val_bpb, + "history": history, + } + if best_record.get("promoted_branch") is not None: + view["promoted_branch"] = best_record["promoted_branch"] + if best_record.get("promoted_idea") is not None: + view["promoted_idea"] = best_record["promoted_idea"] + if best_record.get("promoted_at") is not None: + view["promoted_at"] = best_record["promoted_at"] + if best_record.get("commit_sha") is not None: + view["commit_sha"] = best_record["commit_sha"] + return view + + class BaselineMetricsRepository: - """Per-baseline-branch metrics (last_val_bpb, promoted_*, commit_sha, etc.).""" + """Per-baseline-branch metrics: list of records per branch (val_bpb, promoted_*, etc.).""" def get_all(self) -> dict[str, dict[str, Any]]: - return load_baseline_metrics() + """Return branch -> view (best_val_bpb, promoted_branch, commit_sha, history) for dashboard.""" + data = 
load_baseline_metrics() + return {branch: _branch_metrics_view(hist) for branch, hist in data.items()} def get_for_branch(self, branch: str) -> dict[str, Any] | None: - return load_baseline_metrics().get(branch) + """Return view for one branch (best_val_bpb, history, promoted_branch?, commit_sha?).""" + data = load_baseline_metrics() + hist = data.get(branch) + if hist is None: + return None + return _branch_metrics_view(hist) + + def append_promotion_for_branch(self, branch: str, record: dict[str, Any]) -> None: + """Append a promotion record: val_bpb, promoted_branch, promoted_idea, promoted_at, commit_sha.""" + data = load_baseline_metrics() + data.setdefault(branch, []).append(dict(record)) + save_baseline_metrics(data) - def update_for_branch(self, branch: str, metrics: dict[str, Any]) -> None: + def append_baseline_run(self, branch: str, val_bpb: float) -> None: + """Append a baseline measurement (no promotion).""" data = load_baseline_metrics() - data[branch] = {**data.get(branch, {}), **metrics} + data.setdefault(branch, []).append({"val_bpb": val_bpb}) save_baseline_metrics(data) diff --git a/component_system/run.py b/component_system/run.py index a0b8c79f4..04452d03c 100644 --- a/component_system/run.py +++ b/component_system/run.py @@ -21,7 +21,11 @@ from typing import Any from component_system.domain.models import StageName -from component_system.services.workflow import BASELINE_SEED_ID, WorkflowService +from component_system.services.workflow import ( + BASELINE_SEED_ID, + SyncResolutionQueued, + WorkflowService, +) from component_system.task import ( BASELINE_BRANCHES_PATH, BASELINE_METRICS_PATH, @@ -48,8 +52,8 @@ DEFAULT_TIMEOUTS = {"p": 900, "dca": 3600, "direct": 3600} -# Canonical DCA entrypoint run: require ≥600s so training can complete. Agent must set command/tool timeout ≥ this. -DCA_CANONICAL_RUN_TIMEOUT_SECONDS = 600 +# Canonical DCA entrypoint run: require ≥900s so training can complete. Agent must set command/tool timeout ≥ this. 
+DCA_CANONICAL_RUN_TIMEOUT_SECONDS = 900 STAGE_DOCS = { "p": ["PDCA-PLAN.md"], @@ -70,7 +74,7 @@ def _signal_handler(_sig: int, _frame: Any) -> None: def _get_timeout(stage: str) -> int: - return int(os.environ.get(f"PDCA_TIMEOUT_{stage.upper()}", DEFAULT_TIMEOUTS.get(stage, 600))) + return int(os.environ.get(f"PDCA_TIMEOUT_{stage.upper()}", DEFAULT_TIMEOUTS.get(stage, 900))) def _build_log_paths(run_id: str) -> tuple[Path, Path]: @@ -108,7 +112,7 @@ def _dca_command_guidance() -> tuple[str, str]: ) return ( f"{timeout_prefix} uv run component_system/entrypoint.py", - "No active root .venv detected; fallback avoids --active so uv can run normally.", + "No active root .venv; uv run without --active.", ) @@ -329,6 +333,64 @@ def _invoke_agent( return process.returncode, stdout, stderr, stdout_path, stderr_path +def _build_metrics_recovery_prompt(task: dict[str, Any]) -> str: + """Lightweight prompt for metrics-recovery DCA: no protocol/docs, just task, log paths, report shape.""" + task_json = json.dumps(task, indent=2) + source_run_id = task.get("source_run_id", "unknown") + stdout_log = task.get("source_stdout_log_path", "missing") + stderr_log = task.get("source_stderr_log_path", "missing") + report_json = json.dumps({ + "checks": ["log_metrics_recovery"], + "notes": "Recovered metrics from saved logs.", + "completed_at": "YYYY-MM-DD HH:MM:SS", + "commit_sha": "", + "metrics": { + "val_bpb": 1.24, + "training_seconds": 300.1, + "total_seconds": 360.4, + "startup_seconds": 25.8, + "peak_vram_mb": 11967.8, + "mfu_percent": 2.15, + "total_tokens_M": 140.5, + "num_steps": 268, + "num_params_M": 11.5, + "depth": 4, + }, + }, indent=2) + return ( + "METRICS RECOVERY (focused task). Do not read protocol or stage docs.\n\n" + "Task (inline):\n" + f"{task_json}\n\n" + "Do not rerun training. Do not edit code. 
Do not create a commit.\n\n" + f"Inspect logs for source run {source_run_id!r}:\n" + f" stdout: {stdout_log}\n" + f" stderr: {stderr_log}\n\n" + "Recover canonical metrics from those logs if present, then print the summary block below. " + "If unrecoverable, use empty \"metrics\": {} and explain in notes.\n\n" + "AUTORESEARCH_DCA_SUMMARY_BEGIN\n" + f"{report_json}\n" + "AUTORESEARCH_DCA_SUMMARY_END\n" + ) + + +def _build_sync_resolution_prompt(task: dict[str, Any]) -> str: + """Prompt for sync-resolution: merge baseline into seed in the seed worktree, resolve conflicts, commit.""" + baseline_branch = task.get("baseline_branch", "master") + seed_id = task.get("seed_id", "") + return ( + "SYNC RESOLUTION (merge baseline into seed). You are in the seed worktree; the current branch is the seed branch.\n\n" + "The run could not sync this worktree with the baseline branch because the merge had conflicts.\n\n" + "Steps:\n" + f"1. Merge the baseline branch into the current branch: git merge {baseline_branch!r}\n" + "2. Resolve any conflicts, then commit the merge (e.g. git add . && git commit -m 'Merge baseline into seed').\n" + "3. Do not run the training entrypoint.\n" + "4. Print the following block so the runner can confirm success:\n\n" + "AUTORESEARCH_DCA_SUMMARY_BEGIN\n" + '{"checks":["sync_resolution"],"notes":"Merged baseline into seed; conflicts resolved.","completed_at":"YYYY-MM-DD HH:MM:SS","commit_sha":"","metrics":{}}\n' + "AUTORESEARCH_DCA_SUMMARY_END\n" + ) + + def _build_merge_resolution_prompt(task: dict[str, Any]) -> str: """Lightweight prompt for merge-resolution DCA: no protocol/docs, just commit, merge, report.""" task_json = json.dumps(task, indent=2) @@ -367,23 +429,27 @@ def _build_merge_resolution_prompt(task: dict[str, Any]) -> str: "4. Print the DCA summary block below (same metrics as the previous run). 
Include the current commit SHA (after committing the merge) in the DCA summary JSON.\n\n" ) else: - # Normal seed: merge the baseline branch (__baseline__) INTO the seed worktree so the seed is up to date. + # Normal seed: we need to merge the SEED branch INTO the baseline branch (so baseline gets the seed's changes). + # Do NOT merge baseline into seed — that is the wrong direction. Work in the project root on the baseline branch. if worktree_path: cwd_note = ( "Your working directory is the project root. " - f"The seed worktree is at {worktree_path!r}; run git commands from that directory (e.g. cd there first).\n\n" + f"The seed worktree is at {worktree_path!r} (use it only to commit any pending changes on the seed branch).\n\n" ) else: cwd_note = ( "Your working directory is the project root. " - f"The seed worktree is at component_system/history/worktrees/{seed_id!r}; run git commands from that directory for the merge.\n\n" + f"The seed worktree is at component_system/history/worktrees/{seed_id!r} (use it only to commit any pending changes).\n\n" ) steps = ( "Steps:\n" - "1. Commit any uncommitted changes in the seed worktree (e.g. batch-size or other fixes).\n" - f"2. In the seed worktree, merge the baseline branch into the current branch: git merge {BASELINE_SEED_ID!r}. Resolve any conflicts, then commit the merge.\n" + "1. Commit any uncommitted changes in the seed worktree so the seed branch is complete.\n" + f"2. In the project root (main repo): checkout the baseline branch, then merge the seed branch into it:\n" + f" git checkout {target_branch!r}\n" + f" git merge {seed_id!r}\n" + " Resolve any conflicts, then commit the merge. The result must be: the baseline branch contains the seed's changes (merge direction: seed → baseline).\n" "3. Do not run the training entrypoint; the experiment already completed and metrics exist.\n" - "4. Print the DCA summary block below (same metrics as the previous run). 
Include the current commit SHA (after committing the merge) in the DCA summary JSON.\n\n" + "4. Print the DCA summary block below (same metrics as the previous run). Use the merge commit SHA from the baseline branch (after the merge, from project root: git rev-parse HEAD).\n\n" ) return ( @@ -401,7 +467,7 @@ def _build_merge_resolution_prompt(task: dict[str, Any]) -> str: def _build_prompt(stage: str, task: dict[str, Any], task_path: Path) -> str: """Build the agent prompt for a stage. Prompt types (by weight): - P: full header (protocol, stage doc, baseline files, task) + P workflow. Heavy. - - DCA metrics_recovery: full header + log-recovery instructions. Heavy. + - DCA metrics_recovery: lightweight; task + log paths, report shape (no protocol/docs). Light. - DCA merge_resolution: lightweight; task + commit, merge, report (no protocol/docs). Light. - DCA baseline_measurement: full header + baseline retry/OOM/commit/run. Heavy. - DCA normal: full header + adapt/run/commit/report. Heavy. 
@@ -472,38 +538,26 @@ def _build_prompt(stage: str, task: dict[str, Any], task_path: Path) -> str: "Do not merge branches; only the DCA stage may trigger a merge into baseline.\n" ) if stage == "dca": + sync_resolution = task.get("sync_resolution") is True merge_resolution = task.get("merge_resolution") is True metrics_recovery = task.get("metrics_recovery") is True + if sync_resolution: + return _build_sync_resolution_prompt(task) if merge_resolution: return _build_merge_resolution_prompt(task) + if metrics_recovery: + return _build_metrics_recovery_prompt(task) dca_cmd, dca_note = _dca_command_guidance() baseline_measurement = task.get("seed_id") == "__baseline__" conflict_block = "" - if metrics_recovery: - source_run_id = task.get("source_run_id", "unknown") - stdout_log = task.get("source_stdout_log_path", "missing") - stderr_log = task.get("source_stderr_log_path", "missing") - return header + ( - "METRICS RECOVERY: The previous DCA run completed, but the runner could not confirm metrics from its final report.\n" - "Do not rerun training. Do not edit code. 
Do not create a commit.\n" - f"Inspect the saved logs for source run {source_run_id!r}:\n" - f"- stdout log: {stdout_log}\n" - f"- stderr log: {stderr_log}\n" - "Recover the canonical metrics from those logs if they are present, then print the final JSON summary.\n" - "Use this exact shape:\n" - "AUTORESEARCH_DCA_SUMMARY_BEGIN\n" - '{"checks":["log_metrics_recovery"],"notes":"Recovered metrics from saved logs.","completed_at":"YYYY-MM-DD HH:MM:SS","commit_sha":"","metrics":{"val_bpb":1.239972,"training_seconds":300.1,"total_seconds":360.4,"startup_seconds":25.8,"peak_vram_mb":11967.8,"mfu_percent":2.15,"total_tokens_M":140.5,"num_steps":268,"num_params_M":11.5,"depth":4}}\n' - "AUTORESEARCH_DCA_SUMMARY_END\n" - "If you still cannot recover metrics, print the same object with an empty metrics object and explain why in notes.\n" - ) if baseline_measurement: return header + conflict_block + ( "BASELINE MEASUREMENT: establish the first reference metrics in the dedicated baseline worktree.\n" "You must retry until the run completes successfully and you can report real metrics. Do not report empty metrics and stop.\n" "If training fails with CUDA out of memory (OOM): the default batch size is for H100. Reduce device_batch_size (and if needed total_batch_size) in component_system/components/trainer.py (TrainingSettings) so training fits in available VRAM, then rerun until the baseline run completes. Only trivial execution fixes (e.g. batch size) are allowed; do not change model architecture or training logic.\n" "If you modified any files (e.g. batch size for OOM), you must commit those changes on the baseline branch before reporting. 
An uncommitted worktree causes the follow-up merge to fail.\n" - f"Run the canonical command (≥{DCA_CANONICAL_RUN_TIMEOUT_SECONDS}s): {dca_cmd}\n" - f"({dca_note}) When you invoke this command, set your command/tool timeout to at least {DCA_CANONICAL_RUN_TIMEOUT_SECONDS} seconds so the process is not killed early.\n" + f"Run the canonical command (≥{DCA_CANONICAL_RUN_TIMEOUT_SECONDS}s): `{dca_cmd} > training.log 2>&1`. Set your command/tool timeout to at least {DCA_CANONICAL_RUN_TIMEOUT_SECONDS} seconds. After the run, inspect training.log to confirm completion and recover or verify metrics.\n" + f"({dca_note})\n" "Report the final result in JSON between these exact markers once training has completed successfully. Include the current commit SHA in the summary (commit any changes first).\n" "AUTORESEARCH_DCA_SUMMARY_BEGIN\n" '{"checks":["baseline_measurement"],"notes":"Measured the current baseline in the dedicated baseline worktree.","completed_at":"YYYY-MM-DD HH:MM:SS","commit_sha":"...","metrics":{"val_bpb":1.239972,"training_seconds":300.1,"total_seconds":360.4,"startup_seconds":25.8,"peak_vram_mb":11967.8,"mfu_percent":2.15,"total_tokens_M":140.5,"num_steps":268,"num_params_M":11.5,"depth":4}}\n' @@ -518,8 +572,8 @@ def _build_prompt(stage: str, task: dict[str, Any], task_path: Path) -> str: "The task \"prompt\" is for context only; do not treat it as a goal to achieve in this stage.\n\n" "Workflow:\n" "1. Adapt or fix the generated code in the seed worktree until it runs.\n" - f"2. Run the canonical command (≥{DCA_CANONICAL_RUN_TIMEOUT_SECONDS}s): {dca_cmd}\n" - f" ({dca_note}) When you invoke this command, set your command/tool timeout to at least {DCA_CANONICAL_RUN_TIMEOUT_SECONDS} seconds so the process is not killed early.\n" + f"2. Run the canonical command (≥{DCA_CANONICAL_RUN_TIMEOUT_SECONDS}s): `{dca_cmd} > training.log 2>&1` (or `... 2>&1 | tee training.log` to also see output). 
Set your command/tool timeout to at least {DCA_CANONICAL_RUN_TIMEOUT_SECONDS} seconds. After the run, inspect training.log to confirm completion and recover or verify metrics.\n" + f" ({dca_note})\n" "3. If it fails for a simple reason, fix and rerun.\n" "4. Create a git commit in the seed branch for your changes.\n" "5. Report the final result in JSON between these exact markers. Include the current commit SHA in the summary.\n" @@ -625,10 +679,10 @@ def eligible(payload: dict) -> bool: worktree_path = task.get("worktree_path") if started_seed is not None and started_seed.worktree_path is not None: worktree_path = started_seed.worktree_path - # Merge-resolution DCA runs from project root so the agent can operate on repo and worktrees + # Merge-resolution and metrics_recovery DCA run from project root; sync_resolution runs in seed worktree if stage == "dca" and ( task.get("merge_resolution") is True or task.get("metrics_recovery") is True - ): + ) and task.get("sync_resolution") is not True: worktree_path = None if worktree_path: @@ -667,28 +721,31 @@ def eligible(payload: dict) -> bool: prompt_path=prompt_path_str, ) else: - run = WORKFLOW.finish_dca_run( - seed_id, - run_id, - stdout, - stderr=stderr, - log_path=str(stdout_log_path) if stdout_log_path else None, - stderr_log_path=str(stderr_log_path) if stderr_log_path else None, - prompt_path=prompt_path_str, - metrics_recovery=task.get("metrics_recovery") is True, - merge_resolution=task.get("merge_resolution") is True, - ) - if not run.summary.get("metrics_recovery_queued"): - description = run.summary.get("notes") or run.summary.get("idea") or seed_id - _append_results_tsv(seed_id, run.metrics, run.signal or "error", str(description)) - _regenerate_progress_png() - if salvaged_dca: - WORKFLOW.seed_repo.append_event( + if task.get("sync_resolution") is True: + WORKFLOW.finish_sync_resolution(seed_id, run_id) + else: + run = WORKFLOW.finish_dca_run( seed_id, - "dca.salvaged", - f"DCA output contained final 
metrics, so the run was accepted despite agent exit code {exit_code}.", - run_id=run_id, + run_id, + stdout, + stderr=stderr, + log_path=str(stdout_log_path) if stdout_log_path else None, + stderr_log_path=str(stderr_log_path) if stderr_log_path else None, + prompt_path=prompt_path_str, + metrics_recovery=task.get("metrics_recovery") is True, + merge_resolution=task.get("merge_resolution") is True, ) + if not run.summary.get("metrics_recovery_queued"): + description = run.summary.get("notes") or run.summary.get("idea") or seed_id + _append_results_tsv(seed_id, run.metrics, run.signal or "error", str(description)) + _regenerate_progress_png() + if salvaged_dca: + WORKFLOW.seed_repo.append_event( + seed_id, + "dca.salvaged", + f"DCA output contained final metrics, so the run was accepted despite agent exit code {exit_code}.", + run_id=run_id, + ) move_to_done(task_path) print(f"[{stage.upper()}] task {task['task_id']} done") else: @@ -710,6 +767,11 @@ def eligible(payload: dict) -> bool: task_path=task_path, prompt_path=prompt_path_str, ) print(f"[{stage.upper()}] task {task['task_id']} failed") + except SyncResolutionQueued: + # Sync with baseline failed; sync-resolution DCA was queued. Move P task to error so we don't retry it. + if task_path.exists(): + move_to_error(task_path) + continue except Exception as exc: traceback.print_exc() if not task_path.exists(): diff --git a/component_system/services/workflow.py b/component_system/services/workflow.py index e9cac3c99..afb028fd2 100644 --- a/component_system/services/workflow.py +++ b/component_system/services/workflow.py @@ -42,11 +42,65 @@ BASELINE_SEED_ID = "__baseline__" +# Short display labels for timeline (kind -> one-line text). Events not in this map use message as-is (truncated if long). 
+TIMELINE_SHORT_MESSAGES = { + "seed.created": "Seed created", + "seed.updated": "Seed updated", + "seed.worktree_ready": "Worktree ready", + "ralph.enabled": "Ralph loop enabled", + "ralph.disabled": "Ralph loop disabled", + "p.queued": "Plan queued", + "p.started": "Plan started", + "p.completed": "Plan completed", + "p.failed": "Plan failed", + "dca.queued": "DCA queued", + "dca.started": "DCA started", + "dca.completed": "DCA completed", + "dca.merge_failed": "Merge into baseline failed", + "p.sync_resolution_queued": "Sync failed; merge resolution queued", + "p.sync_resolution_done": "Sync resolution done; Plan re-queued", + "dca.failed": "DCA failed", + "direct_code.failed": "Direct code failed", +} + + +def _timeline_display_events(events: list[dict[str, Any]]) -> list[dict[str, Any]]: + """Return events in reverse order (newest first), deduplicated by (kind, message), with concise display text.""" + if not events: + return [] + reversed_list = list(reversed(events)) + seen: set[tuple[str, str]] = set() + out: list[dict[str, Any]] = [] + for e in reversed_list: + kind = e.get("kind", "") + message = e.get("message", "") + key = (kind, message) + if key in seen: + continue + seen.add(key) + display = TIMELINE_SHORT_MESSAGES.get(kind) + if display is not None: + # Keep commit_sha / target_branch in a short suffix when present + parts = [display] + if e.get("commit_sha"): + parts.append(f"commit: {e.get('commit_sha', '')[:7]}") + if e.get("target_branch"): + parts.append(f"→ {e.get('target_branch')}") + display = " · ".join(parts) + else: + display = message if len(message) <= 80 else message[:77] + "..." 
+ out.append({**e, "display_message": display}) + return out + class GitCommandError(RuntimeError): pass +class SyncResolutionQueued(RuntimeError): + """Raised when P run cannot start because worktree sync with baseline failed; a sync-resolution DCA task was queued.""" + + class GitService: def __init__(self) -> None: pass @@ -107,8 +161,6 @@ def is_seed_specific_branch(branch: str) -> bool: c in "abcdef0123456789" for c in branch[5:] ): return True - if branch.startswith("seed/"): - return True # legacy candidate branches, e.g. seed/seed-e57b95 return False def setup_error(self) -> str | None: @@ -206,6 +258,18 @@ def reset_seed_branch_to(self, seed: SeedRecord, ref: str) -> None: return self._run_git("reset", "--hard", ref, cwd=worktree_path) + def sync_seed_worktree_with_baseline(self, seed: SeedRecord) -> None: + """Merge the baseline branch into the seed branch in the seed worktree. + Call before each P run so the worktree has the latest baseline.""" + if seed.seed_id == BASELINE_SEED_ID: + return + if not seed.worktree_path: + return + worktree_path = Path(seed.worktree_path) + if not worktree_path.is_dir(): + return + self._run_git("merge", "--no-edit", seed.baseline_branch, cwd=worktree_path) + def promote_seed_branch( self, seed: SeedRecord, target_branch: str | None = None ) -> str: @@ -256,7 +320,7 @@ def _baseline_worktree_path() -> str: return str(WORKTREE_ROOT / BASELINE_SEED_ID) def _normalize_seed_runtime_state(self, seed: SeedRecord) -> SeedRecord: - """Clean up legacy persisted seed state that no longer matches runtime rules.""" + """Ensure baseline seed worktree_path matches the canonical path.""" if seed.seed_id != BASELINE_SEED_ID: return seed expected_worktree = self._baseline_worktree_path() @@ -372,7 +436,7 @@ def _enqueue_plan_run(self, seed: SeedRecord, event_kind: str = "p.queued", even def _release_seeds_waiting_for_baseline(self, branch: str) -> None: """Release seeds that were waiting for baseline result on the given branch.""" 
branch_metrics = self.metrics_repo.get_for_branch(branch) - if not branch_metrics or branch_metrics.get("last_val_bpb") is None: + if not branch_metrics or branch_metrics.get("best_val_bpb") is None: return waiting_seeds = sorted(self.seed_repo.list(), key=lambda item: item.created_at) for seed in waiting_seeds: @@ -529,19 +593,19 @@ def _get_or_create_baseline_seed(self) -> SeedRecord: def ensure_baseline_result(self) -> None: """ - If there is no baseline result (last_val_bpb) for the baseline seed's branch, ensure a baseline seed exists and + If there is no baseline result (best_val_bpb) for the baseline seed's branch, ensure a baseline seed exists and queue its DCA so the first run establishes the baseline. Idempotent; safe to call before queue_p for any user seed. """ seed = self._get_or_create_baseline_seed() branch_metrics = self.metrics_repo.get_for_branch(seed.baseline_branch) - if branch_metrics and branch_metrics.get("last_val_bpb") is not None: + if branch_metrics and branch_metrics.get("best_val_bpb") is not None: return if seed.status in (SeedStatus.dca_queued, SeedStatus.adapting, SeedStatus.running): return if seed.status in (SeedStatus.passed, SeedStatus.failed, SeedStatus.promoted): branch_metrics = self.metrics_repo.get_for_branch(seed.baseline_branch) - if branch_metrics and branch_metrics.get("last_val_bpb") is not None: + if branch_metrics and branch_metrics.get("best_val_bpb") is not None: return setup_error = self.git_service.setup_error() if setup_error is not None: @@ -602,24 +666,30 @@ def update_seed_prompt(self, seed_id: str, prompt: str) -> SeedRecord: def queue_p(self, seed_id: str) -> StageRun | None: seed = self.require_seed(seed_id) branch_metrics = self.metrics_repo.get_for_branch(seed.baseline_branch) if seed_id != BASELINE_SEED_ID else None - has_baseline = branch_metrics is not None and branch_metrics.get("last_val_bpb") is not None + has_baseline = branch_metrics is not None and branch_metrics.get("best_val_bpb") is not None 
if seed_id != BASELINE_SEED_ID and not has_baseline: self.ensure_baseline_result() branch_metrics = self.metrics_repo.get_for_branch(seed.baseline_branch) - has_baseline = branch_metrics is not None and branch_metrics.get("last_val_bpb") is not None + has_baseline = branch_metrics is not None and branch_metrics.get("best_val_bpb") is not None if not has_baseline: - if not (seed.status is SeedStatus.queued and seed.latest_run_id is None): - seed.status = SeedStatus.queued - seed.updated_at = now_ts() - seed.latest_run_id = None - seed.last_error = None - self.seed_repo.save(seed) - self.seed_repo.append_event( - seed.seed_id, - "p.waiting_for_baseline", - "Baseline run is still in progress; Plan will queue after baseline finishes.", - ) - return None + baseline_seed = self.seed_repo.get(BASELINE_SEED_ID) + # Only wait for baseline when the baseline seed is for this branch (e.g. master). + # For another branch (e.g. dev), no baseline run is queued for it, so allow planning; + # the first DCA completion on this branch will establish baseline metrics. + if baseline_seed is not None and baseline_seed.baseline_branch == seed.baseline_branch: + if not (seed.status is SeedStatus.queued and seed.latest_run_id is None): + seed.status = SeedStatus.queued + seed.updated_at = now_ts() + seed.latest_run_id = None + seed.last_error = None + self.seed_repo.save(seed) + self.seed_repo.append_event( + seed.seed_id, + "p.waiting_for_baseline", + "Baseline run is still in progress; Plan will queue after baseline finishes.", + ) + return None + # Branch has no baseline and is not the baseline seed's branch: proceed with planning. setup_error = self.git_service.setup_error() if setup_error is not None: raise RuntimeError(setup_error) @@ -707,6 +777,67 @@ def queue_dca( write_task("dca", payload, task_id=run.task_id) return run + def queue_sync_resolution(self, seed_id: str) -> StageRun: + """Queue a merge-resolution run to resolve 'merge baseline into seed' in the seed worktree (e.g. 
after sync failed before P).""" + seed = self.require_seed(seed_id) + if seed.seed_id == BASELINE_SEED_ID: + raise RuntimeError("Sync resolution is not used for the baseline seed.") + setup_error = self.git_service.setup_error_for_branches(seed.baseline_branch) + if setup_error is not None: + raise RuntimeError(setup_error) + run = StageRun( + run_id=new_run_id("dca"), + seed_id=seed.seed_id, + stage=StageName.dca, + status=RunStatus.queued, + task_id=new_run_id("task-dca"), + created_at=now_ts(), + updated_at=now_ts(), + ) + seed.status = SeedStatus.dca_queued + seed.updated_at = now_ts() + seed.latest_run_id = run.run_id + seed.last_error = None + self.seed_repo.save(seed) + self.run_repo.save(run) + self.seed_repo.append_event( + seed.seed_id, + "p.sync_resolution_queued", + "Worktree sync with baseline failed; queued merge-resolution to resolve and re-run Plan.", + ) + payload = { + "seed_id": seed.seed_id, + "run_id": run.run_id, + "prompt": seed.prompt, + "worktree_path": seed.worktree_path, + "baseline_branch": seed.baseline_branch, + "sync_resolution": True, + } + write_task("dca", payload, task_id=run.task_id) + return run + + def finish_sync_resolution(self, seed_id: str, run_id: str) -> None: + """Mark sync-resolution run completed and re-queue Plan for the seed.""" + seed = self.require_seed(seed_id) + run = self.require_run(run_id) + run.status = RunStatus.succeeded + run.updated_at = now_ts() + self.run_repo.save(run) + seed.status = SeedStatus.queued + seed.updated_at = now_ts() + self.seed_repo.save(seed) + self.seed_repo.append_event( + seed.seed_id, + "p.sync_resolution_done", + "Sync resolution completed; Plan re-queued.", + run_id=run_id, + ) + self._enqueue_plan_run( + seed, + event_kind="p.queued", + event_message="Re-queued Plan after sync resolution.", + ) + def require_seed(self, seed_id: str) -> SeedRecord: seed = self.seed_repo.get(seed_id) if seed is None: @@ -752,6 +883,21 @@ def mark_run_started(self, seed_id: str, run_id: str) -> 
tuple[SeedRecord, Stage if setup_error is not None: raise RuntimeError(setup_error) seed = self.ensure_seed_worktree_ready(seed.seed_id) + # Sync seed worktree with baseline branch before P so Plan runs from latest baseline. + try: + self.git_service.sync_seed_worktree_with_baseline(seed) + except GitCommandError as sync_err: + self.queue_sync_resolution(seed.seed_id) + raise SyncResolutionQueued( + f"Worktree sync with baseline failed: {sync_err}. Queued merge-resolution." + ) from sync_err + # Record baseline val_bpb at sync time for positive/negative/neutral judgement in DCA. + branch_metrics = self.metrics_repo.get_for_branch(seed.baseline_branch) + former = branch_metrics.get("best_val_bpb") if branch_metrics else None + if run.summary is None: + run.summary = {} + run.summary["former_val_bpb"] = former + seed.former_val_bpb = float(former) if former is not None else None if seed.worktree_path: worktree_path = Path(seed.worktree_path) if worktree_path.is_dir(): @@ -825,6 +971,26 @@ def mark_direct_code_run_failed( if task_path is not None and task_path.exists(): move_to_error(task_path) + def _ralph_try_restore_worktree(self, seed: SeedRecord, ref: str | None) -> None: + """Reset seed worktree to ref (e.g. commit before P) and log result. 
No-op if ref missing or baseline seed.""" + if not ref or not str(ref).strip() or seed.seed_id == BASELINE_SEED_ID: + return + try: + self.git_service.reset_seed_branch_to(seed, ref) + self.seed_repo.append_event( + seed.seed_id, + "ralph.worktree_restored", + "Restored seed worktree to commit before P for next Plan.", + commit_sha=ref, + ) + except GitCommandError as exc: + self.seed_repo.append_event( + seed.seed_id, + "ralph.worktree_restore_failed", + f"Could not restore seed worktree to commit before P: {exc}", + commit_sha=ref, + ) + def mark_run_failed( self, seed_id: str, @@ -862,6 +1028,7 @@ def mark_run_failed( and task_payload.get("merge_resolution") is not True and task_payload.get("metrics_recovery") is not True ): + self._ralph_try_restore_worktree(seed, run.summary.get("commit_sha_before_p")) try: self.queue_p(seed.seed_id) self.seed_repo.append_event( @@ -981,11 +1148,13 @@ def finish_dca_run( seed = self.require_seed(seed_id) run = self.require_run(run_id) branch_metrics = self.metrics_repo.get_for_branch(seed.baseline_branch) - last_val_bpb = float(branch_metrics["last_val_bpb"]) if branch_metrics and branch_metrics.get("last_val_bpb") is not None else None + best_val_bpb = float(branch_metrics["best_val_bpb"]) if branch_metrics and branch_metrics.get("best_val_bpb") is not None else None + # Use baseline at sync-before-P time (former_val_bpb) when available; else branch best for baseline seed. 
+ baseline_for_signal = seed.former_val_bpb if (seed.former_val_bpb is not None and seed.seed_id != BASELINE_SEED_ID) else best_val_bpb output_text = self.combine_output(stdout, stderr) summary = self.extract_summary(output_text, StageName.dca) or {} metrics = self.extract_dca_metrics(output_text, summary) - signal = self.evaluate_signal(metrics, last_val_bpb, PROMOTION_THRESHOLD) + signal = self.evaluate_signal(metrics, baseline_for_signal, PROMOTION_THRESHOLD) commit_sha = summary.get("commit_sha") if not (isinstance(commit_sha, str) and commit_sha.strip()): try: @@ -997,8 +1166,10 @@ def finish_dca_run( run.log_path = log_path run.stderr_log_path = stderr_log_path run.prompt_path = prompt_path - # Preserve runner-set keys (e.g. commit_sha_before_p) so negative-signal restore can run - preserved = {k: run.summary[k] for k in ("commit_sha_before_p",) if run.summary and k in run.summary} + # Preserve runner-set keys (e.g. commit_sha_before_p, former_val_bpb) for restore and comparison. + preserved = {k: run.summary[k] for k in ("commit_sha_before_p", "former_val_bpb") if run.summary and k in run.summary} + if seed.former_val_bpb is not None and "former_val_bpb" not in preserved: + preserved["former_val_bpb"] = seed.former_val_bpb run.summary = summary | {"commit_sha": commit_sha} | preserved run.metrics = metrics run.signal = signal @@ -1020,12 +1191,17 @@ def finish_dca_run( source_stdout_log_path=log_path, source_stderr_log_path=stderr_log_path, ) + if ( + seed.ralph_loop_enabled + and seed.seed_id != BASELINE_SEED_ID + ): + self._ralph_try_restore_worktree(seed, run.summary.get("commit_sha_before_p")) return run seed.latest_metrics = metrics seed.latest_signal = signal terminal_status = self._status_from_dca_signal(signal) merge_commit_sha = None # set when seed branch is successfully merged into baseline - if seed.seed_id == BASELINE_SEED_ID and last_val_bpb is None: + if seed.seed_id == BASELINE_SEED_ID and best_val_bpb is None: if "val_bpb" not in metrics: 
seed.status = SeedStatus.failed event_message = ( @@ -1046,10 +1222,7 @@ def finish_dca_run( target_branch = self._first_user_seed_baseline_branch() or seed.baseline_branch # Only positive_signal is merged into the per-seed baseline branch; record baseline value otherwise. if signal != "positive_signal": - self.metrics_repo.update_for_branch( - target_branch, - {"last_val_bpb": metrics["val_bpb"]}, - ) + self.metrics_repo.append_baseline_run(target_branch, metrics["val_bpb"]) seed.status = terminal_status self.run_repo.save(run) self.seed_repo.save(seed) @@ -1063,10 +1236,10 @@ def finish_dca_run( return run try: merge_commit_sha = self.git_service.promote_seed_branch(seed, target_branch=target_branch) - self.metrics_repo.update_for_branch( + self.metrics_repo.append_promotion_for_branch( target_branch, { - "last_val_bpb": metrics["val_bpb"], + "val_bpb": metrics["val_bpb"], "promoted_branch": seed.seed_id, "promoted_idea": "Initial baseline adaptation", "promoted_at": summary.get("completed_at"), @@ -1119,13 +1292,14 @@ def finish_dca_run( metrics=metrics, ) return run - self.metrics_repo.update_for_branch( + self.metrics_repo.append_promotion_for_branch( target_branch, { - "last_val_bpb": metrics["val_bpb"], + "val_bpb": metrics["val_bpb"], "promoted_branch": seed.seed_id, "promoted_idea": "Initial baseline adaptation", "promoted_at": summary.get("completed_at"), + "commit_sha": None, }, ) seed.status = SeedStatus.passed @@ -1141,12 +1315,13 @@ def finish_dca_run( self._release_seeds_waiting_for_baseline(target_branch) return run if terminal_status is SeedStatus.promoted: + # Merge seed into baseline first on positive signal; then update metrics/state. 
try: merge_commit_sha = self.git_service.promote_seed_branch(seed) - self.metrics_repo.update_for_branch( + self.metrics_repo.append_promotion_for_branch( seed.baseline_branch, { - "last_val_bpb": metrics["val_bpb"], + "val_bpb": metrics["val_bpb"], "promoted_branch": seed.seed_id, "promoted_idea": seed.plan.title if seed.plan else seed.prompt[:80], "promoted_at": summary.get("completed_at"), @@ -1247,23 +1422,7 @@ def finish_dca_run( and not metrics_recovery and seed.seed_id != BASELINE_SEED_ID ): - ref = run.summary.get("commit_sha_before_p") - if ref: - try: - self.git_service.reset_seed_branch_to(seed, ref) - self.seed_repo.append_event( - seed.seed_id, - "ralph.worktree_restored", - "Restored seed worktree to commit before P for next Plan.", - commit_sha=ref, - ) - except GitCommandError as exc: - self.seed_repo.append_event( - seed.seed_id, - "ralph.worktree_restore_failed", - f"Could not restore seed worktree to commit before P: {exc}", - commit_sha=ref, - ) + self._ralph_try_restore_worktree(seed, run.summary.get("commit_sha_before_p")) if seed.ralph_loop_enabled: try: self.queue_p(seed.seed_id) @@ -1376,11 +1535,12 @@ def seed_detail(self, seed_id: str) -> dict[str, object]: seed.updated_at = now_ts() self.seed_repo.save(seed) self._reconcile_seed_status_signal(seed) + raw_events = self.seed_repo.events(seed_id) return { "seed": seed, "can_edit_prompt": self.can_edit_seed_prompt(seed), "runs": self.run_repo.list(seed_id), - "events": self.seed_repo.events(seed_id), + "events": _timeline_display_events(raw_events), "baseline_metrics_for_branch": self.metrics_repo.get_for_branch(seed.baseline_branch), "setup_error": self.git_service.setup_error_for_branches(seed.baseline_branch), } @@ -1470,15 +1630,15 @@ def extract_dca_metrics( @staticmethod def evaluate_signal( metrics: dict[str, float | int], - last_val_bpb: float | None, + baseline_val_bpb: float | None, promotion_threshold: float = PROMOTION_THRESHOLD, ) -> str: val_bpb = metrics.get("val_bpb") if 
val_bpb is None: return "error" - if last_val_bpb is None: + if baseline_val_bpb is None: return "positive_signal" - delta = float(last_val_bpb) - float(val_bpb) + delta = float(baseline_val_bpb) - float(val_bpb) if delta >= promotion_threshold: return "positive_signal" if delta <= -promotion_threshold: diff --git a/component_system/task.py b/component_system/task.py index d4b9fad05..ea6f40dcf 100644 --- a/component_system/task.py +++ b/component_system/task.py @@ -285,14 +285,21 @@ def save_baseline_branch_map(mapping: dict[str, str]) -> None: _write_json(BASELINE_BRANCHES_PATH, mapping) -def load_baseline_metrics() -> dict[str, dict[str, Any]]: - """Load baseline_branch -> { last_val_bpb, promoted_branch, promoted_at, promoted_idea, commit_sha }.""" +def load_baseline_metrics() -> dict[str, list[dict[str, Any]]]: + """Load baseline_branch -> list of promotion/measurement records. Each record: val_bpb, promoted_branch?, promoted_idea?, promoted_at?, commit_sha?.""" ensure_queue_layout() - return _read_json(BASELINE_METRICS_PATH, {}) - - -def save_baseline_metrics(metrics_by_branch: dict[str, dict[str, Any]]) -> None: - """Persist per-branch baseline metrics.""" + raw = _read_json(BASELINE_METRICS_PATH, {}) + result: dict[str, list[dict[str, Any]]] = {} + for branch, value in raw.items(): + if isinstance(value, list): + result[branch] = value + else: + result[branch] = [] + return result + + +def save_baseline_metrics(metrics_by_branch: dict[str, list[dict[str, Any]]]) -> None: + """Persist per-branch baseline metrics (branch -> list of records).""" ensure_queue_layout() _write_json(BASELINE_METRICS_PATH, metrics_by_branch) diff --git a/component_system/web/app.py b/component_system/web/app.py index 9495a62ec..971b666e2 100644 --- a/component_system/web/app.py +++ b/component_system/web/app.py @@ -1,6 +1,7 @@ from __future__ import annotations import time +from datetime import datetime, timezone from pathlib import Path from fastapi import FastAPI @@ -31,6 +32,16 
@@ def create_app() -> FastAPI: app.state.workflow = default_workflow_service() app.state.static_version = _static_version() app.state.templates = Jinja2Templates(directory=str(TEMPLATE_ROOT)) + + def _format_ts(ts: float | None) -> str: + if ts is None: + return "" + try: + return datetime.fromtimestamp(ts, tz=timezone.utc).strftime("%Y-%m-%d %H:%M UTC") + except (TypeError, OSError): + return "" + + app.state.templates.env.filters["format_ts"] = _format_ts app.mount("/static", StaticFiles(directory=str(STATIC_ROOT)), name="static") app.include_router(router, prefix="/component-system") diff --git a/component_system/web/templates/dashboard.html b/component_system/web/templates/dashboard.html index 82b1056da..103f87f32 100644 --- a/component_system/web/templates/dashboard.html +++ b/component_system/web/templates/dashboard.html @@ -55,13 +55,13 @@

Create Seed

{% endwith %}

Baseline branches

-

Per-branch metrics (last val_bpb, promoted seed). Workflow-managed, read-only: component_system/baseline_branches.json (per-branch mapping), component_system/baseline_metrics.json (baseline run metrics).

+

Per-branch best val_bpb from baseline_metrics.json. Workflow-managed, read-only: component_system/baseline_branches.json (per-branch mapping), component_system/baseline_metrics.json (baseline run metrics).

{% if dashboard.baseline_metrics_by_branch %}
{% for branch, m in dashboard.baseline_metrics_by_branch.items() %}
{{ branch }}
-
val_bpb {{ "%.6f"|format(m.get('last_val_bpb')) if m.get('last_val_bpb') is not none else "—" }} · {{ m.get('promoted_branch') or "—" }}{% if m.get('commit_sha') %} · {{ m.get('commit_sha')[:7] }}{% endif %}
+
val_bpb {{ "%.6f"|format(m.get('best_val_bpb')) if m.get('best_val_bpb') is not none else "—" }} · {{ m.get('promoted_branch') or "—" }}{% if m.get('commit_sha') %} · {{ m.get('commit_sha')[:7] }}{% endif %}
{% endfor %}
diff --git a/component_system/web/templates/partials/seed_runs_inner.html b/component_system/web/templates/partials/seed_runs_inner.html index 488d9912b..5ca9a1525 100644 --- a/component_system/web/templates/partials/seed_runs_inner.html +++ b/component_system/web/templates/partials/seed_runs_inner.html @@ -3,7 +3,7 @@
-

{% if run.stage.value == 'p' %}Plan{% else %}{{ run.stage.value|upper }}{% endif %} · {{ run.status.value }}

+

{% if run.stage.value == 'p' %}Plan{% else %}{{ run.stage.value|upper }}{% endif %} · {{ run.status.value }}{% if run.created_at %} {{ run.created_at|format_ts }}{% endif %}

{{ run.run_id }}

diff --git a/component_system/web/templates/partials/seed_timeline_inner.html b/component_system/web/templates/partials/seed_timeline_inner.html index 8fabcdd54..d545f0e19 100644 --- a/component_system/web/templates/partials/seed_timeline_inner.html +++ b/component_system/web/templates/partials/seed_timeline_inner.html @@ -1,13 +1,7 @@ {% if events %} {% for event in events %}
-

{{ event.message }}

- {% if event.commit_sha %} -

commit: {{ event.commit_sha }}

- {% endif %} - {% if event.target_branch %} -

target branch: {{ event.target_branch }}

- {% endif %} +

{{ event.display_message | default(event.message) }}

{{ event.kind }} · {{ event.created_at_human }}

{% endfor %} From f866a761b83ee9377749f9ab0baaa884bcea662c Mon Sep 17 00:00:00 2001 From: Laurence Date: Thu, 12 Mar 2026 19:57:09 +0800 Subject: [PATCH 09/24] refactor(component-system): improve PDCA workflow, metrics tracking, and concurrency handling - Update PDCA documentation for clarity and consistency - Add seed lifecycle and concurrency review documentation - Track former_val_bpb for better signal evaluation - Implement best_val_bpb tracking with history - Add metrics recovery DCA for missing metrics - Improve timeout handling (900s for DCA runs) - Update web dashboard UI and templates - Fix sync resolution and merge conflict handling - Update protocol.md with simplified instructions --- .gitignore | 1 + README.md | 40 +- component_system/PDCA-DO-CHECK-ACTION.md | 94 +- component_system/PDCA-PLAN.md | 173 +- component_system/config.py | 2 +- .../SEED_LIFECYCLE_AND_CONCURRENCY_REVIEW.md | 177 + component_system/domain/models.py | 2 + component_system/protocol.md | 90 +- component_system/repositories/state.py | 46 +- component_system/run.py | 249 +- component_system/services/workflow.py | 3041 +++++++++-------- component_system/task.py | 83 +- component_system/web/app.py | 22 + component_system/web/routes.py | 41 +- component_system/web/static/app.js | 128 +- component_system/web/templates/base.html | 4 +- component_system/web/templates/dashboard.html | 10 +- .../web/templates/partials/seed_detail.html | 173 +- .../partials/seed_detail_response.html | 3 - .../templates/partials/seed_runs_inner.html | 27 +- .../web/templates/seed_detail_page.html | 3 + pyproject.toml | 1 + scripts/clean_history.py | 256 +- uv.lock | 34 + 24 files changed, 2784 insertions(+), 1916 deletions(-) create mode 100644 component_system/docs/SEED_LIFECYCLE_AND_CONCURRENCY_REVIEW.md diff --git a/.gitignore b/.gitignore index cb732b720..a3fb245de 100644 --- a/.gitignore +++ b/.gitignore @@ -28,3 +28,4 @@ component_system/history/ component_system/baseline_branches.json 
component_system/baseline_metrics.json *.log +.ipynb_checkpoints/ \ No newline at end of file diff --git a/README.md b/README.md index 8de13d9cd..15ee32f53 100644 --- a/README.md +++ b/README.md @@ -8,7 +8,7 @@ The idea: give an AI agent a small but real LLM training setup and let it experi ## How it works -The repo is deliberately kept small and only really has a three files that matter: +The repo is deliberately kept small and only really has three files that matter: - **`prepare.py`** — fixed constants, one-time data prep (downloads training data, trains a BPE tokenizer), and runtime utilities (dataloader, evaluation). Not modified. - **`train.py`** — the single file the agent edits. Contains the full GPT model, optimizer (Muon + AdamW), and training loop. Everything is fair game: architecture, hyperparameters, optimizer, batch size, etc. **This file is edited and iterated on by the agent**. @@ -16,6 +16,8 @@ The repo is deliberately kept small and only really has a three files that matte By design, training runs for a **fixed 5-minute time budget** (wall clock, excluding startup/compilation), regardless of the details of your compute. The metric is **val_bpb** (validation bits per byte) — lower is better, and vocab-size-independent so architectural changes are fairly compared. +If you are new to neural networks, this ["Dummy's Guide"](https://x.com/hooeem/status/2030720614752039185) looks pretty good for a lot more context. + ## Quick start **Requirements:** A single NVIDIA GPU (tested on H100), Python 3.10+, [uv](https://docs.astral.sh/uv/). @@ -47,41 +49,7 @@ Hi have a look at program.md and let's kick off a new experiment! let's do the s The `program.md` file is essentially a super lightweight "skill". -### Component-system workflow - -The component system runs a continuous **Seed → P → DCA** loop. 
A resident daemon manages two workers (P and DCA) that poll a file-based queue and dispatch each stage to an external code agent (Claude Code, Codex, or OpenCode). - -**1. Start the web dashboard** (optional, but recommended for monitoring): - -```bash -uv run uvicorn component_system.web.app:app --reload -``` - -Open http://127.0.0.1:8000 — the dashboard lives at `/component-system`. Use `--host 0.0.0.0` or `--port 8080` as needed. - -**2. Start the daemon:** - -```bash -# Default: uses Claude Code -uv run component_system/run.py - -# Or choose a different agent backend -PDCA_AGENT=codex uv run component_system/run.py -PDCA_AGENT=opencode uv run component_system/run.py -``` - -**3. Bootstrap via a coding agent.** Do *not* tell the agent to execute PDCA stages manually. Instead, give it a prompt like: - -```text -Understand this project and follow component_system/protocol.md. -Do not execute PDCA stages manually in this session. -Instead, bootstrap the component system by creating an initial seed -and queuing it to component_system/queue/p/, then confirm the daemon -(uv run component_system/run.py) is running so the P and DCA workers -can process stages automatically. -``` - -Once bootstrapped, seeds flow through `queue/p/` → P worker → `queue/dca/` → DCA worker → `state/` automatically. Results and promotions are tracked in `state/` and visible in the web dashboard. +For the component-system workflow, see `component_system/README.md`. ## Project structure diff --git a/component_system/PDCA-DO-CHECK-ACTION.md b/component_system/PDCA-DO-CHECK-ACTION.md index 6bed2d95d..d90418c9c 100644 --- a/component_system/PDCA-DO-CHECK-ACTION.md +++ b/component_system/PDCA-DO-CHECK-ACTION.md @@ -1,62 +1,44 @@ -# DCA - Adapt, Check, Action - -This document merges the former `PDCA-DO.md`, `PDCA-CHECK.md`, and `PDCA-ACTION.md` -into one execution guide for the merged DCA stage. 
+# DCA — Do, Check, Action ## Responsibility Take the generated plan from P, adapt/fix it in the seed worktree, run the canonical training entrypoint, evaluate results against baseline, and -promote only when the signal is positive. +promote only when the signal is positive. Do not propose new ideas or optimize for better metrics; only adapt/fix so the plan runs and report outcomes. ## Workspace and paths -Your **current working directory is the seed worktree**. All reads and edits must stay inside this workspace. Use **only paths relative to your cwd**, and treat the copied files under `component_system/` as the canonical context inside the worktree. Do not use or request absolute paths, parent-directory paths, or files outside the workspace; the runner has already set your cwd to the correct worktree. +**CWD = seed worktree.** Read and edit only inside it; use relative paths only. Treat `component_system/` in the worktree as canonical context. ## Input -- Read the task content embedded in the runner prompt. -- Read current baseline state from `component_system/baseline_branches.json` and `component_system/baseline_metrics.json`. -- Read and edit worktree-local files only. +- Runner prompt (task content). +- Baseline: `component_system/baseline_branches.json`, `component_system/baseline_metrics.json`. +- Worktree-local files only. ## Baseline measurement (seed_id __baseline__) -For **baseline measurement** tasks you must **retry until the run completes successfully** and you can report real metrics. Do not report empty metrics and stop. +Retry until the run succeeds and you report real metrics. No empty metrics. -- If training fails with **CUDA out of memory (OOM)**, the default batch size is tuned for H100. Reduce `device_batch_size` in `component_system/components/trainer.py` (`TrainingSettings`: default `device_batch_size=128`). 
You may also need to reduce `total_batch_size` so that `total_batch_size % (device_batch_size * sequence_length) == 0` for gradient accumulation. Then rerun the entrypoint until training completes and report the resulting metrics. -- Only trivial execution fixes (e.g. batch size for VRAM) are allowed; do not change model architecture or training logic for baseline. -- **Commit any file changes before reporting.** If you modified files (e.g. reduced batch size), commit those changes on the baseline branch. An uncommitted worktree causes the follow-up merge into the baseline branch to fail. +- **OOM:** Reduce `device_batch_size` in `component_system/components/trainer.py` (default 128); keep `total_batch_size % (device_batch_size * sequence_length) == 0`. Rerun until training completes. +- Only trivial fixes (e.g. batch size); no model/training logic changes. +- **Commit before reporting.** Uncommitted changes break the follow-up merge. ## Workflow -1. Work in the seed worktree prepared by the system (on the seed branch, one branch per seed). -2. Adapt or fix generated code until it runs cleanly. -3. Run the canonical command (allow at least **600 seconds** so the run is not killed by the execution environment; the first step can take ~150s and training runs for 300s): - - Preferred (when daemon/root `.venv` is active): `timeout 600 uv run --active component_system/entrypoint.py` - - Fallback (when no active root `.venv` is available): `timeout 600 uv run component_system/entrypoint.py` -4. If there is a simple bug or OOM, fix (e.g. reduce batch size) and rerun. For baseline measurement, keep retrying until the run succeeds. -5. Commit changes on the seed branch before reporting. -6. Print the DCA summary block for the runner; include the current commit SHA in the JSON so the runner can verify and record it. -7. Let the runner evaluate signal and handle promotion policy. +1. Work in the seed worktree (one branch per seed). +2. 
Adapt/fix until it runs (runtime only: bugs, OOM, imports, config; no model/hyperparameter/training-logic changes for better metrics). +3. Run canonical command (**≥900s**): `timeout 900 uv run --active component_system/entrypoint.py > training.log 2>&1` (or `... 2>&1 | tee training.log` to also see output). **Must set command/tool timeout ≥900s**. After the run, inspect `training.log` to confirm completion and recover or verify metrics. +4. On bug/OOM: fix and rerun; for baseline, retry until success. +5. Commit on seed branch before reporting. +6. Print DCA summary block with `commit_sha` in JSON. +7. Runner evaluates signal and handles promotion. ## Output Format -Print a summary block for the runner. Report metrics in the JSON first; the -runner only falls back to parsing training stdout/stderr when the JSON metrics -are missing: +Print the summary block. Put metrics in JSON; runner falls back to stdout/stderr parsing if missing. ```text AUTORESEARCH_DCA_SUMMARY_BEGIN -{"checks":["entrypoint"],"notes":"what you adapted or fixed","completed_at":"YYYY-MM-DD HH:MM:SS","commit_sha":"git sha","metrics":{"val_bpb":1.239972,"training_seconds":300.1,"total_seconds":360.4,"peak_vram_mb":11967.8,"mfu_percent":2.15,"total_tokens_M":140.5,"num_steps":268,"num_params_M":11.5,"depth":4}} +{"checks":["entrypoint"],"notes":"...","completed_at":"YYYY-MM-DD HH:MM:SS","commit_sha":"git sha","metrics":{"val_bpb":1.24,...}} AUTORESEARCH_DCA_SUMMARY_END ``` -If you cannot provide final metrics, still print the exact same JSON shape with -`"metrics": {}`. - -The runner falls back to extracting metrics from canonical training stdout/stderr: -`val_bpb`, `training_seconds`, `total_seconds`, `peak_vram_mb`, `mfu_percent`, -`total_tokens_M`, `num_steps`, `num_params_M`, and `depth`. - -If a DCA run finishes but still reports no metrics, the system does not -immediately mark it failed. 
Instead, it queues a follow-up DCA recovery task -that inspects the saved stdout/stderr logs and reports the metrics in the same -JSON format. Only if that recovery task still cannot recover metrics is the run -treated as failed. +If no final metrics, use `"metrics": {}`. Runner extracts from stdout/stderr: `val_bpb`, `training_seconds`, `total_seconds`, `peak_vram_mb`, `mfu_percent`, `total_tokens_M`, `num_steps`, `num_params_M`, `depth`. No metrics → recovery DCA inspects logs; only then treat as failed. ## Check: Signal Rules @@ -65,35 +47,29 @@ treated as failed. | `val_bpb` drops >= 0.001 vs baseline | `positive_signal` | | `val_bpb` rises >= 0.001 vs baseline | `negative_signal` | | difference < 0.001 | `neutral` | -| no historical baseline `last_val_bpb` | `positive_signal` (first recording) | +| no historical baseline (best_val_bpb) | `positive_signal` (first recording) | | metrics missing or training error | `error` | The threshold is defined in `component_system/config.py` (`PROMOTION_THRESHOLD`). ## Action: Promotion Rules -Only the DCA (Do-Check-Action) stage may trigger a merge into baseline. The Plan stage must never merge code; the system performs the merge automatically after a successful DCA promotion. - -The runner records the DCA `commit_sha` from your summary (or from the current branch HEAD if omitted) for traceability. On positive signal, the workflow merges the seed branch into the baseline. If the merge fails (e.g. conflicts), the system queues a merge-resolution DCA run. +Only DCA may trigger a merge into baseline; P must not. Runner records `commit_sha`; on positive signal the workflow merges seed into baseline first, then updates metrics/state. Merge conflict → system queues merge-resolution DCA. -### Promotion flow (`positive_signal` only) -1. The system merges the seed branch into the baseline branch (you do not run merge yourself). -2. 
The workflow updates `baseline_metrics.json` (and `baseline_branches.json` as needed) with `last_val_bpb`, `promoted_from`, `promoted_idea`, `promoted_at`, `promoted_branch`. -3. Promotion metadata is persisted in seed/run state files. +### Promotion (`positive_signal`) +1. System merges seed into baseline first (you do not run merge). +2. Workflow updates `baseline_metrics.json` / `baseline_branches.json`. +3. Metadata in seed/run state. -### Merge failure and conflict resolution -- If the merge into baseline fails (e.g. conflicts), the system queues a **new DCA run** with `merge_resolution: true`. - - **Normal seed**: In the seed worktree, run `git merge __baseline__` (merge the baseline branch into the seed), resolve conflicts, commit, then print the DCA summary so the system can retry promotion. - - **Baseline seed (__baseline__)**: The goal is to merge __baseline__ *into* the target branch (e.g. master). Run from the directory that has the target branch checked out (use `git worktree list` to find it), then `git merge __baseline__`. Do *not* run from the __baseline__ worktree and do *not* run `git merge master` there—that would merge master into __baseline__, the wrong direction. +### Merge failure +- **Normal seed:** In seed worktree: `git merge __baseline__`, resolve conflicts, commit, print DCA summary for retry. +- **Baseline seed (__baseline__):** Merge __baseline__ into target (e.g. master). Run from worktree that has target checked out (`git worktree list`); do not run from __baseline__ worktree or `git merge master` there. -### Non-promotion cases -- `neutral`, `negative_signal`, or `error`: log only, no baseline merge/update. -- Failed run info remains available via queue/state logs. +### Non-promotion +`neutral` / `negative_signal` / `error`: log only. Failure info in queue/state logs. ## Constraints -- Training must use `run_mainline_training` or equivalent for evaluation consistency. -- Evaluation (`val_bpb`) must not be skipped. 
-- Do not edit `baseline_branches.json` or `baseline_metrics.json` directly; the workflow writes them. -- Only `positive_signal` can trigger promotion. -- Keep `component_system/entrypoint.py` as the canonical runner. -- Rely on git history plus state files for traceability. +- No model/optimizer/training-logic changes for better metrics; only make the plan run (bugs, OOM, etc.). +- Use `run_mainline_training` (or equivalent); do not skip `val_bpb` evaluation. +- Do not edit baseline JSON files; only DCA promotion updates them. +- Canonical runner: `component_system/entrypoint.py`. Traceability: git + state files. diff --git a/component_system/PDCA-PLAN.md b/component_system/PDCA-PLAN.md index f81b6259d..7564ffeb8 100644 --- a/component_system/PDCA-PLAN.md +++ b/component_system/PDCA-PLAN.md @@ -1,112 +1,61 @@ -# P — Seed Planning And Generation - -## Responsibility -Extract exactly one testable improvement hypothesis from the seed prompt, -generate the first implementation in a candidate worktree, and hand the result -to DCA through the runner. - -## Workspace and paths -Your **current working directory is the seed worktree**. All reads and edits must stay inside this workspace. Use only in-workspace paths from your current working directory, and do not use or request absolute paths or any paths outside the workspace; the runner has already set your cwd to the correct worktree. - -## Skill: arxiv-search - -Use the **arxiv-search** skill (`.agents/skills/arxiv-search`) to search for -relevant papers. - -If the skill is not installed or the search script is missing, do not pretend -the skill exists and do not fabricate paper references. Try to install or make -the skill available autonomously. If that still fails, continue planning from -the other input sources instead of asking the user questions. - -### Prerequisites -```bash -pip install arxiv -``` - -Install the Python package only after the skill itself is available. 
Installing -the package alone does not replace the missing skill. If the skill cannot be -made available, skip paper-driven search and proceed with the remaining inputs. - -### Search for papers -```bash -# Search by topic in cs.LG / cs.NE categories -python .agents/skills/arxiv-search/scripts/search_arxiv.py \ - --query "optimizer adaptive learning rate" \ - --category "cs.LG" \ - --sort-by submitted_date \ - --max-results 10 - -# Search for model architecture ideas -python .agents/skills/arxiv-search/scripts/search_arxiv.py \ - --query "ti:attention AND abs:efficiency" \ - --date-from "2024-01-01" \ - --output json -``` - -### How to Extract a Hypothesis from Results -1. Read the abstract of each result -2. Identify a concrete architectural or algorithmic change (not just a concept) -3. Map it to a target component: `model`, `optimizer`, or `trainer` -4. State the **expected benefit** (e.g. faster convergence, lower val loss, fewer params) -5. Reduce the idea to one isolated improvement that can be evaluated on its own - -## Read results.tsv first (avoid idea duplication) -Before choosing a hypothesis, **read `results.tsv` in your current working directory if it exists**. The runner copies the latest result history into the seed worktree before P runs. Use it to avoid proposing ideas that were already tried or discarded; only repeat an idea if you have a clear new angle (e.g. different implementation or target component). - -## Input Sources -- **results.tsv** in cwd (when present) — read first to avoid duplicating past ideas -- arXiv papers via **arxiv-search** skill (primary) -- Clues from past run failures in `queue/done/` -- Manual seed files - -## One-Improvement Rule - -Each P run must propose and implement exactly one improvement. - -- One seed means one hypothesis. -- One seed means one causal change to evaluate. -- Do not bundle multiple ideas into the same candidate, even if they seem - complementary. 
-- If the prompt contains several possible improvements, choose the single best - one for this iteration and leave the others for later seeds. -- If an idea would require several coordinated changes, choose the smallest - coherent version that still tests the hypothesis cleanly. - -Good examples: -- change only the optimizer schedule -- add only one architectural block -- simplify only one training heuristic - -Bad examples: -- change the model width and the optimizer and the batch schedule together -- combine several paper ideas in one seed -- make "general cleanup plus a new feature" in the same candidate - -## Output Format -Print a summary block for the runner: -```text -AUTORESEARCH_P_SUMMARY_BEGIN -{"idea":"short title","target_component":"model | optimizer | trainer","description":"change details, hypothesis, expected benefit","source_refs":["arXiv:"],"commit_sha":"git sha","completed_at":"YYYY-MM-DD HH:MM:SS"} -AUTORESEARCH_P_SUMMARY_END -``` - -## Steps -1. If `results.tsv` exists in the worktree, read it first to avoid duplicating already-tried ideas. -2. Refine the seed prompt into one concrete idea -3. Reduce that idea to one isolated improvement with a clear expected benefit -4. Identify the target component (`model`, `optimizer`, or `trainer`) -5. Implement only that first version inside the candidate worktree created from `baseline` -6. Commit the candidate branch -7. Ensure the summary describes the single improvement being tested -8. Print the summary block; the runner records the commit on the seed branch. 
- -## Constraints -- Each seed targets exactly one component -- Each seed applies exactly one improvement -- Prefer the smallest viable implementation that can test the hypothesis -- Do not mix exploratory cleanup with the experimental change -- Do not include opportunistic refactors unless they are strictly required to make - the one improvement work -- The description must contain enough detail for DCA to continue independently -- One branch per seed: commit on the seed branch in the worktree; the runner does not merge branches. -- **Plan must never merge code.** Only the DCA (Do-Check-Action) stage may trigger a merge into baseline; the system performs the merge automatically after a successful DCA promotion. +# P - Seed Planning And Generation + +## Responsibility +Extract exactly one testable improvement hypothesis from the seed prompt, +generate the first implementation in a candidate worktree, and hand the result +to DCA through the runner. + +## Workspace and paths +**CWD = seed worktree.** Read and edit only inside it; use relative paths only. + +## arXiv search (CLI) + +Run from repo root with uv (e.g. `uv run python component_system/run_arxiv.py ...`); arxiv is already a project dependency. + +### Search (CLI script) + +From repo root, use the script in this component: + +```bash +uv run python component_system/run_arxiv.py --query "machine learning" --max-results 5 +uv run python component_system/run_arxiv.py --id 1605.08386v1 --output json +``` + +**CLI arguments:** `--query` / `-q`, `--id` (one or more arXiv IDs; overrides query), `--max-results` / `-n`, `--sort-by` (relevance | submittedDate | lastUpdatedDate), `--sort-order` (ascending | descending), `--output` / `-o` (text | json), `--download-dir`, `--verbose` / `-v`. + +### Hypothesis from results +1. Read abstracts; pick one concrete change (not just a concept). +2. Map to component: `model`, `optimizer`, or `trainer`. +3. State expected benefit; reduce to one isolated, evaluable improvement. 

+
+## Input
+- **results.tsv** in cwd (if present) — read first to avoid duplicating tried/discarded ideas.
+- arXiv via the CLI script (`component_system/run_arxiv.py`); past failures in `queue/done/`; manual seed files.
+
+## One-Improvement Rule
+
+One seed = one hypothesis = one causal change. Do not bundle ideas. If the prompt has several options, pick the single best for this run. Prefer the smallest coherent change that tests the hypothesis.
+
+**Good:** one optimizer schedule change; one architectural block; one training heuristic. **Bad:** model + optimizer + batch together; multiple paper ideas in one seed; "cleanup + new feature" in one candidate.
+
+## Output Format
+Print a summary block for the runner:
+```text
+AUTORESEARCH_P_SUMMARY_BEGIN
+{"idea":"short title","target_component":"model | optimizer | trainer","description":"change details, hypothesis, expected benefit","source_refs":["arXiv:"],"commit_sha":"git sha","completed_at":"YYYY-MM-DD HH:MM:SS"}
+AUTORESEARCH_P_SUMMARY_END
+```
+
+## Runner / worktree
+Before each P run, the runner syncs the seed worktree with its baseline branch (merge baseline into seed) so P always starts from the latest baseline.
+
+## Steps
+1. Read `results.tsv` if present.
+2. Refine prompt → one concrete idea → one isolated improvement; name target component.
+3. Implement in worktree (from baseline); commit on seed branch.
+4. Print summary block (runner records commit). Description must be enough for DCA.
+
+## Constraints
+- One component, one improvement per seed. Smallest viable implementation.
+- No exploratory cleanup or opportunistic refactors unless required for the one change.
+- Commit on seed branch; runner does not merge. **P must never merge;** only DCA triggers merge into baseline.
diff --git a/component_system/config.py b/component_system/config.py index 9975ab2d2..ffe3ef7cb 100644 --- a/component_system/config.py +++ b/component_system/config.py @@ -13,7 +13,7 @@ # Promotion threshold: improve val_bpb by at least this much to promote PROMOTION_THRESHOLD = 0.001 -# Worktree root relative to project (string for display/config compatibility) +# Worktree root relative to project WORKTREE_ROOT = "component_system/history/worktrees" # Default branch name suggested in UI when no branches exist (not a global baseline) diff --git a/component_system/docs/SEED_LIFECYCLE_AND_CONCURRENCY_REVIEW.md b/component_system/docs/SEED_LIFECYCLE_AND_CONCURRENCY_REVIEW.md new file mode 100644 index 000000000..854cbf0e2 --- /dev/null +++ b/component_system/docs/SEED_LIFECYCLE_AND_CONCURRENCY_REVIEW.md @@ -0,0 +1,177 @@ +# Seed Lifecycle, State Transitions, and Concurrency Review + +## 1. Seed lifecycle and state transitions + +### 1.1 Seed status enum (`SeedStatus`) + +| Status | Meaning | +|-------------|---------| +| `draft` | Newly created, not yet queued for Plan | +| `queued` | Plan run created and task in queue (or waiting for baseline) | +| `planning` | P run in progress | +| `generated` | P completed; code generated, DCA not yet queued | +| `dca_queued`| DCA run created and task in queue (includes sync/merge resolution) | +| `adapting` | DCA run in progress | +| `running` | **Never set in code** — see gap below | +| `passed` | DCA completed, no promotion | +| `failed` | Terminal failure (P failed, DCA failed, or reconciled from passed+error) | +| `promoted` | DCA completed with positive signal; seed merged into baseline | + +### 1.2 Documented transitions (from code) + +``` +draft → queued queue_p / _enqueue_plan_run +queued → queued queue_p (waiting for baseline; latest_run_id cleared) +queued → planning mark_run_started (stage P) +planning → generated finish_p_run +planning → failed mark_run_failed (P failed) +generated → dca_queued queue_dca +generated → 
queued finish_sync_resolution (then _enqueue_plan_run) +dca_queued → adapting mark_run_started (stage DCA) +adapting → passed finish_dca_run (neutral/negative signal) +adapting → failed finish_dca_run (error) or mark_run_failed +adapting → promoted finish_dca_run (positive_signal) +adapting → dca_queued finish_dca_run (merge/sync failed → queue_dca merge_resolution=True) +adapting → generated finish_dca_run (ralph neutral_signal) +passed → failed _reconcile_seed_status_signal (passed but latest_signal=="error") +``` + +Baseline seed: `draft` → `generated` (ensure_baseline_result) → `dca_queued` → … → `passed` / `failed`. + +--- + +## 2. Gaps and issues in state transitions + +### 2.1 `SeedStatus.running` is never set + +- **Code:** `SeedStatus.running` appears only in: + - `is_seed_eligible_for_stage` (P not eligible if `adapting`, `running`, or `dca_queued`) + - `ensure_baseline_result` (early return if `dca_queued`, `adapting`, `running`) + - Dashboard `status_column_map` → `activeDca` +- **Issue:** No assignment `seed.status = SeedStatus.running` anywhere. DCA-in-progress uses `adapting`. +- **Recommendation:** Either remove `running` from the enum and all checks, or document it as reserved and start setting it (e.g. for a future “running but not adapting” phase). Otherwise it’s dead code and the enum is misleading. + +### 2.2 Sync failure in `mark_run_started` (P): run/seed consistency + +- **Flow:** When a P run is started, `mark_run_started`: + 1. Sets `run.status = RunStatus.running` (in memory). + 2. Calls `ensure_seed_worktree_ready`, then `sync_seed_worktree_with_baseline`. + 3. On `GitCommandError`, calls `queue_sync_resolution(seed_id)` and raises `SyncResolutionQueued`. + 4. Only at the end (after sync and other logic) does it `run_repo.save(run)` and `seed_repo.save(seed)`. +- **Effect:** When sync fails: + - The **run** is never saved as `running`; it remains `queued` in the run repo. 
+ - The **seed** is updated by `queue_sync_resolution`: `seed.status = dca_queued`, new DCA run and task written. + - The **P task** is moved to error in `run.py` (`move_to_error(task_path)`). +- **Result:** The original P **run** is orphaned: it stays `queued` forever and is never completed or failed. `seed.latest_run_id` points to the new sync-resolution DCA run. A later Plan enqueue creates a new P run and task. +- **Recommendation:** When raising `SyncResolutionQueued`, either: + - Mark the current P run as failed (e.g. “sync_failed”) and save it, or + - Explicitly not create a run for that P task until after sync succeeds (e.g. move run creation to after sync). That would require a larger refactor. + +### 2.3 Other transitions + +- All other transitions are consistent with the intended flow: P → generated/failed, DCA → passed/failed/promoted/dca_queued/generated, and reconciliation of `passed` + `error` → `failed`. + +--- + +## 3. Multiple seeds running at the same time — race conditions and conflicts + +### 3.1 Task claiming is atomic per task + +- **Mechanism:** `claim_pending` uses `path.rename(path, IN_PROGRESS_DIR / path.name)`. Only one process can rename a given file; others get `FileNotFoundError` or `OSError` and skip that task. +- **Effect:** Each task file is claimed by at most one worker; no double execution of the same task. + +### 3.2 Per-seed eligibility prevents P vs DCA overlap + +- **Mechanism:** Before running a task, the worker calls `claim_pending(..., eligible_fn=eligible)`. `eligible` uses `WORKFLOW.is_seed_eligible_for_stage(seed_id, stage)`: + - **P:** eligible only if `seed.status not in (adapting, running, dca_queued)`. + - **DCA:** eligible only if `seed.status is not SeedStatus.planning`. +- **Effect:** For a given seed, P and DCA are never both considered eligible. 
So the same seed cannot have a P task and a DCA task running at the same time, and a seed in `planning` or `adapting` will not get another stage started until the run finishes. + +### 3.3 Read–modify–write on seed/run state + +- **Risk:** Multiple workers can run concurrently (e.g. 2 P workers, 1 DCA-GPU, 1 DCA-AUX). Each worker loads seed/run, modifies, and saves. There is no locking or optimistic concurrency (e.g. version field). +- **Mitigation in practice:** + - Each **task** is for a specific (seed_id, run_id). Different tasks imply different runs (and usually different seeds for P/DCA). + - Eligibility ensures that for a given seed, only one “kind” of work (P or DCA) is allowed at a time. +- **Remaining risk:** If two tasks for the same seed could ever be in flight (e.g. due to a bug or a restored task), two workers could both read the same seed, update it, and save; the last write would win and one update could be lost. With the current design (one active run per seed per stage), this should not happen for normal execution. + +### 3.4 Git worktrees + +- **Design:** Each seed has its own worktree (path `worktrees/`). Different seeds use different directories. +- **Effect:** No filesystem conflict between seeds; multiple seeds can run P or DCA in parallel in separate worktrees. Baseline seed uses `worktrees/__baseline__`. + +### 3.5 Shared JSON state (repos) + +- **State:** Seeds, runs, metrics, branch map, and queue dirs are file-based (JSON under `history/state/`, `history/queue/`). +- **Risk:** Two workers writing different seeds at the same time can overwrite each other only if they wrote the same file (same seed or same run). Since each task is bound to one run and one seed, and eligibility prevents overlapping stages for the same seed, concurrent updates to the same seed/run are not expected for correct flows. 
+- **Recommendation:** For extra safety, consider short-lived file locking or atomic write (write to temp + rename) for seed/run saves if the daemon scales to many workers. + +--- + +## 4. Edge case: automatic merge fails — can dependent tasks start prematurely? + +### 4.1 Sync failure (merge baseline into seed) before P + +- **When:** In `mark_run_started` (stage P), `sync_seed_worktree_with_baseline(seed)` raises `GitCommandError`. +- **What happens:** + 1. `queue_sync_resolution(seed_id)` runs: seed set to `dca_queued`, new DCA task with `sync_resolution: True` is written. + 2. `SyncResolutionQueued` is raised; in `run.py` the P task is moved to error (not re-queued). + 3. Seed remains `dca_queued`; only the sync-resolution DCA task is for that seed. +- **Eligibility:** For P, a seed in `dca_queued` is **not** eligible. So no other P task for this seed can start. No dependent “normal” P runs until the sync-resolution DCA completes and Plan is re-queued in `finish_sync_resolution`. So **dependent tasks do not start prematurely**. + +### 4.2 DCA merge into baseline fails (normal or baseline seed) + +- **When:** In `finish_dca_run`, `promote_seed_branch` raises `GitCommandError`. +- **What happens:** + 1. A new DCA run is queued with `merge_resolution=True` (and seed stays `dca_queued`). + 2. No new P run or normal DCA run is enqueued for that seed until the merge-resolution DCA finishes. +- **Eligibility:** While seed is `dca_queued` or `adapting`, P is not eligible. So **dependent tasks do not start prematurely**. + +### 4.3 Baseline merge fails + +- Same pattern: baseline seed gets a merge-resolution DCA task, stays `dca_queued`. `_release_seeds_waiting_for_baseline` is only called after a successful merge (or after the “loop avoided” path). Waiting seeds are not released until baseline is merged. So **dependent tasks do not start prematurely**. 
+ +**Conclusion:** When the workflow’s automatic merge (sync or promote) fails, the seed is put in `dca_queued` with a resolution DCA task. Eligibility and the fact that no new P/normal DCA is enqueued until resolution completes ensure that dependent tasks do **not** start before merge resolution. + +--- + +## 5. Other edge cases + +### 5.1 Restored in-progress tasks + +- On daemon start, `restore_in_progress_tasks()` moves all tasks from `in_progress/` back to the stage queue. Those tasks are then eligible to be claimed again. +- **Risk:** If a task was in progress (worker had already called `mark_run_started` and set seed to `planning`/`adapting`) and the daemon died before the worker finished, the run and seed are already updated. After restore, the task is back in the queue; a worker can claim it and call `mark_run_started` again. That would re-use the same run_id and could lead to duplicate “started” events or inconsistent state (e.g. two workers both thinking they own the run). The code does not detect “this run was already started.” +- **Recommendation:** Before updating run/seed in `mark_run_started`, check that `run.status` is still `queued`; if it is already `running`, treat the task as a duplicate (e.g. move to error or skip and don’t run again). + +### 5.2 Ralph loop and merge_resolution / metrics_recovery + +- After a failed DCA, `mark_run_failed` can call `queue_p(seed_id)` for Ralph seeds, but only when the task is not `merge_resolution` and not `metrics_recovery`. So Ralph does not re-queue P on merge-resolution or metrics-recovery DCA failure, which is correct. + +### 5.3 Baseline seed and sync + +- Baseline seed does not call `sync_seed_worktree_with_baseline` (early return in that function). So sync failure path does not apply to __baseline__. `queue_sync_resolution` explicitly raises if seed is baseline. No issue. + +--- + +## 6. 
Summary table + +| Area | Status | Notes | +|-----------------------------|--------|--------| +| Seed status enum | Gap | `SeedStatus.running` never set; remove or use. | +| P/DCA transition consistency| OK | Transitions match design. | +| Sync fail (before P) | Bug | P run left `queued`; orphaned run. | +| Task claiming | OK | Atomic rename prevents double run of same task. | +| P vs DCA same seed | OK | Eligibility prevents concurrent P and DCA for one seed. | +| Multiple seeds concurrent | OK | Different worktrees; eligibility per seed. | +| Merge/sync fail → dependents| OK | Seed stays `dca_queued`; no premature P/DCA. | +| Restored in-progress tasks | Risk | Re-claiming can lead to duplicate start for same run. | + +--- + +## 7. Implemented fixes + +1. **Sync failure in `mark_run_started`:** Before raising `SyncResolutionQueued`, mark the current P run as failed (e.g. error “sync with baseline failed”) and save it, so the run is not orphaned. +2. **`SeedStatus.running`:** Either remove it from the enum and from all checks, or introduce a clear rule (e.g. “DCA in progress” = `adapting` only) and document that `running` is unused. +3. **Restored tasks:** In `mark_run_started`, if `run.status != RunStatus.queued`, do not update run/seed and do not run the agent; move the task to error or a “duplicate” bucket and return. + +**Not changed:** `SeedStatus.running` is still never set; it could be removed from the enum in a follow-up or left as reserved. 
diff --git a/component_system/domain/models.py b/component_system/domain/models.py index f03c9a121..d105569e1 100644 --- a/component_system/domain/models.py +++ b/component_system/domain/models.py @@ -71,6 +71,8 @@ class SeedRecord(BaseModel): latest_metrics: dict[str, Any] = Field(default_factory=dict) plan: PlanIdea | None = None last_error: str | None = None + """Baseline val_bpb at sync-before-P time; used for positive/negative/neutral judgement in DCA.""" + former_val_bpb: float | None = None class DashboardColumn(BaseModel): diff --git a/component_system/protocol.md b/component_system/protocol.md index 6f50dec72..56d43941e 100644 --- a/component_system/protocol.md +++ b/component_system/protocol.md @@ -22,10 +22,10 @@ The intended control flow is: 1. Read this file and the required context files. 2. Ensure the queue and state layout exist. 3. Create or refine a seed from a human prompt. -4. Queue that seed into `component_system/history/queue/p/`. -5. Start the resident daemon with `uv run component_system/run.py`. -6. Let the daemon workers execute P and DCA through file-based handoff. -7. Monitor the daemon, queue, and logs instead of roleplaying stage work yourself. +4. Queue the seed for P. +5. Start the daemon: `uv run component_system/run.py`. +6. Let daemon workers execute P and DCA via file-based handoff. +7. Monitor the daemon, queue, and logs; do not simulate stages in-session. Manual execution of an individual stage is only for the agent process that was invoked by the daemon for that specific task. @@ -81,10 +81,7 @@ Read in this order: 4. `component_system/entrypoint.py` for the canonical execution path 5. `component_system/config.py` for promotion threshold and static binding -Baseline reference files (workflow-managed; read-only): `component_system/baseline_branches.json` (per-branch baseline mapping), `component_system/baseline_metrics.json` (baseline run metrics). The workflow writes these; only read them for context. 
- -For interactive bootstrap, also inspect the current queue/state situation, -especially recent items in `queue/done/` and the latest baseline information. +Baseline files (workflow-managed; read-only): `baseline_branches.json`, `baseline_metrics.json`. For interactive bootstrap, inspect recent `queue/done/` and baseline state. ## Workspace and Path Rules @@ -110,18 +107,7 @@ is the seed worktree. In that mode: ## Baseline-First Rule -The first meaningful measurement in a fresh component-system run is the -baseline result. - -- If `baseline_metrics.json` has no `last_val_bpb` for the baseline branch, the system should establish a - baseline run before evaluating ordinary seeds. -- The baseline seed is a "no changes" measurement of the currently bound - component modules. -- Treat that first baseline result as the reference point for later promotion - decisions. - -This mirrors the root-project rule that the first run should establish the -baseline before experimenting. +Establish baseline before evaluating seeds: if `baseline_metrics.json` has no baseline result for the branch (no records), run the baseline (no-changes) measurement first. Use that result as the reference for promotion. ```mermaid flowchart TD @@ -187,14 +173,9 @@ The canonical component-system execution path is: uv run component_system/entrypoint.py ``` -When the DCA agent runs this (e.g. in a sandbox or tool), the run needs **at least 600 seconds** (first step ~150s + training budget 300s + buffer); use e.g. `timeout 600 uv run ...` so the execution environment does not kill the process early. +Allow **at least 900 seconds** when DCA runs this (e.g. `timeout 900 uv run ...`). -The DCA agent must report a structured JSON summary between the required -markers, including a `metrics` object. The runner uses that structured report -first and only falls back to parsing stdout/stderr when the JSON metrics are -missing. 
If the initial DCA summary still lacks metrics, the system queues a -follow-up recovery DCA that inspects the saved logs before declaring failure. -The canonical metrics are: +DCA must report a structured JSON summary (including `metrics`). Runner uses it first; falls back to stdout/stderr parsing if missing. No metrics → recovery DCA inspects logs. Canonical metrics: ```text --- @@ -250,55 +231,32 @@ Use the same judgment standard as the original autoresearch loop: ## Bootstrap Procedure for Interactive Sessions -When a human starts a fresh interactive session and asks you to use this -component system, do this: - -1. Read `baseline_branches.json`, `baseline_metrics.json`, and recent queue/state outputs. -2. Ensure the queue/state/worktree layout exists. -3. Create an initial seed from the human prompt. -4. Queue P for that seed. -5. Start `uv run component_system/run.py`. -6. Monitor the daemon and logs instead of manually executing P and DCA yourself. +1. Read baseline files and recent queue/state. +2. Ensure queue/state/worktree layout exists. +3. Create a seed from the human prompt and queue it for P. +4. Start `uv run component_system/run.py` and monitor; do not run P/DCA manually. ## Operating Loop -Once the daemon is running, the queue-driven loop is: - -1. A seed is persisted under `state/seeds/` and queued to `queue/p/`. -2. P creates or refreshes the seed worktree from baseline, generates code, and - commits on the seed branch. -3. The daemon automatically queues DCA. -4. DCA adapts, checks, runs, evaluates, and either promotes or archives the seed. -5. The system persists runs and events under `state/` and continues with the - next available work. - -The system is intended to behave like an autonomous researcher: keep moving, -measure results, retain wins, discard losses, and continue until explicitly -stopped. +1. Seed persisted in `state/seeds/`, queued to `queue/p/`. +2. P refreshes worktree, generates code, commits on seed branch. +3. 
Daemon queues DCA. +4. DCA adapts, runs, evaluates; promotes or archives. +5. State persisted under `state/`; daemon continues with next work. ## State and Logging -The durable record of the workflow lives in files: - -- `state/seeds/` stores seed definitions and status. -- `state/runs/` stores stage-run metadata and run outcomes. -- `state/events/` stores seed event histories. -- `queue/done/` archives completed tasks. -- `queue/error/` captures failed tasks. -- `logs/` stores stdout/stderr from agent invocations. +- `component_system/history/state/seeds/`, `component_system/history/state/runs/`, `component_system/history/state/events/` — seed and run state. +- `component_system/history/queue/done/`, `component_system/history/queue/error/` — completed and failed tasks. +- `component_system/history/logs/` — agent stdout/stderr. -Do not rely on chat context as the source of truth when the filesystem state -already records the workflow. +Use filesystem state as source of truth, not chat context. ## Daemon -The resident daemon in `component_system/run.py` manages two single-threaded -worker pools that poll `queue/{p,dca}/` continuously. Each worker dispatches a -task to an external code agent, which reads files, modifies code in a git -worktree, runs the canonical entrypoint, and prints structured summaries for -the runner to persist. +`run.py` runs two single-threaded workers polling `component_system/history/queue/p/` and `component_system/history/queue/dca/`. Workers dispatch to an external code agent; the agent reads files, edits the worktree, runs the entrypoint, and prints structured summaries. -Start the daemon with: +Start: ```bash # Default backend @@ -329,4 +287,4 @@ environment: ### Logs -Agent stdout/stderr for every invocation is saved to `component_system/history/logs/`. +Agent stdout/stderr → `component_system/history/logs/`. 
diff --git a/component_system/repositories/state.py b/component_system/repositories/state.py index 7ff3ab2fd..ef6beec5e 100644 --- a/component_system/repositories/state.py +++ b/component_system/repositories/state.py @@ -28,18 +28,54 @@ def set_branch_for_seed(self, seed_id: str, branch: str) -> None: save_baseline_branch_map(m) +def _branch_metrics_view(history: list[dict[str, Any]]) -> dict[str, Any]: + """Build view with best_val_bpb (min over history) and promoted_* from the record that achieved it.""" + if not history: + return {"best_val_bpb": None, "history": []} + vals = [r["val_bpb"] for r in history if r.get("val_bpb") is not None] + best_val_bpb = min(vals) if vals else None + best_record = next((r for r in history if r.get("val_bpb") == best_val_bpb), history[-1]) + view: dict[str, Any] = { + "best_val_bpb": best_val_bpb, + "history": history, + } + if best_record.get("promoted_branch") is not None: + view["promoted_branch"] = best_record["promoted_branch"] + if best_record.get("promoted_idea") is not None: + view["promoted_idea"] = best_record["promoted_idea"] + if best_record.get("promoted_at") is not None: + view["promoted_at"] = best_record["promoted_at"] + if best_record.get("commit_sha") is not None: + view["commit_sha"] = best_record["commit_sha"] + return view + + class BaselineMetricsRepository: - """Per-baseline-branch metrics (last_val_bpb, promoted_*, etc.).""" + """Per-baseline-branch metrics: list of records per branch (val_bpb, promoted_*, etc.).""" def get_all(self) -> dict[str, dict[str, Any]]: - return load_baseline_metrics() + """Return branch -> view (best_val_bpb, promoted_branch, commit_sha, history) for dashboard.""" + data = load_baseline_metrics() + return {branch: _branch_metrics_view(hist) for branch, hist in data.items()} def get_for_branch(self, branch: str) -> dict[str, Any] | None: - return load_baseline_metrics().get(branch) + """Return view for one branch (best_val_bpb, history, promoted_branch?, commit_sha?).""" + data 
= load_baseline_metrics() + hist = data.get(branch) + if hist is None: + return None + return _branch_metrics_view(hist) + + def append_promotion_for_branch(self, branch: str, record: dict[str, Any]) -> None: + """Append a promotion record: val_bpb, promoted_branch, promoted_idea, promoted_at, commit_sha.""" + data = load_baseline_metrics() + data.setdefault(branch, []).append(dict(record)) + save_baseline_metrics(data) - def update_for_branch(self, branch: str, metrics: dict[str, Any]) -> None: + def append_baseline_run(self, branch: str, val_bpb: float) -> None: + """Append a baseline measurement (no promotion).""" data = load_baseline_metrics() - data[branch] = {**data.get(branch, {}), **metrics} + data.setdefault(branch, []).append({"val_bpb": val_bpb}) save_baseline_metrics(data) diff --git a/component_system/run.py b/component_system/run.py index 0e3bff04e..9f6c274a2 100644 --- a/component_system/run.py +++ b/component_system/run.py @@ -21,18 +21,25 @@ from typing import Any from component_system.domain.models import StageName -from component_system.services.workflow import BASELINE_SEED_ID, WorkflowService +from component_system.services.workflow import ( + BASELINE_SEED_ID, + DuplicateRunStartError, + SyncResolutionQueued, + WorkflowService, +) from component_system.task import ( BASELINE_BRANCHES_PATH, BASELINE_METRICS_PATH, COMPONENT_SYSTEM_ROOT, + claim_pending, DAEMON_HEARTBEAT_PATH, - ensure_queue_layout, daemon_heartbeat, - list_pending, + ensure_queue_layout, LOG_ROOT, move_to_done, + move_to_error, read_task, + restore_in_progress_tasks, ) PROJECT_ROOT = COMPONENT_SYSTEM_ROOT.parent @@ -46,6 +53,9 @@ DEFAULT_TIMEOUTS = {"p": 900, "dca": 3600, "direct": 3600} +# Canonical DCA entrypoint run: require ≥900s so training can complete. Agent must set command/tool timeout ≥ this. 
+DCA_CANONICAL_RUN_TIMEOUT_SECONDS = 900 + STAGE_DOCS = { "p": ["PDCA-PLAN.md"], "dca": ["PDCA-DO-CHECK-ACTION.md"], @@ -65,7 +75,7 @@ def _signal_handler(_sig: int, _frame: Any) -> None: def _get_timeout(stage: str) -> int: - return int(os.environ.get(f"PDCA_TIMEOUT_{stage.upper()}", DEFAULT_TIMEOUTS.get(stage, 600))) + return int(os.environ.get(f"PDCA_TIMEOUT_{stage.upper()}", DEFAULT_TIMEOUTS.get(stage, 900))) def _build_log_paths(run_id: str) -> tuple[Path, Path]: @@ -95,14 +105,15 @@ def _is_root_venv_active() -> bool: def _dca_command_guidance() -> tuple[str, str]: + timeout_prefix = f"timeout {DCA_CANONICAL_RUN_TIMEOUT_SECONDS}" if _is_root_venv_active(): return ( - "uv run --active component_system/entrypoint.py", + f"{timeout_prefix} uv run --active component_system/entrypoint.py", "Root .venv is active; use --active to reuse it from the worktree.", ) return ( - "uv run component_system/entrypoint.py", - "No active root .venv detected; fallback avoids --active so uv can run normally.", + f"{timeout_prefix} uv run component_system/entrypoint.py", + "No active root .venv; uv run without --active.", ) @@ -119,7 +130,7 @@ def _build_direct_code_prompt(prompt: str) -> str: def _stream_pipe_to_file(pipe: Any, handle: Any, chunks: list[str]) -> None: try: while True: - piece = pipe.read(16) + piece = pipe.readline() if not piece: break chunks.append(piece) @@ -259,6 +270,8 @@ def _invoke_agent( if config["via"] == "stdin": popen_kwargs["stdin"] = subprocess.PIPE else: + # Use DEVNULL so the agent never reads from parent's stdin (avoids EBADF under nohup/redirects). 
+ popen_kwargs["stdin"] = subprocess.DEVNULL cmd.append(prompt) print(f"[{stage.upper()}] invoking {agent_name} (timeout={timeout}s)") @@ -321,6 +334,64 @@ def _invoke_agent( return process.returncode, stdout, stderr, stdout_path, stderr_path +def _build_metrics_recovery_prompt(task: dict[str, Any]) -> str: + """Lightweight prompt for metrics-recovery DCA: no protocol/docs, just task, log paths, report shape.""" + task_json = json.dumps(task, indent=2) + source_run_id = task.get("source_run_id", "unknown") + stdout_log = task.get("source_stdout_log_path", "missing") + stderr_log = task.get("source_stderr_log_path", "missing") + report_json = json.dumps({ + "checks": ["log_metrics_recovery"], + "notes": "Recovered metrics from saved logs.", + "completed_at": "YYYY-MM-DD HH:MM:SS", + "commit_sha": "", + "metrics": { + "val_bpb": 1.24, + "training_seconds": 300.1, + "total_seconds": 360.4, + "startup_seconds": 25.8, + "peak_vram_mb": 11967.8, + "mfu_percent": 2.15, + "total_tokens_M": 140.5, + "num_steps": 268, + "num_params_M": 11.5, + "depth": 4, + }, + }, indent=2) + return ( + "METRICS RECOVERY (focused task). Do not read protocol or stage docs.\n\n" + "Task (inline):\n" + f"{task_json}\n\n" + "Do not rerun training. Do not edit code. Do not create a commit.\n\n" + f"Inspect logs for source run {source_run_id!r}:\n" + f" stdout: {stdout_log}\n" + f" stderr: {stderr_log}\n\n" + "Recover canonical metrics from those logs if present, then print the summary block below. " + "If unrecoverable, use empty \"metrics\": {} and explain in notes.\n\n" + "AUTORESEARCH_DCA_SUMMARY_BEGIN\n" + f"{report_json}\n" + "AUTORESEARCH_DCA_SUMMARY_END\n" + ) + + +def _build_sync_resolution_prompt(task: dict[str, Any]) -> str: + """Prompt for sync-resolution: merge baseline into seed in the seed worktree, resolve conflicts, commit.""" + baseline_branch = task.get("baseline_branch", "master") + seed_id = task.get("seed_id", "") + return ( + "SYNC RESOLUTION (merge baseline into seed). 
You are in the seed worktree; the current branch is the seed branch.\n\n" + "The run could not sync this worktree with the baseline branch because the merge had conflicts.\n\n" + "Steps:\n" + f"1. Merge the baseline branch into the current branch: git merge {baseline_branch!r}\n" + "2. Resolve any conflicts, then commit the merge (e.g. git add . && git commit -m 'Merge baseline into seed').\n" + "3. Do not run the training entrypoint.\n" + "4. Print the following block so the runner can confirm success:\n\n" + "AUTORESEARCH_DCA_SUMMARY_BEGIN\n" + '{"checks":["sync_resolution"],"notes":"Merged baseline into seed; conflicts resolved.","completed_at":"YYYY-MM-DD HH:MM:SS","commit_sha":"","metrics":{}}\n' + "AUTORESEARCH_DCA_SUMMARY_END\n" + ) + + def _build_merge_resolution_prompt(task: dict[str, Any]) -> str: """Lightweight prompt for merge-resolution DCA: no protocol/docs, just commit, merge, report.""" task_json = json.dumps(task, indent=2) @@ -359,23 +430,27 @@ def _build_merge_resolution_prompt(task: dict[str, Any]) -> str: "4. Print the DCA summary block below (same metrics as the previous run). Include the current commit SHA (after committing the merge) in the DCA summary JSON.\n\n" ) else: - # Normal seed: merge the baseline branch (__baseline__) INTO the seed worktree so the seed is up to date. + # Normal seed: we need to merge the SEED branch INTO the baseline branch (so baseline gets the seed's changes). + # Do NOT merge baseline into seed — that is the wrong direction. Work in the project root on the baseline branch. if worktree_path: cwd_note = ( "Your working directory is the project root. " - f"The seed worktree is at {worktree_path!r}; run git commands from that directory (e.g. cd there first).\n\n" + f"The seed worktree is at {worktree_path!r} (use it only to commit any pending changes on the seed branch).\n\n" ) else: cwd_note = ( "Your working directory is the project root. 
" - f"The seed worktree is at component_system/history/worktrees/{seed_id!r}; run git commands from that directory for the merge.\n\n" + f"The seed worktree is at component_system/history/worktrees/{seed_id!r} (use it only to commit any pending changes).\n\n" ) steps = ( "Steps:\n" - "1. Commit any uncommitted changes in the seed worktree (e.g. batch-size or other fixes).\n" - f"2. In the seed worktree, merge the baseline branch into the current branch: git merge {BASELINE_SEED_ID!r}. Resolve any conflicts, then commit the merge.\n" + "1. Commit any uncommitted changes in the seed worktree so the seed branch is complete.\n" + f"2. In the project root (main repo): checkout the baseline branch, then merge the seed branch into it:\n" + f" git checkout {target_branch!r}\n" + f" git merge {seed_id!r}\n" + " Resolve any conflicts, then commit the merge. The result must be: the baseline branch contains the seed's changes (merge direction: seed → baseline).\n" "3. Do not run the training entrypoint; the experiment already completed and metrics exist.\n" - "4. Print the DCA summary block below (same metrics as the previous run). Include the current commit SHA (after committing the merge) in the DCA summary JSON.\n\n" + "4. Print the DCA summary block below (same metrics as the previous run). Use the merge commit SHA from the baseline branch (after the merge, from project root: git rev-parse HEAD).\n\n" ) return ( @@ -393,7 +468,7 @@ def _build_merge_resolution_prompt(task: dict[str, Any]) -> str: def _build_prompt(stage: str, task: dict[str, Any], task_path: Path) -> str: """Build the agent prompt for a stage. Prompt types (by weight): - P: full header (protocol, stage doc, baseline files, task) + P workflow. Heavy. - - DCA metrics_recovery: full header + log-recovery instructions. Heavy. + - DCA metrics_recovery: lightweight; task + log paths, report shape (no protocol/docs). Light. - DCA merge_resolution: lightweight; task + commit, merge, report (no protocol/docs). 
Light. - DCA baseline_measurement: full header + baseline retry/OOM/commit/run. Heavy. - DCA normal: full header + adapt/run/commit/report. Heavy. @@ -464,37 +539,25 @@ def _build_prompt(stage: str, task: dict[str, Any], task_path: Path) -> str: "Do not merge branches; only the DCA stage may trigger a merge into baseline.\n" ) if stage == "dca": + sync_resolution = task.get("sync_resolution") is True merge_resolution = task.get("merge_resolution") is True metrics_recovery = task.get("metrics_recovery") is True + if sync_resolution: + return _build_sync_resolution_prompt(task) if merge_resolution: return _build_merge_resolution_prompt(task) + if metrics_recovery: + return _build_metrics_recovery_prompt(task) dca_cmd, dca_note = _dca_command_guidance() baseline_measurement = task.get("seed_id") == "__baseline__" conflict_block = "" - if metrics_recovery: - source_run_id = task.get("source_run_id", "unknown") - stdout_log = task.get("source_stdout_log_path", "missing") - stderr_log = task.get("source_stderr_log_path", "missing") - return header + ( - "METRICS RECOVERY: The previous DCA run completed, but the runner could not confirm metrics from its final report.\n" - "Do not rerun training. Do not edit code. 
Do not create a commit.\n" - f"Inspect the saved logs for source run {source_run_id!r}:\n" - f"- stdout log: {stdout_log}\n" - f"- stderr log: {stderr_log}\n" - "Recover the canonical metrics from those logs if they are present, then print the final JSON summary.\n" - "Use this exact shape:\n" - "AUTORESEARCH_DCA_SUMMARY_BEGIN\n" - '{"checks":["log_metrics_recovery"],"notes":"Recovered metrics from saved logs.","completed_at":"YYYY-MM-DD HH:MM:SS","commit_sha":"","metrics":{"val_bpb":1.239972,"training_seconds":300.1,"total_seconds":360.4,"startup_seconds":25.8,"peak_vram_mb":11967.8,"mfu_percent":2.15,"total_tokens_M":140.5,"num_steps":268,"num_params_M":11.5,"depth":4}}\n' - "AUTORESEARCH_DCA_SUMMARY_END\n" - "If you still cannot recover metrics, print the same object with an empty metrics object and explain why in notes.\n" - ) if baseline_measurement: return header + conflict_block + ( "BASELINE MEASUREMENT: establish the first reference metrics in the dedicated baseline worktree.\n" "You must retry until the run completes successfully and you can report real metrics. Do not report empty metrics and stop.\n" "If training fails with CUDA out of memory (OOM): the default batch size is for H100. Reduce device_batch_size (and if needed total_batch_size) in component_system/components/trainer.py (TrainingSettings) so training fits in available VRAM, then rerun until the baseline run completes. Only trivial execution fixes (e.g. batch size) are allowed; do not change model architecture or training logic.\n" "If you modified any files (e.g. batch size for OOM), you must commit those changes on the baseline branch before reporting. An uncommitted worktree causes the follow-up merge to fail.\n" - f"Run the canonical command: {dca_cmd}\n" + f"Run the canonical command (≥{DCA_CANONICAL_RUN_TIMEOUT_SECONDS}s): `{dca_cmd} > training.log 2>&1`. Set your command/tool timeout to at least {DCA_CANONICAL_RUN_TIMEOUT_SECONDS} seconds. 
After the run, inspect training.log to confirm completion and recover or verify metrics.\n" f"({dca_note})\n" "Report the final result in JSON between these exact markers once training has completed successfully. Include the current commit SHA in the summary (commit any changes first).\n" "AUTORESEARCH_DCA_SUMMARY_BEGIN\n" @@ -504,9 +567,13 @@ def _build_prompt(stage: str, task: dict[str, Any], task_path: Path) -> str: ) return header + conflict_block + ( "You are the DCA stage.\n" + "Do not put forward new ideas or optimize for better metrics. Your only goal is to make the P-stage code run and report the result. " + '"Adapt or fix" means: fix bugs, import/runtime errors, OOM (e.g. reduce batch size), and config/path issues only. ' + "Do not change model architecture, optimizer logic, hyperparameters, or training logic to improve results. " + "The task \"prompt\" is for context only; do not treat it as a goal to achieve in this stage.\n\n" "Workflow:\n" "1. Adapt or fix the generated code in the seed worktree until it runs.\n" - f"2. Run the canonical command: {dca_cmd}\n" + f"2. Run the canonical command (≥{DCA_CANONICAL_RUN_TIMEOUT_SECONDS}s): `{dca_cmd} > training.log 2>&1` (or `... 2>&1 | tee training.log` to also see output). Set your command/tool timeout to at least {DCA_CANONICAL_RUN_TIMEOUT_SECONDS} seconds. After the run, inspect training.log to confirm completion and recover or verify metrics.\n" f" ({dca_note})\n" "3. If it fails for a simple reason, fix and rerun.\n" "4. 
Create a git commit in the seed branch for your changes.\n" @@ -579,19 +646,25 @@ def _regenerate_progress_png() -> None: traceback.print_exc() -def _worker(stage: str) -> None: - print(f"[daemon] worker-{stage.upper()} started") +def _worker(stage: str, lane: str = "any") -> None: + worker_name = stage.upper() if lane == "any" else f"{stage.upper()}-{lane.upper()}" + print(f"[daemon] worker-{worker_name} started") + def eligible(payload: dict) -> bool: + return bool(WORKFLOW.is_seed_eligible_for_stage(payload.get("seed_id"), stage)) + while not _shutdown: - pending = list_pending(stage) - if not pending: + task_path = claim_pending(stage, lane=lane, eligible_fn=eligible) + if task_path is None: time.sleep(POLL_INTERVAL) continue - task_path = pending[0] try: task = read_task(task_path) - seed_id = task["seed_id"] - run_id = task["run_id"] + seed_id = task.get("seed_id") + run_id = task.get("run_id") + if not seed_id or not run_id: + move_to_error(task_path) + continue started_seed = None if stage == "direct": started_seed, _ = WORKFLOW.mark_direct_code_run_started(seed_id, run_id) @@ -607,10 +680,10 @@ def _worker(stage: str) -> None: worktree_path = task.get("worktree_path") if started_seed is not None and started_seed.worktree_path is not None: worktree_path = started_seed.worktree_path - # Merge-resolution DCA runs from project root so the agent can operate on repo and worktrees + # Merge-resolution and metrics_recovery DCA run from project root; sync_resolution runs in seed worktree if stage == "dca" and ( task.get("merge_resolution") is True or task.get("metrics_recovery") is True - ): + ) and task.get("sync_resolution") is not True: worktree_path = None if worktree_path: @@ -649,28 +722,31 @@ def _worker(stage: str) -> None: prompt_path=prompt_path_str, ) else: - run = WORKFLOW.finish_dca_run( - seed_id, - run_id, - stdout, - stderr=stderr, - log_path=str(stdout_log_path) if stdout_log_path else None, - stderr_log_path=str(stderr_log_path) if stderr_log_path 
else None, - prompt_path=prompt_path_str, - metrics_recovery=task.get("metrics_recovery") is True, - merge_resolution=task.get("merge_resolution") is True, - ) - if not run.summary.get("metrics_recovery_queued"): - description = run.summary.get("notes") or run.summary.get("idea") or seed_id - _append_results_tsv(seed_id, run.metrics, run.signal or "error", str(description)) - _regenerate_progress_png() - if salvaged_dca: - WORKFLOW.seed_repo.append_event( + if task.get("sync_resolution") is True: + WORKFLOW.finish_sync_resolution(seed_id, run_id) + else: + run = WORKFLOW.finish_dca_run( seed_id, - "dca.salvaged", - f"DCA output contained final metrics, so the run was accepted despite agent exit code {exit_code}.", - run_id=run_id, + run_id, + stdout, + stderr=stderr, + log_path=str(stdout_log_path) if stdout_log_path else None, + stderr_log_path=str(stderr_log_path) if stderr_log_path else None, + prompt_path=prompt_path_str, + metrics_recovery=task.get("metrics_recovery") is True, + merge_resolution=task.get("merge_resolution") is True, ) + if not run.summary.get("metrics_recovery_queued"): + description = run.summary.get("notes") or run.summary.get("idea") or seed_id + _append_results_tsv(seed_id, run.metrics, run.signal or "error", str(description)) + _regenerate_progress_png() + if salvaged_dca: + WORKFLOW.seed_repo.append_event( + seed_id, + "dca.salvaged", + f"DCA output contained final metrics, so the run was accepted despite agent exit code {exit_code}.", + run_id=run_id, + ) move_to_done(task_path) print(f"[{stage.upper()}] task {task['task_id']} done") else: @@ -692,33 +768,48 @@ def _worker(stage: str) -> None: task_path=task_path, prompt_path=prompt_path_str, ) print(f"[{stage.upper()}] task {task['task_id']} failed") + except SyncResolutionQueued: + # Sync with baseline failed; sync-resolution DCA was queued. Move P task to error so we don't retry it. 
+ if task_path.exists(): + move_to_error(task_path) + continue + except DuplicateRunStartError: + # Run was already started (e.g. restored in-progress task). Move task to error to avoid double run. + if task_path.exists(): + move_to_error(task_path) + continue except Exception as exc: traceback.print_exc() + if not task_path.exists(): + continue try: task = read_task(task_path) - prompt_path_str = None + seed_id = task.get("seed_id") run_id = task.get("run_id") + if not seed_id or not run_id: + continue + prompt_path_str = None if run_id: p_path = LOG_DIR / f"{run_id}.prompt.txt" if p_path.exists(): prompt_path_str = str(p_path) if stage == "direct": WORKFLOW.mark_direct_code_run_failed( - task["seed_id"], - task["run_id"], + seed_id, + run_id, str(exc), task_path=task_path, prompt_path=prompt_path_str, ) else: WORKFLOW.mark_run_failed( - task["seed_id"], task["run_id"], str(exc), + seed_id, run_id, str(exc), task_path=task_path, prompt_path=prompt_path_str, ) except Exception: traceback.print_exc() - print(f"[daemon] worker-{stage.upper()} stopped") + print(f"[daemon] worker-{worker_name} stopped") def main() -> None: @@ -728,15 +819,29 @@ def main() -> None: signal.signal(signal.SIGTERM, _signal_handler) ensure_queue_layout() + restored = restore_in_progress_tasks() + total_restored = sum(restored.values()) + if total_restored: + print( + "[daemon] restored in_progress tasks " + f"(p={restored['p']}, dca={restored['dca']}, direct={restored['direct']})" + ) daemon_heartbeat() agent = os.environ.get("PDCA_AGENT", "claude") - print(f"[daemon] starting component-system daemon — agent={agent}, workers=P/DCA/DIRECT") + print(f"[daemon] starting component-system daemon — agent={agent}, workers=P/DCA-GPU/DCA-AUX/DIRECT") pools: list[ThreadPoolExecutor] = [] - for stage in ("p", "dca", "direct"): - pool = ThreadPoolExecutor(max_workers=1, thread_name_prefix=f"pdca-{stage}") + stage_specs = ( + ("p", "any", 2, "pdca-p"), + ("dca", "gpu", 1, "pdca-dca-gpu"), + ("dca", 
"aux", 1, "pdca-dca-aux"), + ("direct", "any", 1, "pdca-direct"), + ) + for stage, lane, worker_count, prefix in stage_specs: + pool = ThreadPoolExecutor(max_workers=worker_count, thread_name_prefix=prefix) pools.append(pool) - pool.submit(_worker, stage) + for _ in range(worker_count): + pool.submit(_worker, stage, lane) last_heartbeat = time.monotonic() try: diff --git a/component_system/services/workflow.py b/component_system/services/workflow.py index 8c1dbf484..6842b1da1 100644 --- a/component_system/services/workflow.py +++ b/component_system/services/workflow.py @@ -1,1355 +1,1686 @@ -from __future__ import annotations - -import json -from typing import Any -import re -import subprocess -from pathlib import Path - -from component_system.config import DEFAULT_BASELINE_BRANCH, PROMOTION_THRESHOLD -from component_system.domain.models import ( - DashboardColumn, - DashboardViewModel, - PlanIdea, - RunStatus, - SeedRecord, - SeedStatus, - StageName, - StageRun, -) -from component_system.repositories.state import ( - BaselineBranchMapRepository, - BaselineMetricsRepository, - RunRepository, - SeedRepository, -) -from component_system.task import ( - COMPONENT_SYSTEM_ROOT, - WORKTREE_ROOT, - get_daemon_status, - move_to_error, - now_ts, - new_run_id, - new_seed_id, - write_task, -) - -SUMMARY_MARKERS = { - "p": ("AUTORESEARCH_P_SUMMARY_BEGIN", "AUTORESEARCH_P_SUMMARY_END"), - "dca": ("AUTORESEARCH_DCA_SUMMARY_BEGIN", "AUTORESEARCH_DCA_SUMMARY_END"), -} - -BASELINE_SEED_ID = "__baseline__" - - -class GitCommandError(RuntimeError): - pass - - -class GitService: - def __init__(self) -> None: - pass - - def _run_git(self, *args: str, cwd: Path | None = None) -> str: - try: - result = subprocess.run( - ["git", *args], - cwd=str(cwd) if cwd else None, - capture_output=True, - text=True, - check=True, - ) - except FileNotFoundError as exc: - raise GitCommandError("Git is not installed or not available on PATH.") from exc - except subprocess.CalledProcessError as exc: - 
stderr = (exc.stderr or exc.stdout or "").strip() - raise GitCommandError(stderr or f"git {' '.join(args)} failed") from exc - return result.stdout.strip() - - def repo_root(self) -> Path: - return Path(self._run_git("rev-parse", "--show-toplevel")) - - def current_head(self) -> str: - return self._run_git("rev-parse", "HEAD") - - def branch_exists(self, branch: str) -> bool: - try: - self._run_git("rev-parse", "--verify", branch) - return True - except GitCommandError: - return False - - def ensure_branch(self, branch: str, start_point: str) -> None: - if not self.branch_exists(branch): - self._run_git("branch", branch, start_point) - - def list_branches(self) -> list[str]: - output = self._run_git("branch", "--format=%(refname:short)") - branches = [line.strip() for line in output.splitlines() if line.strip()] - if not branches: - # Unborn repositories can have HEAD pointing to a branch name even before first commit. - try: - head_branch = self._run_git("symbolic-ref", "--short", "HEAD").strip() - if head_branch: - branches.append(head_branch) - except GitCommandError: - pass - return sorted(set(branches)) - - @staticmethod - def is_seed_specific_branch(branch: str) -> bool: - """True if this branch is the single working branch for a seed (seed_id), not a baseline choice.""" - if branch == BASELINE_SEED_ID: - return True - # One branch per seed: seed- + 6 hex chars, e.g. seed-e57b95 - if branch.startswith("seed-") and len(branch) == 11 and all( - c in "abcdef0123456789" for c in branch[5:] - ): - return True - if branch.startswith("seed/"): - return True # legacy candidate branches, e.g. seed/seed-e57b95 - return False - - def setup_error(self) -> str | None: - try: - self.repo_root() - return None - except GitCommandError as exc: - return str(exc) - - def setup_error_for_branches(self, baseline_branch: str) -> str | None: - try: - root = self.repo_root() - if not baseline_branch: - return "Please select a baseline branch." 
- if not self.branch_exists(baseline_branch): - return ( - f"Git repo found at {root}, but branch {baseline_branch!r} does not exist yet. " - "Select an existing baseline branch." - ) - return None - except GitCommandError as exc: - return str(exc) - - def ensure_seed_worktrees(self, seed: SeedRecord) -> SeedRecord: - """Ensure the seed worktree exists on the single branch for this seed: seed_id (SSOT).""" - repo_head = self.current_head() - self.ensure_branch(seed.baseline_branch, repo_head) - - seed_worktree = WORKTREE_ROOT / seed.seed_id - if seed_worktree.exists(): - seed.worktree_path = str(seed_worktree) - return seed - # One branch per seed: branch name = seed_id, created from baseline. - self._run_git("worktree", "add", "-B", seed.seed_id, str(seed_worktree), seed.baseline_branch) - - seed.worktree_path = str(seed_worktree) - return seed - - def commit_sha(self, ref: str) -> str: - return self._run_git("rev-parse", "--short", ref) - - def head_sha_at(self, cwd: Path) -> str: - """Return the short commit SHA of HEAD in the given worktree directory.""" - return self._run_git("rev-parse", "--short", "HEAD", cwd=cwd) - - def reset_seed_branch_to(self, seed: SeedRecord, ref: str) -> None: - """Reset the seed worktree's branch to the given ref (e.g. commit before P). - No-op for baseline seed or when worktree is missing.""" - if seed.seed_id == BASELINE_SEED_ID: - return - if not seed.worktree_path: - return - worktree_path = Path(seed.worktree_path) - if not worktree_path.is_dir(): - return - self._run_git("reset", "--hard", ref, cwd=worktree_path) - - def promote_seed_branch( - self, seed: SeedRecord, target_branch: str | None = None - ) -> str: - """Merge the seed's branch (seed_id) into the target branch. Only DCA Action may call this; Plan must never merge. - If target_branch is None, use seed.baseline_branch (e.g. for normal seed promotion). 
For __baseline__ completion, - pass the first user seed's selected branch so the merge goes there instead of a fixed config value.""" - merge_into = target_branch if target_branch is not None else seed.baseline_branch - baseline_worktree = WORKTREE_ROOT / "baseline" - if baseline_worktree.exists(): - try: - self._run_git("worktree", "remove", "--force", str(baseline_worktree)) - except GitCommandError: - pass - self._run_git( - "worktree", - "add", - "--force", - "-B", - merge_into, - str(baseline_worktree), - merge_into, - ) - self._run_git("merge", "--no-edit", seed.seed_id, cwd=baseline_worktree) - return self.commit_sha(merge_into) - - -class WorkflowService: - def __init__( - self, - seed_repo: SeedRepository | None = None, - run_repo: RunRepository | None = None, - branch_map_repo: BaselineBranchMapRepository | None = None, - metrics_repo: BaselineMetricsRepository | None = None, - git_service: GitService | None = None, - ) -> None: - self.seed_repo = seed_repo or SeedRepository() - self.run_repo = run_repo or RunRepository() - self.branch_map_repo = branch_map_repo or BaselineBranchMapRepository() - self.metrics_repo = metrics_repo or BaselineMetricsRepository() - self.git_service = git_service or GitService() - - @staticmethod - def _seed_worktree_path(seed_id: str) -> str: - return str(WORKTREE_ROOT / seed_id) - - @staticmethod - def _baseline_worktree_path() -> str: - return str(WORKTREE_ROOT / BASELINE_SEED_ID) - - def _normalize_seed_runtime_state(self, seed: SeedRecord) -> SeedRecord: - """Clean up legacy persisted seed state that no longer matches runtime rules.""" - if seed.seed_id != BASELINE_SEED_ID: - return seed - expected_worktree = self._baseline_worktree_path() - if seed.worktree_path == expected_worktree: - return seed - seed.worktree_path = expected_worktree - seed.updated_at = now_ts() - self.seed_repo.save(seed) - return seed - - def ensure_seed_worktree_ready(self, seed_id: str) -> SeedRecord: - """Ensure the runtime seed worktree exists; 
recreate only when missing.""" - seed = self.require_seed(seed_id) - if seed.seed_id == BASELINE_SEED_ID: - expected_worktree = self._baseline_worktree_path() - if Path(expected_worktree).is_dir(): - if seed.worktree_path != expected_worktree: - seed.worktree_path = expected_worktree - seed.updated_at = now_ts() - self.seed_repo.save(seed) - return seed - seed = self.git_service.ensure_seed_worktrees(seed) - seed.updated_at = now_ts() - self.seed_repo.save(seed) - commit_sha = "" - try: - commit_sha = self.git_service.commit_sha(seed.baseline_branch) - except GitCommandError: - pass - self.seed_repo.append_event( - seed.seed_id, - "seed.worktree_ready", - "Recreated missing baseline worktree before the run started.", - commit_sha=commit_sha or None, - ) - return seed - expected_worktree = self._seed_worktree_path(seed.seed_id) - if Path(expected_worktree).is_dir(): - if seed.worktree_path != expected_worktree: - seed.worktree_path = expected_worktree - seed.updated_at = now_ts() - self.seed_repo.save(seed) - return seed - seed = self.git_service.ensure_seed_worktrees(seed) - seed.updated_at = now_ts() - self.seed_repo.save(seed) - commit_sha = "" - try: - commit_sha = self.git_service.commit_sha(seed.seed_id) - except GitCommandError: - pass - self.seed_repo.append_event( - seed.seed_id, - "seed.worktree_ready", - "Recreated missing seed worktree before the run started.", - commit_sha=commit_sha or None, - ) - return seed - - def _preferred_baseline_branch(self) -> str: - setup_error = self.git_service.setup_error() - if setup_error is not None: - return DEFAULT_BASELINE_BRANCH - try: - branches = [ - branch - for branch in self.git_service.list_branches() - if not self.git_service.is_seed_specific_branch(branch) - ] - except GitCommandError: - return DEFAULT_BASELINE_BRANCH - if branches and DEFAULT_BASELINE_BRANCH in branches: - return DEFAULT_BASELINE_BRANCH - return branches[0] if branches else DEFAULT_BASELINE_BRANCH - - def 
_first_user_seed_baseline_branch(self) -> str | None: - """Return the baseline_branch of the earliest-created user seed (excluding __baseline__), or None.""" - user_seeds = [s for s in self.seed_repo.list() if s.seed_id != BASELINE_SEED_ID] - if not user_seeds: - return None - first = min(user_seeds, key=lambda s: s.created_at) - return first.baseline_branch or None - - def _enqueue_plan_run(self, seed: SeedRecord, event_kind: str = "p.queued", event_message: str = "Queued Plan stage for the seed.") -> StageRun: - run = StageRun( - run_id=new_run_id("p"), - seed_id=seed.seed_id, - stage=StageName.p, - status=RunStatus.queued, - task_id=new_run_id("task-p"), - created_at=now_ts(), - updated_at=now_ts(), - ) - seed.status = SeedStatus.queued - seed.updated_at = now_ts() - seed.latest_run_id = run.run_id - seed.last_error = None - self.seed_repo.save(seed) - self.run_repo.save(run) - self.seed_repo.append_event(seed.seed_id, event_kind, event_message) - write_task( - "p", - { - "seed_id": seed.seed_id, - "run_id": run.run_id, - "prompt": seed.prompt, - "worktree_path": seed.worktree_path, - }, - task_id=run.task_id, - ) - return run - - def _release_seeds_waiting_for_baseline(self, branch: str) -> None: - """Release seeds that were waiting for baseline result on the given branch.""" - branch_metrics = self.metrics_repo.get_for_branch(branch) - if not branch_metrics or branch_metrics.get("last_val_bpb") is None: - return - waiting_seeds = sorted(self.seed_repo.list(), key=lambda item: item.created_at) - for seed in waiting_seeds: - if seed.seed_id == BASELINE_SEED_ID: - continue - if seed.baseline_branch != branch: - continue - if seed.status is not SeedStatus.queued or seed.latest_run_id is not None: - continue - self._enqueue_plan_run( - seed, - event_kind="p.released", - event_message="Baseline is ready; queued Plan stage for the waiting seed.", - ) - - @staticmethod - def _status_from_dca_signal(signal: str) -> SeedStatus: - """Centralized mapping from DCA signal 
to terminal seed status.""" - if signal == "positive_signal": - return SeedStatus.promoted - if signal == "error": - return SeedStatus.failed - return SeedStatus.passed - - def _reconcile_seed_status_signal(self, seed: SeedRecord) -> bool: - """ - Auto-heal known inconsistent terminal combinations from historical data. - - Returns True when the seed was updated and persisted. - """ - if seed.status is SeedStatus.passed and seed.latest_signal == "error": - seed.status = SeedStatus.failed - seed.updated_at = now_ts() - self.seed_repo.save(seed) - self.seed_repo.append_event( - seed.seed_id, - "seed.reconciled", - "Reconciled inconsistent terminal state (passed + error) to failed.", - ) - return True - return False - - def create_seed( - self, - prompt: str, - baseline_branch: str | None = None, - ralph_loop_enabled: bool = False, - ) -> SeedRecord: - seed_id = new_seed_id() - selected_baseline = (baseline_branch or DEFAULT_BASELINE_BRANCH).strip() - seed = SeedRecord( - seed_id=seed_id, - prompt=prompt.strip(), - status=SeedStatus.draft, - created_at=now_ts(), - updated_at=now_ts(), - baseline_branch=selected_baseline, - worktree_path=self._seed_worktree_path(seed_id), - ralph_loop_enabled=ralph_loop_enabled, - ) - self.seed_repo.save(seed) - self.branch_map_repo.set_branch_for_seed(seed_id, selected_baseline) - try: - pass # branch seed_id is created when Plan is queued (ensure_seed_worktrees) - except GitCommandError: - # Keep seed creation non-blocking; branch creation will be retried at P queue time. 
- pass - self.seed_repo.append_event(seed.seed_id, "seed.created", "Seed created from prompt.") - if ralph_loop_enabled: - self.seed_repo.append_event( - seed.seed_id, - "ralph.enabled", - "Ralph loop enabled; Plan will auto-requeue after each DCA completion.", - ) - return seed - - def create_direct_code_seed(self, prompt: str) -> tuple[SeedRecord, StageRun]: - cleaned_prompt = prompt.strip() - if not cleaned_prompt: - raise RuntimeError("Prompt cannot be empty.") - baseline_branch = self._preferred_baseline_branch() - seed_id = new_seed_id("direct") - now = now_ts() - run = StageRun( - run_id=new_run_id("direct"), - seed_id=seed_id, - stage=StageName.direct, - status=RunStatus.queued, - task_id=new_run_id("task-direct"), - created_at=now, - updated_at=now, - ) - seed = SeedRecord( - seed_id=seed_id, - prompt=cleaned_prompt, - status=SeedStatus.adapting, - created_at=now, - updated_at=now, - baseline_branch=baseline_branch, - worktree_path=str(COMPONENT_SYSTEM_ROOT.parent), - latest_run_id=run.run_id, - plan=PlanIdea( - title="Direct code agent", - target_component="project_root", - description="Direct code agent run requested from the dashboard and executed from the project root.", - ), - ) - self.seed_repo.save(seed) - self.branch_map_repo.set_branch_for_seed(seed_id, baseline_branch) - self.run_repo.save(run) - self.seed_repo.append_event(seed.seed_id, "seed.created", "Seed created from direct code agent prompt.") - self.seed_repo.append_event( - seed.seed_id, - "direct_code.queued", - "Queued direct code agent run from the project root.", - run_id=run.run_id, - ) - write_task( - "direct", - { - "seed_id": seed.seed_id, - "run_id": run.run_id, - "prompt": seed.prompt, - "worktree_path": None, - }, - task_id=run.task_id, - ) - return seed, run - - def _get_or_create_baseline_seed(self) -> SeedRecord: - """Return the baseline seed used to establish initial val_bpb; create and persist it if missing.""" - seed = self.seed_repo.get(BASELINE_SEED_ID) - if seed is not 
None: - return self._normalize_seed_runtime_state(seed) - branch = self._first_user_seed_baseline_branch() or DEFAULT_BASELINE_BRANCH - seed = SeedRecord( - seed_id=BASELINE_SEED_ID, - prompt="Baseline measurement: run training on current code without changes.", - status=SeedStatus.draft, - created_at=now_ts(), - updated_at=now_ts(), - baseline_branch=branch, - worktree_path=self._baseline_worktree_path(), - ralph_loop_enabled=False, - ) - self.seed_repo.save(seed) - self.branch_map_repo.set_branch_for_seed(BASELINE_SEED_ID, branch) - self.seed_repo.append_event( - seed.seed_id, - "seed.created", - "Baseline seed created for initial measurement.", - ) - return seed - - def ensure_baseline_result(self) -> None: - """ - If there is no baseline result (last_val_bpb) for the baseline seed's branch, ensure a baseline seed exists and - queue its DCA so the first run establishes the baseline. Idempotent; safe to call - before queue_p for any user seed. - """ - seed = self._get_or_create_baseline_seed() - branch_metrics = self.metrics_repo.get_for_branch(seed.baseline_branch) - if branch_metrics and branch_metrics.get("last_val_bpb") is not None: - return - if seed.status in (SeedStatus.dca_queued, SeedStatus.adapting, SeedStatus.running): - return - if seed.status in (SeedStatus.passed, SeedStatus.failed, SeedStatus.promoted): - branch_metrics = self.metrics_repo.get_for_branch(seed.baseline_branch) - if branch_metrics and branch_metrics.get("last_val_bpb") is not None: - return - setup_error = self.git_service.setup_error() - if setup_error is not None: - return - try: - self.git_service.ensure_branch(seed.baseline_branch, self.git_service.current_head()) - except GitCommandError: - return - setup_error = self.git_service.setup_error_for_branches(seed.baseline_branch) - if setup_error is not None: - return - seed.status = SeedStatus.generated - seed.plan = PlanIdea(title="Baseline", description="No changes; measure current baseline.") - seed.updated_at = now_ts() - 
self.seed_repo.save(seed) - self.seed_repo.append_event( - seed.seed_id, - "baseline.queued", - "Queued DCA to establish baseline result before first seed.", - ) - self.queue_dca(seed.seed_id) - - def set_ralph_loop(self, seed_id: str, enabled: bool) -> SeedRecord: - seed = self.require_seed(seed_id) - if seed.ralph_loop_enabled == enabled: - return seed - seed.ralph_loop_enabled = enabled - seed.updated_at = now_ts() - self.seed_repo.save(seed) - if enabled: - self.seed_repo.append_event( - seed.seed_id, - "ralph.enabled", - "Ralph loop enabled; Plan will auto-requeue after each DCA completion.", - ) - else: - self.seed_repo.append_event(seed.seed_id, "ralph.disabled", "Ralph loop disabled by user.") - return seed - - def can_edit_seed_prompt(self, seed: SeedRecord) -> bool: - return seed.status in {SeedStatus.draft, SeedStatus.queued} - - def update_seed_prompt(self, seed_id: str, prompt: str) -> SeedRecord: - seed = self.require_seed(seed_id) - if not self.can_edit_seed_prompt(seed): - raise RuntimeError("Seed prompt can only be edited before Plan starts.") - updated_prompt = prompt.strip() - if not updated_prompt: - raise RuntimeError("Prompt cannot be empty.") - if updated_prompt == seed.prompt: - return seed - seed.prompt = updated_prompt - seed.updated_at = now_ts() - self.seed_repo.save(seed) - self.seed_repo.append_event(seed.seed_id, "seed.updated", "Seed prompt was edited before execution.") - return seed - - def queue_p(self, seed_id: str) -> StageRun | None: - seed = self.require_seed(seed_id) - branch_metrics = self.metrics_repo.get_for_branch(seed.baseline_branch) if seed_id != BASELINE_SEED_ID else None - has_baseline = branch_metrics is not None and branch_metrics.get("last_val_bpb") is not None - if seed_id != BASELINE_SEED_ID and not has_baseline: - self.ensure_baseline_result() - branch_metrics = self.metrics_repo.get_for_branch(seed.baseline_branch) - has_baseline = branch_metrics is not None and branch_metrics.get("last_val_bpb") is not None - 
if not has_baseline: - if not (seed.status is SeedStatus.queued and seed.latest_run_id is None): - seed.status = SeedStatus.queued - seed.updated_at = now_ts() - seed.latest_run_id = None - seed.last_error = None - self.seed_repo.save(seed) - self.seed_repo.append_event( - seed.seed_id, - "p.waiting_for_baseline", - "Baseline run is still in progress; Plan will queue after baseline finishes.", - ) - return None - setup_error = self.git_service.setup_error() - if setup_error is not None: - raise RuntimeError(setup_error) - try: - self.git_service.ensure_branch(seed.baseline_branch, self.git_service.current_head()) - except GitCommandError: - pass - setup_error = self.git_service.setup_error_for_branches(seed.baseline_branch) - if setup_error is not None: - raise RuntimeError(setup_error) - return self._enqueue_plan_run(seed) - - def queue_dca( - self, - seed_id: str, - merge_resolution: bool = False, - metrics_recovery: bool = False, - source_run_id: str | None = None, - source_stdout_log_path: str | None = None, - source_stderr_log_path: str | None = None, - last_metrics: dict[str, Any] | None = None, - last_summary: dict[str, Any] | None = None, - restore_ref: str | None = None, - ) -> StageRun: - seed = self.require_seed(seed_id) - if not metrics_recovery and seed.status in {SeedStatus.draft, SeedStatus.queued, SeedStatus.planning}: - raise RuntimeError("Run Plan first. Do-Check-Action is available after code is generated into the seed branch.") - if not metrics_recovery: - setup_error = self.git_service.setup_error_for_branches(seed.baseline_branch) - if setup_error is not None: - raise RuntimeError(setup_error) - run = StageRun( - run_id=new_run_id("dca"), - seed_id=seed.seed_id, - stage=StageName.dca, - status=RunStatus.queued, - task_id=new_run_id("task-dca"), - created_at=now_ts(), - updated_at=now_ts(), - ) - if seed.seed_id != BASELINE_SEED_ID: - try: - # Ref to restore worktree to on negative signal (commit before P when from finish_p_run, else baseline). 
- run.summary["restore_ref"] = ( - restore_ref - if restore_ref is not None - else self.git_service.commit_sha(seed.baseline_branch) - ) - except GitCommandError: - pass - seed.status = SeedStatus.dca_queued - seed.updated_at = now_ts() - seed.latest_run_id = run.run_id - seed.last_error = None - self.seed_repo.save(seed) - self.run_repo.save(run) - self.seed_repo.append_event( - seed.seed_id, - "dca.queued", - "Queued DCA for merge conflict resolution." - if merge_resolution - else "Queued DCA for metrics recovery from saved logs." - if metrics_recovery - else "Queued DCA stage for the seed.", - ) - payload = { - "seed_id": seed.seed_id, - "run_id": run.run_id, - "prompt": seed.prompt, - "worktree_path": seed.worktree_path, - "merge_resolution": merge_resolution, - "metrics_recovery": metrics_recovery, - } - if merge_resolution: - payload["baseline_branch"] = seed.baseline_branch - if last_metrics is not None: - payload["last_metrics"] = last_metrics - if last_summary is not None: - payload["last_summary"] = last_summary - if metrics_recovery: - payload["source_run_id"] = source_run_id - payload["source_stdout_log_path"] = source_stdout_log_path - payload["source_stderr_log_path"] = source_stderr_log_path - payload["worktree_path"] = None - write_task("dca", payload, task_id=run.task_id) - return run - - def require_seed(self, seed_id: str) -> SeedRecord: - seed = self.seed_repo.get(seed_id) - if seed is None: - raise KeyError(f"Unknown seed_id={seed_id}") - return self._normalize_seed_runtime_state(seed) - - def require_run(self, run_id: str) -> StageRun: - run = self.run_repo.get(run_id) - if run is None: - raise KeyError(f"Unknown run_id={run_id}") - return run - - def mark_run_started(self, seed_id: str, run_id: str) -> tuple[SeedRecord, StageRun]: - seed = self.require_seed(seed_id) - run = self.require_run(run_id) - run.status = RunStatus.running - run.updated_at = now_ts() - if run.stage is StageName.p: - setup_error = self.git_service.setup_error() - if 
setup_error is not None: - raise RuntimeError(setup_error) - try: - self.git_service.ensure_branch(seed.baseline_branch, self.git_service.current_head()) - except GitCommandError: - pass - setup_error = self.git_service.setup_error_for_branches(seed.baseline_branch) - if setup_error is not None: - raise RuntimeError(setup_error) - seed = self.ensure_seed_worktree_ready(seed.seed_id) - if seed.worktree_path: - worktree_path = Path(seed.worktree_path) - if worktree_path.is_dir(): - try: - run.summary["commit_sha_before_p"] = self.git_service.head_sha_at( - worktree_path - ) - except GitCommandError: - pass - seed.status = SeedStatus.planning - event_kind = "p.started" - event_message = "Plan stage started in the candidate worktree." - else: - seed.status = SeedStatus.adapting - event_kind = "dca.started" - event_message = ( - "Baseline measurement started in the baseline worktree." - if seed.seed_id == BASELINE_SEED_ID - else "DCA stage started in the seed worktree." - ) - seed.updated_at = now_ts() - self.run_repo.save(run) - self.seed_repo.save(seed) - self.seed_repo.append_event(seed.seed_id, event_kind, event_message, run_id=run_id) - return seed, run - - def mark_direct_code_run_started(self, seed_id: str, run_id: str) -> tuple[SeedRecord, StageRun]: - seed = self.require_seed(seed_id) - run = self.require_run(run_id) - run.status = RunStatus.running - run.updated_at = now_ts() - seed.status = SeedStatus.adapting - seed.updated_at = now_ts() - self.run_repo.save(run) - self.seed_repo.save(seed) - self.seed_repo.append_event( - seed.seed_id, - "direct_code.started", - "Direct code agent started from the project root.", - run_id=run_id, - ) - return seed, run - - def mark_direct_code_run_failed( - self, - seed_id: str, - run_id: str, - error: str, - task_path: Path | None = None, - prompt_path: str | None = None, - log_path: str | None = None, - stderr_log_path: str | None = None, - ) -> None: - seed = self.require_seed(seed_id) - run = self.require_run(run_id) - 
run.status = RunStatus.failed - run.updated_at = now_ts() - run.error = error - if prompt_path is not None: - run.prompt_path = prompt_path - if log_path is not None: - run.log_path = log_path - if stderr_log_path is not None: - run.stderr_log_path = stderr_log_path - seed.status = SeedStatus.failed - seed.updated_at = now_ts() - seed.last_error = error - self.run_repo.save(run) - self.seed_repo.save(seed) - self.seed_repo.append_event(seed.seed_id, "direct_code.failed", error, run_id=run_id) - if task_path is not None and task_path.exists(): - move_to_error(task_path) - - def mark_run_failed( - self, - seed_id: str, - run_id: str, - error: str, - task_path: Path | None = None, - prompt_path: str | None = None, - log_path: str | None = None, - stderr_log_path: str | None = None, - ) -> None: - seed = self.require_seed(seed_id) - run = self.require_run(run_id) - run.status = RunStatus.failed - run.updated_at = now_ts() - run.error = error - if prompt_path is not None: - run.prompt_path = prompt_path - if log_path is not None: - run.log_path = log_path - if stderr_log_path is not None: - run.stderr_log_path = stderr_log_path - seed.status = SeedStatus.failed - seed.updated_at = now_ts() - seed.last_error = error - self.run_repo.save(run) - self.seed_repo.save(seed) - self.seed_repo.append_event(seed.seed_id, f"{run.stage.value}.failed", error, run_id=run_id) - if task_path is not None and task_path.exists(): - move_to_error(task_path) - - def finish_direct_code_run( - self, - seed_id: str, - run_id: str, - stdout: str, - stderr: str | None = None, - log_path: str | None = None, - stderr_log_path: str | None = None, - prompt_path: str | None = None, - ) -> StageRun: - seed = self.require_seed(seed_id) - run = self.require_run(run_id) - run.status = RunStatus.succeeded - run.updated_at = now_ts() - run.log_path = log_path - run.stderr_log_path = stderr_log_path - run.prompt_path = prompt_path - run.summary = { - "mode": "direct_code_agent", - "cwd": 
str(COMPONENT_SYSTEM_ROOT.parent), - "stdout_bytes": len(stdout.encode("utf-8", errors="replace")), - "stderr_bytes": len((stderr or "").encode("utf-8", errors="replace")), - } - run.signal = "direct_code_completed" - seed.status = SeedStatus.passed - seed.updated_at = now_ts() - seed.latest_signal = run.signal - seed.last_error = None - self.run_repo.save(run) - self.seed_repo.save(seed) - self.seed_repo.append_event( - seed.seed_id, - "direct_code.completed", - "Direct code agent completed from the project root.", - run_id=run_id, - ) - return run - - def finish_p_run( - self, - seed_id: str, - run_id: str, - stdout: str, - log_path: str | None = None, - stderr_log_path: str | None = None, - prompt_path: str | None = None, - ) -> StageRun: - seed = self.require_seed(seed_id) - run = self.require_run(run_id) - summary = self.extract_summary(stdout, StageName.p) or {} - seed.plan = PlanIdea( - title=summary.get("idea", "Generated plan"), - target_component=summary.get("target_component", "model"), - description=summary.get("description", ""), - source_refs=summary.get("source_refs", []), - commit_sha=summary.get("commit_sha"), - ) - # Single branch per seed (SSOT): worktree is already on seed_id branch. - commit_sha = self.git_service.commit_sha(seed.seed_id) - run.status = RunStatus.succeeded - run.updated_at = now_ts() - run.log_path = log_path - run.stderr_log_path = stderr_log_path - run.prompt_path = prompt_path - # Preserve run.summary fields set earlier (e.g. commit_sha_before_p) when merging P output. 
- run.summary = run.summary | summary | {"commit_sha": commit_sha} - seed.status = SeedStatus.generated - seed.updated_at = now_ts() - self.run_repo.save(run) - self.seed_repo.save(seed) - self.seed_repo.append_event( - seed.seed_id, - "p.completed", - "Plan completed on seed branch.", - commit_sha=commit_sha, - ) - self.queue_dca( - seed.seed_id, - restore_ref=run.summary.get("commit_sha_before_p"), - ) - return run - - @staticmethod - def combine_output(stdout: str, stderr: str | None = None) -> str: - if stdout and stderr: - return f"{stdout}\n{stderr}" - return stdout or stderr or "" - - def finish_dca_run( - self, - seed_id: str, - run_id: str, - stdout: str, - stderr: str | None = None, - log_path: str | None = None, - stderr_log_path: str | None = None, - prompt_path: str | None = None, - metrics_recovery: bool = False, - merge_resolution: bool = False, - ) -> StageRun: - seed = self.require_seed(seed_id) - run = self.require_run(run_id) - branch_metrics = self.metrics_repo.get_for_branch(seed.baseline_branch) - last_val_bpb = float(branch_metrics["last_val_bpb"]) if branch_metrics and branch_metrics.get("last_val_bpb") is not None else None - output_text = self.combine_output(stdout, stderr) - summary = self.extract_summary(output_text, StageName.dca) or {} - metrics = self.extract_dca_metrics(output_text, summary) - signal = self.evaluate_signal(metrics, last_val_bpb, PROMOTION_THRESHOLD) - commit_sha = summary.get("commit_sha") - if not (isinstance(commit_sha, str) and commit_sha.strip()): - try: - commit_sha = self.git_service.commit_sha(seed.seed_id) - except GitCommandError: - commit_sha = "" - run.status = RunStatus.succeeded - run.updated_at = now_ts() - run.log_path = log_path - run.stderr_log_path = stderr_log_path - run.prompt_path = prompt_path - run.summary = summary | {"commit_sha": commit_sha} - run.metrics = metrics - run.signal = signal - seed.updated_at = now_ts() - if signal == "error" and not metrics_recovery: - run.summary = run.summary 
| {"metrics_recovery_queued": True} - self.run_repo.save(run) - self.seed_repo.save(seed) - self.seed_repo.append_event( - seed.seed_id, - "dca.metrics_recovery_queued", - "DCA completed without recoverable metrics in the structured report; queued a follow-up DCA to inspect saved logs.", - run_id=run_id, - ) - self.queue_dca( - seed.seed_id, - metrics_recovery=True, - source_run_id=run_id, - source_stdout_log_path=log_path, - source_stderr_log_path=stderr_log_path, - ) - return run - seed.latest_metrics = metrics - seed.latest_signal = signal - terminal_status = self._status_from_dca_signal(signal) - merge_commit_sha = None # set when seed branch is successfully merged into baseline - if seed.seed_id == BASELINE_SEED_ID and last_val_bpb is None: - if "val_bpb" not in metrics: - seed.status = SeedStatus.failed - event_message = ( - "Baseline metrics recovery could not recover metrics; marked as failed." - if metrics_recovery - else "Baseline measurement completed without metrics; marked as failed." - ) - self.run_repo.save(run) - self.seed_repo.save(seed) - self.seed_repo.append_event( - seed.seed_id, - "dca.completed", - event_message, - signal=signal, - metrics=metrics, - ) - return run - target_branch = self._first_user_seed_baseline_branch() or seed.baseline_branch - # Only positive_signal is merged into the per-seed baseline branch; record baseline value otherwise. 
- if signal != "positive_signal": - self.metrics_repo.update_for_branch( - target_branch, - {"last_val_bpb": metrics["val_bpb"]}, - ) - seed.status = terminal_status - self.run_repo.save(run) - self.seed_repo.save(seed) - self.seed_repo.append_event( - seed.seed_id, - "dca.completed", - "Baseline measurement completed (no promotion); not merged into baseline branch.", - signal=signal, - metrics=metrics, - ) - return run - try: - merge_commit_sha = self.git_service.promote_seed_branch(seed, target_branch=target_branch) - self.metrics_repo.update_for_branch( - target_branch, - { - "last_val_bpb": metrics["val_bpb"], - "promoted_branch": seed.seed_id, - "promoted_idea": "Initial baseline adaptation", - "promoted_at": summary.get("completed_at"), - }, - ) - seed.status = SeedStatus.passed - event_message = f"Baseline measurement completed and __baseline__ was merged into {target_branch}; waiting seeds can now start Plan." - self.run_repo.save(run) - self.seed_repo.save(seed) - self.seed_repo.append_event( - seed.seed_id, - "dca.completed", - event_message, - signal=signal, - metrics=metrics, - commit_sha=merge_commit_sha, - ) - self._release_seeds_waiting_for_baseline(target_branch) - return run - except GitCommandError as merge_err: - tried_sha = commit_sha or "" - try: - tried_sha = self.git_service.commit_sha(seed.seed_id) - except GitCommandError: - pass - self.seed_repo.append_event( - seed.seed_id, - "dca.merge_failed", - f"Merge into baseline failed: {merge_err}. 
Queued a new DCA run to resolve conflicts.", - commit_sha=tried_sha or None, - target_branch=target_branch, - ) - if not merge_resolution: - self.queue_dca( - seed.seed_id, - merge_resolution=True, - last_metrics=metrics, - last_summary=summary, - ) - seed.status = SeedStatus.dca_queued - seed.updated_at = now_ts() - self.seed_repo.save(seed) - self.run_repo.save(run) - self.seed_repo.append_event( - seed.seed_id, - "dca.completed", - "Baseline measurement completed but merge failed; conflict-resolution DCA queued.", - signal=signal, - metrics=metrics, - ) - return run - self.metrics_repo.update_for_branch( - target_branch, - { - "last_val_bpb": metrics["val_bpb"], - "promoted_branch": seed.seed_id, - "promoted_idea": "Initial baseline adaptation", - "promoted_at": summary.get("completed_at"), - }, - ) - seed.status = SeedStatus.passed - self.seed_repo.save(seed) - self.run_repo.save(run) - self.seed_repo.append_event( - seed.seed_id, - "dca.completed", - "Baseline measurement completed; merge into baseline branch failed again after resolution run (loop avoided). Baseline metrics recorded; manual merge may be needed.", - signal=signal, - metrics=metrics, - ) - self._release_seeds_waiting_for_baseline(target_branch) - return run - if terminal_status is SeedStatus.promoted: - try: - self.metrics_repo.update_for_branch( - seed.baseline_branch, - { - "last_val_bpb": metrics["val_bpb"], - "promoted_branch": seed.seed_id, - "promoted_idea": seed.plan.title if seed.plan else seed.prompt[:80], - "promoted_at": summary.get("completed_at"), - }, - ) - merge_commit_sha = self.git_service.promote_seed_branch(seed) - seed.status = terminal_status - event_message = "DCA succeeded and seed branch was promoted into baseline." 
- except GitCommandError as merge_err: - tried_sha = commit_sha or "" - try: - tried_sha = self.git_service.commit_sha(seed.seed_id) - except GitCommandError: - pass - self.seed_repo.append_event( - seed.seed_id, - "dca.merge_failed", - f"Merge into baseline failed: {merge_err}. Queued a new DCA run to resolve conflicts.", - commit_sha=tried_sha or None, - target_branch=seed.baseline_branch, - ) - self.queue_dca( - seed.seed_id, - merge_resolution=True, - last_metrics=metrics, - last_summary=summary, - ) - seed.status = SeedStatus.dca_queued - seed.updated_at = now_ts() - self.seed_repo.save(seed) - self.run_repo.save(run) - self.seed_repo.append_event( - seed.seed_id, "dca.completed", "DCA run completed but merge failed; conflict-resolution DCA queued.", signal=signal, metrics=metrics - ) - if seed.ralph_loop_enabled: - try: - self.queue_p(seed.seed_id) - self.seed_repo.append_event(seed.seed_id, "ralph.requeued", "Ralph loop queued the next Plan run.") - except (RuntimeError, GitCommandError) as exc: - self.seed_repo.append_event(seed.seed_id, "ralph.requeue_failed", f"Ralph loop could not queue the next Plan run: {exc}") - return run - elif terminal_status is SeedStatus.failed: - seed.status = terminal_status - event_message = ( - "DCA metrics recovery could not recover metrics; marked as failed." - if metrics_recovery - else "DCA completed but metrics were missing; marked as failed." - ) - else: - seed.status = terminal_status - event_message = "DCA completed without promotion." 
- self.run_repo.save(run) - self.seed_repo.save(seed) - event_commit_sha = merge_commit_sha if merge_commit_sha else run.summary.get("commit_sha") - self.seed_repo.append_event( - seed.seed_id, - "dca.completed", - event_message, - signal=signal, - metrics=metrics, - **({"commit_sha": event_commit_sha} if event_commit_sha else {}), - ) - if ( - seed.ralph_loop_enabled - and signal in ("negative_signal", "neutral", "error") - and not merge_resolution - and not metrics_recovery - and seed.seed_id != BASELINE_SEED_ID - ): - ref = run.summary.get("restore_ref") or run.summary.get("baseline_commit_at_dca_start") - if ref: - try: - self.git_service.reset_seed_branch_to(seed, ref) - self.seed_repo.append_event( - seed.seed_id, - "ralph.worktree_restored", - "Restored seed worktree to commit before P for next Plan.", - commit_sha=ref, - ) - except GitCommandError as exc: - self.seed_repo.append_event( - seed.seed_id, - "ralph.worktree_restore_failed", - f"Could not restore seed worktree to commit before P: {exc}", - commit_sha=ref, - ) - if seed.ralph_loop_enabled: - try: - self.queue_p(seed.seed_id) - self.seed_repo.append_event( - seed.seed_id, - "ralph.requeued", - "Ralph loop queued the next Plan run.", - ) - except (RuntimeError, GitCommandError) as exc: - self.seed_repo.append_event( - seed.seed_id, - "ralph.requeue_failed", - f"Ralph loop could not queue the next Plan run: {exc}", - ) - return run - - def build_dashboard(self, selected_seed_id: str | None = None) -> DashboardViewModel: - seeds = self.seed_repo.list() - selected_seed = self.seed_repo.get(selected_seed_id) if selected_seed_id else None - baseline_metrics_by_branch = self.metrics_repo.get_all() - available_branches: list[str] = [] - setup_error = self.git_service.setup_error() - if setup_error is None: - try: - all_branches = self.git_service.list_branches() - if not all_branches: - setup_error = "No local branches found yet. Create an initial commit/branch, then reload." 
- else: - available_branches = [ - b for b in all_branches - if not self.git_service.is_seed_specific_branch(b) - ] - # Use only branches that exist in the repo; do not add DEFAULT_BASELINE_BRANCH - # if it does not exist, so the dropdown never shows a non-existent branch. - except GitCommandError as exc: - setup_error = str(exc) - # Default to first existing branch so the selected value is always valid. - default_baseline_branch = (available_branches[0] if available_branches else DEFAULT_BASELINE_BRANCH) or "master" - status_column_map = { - SeedStatus.draft: "seedInbox", - SeedStatus.queued: "seedInbox", - SeedStatus.planning: "generated", - SeedStatus.generated: "generated", - SeedStatus.dca_queued: "generated", - SeedStatus.adapting: "activeDca", - SeedStatus.running: "activeDca", - SeedStatus.passed: "completed", - SeedStatus.failed: "completed", - SeedStatus.promoted: "completed", - } - seeds_by_column: dict[str, list[SeedRecord]] = { - "seedInbox": [], - "generated": [], - "activeDca": [], - "completed": [], - } - for seed in seeds: - self._reconcile_seed_status_signal(seed) - column_id = status_column_map.get(seed.status, "seedInbox") - seeds_by_column[column_id].append(seed) - columns = [ - DashboardColumn( - id="seedInbox", - title="Seed", - description="New prompts and queued planning work.", - seeds=seeds_by_column["seedInbox"], - ), - DashboardColumn( - id="generated", - title="Plan", - description="Planning and generated code ready for Do-Check-Action.", - seeds=seeds_by_column["generated"], - ), - DashboardColumn( - id="activeDca", - title="Do-Check-Action", - description="Adapting, fixing, and running the seed run.", - seeds=seeds_by_column["activeDca"], - ), - DashboardColumn( - id="completed", - title="Completed", - description="Finished runs; promoted seeds merged into baseline.", - seeds=seeds_by_column["completed"], - ), - ] - return DashboardViewModel( - setup_error=setup_error, - baseline_metrics_by_branch=baseline_metrics_by_branch, - 
default_baseline_branch=default_baseline_branch, - available_branches=available_branches, - seed_count=len(seeds), - columns=columns, - selected_seed=selected_seed, - daemon_status=get_daemon_status(), - ) - - def seed_detail(self, seed_id: str) -> dict[str, object]: - seed = self.require_seed(seed_id) - expected_worktree = ( - self._baseline_worktree_path() - if seed.seed_id == BASELINE_SEED_ID - else self._seed_worktree_path(seed.seed_id) - ) - needs_save = False - if expected_worktree is not None and not seed.worktree_path: - seed.worktree_path = expected_worktree - needs_save = True - if needs_save: - seed.updated_at = now_ts() - self.seed_repo.save(seed) - self._reconcile_seed_status_signal(seed) - return { - "seed": seed, - "can_edit_prompt": self.can_edit_seed_prompt(seed), - "runs": self.run_repo.list(seed_id), - "events": self.seed_repo.events(seed_id), - "baseline_metrics_for_branch": self.metrics_repo.get_for_branch(seed.baseline_branch), - "setup_error": self.git_service.setup_error_for_branches(seed.baseline_branch), - } - - def extract_summary(self, output_text: str, stage: StageName) -> dict[str, object] | None: - start_marker, end_marker = SUMMARY_MARKERS[stage.value] - pattern = rf"{start_marker}\s*(\{{.*?\}})\s*{end_marker}" - match = re.search(pattern, output_text, flags=re.DOTALL) - if not match: - return None - try: - return json.loads(match.group(1)) - except json.JSONDecodeError: - return {"raw_summary": match.group(1)} - - def extract_metrics(self, output_text: str) -> dict[str, float | int]: - patterns = { - "val_bpb": r"^val_bpb:\s+([0-9.]+)", - "training_seconds": r"^training_seconds:\s+([0-9.]+)", - "total_seconds": r"^total_seconds:\s+([0-9.]+)", - "startup_seconds": r"^startup_seconds:\s+([0-9.]+)", - "peak_vram_mb": r"^peak_vram_mb:\s+([0-9.]+)", - "mfu_percent": r"^mfu_percent:\s+([0-9.]+)", - "total_tokens_M": r"^total_tokens_M:\s+([0-9.]+)", - "num_steps": r"^num_steps:\s+([0-9]+)", - "num_params_M": r"^num_params_M:\s+([0-9.]+)", 
- "depth": r"^depth:\s+([0-9]+)", - } - metrics: dict[str, float | int] = {} - for key, pattern in patterns.items(): - match = re.search(pattern, output_text, flags=re.MULTILINE) - if not match: - continue - metrics[key] = int(match.group(1)) if key in {"num_steps", "depth"} else float(match.group(1)) - return metrics - - def extract_dca_metrics( - self, output_text: str, summary: dict[str, object] | None = None - ) -> dict[str, float | int]: - if summary: - summary_metrics = summary.get("metrics") - if isinstance(summary_metrics, dict): - parsed: dict[str, float | int] = {} - int_keys = {"num_steps", "depth"} - float_keys = { - "val_bpb", - "training_seconds", - "total_seconds", - "startup_seconds", - "peak_vram_mb", - "mfu_percent", - "total_tokens_M", - "num_params_M", - } - for key in int_keys | float_keys: - value = summary_metrics.get(key) - if value is None: - continue - try: - parsed[key] = int(value) if key in int_keys else float(value) - except (TypeError, ValueError): - continue - if parsed: - return parsed - return self.extract_metrics(output_text) - - @staticmethod - def evaluate_signal( - metrics: dict[str, float | int], - last_val_bpb: float | None, - promotion_threshold: float = PROMOTION_THRESHOLD, - ) -> str: - val_bpb = metrics.get("val_bpb") - if val_bpb is None: - return "error" - if last_val_bpb is None: - return "positive_signal" - delta = float(last_val_bpb) - float(val_bpb) - if delta >= promotion_threshold: - return "positive_signal" - if delta <= -promotion_threshold: - return "negative_signal" - return "neutral" - - -def default_workflow_service() -> WorkflowService: - return WorkflowService() +from __future__ import annotations + +import json +from typing import Any +import re +import subprocess +from pathlib import Path + +from component_system.config import DEFAULT_BASELINE_BRANCH, PROMOTION_THRESHOLD +from component_system.domain.models import ( + DashboardColumn, + DashboardViewModel, + PlanIdea, + RunStatus, + SeedRecord, + 
SeedStatus, + StageName, + StageRun, +) +from component_system.repositories.state import ( + BaselineBranchMapRepository, + BaselineMetricsRepository, + RunRepository, + SeedRepository, +) +from component_system.task import ( + COMPONENT_SYSTEM_ROOT, + WORKTREE_ROOT, + get_daemon_status, + move_to_error, + now_ts, + new_run_id, + new_seed_id, + read_task, + write_task, +) + +SUMMARY_MARKERS = { + "p": ("AUTORESEARCH_P_SUMMARY_BEGIN", "AUTORESEARCH_P_SUMMARY_END"), + "dca": ("AUTORESEARCH_DCA_SUMMARY_BEGIN", "AUTORESEARCH_DCA_SUMMARY_END"), +} + +BASELINE_SEED_ID = "__baseline__" + +# Short display labels for timeline (kind -> one-line text). Events not in this map use message as-is (truncated if long). +TIMELINE_SHORT_MESSAGES = { + "seed.created": "Seed created", + "seed.updated": "Seed updated", + "seed.worktree_ready": "Worktree ready", + "ralph.enabled": "Ralph loop enabled", + "ralph.disabled": "Ralph loop disabled", + "p.queued": "Plan queued", + "p.started": "Plan started", + "p.completed": "Plan completed", + "p.failed": "Plan failed", + "dca.queued": "DCA queued", + "dca.started": "DCA started", + "dca.completed": "DCA completed", + "dca.merge_failed": "Merge into baseline failed", + "p.sync_resolution_queued": "Sync failed; merge resolution queued", + "p.sync_resolution_done": "Sync resolution done; Plan re-queued", + "dca.failed": "DCA failed", + "direct_code.failed": "Direct code failed", +} + + +def _timeline_display_events(events: list[dict[str, Any]]) -> list[dict[str, Any]]: + """Return events in reverse order (newest first), deduplicated by (kind, message), with concise display text.""" + if not events: + return [] + reversed_list = list(reversed(events)) + seen: set[tuple[str, str]] = set() + out: list[dict[str, Any]] = [] + for e in reversed_list: + kind = e.get("kind", "") + message = e.get("message", "") + key = (kind, message) + if key in seen: + continue + seen.add(key) + display = TIMELINE_SHORT_MESSAGES.get(kind) + if display is not None: + 
# Keep commit_sha / target_branch in a short suffix when present + parts = [display] + if e.get("commit_sha"): + parts.append(f"commit: {e.get('commit_sha', '')[:7]}") + if e.get("target_branch"): + parts.append(f"→ {e.get('target_branch')}") + display = " · ".join(parts) + else: + display = message if len(message) <= 80 else message[:77] + "..." + out.append({**e, "display_message": display}) + return out + + +class GitCommandError(RuntimeError): + pass + + +class SyncResolutionQueued(RuntimeError): + """Raised when P run cannot start because worktree sync with baseline failed; a sync-resolution DCA task was queued.""" + + +class DuplicateRunStartError(RuntimeError): + """Raised when mark_run_started is called for a run that was already started (e.g. restored in-progress task).""" + + +class GitService: + def __init__(self) -> None: + pass + + def _run_git(self, *args: str, cwd: Path | None = None) -> str: + try: + result = subprocess.run( + ["git", *args], + cwd=str(cwd) if cwd else None, + capture_output=True, + text=True, + check=True, + ) + except FileNotFoundError as exc: + raise GitCommandError("Git is not installed or not available on PATH.") from exc + except subprocess.CalledProcessError as exc: + stderr = (exc.stderr or exc.stdout or "").strip() + raise GitCommandError(stderr or f"git {' '.join(args)} failed") from exc + return result.stdout.strip() + + def repo_root(self) -> Path: + return Path(self._run_git("rev-parse", "--show-toplevel")) + + def current_head(self) -> str: + return self._run_git("rev-parse", "HEAD") + + def branch_exists(self, branch: str) -> bool: + try: + self._run_git("rev-parse", "--verify", branch) + return True + except GitCommandError: + return False + + def ensure_branch(self, branch: str, start_point: str) -> None: + if not self.branch_exists(branch): + self._run_git("branch", branch, start_point) + + def list_branches(self) -> list[str]: + output = self._run_git("branch", "--format=%(refname:short)") + branches = 
[line.strip() for line in output.splitlines() if line.strip()] + if not branches: + # Unborn repositories can have HEAD pointing to a branch name even before first commit. + try: + head_branch = self._run_git("symbolic-ref", "--short", "HEAD").strip() + if head_branch: + branches.append(head_branch) + except GitCommandError: + pass + return sorted(set(branches)) + + @staticmethod + def is_seed_specific_branch(branch: str) -> bool: + """True if this branch is the single working branch for a seed (seed_id), not a baseline choice.""" + if branch == BASELINE_SEED_ID: + return True + # One branch per seed: seed- + 6 hex chars, e.g. seed-e57b95 + if branch.startswith("seed-") and len(branch) == 11 and all( + c in "abcdef0123456789" for c in branch[5:] + ): + return True + return False + + def setup_error(self) -> str | None: + try: + self.repo_root() + return None + except GitCommandError as exc: + return str(exc) + + def setup_error_for_branches(self, baseline_branch: str) -> str | None: + try: + root = self.repo_root() + if not baseline_branch: + return "Please select a baseline branch." + if not self.branch_exists(baseline_branch): + return ( + f"Git repo found at {root}, but branch {baseline_branch!r} does not exist yet. " + "Select an existing baseline branch." + ) + return None + except GitCommandError as exc: + return str(exc) + + def ensure_seed_worktrees(self, seed: SeedRecord) -> SeedRecord: + """Ensure the seed worktree exists on the single branch for this seed: seed_id (SSOT).""" + repo_head = self.current_head() + self.ensure_branch(seed.baseline_branch, repo_head) + + seed_worktree = WORKTREE_ROOT / seed.seed_id + if seed_worktree.exists(): + seed.worktree_path = str(seed_worktree) + return seed + # One branch per seed: branch name = seed_id, created from baseline. 
+ try: + self._run_git("worktree", "add", "-B", seed.seed_id, str(seed_worktree), seed.baseline_branch) + except GitCommandError as exc: + # Recover from stale git worktree metadata like: + # "__baseline__ is already checked out at /old/path/__baseline__" + if not self._recover_checked_out_worktree_conflict( + seed.seed_id, seed_worktree, seed.baseline_branch, str(exc) + ): + raise + + seed.worktree_path = str(seed_worktree) + return seed + + @staticmethod + def _extract_checked_out_path(error: str) -> Path | None: + # git message example: fatal: '__baseline__' is already checked out at '/path' + match = re.search(r"already checked out at ['\"]([^'\"]+)['\"]", error) + if not match: + return None + return Path(match.group(1)) + + def _recover_checked_out_worktree_conflict( + self, branch: str, target_worktree: Path, start_point: str, error: str + ) -> bool: + if "already checked out at" not in error: + return False + # First, prune stale registrations from missing worktrees. + try: + self._run_git("worktree", "prune") + except GitCommandError: + pass + conflict_path = self._extract_checked_out_path(error) + if conflict_path is not None: + # Force-remove the conflicting worktree from registry (same path after hard reset or different path). + try: + self._run_git("worktree", "remove", "--force", str(conflict_path)) + except GitCommandError: + pass + try: + self._run_git("worktree", "prune") + except GitCommandError: + pass + self._run_git("worktree", "add", "-B", branch, str(target_worktree), start_point) + return True + + def commit_sha(self, ref: str) -> str: + return self._run_git("rev-parse", "--short", ref) + + def head_sha_at(self, cwd: Path) -> str: + """Return the short commit SHA of HEAD in the given worktree directory.""" + return self._run_git("rev-parse", "--short", "HEAD", cwd=cwd) + + def reset_seed_branch_to(self, seed: SeedRecord, ref: str) -> None: + """Reset the seed worktree's branch to the given ref (e.g. commit before P). 
+ No-op for baseline seed or when worktree is missing.""" + if seed.seed_id == BASELINE_SEED_ID: + return + if not seed.worktree_path: + return + worktree_path = Path(seed.worktree_path) + if not worktree_path.is_dir(): + return + self._run_git("reset", "--hard", ref, cwd=worktree_path) + + def sync_seed_worktree_with_baseline(self, seed: SeedRecord) -> None: + """Merge the baseline branch into the seed branch in the seed worktree. + Call before each P run so the worktree has the latest baseline.""" + if seed.seed_id == BASELINE_SEED_ID: + return + if not seed.worktree_path: + return + worktree_path = Path(seed.worktree_path) + if not worktree_path.is_dir(): + return + self._run_git("merge", "--no-edit", seed.baseline_branch, cwd=worktree_path) + + def promote_seed_branch( + self, seed: SeedRecord, target_branch: str | None = None + ) -> str: + """Merge the seed's branch (seed_id) into the target branch. Only DCA Action may call this; Plan must never merge. + If target_branch is None, use seed.baseline_branch (e.g. for normal seed promotion). 
For __baseline__ completion, + pass the first user seed's selected branch so the merge goes there instead of a fixed config value.""" + merge_into = target_branch if target_branch is not None else seed.baseline_branch + baseline_worktree = WORKTREE_ROOT / "baseline" + if baseline_worktree.exists(): + try: + self._run_git("worktree", "remove", "--force", str(baseline_worktree)) + except GitCommandError: + pass + self._run_git( + "worktree", + "add", + "--force", + "-B", + merge_into, + str(baseline_worktree), + merge_into, + ) + self._run_git("merge", "--no-edit", seed.seed_id, cwd=baseline_worktree) + return self.commit_sha(merge_into) + + +class WorkflowService: + def __init__( + self, + seed_repo: SeedRepository | None = None, + run_repo: RunRepository | None = None, + branch_map_repo: BaselineBranchMapRepository | None = None, + metrics_repo: BaselineMetricsRepository | None = None, + git_service: GitService | None = None, + ) -> None: + self.seed_repo = seed_repo or SeedRepository() + self.run_repo = run_repo or RunRepository() + self.branch_map_repo = branch_map_repo or BaselineBranchMapRepository() + self.metrics_repo = metrics_repo or BaselineMetricsRepository() + self.git_service = git_service or GitService() + + @staticmethod + def _seed_worktree_path(seed_id: str) -> str: + return str(WORKTREE_ROOT / seed_id) + + @staticmethod + def _baseline_worktree_path() -> str: + return str(WORKTREE_ROOT / BASELINE_SEED_ID) + + def _normalize_seed_runtime_state(self, seed: SeedRecord) -> SeedRecord: + """Ensure baseline seed worktree_path matches the canonical path.""" + if seed.seed_id != BASELINE_SEED_ID: + return seed + expected_worktree = self._baseline_worktree_path() + if seed.worktree_path == expected_worktree: + return seed + seed.worktree_path = expected_worktree + seed.updated_at = now_ts() + self.seed_repo.save(seed) + return seed + + def ensure_seed_worktree_ready(self, seed_id: str) -> SeedRecord: + """Ensure the runtime seed worktree exists; recreate 
only when missing.""" + seed = self.require_seed(seed_id) + if seed.seed_id == BASELINE_SEED_ID: + expected_worktree = self._baseline_worktree_path() + if Path(expected_worktree).is_dir(): + if seed.worktree_path != expected_worktree: + seed.worktree_path = expected_worktree + seed.updated_at = now_ts() + self.seed_repo.save(seed) + return seed + seed = self.git_service.ensure_seed_worktrees(seed) + seed.updated_at = now_ts() + self.seed_repo.save(seed) + commit_sha = "" + try: + commit_sha = self.git_service.commit_sha(seed.baseline_branch) + except GitCommandError: + pass + self.seed_repo.append_event( + seed.seed_id, + "seed.worktree_ready", + "Recreated missing baseline worktree before the run started.", + commit_sha=commit_sha or None, + ) + return seed + expected_worktree = self._seed_worktree_path(seed.seed_id) + if Path(expected_worktree).is_dir(): + if seed.worktree_path != expected_worktree: + seed.worktree_path = expected_worktree + seed.updated_at = now_ts() + self.seed_repo.save(seed) + return seed + seed = self.git_service.ensure_seed_worktrees(seed) + seed.updated_at = now_ts() + self.seed_repo.save(seed) + commit_sha = "" + try: + commit_sha = self.git_service.commit_sha(seed.seed_id) + except GitCommandError: + pass + self.seed_repo.append_event( + seed.seed_id, + "seed.worktree_ready", + "Recreated missing seed worktree before the run started.", + commit_sha=commit_sha or None, + ) + return seed + + def _preferred_baseline_branch(self) -> str: + setup_error = self.git_service.setup_error() + if setup_error is not None: + return DEFAULT_BASELINE_BRANCH + try: + branches = [ + branch + for branch in self.git_service.list_branches() + if not self.git_service.is_seed_specific_branch(branch) + ] + except GitCommandError: + return DEFAULT_BASELINE_BRANCH + if branches and DEFAULT_BASELINE_BRANCH in branches: + return DEFAULT_BASELINE_BRANCH + return branches[0] if branches else DEFAULT_BASELINE_BRANCH + + def _first_user_seed_baseline_branch(self) -> 
str | None: + """Return the baseline_branch of the earliest-created user seed (excluding __baseline__), or None.""" + user_seeds = [s for s in self.seed_repo.list() if s.seed_id != BASELINE_SEED_ID] + if not user_seeds: + return None + first = min(user_seeds, key=lambda s: s.created_at) + return first.baseline_branch or None + + def _enqueue_plan_run(self, seed: SeedRecord, event_kind: str = "p.queued", event_message: str = "Queued Plan stage for the seed.") -> StageRun: + run = StageRun( + run_id=new_run_id("p"), + seed_id=seed.seed_id, + stage=StageName.p, + status=RunStatus.queued, + task_id=new_run_id("task-p"), + created_at=now_ts(), + updated_at=now_ts(), + ) + seed.status = SeedStatus.queued + seed.updated_at = now_ts() + seed.latest_run_id = run.run_id + seed.last_error = None + self.seed_repo.save(seed) + self.run_repo.save(run) + self.seed_repo.append_event(seed.seed_id, event_kind, event_message) + write_task( + "p", + { + "seed_id": seed.seed_id, + "run_id": run.run_id, + "prompt": seed.prompt, + "worktree_path": seed.worktree_path, + }, + task_id=run.task_id, + ) + return run + + def _release_seeds_waiting_for_baseline(self, branch: str) -> None: + """Release seeds that were waiting for baseline result on the given branch.""" + branch_metrics = self.metrics_repo.get_for_branch(branch) + if not branch_metrics or branch_metrics.get("best_val_bpb") is None: + return + waiting_seeds = sorted(self.seed_repo.list(), key=lambda item: item.created_at) + for seed in waiting_seeds: + if seed.seed_id == BASELINE_SEED_ID: + continue + if seed.baseline_branch != branch: + continue + if seed.status is not SeedStatus.queued or seed.latest_run_id is not None: + continue + self._enqueue_plan_run( + seed, + event_kind="p.released", + event_message="Baseline is ready; queued Plan stage for the waiting seed.", + ) + + def _commit_sha_for_branch(self, branch: str) -> str: + """Return current commit SHA for branch, or 'unknown' if unavailable (ensures baseline_metrics never 
has null commit_sha).""" + try: + sha = self.git_service.commit_sha(branch) + return sha if (isinstance(sha, str) and sha.strip()) else "unknown" + except GitCommandError: + return "unknown" + + @staticmethod + def _status_from_dca_signal(signal: str) -> SeedStatus: + """Centralized mapping from DCA signal to terminal seed status.""" + if signal == "positive_signal": + return SeedStatus.promoted + if signal == "error": + return SeedStatus.failed + return SeedStatus.passed + + def _reconcile_seed_status_signal(self, seed: SeedRecord) -> bool: + """ + Auto-heal known inconsistent terminal combinations from historical data. + + Returns True when the seed was updated and persisted. + """ + if seed.status is SeedStatus.passed and seed.latest_signal == "error": + seed.status = SeedStatus.failed + seed.updated_at = now_ts() + self.seed_repo.save(seed) + self.seed_repo.append_event( + seed.seed_id, + "seed.reconciled", + "Reconciled inconsistent terminal state (passed + error) to failed.", + ) + return True + return False + + def create_seed( + self, + prompt: str, + baseline_branch: str | None = None, + ralph_loop_enabled: bool = False, + ) -> SeedRecord: + seed_id = new_seed_id() + selected_baseline = (baseline_branch or DEFAULT_BASELINE_BRANCH).strip() + seed = SeedRecord( + seed_id=seed_id, + prompt=prompt.strip(), + status=SeedStatus.draft, + created_at=now_ts(), + updated_at=now_ts(), + baseline_branch=selected_baseline, + worktree_path=self._seed_worktree_path(seed_id), + ralph_loop_enabled=ralph_loop_enabled, + ) + self.seed_repo.save(seed) + self.branch_map_repo.set_branch_for_seed(seed_id, selected_baseline) + try: + pass # branch seed_id is created when Plan is queued (ensure_seed_worktrees) + except GitCommandError: + # Keep seed creation non-blocking; branch creation will be retried at P queue time. 
+ pass + self.seed_repo.append_event(seed.seed_id, "seed.created", "Seed created from prompt.") + if ralph_loop_enabled: + self.seed_repo.append_event( + seed.seed_id, + "ralph.enabled", + "Ralph loop enabled; Plan will auto-requeue after each DCA completion.", + ) + return seed + + def create_direct_code_seed(self, prompt: str) -> tuple[SeedRecord, StageRun]: + cleaned_prompt = prompt.strip() + if not cleaned_prompt: + raise RuntimeError("Prompt cannot be empty.") + baseline_branch = self._preferred_baseline_branch() + seed_id = new_seed_id("direct") + now = now_ts() + run = StageRun( + run_id=new_run_id("direct"), + seed_id=seed_id, + stage=StageName.direct, + status=RunStatus.queued, + task_id=new_run_id("task-direct"), + created_at=now, + updated_at=now, + ) + seed = SeedRecord( + seed_id=seed_id, + prompt=cleaned_prompt, + status=SeedStatus.adapting, + created_at=now, + updated_at=now, + baseline_branch=baseline_branch, + worktree_path=str(COMPONENT_SYSTEM_ROOT.parent), + latest_run_id=run.run_id, + plan=PlanIdea( + title="Direct code agent", + target_component="project_root", + description="Direct code agent run requested from the dashboard and executed from the project root.", + ), + ) + self.seed_repo.save(seed) + self.branch_map_repo.set_branch_for_seed(seed_id, baseline_branch) + self.run_repo.save(run) + self.seed_repo.append_event(seed.seed_id, "seed.created", "Seed created from direct code agent prompt.") + self.seed_repo.append_event( + seed.seed_id, + "direct_code.queued", + "Queued direct code agent run from the project root.", + run_id=run.run_id, + ) + write_task( + "direct", + { + "seed_id": seed.seed_id, + "run_id": run.run_id, + "prompt": seed.prompt, + "worktree_path": None, + }, + task_id=run.task_id, + ) + return seed, run + + def _get_or_create_baseline_seed(self, baseline_branch: str | None = None) -> SeedRecord: + """Return the baseline seed for establishing initial val_bpb; create with baseline_branch if missing.""" + seed = 
self.seed_repo.get(BASELINE_SEED_ID) + if seed is not None: + return self._normalize_seed_runtime_state(seed) + branch = baseline_branch if baseline_branch is not None else ( + self._first_user_seed_baseline_branch() or DEFAULT_BASELINE_BRANCH + ) + seed = SeedRecord( + seed_id=BASELINE_SEED_ID, + prompt="Baseline measurement: run training on current code without changes.", + status=SeedStatus.draft, + created_at=now_ts(), + updated_at=now_ts(), + baseline_branch=branch, + worktree_path=self._baseline_worktree_path(), + ralph_loop_enabled=False, + ) + self.seed_repo.save(seed) + self.branch_map_repo.set_branch_for_seed(BASELINE_SEED_ID, branch) + self.seed_repo.append_event( + seed.seed_id, + "seed.created", + "Baseline seed created for initial measurement.", + ) + return seed + + def ensure_baseline_result(self, baseline_branch: str) -> None: + """ + If there is no baseline result (best_val_bpb) for the given branch, ensure a baseline seed exists for that + branch, ensure its worktree is checked out from baseline_branch, then queue DCA to establish baseline. + Idempotent; safe to call before queue_p for any user seed. Call with seed.baseline_branch. 
+ """ + seed = self._get_or_create_baseline_seed(baseline_branch) + branch_metrics = self.metrics_repo.get_for_branch(baseline_branch) + if branch_metrics and branch_metrics.get("best_val_bpb") is not None: + return + if seed.baseline_branch != baseline_branch: + return + if seed.status in (SeedStatus.dca_queued, SeedStatus.adapting, SeedStatus.running): + return + if seed.status in (SeedStatus.passed, SeedStatus.failed, SeedStatus.promoted): + branch_metrics = self.metrics_repo.get_for_branch(baseline_branch) + if branch_metrics and branch_metrics.get("best_val_bpb") is not None: + return + setup_error = self.git_service.setup_error() + if setup_error is not None: + return + try: + self.git_service.ensure_branch(baseline_branch, self.git_service.current_head()) + except GitCommandError: + return + setup_error = self.git_service.setup_error_for_branches(baseline_branch) + if setup_error is not None: + return + self.ensure_seed_worktree_ready(BASELINE_SEED_ID) + seed = self.require_seed(BASELINE_SEED_ID) + seed.status = SeedStatus.generated + seed.plan = PlanIdea(title="Baseline", description="No changes; measure current baseline.") + seed.updated_at = now_ts() + self.seed_repo.save(seed) + self.seed_repo.append_event( + seed.seed_id, + "baseline.queued", + "Queued DCA to establish baseline result before first seed.", + ) + self.queue_dca(seed.seed_id) + + def set_ralph_loop(self, seed_id: str, enabled: bool) -> SeedRecord: + seed = self.require_seed(seed_id) + if seed.ralph_loop_enabled == enabled: + return seed + seed.ralph_loop_enabled = enabled + seed.updated_at = now_ts() + self.seed_repo.save(seed) + if enabled: + self.seed_repo.append_event( + seed.seed_id, + "ralph.enabled", + "Ralph loop enabled; Plan will auto-requeue after each DCA completion.", + ) + else: + self.seed_repo.append_event(seed.seed_id, "ralph.disabled", "Ralph loop disabled by user.") + return seed + + def can_edit_seed_prompt(self, seed: SeedRecord) -> bool: + return seed.status in 
{SeedStatus.draft, SeedStatus.queued} + + def update_seed_prompt(self, seed_id: str, prompt: str) -> SeedRecord: + seed = self.require_seed(seed_id) + if not self.can_edit_seed_prompt(seed): + raise RuntimeError("Seed prompt can only be edited before Plan starts.") + updated_prompt = prompt.strip() + if not updated_prompt: + raise RuntimeError("Prompt cannot be empty.") + if updated_prompt == seed.prompt: + return seed + seed.prompt = updated_prompt + seed.updated_at = now_ts() + self.seed_repo.save(seed) + self.seed_repo.append_event(seed.seed_id, "seed.updated", "Seed prompt was edited before execution.") + return seed + + def queue_p(self, seed_id: str) -> StageRun | None: + seed = self.require_seed(seed_id) + branch_metrics = self.metrics_repo.get_for_branch(seed.baseline_branch) if seed_id != BASELINE_SEED_ID else None + has_baseline = branch_metrics is not None and branch_metrics.get("best_val_bpb") is not None + if seed_id != BASELINE_SEED_ID and not has_baseline: + self.ensure_baseline_result(seed.baseline_branch) + branch_metrics = self.metrics_repo.get_for_branch(seed.baseline_branch) + has_baseline = branch_metrics is not None and branch_metrics.get("best_val_bpb") is not None + if not has_baseline: + baseline_seed = self.seed_repo.get(BASELINE_SEED_ID) + # Only wait for baseline when the baseline seed is for this branch (e.g. master). + # For another branch (e.g. dev), no baseline run is queued for it, so allow planning; + # the first DCA completion on this branch will establish baseline metrics. 
+ if baseline_seed is not None and baseline_seed.baseline_branch == seed.baseline_branch: + if not (seed.status is SeedStatus.queued and seed.latest_run_id is None): + seed.status = SeedStatus.queued + seed.updated_at = now_ts() + seed.latest_run_id = None + seed.last_error = None + self.seed_repo.save(seed) + self.seed_repo.append_event( + seed.seed_id, + "p.waiting_for_baseline", + "Baseline run is still in progress; Plan will queue after baseline finishes.", + ) + return None + # Branch has no baseline and is not the baseline seed's branch: proceed with planning. + setup_error = self.git_service.setup_error() + if setup_error is not None: + raise RuntimeError(setup_error) + try: + self.git_service.ensure_branch(seed.baseline_branch, self.git_service.current_head()) + except GitCommandError: + pass + setup_error = self.git_service.setup_error_for_branches(seed.baseline_branch) + if setup_error is not None: + raise RuntimeError(setup_error) + return self._enqueue_plan_run(seed) + + def queue_dca( + self, + seed_id: str, + merge_resolution: bool = False, + metrics_recovery: bool = False, + source_run_id: str | None = None, + source_stdout_log_path: str | None = None, + source_stderr_log_path: str | None = None, + last_metrics: dict[str, Any] | None = None, + last_summary: dict[str, Any] | None = None, + commit_sha_before_p: str | None = None, + ) -> StageRun: + seed = self.require_seed(seed_id) + if not metrics_recovery and seed.status in {SeedStatus.draft, SeedStatus.queued, SeedStatus.planning}: + raise RuntimeError("Run Plan first. 
Do-Check-Action is available after code is generated into the seed branch.") + if not metrics_recovery: + setup_error = self.git_service.setup_error_for_branches(seed.baseline_branch) + if setup_error is not None: + raise RuntimeError(setup_error) + run = StageRun( + run_id=new_run_id("dca"), + seed_id=seed.seed_id, + stage=StageName.dca, + status=RunStatus.queued, + task_id=new_run_id("task-dca"), + created_at=now_ts(), + updated_at=now_ts(), + ) + if seed.seed_id != BASELINE_SEED_ID: + try: + # Ref to restore worktree to on negative signal (commit before P when from finish_p_run, else baseline). + run.summary["commit_sha_before_p"] = ( + commit_sha_before_p + if commit_sha_before_p is not None + else self.git_service.commit_sha(seed.baseline_branch) + ) + except GitCommandError: + pass + seed.status = SeedStatus.dca_queued + seed.updated_at = now_ts() + seed.latest_run_id = run.run_id + seed.last_error = None + self.seed_repo.save(seed) + self.run_repo.save(run) + self.seed_repo.append_event( + seed.seed_id, + "dca.queued", + "Queued DCA for merge conflict resolution." + if merge_resolution + else "Queued DCA for metrics recovery from saved logs." 
+ if metrics_recovery + else "Queued DCA stage for the seed.", + ) + payload = { + "seed_id": seed.seed_id, + "run_id": run.run_id, + "prompt": seed.prompt, + "worktree_path": seed.worktree_path, + "merge_resolution": merge_resolution, + "metrics_recovery": metrics_recovery, + } + if merge_resolution: + payload["baseline_branch"] = seed.baseline_branch + if last_metrics is not None: + payload["last_metrics"] = last_metrics + if last_summary is not None: + payload["last_summary"] = last_summary + if metrics_recovery: + payload["source_run_id"] = source_run_id + payload["source_stdout_log_path"] = source_stdout_log_path + payload["source_stderr_log_path"] = source_stderr_log_path + payload["worktree_path"] = None + write_task("dca", payload, task_id=run.task_id) + return run + + def queue_sync_resolution(self, seed_id: str) -> StageRun: + """Queue a merge-resolution run to resolve 'merge baseline into seed' in the seed worktree (e.g. after sync failed before P).""" + seed = self.require_seed(seed_id) + if seed.seed_id == BASELINE_SEED_ID: + raise RuntimeError("Sync resolution is not used for the baseline seed.") + setup_error = self.git_service.setup_error_for_branches(seed.baseline_branch) + if setup_error is not None: + raise RuntimeError(setup_error) + run = StageRun( + run_id=new_run_id("dca"), + seed_id=seed.seed_id, + stage=StageName.dca, + status=RunStatus.queued, + task_id=new_run_id("task-dca"), + created_at=now_ts(), + updated_at=now_ts(), + ) + seed.status = SeedStatus.dca_queued + seed.updated_at = now_ts() + seed.latest_run_id = run.run_id + seed.last_error = None + self.seed_repo.save(seed) + self.run_repo.save(run) + self.seed_repo.append_event( + seed.seed_id, + "p.sync_resolution_queued", + "Worktree sync with baseline failed; queued merge-resolution to resolve and re-run Plan.", + ) + payload = { + "seed_id": seed.seed_id, + "run_id": run.run_id, + "prompt": seed.prompt, + "worktree_path": seed.worktree_path, + "baseline_branch": 
seed.baseline_branch, + "sync_resolution": True, + } + write_task("dca", payload, task_id=run.task_id) + return run + + def finish_sync_resolution(self, seed_id: str, run_id: str) -> None: + """Mark sync-resolution run completed and re-queue Plan for the seed.""" + seed = self.require_seed(seed_id) + run = self.require_run(run_id) + run.status = RunStatus.succeeded + run.updated_at = now_ts() + self.run_repo.save(run) + seed.status = SeedStatus.queued + seed.updated_at = now_ts() + self.seed_repo.save(seed) + self.seed_repo.append_event( + seed.seed_id, + "p.sync_resolution_done", + "Sync resolution completed; Plan re-queued.", + run_id=run_id, + ) + self._enqueue_plan_run( + seed, + event_kind="p.queued", + event_message="Re-queued Plan after sync resolution.", + ) + + def require_seed(self, seed_id: str) -> SeedRecord: + seed = self.seed_repo.get(seed_id) + if seed is None: + raise KeyError(f"Unknown seed_id={seed_id}") + return self._normalize_seed_runtime_state(seed) + + def require_run(self, run_id: str) -> StageRun: + run = self.run_repo.get(run_id) + if run is None: + raise KeyError(f"Unknown run_id={run_id}") + return run + + def is_seed_eligible_for_stage(self, seed_id: str | None, stage: str) -> bool: + """True if this seed is in a state that allows the given stage to run (used at claim time to avoid P/DCA races).""" + if not seed_id: + return False + seed = self.seed_repo.get(seed_id) + if seed is None: + return False + seed = self._normalize_seed_runtime_state(seed) + if stage == "p": + return seed.status not in (SeedStatus.adapting, SeedStatus.running, SeedStatus.dca_queued) + if stage == "dca": + return seed.status is not SeedStatus.planning + if stage == "direct": + return True + return False + + def mark_run_started(self, seed_id: str, run_id: str) -> tuple[SeedRecord, StageRun]: + seed = self.require_seed(seed_id) + run = self.require_run(run_id) + if run.status != RunStatus.queued: + raise DuplicateRunStartError( + f"Run {run_id} already started 
(status={run.status}); possible restored in-progress task." + ) + run.status = RunStatus.running + run.updated_at = now_ts() + if run.stage is StageName.p: + setup_error = self.git_service.setup_error() + if setup_error is not None: + raise RuntimeError(setup_error) + try: + self.git_service.ensure_branch(seed.baseline_branch, self.git_service.current_head()) + except GitCommandError: + pass + setup_error = self.git_service.setup_error_for_branches(seed.baseline_branch) + if setup_error is not None: + raise RuntimeError(setup_error) + seed = self.ensure_seed_worktree_ready(seed.seed_id) + # Sync seed worktree with baseline branch before P so Plan runs from latest baseline. + try: + self.git_service.sync_seed_worktree_with_baseline(seed) + except GitCommandError as sync_err: + run.status = RunStatus.failed + run.error = f"Worktree sync with baseline failed: {sync_err}" + self.run_repo.save(run) + self.queue_sync_resolution(seed.seed_id) + raise SyncResolutionQueued( + f"Worktree sync with baseline failed: {sync_err}. Queued merge-resolution." + ) from sync_err + # Record baseline val_bpb at sync time for positive/negative/neutral judgement in DCA. + branch_metrics = self.metrics_repo.get_for_branch(seed.baseline_branch) + former = branch_metrics.get("best_val_bpb") if branch_metrics else None + if run.summary is None: + run.summary = {} + run.summary["former_val_bpb"] = former + seed.former_val_bpb = float(former) if former is not None else None + if seed.worktree_path: + worktree_path = Path(seed.worktree_path) + if worktree_path.is_dir(): + try: + run.summary["commit_sha_before_p"] = self.git_service.head_sha_at( + worktree_path + ) + except GitCommandError: + pass + seed.status = SeedStatus.planning + event_kind = "p.started" + event_message = "Plan stage started in the candidate worktree." + else: + seed.status = SeedStatus.adapting + event_kind = "dca.started" + event_message = ( + "Baseline measurement started in the baseline worktree." 
+ if seed.seed_id == BASELINE_SEED_ID + else "DCA stage started in the seed worktree." + ) + seed.updated_at = now_ts() + self.run_repo.save(run) + self.seed_repo.save(seed) + self.seed_repo.append_event(seed.seed_id, event_kind, event_message, run_id=run_id) + return seed, run + + def mark_direct_code_run_started(self, seed_id: str, run_id: str) -> tuple[SeedRecord, StageRun]: + seed = self.require_seed(seed_id) + run = self.require_run(run_id) + run.status = RunStatus.running + run.updated_at = now_ts() + seed.status = SeedStatus.adapting + seed.updated_at = now_ts() + self.run_repo.save(run) + self.seed_repo.save(seed) + self.seed_repo.append_event( + seed.seed_id, + "direct_code.started", + "Direct code agent started from the project root.", + run_id=run_id, + ) + return seed, run + + def mark_direct_code_run_failed( + self, + seed_id: str, + run_id: str, + error: str, + task_path: Path | None = None, + prompt_path: str | None = None, + log_path: str | None = None, + stderr_log_path: str | None = None, + ) -> None: + seed = self.require_seed(seed_id) + run = self.require_run(run_id) + run.status = RunStatus.failed + run.updated_at = now_ts() + run.error = error + if prompt_path is not None: + run.prompt_path = prompt_path + if log_path is not None: + run.log_path = log_path + if stderr_log_path is not None: + run.stderr_log_path = stderr_log_path + seed.status = SeedStatus.failed + seed.updated_at = now_ts() + seed.last_error = error + self.run_repo.save(run) + self.seed_repo.save(seed) + self.seed_repo.append_event(seed.seed_id, "direct_code.failed", error, run_id=run_id) + if task_path is not None and task_path.exists(): + move_to_error(task_path) + + def _ralph_try_restore_worktree(self, seed: SeedRecord, ref: str | None) -> None: + """Reset seed worktree to ref (e.g. commit before P) and log result. 
No-op if ref missing or baseline seed.""" + if not ref or not str(ref).strip() or seed.seed_id == BASELINE_SEED_ID: + return + try: + self.git_service.reset_seed_branch_to(seed, ref) + self.seed_repo.append_event( + seed.seed_id, + "ralph.worktree_restored", + "Restored seed worktree to commit before P for next Plan.", + commit_sha=ref, + ) + except GitCommandError as exc: + self.seed_repo.append_event( + seed.seed_id, + "ralph.worktree_restore_failed", + f"Could not restore seed worktree to commit before P: {exc}", + commit_sha=ref, + ) + + def mark_run_failed( + self, + seed_id: str, + run_id: str, + error: str, + task_path: Path | None = None, + prompt_path: str | None = None, + log_path: str | None = None, + stderr_log_path: str | None = None, + ) -> None: + seed = self.require_seed(seed_id) + run = self.require_run(run_id) + task_payload: dict[str, Any] = {} + if task_path is not None and task_path.exists(): + task_payload = read_task(task_path) + run.status = RunStatus.failed + run.updated_at = now_ts() + run.error = error + if prompt_path is not None: + run.prompt_path = prompt_path + if log_path is not None: + run.log_path = log_path + if stderr_log_path is not None: + run.stderr_log_path = stderr_log_path + seed.status = SeedStatus.failed + seed.updated_at = now_ts() + seed.last_error = error + self.run_repo.save(run) + self.seed_repo.save(seed) + self.seed_repo.append_event(seed.seed_id, f"{run.stage.value}.failed", error, run_id=run_id) + if ( + run.stage is StageName.dca + and seed.ralph_loop_enabled + and seed.seed_id != BASELINE_SEED_ID + and task_payload.get("merge_resolution") is not True + and task_payload.get("metrics_recovery") is not True + ): + self._ralph_try_restore_worktree(seed, run.summary.get("commit_sha_before_p")) + try: + self.queue_p(seed.seed_id) + self.seed_repo.append_event( + seed.seed_id, + "ralph.requeued", + "Ralph loop queued the next Plan run after failed DCA.", + ) + except (RuntimeError, GitCommandError) as exc: + 
self.seed_repo.append_event( + seed.seed_id, + "ralph.requeue_failed", + f"Ralph loop could not queue the next Plan run after failed DCA: {exc}", + ) + if task_path is not None and task_path.exists(): + move_to_error(task_path) + + def finish_direct_code_run( + self, + seed_id: str, + run_id: str, + stdout: str, + stderr: str | None = None, + log_path: str | None = None, + stderr_log_path: str | None = None, + prompt_path: str | None = None, + ) -> StageRun: + seed = self.require_seed(seed_id) + run = self.require_run(run_id) + run.status = RunStatus.succeeded + run.updated_at = now_ts() + run.log_path = log_path + run.stderr_log_path = stderr_log_path + run.prompt_path = prompt_path + run.summary = { + "mode": "direct_code_agent", + "cwd": str(COMPONENT_SYSTEM_ROOT.parent), + "stdout_bytes": len(stdout.encode("utf-8", errors="replace")), + "stderr_bytes": len((stderr or "").encode("utf-8", errors="replace")), + } + run.signal = "direct_code_completed" + seed.status = SeedStatus.passed + seed.updated_at = now_ts() + seed.latest_signal = run.signal + seed.last_error = None + self.run_repo.save(run) + self.seed_repo.save(seed) + self.seed_repo.append_event( + seed.seed_id, + "direct_code.completed", + "Direct code agent completed from the project root.", + run_id=run_id, + ) + return run + + def finish_p_run( + self, + seed_id: str, + run_id: str, + stdout: str, + log_path: str | None = None, + stderr_log_path: str | None = None, + prompt_path: str | None = None, + ) -> StageRun: + seed = self.require_seed(seed_id) + run = self.require_run(run_id) + summary = self.extract_summary(stdout, StageName.p) or {} + seed.plan = PlanIdea( + title=summary.get("idea", "Generated plan"), + target_component=summary.get("target_component", "model"), + description=summary.get("description", ""), + source_refs=summary.get("source_refs", []), + commit_sha=summary.get("commit_sha"), + ) + # Single branch per seed (SSOT): worktree is already on seed_id branch. 
+ commit_sha = self.git_service.commit_sha(seed.seed_id) + run.status = RunStatus.succeeded + run.updated_at = now_ts() + run.log_path = log_path + run.stderr_log_path = stderr_log_path + run.prompt_path = prompt_path + # Preserve run.summary fields set earlier (e.g. commit_sha_before_p) when merging P output. + run.summary = run.summary | summary | {"commit_sha": commit_sha} + seed.status = SeedStatus.generated + seed.updated_at = now_ts() + self.run_repo.save(run) + self.seed_repo.save(seed) + self.seed_repo.append_event( + seed.seed_id, + "p.completed", + "Plan completed on seed branch.", + commit_sha=commit_sha, + ) + self.queue_dca( + seed.seed_id, + commit_sha_before_p=run.summary.get("commit_sha_before_p"), + ) + return run + + @staticmethod + def combine_output(stdout: str, stderr: str | None = None) -> str: + if stdout and stderr: + return f"{stdout}\n{stderr}" + return stdout or stderr or "" + + def finish_dca_run( + self, + seed_id: str, + run_id: str, + stdout: str, + stderr: str | None = None, + log_path: str | None = None, + stderr_log_path: str | None = None, + prompt_path: str | None = None, + metrics_recovery: bool = False, + merge_resolution: bool = False, + ) -> StageRun: + seed = self.require_seed(seed_id) + run = self.require_run(run_id) + branch_metrics = self.metrics_repo.get_for_branch(seed.baseline_branch) + best_val_bpb = float(branch_metrics["best_val_bpb"]) if branch_metrics and branch_metrics.get("best_val_bpb") is not None else None + # Use baseline at sync-before-P time (former_val_bpb) when available; else branch best for baseline seed. 
+ baseline_for_signal = seed.former_val_bpb if (seed.former_val_bpb is not None and seed.seed_id != BASELINE_SEED_ID) else best_val_bpb + output_text = self.combine_output(stdout, stderr) + summary = self.extract_summary(output_text, StageName.dca) or {} + metrics = self.extract_dca_metrics(output_text, summary) + signal = self.evaluate_signal(metrics, baseline_for_signal, PROMOTION_THRESHOLD) + commit_sha = summary.get("commit_sha") + if not (isinstance(commit_sha, str) and commit_sha.strip()): + try: + commit_sha = self.git_service.commit_sha(seed.seed_id) + except GitCommandError: + commit_sha = "" + run.status = RunStatus.succeeded + run.updated_at = now_ts() + run.log_path = log_path + run.stderr_log_path = stderr_log_path + run.prompt_path = prompt_path + # Preserve runner-set keys (e.g. commit_sha_before_p, former_val_bpb) for restore and comparison. + preserved = {k: run.summary[k] for k in ("commit_sha_before_p", "former_val_bpb") if run.summary and k in run.summary} + if seed.former_val_bpb is not None and "former_val_bpb" not in preserved: + preserved["former_val_bpb"] = seed.former_val_bpb + run.summary = summary | {"commit_sha": commit_sha} | preserved + run.metrics = metrics + run.signal = signal + seed.updated_at = now_ts() + if signal == "error" and not metrics_recovery: + run.summary = run.summary | {"metrics_recovery_queued": True} + self.run_repo.save(run) + self.seed_repo.save(seed) + self.seed_repo.append_event( + seed.seed_id, + "dca.metrics_recovery_queued", + "DCA completed without recoverable metrics in the structured report; queued a follow-up DCA to inspect saved logs.", + run_id=run_id, + ) + self.queue_dca( + seed.seed_id, + metrics_recovery=True, + source_run_id=run_id, + source_stdout_log_path=log_path, + source_stderr_log_path=stderr_log_path, + ) + if ( + seed.ralph_loop_enabled + and seed.seed_id != BASELINE_SEED_ID + ): + self._ralph_try_restore_worktree(seed, run.summary.get("commit_sha_before_p")) + return run + 
seed.latest_metrics = metrics + seed.latest_signal = signal + terminal_status = self._status_from_dca_signal(signal) + merge_commit_sha = None # set when seed branch is successfully merged into baseline + if seed.seed_id == BASELINE_SEED_ID and best_val_bpb is None: + if "val_bpb" not in metrics: + seed.status = SeedStatus.failed + event_message = ( + "Baseline metrics recovery could not recover metrics; marked as failed." + if metrics_recovery + else "Baseline measurement completed without metrics; marked as failed." + ) + self.run_repo.save(run) + self.seed_repo.save(seed) + self.seed_repo.append_event( + seed.seed_id, + "dca.completed", + event_message, + signal=signal, + metrics=metrics, + ) + return run + target_branch = self._first_user_seed_baseline_branch() or seed.baseline_branch + # Only positive_signal is merged into the per-seed baseline branch; record baseline value otherwise. + if signal != "positive_signal": + self.metrics_repo.append_baseline_run(target_branch, metrics["val_bpb"]) + seed.status = terminal_status + self.run_repo.save(run) + self.seed_repo.save(seed) + self.seed_repo.append_event( + seed.seed_id, + "dca.completed", + "Baseline measurement completed (no promotion); not merged into baseline branch.", + signal=signal, + metrics=metrics, + ) + return run + try: + merge_commit_sha = self.git_service.promote_seed_branch(seed, target_branch=target_branch) + effective_sha = ( + merge_commit_sha + if (isinstance(merge_commit_sha, str) and merge_commit_sha.strip()) + else self._commit_sha_for_branch(target_branch) + ) + self.metrics_repo.append_promotion_for_branch( + target_branch, + { + "val_bpb": metrics["val_bpb"], + "promoted_branch": seed.seed_id, + "promoted_idea": "Initial baseline adaptation", + "promoted_at": summary.get("completed_at"), + "commit_sha": effective_sha, + }, + ) + seed.status = SeedStatus.passed + event_message = f"Baseline measurement completed and __baseline__ was merged into {target_branch}; waiting seeds can now 
start Plan." + self.run_repo.save(run) + self.seed_repo.save(seed) + self.seed_repo.append_event( + seed.seed_id, + "dca.completed", + event_message, + signal=signal, + metrics=metrics, + commit_sha=merge_commit_sha, + ) + self._release_seeds_waiting_for_baseline(target_branch) + return run + except GitCommandError as merge_err: + tried_sha = commit_sha or "" + try: + tried_sha = self.git_service.commit_sha(seed.seed_id) + except GitCommandError: + pass + self.seed_repo.append_event( + seed.seed_id, + "dca.merge_failed", + f"Merge into baseline failed: {merge_err}. Queued a new DCA run to resolve conflicts.", + commit_sha=tried_sha or None, + target_branch=target_branch, + ) + if not merge_resolution: + self.queue_dca( + seed.seed_id, + merge_resolution=True, + last_metrics=metrics, + last_summary=summary, + ) + seed.status = SeedStatus.dca_queued + seed.updated_at = now_ts() + self.seed_repo.save(seed) + self.run_repo.save(run) + self.seed_repo.append_event( + seed.seed_id, + "dca.completed", + "Baseline measurement completed but merge failed; conflict-resolution DCA queued.", + signal=signal, + metrics=metrics, + ) + return run + effective_sha = self._commit_sha_for_branch(target_branch) + self.metrics_repo.append_promotion_for_branch( + target_branch, + { + "val_bpb": metrics["val_bpb"], + "promoted_branch": seed.seed_id, + "promoted_idea": "Initial baseline adaptation", + "promoted_at": summary.get("completed_at"), + "commit_sha": effective_sha, + }, + ) + seed.status = SeedStatus.passed + self.seed_repo.save(seed) + self.run_repo.save(run) + self.seed_repo.append_event( + seed.seed_id, + "dca.completed", + "Baseline measurement completed; merge into baseline branch failed again after resolution run (loop avoided). 
Baseline metrics recorded; manual merge may be needed.", + signal=signal, + metrics=metrics, + ) + self._release_seeds_waiting_for_baseline(target_branch) + return run + if terminal_status is SeedStatus.promoted: + # Merge seed into baseline first on positive signal; then update metrics/state. + try: + merge_commit_sha = self.git_service.promote_seed_branch(seed) + effective_sha = ( + merge_commit_sha + if (isinstance(merge_commit_sha, str) and merge_commit_sha.strip()) + else self._commit_sha_for_branch(seed.baseline_branch) + ) + self.metrics_repo.append_promotion_for_branch( + seed.baseline_branch, + { + "val_bpb": metrics["val_bpb"], + "promoted_branch": seed.seed_id, + "promoted_idea": seed.plan.title if seed.plan else seed.prompt[:80], + "promoted_at": summary.get("completed_at"), + "commit_sha": effective_sha, + }, + ) + seed.status = terminal_status + event_message = "DCA succeeded and seed branch was promoted into baseline." + except GitCommandError as merge_err: + tried_sha = commit_sha or "" + try: + tried_sha = self.git_service.commit_sha(seed.seed_id) + except GitCommandError: + pass + self.seed_repo.append_event( + seed.seed_id, + "dca.merge_failed", + ( + f"Merge into baseline failed: {merge_err}. Queued a new DCA run to resolve conflicts." + if not merge_resolution + else f"Merge into baseline failed again after a conflict-resolution DCA: {merge_err}. " + "Ralph can proceed with the next Plan run." 
+ ), + commit_sha=tried_sha or None, + target_branch=seed.baseline_branch, + ) + if not merge_resolution: + self.queue_dca( + seed.seed_id, + merge_resolution=True, + last_metrics=metrics, + last_summary=summary, + ) + seed.status = SeedStatus.dca_queued + seed.updated_at = now_ts() + self.seed_repo.save(seed) + self.run_repo.save(run) + self.seed_repo.append_event( + seed.seed_id, + "dca.completed", + "DCA run completed but merge failed; conflict-resolution DCA queued.", + signal=signal, + metrics=metrics, + ) + return run + # Resolution run also failed to merge; avoid infinite resolution loop and continue Ralph. + seed.status = SeedStatus.generated + seed.updated_at = now_ts() + self.seed_repo.save(seed) + self.run_repo.save(run) + self.seed_repo.append_event( + seed.seed_id, + "dca.completed", + "Conflict-resolution DCA completed but merge still failed; proceeding to next Plan run.", + signal=signal, + metrics=metrics, + ) + if seed.ralph_loop_enabled: + try: + self.queue_p(seed.seed_id) + self.seed_repo.append_event( + seed.seed_id, + "ralph.requeued", + "Ralph loop queued the next Plan run after unresolved merge conflict.", + ) + except (RuntimeError, GitCommandError) as exc: + self.seed_repo.append_event( + seed.seed_id, + "ralph.requeue_failed", + f"Ralph loop could not queue the next Plan run after unresolved merge conflict: {exc}", + ) + return run + elif terminal_status is SeedStatus.failed: + seed.status = terminal_status + event_message = ( + "DCA metrics recovery could not recover metrics; marked as failed." + if metrics_recovery + else "DCA completed but metrics were missing; marked as failed." + ) + else: + seed.status = terminal_status + event_message = "DCA completed without promotion." 
+ self.run_repo.save(run) + self.seed_repo.save(seed) + event_commit_sha = merge_commit_sha if merge_commit_sha else run.summary.get("commit_sha") + self.seed_repo.append_event( + seed.seed_id, + "dca.completed", + event_message, + signal=signal, + metrics=metrics, + **({"commit_sha": event_commit_sha} if event_commit_sha else {}), + ) + if ( + seed.ralph_loop_enabled + and signal in ("negative_signal", "neutral", "error") + and not merge_resolution + and not metrics_recovery + and seed.seed_id != BASELINE_SEED_ID + ): + self._ralph_try_restore_worktree(seed, run.summary.get("commit_sha_before_p")) + if seed.ralph_loop_enabled: + try: + self.queue_p(seed.seed_id) + self.seed_repo.append_event( + seed.seed_id, + "ralph.requeued", + "Ralph loop queued the next Plan run.", + ) + except (RuntimeError, GitCommandError) as exc: + self.seed_repo.append_event( + seed.seed_id, + "ralph.requeue_failed", + f"Ralph loop could not queue the next Plan run: {exc}", + ) + return run + + def build_dashboard(self, selected_seed_id: str | None = None) -> DashboardViewModel: + seeds = self.seed_repo.list() + selected_seed = self.seed_repo.get(selected_seed_id) if selected_seed_id else None + baseline_metrics_by_branch = self.metrics_repo.get_all() + available_branches: list[str] = [] + setup_error = self.git_service.setup_error() + if setup_error is None: + try: + all_branches = self.git_service.list_branches() + if not all_branches: + setup_error = "No local branches found yet. Create an initial commit/branch, then reload." + else: + available_branches = [ + b for b in all_branches + if not self.git_service.is_seed_specific_branch(b) + ] + # Use only branches that exist in the repo; do not add DEFAULT_BASELINE_BRANCH + # if it does not exist, so the dropdown never shows a non-existent branch. + except GitCommandError as exc: + setup_error = str(exc) + # Default to first existing branch so the selected value is always valid. 
+ default_baseline_branch = (available_branches[0] if available_branches else DEFAULT_BASELINE_BRANCH) or "master" + status_column_map = { + SeedStatus.draft: "seedInbox", + SeedStatus.queued: "seedInbox", + SeedStatus.planning: "generated", + SeedStatus.generated: "generated", + SeedStatus.dca_queued: "generated", + SeedStatus.adapting: "activeDca", + SeedStatus.running: "activeDca", + SeedStatus.passed: "completed", + SeedStatus.failed: "completed", + SeedStatus.promoted: "completed", + } + seeds_by_column: dict[str, list[SeedRecord]] = { + "seedInbox": [], + "generated": [], + "activeDca": [], + "completed": [], + } + for seed in seeds: + self._reconcile_seed_status_signal(seed) + column_id = status_column_map.get(seed.status, "seedInbox") + seeds_by_column[column_id].append(seed) + columns = [ + DashboardColumn( + id="seedInbox", + title="Seed", + description="New prompts and queued planning work.", + seeds=seeds_by_column["seedInbox"], + ), + DashboardColumn( + id="generated", + title="Plan", + description="Planning and generated code ready for Do-Check-Action.", + seeds=seeds_by_column["generated"], + ), + DashboardColumn( + id="activeDca", + title="Do-Check-Action", + description="Adapting, fixing, and running the seed run.", + seeds=seeds_by_column["activeDca"], + ), + DashboardColumn( + id="completed", + title="Completed", + description="Finished runs; promoted seeds merged into baseline.", + seeds=seeds_by_column["completed"], + ), + ] + return DashboardViewModel( + setup_error=setup_error, + baseline_metrics_by_branch=baseline_metrics_by_branch, + default_baseline_branch=default_baseline_branch, + available_branches=available_branches, + seed_count=len(seeds), + columns=columns, + selected_seed=selected_seed, + daemon_status=get_daemon_status(), + ) + + def seed_detail(self, seed_id: str) -> dict[str, object]: + seed = self.require_seed(seed_id) + expected_worktree = ( + self._baseline_worktree_path() + if seed.seed_id == BASELINE_SEED_ID + else 
self._seed_worktree_path(seed.seed_id) + ) + needs_save = False + if expected_worktree is not None and not seed.worktree_path: + seed.worktree_path = expected_worktree + needs_save = True + if needs_save: + seed.updated_at = now_ts() + self.seed_repo.save(seed) + self._reconcile_seed_status_signal(seed) + raw_events = self.seed_repo.events(seed_id) + return { + "seed": seed, + "can_edit_prompt": self.can_edit_seed_prompt(seed), + "runs": self.run_repo.list(seed_id), + "events": _timeline_display_events(raw_events), + "baseline_metrics_for_branch": self.metrics_repo.get_for_branch(seed.baseline_branch), + "setup_error": self.git_service.setup_error_for_branches(seed.baseline_branch), + } + + def seed_detail_versions(self, seed_id: str) -> dict[str, str]: + """Return version fingerprints for runs and timeline so the client can skip refresh when unchanged.""" + self.require_seed(seed_id) + runs = self.run_repo.list(seed_id) + events = self.seed_repo.events(seed_id) + runs_version = ( + ",".join(f"{r.run_id}:{r.status.value}:{r.updated_at}" for r in runs) + if runs + else "0" + ) + timeline_version = ( + ",".join(str(e.get("created_at", "")) for e in events[-20:]) + if events + else "0" + ) + return { + "runs_version": runs_version, + "timeline_version": timeline_version, + } + + def extract_summary(self, output_text: str, stage: StageName) -> dict[str, object] | None: + start_marker, end_marker = SUMMARY_MARKERS[stage.value] + pattern = rf"{start_marker}\s*(\{{.*?\}})\s*{end_marker}" + match = re.search(pattern, output_text, flags=re.DOTALL) + if not match: + return None + try: + return json.loads(match.group(1)) + except json.JSONDecodeError: + return {"raw_summary": match.group(1)} + + def extract_metrics(self, output_text: str) -> dict[str, float | int]: + patterns = { + "val_bpb": r"^val_bpb:\s+([0-9.]+)", + "training_seconds": r"^training_seconds:\s+([0-9.]+)", + "total_seconds": r"^total_seconds:\s+([0-9.]+)", + "startup_seconds": 
r"^startup_seconds:\s+([0-9.]+)", + "peak_vram_mb": r"^peak_vram_mb:\s+([0-9.]+)", + "mfu_percent": r"^mfu_percent:\s+([0-9.]+)", + "total_tokens_M": r"^total_tokens_M:\s+([0-9.]+)", + "num_steps": r"^num_steps:\s+([0-9]+)", + "num_params_M": r"^num_params_M:\s+([0-9.]+)", + "depth": r"^depth:\s+([0-9]+)", + } + metrics: dict[str, float | int] = {} + for key, pattern in patterns.items(): + match = re.search(pattern, output_text, flags=re.MULTILINE) + if not match: + continue + metrics[key] = int(match.group(1)) if key in {"num_steps", "depth"} else float(match.group(1)) + return metrics + + def extract_dca_metrics( + self, output_text: str, summary: dict[str, object] | None = None + ) -> dict[str, float | int]: + if summary: + summary_metrics = summary.get("metrics") + if isinstance(summary_metrics, dict): + parsed: dict[str, float | int] = {} + int_keys = {"num_steps", "depth"} + float_keys = { + "val_bpb", + "training_seconds", + "total_seconds", + "startup_seconds", + "peak_vram_mb", + "mfu_percent", + "total_tokens_M", + "num_params_M", + } + for key in int_keys | float_keys: + value = summary_metrics.get(key) + if value is None: + continue + try: + parsed[key] = int(value) if key in int_keys else float(value) + except (TypeError, ValueError): + continue + if parsed: + return parsed + return self.extract_metrics(output_text) + + @staticmethod + def evaluate_signal( + metrics: dict[str, float | int], + baseline_val_bpb: float | None, + promotion_threshold: float = PROMOTION_THRESHOLD, + ) -> str: + val_bpb = metrics.get("val_bpb") + if val_bpb is None: + return "error" + if baseline_val_bpb is None: + return "positive_signal" + delta = float(baseline_val_bpb) - float(val_bpb) + if delta >= promotion_threshold: + return "positive_signal" + if delta <= -promotion_threshold: + return "negative_signal" + return "neutral" + + +def default_workflow_service() -> WorkflowService: + return WorkflowService() diff --git a/component_system/task.py b/component_system/task.py 
index 7aa32f09f..ea6f40dcf 100644 --- a/component_system/task.py +++ b/component_system/task.py @@ -7,7 +7,7 @@ import time import uuid from pathlib import Path -from typing import Any +from typing import Any, Callable, Literal COMPONENT_SYSTEM_ROOT = Path(__file__).resolve().parent HISTORY_ROOT = COMPONENT_SYSTEM_ROOT / "history" @@ -26,6 +26,7 @@ "dca": QUEUE_ROOT / "dca", "direct": QUEUE_ROOT / "direct", } +IN_PROGRESS_DIR = QUEUE_ROOT / "in_progress" DONE_DIR = QUEUE_ROOT / "done" ERROR_DIR = QUEUE_ROOT / "error" DAEMON_HEARTBEAT_PATH = STATE_ROOT / "daemon_heartbeat.json" @@ -80,6 +81,7 @@ def ensure_queue_layout() -> None: HISTORY_ROOT.mkdir(parents=True, exist_ok=True) for d in STAGE_DIRS.values(): d.mkdir(parents=True, exist_ok=True) + IN_PROGRESS_DIR.mkdir(parents=True, exist_ok=True) DONE_DIR.mkdir(parents=True, exist_ok=True) ERROR_DIR.mkdir(parents=True, exist_ok=True) SEEDS_ROOT.mkdir(parents=True, exist_ok=True) @@ -126,6 +128,10 @@ def read_task(path: Path) -> dict[str, Any]: def move_to_done(path: Path) -> Path: ensure_queue_layout() dest = DONE_DIR / path.name + if not path.exists(): + raise FileNotFoundError( + f"Task file already moved: {path}; possible duplicate daemon or double completion." + ) if dest.exists(): dest.unlink() path.rename(dest) @@ -148,6 +154,60 @@ def list_pending(stage: str) -> list[Path]: return sorted(STAGE_DIRS[stage].glob("*.json")) +def _is_aux_dca_task(payload: dict[str, Any]) -> bool: + return payload.get("metrics_recovery") is True or payload.get("merge_resolution") is True + + +def claim_pending( + stage: str, + lane: Literal["any", "gpu", "aux"] = "any", + eligible_fn: Callable[[dict[str, Any]], bool] | None = None, +) -> Path | None: + """Atomically claim the oldest pending task for a stage/lane. 
If eligible_fn is set, only claim tasks for which it returns True (avoids P/DCA races).""" + ensure_queue_layout() + if stage not in STAGE_DIRS: + raise KeyError(f"Unknown stage {stage!r}") + if lane not in {"any", "gpu", "aux"}: + raise KeyError(f"Unknown lane {lane!r}") + for path in sorted(STAGE_DIRS[stage].glob("*.json")): + payload = _read_json(path, {}) + if eligible_fn is not None and not eligible_fn(payload): + continue + if stage == "dca" and lane != "any": + is_aux = _is_aux_dca_task(payload) + if lane == "aux" and not is_aux: + continue + if lane == "gpu" and is_aux: + continue + claimed_path = IN_PROGRESS_DIR / path.name + try: + path.rename(claimed_path) + return claimed_path + except FileNotFoundError: + continue + except OSError: + # Another worker likely claimed the task first. + continue + return None + + +def restore_in_progress_tasks() -> dict[str, int]: + """Move stranded in-progress tasks back to their stage queue.""" + ensure_queue_layout() + restored = {stage: 0 for stage in STAGE_DIRS} + for path in sorted(IN_PROGRESS_DIR.glob("*.json")): + payload = _read_json(path, {}) + stage = payload.get("stage") + if stage not in STAGE_DIRS: + continue + dest = STAGE_DIRS[stage] / path.name + if dest.exists(): + dest.unlink() + path.rename(dest) + restored[stage] += 1 + return restored + + def seed_path(seed_id: str) -> Path: return SEEDS_ROOT / f"{seed_id}.json" @@ -225,14 +285,21 @@ def save_baseline_branch_map(mapping: dict[str, str]) -> None: _write_json(BASELINE_BRANCHES_PATH, mapping) -def load_baseline_metrics() -> dict[str, dict[str, Any]]: - """Load baseline_branch -> { last_val_bpb, promoted_branch, promoted_at, promoted_idea }.""" +def load_baseline_metrics() -> dict[str, list[dict[str, Any]]]: + """Load baseline_branch -> list of promotion/measurement records. 
Each record: val_bpb, promoted_branch?, promoted_idea?, promoted_at?, commit_sha?.""" ensure_queue_layout() - return _read_json(BASELINE_METRICS_PATH, {}) - - -def save_baseline_metrics(metrics_by_branch: dict[str, dict[str, Any]]) -> None: - """Persist per-branch baseline metrics.""" + raw = _read_json(BASELINE_METRICS_PATH, {}) + result: dict[str, list[dict[str, Any]]] = {} + for branch, value in raw.items(): + if isinstance(value, list): + result[branch] = value + else: + result[branch] = [] + return result + + +def save_baseline_metrics(metrics_by_branch: dict[str, list[dict[str, Any]]]) -> None: + """Persist per-branch baseline metrics (branch -> list of records).""" ensure_queue_layout() _write_json(BASELINE_METRICS_PATH, metrics_by_branch) diff --git a/component_system/web/app.py b/component_system/web/app.py index 18a82ae21..971b666e2 100644 --- a/component_system/web/app.py +++ b/component_system/web/app.py @@ -1,5 +1,7 @@ from __future__ import annotations +import time +from datetime import datetime, timezone from pathlib import Path from fastapi import FastAPI @@ -15,11 +17,31 @@ TEMPLATE_ROOT = WEB_ROOT / "templates" STATIC_ROOT = WEB_ROOT / "static" + +def _static_version() -> str: + """Cache-busting version from app.js mtime so browsers load fresh static assets after changes.""" + app_js = STATIC_ROOT / "app.js" + if app_js.exists(): + return str(int(app_js.stat().st_mtime)) + return str(int(time.time())) + + def create_app() -> FastAPI: ensure_queue_layout() app = FastAPI(title="Component System", version="0.1.0") app.state.workflow = default_workflow_service() + app.state.static_version = _static_version() app.state.templates = Jinja2Templates(directory=str(TEMPLATE_ROOT)) + + def _format_ts(ts: float | None) -> str: + if ts is None: + return "" + try: + return datetime.fromtimestamp(ts, tz=timezone.utc).strftime("%Y-%m-%d %H:%M UTC") + except (TypeError, OSError): + return "" + + app.state.templates.env.filters["format_ts"] = _format_ts 
app.mount("/static", StaticFiles(directory=str(STATIC_ROOT)), name="static") app.include_router(router, prefix="/component-system") diff --git a/component_system/web/routes.py b/component_system/web/routes.py index e5424ccc7..b1d7c01a2 100644 --- a/component_system/web/routes.py +++ b/component_system/web/routes.py @@ -92,10 +92,48 @@ def seed_detail_partial(request: Request, seed_id: str) -> HTMLResponse: "dashboard": dashboard, "selected_seed_id": seed_id, "oob": True, + "daemon_status": get_daemon_status(), } return _render(request, "partials/seed_detail_response.html", context) +@router.get("/api/seeds/{seed_id}/versions") +def seed_versions(request: Request, seed_id: str) -> dict[str, str]: + workflow = _workflow(request) + try: + return workflow.seed_detail_versions(seed_id) + except KeyError as exc: + raise HTTPException(status_code=404, detail=str(exc)) from exc + + +@router.get("/partials/seeds/{seed_id}/runs", response_class=HTMLResponse) +def seed_runs_partial(request: Request, seed_id: str) -> HTMLResponse: + workflow = _workflow(request) + try: + detail = workflow.seed_detail(seed_id) + except KeyError as exc: + raise HTTPException(status_code=404, detail=str(exc)) from exc + return _render( + request, + "partials/seed_runs_inner.html", + {"seed": detail["seed"], "runs": detail["runs"]}, + ) + + +@router.get("/partials/seeds/{seed_id}/timeline", response_class=HTMLResponse) +def seed_timeline_partial(request: Request, seed_id: str) -> HTMLResponse: + workflow = _workflow(request) + try: + detail = workflow.seed_detail(seed_id) + except KeyError as exc: + raise HTTPException(status_code=404, detail=str(exc)) from exc + return _render( + request, + "partials/seed_timeline_inner.html", + {"seed": detail["seed"], "events": detail["events"]}, + ) + + @router.get("/api/runs/{run_id}/prompt") def run_prompt(request: Request, run_id: str) -> dict[str, object]: workflow = _workflow(request) @@ -174,7 +212,7 @@ def seed_detail_page(request: Request, seed_id: 
str) -> HTMLResponse: detail = workflow.seed_detail(seed_id) except KeyError as exc: raise HTTPException(status_code=404, detail=str(exc)) from exc - return _render(request, "seed_detail_page.html", detail) + return _render(request, "seed_detail_page.html", {**detail, "daemon_status": get_daemon_status()}) @router.post("/actions/seeds", response_class=HTMLResponse) @@ -267,6 +305,7 @@ def update_seed_prompt(request: Request, seed_id: str, prompt: str = Form(...)) "dashboard": dashboard, "selected_seed_id": seed_id, "oob": True, + "daemon_status": get_daemon_status(), } return _render(request, "partials/seed_detail_response.html", context) diff --git a/component_system/web/static/app.js b/component_system/web/static/app.js index 77b514abc..34de6c3af 100644 --- a/component_system/web/static/app.js +++ b/component_system/web/static/app.js @@ -22,6 +22,12 @@ function applySelectedSeed(seedId) { let dashboardPollInFlight = false; let seedDetailPollInFlight = false; +let seedVersionsPollInFlight = false; +const lastSeedVersions = {}; +const savedScrollPositions = { runs: null, timeline: null }; +const INTERACTION_DEBOUNCE_MS = 3000; +let lastRunsInteraction = 0; +let lastTimelineInteraction = 0; function seedDetailUrl(seedId) { const detail = document.getElementById("seed-detail"); @@ -32,6 +38,27 @@ function seedDetailUrl(seedId) { return template.replace("__SEED_ID__", encodeURIComponent(seedId)); } +function seedVersionsUrl(seedId) { + const detail = document.getElementById("seed-detail"); + const template = detail?.dataset.seedVersionsUrlTemplate; + if (!template || !seedId) return null; + return template.replace("__SEED_ID__", encodeURIComponent(seedId)); +} + +function seedRunsUrl(seedId) { + const detail = document.getElementById("seed-detail"); + const template = detail?.dataset.seedRunsUrlTemplate; + if (!template || !seedId) return null; + return template.replace("__SEED_ID__", encodeURIComponent(seedId)); +} + +function seedTimelineUrl(seedId) { + const detail 
= document.getElementById("seed-detail"); + const template = detail?.dataset.seedTimelineUrlTemplate; + if (!template || !seedId) return null; + return template.replace("__SEED_ID__", encodeURIComponent(seedId)); +} + function isLogViewerOpen() { const target = document.getElementById("seed-detail"); if (!target) { @@ -91,15 +118,81 @@ function pollSeedDetail() { }); } -function pollDashboard() { - if (document.hidden) { - return; +function applyRunsPartial(seedId) { + const listEl = document.getElementById("seed-runs-list"); + const paneEl = document.getElementById("seed-runs-scroll-pane"); + const url = seedRunsUrl(seedId); + if (!listEl || !url) return Promise.resolve(); + savedScrollPositions.runs = paneEl ? paneEl.scrollTop : null; + return htmx.ajax("GET", url, { target: "#seed-runs-list", swap: "innerHTML" }); +} + +function applyTimelinePartial(seedId) { + const listEl = document.getElementById("seed-timeline-list"); + const paneEl = document.getElementById("seed-timeline-scroll-pane"); + const url = seedTimelineUrl(seedId); + if (!listEl || !url) return Promise.resolve(); + savedScrollPositions.timeline = paneEl ? paneEl.scrollTop : null; + return htmx.ajax("GET", url, { target: "#seed-timeline-list", swap: "innerHTML" }); +} + +function pollSeedDetailSections() { + const seedId = selectedSeedIdFromUrl(); + if (!seedId || isLogViewerOpen()) return; + const versionsUrl = seedVersionsUrl(seedId); + if (!versionsUrl || seedVersionsPollInFlight) return; + seedVersionsPollInFlight = true; + fetch(versionsUrl) + .then((r) => (r.ok ? 
r.json() : null)) + .then((data) => { + if (!data) return; + const prev = lastSeedVersions[seedId] || {}; + const runsChanged = data.runs_version !== prev.runs_version; + const timelineChanged = data.timeline_version !== prev.timeline_version; + lastSeedVersions[seedId] = { + runs_version: data.runs_version, + timeline_version: data.timeline_version, + }; + const now = Date.now(); + const runsIdle = now - lastRunsInteraction >= INTERACTION_DEBOUNCE_MS; + const timelineIdle = now - lastTimelineInteraction >= INTERACTION_DEBOUNCE_MS; + const promises = []; + if (runsChanged && runsIdle) promises.push(applyRunsPartial(seedId)); + if (timelineChanged && timelineIdle) promises.push(applyTimelinePartial(seedId)); + return Promise.all(promises); + }) + .finally(() => { + seedVersionsPollInFlight = false; + }); +} + +function attachScrollPaneInteractionGuards() { + const runsPane = document.getElementById("seed-runs-scroll-pane"); + const timelinePane = document.getElementById("seed-timeline-scroll-pane"); + function onRunsActivity() { + lastRunsInteraction = Date.now(); } - if (isLogViewerOpen()) { - return; + function onTimelineActivity() { + lastTimelineInteraction = Date.now(); } + runsPane?.addEventListener("scroll", onRunsActivity, { passive: true }); + runsPane?.addEventListener("mouseenter", onRunsActivity); + runsPane?.addEventListener("focusin", onRunsActivity); + timelinePane?.addEventListener("scroll", onTimelineActivity, { passive: true }); + timelinePane?.addEventListener("mouseenter", onTimelineActivity); + timelinePane?.addEventListener("focusin", onTimelineActivity); +} + +function pollDashboard() { + if (document.hidden) return; + if (isLogViewerOpen()) return; pollDashboardBoard(); - pollSeedDetail(); + const seedId = selectedSeedIdFromUrl(); + if (seedId && document.getElementById("seed-runs-list")) { + pollSeedDetailSections(); + } else if (seedId && !document.getElementById("seed-runs-list")) { + pollSeedDetail(); + } } 
document.body.addEventListener("htmx:beforeRequest", (event) => { @@ -123,8 +216,28 @@ document.body.addEventListener("click", (event) => { document.body.addEventListener("htmx:afterSettle", (event) => { const target = event.detail?.target; - if (target && target.id === "seed-detail") { + if (!target) return; + if (target.id === "seed-detail") { applySelectedSeed(selectedSeedIdFromUrl()); + attachScrollPaneInteractionGuards(); + return; + } + if (target.id === "seed-runs-list") { + const pane = document.getElementById("seed-runs-scroll-pane"); + if (pane && savedScrollPositions.runs != null) { + pane.scrollTop = savedScrollPositions.runs; + savedScrollPositions.runs = null; + } + initializeLogStreams(target.closest("#seed-detail") || document); + return; + } + if (target.id === "seed-timeline-list") { + const pane = document.getElementById("seed-timeline-scroll-pane"); + if (pane && savedScrollPositions.timeline != null) { + pane.scrollTop = savedScrollPositions.timeline; + savedScrollPositions.timeline = null; + } + return; } }); @@ -133,6 +246,7 @@ window.addEventListener("popstate", () => { }); applySelectedSeed(selectedSeedIdFromUrl()); +attachScrollPaneInteractionGuards(); window.setInterval(pollDashboard, 5000); const logStreamIntervals = new Map(); diff --git a/component_system/web/templates/base.html b/component_system/web/templates/base.html index ee1ac5364..ef14a6341 100644 --- a/component_system/web/templates/base.html +++ b/component_system/web/templates/base.html @@ -8,8 +8,8 @@ - - + +
diff --git a/component_system/web/templates/dashboard.html b/component_system/web/templates/dashboard.html index 5bff902d4..103f87f32 100644 --- a/component_system/web/templates/dashboard.html +++ b/component_system/web/templates/dashboard.html @@ -55,13 +55,13 @@

Create Seed

{% endwith %}

Baseline branches

-

Per-branch metrics (last val_bpb, promoted seed). Workflow-managed, read-only: component_system/baseline_branches.json (per-branch mapping), component_system/baseline_metrics.json (baseline run metrics).

+

Per-branch best val_bpb from baseline_metrics.json. Workflow-managed, read-only: component_system/baseline_branches.json (per-branch mapping), component_system/baseline_metrics.json (baseline run metrics).

{% if dashboard.baseline_metrics_by_branch %}
{% for branch, m in dashboard.baseline_metrics_by_branch.items() %}
{{ branch }}
-
val_bpb {{ "%.6f"|format(m.get('last_val_bpb')) if m.get('last_val_bpb') is not none else "—" }} · {{ m.get('promoted_branch') or "—" }}
+
val_bpb {{ "%.6f"|format(m.get('best_val_bpb')) if m.get('best_val_bpb') is not none else "—" }} · {{ m.get('promoted_branch') or "—" }}{% if m.get('commit_sha') %} · {{ m.get('commit_sha')[:7] }}{% endif %}
{% endfor %}
@@ -97,6 +97,9 @@

Create Seed

{% if detail %} {% with @@ -104,7 +107,8 @@

Create Seed

runs=detail.runs, events=detail.events, baseline_metrics_for_branch=detail.baseline_metrics_for_branch, - setup_error=detail.setup_error + setup_error=detail.setup_error, + daemon_status=dashboard.daemon_status %} {% include "partials/seed_detail.html" %} {% endwith %} diff --git a/component_system/web/templates/partials/seed_detail.html b/component_system/web/templates/partials/seed_detail.html index 93f5439cb..89e81a0cf 100644 --- a/component_system/web/templates/partials/seed_detail.html +++ b/component_system/web/templates/partials/seed_detail.html @@ -125,155 +125,13 @@

Plan

Runs

- {% if runs and seed.status.value in ['queued', 'planning'] %} + {% if runs and seed.status.value in ['queued', 'planning'] and (daemon_status|default('')) != 'running' %}

Runs stay queued until the daemon is running. Start: uv run component_system/run.py

{% endif %} -
- {% if runs %} - {% for run in runs %} -
-
-
-

{% if run.stage.value == 'p' %}Plan{% else %}{{ run.stage.value|upper }}{% endif %} · {{ run.status.value }}

-

{{ run.run_id }}

-
-
- {% if run.signal %} - {{ run.signal }} - {% endif %} - -
-
- {% if run.metrics %} -
- {% for key, value in run.metrics.items() %} -
-
{{ key }}
-
{{ value }}
-
- {% endfor %} -
- {% endif %} -
- - {% endfor %} - {% else %} -

No runs yet. Use Run Plan to start.

- {% endif %} +
+
+ {% include "partials/seed_runs_inner.html" %} +
@@ -302,23 +160,10 @@

Timeline

-
- {% if events %} - {% for event in events %} -
-

{{ event.message }}

- {% if event.commit_sha %} -

commit: {{ event.commit_sha }}

- {% endif %} - {% if event.target_branch %} -

target branch: {{ event.target_branch }}

- {% endif %} -

{{ event.kind }} · {{ event.created_at_human }}

-
- {% endfor %} - {% else %} -

No events yet.

- {% endif %} +
+
+ {% include "partials/seed_timeline_inner.html" %} +
diff --git a/component_system/web/templates/partials/seed_detail_response.html b/component_system/web/templates/partials/seed_detail_response.html index ca64317e3..c2b27e70a 100644 --- a/component_system/web/templates/partials/seed_detail_response.html +++ b/component_system/web/templates/partials/seed_detail_response.html @@ -1,4 +1 @@ -{% with oob=True %} - {% include "partials/dashboard_board.html" %} -{% endwith %} {% include "partials/seed_detail.html" %} diff --git a/component_system/web/templates/partials/seed_runs_inner.html b/component_system/web/templates/partials/seed_runs_inner.html index 5ca9a1525..3d4c3c676 100644 --- a/component_system/web/templates/partials/seed_runs_inner.html +++ b/component_system/web/templates/partials/seed_runs_inner.html @@ -2,11 +2,14 @@ {% for run in runs %}
-
-

{% if run.stage.value == 'p' %}Plan{% else %}{{ run.stage.value|upper }}{% endif %} · {{ run.status.value }}{% if run.created_at %} {{ run.created_at|format_ts }}{% endif %}

-

{{ run.run_id }}

+
+ {% if run.created_at %} + + {% endif %} +

{% if run.stage.value == 'p' %}Plan{% else %}{{ run.stage.value|upper }}{% endif %} · {{ run.status.value }}

+

{{ run.run_id }}

-
+
{% if run.signal %} {{ run.signal }} {% endif %} @@ -26,6 +29,22 @@
+ {% if run.stage.value == 'p' and run.summary and (run.summary.get('idea') or run.summary.get('description')) %} +
+ {% if run.summary.get('idea') %} +
+ +

{{ run.summary.get('idea', '') }}

+
+ {% endif %} + {% if run.summary.get('description') %} +
+ +

{{ run.summary.get('description', '') }}

+
+ {% endif %} +
+ {% endif %} {% if run.metrics %}
{% for key, value in run.metrics.items() %} diff --git a/component_system/web/templates/seed_detail_page.html b/component_system/web/templates/seed_detail_page.html index ec7ca146c..4fa3d6f0c 100644 --- a/component_system/web/templates/seed_detail_page.html +++ b/component_system/web/templates/seed_detail_page.html @@ -9,6 +9,9 @@
{% include "partials/seed_detail.html" %}
diff --git a/pyproject.toml b/pyproject.toml index d95798a99..8882b6fab 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -5,6 +5,7 @@ description = "Autonomous pretraining research swarm" readme = "README.md" requires-python = ">=3.10" dependencies = [ + "arxiv>=2.4.1", "fastapi>=0.116.0", "jinja2>=3.1.6", "kernels>=0.11.7", diff --git a/scripts/clean_history.py b/scripts/clean_history.py index df085d0ce..922809145 100644 --- a/scripts/clean_history.py +++ b/scripts/clean_history.py @@ -7,16 +7,32 @@ 3) Delete all local branches except main 4) Clear component_system runtime state/history folders 5) Remove .pytest_cache, __pycache__, and results.tsv + +With --preserve-seeds SEED_IDS: keep everything for those seeds (state, events, runs, +queue tasks, worktrees, branches, logs, baseline mappings); remove only other seeds' data. +SEED_IDS can be comma-separated, e.g. --preserve-seeds seed-a,seed-b,seed-c. """ from __future__ import annotations import argparse +import json import shutil import subprocess from pathlib import Path +def _read_json(path: Path, default: object) -> object: + if not path.exists(): + return default + return json.loads(path.read_text(encoding="utf-8")) + + +def _write_json(path: Path, payload: object) -> None: + path.parent.mkdir(parents=True, exist_ok=True) + path.write_text(json.dumps(payload, indent=2, ensure_ascii=False), encoding="utf-8") + + def run_git(args: list[str], cwd: Path, dry_run: bool = False) -> list[str]: cmd = ["git", *args] if dry_run: @@ -66,15 +82,196 @@ def remove_pycache_dirs(repo_root: Path, dry_run: bool = False) -> None: shutil.rmtree(pycache, ignore_errors=True) +def _gather_preserved_seed_info( + repo_root: Path, seed_ids: list[str] +) -> tuple[set[str], set[str]]: + """Return (preserved_run_ids, baseline_branches). 
Exits if any seed not found.""" + comp = repo_root / "component_system" + history = comp / "history" + state = history / "state" + seeds_dir = state / "seeds" + runs_dir = state / "runs" + preserved_ids = set(seed_ids) + baseline_branches: set[str] = set() + run_ids: set[str] = set() + + for seed_id in seed_ids: + seed_file = seeds_dir / f"{seed_id}.json" + if not seed_file.exists(): + raise SystemExit(f"Seed not found: {seed_id} (no {seed_file})") + seed_data = _read_json(seed_file, {}) + if isinstance(seed_data, dict): + bl = seed_data.get("baseline_branch") + if isinstance(bl, str): + baseline_branches.add(bl) + + for path in runs_dir.glob("*.json"): + data = _read_json(path, {}) + if isinstance(data, dict) and data.get("seed_id") in preserved_ids: + rid = data.get("run_id") + if isinstance(rid, str): + run_ids.add(rid) + return run_ids, baseline_branches + + +def _clean_state_preserving_seeds( + repo_root: Path, + preserved_seed_ids: set[str], + preserved_run_ids: set[str], + dry_run: bool, +) -> None: + comp = repo_root / "component_system" + history = comp / "history" + state = history / "state" + seeds_dir = state / "seeds" + events_dir = state / "events" + runs_dir = state / "runs" + + for path in seeds_dir.glob("*.json"): + if path.stem not in preserved_seed_ids: + if dry_run: + print(f"[dry-run] remove {path}") + else: + path.unlink(missing_ok=True) + + for path in events_dir.glob("*.json"): + if path.stem not in preserved_seed_ids: + if dry_run: + print(f"[dry-run] remove {path}") + else: + path.unlink(missing_ok=True) + + for path in runs_dir.glob("*.json"): + data = _read_json(path, {}) + rid = data.get("run_id") if isinstance(data, dict) else None + if rid not in preserved_run_ids: + if dry_run: + print(f"[dry-run] remove {path}") + else: + path.unlink(missing_ok=True) + + +def _clean_queue_preserving_seeds( + repo_root: Path, preserved_seed_ids: set[str], dry_run: bool +) -> None: + history = repo_root / "component_system" / "history" / "queue" + 
stage_dirs = [ + history / "p", + history / "dca", + history / "direct", + history / "in_progress", + history / "done", + history / "error", + ] + for stage_dir in stage_dirs: + if not stage_dir.exists(): + continue + for path in stage_dir.glob("*.json"): + data = _read_json(path, {}) + task_seed = data.get("seed_id") if isinstance(data, dict) else None + if task_seed not in preserved_seed_ids: + if dry_run: + print(f"[dry-run] remove {path}") + else: + path.unlink(missing_ok=True) + + +def _clean_worktrees_preserving_seeds( + repo_root: Path, + preserved_seed_ids: set[str], + dry_run: bool, +) -> None: + worktrees_dir = repo_root / "component_system" / "history" / "worktrees" + if not worktrees_dir.exists(): + return + keep_names = preserved_seed_ids | {"baseline"} + for child in worktrees_dir.iterdir(): + if child.is_dir() and child.name not in keep_names: + if dry_run: + print(f"[dry-run] remove {child}") + else: + shutil.rmtree(child, ignore_errors=True) + + +def _clean_logs_preserving_seed( + repo_root: Path, + preserved_run_ids: set[str], + dry_run: bool, +) -> None: + logs_dir = repo_root / "component_system" / "history" / "logs" + if not logs_dir.exists(): + return + for path in logs_dir.iterdir(): + if not path.is_file(): + continue + # logs: {run_id}.stdout.log, {run_id}.stderr.log, {run_id}.prompt.txt + run_id = path.stem + if path.suffix in (".log", ".txt"): + run_id = run_id.rsplit(".", 1)[0] if "." 
in run_id else run_id + if run_id not in preserved_run_ids: + if dry_run: + print(f"[dry-run] remove {path}") + else: + path.unlink(missing_ok=True) + + +def _filter_baseline_jsons_preserving_seeds( + repo_root: Path, + preserved_seed_ids: set[str], + baseline_branches: set[str], + dry_run: bool, +) -> None: + comp = repo_root / "component_system" + branches_path = comp / "baseline_branches.json" + metrics_path = comp / "baseline_metrics.json" + + if branches_path.exists(): + data = _read_json(branches_path, {}) + if isinstance(data, dict): + new_data = {k: v for k, v in data.items() if k in preserved_seed_ids} + if dry_run: + print(f"[dry-run] write {branches_path} (keep {preserved_seed_ids})") + else: + _write_json(branches_path, new_data) + + if metrics_path.exists(): + data = _read_json(metrics_path, {}) + if isinstance(data, dict): + keep_branches = preserved_seed_ids | baseline_branches + new_data = {k: v for k, v in data.items() if k in keep_branches} + if dry_run: + print(f"[dry-run] write {metrics_path} (keep branches {keep_branches})") + else: + _write_json(metrics_path, new_data) + + def main() -> None: parser = argparse.ArgumentParser(description="Clean local branches/worktrees and runtime history.") - parser.add_argument("--main-branch", default="master", help="Branch to keep. Default: main") + parser.add_argument("--main-branch", default="master", help="Branch to keep. Default: master") + parser.add_argument( + "--preserve-seeds", + metavar="SEED_IDS", + help="Comma-separated seed IDs to keep (e.g. seed-a,seed-b). 
Keep their state, events, runs, queue, worktrees, branches, logs, baseline mappings; remove only other seeds.", + ) parser.add_argument("--dry-run", action="store_true", help="Print actions without changing anything") args = parser.parse_args() repo_root = Path.cwd().resolve() print(f"Repository: {repo_root}") + raw_preserve = getattr(args, "preserve_seeds", None) + preserve_seeds: list[str] = ( + [s.strip() for s in raw_preserve.split(",") if s.strip()] if raw_preserve else [] + ) + preserved_run_ids: set[str] = set() + baseline_branches: set[str] = set() + preserved_seed_ids: set[str] = set() + if preserve_seeds: + preserved_seed_ids = set(preserve_seeds) + print(f"Preserving everything for seeds: {', '.join(sorted(preserved_seed_ids))}") + preserved_run_ids, baseline_branches = _gather_preserved_seed_info(repo_root, preserve_seeds) + print(f" runs to keep: {len(preserved_run_ids)}") + print("Verifying git repository...") run_git(["rev-parse", "--is-inside-work-tree"], cwd=repo_root, dry_run=args.dry_run) @@ -89,36 +286,59 @@ def main() -> None: if line.startswith("worktree "): worktrees.append(Path(line[len("worktree ") :]).resolve()) + branches_to_keep = {args.main_branch} | preserved_seed_ids + worktree_keep_names = preserved_seed_ids | {"baseline"} if preserved_seed_ids else set() + for wt in worktrees: - if wt != repo_root: - print(f" - removing worktree {wt}") - try: - run_git(["worktree", "remove", "--force", str(wt)], cwd=repo_root, dry_run=args.dry_run) - except RuntimeError as error: - if not is_broken_worktree_remove_error(error): - raise - print(f" ! 
stale/broken worktree metadata detected, deleting directory: {wt}") - if args.dry_run: - print(f"[dry-run] remove {wt}") - else: - shutil.rmtree(wt, ignore_errors=True) + if wt == repo_root: + continue + if worktree_keep_names and wt.name in worktree_keep_names: + print(f" - keeping worktree {wt} (preserved: {wt.name})") + continue + print(f" - removing worktree {wt}") + try: + run_git(["worktree", "remove", "--force", str(wt)], cwd=repo_root, dry_run=args.dry_run) + except RuntimeError as error: + if not is_broken_worktree_remove_error(error): + raise + print(f" ! stale/broken worktree metadata detected, deleting directory: {wt}") + if args.dry_run: + print(f"[dry-run] remove {wt}") + else: + shutil.rmtree(wt, ignore_errors=True) run_git(["worktree", "prune"], cwd=repo_root, dry_run=args.dry_run) - print(f"Deleting local branches except '{args.main_branch}'...") + print(f"Deleting local branches except {sorted(branches_to_keep)}...") branches = run_git( ["for-each-ref", "--format=%(refname:short)", "refs/heads"], cwd=repo_root, dry_run=args.dry_run, ) for branch in branches: - if branch != args.main_branch: + if branch not in branches_to_keep: print(f" - deleting branch {branch}") run_git(["branch", "-D", branch], cwd=repo_root, dry_run=args.dry_run) - print("Clearing component-system runtime/history artifacts...") history_root = repo_root / "component_system" / "history" - for name in ("state", "queue", "worktrees", "logs"): - remove_children(history_root / name, dry_run=args.dry_run) + if preserved_seed_ids: + print("Clearing component-system state (keeping preserved seeds)...") + _clean_state_preserving_seeds( + repo_root, preserved_seed_ids, preserved_run_ids, args.dry_run + ) + print("Clearing queue (keeping tasks for preserved seeds)...") + _clean_queue_preserving_seeds(repo_root, preserved_seed_ids, args.dry_run) + print("Clearing worktrees (keeping preserved seeds + baseline)...") + _clean_worktrees_preserving_seeds(repo_root, preserved_seed_ids, 
args.dry_run) + print("Clearing logs (keeping logs for preserved seed runs)...") + _clean_logs_preserving_seed(repo_root, preserved_run_ids, args.dry_run) + print("Filtering baseline_branches.json and baseline_metrics.json...") + _filter_baseline_jsons_preserving_seeds( + repo_root, preserved_seed_ids, baseline_branches, args.dry_run + ) + else: + print("Clearing component-system runtime/history artifacts...") + for name in ("state", "queue", "worktrees", "logs"): + remove_children(history_root / name, dry_run=args.dry_run) pytest_cache = repo_root / ".pytest_cache" if pytest_cache.exists(): diff --git a/uv.lock b/uv.lock index 931a2d7d9..90a4e722b 100644 --- a/uv.lock +++ b/uv.lock @@ -50,11 +50,26 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/38/0e/27be9fdef66e72d64c0cdc3cc2823101b80585f8119b5c112c2e8f5f7dab/anyio-4.12.1-py3-none-any.whl", hash = "sha256:d405828884fc140aa80a3c667b8beed277f1dfedec42ba031bd6ac3db606ab6c", size = 113592, upload-time = "2026-01-06T11:45:19.497Z" }, ] +[[package]] +name = "arxiv" +version = "2.4.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "feedparser" }, + { name = "requests" }, + { name = "typing-extensions", marker = "python_full_version < '3.11'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/24/6e/647dd134e66d3ea6ff8aba2a177a37c74245625cfc58184e3aff99c8d8ec/arxiv-2.4.1.tar.gz", hash = "sha256:691606c1069bcca8316fcb082f5d15e65f1f24a021b0b87f01b9fa56347f63c8", size = 74975, upload-time = "2026-03-04T03:05:33.991Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/3d/6a/297380dc42fa25dff095feda66d46f7abba77ba54579d079071a2459e8d3/arxiv-2.4.1-py3-none-any.whl", hash = "sha256:060d678410ffc224ada01089f877b7676f250e37f96c140bad6c287afadb15d8", size = 12106, upload-time = "2026-03-04T03:05:33.029Z" }, +] + [[package]] name = "autoresearch" version = "0.1.0" source = { virtual = "." 
} dependencies = [ + { name = "arxiv" }, { name = "fastapi" }, { name = "jinja2" }, { name = "kernels" }, @@ -74,6 +89,7 @@ dependencies = [ [package.metadata] requires-dist = [ + { name = "arxiv", specifier = ">=2.4.1" }, { name = "fastapi", specifier = ">=0.116.0" }, { name = "jinja2", specifier = ">=3.1.6" }, { name = "kernels", specifier = ">=0.11.7" }, @@ -412,6 +428,18 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/e4/72/42e900510195b23a56bde950d26a51f8b723846bfcaa0286e90287f0422b/fastapi-0.135.1-py3-none-any.whl", hash = "sha256:46e2fc5745924b7c840f71ddd277382af29ce1cdb7d5eab5bf697e3fb9999c9e", size = 116999, upload-time = "2026-03-01T18:18:30.831Z" }, ] +[[package]] +name = "feedparser" +version = "6.0.12" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "sgmllib3k" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/dc/79/db7edb5e77d6dfbc54d7d9df72828be4318275b2e580549ff45a962f6461/feedparser-6.0.12.tar.gz", hash = "sha256:64f76ce90ae3e8ef5d1ede0f8d3b50ce26bcce71dd8ae5e82b1cd2d4a5f94228", size = 286579, upload-time = "2025-09-10T13:33:59.486Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/4e/eb/c96d64137e29ae17d83ad2552470bafe3a7a915e85434d9942077d7fd011/feedparser-6.0.12-py3-none-any.whl", hash = "sha256:6bbff10f5a52662c00a2e3f86a38928c37c48f77b3c511aedcd51de933549324", size = 81480, upload-time = "2025-09-10T13:33:58.022Z" }, +] + [[package]] name = "filelock" version = "3.24.3" @@ -1997,6 +2025,12 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/e1/c6/76dc613121b793286a3f91621d7b75a2b493e0390ddca50f11993eadf192/setuptools-82.0.0-py3-none-any.whl", hash = "sha256:70b18734b607bd1da571d097d236cfcfacaf01de45717d59e6e04b96877532e0", size = 1003468, upload-time = "2026-02-08T15:08:38.723Z" }, ] +[[package]] +name = "sgmllib3k" +version = "1.0.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = 
"https://files.pythonhosted.org/packages/9e/bd/3704a8c3e0942d711c1299ebf7b9091930adae6675d7c8f476a7ce48653c/sgmllib3k-1.0.0.tar.gz", hash = "sha256:7868fb1c8bfa764c1ac563d3cf369c381d1325d36124933a726f29fcdaa812e9", size = 5750, upload-time = "2010-08-24T14:33:52.445Z" } + [[package]] name = "shellingham" version = "1.5.4" From 1b0ec7f3a1ac756ebc71fcb339493c4b44e2221b Mon Sep 17 00:00:00 2001 From: Laurence Date: Thu, 12 Mar 2026 20:28:50 +0800 Subject: [PATCH 10/24] Add arxiv dependency --- pyproject.toml | 1 + uv.lock | 36 +++++++++++++++++++++++++++++++++++- 2 files changed, 36 insertions(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index d95798a99..9844e098a 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -5,6 +5,7 @@ description = "Autonomous pretraining research swarm" readme = "README.md" requires-python = ">=3.10" dependencies = [ + "arxiv", "fastapi>=0.116.0", "jinja2>=3.1.6", "kernels>=0.11.7", diff --git a/uv.lock b/uv.lock index 931a2d7d9..87dce5a92 100644 --- a/uv.lock +++ b/uv.lock @@ -1,5 +1,5 @@ version = 1 -revision = 2 +revision = 3 requires-python = ">=3.10" resolution-markers = [ "python_full_version >= '3.14' and sys_platform == 'linux'", @@ -50,11 +50,26 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/38/0e/27be9fdef66e72d64c0cdc3cc2823101b80585f8119b5c112c2e8f5f7dab/anyio-4.12.1-py3-none-any.whl", hash = "sha256:d405828884fc140aa80a3c667b8beed277f1dfedec42ba031bd6ac3db606ab6c", size = 113592, upload-time = "2026-01-06T11:45:19.497Z" }, ] +[[package]] +name = "arxiv" +version = "2.4.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "feedparser" }, + { name = "requests" }, + { name = "typing-extensions", marker = "python_full_version < '3.11'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/24/6e/647dd134e66d3ea6ff8aba2a177a37c74245625cfc58184e3aff99c8d8ec/arxiv-2.4.1.tar.gz", hash = "sha256:691606c1069bcca8316fcb082f5d15e65f1f24a021b0b87f01b9fa56347f63c8", 
size = 74975, upload-time = "2026-03-04T03:05:33.991Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/3d/6a/297380dc42fa25dff095feda66d46f7abba77ba54579d079071a2459e8d3/arxiv-2.4.1-py3-none-any.whl", hash = "sha256:060d678410ffc224ada01089f877b7676f250e37f96c140bad6c287afadb15d8", size = 12106, upload-time = "2026-03-04T03:05:33.029Z" }, +] + [[package]] name = "autoresearch" version = "0.1.0" source = { virtual = "." } dependencies = [ + { name = "arxiv" }, { name = "fastapi" }, { name = "jinja2" }, { name = "kernels" }, @@ -74,6 +89,7 @@ dependencies = [ [package.metadata] requires-dist = [ + { name = "arxiv" }, { name = "fastapi", specifier = ">=0.116.0" }, { name = "jinja2", specifier = ">=3.1.6" }, { name = "kernels", specifier = ">=0.11.7" }, @@ -412,6 +428,18 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/e4/72/42e900510195b23a56bde950d26a51f8b723846bfcaa0286e90287f0422b/fastapi-0.135.1-py3-none-any.whl", hash = "sha256:46e2fc5745924b7c840f71ddd277382af29ce1cdb7d5eab5bf697e3fb9999c9e", size = 116999, upload-time = "2026-03-01T18:18:30.831Z" }, ] +[[package]] +name = "feedparser" +version = "6.0.12" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "sgmllib3k" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/dc/79/db7edb5e77d6dfbc54d7d9df72828be4318275b2e580549ff45a962f6461/feedparser-6.0.12.tar.gz", hash = "sha256:64f76ce90ae3e8ef5d1ede0f8d3b50ce26bcce71dd8ae5e82b1cd2d4a5f94228", size = 286579, upload-time = "2025-09-10T13:33:59.486Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/4e/eb/c96d64137e29ae17d83ad2552470bafe3a7a915e85434d9942077d7fd011/feedparser-6.0.12-py3-none-any.whl", hash = "sha256:6bbff10f5a52662c00a2e3f86a38928c37c48f77b3c511aedcd51de933549324", size = 81480, upload-time = "2025-09-10T13:33:58.022Z" }, +] + [[package]] name = "filelock" version = "3.24.3" @@ -1997,6 +2025,12 @@ wheels = [ { url = 
"https://files.pythonhosted.org/packages/e1/c6/76dc613121b793286a3f91621d7b75a2b493e0390ddca50f11993eadf192/setuptools-82.0.0-py3-none-any.whl", hash = "sha256:70b18734b607bd1da571d097d236cfcfacaf01de45717d59e6e04b96877532e0", size = 1003468, upload-time = "2026-02-08T15:08:38.723Z" }, ] +[[package]] +name = "sgmllib3k" +version = "1.0.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/9e/bd/3704a8c3e0942d711c1299ebf7b9091930adae6675d7c8f476a7ce48653c/sgmllib3k-1.0.0.tar.gz", hash = "sha256:7868fb1c8bfa764c1ac563d3cf369c381d1325d36124933a726f29fcdaa812e9", size = 5750, upload-time = "2010-08-24T14:33:52.445Z" } + [[package]] name = "shellingham" version = "1.5.4" From e4f489580bcf76a7124194a7efd2956b1c4cafdb Mon Sep 17 00:00:00 2001 From: Laurence Date: Thu, 12 Mar 2026 21:00:29 +0800 Subject: [PATCH 11/24] Add local changes to model, trainer, and clean_history --- component_system/components/model.py | 88 +++++--------------------- component_system/components/trainer.py | 23 ++----- scripts/clean_history.py | 18 ++++-- 3 files changed, 31 insertions(+), 98 deletions(-) diff --git a/component_system/components/model.py b/component_system/components/model.py index 59c5be10c..f74d89386 100644 --- a/component_system/components/model.py +++ b/component_system/components/model.py @@ -13,18 +13,12 @@ def _get_fa3(): if torch.cuda.is_available(): cap = torch.cuda.get_device_capability() - repo = ( - "varunneal/flash-attention-3" - if cap == (9, 0) - else "kernels-community/flash-attn3" - ) + repo = "varunneal/flash-attention-3" if cap == (9, 0) else "kernels-community/flash-attn3" return get_kernel(repo).flash_attn_interface return None - _fa3 = None - def get_fa3(): global _fa3 if _fa3 is None: @@ -51,9 +45,7 @@ def has_ve(layer_idx: int, n_layer: int) -> bool: return layer_idx % 2 == (n_layer - 1) % 2 -def apply_rotary_emb( - x: torch.Tensor, cos: torch.Tensor, sin: torch.Tensor -) -> torch.Tensor: +def 
apply_rotary_emb(x: torch.Tensor, cos: torch.Tensor, sin: torch.Tensor) -> torch.Tensor: assert x.ndim == 4 d = x.shape[3] // 2 x1, x2 = x[..., :d], x[..., d:] @@ -106,9 +98,7 @@ def forward( fa3 = get_fa3() if fa3 is None: - raise RuntimeError( - "Flash Attention 3 is unavailable; component_system model should match train.py and requires the same kernel path." - ) + raise RuntimeError("Flash Attention 3 is unavailable; component_system model should match train.py and requires the same kernel path.") y = fa3.flash_attn_func(q, k, v, causal=True, window_size=window_size) y = y.contiguous().view(batch_size, seq_len, -1) return self.c_proj(y) @@ -117,19 +107,13 @@ def forward( class MLP(nn.Module): def __init__(self, config: GPTConfig) -> None: super().__init__() - # SwiGLU: 2/3 * 4x = ~2.67x expansion for comparable parameters to ReLU^2 - hidden_dim = int(4 * config.n_embd * 2 / 3) - hidden_dim = ((hidden_dim + 127) // 128) * 128 # Round to multiple of 128 - self.gate_proj = nn.Linear(config.n_embd, hidden_dim, bias=False) - self.up_proj = nn.Linear(config.n_embd, hidden_dim, bias=False) - self.down_proj = nn.Linear(hidden_dim, config.n_embd, bias=False) + self.c_fc = nn.Linear(config.n_embd, 4 * config.n_embd, bias=False) + self.c_proj = nn.Linear(4 * config.n_embd, config.n_embd, bias=False) def forward(self, x: torch.Tensor) -> torch.Tensor: - # SwiGLU: gate * SiLU(up_proj) - gate = self.gate_proj(x) - up = self.up_proj(x) - x = gate * F.silu(up) - x = self.down_proj(x) + x = self.c_fc(x) + x = F.relu(x).square() + x = self.c_proj(x) return x @@ -190,9 +174,8 @@ def init_weights(self) -> None: torch.nn.init.uniform_(block.attn.c_k.weight, -scale, scale) torch.nn.init.uniform_(block.attn.c_v.weight, -scale, scale) torch.nn.init.zeros_(block.attn.c_proj.weight) - torch.nn.init.uniform_(block.mlp.gate_proj.weight, -scale, scale) - torch.nn.init.uniform_(block.mlp.up_proj.weight, -scale, scale) - torch.nn.init.zeros_(block.mlp.down_proj.weight) + 
torch.nn.init.uniform_(block.mlp.c_fc.weight, -scale, scale) + torch.nn.init.zeros_(block.mlp.c_proj.weight) self.resid_lambdas.fill_(1.0) self.x0_lambdas.fill_(0.1) for ve in self.value_embeds.values(): @@ -302,46 +285,11 @@ def setup_optimizer( dmodel_lr_scale = (model_dim / 768) ** -0.5 print(f"Scaling AdamW LRs by 1/sqrt({model_dim}/768) = {dmodel_lr_scale:.6f}") param_groups = [ - dict( - kind="adamw", - params=lm_head_params, - lr=unembedding_lr * dmodel_lr_scale, - betas=adam_betas, - eps=1e-10, - weight_decay=0.0, - ), - dict( - kind="adamw", - params=embedding_params, - lr=embedding_lr * dmodel_lr_scale, - betas=adam_betas, - eps=1e-10, - weight_decay=0.0, - ), - dict( - kind="adamw", - params=value_embeds_params, - lr=embedding_lr * dmodel_lr_scale, - betas=adam_betas, - eps=1e-10, - weight_decay=0.0, - ), - dict( - kind="adamw", - params=resid_params, - lr=scalar_lr * 0.01, - betas=adam_betas, - eps=1e-10, - weight_decay=0.0, - ), - dict( - kind="adamw", - params=x0_params, - lr=scalar_lr, - betas=(0.96, 0.95), - eps=1e-10, - weight_decay=0.0, - ), + dict(kind="adamw", params=lm_head_params, lr=unembedding_lr * dmodel_lr_scale, betas=adam_betas, eps=1e-10, weight_decay=0.0), + dict(kind="adamw", params=embedding_params, lr=embedding_lr * dmodel_lr_scale, betas=adam_betas, eps=1e-10, weight_decay=0.0), + dict(kind="adamw", params=value_embeds_params, lr=embedding_lr * dmodel_lr_scale, betas=adam_betas, eps=1e-10, weight_decay=0.0), + dict(kind="adamw", params=resid_params, lr=scalar_lr * 0.01, betas=adam_betas, eps=1e-10, weight_decay=0.0), + dict(kind="adamw", params=x0_params, lr=scalar_lr, betas=(0.96, 0.95), eps=1e-10, weight_decay=0.0), ] for shape in sorted({p.shape for p in matrix_params}): group_params = [p for p in matrix_params if p.shape == shape] @@ -375,11 +323,7 @@ def forward( x0 = x for layer_idx, block in enumerate(self.transformer.h): x = self.resid_lambdas[layer_idx] * x + self.x0_lambdas[layer_idx] * x0 - ve = ( - 
self.value_embeds[str(layer_idx)](idx) - if str(layer_idx) in self.value_embeds - else None - ) + ve = self.value_embeds[str(layer_idx)](idx) if str(layer_idx) in self.value_embeds else None x = block(x, ve, cos_sin, self.window_sizes[layer_idx]) x = norm(x) logits = self.lm_head(x).float() diff --git a/component_system/components/trainer.py b/component_system/components/trainer.py index c753818a4..fd300348e 100644 --- a/component_system/components/trainer.py +++ b/component_system/components/trainer.py @@ -6,7 +6,6 @@ from typing import Any import torch -import torch.nn.utils as nn_utils from prepare import MAX_SEQ_LEN, TIME_BUDGET, evaluate_bpb, make_dataloader @@ -28,7 +27,6 @@ class TrainingSettings: adam_betas: tuple[float, float] = (0.8, 0.95) warmup_ratio: float = 0.0 warmdown_ratio: float = 0.5 - max_grad_norm: float | None = None final_lr_frac: float = 0.0 depth: int = 8 device_batch_size: int = 32 # 24GB vram @@ -76,9 +74,7 @@ def run_training_session( tokens_per_fwdbwd = settings.device_batch_size * MAX_SEQ_LEN assert settings.total_batch_size % tokens_per_fwdbwd == 0 grad_accum_steps = settings.total_batch_size // tokens_per_fwdbwd - train_loader = make_dataloader( - tokenizer, settings.device_batch_size, MAX_SEQ_LEN, "train" - ) + train_loader = make_dataloader(tokenizer, settings.device_batch_size, MAX_SEQ_LEN, "train") x, y, epoch = next(train_loader) print(f"Vocab size: {tokenizer.get_vocab_size():,}") @@ -113,16 +109,11 @@ def run_training_session( group["momentum"] = muon_momentum group["weight_decay"] = muon_weight_decay - if settings.max_grad_norm is not None and settings.max_grad_norm > 0: - nn_utils.clip_grad_norm_(model.parameters(), settings.max_grad_norm) - optimizer.step() model.zero_grad(set_to_none=True) train_loss_f = train_loss.item() if train_loss_f > 100: - raise RuntimeError( - "Training aborted because loss exceeded the fast-fail threshold." 
- ) + raise RuntimeError("Training aborted because loss exceeded the fast-fail threshold.") torch.cuda.synchronize(device=device) dt = time.time() - t0 @@ -134,17 +125,11 @@ def run_training_session( debiased_smooth_loss = smooth_train_loss / (1 - ema_beta ** (step + 1)) pct_done = 100 * progress tok_per_sec = int(settings.total_batch_size / dt) - mfu = ( - 100 - * num_flops_per_token - * settings.total_batch_size - / dt - / H100_BF16_PEAK_FLOPS - ) + mfu = 100 * num_flops_per_token * settings.total_batch_size / dt / H100_BF16_PEAK_FLOPS remaining = max(0.0, TIME_BUDGET - total_training_time) print( f"\rstep {step:05d} ({pct_done:.1f}%) | loss: {debiased_smooth_loss:.6f} | " - f"lrm: {lrm:.2f} | dt: {dt * 1000:.0f}ms | tok/sec: {tok_per_sec:,} | " + f"lrm: {lrm:.2f} | dt: {dt*1000:.0f}ms | tok/sec: {tok_per_sec:,} | " f"mfu: {mfu:.1f}% | epoch: {epoch} | remaining: {remaining:.0f}s ", end="", flush=True, diff --git a/scripts/clean_history.py b/scripts/clean_history.py index 922809145..13b4b1f74 100644 --- a/scripts/clean_history.py +++ b/scripts/clean_history.py @@ -4,7 +4,7 @@ Actions: 1) Checkout main branch (configurable) 2) Remove all extra git worktrees -3) Delete all local branches except main +3) Delete only local branches that match seed-* or equal __baseline__ (other branches are left intact) 4) Clear component_system runtime state/history folders 5) Remove .pytest_cache, __pycache__, and results.tsv @@ -286,9 +286,13 @@ def main() -> None: if line.startswith("worktree "): worktrees.append(Path(line[len("worktree ") :]).resolve()) - branches_to_keep = {args.main_branch} | preserved_seed_ids + branches_to_keep = {args.main_branch} | preserved_seed_ids | baseline_branches worktree_keep_names = preserved_seed_ids | {"baseline"} if preserved_seed_ids else set() + def is_clearable_branch(name: str) -> bool: + """Only branches matching seed-xxx or exactly __baseline__ may be cleared.""" + return name.startswith("seed-") or name == "__baseline__" + for wt in 
worktrees: if wt == repo_root: continue @@ -308,16 +312,16 @@ def main() -> None: shutil.rmtree(wt, ignore_errors=True) run_git(["worktree", "prune"], cwd=repo_root, dry_run=args.dry_run) - print(f"Deleting local branches except {sorted(branches_to_keep)}...") branches = run_git( ["for-each-ref", "--format=%(refname:short)", "refs/heads"], cwd=repo_root, dry_run=args.dry_run, ) - for branch in branches: - if branch not in branches_to_keep: - print(f" - deleting branch {branch}") - run_git(["branch", "-D", branch], cwd=repo_root, dry_run=args.dry_run) + clearable = [b for b in branches if is_clearable_branch(b) and b not in branches_to_keep] + print(f"Deleting clearable branches (seed-* or __baseline__): {sorted(clearable)}") + for branch in clearable: + print(f" - deleting branch {branch}") + run_git(["branch", "-D", branch], cwd=repo_root, dry_run=args.dry_run) history_root = repo_root / "component_system" / "history" if preserved_seed_ids: From 63e87c5a480adf2bbaac5a685a3bd02208cd0c29 Mon Sep 17 00:00:00 2001 From: Laurence Date: Thu, 12 Mar 2026 21:09:32 +0800 Subject: [PATCH 12/24] Remove .ipynb_checkpoints directories from git tracking These checkpoint files are auto-generated by Jupyter and are already listed in .gitignore. Removing them to keep the repository clean. 
--- .ipynb_checkpoints/progress-checkpoint.png | Bin 46321 -> 0 bytes .ipynb_checkpoints/results-checkpoint.tsv | 9 - .../.ipynb_checkpoints/app-checkpoint.js | 399 ------------------ .../dashboard-checkpoint.html | 124 ------ .../seed_detail-checkpoint.html | 326 -------------- 5 files changed, 858 deletions(-) delete mode 100644 .ipynb_checkpoints/progress-checkpoint.png delete mode 100644 .ipynb_checkpoints/results-checkpoint.tsv delete mode 100644 component_system/web/static/.ipynb_checkpoints/app-checkpoint.js delete mode 100644 component_system/web/templates/.ipynb_checkpoints/dashboard-checkpoint.html delete mode 100644 component_system/web/templates/partials/.ipynb_checkpoints/seed_detail-checkpoint.html diff --git a/.ipynb_checkpoints/progress-checkpoint.png b/.ipynb_checkpoints/progress-checkpoint.png deleted file mode 100644 index e58ab3cb3a449fa3004fa32be7112315fd23bfd2..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 46321 zcmeFa2UOH&_ccml5@U?|Mh(V-m^j!FP!SLiFfl|&1XK{Dt5m590@C%hy^4ws(whyC zE?v4Mh)5X(X@h`DlO_T(bolle!I<2;*8l$3`tJI^yY4qD1qEh)^ZcIYoPGA*=gfsY zDm&(Ux8yrMKE653oxktnP@sO=CpYoyO zM=foRTOK*Q!ok?q?ud=Gxah_WqFdIlFgt$ysGW?MnANAB5Vf&26?>>-Uy5g$b#$ke z9UtGK`SkA>reWoHtrR`x@4u>_2<{qp^w%@4m$x=>F<~(iu5bJ8JBj(>%pbm3(Ei=N z`Cnb*E8lPV`>#XHS#O$V4E;x7PrKr_Gk3q;m-oe2cLwj?^xv~!1>@(h%T20AXZBbw zHvMv~aPe2A{aad(vUA?`)f!j*pj(}*8NTM?FSrceLn!)4=a*AopPaY+)K^nq-#Gr| ztf?=zwSR>xa^IglwCB<~=h1lb-lwOL=(pD-8fxfRrCMfnr%As)v!JipTQ$u-AY0WsZ=8EcKvr^* z-tp(Q?EMQB<8wzkx^e~^7H>A&Bjflk*t$N`IK;lHFs;ryXrtj1qnG!O1xQ#_^)1Q` zesV=1DABOMq_4TqO7GcscB9?tk^7=jhN5C(ilSrU17ANr-xix(`Yc%5j+3d5_sqGl zYwtouoke*651zF*xb&1foGBO_8R{I(sk*uS{PCU!IdRj{-G+vSscy|ruVuWsC|r&g zNL%AGSco@Z*AW$IGW7m+#yGdm&nYxO+|(o?hx<&x@#S8tn3xzPujO)^EUIGHYp2}h zxW7KTs8z{eq}*0LXKbC%E|$^QQ19Z6hkp;MO1kyZeYAV{L!?3oqrE=MalmFcF1^8= z|2r+y5PRFsxO9{EuO3?MU4;wo{*M6N*aNGo^&Iv7xb)LCTOKq~R ztyPO?)@dBkU^Um8hLz-wzN<_78XKof^hK&Y?WI`PQ1_AcOUvbC`rFv-b4#uLgRC>x 
z&iLkgv#Q9nG|3>FTqns!*ZyTLU5U~k%C^_t)z*+yRTw`WpY<-2Q8>^2{pn>MW4$F; zsw*pvZ(o*t_Qz7I>rGc=MQw5hG<2>P%6{mq7E>55RNZ97NzrwvHRiq3Mz=xZn7B0a z&T?FV*>(ro>j$GkPQ1B*{h=Xa-*Np|)05P1SB|C2yNxt^d5jr4bGvHa)VzpNv%?!U zskwKkos(*j>Chc`=<&Ih`%PDhgcGvMJ6ye2tHw9vxnoy3y~}8`{qu(%Mz0>6YRB(Y z;SL4U3&=WJ+ZTJQ$_G5Zy~8vvwQ5(SN=QhMRjXBmaP2a($4UGv~7|P zKKyqu)~D9&_T}SblSQ`9C#s`GN&EbO~et4#*$9oZc*387a6WK9MtHQDd7 zdO~seA`ZoVxgWyS*ZFimeb6CyKcL>JGH`uY|8k0TR zN<(72CI=Ec_E%?3PK-9%zdSW_x&N;4@;Cgm#Z8Kz7OoBpE|IKTFX#MWXTcSLiUfl^ z%arGT4EOov1=m?;#c|TxbIkkNRXs9m?X0`gzP^0
iI+QGYGlJ#8f8fPLluv8rHCIqC8n3+BO zdXetc?(|0anAoIUSd@x9k4e{w{xA=A-N}WD*Jg`lZ_u&HvCnku2@REVF~PQbX5-!; zv{3Q+8=KA$--suSjL~;l;?5u5e!pb%nh+WLUF=Y~aSm0 zxx?ha$uB4x;%Bd=R_n{k_GH`PFi(i#uG(yD#o;RYV&<}|KW{&G2gi?g=r?ngT&GZ$ zQlV1hDV94FpD>$`Z(Gn>+V5*|OjGYhMXhBmJTKr-V_Ra6Ah;;Ud0-uDA@gFqUKYPz zmP6b`LzZLD8yk;N9nWLltCXX;8Lfd9(RLr+KEHi8E-r34?n(E>uV?cw^cXHOxQNBY z@8W&(W5aMv5uhzn({KTcm=_nQY`IxOKg>|n)6+|K_FfnnY`ZI_sf`_G(3RfkUg<6& zA)(mQkQ0-j$2vSYJ}Ab&{y^E`E`(WK#@e~^L$BVvz3ml$WwIxA9)ePSZknH-gVtUH z1A}fI{O9<)ml-22gYW7+5kI2x?_=HX?c9eYy?YZU)kl> zRhw*@+&EZc=yiBvY*@s6ITF>iJ44vb+OA!Og2JqYxc}zd-1x*NOY-k9y)T-ExuyNI z$!O=bJ8#Rwm7XJ^n1>TDUw{TqNN#F)%Qoh?Ew1Iph7K*^QmuGZ1DM@G!@EAL01>x14|I62pes z#O@#KXKRXmu)6>HT)v)T{eC%zwh%}DrIz=8#aqUT_rCEIlUzDly6v|F{hW)~rZ@dJ z9QqBJxEG7duP`~xj#)HQb!+dRw@lfg`N)l$Ior0laJ$~PG>+=qU(vD72v>AdUX+Tz z(t1%{y{IkJ#k#jj$7a7pBi1&$X%eY4^{2^h8;>mKy&vv)tE@9A_QyiiEqXMWH`y~8 za|MwhukNsAg{qeS%>wdGM%v5R?QjNMVUc6Xn zZ*QNEjH$UhaC4NIrRDl1OP6|jjCUI3x8ZWlPc~(=g<43at-V?nuH<(sH#c|7*!#zx zZDAg{YcAgMM|S8fGMGqOvTsZK9dGSew`Xf3Q|_DPrL0m8RL|iXMeK?7-*W8DzRP)i z&xB*X@Y6~*N#(TRshzN3b?G)6Z5p!8M-QDB&|s|nX5Mn$m(HDW`L`53FWx#adU&w8 z(A(3lR64S9uq#LOg;dG&4;dUIZ{>@f@mcZo+F00)rlrAeuV ze!ql&e@>5^%>@JYDy8{5?<3U4xRpw`1@osmc2>o`MabP~{qnx~PiEF$rd;5>e z(cgUGjxefO7r8rN-!kX7H{(j{n>{86Jz@$9E-hBu9V8it1*3y-Q4r~^^5i3zujet;GZWybTHKoQALr03L_r@&(k#2DZ<$jc_9DvJoy8J zN%HaZdH#-b`1o#E@zC=<^+3@lLPs2awL0R4h#0SbJZzKkF3%>h?DxM7=segV=yp8W zu;+UQ(rs+**Db4NE7GjdyG^j|Sh_GC9_-v&_cK?5=x- z8z4u?m7JkBo)kT=W#>Hp&Eg4`p`Q7w#m~UnA$IN6_tuTB6GH_G`iPcDU;Oq!hR%wt zH`RF)_A!RI8C=92+|Uvm_hc;>wH1esEwXCV8qMsCuX}S~@u@SoTPv?@xm7DwFNKAA z6Js!Ler#g6Z4pk_?=d~5-U#yQVqIY_?=s@JM>vNLBJr1S6#A}tA>vd1RNCLe`f9#t zMlz>uaa^bO4`+Jr3g34e`S(PNk7l5NMdrbV47;{B?U7;D8@V|D301r} zEcP_7g1I^b@s+SrxU;)rqS2`{^$vA$oVIH_mLJ*cVr6C3`~LB~bQv!2cia`dm!|1! 
z@7;U0E=GOEZ;HL#{NafH{5K9y%tkD8HCMc@HxJ%?WZzyL9UaOICq6viq0=o6jC~kc zRFQQU#Xu!DCO$J7nN8p6t?VIm^x=O)}$>L=XI{~}mjZp~+HqUd5M6=Wd1p!Z^)Yo}- z_YXCBi8)N9R%r{LUuL`0lN%TqNQjWhnzdk6^M%!+cb8`&w+$~d9bAV}RA&_)NO>a?CS2DX4%X}RPK4Q(qt$j~dN{@CW+tnO8-(n~@ zdSGOr(+{;$vyZ0!k&S8F&(D?ay7!pH$Qv%P(O#A}*x(Y=Q;oY(+jRf|nZ;0x*u8dm zu@J(SJF8jPV0ae+W%DpV=BCAxmIsi+uYOEVFYfNua;&%4Ny+OM_h`PhBf_RJ7i)s6 zF~QwY8egU@j>xqYdvy5CqCEf3ggkjB!91BD%Os{}F3`ROonMW%jE?9SR`z&$-hKAF zBatD#i~vb1ZR?B{U-MB!;9$f0E zj>WhUBtx|#%sVe}F6W#gljhjdP>X1)bIC1IB(3IfqD2DG6?31PK+sF}awfYC9Wm#^dwz*xxL*EbFSF zDx$gjUq!gr0a&g?=bJ80n>TN6#>rD=q3U)>>`}^dAK!HB&7TZU51VI0#j6W}F8o@e z7*4WB_e81hi&8JgF}F-ATt&q&ive7AWT4lh&Lqf+U)Hpu@nNLGc>9YNDH9%AZp|wN znUfRaK?AD$+`MMaUFHuAi}eAX8%9K%SkCUMx3C-;%^YqGY8;Tu2B`4f>@hF>rEO7c z(zA5|h_HzFF=Hqait{E05*8{Z)(lV-`CdR~^Xx^U#mHqo;-yD=j`O%h=LwT$zgWC4yVm`eJHd;g#f>-x?vOsS76)w1LA5+A4jR*9ujRIXU_^}G~1R28dLY4b+V{>5IA zDp{P0WTaP<*N@Kfo`vn&D&Afhqbbi#^lCv=k309ZZ`FtP3UQB#F>K5&4c$3MoRt@) zkN&UYd*^K(pAo~1Q`ar!oT029QdC!greUu1{epZ0dtC&5*S z*_YKg9`AhWe*3Y;TsLvs=BNCIGRNOuMSNdJ_gqDOxY-ibNIOt;sB2&0SmWfl!MQn0 zeFZ0n3Y7E)W%Hg-P@cTd;0L@cIv~^IXOWO2NhAhp_kO@36X5F%93(2#klYR>HsSqg zn(2Ag?KL`*t#etqr^zvIv|h-0o(*2dqzQqd00?lcpRO&FikDa1gKYiz@T7NpIlPKtIQI@d)%Ax2dtP%J3k4x28 z+Wn&7^3wF(cG>+)pQ5sRb9gkTzb4Dg!bx$Y$Mba0W^9EHxB5mcgNi0c7wev_`mJ`I zfLW}3o59InZ4rSr1k=;(9-4v%h(%5ckaKZ}&+dE1n9R_7OvC}uvY5j=x;79;<|y3K zii4%a@h&Xn2clHNZ{Ga5V7ZIsa$unF=u`yr%KBOI-}jUx9SF?}+OMf6QRv~Y;XttC zsEhu5<1sh;ZjJ;&9pVItLvo_~2i}MZ)yj|s6FR8%>9&nw9ZCV=u2t*1h!9vu*!5nd zf8yqnQfv`38y;DCU>#L!f8f+KciRLh;OUgMrnT1%Z9n%tGfdu%#YhKgI6T@2=*w)# z_0PIL`Y6GSZpdNBABK<=d=)+Ug_uMunY&ap%@lOqKu0ph5Xd)x{=`hm=hwNe(IZ)B z>CFg0MP@%SsHS$HWxU4ercSzbWkKB8*|_^LH_tYXwMsb+y*}R$ShD)aa+4EcK>xKs z-?rle)seZm@``)a6@rula|813x7C9uw8Xh*vH-WOFH}!#I5Lv87$qDk1fA~w1ds8U zCF@SKHI^Vk`ZjmpcmTAnBOr^b+6v~YXun}aV>K`x=Z)mgMbRFU6VmVQH}xT?$TDq< zMAEa{{Bzw#Tkqb!@@%nb@U`addh@_XdvUtDFYvOMQa2Gynf9H&xQD_kcU*C0+X%4z zH?AbCtsk7AzOEKK7nvUMQyejl-%#IuxP|VA3acw`azaSqw9p{XS{n|7_49_B8h?V_ 
zOx7H!mm35}YYL(Z=!RrwW)=rYB{)BPpEnN9sd#L-zXEUD{K4(-!zMmNCcn$pDXHFfKsz!U=IXAGL8cYiThLgIdDlAGFwW!cyx`{v;kTV;}8nYpeu#c)4&CBPPJDVY46Jr=``)q9w#}N!E z7+aNrLB~-`Atop+@6D5zBJKW@%V&bh)E^T25$o*B0Zt5~h>Gl3d}h`HWWJzgB5}mo zTh+QNfISe}7^}lvZL1JrbrEDsT8Abl@+QSlT0KMfP6lz=8tU>cYIznYA^MXEFf?&? zR}?0O2D>xH(~(PB31+#E)-`J6{V@J$wwMvP682DUlXtd2*@;L*_qL$MF%wqvhYD5p z+Q{I2IeX;nT8o2^motZ-d%0g{EEYG}U3I|la(PD_$Zk}lLAYrz?W^^MK~ngD&k;e! zjO7jw=1qoxvy;-XOjImIvM8@o;UNefUft}X>h}ci5n{BA4ebNThV{)D|jI5is^@rh=J+=PBeT z6OiQ6BX4gLDJhorWL`AkvL&U%*ubF#)+?)Rk+`Mb{AZ;tGT~|_O0E>(>?oNui|Y7c z6!}5T-F3Fh5GE@36aWiSbTUHG0-Al$w(>rT{$|hvyI8?atpOxYkPM;W1=?f{C74_k z1*Qn?CAYogwdEY}yUpTZ!@EUuK9rlJhx>yucSl96$(}`PHBbX|9blDKyNV85Kggym zP6M_FF;~vkRI&SwW+2?f{_AV(tM#`5eq#;*JbN-K!@%W2xt(DCo`Ws!=%JdO7Ms6Q1#ASz6f{s=m8ARFka<`>Q8TD(Z~S^% zd)ZQX*Av(7Y=OFR5oAW%&V5L6H{XKfOm1^|b76Iw%peZ3CopL`Cm|vtg7=;3@YL!% zYc9Tdb$XsCYa!z1`%RVl;15v8-vV%}Ohp3T^P#&g{Vs|jjY*>F!voXTG>i|>CP{qw z%~EwQl-5~4fdt!VR(9hJz=?iWaL;jMNy(VrkgyoA!m)12C6dDW&h6ojAw7UpaonCM zk|Uq%*7j1lM+@|MpqsBppLbZwPmZYLC0*i12YU>N`Kv)74IFSGilE-HF{f4rr1Bab z>y=SiS($pkubHD_lp;z!w}>N%K@mWK6OcH-5ke3V|OUa!b>+%=9*#DgfDz{(fcoVO@J zrUtqr{Ka?NS5uJ=uT3lskxd`k?E&b!HUbofK*fi_nh$EAGHK0fort7fKQYYq$h9cJ zF0cx>F1!u$mapt3r6nSi2#)mo<&6mug41^!s4NbZlf~XBEW5c~+;!+ZT@#UY+Lr`z zd+DiKfu)aGgKOIzx>XvRXQX=w%x)bq-b=S8Pre1WOo@IBn7g#ceK2cmv~f~}=x7{9 zBKn8BOckXX^NjP*!u@E$mqjF%ptmp|jZl3yc^6Kn$1r^^gL8qzX|&`wOB%9osTPi zj18_o2t?j$@LFsT(O0Qb%yAhkr4&Sg#ib|5F%7Cx$NiOFTQ`l5@-jqB*tLj_kI&61 zy1Vzb?TfuTBP^Gt_YAn6JR&5jZ5b2r3!paY3uv;t)Mme@}f`Fet zM(u-eQKg7|xXnfU@Sp!7ZBV-9y4og(u9`H_^DXbow$C%}FOgDZ?GE0u5mmhVQqA9W zz?#gMIky-Jgujw#27`d0jvvJ6_ooKm8T^bV<2z6|j|`xFUiPaNat|l^cf5spKACBv^pZDr12!L~)I_<)2~rx1fg9%{-ygtEIc6Md z#=FgUrd3SZV-N|NfrjrcZ^RKnv>Ce-_YAc5P}vVWrCfdwzlipAWH(WtT|$sApHg{+ zGAAys=#AU^{;&WU};57O-+?CU&^bouTnHiRkyh`znuD_1%I=nR9UzI z%#^s}yO+kxZO)&WcOxR?hgG}%%bxhSZYT+q5N4L&-r*$~|J4oYw$(RsY~8=)tZZBL z@e5)6jhwBub}!1XG~K4||EVT}7>iH72l;O=-rBi$p_Sf~^M62{Po6aHg;KfB=hTTg%jHZ!N(Mg7!~Up25tjCPAvIu+GDXk9EbMy 
zl0Qi1$OILI03}(=Kl%9C+*wNTYc3XtDJak-QCScCMwB29xc<$~&z=BKj=?0Jo>Io; znBk-AF8*>MWWKLDL|jz5emFiYwA&C#HBPK}D2x3{k9!LPbFD%d$cAQ3<_Ft1yP1#( zH^)A9%_m!k((8~#$Ld8-`jp5jB!Bwb2fKFs7Y2}Bd!y6?$038*KIq=D}XU0L$UOLOm03Gy;C2?UjoWE)B7s;+Mq z1Z;J(g&o0^u%FSGstYqstJxoLO5JMC6B&kYUc=j)$*@t zBa&HUYl(q>!D{)i>In9;mr`w!7Hul+buliP4)3mK*p#v@Dok5?FR)D;#?D&lnTsP1 zYybVuj;d`c0&1F?L8T!wCO!@IDDV7h?HZpIC8*eR?hBG0 zdUe_k(1;}PBE)rX1`(_T8vHU#Pku2&+X_mz3n|>O-E_ateZQDBTlD@4!SS~{NLC<4 zoS1IB*dBAeY~|@=cO%5vn6uQWKW@0aDqIbLEZGomvOe?pvml!ic1!}>{PuS`ju2s6 zZh4A6qcVdCGrG6l4AP!ZU1kV{MkXfq>UmYaSU)V(b6JwJ1|*omCgApE$Nv1G9ngn| zL}nWd{RT*u#s9@OzG2e`Xq#**b{HtsiktCil}!R2=JdvahI80HA|W9EB&nuuz_tl? z?uaV6V&7G>kx7t;ze)eZ>$Ceh--1s=g_Z!~B(G3SRn7-o@sl?^(VjpkU4U@JhRoc6-X(gCeF z8!yS|qlH!o9kt*9Nsj3dS;jJgd5g&+y3h&mCZ-$O@Oa)B)MF9jZukVmNs6X8ZPHDS zl-?&xy%`p9sTyl7dIdj)&+Zq2F$2uO_z*Q69q%LJCT(#}vy;sBZf^)oUr}LFtYT%Q zoaxvdETP$1Q57zF4NPRjzNltGgjh6h#%NEThh)vc(-ePrRu(sI?dq_hKUeeURAL3z zc8VUjw}MEiWJDWd_`ymiMtj2c-u}ZFzOcno*18aIgMkQ>uBQ&o`)g}QBTQJPJbVz# ziQJLt5XF9(-r6z{kSsnJ=hV&#myhALU%M>%lfTM<04MW>P!isqX;02zX|_|pEm(FT zCYkMsIG`&uVTbZ?IwV%wz|rYy2lVSR?Tbi^CC0l5i|R|OHSB>k3 zQh02>hx*0IhU(Kk4ZKLbVAXE3rd{&>CMAJB4D8Ku88^1oO^)fl+^VqIpiq|o$Hx;NiH>+Vx=yNlVCSC7Xl zS9NPJt21}$_*F(vPDD(Af?=`Uxyc#uD%d^vVx|$E-X`eL34>3`F~v}oZCxxOCea)b zbvdAVD2%HjQ7AfYzA4(ns;X1J!r^PtQy~tVwLI<^y3ZKOvxPf{u9`K5?fQ5pPR@(E zwZ^vH&eOJR#_aw`zo7F!u#EH#`y&08+Tm~P@Ez1Fbqh1kkbcN0B-@TvjQYC8TTdJ% zd6KL?RE?6K4W#q_QBs$rJMP}z1Wp1ZxM^yoM$yx2EBis(ryMW@Sb&;KZp_xw5T2`) zJba)8li@39L3tm;%|TtBT&bZGjyhKhWqJQ)8`qWOM~q1oJ2JbrX2|2 z18uN+?n}-E3S`e(9ekDe)DT%mUfGS+ZX;>=u)$+GF|}ZZDgf)K)&^_)4B<1iX2PoE zucTaN%$92IGF|F>x2KjZd52~6;Re@b)(Id`CJXqI7n~@n*}`Q*c2u6PFx{q+QAnIS zx!5Wql#8GMUWMJPb+|2zjeNui(s#*J{$+K$q0tAr|>8BMX9>2qu&T|YzRQn zFAGF#NRl8dV3F256E`yYccX%;gFdmf>9l&;bXIIo)8oQ-?u;SZ~`{w=610TAc2+du&dAKrpPHnWR za#pt6_Pq-$Z6<7BIE2{(`+%hPKQAmGPoIe)(UP#8h@c?VRZ$rK0F1;a1nlz$FD6A! 
zAQDE5FH2OJNw$$1OM(-RMTOM9ib=``v7$U2L`y*y86s9zGl^)oM7|lW?{W7-%_xF9 z#GPM#^zF^_CcuFuODtlq5t%|#csvwhQZ)$u_A%TJEjmfa8 ztfEv*%yAdC@QTA-;Qz^g-iC;| z3%s9Q{*RhQu#z8no}Q{RmrO!REMj@6WcO@BjcZ-i6GFL_>_A#N%F4>*ykyj3>+4)P z!uj#gv+HY>bRWZWdcS4~e;Z0n71pI?(n*y~d@9a?Pp8cgNH0C+P(?FJt-|LNsr{g0!Wu?&p&zNt?;JLF5-OW%}DJOlSk8xio`#b-b-#JT7cas+QIR;@-zIi!j&gTg<-IL%E~~JX z9ewacrQTv~aY!Hh_M7F}@++S=P@@Fzk+RZ8q)l&;ucPZ?+RQ@l>cATqYd3|Ys}9-p zWY2k&v`p;wx%?xRuCi81xG&^JHn7ZtH^n%h(_HMcbuUe|lFiLIfCKN%V9O2a#uNl` z7NswJG`e?th4oA&wZ}lnECwAZ)IO=d8d=mYvm0Uwtl}J=D}t={mC}x6gSChZN#oBu+U#djx)92(CW3sf)Pzs^S&!K57= zx%<##K{BfI>@-N{l92p%V`GwXWI{ozVo9mO}4|flL=M zg~@_8L^-z@Da*F6aJ4Dy?6t7n*itq}ykNoulGL46*NUxuw^Z8b=~W(i3{of=Yu#KC zHgTNW=rKtSt6}`%`i!|-4xY{VrfBU}v;Qy}k4 z=9C@Fj{Ob|%v)jX^*q9M6bg6=1*xMkLV|2EwT_K$I+vt-vioh}({9BHWD|Ey5|V$<*TN2zae-lLb+q2_I&Z~jzSZK-%duR?)rB_r!Y zOyS)L!R(M9h7SH!J{JH_c{iP}YVPdhr+|{agjJeam|4HBU3>6#?GLAXyrzr12fJ=q z=m{+yoWJd0%b~`(Q^)_m(=>G8J5m?!gGQSpd@6jf6n1EZBH@33dVzg+)z@KntFnp% zC1NheoC>e_5D=G;VDS3Xnam$$WIw**gPD6!B$F0R4koP>v%R3nCUSLOwJ3m^^o|{} z8TR-WsCR@h;@AS@-JDtJ3Xg>Y|9Y7Y@Nbk0sNV)cK&5APwg>3IU#{L;4?f)oYHIv@ z`%i(%cOT9N8Q_qxP3ReM)KKbjGGpiYLwP@gNJFte;%t)#Na%yJ_|=1-(v} z3&ZrSi|Jp@>Zp>pr&6QTT~{tX&>I}a`>m7QQq5Jb^~MJCa~2= zZB%UhP3^Upm0S}O3KK%Do>n9;)!d`VXJhC75$F{p1&B z(%tfrvpGtu#fqh>I`2K|oFShr8j`)ibBRvqX*%fU+_7}F>g?88e|*t>-flseeo3go zoyf`KGx%Q3_-(}(+8Z7BUJHtloKy&3a`6|{9GOu8k>rZNNPT5@?w{YSpZiKq8r5v& z7#RK=Kb9(Q8?Cr4I-DocYMl3T2#3h}1ERR9xDzHSKNzrRNpFZ0YhPbB`l@UF$YjqM zI1q$|Pkm*UucPE>;Xn(qrgG_Q&tlybsL>ug19PPPJR>bmn72fDvW!YJUX?A6tBYf) zNE~sfJIC22)R|Bv=eDGk!HlSOu&}pu6B@495U2=`?{mNYo$J%=jJr=4_2^t3j&o%` zK~PJe5}(lytg%${%lCq~WS9Ptu_u;J;~&yQK{4+_br|C4^S`|JrmG<@zIHh30;+j^d)@ck;iw&}m!|Mm5|G2O0i>dp%vdEev!b+O`c-5Kf3!3&O5G@MhzV1ht@n%!Cwa!Gl7t$G+mdeC+ocoNbSw zb%zi&NA3#}O21#ebqhM=3?ecdU}d2OBR2U+kAmPkB*rfT>SzUo zXF%oX+QbVBR=1K;hN~j=(=unEib-VIIuar2vW<}1N>C+a?Pj0uySyAJh-D!1P%-Z| z&e<0;+(v&o;qmD>*Y8@oa|nJ39gZ%F43mLgkW+pZ@ksaUnPl&XfSIS{$~YKiC3Feo za#Iub1L*V6G&NYrMfkuAr 
z5{oMvrMylJ`c21$hPaedXSjFQTpYs%D6vR26S;yumjNaVWLRW=B2i*cqYII%S6?lg z8&{pPs$gDRVRf}gpO>bLk^A=YoE_(5vG?QcebPkd4gG8S=yQZmotX~!C&XKk=C_~mi-@1Njf>;^IxXN1Y`nmV5ObMCBge+c>HOF;Uh8YGUpc+1g)EF?r`uc=qy*ckdqo~9hB0Ex3= z*7A?P!>8f=iYXOK^3=cqL?PV6`qc7X!HEr#ue(L?iXxUfu-O;>Z0uu~% z9PbJl1i4TI`NT&hM5YB|?pPLg90Ya%hr1w%%LtZV zCJRlpn}a&ZxerH6)!{BG3U00G%V4{S!tmo-9Vi7_cnlT(Gyjc-NmvbI5~g6Oq!`o& z&)_q1MZ_WsDveBcdvD81E&padS$$suK#Oo1YYAJ6Jl7>v+%tnGO<2K&9CsxL)+=uY zid_9$spP_==iyn&sNQC>upzb*CE@`2PAG6nV&vmwee9S1HREqE`NcvN5*Uy;%7Q84 z(@wdDoY}BDsM_B?KBOwPyY^a)k&lXCVAi`YyMsM=->-9Fw~C4!vH>){~znSA4(6Exd`T=_KF0Mww8 zo0}Uc{^+;-1*Bu1UzGb)bTG5|h!p;BijNmf=r_9#H{j3c!+(Zc9YSqD1Qu7R1p6nm zsme~oShw9>hbRD|(pE0O~t_0SNCfXE zb!5gY8dgAWgeb7u!=54}yMUhb2x#Si&F+VbRGw-Q>UsnY&1v%o{nAEW=TH!C1cpdE z3l#7GSqMoh%*2XX2W=r2o1i)FDJgV9(<{qLDQ4f4%4029d zhUXY>*BOBW_ZmYB%(opHQriIOtR2SwT%cM^8;-oVB^;DbA$Y2dn)*)0Bq@-c6Jr6M zk#h)tCq&AJicI^R!ES0|qsN1bAhff-)9$%@-`TzAir)8>5IegCL5r1Xy#oii>&+X#c0JTggB zbH(Bf2e%R9Mk8Aamf7uQHB*;s@-Z3(Lf1jgecnx9WZ=4yBvNu&n%dpAQ9)8+`-^>7mFKsZa zbr%~PdmnXhLWpo2R-D?j4@P&cIJ6j&UrJ`@Lm2tAm)U&xGh?z=`87HFFvEw%K)kWE zZ;0SlwNBTOJ{Pt<+63P{8|@P;2C4Qs%kr?sEC!@S2YZ*Ra7N#O63t}6w|_6+A6>J? 
zZ~nZ%4D5L@wU~Uux92IZcny2G@wvGIfi%72`Y`C0`;0>Bx<$B)=Exr@ze9Z`OcsgM zm0TPpk{X#T8ny(*tI`mQUIcqnB{^)Nk6V!_dwt)^sn#}F zagy63g7A82O!zk6!7NSI_^{z((@i)fCY{0dRa!*v_hF)l3Cto-iEV)&zYQwJQxeT+ z><=VMy#~K&PL}f$w8sgUgvq6tYUW{Dhm}nG-HuxFMOwA*9sD&pjb<_Y5_OKS`Q@v>;#dRhxa;4T_`X{SoZhollRO>ctf=|e}QFwt+4 z(DI3ABU-5fo|7csDJKmd=Vr@X#uz$9pOH*YaR(J7)9=FQLj(btB&nf}>>Jc4NJ=Oatfx37eJFUvscnpR z(#h>eEFAGgw9__$df5~<9h=LZ_)1^B2eo%Kle!mZ7F8VrVHUR$jeOBwJ=oG@-p}|@ zu4D&E6J|E)BJJsYQ8u@7edmMQS7g>vB8MfvE!*^`*LhX|3B%}l2*H#QNr+f~CizYu ztQ57P?CpDTJsedN>`Oo&2w zYshxO&2_PbN}IyboEG@_#ey zXI71m3>2Z);+gg|r(t#;%S3Tf3p1=5lN+&#pJ`8%HD;0c=7*j}1<-96FaSd?tiq(p z{^R#4{1S^q*64gLX`+49Aqcm$@MjkuEo^bC$xA^VV5Wx*7%ez+g|NzmFo;B?_C1oY ziQFZaOQN%}$h2=eWqWw{)eSg5M4)Ry)$Y43IKBqwMH$>X7DK^^G8r{{QBWW%nyiRE ze_x5;SVTe7g^~n~E`AYWUBO9;gKb&2pEoSXY*%wm>Hmb7n@#(Y_ASq)AuCtcMXCMi&50^MprQE#@(GrpXKt{znulua^Z;mIv>aih{W z=}6dw9K~TwYtfqC8V6>r*IaQ30Kc*Tx%?@Zd3`tsDgJ*GB)7g zo!Gx#?{3PrN7V04dU;Q64?29=Kc8Q=uj;RhSJz(gGZUkB*z$TUT>q^``X!1&m1f@YLRLBAj$3sQm`cKbdSM=k~pJW{@ zv4bGaV)7jlU?AMeAM^@nj?eJxc^>z-7EQ;f?w7V-&liPDkHOr#kY)lVlmFn}fiyLd#ww5zJ95oM6F2I^g7Z(5rmy|& z%-8+=nufcP6_*+kKpI=8s?su$9TDex?Puqn0v7tbOwlc_Yx&bnnz+&5S0v{5`QhZ> zNnAYYow&g+DIk4J!@2j5-GZSEz6cJja--v*!gX)GnqIPded|Q7-Xqf@CjOP6C3#Vu zDaP!;mu5;9<-Y|1{+(MUhMEW@bis;&Nl>_ z{RNwm2dB_^htZwQ)T}|u4AsS7s(E`MBw z><$5Y_2nL8y=c>0ZY(-o4yJ6kH&)$(L&vjwyJ%1Cnu9|7IzBls`3k=S&PPu=-Jm{9 z$`ldH8?$BcCJo|92El?Ij!_53VxOLrj*c~=pliF50g$X3Osbf|LbWnVVV--RhC;Re zbqv+CSG=YXe>9neJeV{|klfu5PtRP#~xdY3msqSjj6q5_J#J7bW!(8O*; ziX>s%f7}{48Is^C%r(se@Q{K9MH=-F}YDW-27U^{eJ}LV2svrg0J5WW6$oO zgEpT$QTVzU<#8$M4vo*lQhMwRF?5~tTT$FY!Vzqy(KqOABp}yfD~LxqjNwT z(DX%ZZuE3$cffEF#l-b9EX>UT60}qD>@p*oCUbrd-^bj({c+OzqWM2zh7rpMqd%;4 z3ujiKI}F7Zbxr+OU^^;B+4>7#&(GBhKd19&P@QB{PPc3 zo%;HJHWdl{uoVhA)=Oy<$~Kd3>bHmfqZ>e*u608dlO!wI5372+KJaUZF_mgnn5J~ywU37OOghqLU2o&;OG zQbc0nMvSc0(E2N^?*S%6+%rN`b_s{q0tWLnHQ=Ksn4~F77_q5n`&;;%%^(dPn5(i8 zJ#1mHiLam#O)Qv|AV>?c)_z`v;0RE2CeV-m_&BP*5(Lg9L@cS!LHvmv+yB_oX^rlq 
zv~a^!>A5~USOMd*T9i5|-IASypl*zy39tJ+%8Fb)`e@ebJ7{9!*`6ujfV5CZ!nIRj z7|$io7{OO!TRwj|dt>FzM_)bQm4yV|@TOak5l&Dun?xt|jv4`57b6Jhe0~lnJrf;= zSSrdL!kjvqQS&)+D*vl%q0A`2aVL7g+q@YdJXBE~4-5)P;6;t~mQ3Axvo@4N8$ec| zi?11U%MKbON%jMuUB(xlpaB^<(M;>F!Z^ctF2l7$Eeu26nCLqU9G^Cf-77wI27O-R zdLGkdjC{8FUwvz@PXAZUg#Qm5XTl;^Hy}q)-1tYe#H0V{KL5R1dIUPJyr$;BKmHav zs1L}EmB5^$7E_%Cdt_Gg;p z`8*wo3E^M(vv9f=+XIstGzAwaUW5Hpn({=9o3PKWe;fr&Lh<>mJfb&>u=Y7Vk+9O6 zx@COtAF4U8cuhBp1X|ix)c8-CWWQHEwF`l^4aFc4zno7M$(8F4Mduhaqk{%Nn?Ap# z$Yh}m2c(hc}r$%G6W(kqG{qU$2rlU{+@V9p@%b-a&)Hd214{*YAWU8*mnd zUSMWsxunVTI&f~{fMX0$A>2CLcsJ_rK)%^ZedvFCp;Kq5r;{=RXqeO*d8&0t3MWtY z-*T4AYHDSm3Bxq_nKxpWS16(BUz0lKs8AvkFR}RvZrv82A7|i4)C?gY)FsHHPnHh2 zSUq;Ks2h`7#4yB!+|1;!-HkW&j~L1*q-K5^7)HK#>)hd1>QN=?1Dq+1pd%YRBH!mg z1(ritmHP>QFxQK=G)S3e3 zXshD(W&_h_`C!bgQo0A1aH8v&`mcMnLgtwhIrr0!F9pfDNLjmZ%jK{S#d`W zpf~&~K@TR4n5LIPzwJ|+i~0|k!s{FFLZGXK!Pona*0$H^7-iE1P!~NBDP7>*f%@sz8ja>p0BWhQ{rNK(v(`crcHaHxJ^Fc!FxY49#a~W; zGv^r<{v9Gqr{46*dGXjtVIwahwZHloT=`<^Grq-7t+QA=^#eCTa3GK3#)PAR7&JKz zh89N?3pAmmi0P%!_yud80=}MdffJxrGLi{(yz!=_p>wVU^&@N&#v;=+S&AL1>-QiZ zHloA_2rUALrrGqoo-6`rQ~pvioel8M1;MHH4=RR%0*x{+Z}^bE;gzUw7Sau%vGExo z7wl**6)qXa-B@(GWjbXg_RT~c6)QL`*(2hRkABQpHuYA^RIV@KrHq)YpXq6+4VlGI z+)Mm%EhxCDU307WubMIfH6@Z){rZ!85E*&qbKGa_uzrjo+=R%u&O&;6AqB9I?@@)A zQp?(ZeppH7JoHD0{cGFQPoTjI_kt#Lp`t4aoX#!>E>vEZxVB-s_9(lPA}~J?w1;rU ztAEe_h``bDz`ElxIl&ujL=7%9Hmp`@`d7MlgDb*##Z_by{KyqWchkTRY8$7i>(Y~B z{d7||`Ay#+U})>w_oT1T(506dcFBNIyc!1s)rd`QrD5^Zi6L#*+U#jCc9>4WKjIg4 z%LJV}caDm{;m3j#S4n=Lk(5LSQ0YN~U3g3*%s*s%^mECU zbP48D$2rYWA>s(ggakXGB{VjS_zhl%SDa2djUu7uVv?PR!Soauq-{p*0+Bp4UV!>l zX{Zu`x1tZzxBrIR+HT)DG<8KDBeF~&PUFgBUNn|QZ-iWBio6Pj^>qU2f!Dw4m zz|FN4Hz4Enxut!GkXxy_el2^NG~nz4T%@TdASPYe?xbQt>w7|c zO!Go0+)R1Fqwd~)$`VpCm#D@$@k2*H&52jVfMcq2RhK0j6|ykSVKqJ|qL_G9^xP$C zPtsHrc&M_U+^5eHKx_Xvv{z0`EZBLWkcTQ5y*`Y%|fiq^Ccgr?B3hxKp~TE#IxKH&K@a?((=*w9+7o|W7zBD6;Y;W=R-&~9q zs}>d(#(>|UM~iF`dL!*Ss}x0a_<0RdDVDXHC|iIm(22w5WV11-C=g_xolg_B$x^Y@ 
zlQ-$Uzmez%7#`bjo@kz5nn~l-qVmNa!YyQZeL0gxLX#Voaw@flvb=Kd_h3*6K!aYh ztug7WAFEi>$Vub4f>X5_hDK1x78sV0O)?gBpp9_5bqrsUBn@gwPGO?gs{+&swVIHM zL@z;XFOG0^Wb|!B?HEfB=oqwo#^CH@hE=qD(82GU_0`G!NeW-s`;5=J>)EBeO;6l| zXWmLAH>csuJPqUgi@1-*MZPVV z#c2&)UU2`w9Fz^vhYEkx)V+p%ejiApimaMygQB4+1J|-PH}KdH1I!Y?US?GyJ0G0=u+l(cPFXydz_eFFSs5S+GZQyONwO84(Q)A`T4S20e?GwPT+Ht)tFVd{)qN}li zB}O#UL=1AbQh3x(v(%EX#?g&gmTdbH?W1sW_AJ!fG_?U@_ovQf&STqMl8dcTaVxk-QD~hp7KK=2_273&rp6#HX zdgr!B;M;PvPo1&XLR)YWMj@Gkj-@$&mkWf6^JY?st0}v@nFKcUGRKTe*3+lI03iwu zuem?HGdq*CXU<1!+Wv!!EgGnA`_q^ZUE4yT7`et*4^Q(xr~(oni1_&Lgm!}xrKy`N zl7TCq@FtFtO9VAj3jja~eVRw}?TTMYw=`YKMye#T4HKd>I0Mt36X{Bst9z_zH1fH5 zg8KG3$J@(!ZB)@5g47t#v5HBlnTP?HQ6*ZJp!1p5N%4lm{EnJY~|Q4t{KfN?pPxmot1t^|a%xaG)ff z=%ICvkSI0l5_zD79YKhl6dd~0iw+E2Gb|>e7h;+`SuzlTSoll@Q$&hFlDH7p6|Cyg z#??Q`zj_+cv3Qg`AQFQ7woAZGb>^c{?=jUA`@KYO{KRW)q|Y5F_K6MCIHyK>>?X}G z_k`d?^Y*%t&6gd4(w>?|=UrOWEnEh!iX}2NChLqO0caH4TiOsSo zHc4Ki!O>{*{^dYBdb|tivx7`XhC`sw;e3)bU+l$}hG4ad%tO@Am||YJjwaDjbfne@ z8jVC0IHQmTnUk(Z3^C0eqwGRqm@Fv1B09wPD&{PjTJRfJ%=XlsLS3dw$^lM;6jhug zQ1J?>DD{z=jXpj;YkhVNCrpkcz-6(9x)-hdrV)6@J?vgwtX_sC3L$R5o&hZTpyFJ8PDjL~xh z_B{jVPkryej!I$2sqM`7VB|AWBlr{5w`a@runoQ&M?ZCLZhQTg-R{w+kqCTe|N88$ z@6t6OOyrJQBgdym`c5O)j+2#h8D!fg_kb-72d&Wkig;cSEC;0uB4|lttHsvnH<%nS zaB2_t)5tQLURY}jENqdV7#r4ufvwm>O!@CtK!&I8Q71g1Er}%bsS=grct)CASB}qt zq0tC5gg`TIx;ng}5R#h^i}*p>mf zFGL(AkR+2u0Lzv)b`cU^rNgwQbWmVV=eFNi2p;_x=3&B`Dhms^|H|-vhp9Jn za6D1_(|td=Kg#LPho&0a)Y}0Wm7b5#$`V zh#CkYMyR>>ld7m^dS-gg%sGALgnv|!s;|E9_j|wh?z{Is14Ba?49Uc3%{aUdP8!%1 zqn&k^yEZFg$`7cdg)J_w!``fb<0y!N)@-oO#^&eDj!nojDxj`DvqM`!RqNrB0ea|R z)3O7$%j)4R`Tc43dz+BST}C>X#BJV)!s5j@v_^Fv8!8p;rzyGNM53{v{o#;OGlRut zvhz`8{D=Nq;?Z3>KXw)}C(3sex0l!T(N?*zLYqIg2>}853secY1$hg$#T*=T7)Nlu zgLVaGe}e^}y}=LW>A9`B$Fu~LrtAWkByS1cn&qNGA$elHu6GwvD;+$NSQFYZk*e1%88Tg*fl`sv01WKP_@XLnABXKciAbeU#!7^Or1O5|u| zIKjLsKhfT-)_q>du^%DMP-;u-gK%i3)SyiI7$3g@xeJ_TNx!0d1yJGj#DdU0c$;ut zLBK`2pO1F6)?V5WUK-(inqF{P9|j~$m6U_0$+LxqVMONT(eHB8@g?a9hf6*7Fiv|{ 
zBiwS9+U-Ng2ORD+=6#Q!ko_!@MM900`Bu7%K^hDZIqoe)SKU_+z}jTTm!;;;`y^gU zD6&}_cN3ngM~r+<94*v6*tTdKEwUH~ElWxCpcETOdoS`0xNtb;Z6y%d*6cJPWF+b3@T6>~oe}MIPtFd(Jms)|6sO`(mYGds?d1!P74NsjM zzfEl%{e<@7zdn-mv;QgqK#)L6;04*V$)A)F7@e+bBeq7r``l0%B!nYh;~c}NAB%C0 zn1tyXhQr@5ik$_aCV^9l41%dN;E6nkzDU#(ojg7tJ=X3xP>u?$32OLhsJtUl%O3C; zQiZycTvxnvybj`V^q%5RatXSKo|QTZ)covxn=?GcnB!ptiocVN?9`Tt8V|(Lb%|y{ zdr>oR(unO6ErO7}O@Fl6RUTEaJ()%zg;)*w+sH>8iZ`3h9+Bq!24>+bbNX$kFZTa* zJ4^H8C={TbU?Fe0U93CL^P>!nV(($h%edcw`(6ANvdpy1to|n!9pU{{h#vO#YyN5I z?Kh)q971~&l7J*Kw;V6};0XkWtM5-3Q>+cWj^{?}+r0wY_>#^8cgc6NLljh&&=Zv| z3E{{MS0N3a8Kym_Qg-2q5Mo1CV?{a`P2iFLrX4yo6`zo-TJkpqlfG8>(nNsa(^0yw zeCA_w3?QKQ^TPZ{a&30@kquAdbjBc|WsTp(J|Xex050qaZ}MB%s?WXywUgb;ge_>cSy$_Y)!#qWI7IQT*qI#DMzC(;TJk? zPv*8^FN$e8EF|v$`}i*&5vjZDSOkeEU` zN+LS3+6t%#Ny4+$QJ*Ropm3-9_8I^P3m8i9sOB((s)s-f&~s^MB1(;%(z{ZJI!|)( zKzojy53-i&^w^1>VG{!CA{o&HTEBBz3w&$G7U>@GkTLppb~1c#HHH-t@4vcC8`awW z^3>+9M3}@boG$TFO5?z!cK6xpg@h)ny}bV^*i3TAabs(1+ghcFM~RVv-3Xk@4#~62 zO8YctB;9TeY1)?f{M_nKx3go3g@FSjXT269MyhiL1pB_5X8o_5|AXB9jy55U?plgx zNbxPo_nsNoH*oOBGk0Fz_4(zAy0kkG|`;7#& zYdLZ(oMDO;@r%+hfnW;OvxmA^Z-V*ai+kS;ClwA^h35g?8_Zv;95~d_Ejx3w56S{c z&uI-K9jDD^vKtWd@7*OTyS}}CjM*gZ%^8ku&s^=0j!#s*4y&i{d&pHg;Y+g`FlbhG zwzt|6A+9FTL$_z18>T0wwY5M~#c3I%H#Pex5|pXBhHGmqwnN^j4vtz%KtCgZJz+p8 zoL4{tN0M?`*$43RbB->UKx1?)7)K$tYI>427jP*Q)Uz0(Kdnqyy#(2a*HXiR`4fbp z$35}OW5(KTL@`i+b({fYU2!`Url`yg+24m6`I+5<&2oUAU9L5}$1_J1d(NDj0Ew(j5QfZB_74^GI@IKGHBnfWPAGSe^Ylg}Me3__aRpy8nEI)Avp4{sO zMzQ%w64am$XAheufD?cwXVVxG_lc&M6@W2L0WYyY1@vS*;mLsU1aZtuaYxpm7}yV3 zQH^sZg839JXk5E{ySHjK5QuvwRw1`u$ax6T5cd7>5WghEhje2Jyqzj=x*Xa&sDGif z@`#$8fy36~M)Y&lzi2@sn@P@j*R-ol)Rv6QY8I~B%f!nd&J->?UNPFdy|oZ|2su&C zS$k9}m9!llsn#x4iKbz?E*oh(Mqp>xM9;6=!7UCKunxeQ6e+WI5?>%=8iP^HiA>Xk z4&kb}dayR+MED|y1zjL-y(Pvy36mty$J|GZWx=-AW!Uj*HeJH1T-^iU52W<@?I0(B zOkY#smVkGw?+<~FZ>caF_GnOzG0nQ3ISDXQC>5PNoCIJKaK6+tcb8Qwd&oEm5K%o# zA~!wr0w!C2vS*8d?5ddJ7P34zsUpo}OIGG@J3DiM(XRm7e~b^H7s}JQ1i6)BU;YNa#g=H=T|HV5um&+}+#+nG*-L;)h3`e7}6uinM_c 
z3^2(SV08m2it`pWgX#B3LG%dc?E0lB@1tz7|d4IoD*B0&X6qxo4IvT8s2NBD<#&g8nM@KWuO^t~&NB!mFem(t>U9qNlg zmf9tg*?@F+9<&ASza`VzrTE?}u#x*Tp#K#_ZM&a31)oTP6*y+Bu7DCIbp9D9n?Vf1 z&d~2>r7+$!_115v-J$Pn&d>}Wl(RKBVq8NIQc#$hq8; z5I`IOA8v+(um%T^*rEU6?CF_+u@2Li7(cTdIfDwfH^#beBU5G$v67H-(L%NwOcaIW ziH>lXp}OPgoCZeBvniLSb)BjYmU2y)S8$196}Ix835+a2@xWbt=1MDPN=z{zM{~@h zc(X7$8S&@PGiwpU1v=22aP*=$H#aRr zAZy|raduGP(HW?$`GgjtC;*#d7Y>)8?S<#-j7Mx%!?6y)+c@;Ejx)XReJ7`%v(D14 z7So;l*+T=qFU$vnNQ(qM4hR}`Zp37(Q}X)=`8jS~{o%;Qry?cuujqN~C%>2*o&2Z( zBmhfT0>g17!&TwN7NRh>3tb7GrEZ@TLvTP`T;aP>#^3Bgz`LJt4q48dc$}eF85-GW1B)pd4R9OZV~MH&neeAtjj#J9K&b89;d5m`-WIHX)#>Wlm0C;>f9#<^r!b z0nX~dE(SWJ8R*(1&s&WP<1`?QB%xf-idC?(kIFa@k9yASVq=ccn|ck<6lWTFYm3N; z1&H7X45|&tby>XwND+Et06$C5T9Uxsi7OSbP%bYdl6mW|O2Y6w$C)mb5fyaJ!%^D4 z)J&Km!jTx0vG&d2(oOA&W?mGgi!+o$v{gt#%cEfJ@qq;N?(R@erC>`#b}Wov-lCup zT*AHNR>yT?+0J3j?eD++#0_*O7t12=!Yk&|O1>rU79Y?qc(@(XR$|cwER;daBz8}p zE4eX1h}JSapfe+}C2V|OxHhVAuMXlO(|P34a+I?!Xr=+@jXjhTp;6cyb8{(*6M^oH z)WS=SVQa!ybuNxhJ_Tx;X#L!Y?mwNx?SQIvpw?>vNN&%~#7SHuEPrv_YvQ|N)#ZS> zn>w1r5(HJ!_1S&L4%;Mz49>+u){sQSRNTtR&?K?rF2of`^l~q2eINaD0`pIdA`Xf* z$-!nFd_=4gd6R`vnaF;2lTc5~YigJHXt9<|4&5n8gp@ZS_(b(20?|F zlaqJu-K%ocvsq$=HsY7XftQyb{v+#-E{h>b>(~5U6U{>sDV3t}3g;{|)1i^B`oNS( zuI6k&x+HR&*&3g$&ha=5`>o)*0?tY8;q)r*1%R-`rD(w_zJT1$JLDRB(^yKD1fuoL zq(`gU`j4w_h)9ueGZ* zIXzYkOQc8VuKY@|Dq1{V0q3=Wrh3|+4|KcYFyyE+8qOD@`rxYMlu@+3l1Yl7E{};1 zc9gXQVebpCLL)r2J?sTZ2odV{BHk^L(LmSr+%~RVT%_XBPe8$BO_Zbps3b8**`d|` z`YrKnw>bk5{C>;{vSbRxRv%gi0pQTk@tAkz6r!pi$|F-R@H@Yq*024zQZT9l=0Hc< z(WY#1J|2S#eTR3I6)Fm(3}Q)Gxn#ze0&S9#9V9*QhvT@{BnXgTfl_X9ZeXM zcsj=lNPTPb0{?d!n3_PHGi0{=qBz5txexx0A$lT4mU5DybKZ5Sb4``y&N zAj)gJ1IcMB@1Zsk_h&rbCtns1%?xTbTJhziI|<8j+N%~8UfR2}w|z|Jgvcm%<|`79 zooKYI0nPzZM{(goZT{PpwDHaU&-y=Zul-L!(*Lq=_xlU?KN``n-*3h5x8m^~c(Sqm z`>ObTRs5^2inib>Kh&*BzD)KCX~XOXWw|WMa*EfysV2iW7uni=o1uRa z`7<4}*<44b_8<_TIV^I};3~k6q}?U34Tkj=ne_cZTe0fHPrzk%x2T~j1RCTsD9+)4 zO)9D%?$Ne^ZN7H_lA5icqUAl=qil&Fr_m$2V!bb+FH z^e@|&)m#PN*tM_}9afIQFa@7~sV4nFC6u2YcSefbF7yqmbVdGUTrhV)#N|iR;>;SF 
z%gWxjw-3mCK4rR&35MzmSfQfpL4C}%Xx3sEKJfn&NXu_wK6y+zx`;aFto6EF_5`V~WqV$KhBiYxeL^*KsPa1iDe@ZBRoPdqQDRZBN-f-2lNCu=9eH5~ z*<)l4$Etx8uk$Dp`x09B%?T_wXmCH6l#h|hk?B1Tf*(bFdP4Q&r}(z~TSJ*qR&^nV z6YW0Rw`9EIv8TA3NwJQGsHSo@`#3o{eZ4iH0kC5y0I-~}UBpt@6C^f6Ptm0S^s3*} z_G=x-4#bt?hRc_Y05x;@*P6zKFKD6?&nzMbo7Mdsz!zjNt_Q=`%+0WWG3?o9^X{8- zfW?t}EL#Q4)a>Co5E;aE05yUpj_3g4MGeV-0|>=}KNb*W@0(ryeXrB?g@1z!>HQKe+80DX2V_fG6G%CW1*7(QZ_@7s zLRK5~Qv~nn?n#|CE`BjEEw)5S0wRhALb6lH0oGd7{u7uXWsMR}X(i1aNDxTws?qIz z754r<40i!B#PRFi#s=6-_hvP326a$+NeN!4Gz2)f;;lPyv{;PR00|?4E*Rty%FENx zk7B>i8GKJ^w+1}UMIBe)7zXamueF>WOVD3tl*kUo~Ne1eQkDzSA*j7;Dk3Pgw9&}{ovMlx=jMQq?OJ_=U*zriJpIJ zTR@!J7&qAMi^o(%oAJe_TWzB{RG*vGm(KPNG~V*lH>dEO@$!M9=&G8O{+3hlUvQ7Y z8!oAf?pLWj3lFWnHXZadbz0z>}`Uf`$i2QtRY!^;Qyol#E=lh{fy-SY(&(}j2 z<^8Tjb(z;c$g@|xAimvrXhotKKd|5Tv|h(o{@zLT^7UmtneAX)%^ae=mf&+c+tf{y ztd<}5`)+vGk(+1xQePCEKJiV4JAWzK^j1xJ)bm?Zj%QLPwYHo$cldxWS^nFzfzbA$ zG9=a{bcIRjmheq|_wMgl)uU+(^){(1wJn}luet!{eD?nSC9nFBZ}<76R`njqw>Lhs_Y+`U^D5v>;-SU9rDOY1fG1-UtleFqAbIUmxZr? zj=B1Kg8%rgiGHtupWrk9zAFBItcqM0Pkh~(Ink0a`v)JJ;viCF*Fni~`gYS%R zJXQ-$0kbsQ`_rGo#DlI=xsB9;G#I7+-O@{&Gm&%z0^2OQ=n=}694WIK#*57U;Y+Mw z{REKID`vyH10&ymLi!+bSp!Lr!LOqn$JOnBd;$H*Zq*hC9kutL!==*DKvso~)8;DK zzDYWA#Xm={)FmR(Pc#0bmj*(UOBzKvN=p*y-&|gZ$qW;BqosW0TH1gK`LvRQ<}Pr! 
zMN=&Ga5#i;244#pFyT0|&nakp`cffA|19<@NP5KJ00PD&{T0~{GIwtxUavIVU~&5ST#$)fw}yIEm9ZENi+>^b zG^c_WnuDrXi|8WT6)DirFnjn=2e1uzRTBX4jdZH>Lj!(S^=Kh8yQMRU)15Gp+#8L3&;e&+uSu-t zkN+Y1x#8!5N|@pWCw~0hf~toz9FjcJRUTtzh)>V(fU2_)of}f7C<8AC^gLcq~aXSgtmf zgNGg#|6Fi1dsHU4<^11dX&w!2ygqcOO{nEC!po}z^6ZmdWCklVAahd8=o{%zz%y)F7Cs?^T#bBgkxkC|f~M@C*s%Hz-asFI9;q$Z4lVkqE_EXx+b#zb_&o5>(<;5QWvo zBWh1)qyDGtj7)g)0o2Ar{Ngz7BBQ^5DNkzv}Co8OVEs$6m(TDZ$Y#C3Zy51 zK!DH)pAK#ElM~g4hrL=SN(L}CSVEd>Zb2#HzeENv={#j35_em6(y7D880T5vd_Ka2 zLFo2~tq&G*1(PzK0C73FT6Ass{zQb~hQQO$-^#G=4TLjU0+9upSF3!6+n9mH;m&a$ z%{0sA8K&4LCozBy!O-RTUVk@h zDx-h)I?hz2doiL2-;t6(A&QXS%oZhdC^QHmnYyGSE{|_&!0TKMNh8e#IzbGdhEOzf zgN4ZKr3vOnsGu(b@k++yO+{OtoPclua?>Ov_yKB)J7(v%tpIcrF(nhGU<9&fPfATW zl&HK0nm%`6B~TwbGdwLDL$8xl5XmGmA{24GFOD-_ETi%#C}%pVunS<0Bw#4t6tEL7 zQ$ft|t~MSzMMByr3Fw;;3d@0jvJHx0xVY%zK=UcoK;y&GO(Oo}=;*Rlgz~7X5-eW9 zw!@9jAK=+j>?4C0PX7BERUr_mTxH^eGW|U#MVW2b$h*Z<1-jLh78C=Qd3-tFvcFC4 z3;myAx)To8(cY8dXnlws_l0h0Th1ZHozNE>ljX2Ma{BonY1s>b_1@hM^Rv8u{qfr` zc1MoBeH%ZPHIc0et^DCa`#}#p_z*yZD5Z>4Z9KR(I{T4TF18kLp7a-Y7Z}{6$iu?zzu7{z=MmZVGP5&SdNAmuh8pE$8=qOYQc`~n!|)GN!Ijr%fmN80FmDXaYHaSqtp1AR0^)Z#pYa*NPJE$IpEc_x6 zI)xIN1_5_1L<}Lsp9b}ZiSG_u+6H@#ZxgvLwOh4xDha%r87-02h zN@%$e5BARxW}GD#mvjz#=fQKq`_9Nu^(z097JX2$x!*TkN0NJufZB)3Vs_bBLzw5j zsWHxdg=fTj^G_$<2P1D4hhl<4L255;^NFJ|n_7dV#!QUUJ{eHd#$!Y&4jVx^nGN)l z;3@MZ22_B9uY*n!Oe7ReHqAJ-jW#48pmAy!^{(_(p!r$>NIW!e5yA{~$!1>dyBl(F+e_IY zhRLrdrW)+aHvrT)#!gh7$j8dyGkg8*T8cjTiRey(^h7vNDdYfsH}_h=KvCs*5{x=A zyLc{M^gIXoV!xy#p4WlaL`lA22!IsP`Hm@WH}vO6u(+E)o3Qpg6b0Y{762MlLlv%!l3JG160)U-@pQRdjAe9r|;?k_bn|m}q#>U#>u*3J9+&;gJLsB_WoZh~!A$V+dvX8;X z^(H+!|6sqcBl?6IEID+(C{S7Pbtqh80+HpC#WN#0vOvBC+7zld;WNO&66CmnJyq5M zuTr6@1tO@``eJOiCS=$b$#LdDJM57wRJdbFRUh8~15UmKxV3{ z{Oc1+;`xg-zM(uH45;k0y>M{AWk_V;-=yLs1Y1QylNI(lV2@cUzjFY200KRQh+?)> zaE0nZzJuYM*&Cm zVvwy7GtF?=7-yh>tAB?%FkcR8M!~HPrNNgJfn7oMP3Ck9mAfFLxp{s=g9tQ4f)Uqv zOd<#BgbC7Ait?-GV{t6S4yRB`ETM@m><~&@TPi%pOFep6aZQ&n-<21F@w^5HEp!5X 
z(mzvDb3Qzw>rdT41omrAFgyR4-WXz;@k!)~bAeq1lJ#_Z&S0I$~2rQ^_TRu%97|Mc~R)=cg z2-+A7yv; zCsP<3kL^e9KblL3Cm(p{Fvxvtpeq+CIx< zBXHDcnk4v7(c<$&D5~w`K;CiId-wzSRih^Dk*1ej8~OJqkrB6$f{x-loGdpw4w8q- z4j4Iw%2{Vh8}O0jXI?O@nTS4<*Riz+n7!1meAT%4mvSRmq_DVB&U#nIz9#iarl9dk zU;8YMHfv2otRF1Mi5fwvzw0Z|&7na@!5|A?5c=@462aIEVbM+||6qpr0s_EpPD^2p zNi~xkugvt8wMGGSlWLTS%LDXKLgWp5U3`Js(J|hyfT)~vy++Q%nmsO3?+o)!nNE(p zx8U&-%){9!v3eMKxM0aEh>6jMvLEJgU!UIGVm+y|5%*9(1VJ!NaFak!jVyg44^&~I z7Sq&jA{|#h1hS_~bJ30ShHp2!{Oo(NlEQ3= z53e55@6MJUh%2qx>YHIl>H@Z@{6GgSGZ_SZAC|{gpGUnV3a%ASRln!nLQPoqPBRuh zcn!9!*1?E(IWYF#Is~4Rs{3&<`WxZlDvSK(FYucfE0!=V(9CN|vIv(R%`vd~Igoh` z($KuCSl*ptiNY<%sxTB;u4@P)!cv|2q2^{H)do8^!GG50W(jnx$Is5U-3(#G?o_WS zE0H!)x$x+(8gBX&bK&x$V~<%FcXpXeXhRW*a!QiU#zb;pw!S0 zArw+|qN}yHCBr?+S+mHntr0Pp)xTzAC~R`1#aCKi!PZe${K2grTT|hK8m7TDV+lF8 z2Ej!76!yXp1CKOHk?Foh{xXXt#$l#F*PvIM{Pt?tTxbp#BWz1#ECTS(kPmm6Vl{eM zMtzm!cJQO`@Yn_S=x{SdHQ*KbxsRbW9#VwxL;5_S8;QW>We-?zrlv@)@6b;EG3{<`=khTg6RZ&dr#zchbc?*63 zIm3wx27+{0_pa^aqEnnog*Ln7!Za27U83mD{I1(=JEYKRSQ%b`a(tqSviE{~_J za}%X-oCQI#2E&E?4l5Z}+6Z$dLh zluHU=6x^x>GYmXc09RJr|GaEuGi$I10KG2cMm-ZX!nOzbT@p^o7Bc#U!U)U5dGw{*hIk2HHgrlK&;3!)7z*K?BzHO%(GyXM7DIYod>FF z;ye@aG+aXz^WWlj-61$68ErtH7^Qj$vQuVU)M#V?y#`)|?`J;O8Yd*=_`Arj_x1k} x#rN( { - const target = event.detail.target; - if (!target) { - return; - } - target.innerHTML = `
Request failed.
`; -}); - -function selectedSeedIdFromUrl() { - const params = new URLSearchParams(window.location.search); - return params.get("seed_id"); -} - -function applySelectedSeed(seedId) { - const cards = document.querySelectorAll(".seed-card[data-seed-id]"); - cards.forEach((card) => { - const isSelected = seedId !== null && card.dataset.seedId === seedId; - card.classList.toggle("is-selected", isSelected); - card.setAttribute("aria-current", isSelected ? "true" : "false"); - }); -} - -let dashboardPollInFlight = false; -let seedDetailPollInFlight = false; - -function seedDetailUrl(seedId) { - const detail = document.getElementById("seed-detail"); - const template = detail?.dataset.seedDetailUrlTemplate; - if (!template || !seedId) { - return null; - } - return template.replace("__SEED_ID__", encodeURIComponent(seedId)); -} - -function isLogViewerOpen() { - const target = document.getElementById("seed-detail"); - if (!target) { - return false; - } - if (target.querySelector('[data-log-viewer-open="true"]')) { - return true; - } - if (target.querySelector("[data-log-stream]")) { - return true; - } - const seedId = selectedSeedIdFromUrl(); - return Boolean(seedId && localStorage.getItem(`seed-active-run-${seedId}`)); -} - -function dashboardBoardUrl() { - const board = document.getElementById("dashboard-board"); - const base = board?.dataset.dashboardPartialUrl; - if (!base) { - return null; - } - const seedId = selectedSeedIdFromUrl(); - if (!seedId) { - return base; - } - const separator = base.includes("?") ? 
"&" : "?"; - return `${base}${separator}seed_id=${encodeURIComponent(seedId)}`; -} - -function pollDashboardBoard() { - const target = document.getElementById("dashboard-board"); - const url = dashboardBoardUrl(); - if (!target || !url || dashboardPollInFlight) { - return; - } - dashboardPollInFlight = true; - htmx - .ajax("GET", url, { target: "#dashboard-board", swap: "outerHTML" }) - .finally(() => { - dashboardPollInFlight = false; - }); -} - -function pollSeedDetail() { - const seedId = selectedSeedIdFromUrl(); - const target = document.getElementById("seed-detail"); - const url = seedDetailUrl(seedId); - if (!target || !url || seedDetailPollInFlight) { - return; - } - if (isLogViewerOpen()) { - return; - } - seedDetailPollInFlight = true; - htmx.ajax("GET", url, { target: "#seed-detail", swap: "morph:innerHTML" }).finally(() => { - seedDetailPollInFlight = false; - }); -} - -function pollDashboard() { - if (document.hidden) { - return; - } - if (isLogViewerOpen()) { - return; - } - pollDashboardBoard(); - pollSeedDetail(); -} - -document.body.addEventListener("htmx:beforeRequest", (event) => { - const target = event.detail?.target; - if (!target || !isLogViewerOpen()) { - return; - } - // Pause daemon status auto-refresh while viewing logs. 
- if (target.id === "daemon-status-panel") { - event.preventDefault(); - } -}); - -document.body.addEventListener("click", (event) => { - const card = event.target.closest(".seed-card[data-seed-id]"); - if (!card) { - return; - } - applySelectedSeed(card.dataset.seedId); -}); - -document.body.addEventListener("htmx:afterSettle", (event) => { - const target = event.detail?.target; - if (target && target.id === "seed-detail") { - applySelectedSeed(selectedSeedIdFromUrl()); - } -}); - -window.addEventListener("popstate", () => { - applySelectedSeed(selectedSeedIdFromUrl()); -}); - -applySelectedSeed(selectedSeedIdFromUrl()); -window.setInterval(pollDashboard, 5000); - -const logStreamIntervals = new Map(); -const logStreamState = new Map(); -const ansiCtor = window.AnsiUp || window.ansi_up?.AnsiUp || null; -const ansiRenderer = ansiCtor ? new ansiCtor() : null; - -if (ansiRenderer && Object.prototype.hasOwnProperty.call(ansiRenderer, "escape_html")) { - ansiRenderer.escape_html = true; -} - -function stripAnsiSequences(value) { - // CSI: \x1b[...m, OSC: \x1b]...\x07 or \x1b\ ; then any remaining ESC controls. 
- return (value || "") - .replace(/\u001b\][^\u0007]*(?:\u0007|\u001b\\)/g, "") - .replace(/\u001b\[[0-?]*[ -/]*[@-~]/g, "") - .replace(/\u001b[@-_]/g, ""); -} - -function isRunComplete(status) { - return status === "succeeded" || status === "failed"; -} - -function updateLogStatus(runId, text) { - const nodes = document.querySelectorAll(`[data-log-status][data-run-id="${runId}"]`); - nodes.forEach((node) => { - node.textContent = text; - }); -} - -function updateCopyButtonState(runId, stream, enabled) { - const buttons = document.querySelectorAll( - `[data-log-copy][data-run-id="${runId}"][data-stream="${stream}"]` - ); - buttons.forEach((button) => { - button.disabled = !enabled; - }); -} - -function appendLogContent(pre, chunk) { - const currentRaw = pre.dataset.rawLog || ""; - const nextRaw = currentRaw + (chunk || ""); - - // Keep the viewer responsive for very large logs. - const maxChars = 200_000; - const trimmedRaw = - nextRaw.length > maxChars ? nextRaw.slice(nextRaw.length - maxChars) : nextRaw; - - pre.dataset.rawLog = trimmedRaw; - if (ansiRenderer) { - pre.innerHTML = ansiRenderer.ansi_to_html(trimmedRaw); - } else { - pre.textContent = stripAnsiSequences(trimmedRaw); - } - - pre.scrollTop = pre.scrollHeight; -} - -async function pollLogStream(pre) { - const runId = pre.dataset.runId; - const stream = pre.dataset.stream || "stdout"; - if (!runId) { - return; - } - - const state = logStreamState.get(pre) || { offset: 0, complete: false }; - const response = await fetch( - `/component-system/api/runs/${encodeURIComponent(runId)}/log?stream=${encodeURIComponent(stream)}&offset=${state.offset}` - ); - if (!response.ok) { - throw new Error(`Failed to fetch logs for ${runId}: ${response.status}`); - } - - const payload = await response.json(); - const chunk = payload.chunk || ""; - const nextOffset = Number(payload.next_offset || 0); - const complete = Boolean(payload.complete); - - appendLogContent(pre, chunk); - updateCopyButtonState(runId, stream, 
pre.textContent.length > 0); - logStreamState.set(pre, { offset: nextOffset, complete }); - - if (complete) { - updateLogStatus(runId, "Completed"); - const intervalId = logStreamIntervals.get(pre); - if (intervalId) { - clearInterval(intervalId); - logStreamIntervals.delete(pre); - } - return; - } - - if (chunk) { - updateLogStatus(runId, "Streaming..."); - } else { - updateLogStatus(runId, "Waiting for log output..."); - } -} - -function cleanupDetachedLogStreams() { - for (const [pre, intervalId] of logStreamIntervals.entries()) { - if (!document.body.contains(pre)) { - clearInterval(intervalId); - logStreamIntervals.delete(pre); - logStreamState.delete(pre); - } - } -} - -function initializeLogCopyButtons(root) { - root.querySelectorAll("[data-log-copy]").forEach((button) => { - if (button.dataset.logCopyReady === "true") { - return; - } - button.dataset.logCopyReady = "true"; - button.addEventListener("click", async () => { - const runId = button.dataset.runId; - if (!runId) { - return; - } - const stream = button.dataset.stream || "stdout"; - const pre = root.querySelector( - `[data-log-stream][data-run-id="${runId}"][data-stream="${stream}"]` - ); - if (!pre || !pre.textContent) { - return; - } - try { - await navigator.clipboard.writeText(pre.textContent); - const labelBefore = button.textContent; - button.textContent = "Copied!"; - setTimeout(() => { - button.textContent = labelBefore || "Copy"; - }, 1200); - } catch (error) { - console.error("Failed to copy log output", error); - } - }); - }); -} - -async function loadPromptContent(pre) { - const runId = pre.dataset.runId; - if (!runId) return; - try { - const response = await fetch( - `/component-system/api/runs/${encodeURIComponent(runId)}/prompt` - ); - if (!response.ok) return; - const payload = await response.json(); - const content = payload.content ?? 
""; - pre.textContent = content; - const copyBtn = document.querySelector( - `[data-prompt-copy][data-run-id="${runId}"]` - ); - if (copyBtn) copyBtn.disabled = false; - } catch (err) { - console.error("Failed to load prompt for run", runId, err); - } -} - -function initializePromptDisplays(root) { - root.querySelectorAll("[data-prompt-content]").forEach((pre) => { - if (pre.dataset.promptLoaded === "true") return; - pre.dataset.promptLoaded = "true"; - loadPromptContent(pre); - }); - root.querySelectorAll("[data-prompt-copy]").forEach((button) => { - if (button.dataset.promptCopyReady === "true") return; - button.dataset.promptCopyReady = "true"; - button.addEventListener("click", async () => { - const runId = button.dataset.runId; - if (!runId) return; - const pre = root.querySelector( - `[data-prompt-content][data-run-id="${runId}"]` - ); - if (!pre || !pre.textContent) return; - try { - await navigator.clipboard.writeText(pre.textContent); - const labelBefore = button.textContent; - button.textContent = "Copied!"; - setTimeout(() => { - button.textContent = labelBefore || "Copy"; - }, 1200); - } catch (err) { - console.error("Failed to copy prompt", err); - } - }); - }); -} - -function initializeLogStreams(root = document) { - cleanupDetachedLogStreams(); - initializeLogCopyButtons(root); - initializePromptDisplays(root); - - root.querySelectorAll("[data-log-stream]").forEach((pre) => { - if (pre.dataset.logStreamReady === "true") { - return; - } - pre.dataset.logStreamReady = "true"; - const runStatus = pre.dataset.runStatus || ""; - const runId = pre.dataset.runId; - if (!runId) { - return; - } - - if (isRunComplete(runStatus)) { - updateLogStatus(runId, "Completed"); - } else { - updateLogStatus(runId, "Connecting..."); - } - - const runPoll = async () => { - try { - await pollLogStream(pre); - } catch (error) { - updateLogStatus(runId, "Log fetch failed"); - console.error(error); - } - }; - - runPoll(); - const intervalId = window.setInterval(runPoll, 
2000); - logStreamIntervals.set(pre, intervalId); - }); -} - -function observeLogStreamMounts() { - const observer = new MutationObserver((mutations) => { - for (const mutation of mutations) { - if (mutation.type !== "childList" || mutation.addedNodes.length === 0) { - continue; - } - for (const node of mutation.addedNodes) { - if (!(node instanceof Element)) { - continue; - } - if ( - node.matches?.("[data-log-stream], [data-log-copy], [data-prompt-content], [data-prompt-copy]") || - node.querySelector?.("[data-log-stream], [data-log-copy], [data-prompt-content], [data-prompt-copy]") - ) { - initializeLogStreams(node); - return; - } - } - } - }); - - observer.observe(document.body, { childList: true, subtree: true }); -} - -document.body.addEventListener("htmx:afterSettle", (event) => { - const target = event.detail?.target; - if (!target) { - return; - } - if (target.id === "seed-detail") { - initializeLogStreams(target); - } -}); - -initializeLogStreams(document); -observeLogStreamMounts(); diff --git a/component_system/web/templates/.ipynb_checkpoints/dashboard-checkpoint.html b/component_system/web/templates/.ipynb_checkpoints/dashboard-checkpoint.html deleted file mode 100644 index 82b1056da..000000000 --- a/component_system/web/templates/.ipynb_checkpoints/dashboard-checkpoint.html +++ /dev/null @@ -1,124 +0,0 @@ -{% extends "base.html" %} -{% block title %}Component System Dashboard{% endblock %} -{% block content %} -
-
-
-

Create Seed

-

Start a new seed from a prompt. Baseline branch is selected here; each seed has one branch (seed id).

-
-
-
- - -
-
-

One branch per seed: the seed id is the branch name (e.g. seed-a1b2c3).

- - - -
- {% if dashboard.setup_error %} -
-

Git setup required

-

{{ dashboard.setup_error }}

-
- {% endif %} - {% with daemon_status=dashboard.daemon_status %} - {% include "partials/daemon_status.html" %} - {% endwith %} -
-

Baseline branches

-

Per-branch metrics (last val_bpb, promoted seed). Workflow-managed, read-only: component_system/baseline_branches.json (per-branch mapping), component_system/baseline_metrics.json (baseline run metrics).

- {% if dashboard.baseline_metrics_by_branch %} -
- {% for branch, m in dashboard.baseline_metrics_by_branch.items() %} -
-
{{ branch }}
-
val_bpb {{ "%.6f"|format(m.get('last_val_bpb')) if m.get('last_val_bpb') is not none else "—" }} · {{ m.get('promoted_branch') or "—" }}{% if m.get('commit_sha') %} · {{ m.get('commit_sha')[:7] }}{% endif %}
-
- {% endfor %} -
- {% else %} -

No baseline metrics yet. Run the first DCA to establish baseline for a branch.

- {% endif %} -
-
-

Direct Code Agent

-

Run the configured code agent from the project root with a dedicated single-worker executor. New runs appear in the Do-Check-Action column.

-
- - - -
-
-
-
- {% include "partials/dashboard_board.html" %} -
- {% if detail %} - {% with - seed=detail.seed, - runs=detail.runs, - events=detail.events, - baseline_metrics_for_branch=detail.baseline_metrics_for_branch, - setup_error=detail.setup_error, - daemon_status=dashboard.daemon_status - %} - {% include "partials/seed_detail.html" %} - {% endwith %} - {% else %} -
- Select a seed to inspect its worktree, plan, runs, logs, and promotion history. -
- {% endif %} -
-
-
-
-{% endblock %} diff --git a/component_system/web/templates/partials/.ipynb_checkpoints/seed_detail-checkpoint.html b/component_system/web/templates/partials/.ipynb_checkpoints/seed_detail-checkpoint.html deleted file mode 100644 index 93f5439cb..000000000 --- a/component_system/web/templates/partials/.ipynb_checkpoints/seed_detail-checkpoint.html +++ /dev/null @@ -1,326 +0,0 @@ -
-
-
- -

{{ seed.seed_id }}

- {% if can_edit_prompt %} -
- - - -
- {% else %} -

{{ seed.prompt }}

- {% endif %} -
-
- {% if seed.ralph_loop_enabled %} -
- -
- {% else %} -
- -
- {% endif %} -
- -
-
- -
-
-
- - {% if setup_error %} -
- {{ setup_error }} -
- {% endif %} - -
-
-
- - {{ seed.status.value|replace('_', ' ')|title }} -
-

Ralph loop: {% if seed.ralph_loop_enabled %}enabled{% else %}disabled{% endif %}

-

Latest signal: {% if seed.latest_signal %}{{ seed.latest_signal }}{% else %}{% endif %}

-
-
- -
-
Baseline
{{ seed.baseline_branch }}
-
Branch
{{ seed.seed_id }}
-
-
-
- -
-
Seed worktree
{{ seed.worktree_path or "—" }}
-
-
-
- -
-
-
-

Plan

- {% if seed.plan %} -
-
- -

{{ seed.plan.title }}

-
-
- -

{{ seed.plan.target_component }}

-
-
- -

{{ seed.plan.description }}

-
- {% if seed.plan.commit_sha %} -
- -

{{ seed.plan.commit_sha }}

-
- {% endif %} -
- {% else %} -

No plan yet. Click Run Plan to queue the task; the plan is generated when the daemon runs it.

- {% endif %} -
- -
-
-

Runs

-
- {% if runs and seed.status.value in ['queued', 'planning'] %} -

Runs stay queued until the daemon is running. Start: uv run component_system/run.py

- {% endif %} -
- {% if runs %} - {% for run in runs %} -
-
-
-

{% if run.stage.value == 'p' %}Plan{% else %}{{ run.stage.value|upper }}{% endif %} · {{ run.status.value }}

-

{{ run.run_id }}

-
-
- {% if run.signal %} - {{ run.signal }} - {% endif %} - -
-
- {% if run.metrics %} -
- {% for key, value in run.metrics.items() %} -
-
{{ key }}
-
{{ value }}
-
- {% endfor %} -
- {% endif %} -
- - {% endfor %} - {% else %} -

No runs yet. Use Run Plan to start.

- {% endif %} -
-
-
- -
-
-

Latest Metrics

- {% if seed.latest_metrics %} -
- {% for key, value in seed.latest_metrics.items() %} -
- -
{{ value }}
-
- {% endfor %} -
- {% else %} -

Metrics appear here after Do-Check-Action runs the training entrypoint.

- {% endif %} -
- -
-
-

Timeline

- -
-
- {% if events %} - {% for event in events %} -
-

{{ event.message }}

- {% if event.commit_sha %} -

commit: {{ event.commit_sha }}

- {% endif %} - {% if event.target_branch %} -

target branch: {{ event.target_branch }}

- {% endif %} -

{{ event.kind }} · {{ event.created_at_human }}

-
- {% endfor %} - {% else %} -

No events yet.

- {% endif %} -
-
-
-
-
From e0e0e47dd9c86a6eb5c62a525bddf83ccbe964ee Mon Sep 17 00:00:00 2001 From: Laurence Date: Thu, 12 Mar 2026 22:16:46 +0800 Subject: [PATCH 13/24] feat: improve worktree handling and merge resolution - Add prompt_audit directory to .gitignore - Enhance worktree detection in run.py with separate workflows for worktree vs root modes - Add merge resolution logic and temp branch handling in workflow.py - Improve DCA stage with better baseline promotion tracking --- .gitignore | 3 + component_system/run.py | 46 +++-- component_system/services/workflow.py | 242 +++++++++++++++++++------- 3 files changed, 212 insertions(+), 79 deletions(-) diff --git a/.gitignore b/.gitignore index a3fb245de..e6555d1a5 100644 --- a/.gitignore +++ b/.gitignore @@ -17,6 +17,9 @@ queue/ CLAUDE.md AGENTS.md +# Prompt audit output (generated by tests) +component_system/prompt_audit/ + # Experimental code/artifacts dev/ diff --git a/component_system/run.py b/component_system/run.py index 9f6c274a2..b7f989746 100644 --- a/component_system/run.py +++ b/component_system/run.py @@ -480,7 +480,8 @@ def _build_prompt(stage: str, task: dict[str, Any], task_path: Path) -> str: worktree_dir = Path(agent_cwd) # Worktree runs must stay entirely within the copied seed workspace to avoid external_directory requests. - if worktree_dir.resolve() != PROJECT_ROOT.resolve(): + in_worktree = worktree_dir.resolve() != PROJECT_ROOT.resolve() + if in_worktree: context_protocol = " - component_system/protocol.md" docs = "\n".join(f" - component_system/{doc}" for doc in STAGE_DOCS[stage]) task_block = ( @@ -493,12 +494,14 @@ def _build_prompt(stage: str, task: dict[str, Any], task_path: Path) -> str: "Use only paths relative to your current working directory. 
" "Do not request access to absolute paths, parent-directory paths, or files outside the worktree.\n" ) + scope_note = "Do not edit files outside the worktree unless the prompt explicitly requires it.\n\n" else: context_protocol = " - component_system/protocol.md" docs = "\n".join(f" - component_system/{doc}" for doc in STAGE_DOCS[stage]) task_path_rel = f" - {rel_task}" task_block = f"Task file:\n{task_path_rel}\n\nTask content:\n{task_json}\n\n" worktree_note = "Your working directory is the project root.\n" + scope_note = "Do not edit files outside your current directory (project root) unless the prompt explicitly requires it.\n\n" required_context = ( "Required context (read first; paths relative to your cwd):\n" @@ -517,26 +520,43 @@ def _build_prompt(stage: str, task: dict[str, Any], task_path: Path) -> str: f"{baseline_files_note}" f"{task_block}" f"{worktree_note}" - "Do not edit files outside the worktree unless the prompt explicitly requires it.\n\n" + f"{scope_note}" ) if stage == "p": + if in_worktree: + p_workflow = ( + "Workflow:\n" + "1. Refine the seed prompt into a concrete implementation idea.\n" + "2. Implement the first generated version of that idea in the provided worktree.\n" + "3. Create a git commit in the seed branch (current branch in the worktree).\n" + "4. Print a JSON summary between these exact markers:\n" + "AUTORESEARCH_P_SUMMARY_BEGIN\n" + '{"idea":"...","target_component":"model|optimizer|trainer","description":"...","source_refs":["..."],"commit_sha":"...","completed_at":"YYYY-MM-DD HH:MM:SS"}\n' + "AUTORESEARCH_P_SUMMARY_END\n" + "One branch per seed: you are already on the seed branch in the worktree.\n" + "Do not merge branches; only the DCA stage may trigger a merge into baseline.\n" + ) + else: + p_workflow = ( + "Workflow:\n" + "1. Refine the seed prompt into a concrete implementation idea.\n" + "2. Implement the first generated version of that idea in the current directory (project root).\n" + "3. 
Create a git commit on the current branch.\n" + "4. Print a JSON summary between these exact markers:\n" + "AUTORESEARCH_P_SUMMARY_BEGIN\n" + '{"idea":"...","target_component":"model|optimizer|trainer","description":"...","source_refs":["..."],"commit_sha":"...","completed_at":"YYYY-MM-DD HH:MM:SS"}\n' + "AUTORESEARCH_P_SUMMARY_END\n" + "One branch per seed: you are in the project root; use the current branch for your commit.\n" + "Do not merge branches; only the DCA stage may trigger a merge into baseline.\n" + ) return header + ( "You are the P stage.\n\n" "## Read results.tsv first (avoid idea duplication)\n" "Before choosing a hypothesis, read `results.tsv` in your cwd if it exists. " "Use it to avoid proposing ideas already tried or discarded; only repeat an idea if you have a clear new angle (e.g. different implementation or target component). " "See component_system/PDCA-PLAN.md for full guidance.\n\n" - "Workflow:\n" - "1. Refine the seed prompt into a concrete implementation idea.\n" - "2. Implement the first generated version of that idea in the provided worktree.\n" - "3. Create a git commit in the seed branch (current branch in the worktree).\n" - "4. 
Print a JSON summary between these exact markers:\n" - "AUTORESEARCH_P_SUMMARY_BEGIN\n" - '{"idea":"...","target_component":"model|optimizer|trainer","description":"...","source_refs":["..."],"commit_sha":"...","completed_at":"YYYY-MM-DD HH:MM:SS"}\n' - "AUTORESEARCH_P_SUMMARY_END\n" - "One branch per seed: you are already on the seed branch in the worktree.\n" - "Do not merge branches; only the DCA stage may trigger a merge into baseline.\n" + f"{p_workflow}" ) if stage == "dca": sync_resolution = task.get("sync_resolution") is True @@ -737,7 +757,7 @@ def eligible(payload: dict) -> bool: merge_resolution=task.get("merge_resolution") is True, ) if not run.summary.get("metrics_recovery_queued"): - description = run.summary.get("notes") or run.summary.get("idea") or seed_id + description = run.summary.get("idea") or run.summary.get("notes") or seed_id _append_results_tsv(seed_id, run.metrics, run.signal or "error", str(description)) _regenerate_progress_png() if salvaged_dca: diff --git a/component_system/services/workflow.py b/component_system/services/workflow.py index 6842b1da1..3fab2a1e2 100644 --- a/component_system/services/workflow.py +++ b/component_system/services/workflow.py @@ -1,10 +1,11 @@ from __future__ import annotations import json -from typing import Any import re +import shutil import subprocess from pathlib import Path +from typing import Any from component_system.config import DEFAULT_BASELINE_BRANCH, PROMOTION_THRESHOLD from component_system.domain.models import ( @@ -246,6 +247,14 @@ def _recover_checked_out_worktree_conflict( def commit_sha(self, ref: str) -> str: return self._run_git("rev-parse", "--short", ref) + def _current_branch(self, cwd: Path | None = None) -> str | None: + """Return the current branch name, or None if detached HEAD.""" + try: + branch = self._run_git("branch", "--show-current", cwd=cwd) + return branch.strip() or None + except GitCommandError: + return None + def head_sha_at(self, cwd: Path) -> str: """Return the 
short commit SHA of HEAD in the given worktree directory.""" return self._run_git("rev-parse", "--short", "HEAD", cwd=cwd) @@ -279,25 +288,77 @@ def promote_seed_branch( ) -> str: """Merge the seed's branch (seed_id) into the target branch. Only DCA Action may call this; Plan must never merge. If target_branch is None, use seed.baseline_branch (e.g. for normal seed promotion). For __baseline__ completion, - pass the first user seed's selected branch so the merge goes there instead of a fixed config value.""" + pass the first user seed's selected branch so the merge goes there instead of a fixed config value. + When the target branch is already checked out (e.g. master in the main repo), we merge in place to avoid + 'cannot force update the branch used by worktree' from creating a second worktree on the same branch.""" merge_into = target_branch if target_branch is not None else seed.baseline_branch + repo_root = self.repo_root() + current = self._current_branch(cwd=repo_root) + + def do_merge(cwd: Path | None) -> None: + self._run_git("merge", "--no-edit", seed.seed_id, cwd=cwd) + + def merge_already_up_to_date(cwd: Path | None) -> bool: + try: + self._run_git( + "merge-base", "--is-ancestor", seed.seed_id, "HEAD", cwd=cwd + ) + return True + except GitCommandError: + return False + + if current == merge_into: + # Target branch is already checked out (e.g. main repo on master). Merge in place. + try: + do_merge(cwd=repo_root) + except GitCommandError as merge_err: + if merge_already_up_to_date(cwd=repo_root): + return self.commit_sha(merge_into) + raise merge_err + return self.commit_sha(merge_into) + + # Target is not current branch: use a temporary worktree with a temp branch so we don't + # try to check out the same branch in two worktrees (Git forbids that). 
baseline_worktree = WORKTREE_ROOT / "baseline" + temp_branch = f"__promote_{merge_into}__" if baseline_worktree.exists(): try: self._run_git("worktree", "remove", "--force", str(baseline_worktree)) except GitCommandError: pass + if baseline_worktree.exists(): + shutil.rmtree(baseline_worktree, ignore_errors=True) self._run_git( "worktree", "add", "--force", "-B", - merge_into, + temp_branch, str(baseline_worktree), merge_into, + cwd=repo_root, ) - self._run_git("merge", "--no-edit", seed.seed_id, cwd=baseline_worktree) - return self.commit_sha(merge_into) + try: + try: + do_merge(cwd=baseline_worktree) + except GitCommandError as merge_err: + if merge_already_up_to_date(cwd=baseline_worktree): + result_sha = self._run_git("rev-parse", "HEAD", cwd=baseline_worktree) + self._run_git("branch", "-f", merge_into, result_sha, cwd=repo_root) + return self.commit_sha(merge_into) + raise merge_err + result_sha = self._run_git("rev-parse", "HEAD", cwd=baseline_worktree) + self._run_git("branch", "-f", merge_into, result_sha, cwd=repo_root) + return self.commit_sha(merge_into) + finally: + try: + self._run_git("worktree", "remove", "--force", str(baseline_worktree)) + except GitCommandError: + pass + try: + self._run_git("branch", "-D", temp_branch) + except GitCommandError: + pass class WorkflowService: @@ -1245,6 +1306,13 @@ def finish_dca_run( ) return run target_branch = self._first_user_seed_baseline_branch() or seed.baseline_branch + _idea = summary.get("idea") or summary.get("notes") + if isinstance(_idea, str) and _idea.strip(): + baseline_promoted_idea = _idea[:80] + elif _idea: + baseline_promoted_idea = str(_idea)[:80] + else: + baseline_promoted_idea = "Initial baseline adaptation" # Only positive_signal is merged into the per-seed baseline branch; record baseline value otherwise. 
if signal != "positive_signal": self.metrics_repo.append_baseline_run(target_branch, metrics["val_bpb"]) @@ -1259,6 +1327,32 @@ def finish_dca_run( metrics=metrics, ) return run + # Merge-resolution DCA: agent already ran merge (or "Already up to date"). Treat as pass; do not run promote_seed_branch again. + if merge_resolution: + effective_sha = self._commit_sha_for_branch(target_branch) + self.metrics_repo.append_promotion_for_branch( + target_branch, + { + "val_bpb": metrics["val_bpb"], + "promoted_branch": seed.seed_id, + "promoted_idea": baseline_promoted_idea, + "promoted_at": summary.get("completed_at"), + "commit_sha": effective_sha, + }, + ) + seed.status = SeedStatus.passed + self.run_repo.save(run) + self.seed_repo.save(seed) + self.seed_repo.append_event( + seed.seed_id, + "dca.completed", + f"Merge resolution DCA completed; __baseline__ merged or already up to date with {target_branch}.", + signal=signal, + metrics=metrics, + commit_sha=effective_sha, + ) + self._release_seeds_waiting_for_baseline(target_branch) + return run try: merge_commit_sha = self.git_service.promote_seed_branch(seed, target_branch=target_branch) effective_sha = ( @@ -1271,7 +1365,7 @@ def finish_dca_run( { "val_bpb": metrics["val_bpb"], "promoted_branch": seed.seed_id, - "promoted_idea": "Initial baseline adaptation", + "promoted_idea": baseline_promoted_idea, "promoted_at": summary.get("completed_at"), "commit_sha": effective_sha, }, @@ -1328,7 +1422,7 @@ def finish_dca_run( { "val_bpb": metrics["val_bpb"], "promoted_branch": seed.seed_id, - "promoted_idea": "Initial baseline adaptation", + "promoted_idea": baseline_promoted_idea, "promoted_at": summary.get("completed_at"), "commit_sha": effective_sha, }, @@ -1347,13 +1441,9 @@ def finish_dca_run( return run if terminal_status is SeedStatus.promoted: # Merge seed into baseline first on positive signal; then update metrics/state. 
- try: - merge_commit_sha = self.git_service.promote_seed_branch(seed) - effective_sha = ( - merge_commit_sha - if (isinstance(merge_commit_sha, str) and merge_commit_sha.strip()) - else self._commit_sha_for_branch(seed.baseline_branch) - ) + # Merge-resolution DCA: agent already ran merge (or "Already up to date"). Treat as pass; do not run promote_seed_branch again. + if merge_resolution: + effective_sha = self._commit_sha_for_branch(seed.baseline_branch) self.metrics_repo.append_promotion_for_branch( seed.baseline_branch, { @@ -1365,71 +1455,91 @@ def finish_dca_run( }, ) seed.status = terminal_status - event_message = "DCA succeeded and seed branch was promoted into baseline." - except GitCommandError as merge_err: - tried_sha = commit_sha or "" + event_message = "Merge resolution DCA completed; seed merged or already up to date with baseline." + else: try: - tried_sha = self.git_service.commit_sha(seed.seed_id) - except GitCommandError: - pass - self.seed_repo.append_event( - seed.seed_id, - "dca.merge_failed", - ( - f"Merge into baseline failed: {merge_err}. Queued a new DCA run to resolve conflicts." - if not merge_resolution - else f"Merge into baseline failed again after a conflict-resolution DCA: {merge_err}. " - "Ralph can proceed with the next Plan run." 
- ), - commit_sha=tried_sha or None, - target_branch=seed.baseline_branch, - ) - if not merge_resolution: - self.queue_dca( + merge_commit_sha = self.git_service.promote_seed_branch(seed) + effective_sha = ( + merge_commit_sha + if (isinstance(merge_commit_sha, str) and merge_commit_sha.strip()) + else self._commit_sha_for_branch(seed.baseline_branch) + ) + self.metrics_repo.append_promotion_for_branch( + seed.baseline_branch, + { + "val_bpb": metrics["val_bpb"], + "promoted_branch": seed.seed_id, + "promoted_idea": seed.plan.title if seed.plan else seed.prompt[:80], + "promoted_at": summary.get("completed_at"), + "commit_sha": effective_sha, + }, + ) + seed.status = terminal_status + event_message = "DCA succeeded and seed branch was promoted into baseline." + except GitCommandError as merge_err: + tried_sha = commit_sha or "" + try: + tried_sha = self.git_service.commit_sha(seed.seed_id) + except GitCommandError: + pass + self.seed_repo.append_event( seed.seed_id, - merge_resolution=True, - last_metrics=metrics, - last_summary=summary, + "dca.merge_failed", + ( + f"Merge into baseline failed: {merge_err}. Queued a new DCA run to resolve conflicts." + if not merge_resolution + else f"Merge into baseline failed again after a conflict-resolution DCA: {merge_err}. " + "Ralph can proceed with the next Plan run." + ), + commit_sha=tried_sha or None, + target_branch=seed.baseline_branch, ) - seed.status = SeedStatus.dca_queued + if not merge_resolution: + self.queue_dca( + seed.seed_id, + merge_resolution=True, + last_metrics=metrics, + last_summary=summary, + ) + seed.status = SeedStatus.dca_queued + seed.updated_at = now_ts() + self.seed_repo.save(seed) + self.run_repo.save(run) + self.seed_repo.append_event( + seed.seed_id, + "dca.completed", + "DCA run completed but merge failed; conflict-resolution DCA queued.", + signal=signal, + metrics=metrics, + ) + return run + # Resolution run also failed to merge; avoid infinite resolution loop and continue Ralph. 
+ seed.status = SeedStatus.generated seed.updated_at = now_ts() self.seed_repo.save(seed) self.run_repo.save(run) self.seed_repo.append_event( seed.seed_id, "dca.completed", - "DCA run completed but merge failed; conflict-resolution DCA queued.", + "Conflict-resolution DCA completed but merge still failed; proceeding to next Plan run.", signal=signal, metrics=metrics, ) + if seed.ralph_loop_enabled: + try: + self.queue_p(seed.seed_id) + self.seed_repo.append_event( + seed.seed_id, + "ralph.requeued", + "Ralph loop queued the next Plan run after unresolved merge conflict.", + ) + except (RuntimeError, GitCommandError) as exc: + self.seed_repo.append_event( + seed.seed_id, + "ralph.requeue_failed", + f"Ralph loop could not queue the next Plan run after unresolved merge conflict: {exc}", + ) return run - # Resolution run also failed to merge; avoid infinite resolution loop and continue Ralph. - seed.status = SeedStatus.generated - seed.updated_at = now_ts() - self.seed_repo.save(seed) - self.run_repo.save(run) - self.seed_repo.append_event( - seed.seed_id, - "dca.completed", - "Conflict-resolution DCA completed but merge still failed; proceeding to next Plan run.", - signal=signal, - metrics=metrics, - ) - if seed.ralph_loop_enabled: - try: - self.queue_p(seed.seed_id) - self.seed_repo.append_event( - seed.seed_id, - "ralph.requeued", - "Ralph loop queued the next Plan run after unresolved merge conflict.", - ) - except (RuntimeError, GitCommandError) as exc: - self.seed_repo.append_event( - seed.seed_id, - "ralph.requeue_failed", - f"Ralph loop could not queue the next Plan run after unresolved merge conflict: {exc}", - ) - return run elif terminal_status is SeedStatus.failed: seed.status = terminal_status event_message = ( From 5a9c5ec53346b6904e793332f70849bca733348f Mon Sep 17 00:00:00 2001 From: Laurence Date: Sat, 14 Mar 2026 11:32:14 +0800 Subject: [PATCH 14/24] refactor: transition from component_system to pdca_system - Update .gitignore to reflect new 
directory structure for PDCA system - Modify README to reference the PDCA system instead of the component system - Add pytest configuration for PDCA tests in pyproject.toml - Remove obsolete component_system files including config, entrypoint, and training modules - Clean up related documentation and templates to align with the new structure --- .gitignore | 13 +- README.md | 2 +- component_system/PDCA-DO-CHECK-ACTION.md | 75 --- component_system/README.md | 99 --- component_system/components/model.py | 380 ----------- component_system/components/optimizer.py | 179 ----- component_system/components/trainer.py | 191 ------ component_system/config.py | 31 - .../SEED_LIFECYCLE_AND_CONCURRENCY_REVIEW.md | 177 ----- component_system/entrypoint.py | 18 - component_system/training/mainline.py | 82 --- .../partials/seed_detail_runs_content.html | 148 ----- .../seed_detail_timeline_content.html | 16 - pdca_system/PDCA-Check-Action.md | 75 +++ .../PDCA-Plan-Do.md | 123 ++-- pdca_system/README.md | 104 +++ pdca_system/config.py | 30 + .../run.py => pdca_system/daemon.py | 418 +++++++----- .../domain/models.py | 34 +- pdca_system/logging_utils.py | 44 ++ .../package.json | 2 +- .../postcss.config.js | 0 {component_system => pdca_system}/protocol.md | 146 ++-- .../repositories/state.py | 37 +- pdca_system/requirements.txt | 8 + .../run_arxiv.py | 8 +- .../scripts}/clean_history.py | 56 +- .../services/workflow.py | 564 ++++++++-------- .../tailwind.config.js | 0 {component_system => pdca_system}/task.py | 24 +- pdca_system/tests/__init__.py | 1 + pdca_system/tests/test_daemon.py | 306 +++++++++ pdca_system/tests/test_helpers.py | 75 +++ pdca_system/tests/test_logging_utils.py | 39 ++ pdca_system/tests/test_prompt_audit.py | 177 +++++ pdca_system/tests/test_web.py | 114 ++++ pdca_system/tests/test_workflow.py | 629 ++++++++++++++++++ .../tests/test_workflow_git_service.py | 210 ++++++ pdca_system/tests/test_workflow_ralph.py | 412 ++++++++++++ {component_system => 
pdca_system}/web/app.py | 22 +- .../web/routes.py | 24 +- .../web/static/app.css | 2 +- .../web/static/app.js | 4 +- .../web/static/tailwind.input.css | 0 .../web/templates/base.html | 6 +- .../web/templates/dashboard.html | 10 +- .../web/templates/partials/action_error.html | 0 .../web/templates/partials/daemon_status.html | 4 +- .../templates/partials/dashboard_board.html | 4 +- .../web/templates/partials/seed_detail.html | 18 +- .../partials/seed_detail_response.html | 0 .../templates/partials/seed_runs_inner.html | 8 +- .../partials/seed_timeline_inner.html | 0 .../web/templates/seed_detail_page.html | 0 pyproject.toml | 3 + 55 files changed, 3088 insertions(+), 2064 deletions(-) delete mode 100644 component_system/PDCA-DO-CHECK-ACTION.md delete mode 100644 component_system/README.md delete mode 100644 component_system/components/model.py delete mode 100644 component_system/components/optimizer.py delete mode 100644 component_system/components/trainer.py delete mode 100644 component_system/config.py delete mode 100644 component_system/docs/SEED_LIFECYCLE_AND_CONCURRENCY_REVIEW.md delete mode 100644 component_system/entrypoint.py delete mode 100644 component_system/training/mainline.py delete mode 100644 component_system/web/templates/partials/seed_detail_runs_content.html delete mode 100644 component_system/web/templates/partials/seed_detail_timeline_content.html create mode 100644 pdca_system/PDCA-Check-Action.md rename component_system/PDCA-PLAN.md => pdca_system/PDCA-Plan-Do.md (61%) create mode 100644 pdca_system/README.md create mode 100644 pdca_system/config.py rename component_system/run.py => pdca_system/daemon.py (63%) rename {component_system => pdca_system}/domain/models.py (65%) create mode 100644 pdca_system/logging_utils.py rename {component_system => pdca_system}/package.json (88%) rename {component_system => pdca_system}/postcss.config.js (100%) rename {component_system => pdca_system}/protocol.md (56%) rename {component_system => 
pdca_system}/repositories/state.py (71%) create mode 100644 pdca_system/requirements.txt rename {component_system => pdca_system}/run_arxiv.py (94%) rename {scripts => pdca_system/scripts}/clean_history.py (88%) rename {component_system => pdca_system}/services/workflow.py (80%) rename {component_system => pdca_system}/tailwind.config.js (100%) rename {component_system => pdca_system}/task.py (93%) create mode 100644 pdca_system/tests/__init__.py create mode 100644 pdca_system/tests/test_daemon.py create mode 100644 pdca_system/tests/test_helpers.py create mode 100644 pdca_system/tests/test_logging_utils.py create mode 100644 pdca_system/tests/test_prompt_audit.py create mode 100644 pdca_system/tests/test_web.py create mode 100644 pdca_system/tests/test_workflow.py create mode 100644 pdca_system/tests/test_workflow_git_service.py create mode 100644 pdca_system/tests/test_workflow_ralph.py rename {component_system => pdca_system}/web/app.py (70%) rename {component_system => pdca_system}/web/routes.py (95%) rename {component_system => pdca_system}/web/static/app.css (96%) rename {component_system => pdca_system}/web/static/app.js (98%) rename {component_system => pdca_system}/web/static/tailwind.input.css (100%) rename {component_system => pdca_system}/web/templates/base.html (86%) rename {component_system => pdca_system}/web/templates/dashboard.html (90%) rename {component_system => pdca_system}/web/templates/partials/action_error.html (100%) rename {component_system => pdca_system}/web/templates/partials/daemon_status.html (85%) rename {component_system => pdca_system}/web/templates/partials/dashboard_board.html (91%) rename {component_system => pdca_system}/web/templates/partials/seed_detail.html (93%) rename {component_system => pdca_system}/web/templates/partials/seed_detail_response.html (100%) rename {component_system => pdca_system}/web/templates/partials/seed_runs_inner.html (94%) rename {component_system => 
pdca_system}/web/templates/partials/seed_timeline_inner.html (100%) rename {component_system => pdca_system}/web/templates/seed_detail_page.html (100%) diff --git a/.gitignore b/.gitignore index e6555d1a5..80e96c378 100644 --- a/.gitignore +++ b/.gitignore @@ -18,7 +18,7 @@ CLAUDE.md AGENTS.md # Prompt audit output (generated by tests) -component_system/prompt_audit/ +pdca_system/prompt_audit/ # Experimental code/artifacts dev/ @@ -26,9 +26,10 @@ dev/ # Results file results.tsv -# Component-system runtime artifacts (logs, queue, state, worktrees under history/) -component_system/history/ -component_system/baseline_branches.json -component_system/baseline_metrics.json +# PDCA-system runtime artifacts (logs, queue, state, worktrees under history/) +pdca_system/history/ +pdca_system/baseline_branches.json +pdca_system/baseline_metrics.json *.log -.ipynb_checkpoints/ \ No newline at end of file +.ipynb_checkpoints/ +pdca_system/scripts/migrate_history_to_pdca.py diff --git a/README.md b/README.md index 15ee32f53..523935819 100644 --- a/README.md +++ b/README.md @@ -49,7 +49,7 @@ Hi have a look at program.md and let's kick off a new experiment! let's do the s The `program.md` file is essentially a super lightweight "skill". -For the component-system workflow, see `component_system/README.md`. +For the PDCA-system workflow, see `pdca_system/README.md`. ## Project structure diff --git a/component_system/PDCA-DO-CHECK-ACTION.md b/component_system/PDCA-DO-CHECK-ACTION.md deleted file mode 100644 index d90418c9c..000000000 --- a/component_system/PDCA-DO-CHECK-ACTION.md +++ /dev/null @@ -1,75 +0,0 @@ -# DCA — Do, Check, Action - -## Responsibility -Take the generated plan from P, adapt/fix it in the seed worktree, -run the canonical training entrypoint, evaluate results against baseline, and -promote only when the signal is positive. Do not propose new ideas or optimize for better metrics; only adapt/fix so the plan runs and report outcomes. 
- -## Workspace and paths -**CWD = seed worktree.** Read and edit only inside it; use relative paths only. Treat `component_system/` in the worktree as canonical context. - -## Input -- Runner prompt (task content). -- Baseline: `component_system/baseline_branches.json`, `component_system/baseline_metrics.json`. -- Worktree-local files only. - -## Baseline measurement (seed_id __baseline__) -Retry until the run succeeds and you report real metrics. No empty metrics. - -- **OOM:** Reduce `device_batch_size` in `component_system/components/trainer.py` (default 128); keep `total_batch_size % (device_batch_size * sequence_length) == 0`. Rerun until training completes. -- Only trivial fixes (e.g. batch size); no model/training logic changes. -- **Commit before reporting.** Uncommitted changes break the follow-up merge. - -## Workflow -1. Work in the seed worktree (one branch per seed). -2. Adapt/fix until it runs (runtime only: bugs, OOM, imports, config; no model/hyperparameter/training-logic changes for better metrics). -3. Run canonical command (**≥900s**): `timeout 900 uv run --active component_system/entrypoint.py > training.log 2>&1` (or `... 2>&1 | tee training.log` to also see output). **Must set command/tool timeout ≥900s**. After the run, inspect `training.log` to confirm completion and recover or verify metrics. -4. On bug/OOM: fix and rerun; for baseline, retry until success. -5. Commit on seed branch before reporting. -6. Print DCA summary block with `commit_sha` in JSON. -7. Runner evaluates signal and handles promotion. - -## Output Format -Print the summary block. Put metrics in JSON; runner falls back to stdout/stderr parsing if missing. - -```text -AUTORESEARCH_DCA_SUMMARY_BEGIN -{"checks":["entrypoint"],"notes":"...","completed_at":"YYYY-MM-DD HH:MM:SS","commit_sha":"git sha","metrics":{"val_bpb":1.24,...}} -AUTORESEARCH_DCA_SUMMARY_END -``` - -If no final metrics, use `"metrics": {}`. 
Runner extracts from stdout/stderr: `val_bpb`, `training_seconds`, `total_seconds`, `peak_vram_mb`, `mfu_percent`, `total_tokens_M`, `num_steps`, `num_params_M`, `depth`. No metrics → recovery DCA inspects logs; only then treat as failed. - -## Check: Signal Rules - -| Condition | Signal | -|-----------|--------| -| `val_bpb` drops >= 0.001 vs baseline | `positive_signal` | -| `val_bpb` rises >= 0.001 vs baseline | `negative_signal` | -| difference < 0.001 | `neutral` | -| no historical baseline (best_val_bpb) | `positive_signal` (first recording) | -| metrics missing or training error | `error` | - -The threshold is defined in `component_system/config.py` (`PROMOTION_THRESHOLD`). - -## Action: Promotion Rules - -Only DCA may trigger a merge into baseline; P must not. Runner records `commit_sha`; on positive signal the workflow merges seed into baseline first, then updates metrics/state. Merge conflict → system queues merge-resolution DCA. - -### Promotion (`positive_signal`) -1. System merges seed into baseline first (you do not run merge). -2. Workflow updates `baseline_metrics.json` / `baseline_branches.json`. -3. Metadata in seed/run state. - -### Merge failure -- **Normal seed:** In seed worktree: `git merge __baseline__`, resolve conflicts, commit, print DCA summary for retry. -- **Baseline seed (__baseline__):** Merge __baseline__ into target (e.g. master). Run from worktree that has target checked out (`git worktree list`); do not run from __baseline__ worktree or `git merge master` there. - -### Non-promotion -`neutral` / `negative_signal` / `error`: log only. Failure info in queue/state logs. - -## Constraints -- No model/optimizer/training-logic changes for better metrics; only make the plan run (bugs, OOM, etc.). -- Use `run_mainline_training` (or equivalent); do not skip `val_bpb` evaluation. -- Do not edit baseline JSON files; only DCA promotion updates them. -- Canonical runner: `component_system/entrypoint.py`. Traceability: git + state files. 
diff --git a/component_system/README.md b/component_system/README.md deleted file mode 100644 index baa4f3a92..000000000 --- a/component_system/README.md +++ /dev/null @@ -1,99 +0,0 @@ -# autoresearch - -![teaser](progress.png) - -*One day, frontier AI research used to be done by meat computers in between eating, sleeping, having other fun, and synchronizing once in a while using sound wave interconnect in the ritual of "group meeting". That era is long gone. Research is now entirely the domain of autonomous swarms of AI agents running across compute cluster megastructures in the skies. The agents claim that we are now in the 10,205th generation of the code base, in any case no one could tell if that's right or wrong as the "code" is now a self-modifying binary that has grown beyond human comprehension. This repo is the story of how it all began. -@karpathy, March 2026*. - -The idea: give an AI agent a small but real LLM training setup and let it experiment autonomously overnight. It modifies the code, trains for 5 minutes, checks if the result improved, keeps or discards, and repeats. You wake up in the morning to a log of experiments and (hopefully) a better model. The training code here is a simplified single-GPU implementation of [nanochat](https://github.com/karpathy/nanochat). The core idea is that you're not touching any of the Python files like you normally would as a researcher. Instead, you are programming the `program.md` Markdown files that provide context to the AI agents and set up your autonomous research org. The default `program.md` in this repo is intentionally kept as a bare bones baseline, though it's obvious how one would iterate on it over time to find the "research org code" that achieves the fastest research progress, how you'd add more agents to the mix, etc. A bit more context on this project is here in this [tweet](https://x.com/karpathy/status/2029701092347630069). 
- -## How it works - -The repo is deliberately kept small and only really has a three files that matter: - -- **`prepare.py`** — fixed constants, one-time data prep (downloads training data, trains a BPE tokenizer), and runtime utilities (dataloader, evaluation). Not modified. -- **`train.py`** — the single file the agent edits. Contains the full GPT model, optimizer (Muon + AdamW), and training loop. Everything is fair game: architecture, hyperparameters, optimizer, batch size, etc. **This file is edited and iterated on by the agent**. -- **`program.md`** — baseline instructions for one agent. Point your agent here and let it go. **This file is edited and iterated on by the human**. - -By design, training runs for a **fixed 5-minute time budget** (wall clock, excluding startup/compilation), regardless of the details of your compute. The metric is **val_bpb** (validation bits per byte) — lower is better, and vocab-size-independent so architectural changes are fairly compared. - -## Quick start - -**Requirements:** A single NVIDIA GPU (tested on H100), Python 3.10+, [uv](https://docs.astral.sh/uv/). - -```bash - -# 1. Install uv project manager (if you don't already have it) -curl -LsSf https://astral.sh/uv/install.sh | sh - -# 2. Install dependencies -uv sync - -# 3. Download data and train tokenizer (one-time, ~2 min) -uv run prepare.py - -# 4. Manually run a single training experiment (~5 min) -uv run train.py -``` - -If the above commands all work ok, your setup is working and you can go into autonomous research mode. - -## Running the agent - -Simply spin up your Claude/Codex or whatever you want in this repo (and disable all permissions), then you can prompt something like: - -``` -Hi have a look at program.md and let's kick off a new experiment! let's do the setup first. -``` - -The `program.md` file is essentially a super lightweight "skill". 
- -### Component-system workflow - -**Seed → P → DCA** loop: daemon runs two workers that poll a file queue and dispatch to an external agent (Claude, Codex, or OpenCode). - -1. **Dashboard** (optional): `uv run uvicorn component_system.web.app:app --reload` → http://127.0.0.1:8000/component-system -2. **Daemon:** `uv run component_system/run.py` (or `PDCA_AGENT=codex|opencode` for other backends) -3. **Bootstrap:** Have the agent follow `component_system/protocol.md`, create a seed and queue it for P, then start the daemon. Do not run P/DCA stages manually in-session. - -Seeds flow: `queue/p/` → P → `queue/dca/` → DCA → `state/`. Results in dashboard. - -## Project structure - -``` -prepare.py — constants, data prep + runtime utilities (do not modify) -train.py — model, optimizer, training loop (agent modifies this) -program.md — agent instructions -pyproject.toml — dependencies -``` - -## Design choices - -- **Single file to modify.** The agent only touches `train.py`. This keeps the scope manageable and diffs reviewable. -- **Fixed time budget.** Training always runs for exactly 5 minutes, regardless of your specific platform. This means you can expect approx 12 experiments/hour and approx 100 experiments while you sleep. There are two upsides of this design decision. First, this makes experiments directly comparable regardless of what the agent changes (model size, batch size, architecture, etc). Second, this means that autoresearch will find the most optimal model for your platform in that time budget. The downside is that your runs (and results) become not comparable to other people running on other compute platforms. -- **Self-contained.** No external dependencies beyond PyTorch and a few small packages. No distributed training, no complex configs. One GPU, one file, one metric. - -## Platform support - -This code currently requires that you have a single NVIDIA GPU. 
In principle it is quite possible to support CPU, MPS and other platforms but this would also bloat the code. I'm not 100% sure that I want to take this on personally right now. People can reference (or have their agents reference) the full/parent nanochat repository that has wider platform support and shows the various solutions (e.g. a Flash Attention 3 kernels fallback implementation, generic device support, autodetection, etc.), feel free to create forks or discussions for other platforms and I'm happy to link to them here in the README in some new notable forks section or etc. - -Seeing as there seems to be a lot of interest in tinkering with autoresearch on much smaller compute platforms than an H100, a few extra words. If you're going to try running autoresearch on smaller computers (Macbooks etc.), I'd recommend one of the forks below. On top of this, here are some recommendations for how to tune the defaults for much smaller models for aspiring forks: - -1. To get half-decent results I'd use a dataset with a lot less entropy, e.g. this [TinyStories dataset](https://huggingface.co/datasets/karpathy/tinystories-gpt4-clean). These are GPT-4 generated short stories. Because the data is a lot narrower in scope, you will see reasonable results with a lot smaller models (if you try to sample from them after training). -2. You might experiment with decreasing `vocab_size`, e.g. from 8192 down to 4096, 2048, 1024, or even - simply byte-level tokenizer with 256 possible bytes after utf-8 encoding. -3. In `prepare.py`, you'll want to lower `MAX_SEQ_LEN` a lot, depending on the computer even down to 256 etc. As you lower `MAX_SEQ_LEN`, you may want to experiment with increasing `DEVICE_BATCH_SIZE` in `train.py` slightly to compensate. The number of tokens per fwd/bwd pass is the product of these two. -4. Also in `prepare.py`, you'll want to decrease `EVAL_TOKENS` so that your validation loss is evaluated on a lot less data. -5. 
In `train.py`, the primary single knob that controls model complexity is the `DEPTH` (default 8, here). A lot of variables are just functions of this, so e.g. lower it down to e.g. 4. -6. You'll want to most likely use `WINDOW_PATTERN` of just "L", because "SSSL" uses alternating banded attention pattern that may be very inefficient for you. Try it. -7. You'll want to lower `TOTAL_BATCH_SIZE` a lot, but keep it powers of 2, e.g. down to `2**14` (~16K) or so even, hard to tell. - -I think these would be the reasonable hyperparameters to play with. Ask your favorite coding agent for help and copy paste them this guide, as well as the full source code. - -## Notable forks - -- [miolini/autoresearch-macos](https://github.com/miolini/autoresearch-macos) (MacOS) -- [trevin-creator/autoresearch-mlx](https://github.com/trevin-creator/autoresearch-mlx) (MacOS) -- [jsegov/autoresearch-win-rtx](https://github.com/jsegov/autoresearch-win-rtx) (Windows) - -## License - -MIT diff --git a/component_system/components/model.py b/component_system/components/model.py deleted file mode 100644 index f74d89386..000000000 --- a/component_system/components/model.py +++ /dev/null @@ -1,380 +0,0 @@ -from __future__ import annotations - -from dataclasses import dataclass - -import torch -import torch.nn as nn -import torch.nn.functional as F -from kernels import get_kernel - -from prepare import MAX_SEQ_LEN - - -def _get_fa3(): - if torch.cuda.is_available(): - cap = torch.cuda.get_device_capability() - repo = "varunneal/flash-attention-3" if cap == (9, 0) else "kernels-community/flash-attn3" - return get_kernel(repo).flash_attn_interface - return None - -_fa3 = None - -def get_fa3(): - global _fa3 - if _fa3 is None: - _fa3 = _get_fa3() - return _fa3 - - -@dataclass -class GPTConfig: - sequence_len: int = 2048 - vocab_size: int = 32768 - n_layer: int = 12 - n_head: int = 6 - n_kv_head: int = 6 - n_embd: int = 768 - window_pattern: str = "SSSL" - - -def norm(x: torch.Tensor) -> torch.Tensor: 
- return F.rms_norm(x, (x.size(-1),)) - - -def has_ve(layer_idx: int, n_layer: int) -> bool: - return layer_idx % 2 == (n_layer - 1) % 2 - - -def apply_rotary_emb(x: torch.Tensor, cos: torch.Tensor, sin: torch.Tensor) -> torch.Tensor: - assert x.ndim == 4 - d = x.shape[3] // 2 - x1, x2 = x[..., :d], x[..., d:] - y1 = x1 * cos + x2 * sin - y2 = x1 * (-sin) + x2 * cos - return torch.cat([y1, y2], 3) - - -class CausalSelfAttention(nn.Module): - def __init__(self, config: GPTConfig, layer_idx: int) -> None: - super().__init__() - self.n_head = config.n_head - self.n_kv_head = config.n_kv_head - self.n_embd = config.n_embd - self.head_dim = self.n_embd // self.n_head - assert self.n_embd % self.n_head == 0 - assert self.n_kv_head <= self.n_head and self.n_head % self.n_kv_head == 0 - self.c_q = nn.Linear(self.n_embd, self.n_head * self.head_dim, bias=False) - self.c_k = nn.Linear(self.n_embd, self.n_kv_head * self.head_dim, bias=False) - self.c_v = nn.Linear(self.n_embd, self.n_kv_head * self.head_dim, bias=False) - self.c_proj = nn.Linear(self.n_embd, self.n_embd, bias=False) - self.ve_gate_channels = 32 - self.ve_gate = ( - nn.Linear(self.ve_gate_channels, self.n_kv_head, bias=False) - if has_ve(layer_idx, config.n_layer) - else None - ) - - def forward( - self, - x: torch.Tensor, - ve: torch.Tensor | None, - cos_sin: tuple[torch.Tensor, torch.Tensor], - window_size: tuple[int, int], - ) -> torch.Tensor: - batch_size, seq_len, _ = x.size() - q = self.c_q(x).view(batch_size, seq_len, self.n_head, self.head_dim) - k = self.c_k(x).view(batch_size, seq_len, self.n_kv_head, self.head_dim) - v = self.c_v(x).view(batch_size, seq_len, self.n_kv_head, self.head_dim) - - # Value residual (ResFormer): mix in value embedding with input-dependent gate per head - if ve is not None: - ve = ve.view(batch_size, seq_len, self.n_kv_head, self.head_dim) - gate = 2 * torch.sigmoid(self.ve_gate(x[..., : self.ve_gate_channels])) - v = v + gate.unsqueeze(-1) * ve - - cos, sin = cos_sin - q, 
k = apply_rotary_emb(q, cos, sin), apply_rotary_emb(k, cos, sin) - q, k = norm(q), norm(k) - - fa3 = get_fa3() - if fa3 is None: - raise RuntimeError("Flash Attention 3 is unavailable; component_system model should match train.py and requires the same kernel path.") - y = fa3.flash_attn_func(q, k, v, causal=True, window_size=window_size) - y = y.contiguous().view(batch_size, seq_len, -1) - return self.c_proj(y) - - -class MLP(nn.Module): - def __init__(self, config: GPTConfig) -> None: - super().__init__() - self.c_fc = nn.Linear(config.n_embd, 4 * config.n_embd, bias=False) - self.c_proj = nn.Linear(4 * config.n_embd, config.n_embd, bias=False) - - def forward(self, x: torch.Tensor) -> torch.Tensor: - x = self.c_fc(x) - x = F.relu(x).square() - x = self.c_proj(x) - return x - - -class Block(nn.Module): - def __init__(self, config: GPTConfig, layer_idx: int) -> None: - super().__init__() - self.attn = CausalSelfAttention(config, layer_idx) - self.mlp = MLP(config) - - def forward( - self, - x: torch.Tensor, - ve: torch.Tensor | None, - cos_sin: tuple[torch.Tensor, torch.Tensor], - window_size: tuple[int, int], - ) -> torch.Tensor: - x = x + self.attn(norm(x), ve, cos_sin, window_size) - x = x + self.mlp(norm(x)) - return x - - -class GPT(nn.Module): - def __init__(self, config: GPTConfig) -> None: - super().__init__() - self.config = config - self.window_sizes = self._compute_window_sizes(config) - self.transformer = nn.ModuleDict( - { - "wte": nn.Embedding(config.vocab_size, config.n_embd), - "h": nn.ModuleList([Block(config, i) for i in range(config.n_layer)]), - } - ) - self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False) - self.resid_lambdas = nn.Parameter(torch.ones(config.n_layer)) - self.x0_lambdas = nn.Parameter(torch.zeros(config.n_layer)) - head_dim = config.n_embd // config.n_head - kv_dim = config.n_kv_head * head_dim - self.value_embeds = nn.ModuleDict( - { - str(i): nn.Embedding(config.vocab_size, kv_dim) - for i in 
range(config.n_layer) - if has_ve(i, config.n_layer) - } - ) - self.rotary_seq_len = config.sequence_len * 10 - cos, sin = self._precompute_rotary_embeddings(self.rotary_seq_len, head_dim) - self.register_buffer("cos", cos, persistent=False) - self.register_buffer("sin", sin, persistent=False) - - @torch.no_grad() - def init_weights(self) -> None: - torch.nn.init.normal_(self.transformer.wte.weight, mean=0.0, std=1.0) - torch.nn.init.normal_(self.lm_head.weight, mean=0.0, std=0.001) - n_embd = self.config.n_embd - scale = 3**0.5 * n_embd**-0.5 - for block in self.transformer.h: - torch.nn.init.uniform_(block.attn.c_q.weight, -scale, scale) - torch.nn.init.uniform_(block.attn.c_k.weight, -scale, scale) - torch.nn.init.uniform_(block.attn.c_v.weight, -scale, scale) - torch.nn.init.zeros_(block.attn.c_proj.weight) - torch.nn.init.uniform_(block.mlp.c_fc.weight, -scale, scale) - torch.nn.init.zeros_(block.mlp.c_proj.weight) - self.resid_lambdas.fill_(1.0) - self.x0_lambdas.fill_(0.1) - for ve in self.value_embeds.values(): - torch.nn.init.uniform_(ve.weight, -scale, scale) - for block in self.transformer.h: - if block.attn.ve_gate is not None: - torch.nn.init.zeros_(block.attn.ve_gate.weight) - head_dim = self.config.n_embd // self.config.n_head - cos, sin = self._precompute_rotary_embeddings(self.rotary_seq_len, head_dim) - self.cos, self.sin = cos, sin - self.transformer.wte.to(dtype=torch.bfloat16) - for ve in self.value_embeds.values(): - ve.to(dtype=torch.bfloat16) - - def _precompute_rotary_embeddings( - self, - seq_len: int, - head_dim: int, - base: int = 10000, - device: torch.device | None = None, - ) -> tuple[torch.Tensor, torch.Tensor]: - if device is None: - device = self.transformer.wte.weight.device - channel_range = torch.arange(0, head_dim, 2, dtype=torch.float32, device=device) - inv_freq = 1.0 / (base ** (channel_range / head_dim)) - t = torch.arange(seq_len, dtype=torch.float32, device=device) - freqs = torch.outer(t, inv_freq) - cos, sin = 
freqs.cos(), freqs.sin() - cos, sin = cos.bfloat16(), sin.bfloat16() - return cos[None, :, None, :], sin[None, :, None, :] - - def _compute_window_sizes(self, config: GPTConfig) -> list[tuple[int, int]]: - pattern = config.window_pattern.upper() - assert all(c in "SL" for c in pattern) - long_window = config.sequence_len - short_window = long_window // 2 - char_to_window = {"L": (long_window, 0), "S": (short_window, 0)} - window_sizes = [] - for layer_idx in range(config.n_layer): - char = pattern[layer_idx % len(pattern)] - window_sizes.append(char_to_window[char]) - window_sizes[-1] = (long_window, 0) - return window_sizes - - def estimate_flops(self) -> float: - nparams = sum(p.numel() for p in self.parameters()) - value_embeds_numel = sum(ve.weight.numel() for ve in self.value_embeds.values()) - nparams_exclude = ( - self.transformer.wte.weight.numel() - + value_embeds_numel - + self.resid_lambdas.numel() - + self.x0_lambdas.numel() - ) - n_head = self.config.n_head - head_dim = self.config.n_embd // self.config.n_head - seq_len = self.config.sequence_len - attn_flops = 0 - for window_size in self.window_sizes: - window = window_size[0] - effective_seq = seq_len if window < 0 else min(window, seq_len) - attn_flops += 12 * n_head * head_dim * effective_seq - return 6 * (nparams - nparams_exclude) + attn_flops - - def num_scaling_params(self) -> dict[str, int]: - wte = sum(p.numel() for p in self.transformer.wte.parameters()) - value_embeds = sum(p.numel() for p in self.value_embeds.parameters()) - lm_head = sum(p.numel() for p in self.lm_head.parameters()) - transformer_matrices = sum(p.numel() for p in self.transformer.h.parameters()) - scalars = self.resid_lambdas.numel() + self.x0_lambdas.numel() - total = wte + value_embeds + lm_head + transformer_matrices + scalars - return { - "wte": wte, - "value_embeds": value_embeds, - "lm_head": lm_head, - "transformer_matrices": transformer_matrices, - "scalars": scalars, - "total": total, - } - - def setup_optimizer( 
- self, - unembedding_lr: float = 0.004, - embedding_lr: float = 0.2, - matrix_lr: float = 0.02, - weight_decay: float = 0.0, - adam_betas: tuple[float, float] = (0.8, 0.95), - scalar_lr: float = 0.5, - ): - from component_system.components.optimizer import MuonAdamW - - model_dim = self.config.n_embd - matrix_params = list(self.transformer.h.parameters()) - value_embeds_params = list(self.value_embeds.parameters()) - embedding_params = list(self.transformer.wte.parameters()) - lm_head_params = list(self.lm_head.parameters()) - resid_params = [self.resid_lambdas] - x0_params = [self.x0_lambdas] - assert len(list(self.parameters())) == ( - len(matrix_params) - + len(embedding_params) - + len(lm_head_params) - + len(value_embeds_params) - + len(resid_params) - + len(x0_params) - ) - # Scale LR ∝ 1/√dmodel (tuned at 768 dim) - dmodel_lr_scale = (model_dim / 768) ** -0.5 - print(f"Scaling AdamW LRs by 1/sqrt({model_dim}/768) = {dmodel_lr_scale:.6f}") - param_groups = [ - dict(kind="adamw", params=lm_head_params, lr=unembedding_lr * dmodel_lr_scale, betas=adam_betas, eps=1e-10, weight_decay=0.0), - dict(kind="adamw", params=embedding_params, lr=embedding_lr * dmodel_lr_scale, betas=adam_betas, eps=1e-10, weight_decay=0.0), - dict(kind="adamw", params=value_embeds_params, lr=embedding_lr * dmodel_lr_scale, betas=adam_betas, eps=1e-10, weight_decay=0.0), - dict(kind="adamw", params=resid_params, lr=scalar_lr * 0.01, betas=adam_betas, eps=1e-10, weight_decay=0.0), - dict(kind="adamw", params=x0_params, lr=scalar_lr, betas=(0.96, 0.95), eps=1e-10, weight_decay=0.0), - ] - for shape in sorted({p.shape for p in matrix_params}): - group_params = [p for p in matrix_params if p.shape == shape] - param_groups.append( - dict( - kind="muon", - params=group_params, - lr=matrix_lr, - momentum=0.95, - ns_steps=5, - beta2=0.95, - weight_decay=weight_decay, - ) - ) - optimizer = MuonAdamW(param_groups) - for group in optimizer.param_groups: - group["initial_lr"] = group["lr"] - return 
optimizer - - def forward( - self, - idx: torch.Tensor, - targets: torch.Tensor | None = None, - reduction: str = "mean", - ) -> torch.Tensor: - _, seq_len = idx.size() - assert seq_len <= self.cos.size(1) - cos_sin = self.cos[:, :seq_len], self.sin[:, :seq_len] - x = self.transformer.wte(idx) - x = norm(x) - x0 = x - for layer_idx, block in enumerate(self.transformer.h): - x = self.resid_lambdas[layer_idx] * x + self.x0_lambdas[layer_idx] * x0 - ve = self.value_embeds[str(layer_idx)](idx) if str(layer_idx) in self.value_embeds else None - x = block(x, ve, cos_sin, self.window_sizes[layer_idx]) - x = norm(x) - logits = self.lm_head(x).float() - softcap = 15 - logits = softcap * torch.tanh(logits / softcap) - if targets is None: - return logits - return F.cross_entropy( - logits.view(-1, logits.size(-1)), - targets.view(-1), - ignore_index=-1, - reduction=reduction, - ) - - -def build_model_config( - depth: int, - *, - vocab_size: int, - aspect_ratio: int = 64, - head_dim: int = 128, - window_pattern: str = "SSSL", -) -> GPTConfig: - base_dim = depth * aspect_ratio - model_dim = ((base_dim + head_dim - 1) // head_dim) * head_dim - num_heads = model_dim // head_dim - return GPTConfig( - sequence_len=MAX_SEQ_LEN, - vocab_size=vocab_size, - n_layer=depth, - n_head=num_heads, - n_kv_head=num_heads, - n_embd=model_dim, - window_pattern=window_pattern, - ) - - -def create_model( - config: GPTConfig, - *, - device: torch.device | None = None, - compile_model: bool = True, -) -> tuple[GPT, dict[str, int], float]: - if device is None: - device = torch.device("cuda" if torch.cuda.is_available() else "cpu") - with torch.device("meta"): - model = GPT(config) - model.to_empty(device=device) - model.init_weights() - param_counts = model.num_scaling_params() - num_flops_per_token = model.estimate_flops() - if compile_model: - model = torch.compile(model, dynamic=False) - return model, param_counts, num_flops_per_token diff --git a/component_system/components/optimizer.py 
b/component_system/components/optimizer.py deleted file mode 100644 index 227caaea9..000000000 --- a/component_system/components/optimizer.py +++ /dev/null @@ -1,179 +0,0 @@ -from __future__ import annotations - -import torch - - -polar_express_coeffs = [ - (8.156554524902461, -22.48329292557795, 15.878769915207462), - (4.042929935166739, -2.808917465908714, 0.5000178451051316), - (3.8916678022926607, -2.772484153217685, 0.5060648178503393), - (3.285753657755655, -2.3681294933425376, 0.46449024233003106), - (2.3465413258596377, -1.7097828382687081, 0.42323551169305323), -] - - -@torch.compile(dynamic=False, fullgraph=True) -def adamw_step_fused( - p: torch.Tensor, - grad: torch.Tensor, - exp_avg: torch.Tensor, - exp_avg_sq: torch.Tensor, - step_t: torch.Tensor, - lr_t: torch.Tensor, - beta1_t: torch.Tensor, - beta2_t: torch.Tensor, - eps_t: torch.Tensor, - wd_t: torch.Tensor, -) -> None: - p.mul_(1 - lr_t * wd_t) - exp_avg.lerp_(grad, 1 - beta1_t) - exp_avg_sq.lerp_(grad.square(), 1 - beta2_t) - bias1 = 1 - beta1_t**step_t - bias2 = 1 - beta2_t**step_t - denom = (exp_avg_sq / bias2).sqrt() + eps_t - step_size = lr_t / bias1 - p.add_(exp_avg / denom, alpha=-step_size) - - -@torch.compile(dynamic=False, fullgraph=True) -def muon_step_fused( - stacked_grads: torch.Tensor, - stacked_params: torch.Tensor, - momentum_buffer: torch.Tensor, - second_momentum_buffer: torch.Tensor, - momentum_t: torch.Tensor, - lr_t: torch.Tensor, - wd_t: torch.Tensor, - beta2_t: torch.Tensor, - ns_steps: int, - red_dim: int, -) -> None: - momentum = momentum_t.to(stacked_grads.dtype) - momentum_buffer.lerp_(stacked_grads, 1 - momentum) - g = stacked_grads.lerp_(momentum_buffer, momentum) - x = g.bfloat16() - x = x / (x.norm(dim=(-2, -1), keepdim=True) * 1.02 + 1e-6) - if g.size(-2) > g.size(-1): - for a, b, c in polar_express_coeffs[:ns_steps]: - a_matrix = x.mT @ x - b_matrix = b * a_matrix + c * (a_matrix @ a_matrix) - x = a * x + x @ b_matrix - else: - for a, b, c in 
polar_express_coeffs[:ns_steps]: - a_matrix = x @ x.mT - b_matrix = b * a_matrix + c * (a_matrix @ a_matrix) - x = a * x + b_matrix @ x - g = x - beta2 = beta2_t.to(g.dtype) - v_mean = g.float().square().mean(dim=red_dim, keepdim=True) - red_dim_size = g.size(red_dim) - v_norm_sq = v_mean.sum(dim=(-2, -1), keepdim=True) * red_dim_size - v_norm = v_norm_sq.sqrt() - second_momentum_buffer.lerp_(v_mean.to(dtype=second_momentum_buffer.dtype), 1 - beta2) - step_size = second_momentum_buffer.clamp_min(1e-10).rsqrt() - scaled_sq_sum = (v_mean * red_dim_size) * step_size.float().square() - v_norm_new = scaled_sq_sum.sum(dim=(-2, -1), keepdim=True).sqrt() - final_scale = step_size * (v_norm / v_norm_new.clamp_min(1e-10)) - g = g * final_scale.to(g.dtype) - lr = lr_t.to(g.dtype) - wd = wd_t.to(g.dtype) - mask = (g * stacked_params) >= 0 - stacked_params.sub_(lr * g + lr * wd * stacked_params * mask) - - -class MuonAdamW(torch.optim.Optimizer): - def __init__(self, param_groups: list[dict]) -> None: - super().__init__(param_groups, defaults={}) - self._adamw_step_t = torch.tensor(0.0, dtype=torch.float32, device="cpu") - self._adamw_lr_t = torch.tensor(0.0, dtype=torch.float32, device="cpu") - self._adamw_beta1_t = torch.tensor(0.0, dtype=torch.float32, device="cpu") - self._adamw_beta2_t = torch.tensor(0.0, dtype=torch.float32, device="cpu") - self._adamw_eps_t = torch.tensor(0.0, dtype=torch.float32, device="cpu") - self._adamw_wd_t = torch.tensor(0.0, dtype=torch.float32, device="cpu") - self._muon_momentum_t = torch.tensor(0.0, dtype=torch.float32, device="cpu") - self._muon_lr_t = torch.tensor(0.0, dtype=torch.float32, device="cpu") - self._muon_wd_t = torch.tensor(0.0, dtype=torch.float32, device="cpu") - self._muon_beta2_t = torch.tensor(0.0, dtype=torch.float32, device="cpu") - - def _step_adamw(self, group: dict) -> None: - for p in group["params"]: - if p.grad is None: - continue - grad = p.grad - state = self.state[p] - if not state: - state["step"] = 0 - 
state["exp_avg"] = torch.zeros_like(p) - state["exp_avg_sq"] = torch.zeros_like(p) - state["step"] += 1 - self._adamw_step_t.fill_(state["step"]) - self._adamw_lr_t.fill_(group["lr"]) - self._adamw_beta1_t.fill_(group["betas"][0]) - self._adamw_beta2_t.fill_(group["betas"][1]) - self._adamw_eps_t.fill_(group["eps"]) - self._adamw_wd_t.fill_(group["weight_decay"]) - adamw_step_fused( - p, - grad, - state["exp_avg"], - state["exp_avg_sq"], - self._adamw_step_t, - self._adamw_lr_t, - self._adamw_beta1_t, - self._adamw_beta2_t, - self._adamw_eps_t, - self._adamw_wd_t, - ) - - def _step_muon(self, group: dict) -> None: - params = group["params"] - if not params: - return - first_param = params[0] - state = self.state[first_param] - num_params = len(params) - shape, device, dtype = first_param.shape, first_param.device, first_param.dtype - if "momentum_buffer" not in state: - state["momentum_buffer"] = torch.zeros(num_params, *shape, dtype=dtype, device=device) - if "second_momentum_buffer" not in state: - state_shape = (num_params, shape[-2], 1) if shape[-2] >= shape[-1] else (num_params, 1, shape[-1]) - state["second_momentum_buffer"] = torch.zeros(state_shape, dtype=dtype, device=device) - red_dim = -1 if shape[-2] >= shape[-1] else -2 - stacked_grads = torch.stack([p.grad for p in params]) - stacked_params = torch.stack(params) - self._muon_momentum_t.fill_(group["momentum"]) - self._muon_beta2_t.fill_(group["beta2"] if group["beta2"] is not None else 0.0) - self._muon_lr_t.fill_(group["lr"] * max(1.0, shape[-2] / shape[-1]) ** 0.5) - self._muon_wd_t.fill_(group["weight_decay"]) - muon_step_fused( - stacked_grads, - stacked_params, - state["momentum_buffer"], - state["second_momentum_buffer"], - self._muon_momentum_t, - self._muon_lr_t, - self._muon_wd_t, - self._muon_beta2_t, - group["ns_steps"], - red_dim, - ) - torch._foreach_copy_(params, list(stacked_params.unbind(0))) - - @torch.no_grad() - def step(self) -> None: - for group in self.param_groups: - if 
group["kind"] == "adamw": - self._step_adamw(group) - elif group["kind"] == "muon": - self._step_muon(group) - - -def create_optimizer(model: torch.nn.Module, settings: object) -> MuonAdamW: - return model.setup_optimizer( - unembedding_lr=settings.unembedding_lr, - embedding_lr=settings.embedding_lr, - matrix_lr=settings.matrix_lr, - weight_decay=settings.weight_decay, - adam_betas=settings.adam_betas, - scalar_lr=settings.scalar_lr, - ) diff --git a/component_system/components/trainer.py b/component_system/components/trainer.py deleted file mode 100644 index fd300348e..000000000 --- a/component_system/components/trainer.py +++ /dev/null @@ -1,191 +0,0 @@ -from __future__ import annotations - -import gc -import time -from dataclasses import dataclass -from typing import Any - -import torch - -from prepare import MAX_SEQ_LEN, TIME_BUDGET, evaluate_bpb, make_dataloader - - -H100_BF16_PEAK_FLOPS = 989.5e12 - - -@dataclass -class TrainingSettings: - aspect_ratio: int = 64 - head_dim: int = 128 - window_pattern: str = "SSSL" - total_batch_size: int = 2**19 - embedding_lr: float = 0.6 - unembedding_lr: float = 0.004 - matrix_lr: float = 0.04 - scalar_lr: float = 0.5 - weight_decay: float = 0.2 - adam_betas: tuple[float, float] = (0.8, 0.95) - warmup_ratio: float = 0.0 - warmdown_ratio: float = 0.5 - final_lr_frac: float = 0.0 - depth: int = 8 - device_batch_size: int = 32 # 24GB vram - seed: int = 42 - compile_model: bool = True - - -def default_training_settings() -> TrainingSettings: - return TrainingSettings() - - -def get_lr_multiplier(progress: float, settings: TrainingSettings) -> float: - if progress < settings.warmup_ratio: - return progress / settings.warmup_ratio if settings.warmup_ratio > 0 else 1.0 - if progress < 1.0 - settings.warmdown_ratio: - return 1.0 - cooldown = (1.0 - progress) / settings.warmdown_ratio - return cooldown + (1 - cooldown) * settings.final_lr_frac - - -def get_muon_momentum(step: int) -> float: - frac = min(step / 300, 1) - return (1 
- frac) * 0.85 + frac * 0.95 - - -def get_weight_decay(progress: float, settings: TrainingSettings) -> float: - return settings.weight_decay * (1 - progress) - - -def run_training_session( - *, - model: torch.nn.Module, - optimizer: torch.optim.Optimizer, - tokenizer: Any, - settings: TrainingSettings, - param_counts: dict[str, int], - num_flops_per_token: float, - baseline_binding: dict[str, Any], -) -> dict[str, Any]: - t_start = time.time() - device = torch.device("cuda" if torch.cuda.is_available() else "cpu") - autocast_device = "cuda" if device.type == "cuda" else "cpu" - autocast_ctx = torch.amp.autocast(device_type=autocast_device, dtype=torch.bfloat16) - - tokens_per_fwdbwd = settings.device_batch_size * MAX_SEQ_LEN - assert settings.total_batch_size % tokens_per_fwdbwd == 0 - grad_accum_steps = settings.total_batch_size // tokens_per_fwdbwd - train_loader = make_dataloader(tokenizer, settings.device_batch_size, MAX_SEQ_LEN, "train") - x, y, epoch = next(train_loader) - - print(f"Vocab size: {tokenizer.get_vocab_size():,}") - print(f"Time budget: {TIME_BUDGET}s") - print(f"Gradient accumulation steps: {grad_accum_steps}") - print("Training session started") - - t_start_training = time.time() - smooth_train_loss = 0.0 - total_training_time = 0.0 - step = 0 - - while True: - if device.type == "cuda": - torch.cuda.synchronize(device=device) - t0 = time.time() - for _ in range(grad_accum_steps): - with autocast_ctx: - loss = model(x, y) - train_loss = loss.detach() - loss = loss / grad_accum_steps - loss.backward() - x, y, epoch = next(train_loader) - - progress = min(total_training_time / TIME_BUDGET, 1.0) - lrm = get_lr_multiplier(progress, settings) - muon_momentum = get_muon_momentum(step) - muon_weight_decay = get_weight_decay(progress, settings) - for group in optimizer.param_groups: - group["lr"] = group["initial_lr"] * lrm - if group["kind"] == "muon": - group["momentum"] = muon_momentum - group["weight_decay"] = muon_weight_decay - - optimizer.step() 
- model.zero_grad(set_to_none=True) - train_loss_f = train_loss.item() - if train_loss_f > 100: - raise RuntimeError("Training aborted because loss exceeded the fast-fail threshold.") - - torch.cuda.synchronize(device=device) - dt = time.time() - t0 - if step > 10: - total_training_time += dt - - ema_beta = 0.9 - smooth_train_loss = ema_beta * smooth_train_loss + (1 - ema_beta) * train_loss_f - debiased_smooth_loss = smooth_train_loss / (1 - ema_beta ** (step + 1)) - pct_done = 100 * progress - tok_per_sec = int(settings.total_batch_size / dt) - mfu = 100 * num_flops_per_token * settings.total_batch_size / dt / H100_BF16_PEAK_FLOPS - remaining = max(0.0, TIME_BUDGET - total_training_time) - print( - f"\rstep {step:05d} ({pct_done:.1f}%) | loss: {debiased_smooth_loss:.6f} | " - f"lrm: {lrm:.2f} | dt: {dt*1000:.0f}ms | tok/sec: {tok_per_sec:,} | " - f"mfu: {mfu:.1f}% | epoch: {epoch} | remaining: {remaining:.0f}s ", - end="", - flush=True, - ) - - if step == 0: - gc.collect() - gc.freeze() - gc.disable() - elif (step + 1) % 5000 == 0: - gc.collect() - - step += 1 - if step > 10 and total_training_time >= TIME_BUDGET: - break - - print() - total_tokens = step * settings.total_batch_size - model.eval() - with autocast_ctx: - val_bpb = evaluate_bpb(model, tokenizer, settings.device_batch_size) - - t_end = time.time() - peak_vram_mb = torch.cuda.max_memory_allocated() / 1024 / 1024 - steady_state_mfu = ( - 100 - * num_flops_per_token - * settings.total_batch_size - * (step - 10) - / total_training_time - / H100_BF16_PEAK_FLOPS - if total_training_time > 0 - else 0.0 - ) - num_params = param_counts["total"] - metrics = { - "val_bpb": float(val_bpb), - "training_seconds": float(total_training_time), - "total_seconds": float(t_end - t_start), - "peak_vram_mb": float(peak_vram_mb), - "mfu_percent": float(steady_state_mfu), - "total_tokens_M": float(total_tokens / 1e6), - "num_steps": int(step), - "num_params_M": float(num_params / 1e6), - "depth": int(settings.depth), - 
"startup_seconds": float(t_start_training - t_start), - } - - print("---") - print(f"val_bpb: {metrics['val_bpb']:.6f}") - print(f"training_seconds: {metrics['training_seconds']:.1f}") - print(f"total_seconds: {metrics['total_seconds']:.1f}") - print(f"peak_vram_mb: {metrics['peak_vram_mb']:.1f}") - print(f"mfu_percent: {metrics['mfu_percent']:.2f}") - print(f"total_tokens_M: {metrics['total_tokens_M']:.1f}") - print(f"num_steps: {metrics['num_steps']}") - print(f"num_params_M: {metrics['num_params_M']:.1f}") - print(f"depth: {metrics['depth']}") - return metrics diff --git a/component_system/config.py b/component_system/config.py deleted file mode 100644 index ffe3ef7cb..000000000 --- a/component_system/config.py +++ /dev/null @@ -1,31 +0,0 @@ -"""Static configuration for the component system. No dynamic or per-run values.""" -from __future__ import annotations - -from pathlib import Path - -COMPONENT_SYSTEM_ROOT = Path(__file__).resolve().parent - -# Module import paths for training (used by mainline assembler) -MODEL_MODULE = "component_system.components.model" -OPTIMIZER_MODULE = "component_system.components.optimizer" -TRAINING_STEP_MODULE = "component_system.components.trainer" - -# Promotion threshold: improve val_bpb by at least this much to promote -PROMOTION_THRESHOLD = 0.001 - -# Worktree root relative to project -WORKTREE_ROOT = "component_system/history/worktrees" - -# Default branch name suggested in UI when no branches exist (not a global baseline) -DEFAULT_BASELINE_BRANCH = "master" - - -def get_training_binding() -> dict[str, str | float]: - """Return a static dict used by training mainline/trainer (no baseline_version).""" - return { - "model_module": MODEL_MODULE, - "optimizer_module": OPTIMIZER_MODULE, - "training_step_module": TRAINING_STEP_MODULE, - "promotion_threshold": PROMOTION_THRESHOLD, - "worktree_root": WORKTREE_ROOT, - } diff --git a/component_system/docs/SEED_LIFECYCLE_AND_CONCURRENCY_REVIEW.md 
b/component_system/docs/SEED_LIFECYCLE_AND_CONCURRENCY_REVIEW.md deleted file mode 100644 index 854cbf0e2..000000000 --- a/component_system/docs/SEED_LIFECYCLE_AND_CONCURRENCY_REVIEW.md +++ /dev/null @@ -1,177 +0,0 @@ -# Seed Lifecycle, State Transitions, and Concurrency Review - -## 1. Seed lifecycle and state transitions - -### 1.1 Seed status enum (`SeedStatus`) - -| Status | Meaning | -|-------------|---------| -| `draft` | Newly created, not yet queued for Plan | -| `queued` | Plan run created and task in queue (or waiting for baseline) | -| `planning` | P run in progress | -| `generated` | P completed; code generated, DCA not yet queued | -| `dca_queued`| DCA run created and task in queue (includes sync/merge resolution) | -| `adapting` | DCA run in progress | -| `running` | **Never set in code** — see gap below | -| `passed` | DCA completed, no promotion | -| `failed` | Terminal failure (P failed, DCA failed, or reconciled from passed+error) | -| `promoted` | DCA completed with positive signal; seed merged into baseline | - -### 1.2 Documented transitions (from code) - -``` -draft → queued queue_p / _enqueue_plan_run -queued → queued queue_p (waiting for baseline; latest_run_id cleared) -queued → planning mark_run_started (stage P) -planning → generated finish_p_run -planning → failed mark_run_failed (P failed) -generated → dca_queued queue_dca -generated → queued finish_sync_resolution (then _enqueue_plan_run) -dca_queued → adapting mark_run_started (stage DCA) -adapting → passed finish_dca_run (neutral/negative signal) -adapting → failed finish_dca_run (error) or mark_run_failed -adapting → promoted finish_dca_run (positive_signal) -adapting → dca_queued finish_dca_run (merge/sync failed → queue_dca merge_resolution=True) -adapting → generated finish_dca_run (ralph neutral_signal) -passed → failed _reconcile_seed_status_signal (passed but latest_signal=="error") -``` - -Baseline seed: `draft` → `generated` (ensure_baseline_result) → `dca_queued` → … → 
`passed` / `failed`. - ---- - -## 2. Gaps and issues in state transitions - -### 2.1 `SeedStatus.running` is never set - -- **Code:** `SeedStatus.running` appears only in: - - `is_seed_eligible_for_stage` (P not eligible if `adapting`, `running`, or `dca_queued`) - - `ensure_baseline_result` (early return if `dca_queued`, `adapting`, `running`) - - Dashboard `status_column_map` → `activeDca` -- **Issue:** No assignment `seed.status = SeedStatus.running` anywhere. DCA-in-progress uses `adapting`. -- **Recommendation:** Either remove `running` from the enum and all checks, or document it as reserved and start setting it (e.g. for a future “running but not adapting” phase). Otherwise it’s dead code and the enum is misleading. - -### 2.2 Sync failure in `mark_run_started` (P): run/seed consistency - -- **Flow:** When a P run is started, `mark_run_started`: - 1. Sets `run.status = RunStatus.running` (in memory). - 2. Calls `ensure_seed_worktree_ready`, then `sync_seed_worktree_with_baseline`. - 3. On `GitCommandError`, calls `queue_sync_resolution(seed_id)` and raises `SyncResolutionQueued`. - 4. Only at the end (after sync and other logic) does it `run_repo.save(run)` and `seed_repo.save(seed)`. -- **Effect:** When sync fails: - - The **run** is never saved as `running`; it remains `queued` in the run repo. - - The **seed** is updated by `queue_sync_resolution`: `seed.status = dca_queued`, new DCA run and task written. - - The **P task** is moved to error in `run.py` (`move_to_error(task_path)`). -- **Result:** The original P **run** is orphaned: it stays `queued` forever and is never completed or failed. `seed.latest_run_id` points to the new sync-resolution DCA run. A later Plan enqueue creates a new P run and task. -- **Recommendation:** When raising `SyncResolutionQueued`, either: - - Mark the current P run as failed (e.g. “sync_failed”) and save it, or - - Explicitly not create a run for that P task until after sync succeeds (e.g. move run creation to after sync). 
That would require a larger refactor. - -### 2.3 Other transitions - -- All other transitions are consistent with the intended flow: P → generated/failed, DCA → passed/failed/promoted/dca_queued/generated, and reconciliation of `passed` + `error` → `failed`. - ---- - -## 3. Multiple seeds running at the same time — race conditions and conflicts - -### 3.1 Task claiming is atomic per task - -- **Mechanism:** `claim_pending` uses `path.rename(path, IN_PROGRESS_DIR / path.name)`. Only one process can rename a given file; others get `FileNotFoundError` or `OSError` and skip that task. -- **Effect:** Each task file is claimed by at most one worker; no double execution of the same task. - -### 3.2 Per-seed eligibility prevents P vs DCA overlap - -- **Mechanism:** Before running a task, the worker calls `claim_pending(..., eligible_fn=eligible)`. `eligible` uses `WORKFLOW.is_seed_eligible_for_stage(seed_id, stage)`: - - **P:** eligible only if `seed.status not in (adapting, running, dca_queued)`. - - **DCA:** eligible only if `seed.status is not SeedStatus.planning`. -- **Effect:** For a given seed, P and DCA are never both considered eligible. So the same seed cannot have a P task and a DCA task running at the same time, and a seed in `planning` or `adapting` will not get another stage started until the run finishes. - -### 3.3 Read–modify–write on seed/run state - -- **Risk:** Multiple workers can run concurrently (e.g. 2 P workers, 1 DCA-GPU, 1 DCA-AUX). Each worker loads seed/run, modifies, and saves. There is no locking or optimistic concurrency (e.g. version field). -- **Mitigation in practice:** - - Each **task** is for a specific (seed_id, run_id). Different tasks imply different runs (and usually different seeds for P/DCA). - - Eligibility ensures that for a given seed, only one “kind” of work (P or DCA) is allowed at a time. -- **Remaining risk:** If two tasks for the same seed could ever be in flight (e.g. 
due to a bug or a restored task), two workers could both read the same seed, update it, and save; the last write would win and one update could be lost. With the current design (one active run per seed per stage), this should not happen for normal execution. - -### 3.4 Git worktrees - -- **Design:** Each seed has its own worktree (path `worktrees/`). Different seeds use different directories. -- **Effect:** No filesystem conflict between seeds; multiple seeds can run P or DCA in parallel in separate worktrees. Baseline seed uses `worktrees/__baseline__`. - -### 3.5 Shared JSON state (repos) - -- **State:** Seeds, runs, metrics, branch map, and queue dirs are file-based (JSON under `history/state/`, `history/queue/`). -- **Risk:** Two workers writing different seeds at the same time can overwrite each other only if they wrote the same file (same seed or same run). Since each task is bound to one run and one seed, and eligibility prevents overlapping stages for the same seed, concurrent updates to the same seed/run are not expected for correct flows. -- **Recommendation:** For extra safety, consider short-lived file locking or atomic write (write to temp + rename) for seed/run saves if the daemon scales to many workers. - ---- - -## 4. Edge case: automatic merge fails — can dependent tasks start prematurely? - -### 4.1 Sync failure (merge baseline into seed) before P - -- **When:** In `mark_run_started` (stage P), `sync_seed_worktree_with_baseline(seed)` raises `GitCommandError`. -- **What happens:** - 1. `queue_sync_resolution(seed_id)` runs: seed set to `dca_queued`, new DCA task with `sync_resolution: True` is written. - 2. `SyncResolutionQueued` is raised; in `run.py` the P task is moved to error (not re-queued). - 3. Seed remains `dca_queued`; only the sync-resolution DCA task is for that seed. -- **Eligibility:** For P, a seed in `dca_queued` is **not** eligible. So no other P task for this seed can start. 
No dependent “normal” P runs until the sync-resolution DCA completes and Plan is re-queued in `finish_sync_resolution`. So **dependent tasks do not start prematurely**. - -### 4.2 DCA merge into baseline fails (normal or baseline seed) - -- **When:** In `finish_dca_run`, `promote_seed_branch` raises `GitCommandError`. -- **What happens:** - 1. A new DCA run is queued with `merge_resolution=True` (and seed stays `dca_queued`). - 2. No new P run or normal DCA run is enqueued for that seed until the merge-resolution DCA finishes. -- **Eligibility:** While seed is `dca_queued` or `adapting`, P is not eligible. So **dependent tasks do not start prematurely**. - -### 4.3 Baseline merge fails - -- Same pattern: baseline seed gets a merge-resolution DCA task, stays `dca_queued`. `_release_seeds_waiting_for_baseline` is only called after a successful merge (or after the “loop avoided” path). Waiting seeds are not released until baseline is merged. So **dependent tasks do not start prematurely**. - -**Conclusion:** When the workflow’s automatic merge (sync or promote) fails, the seed is put in `dca_queued` with a resolution DCA task. Eligibility and the fact that no new P/normal DCA is enqueued until resolution completes ensure that dependent tasks do **not** start before merge resolution. - ---- - -## 5. Other edge cases - -### 5.1 Restored in-progress tasks - -- On daemon start, `restore_in_progress_tasks()` moves all tasks from `in_progress/` back to the stage queue. Those tasks are then eligible to be claimed again. -- **Risk:** If a task was in progress (worker had already called `mark_run_started` and set seed to `planning`/`adapting`) and the daemon died before the worker finished, the run and seed are already updated. After restore, the task is back in the queue; a worker can claim it and call `mark_run_started` again. That would re-use the same run_id and could lead to duplicate “started” events or inconsistent state (e.g. 
two workers both thinking they own the run). The code does not detect “this run was already started.” -- **Recommendation:** Before updating run/seed in `mark_run_started`, check that `run.status` is still `queued`; if it is already `running`, treat the task as a duplicate (e.g. move to error or skip and don’t run again). - -### 5.2 Ralph loop and merge_resolution / metrics_recovery - -- After a failed DCA, `mark_run_failed` can call `queue_p(seed_id)` for Ralph seeds, but only when the task is not `merge_resolution` and not `metrics_recovery`. So Ralph does not re-queue P on merge-resolution or metrics-recovery DCA failure, which is correct. - -### 5.3 Baseline seed and sync - -- Baseline seed does not call `sync_seed_worktree_with_baseline` (early return in that function). So sync failure path does not apply to __baseline__. `queue_sync_resolution` explicitly raises if seed is baseline. No issue. - ---- - -## 6. Summary table - -| Area | Status | Notes | -|-----------------------------|--------|--------| -| Seed status enum | Gap | `SeedStatus.running` never set; remove or use. | -| P/DCA transition consistency| OK | Transitions match design. | -| Sync fail (before P) | Bug | P run left `queued`; orphaned run. | -| Task claiming | OK | Atomic rename prevents double run of same task. | -| P vs DCA same seed | OK | Eligibility prevents concurrent P and DCA for one seed. | -| Multiple seeds concurrent | OK | Different worktrees; eligibility per seed. | -| Merge/sync fail → dependents| OK | Seed stays `dca_queued`; no premature P/DCA. | -| Restored in-progress tasks | Risk | Re-claiming can lead to duplicate start for same run. | - ---- - -## 7. Implemented fixes - -1. **Sync failure in `mark_run_started`:** Before raising `SyncResolutionQueued`, mark the current P run as failed (e.g. error “sync with baseline failed”) and save it, so the run is not orphaned. -2. 
**`SeedStatus.running`:** Either remove it from the enum and from all checks, or introduce a clear rule (e.g. “DCA in progress” = `adapting` only) and document that `running` is unused. -3. **Restored tasks:** In `mark_run_started`, if `run.status != RunStatus.queued`, do not update run/seed and do not run the agent; move the task to error or a “duplicate” bucket and return. - -**Not changed:** `SeedStatus.running` is still never set; it could be removed from the enum in a follow-up or left as reserved. diff --git a/component_system/entrypoint.py b/component_system/entrypoint.py deleted file mode 100644 index 33fc2d426..000000000 --- a/component_system/entrypoint.py +++ /dev/null @@ -1,18 +0,0 @@ -"""Standalone entrypoint for the component_system baseline.""" -from __future__ import annotations - -import sys -from pathlib import Path - -if __package__ in {None, ""}: - sys.path.insert(0, str(Path(__file__).resolve().parent.parent)) - -from component_system.training.mainline import run_mainline_training - - -def main() -> None: - run_mainline_training() - - -if __name__ == "__main__": - main() diff --git a/component_system/training/mainline.py b/component_system/training/mainline.py deleted file mode 100644 index e91771d85..000000000 --- a/component_system/training/mainline.py +++ /dev/null @@ -1,82 +0,0 @@ -"""Mainline assembler: reads static config, dynamically loads components, runs training.""" -from __future__ import annotations - -if __package__ in {None, ""}: - import sys - from pathlib import Path - - sys.path.insert(0, str(Path(__file__).resolve().parents[2])) - -import importlib -import os -from dataclasses import asdict -from typing import Any - -import torch - -from prepare import Tokenizer - -from component_system.config import get_training_binding - - -def _prepare_environment() -> None: - os.environ["PYTORCH_ALLOC_CONF"] = "expandable_segments:True" - os.environ["HF_HUB_DISABLE_PROGRESS_BARS"] = "1" - torch.manual_seed(42) - if 
torch.cuda.is_available(): - torch.cuda.manual_seed(42) - torch.set_float32_matmul_precision("high") - torch.cuda.reset_peak_memory_stats() - - -def _import_module(path: str) -> Any: - return importlib.import_module(path) - - -def run_mainline_training(binding_path: str | None = None) -> dict[str, Any]: - _prepare_environment() - binding = get_training_binding() - - tokenizer = Tokenizer.from_directory() - vocab_size = tokenizer.get_vocab_size() - - model_module = _import_module(binding["model_module"]) - optimizer_module = _import_module(binding["optimizer_module"]) - training_step_module = _import_module(binding["training_step_module"]) - - settings = training_step_module.default_training_settings() - config = model_module.build_model_config( - depth=settings.depth, - vocab_size=vocab_size, - aspect_ratio=settings.aspect_ratio, - head_dim=settings.head_dim, - window_pattern=settings.window_pattern, - ) - - print("Loaded training binding from config") - print(f"Model config: {asdict(config)}") - - model, param_counts, num_flops_per_token = model_module.create_model( - config, - compile_model=settings.compile_model, - ) - - print("Parameter counts:") - for key, value in param_counts.items(): - print(f" {key:24s}: {value:,}") - print(f"Estimated FLOPs per token: {num_flops_per_token:e}") - - optimizer = optimizer_module.create_optimizer(model, settings) - return training_step_module.run_training_session( - model=model, - optimizer=optimizer, - tokenizer=tokenizer, - settings=settings, - param_counts=param_counts, - num_flops_per_token=num_flops_per_token, - baseline_binding=binding, - ) - - -if __name__ == "__main__": - run_mainline_training() diff --git a/component_system/web/templates/partials/seed_detail_runs_content.html b/component_system/web/templates/partials/seed_detail_runs_content.html deleted file mode 100644 index 278a12ba0..000000000 --- a/component_system/web/templates/partials/seed_detail_runs_content.html +++ /dev/null @@ -1,148 +0,0 @@ -{% if runs 
and seed.status.value in ['queued', 'planning'] %} -

Runs stay queued until the daemon is running. Start: uv run component_system/run.py

-{% endif %} -{% if runs %} - {% for run in runs %} -
-
-
-

{% if run.stage.value == 'p' %}Plan{% else %}{{ run.stage.value|upper }}{% endif %} · {{ run.status.value }}

-

{{ run.run_id }}

-
-
- {% if run.signal %} - {{ run.signal }} - {% endif %} - -
-
- {% if run.metrics %} -
- {% for key, value in run.metrics.items() %} -
-
{{ key }}
-
{{ value }}
-
- {% endfor %} -
- {% endif %} -
- - {% endfor %} -{% else %} -

No runs yet. Use Run Plan to start.

-{% endif %} diff --git a/component_system/web/templates/partials/seed_detail_timeline_content.html b/component_system/web/templates/partials/seed_detail_timeline_content.html deleted file mode 100644 index 8fabcdd54..000000000 --- a/component_system/web/templates/partials/seed_detail_timeline_content.html +++ /dev/null @@ -1,16 +0,0 @@ -{% if events %} - {% for event in events %} -
-

{{ event.message }}

- {% if event.commit_sha %} -

commit: {{ event.commit_sha }}

- {% endif %} - {% if event.target_branch %} -

target branch: {{ event.target_branch }}

- {% endif %} -

{{ event.kind }} · {{ event.created_at_human }}

-
- {% endfor %} -{% else %} -

No events yet.

-{% endif %} diff --git a/pdca_system/PDCA-Check-Action.md b/pdca_system/PDCA-Check-Action.md new file mode 100644 index 000000000..18719d58c --- /dev/null +++ b/pdca_system/PDCA-Check-Action.md @@ -0,0 +1,75 @@ +# CA — Do, Check, Action + +## Responsibility +Take the generated plan from PD, adapt/fix it in the seed worktree, +run the project's canonical command (script defined in protocol and below; e.g. train.py) using the **Python executable injected by the daemon**, evaluate results against baseline, and +promote only when the signal is positive. Do not propose new ideas or optimize for better metrics; only adapt/fix so the plan runs and report outcomes. + +## Workspace and paths +**CWD = seed worktree.** Read and edit only inside it; use relative paths only. Treat `pdca_system/` in the worktree as canonical context. + +## Input +- Runner prompt (task content). +- Baseline: `pdca_system/baseline_branches.json`, `pdca_system/baseline_metrics.json`. +- Worktree-local files only. + +## Baseline measurement (seed_id __baseline__) +Retry until the run succeeds and you report real metrics. No empty metrics. + +- **OOM:** Reduce `DEVICE_BATCH_SIZE` in `train.py` (default 128); keep `TOTAL_BATCH_SIZE % (DEVICE_BATCH_SIZE * MAX_SEQ_LEN) == 0`. Rerun until training completes. +- Only trivial fixes (e.g. batch size); no model/training logic changes. +- **Commit before reporting.** Uncommitted changes break the follow-up merge. + +## Workflow +1. Work in the seed worktree (one branch per seed). +2. Adapt/fix until it runs (runtime only: bugs, OOM, imports, config; no model/hyperparameter/training-logic changes for better metrics). +3. Run the **canonical command** (**≥900s**): the daemon injects the **Python executable** (the one running the daemon) into your task prompt. Use that Python for every Python command in this stage, together with the canonical script defined in this doc and protocol (e.g. 
`train.py` or the script your project uses), for example `{python_exe} train.py > training.log 2>&1` (or `{python_exe} train.py 2>&1 | tee training.log`). **Must set command/tool timeout ≥900s**. After the run, inspect `training.log` to confirm completion and recover or verify metrics. +4. On bug/OOM: fix and rerun; for baseline, retry until success. +5. Commit on seed branch before reporting. +6. Output CA summary block with `commit_sha` in JSON. +7. Runner evaluates signal and handles promotion. + +## Output Format +Write the summary JSON to the file named `autoresearch_summary.json` in your current working directory (cwd root). Do not print it to stdout or stderr. Put metrics in the JSON; the runner reads only this file. + +```json +{"checks":["entrypoint"],"notes":"...","completed_at":"YYYY-MM-DD HH:MM:SS","commit_sha":"git sha","metrics":{"val_bpb":1.24,...}} +``` + +If no final metrics, use `"metrics": {}`. The **target metric** key (see `pdca_system/config.py`: `TARGET_METRIC_KEY`), plus `training_seconds`, `total_seconds`, `peak_vram_mb`, `mfu_percent`, `total_tokens_M`, `num_steps`, `num_params_M`, `depth` must be in the JSON `metrics` block. No metrics → recovery CA inspects logs; only then treat as failed. + +## Check: Signal Rules + +The **target metric** (key and direction) is configured in `pdca_system/config.py`: `TARGET_METRIC_KEY`, `TARGET_METRIC_LOWER_IS_BETTER`, `TARGET_METRIC_LABEL`. Default: `val_bpb`, lower is better. + +| Condition | Signal | +|-----------|--------| +| target metric improves by >= threshold vs baseline (e.g. `val_bpb` drops >= 0.001) | `positive_signal` | +| target metric regresses by >= threshold vs baseline (e.g. `val_bpb` rises >= 0.001) | `negative_signal` | +| difference < threshold | `neutral` | +| no historical baseline (best target metric) | `positive_signal` (first recording) | +| metrics missing or training error | `error` | + +The threshold is defined in `pdca_system/config.py` (`PROMOTION_THRESHOLD`). 
+ +## Action: Promotion Rules + +Only CA may trigger a merge into baseline; PD must not. Runner records `commit_sha`; on positive signal the workflow merges seed into baseline first, then updates metrics/state. Merge conflict → system queues merge-resolution CA. + +### Promotion (`positive_signal`) +1. System merges seed into baseline first (you do not run merge). +2. Workflow updates `baseline_metrics.json` / `baseline_branches.json`. +3. Metadata in seed/run state. + +### Merge failure +- **Normal seed:** In seed worktree: `git merge __baseline__`, resolve conflicts, commit, output CA summary for retry. +- **Baseline seed (__baseline__):** Merge __baseline__ into target (e.g. master). Run from worktree that has target checked out (`git worktree list`); do not run from __baseline__ worktree or `git merge master` there. + +### Non-promotion +`neutral` / `negative_signal` / `error`: log only. Failure info in queue/state logs. + +## Constraints +- No model/optimizer/training-logic changes for better metrics; only make the plan run (bugs, OOM, etc.). +- Run the canonical command from the worktree using the **Python executable injected by the daemon** and the script/command defined in protocol and this doc. Do not skip target metric evaluation (output `{TARGET_METRIC_KEY}: {value}` in stdout and/or include in JSON). +- Do not edit baseline JSON files; only CA promotion updates them. +- Traceability: git + state files. diff --git a/component_system/PDCA-PLAN.md b/pdca_system/PDCA-Plan-Do.md similarity index 61% rename from component_system/PDCA-PLAN.md rename to pdca_system/PDCA-Plan-Do.md index 7564ffeb8..feef17f08 100644 --- a/component_system/PDCA-PLAN.md +++ b/pdca_system/PDCA-Plan-Do.md @@ -1,61 +1,62 @@ -# P - Seed Planning And Generation - -## Responsibility -Extract exactly one testable improvement hypothesis from the seed prompt, -generate the first implementation in a candidate worktree, and hand the result -to DCA through the runner. 
- -## Workspace and paths -**CWD = seed worktree.** Read and edit only inside it; use relative paths only. - -## arXiv search (CLI) - -Run from repo root with uv (e.g. `uv run python component_system/run_arxiv.py ...`); arxiv is already a project dependency. - -### Search (CLI script) - -From repo root, use the script in this component: - -```bash -uv run python component_system/run_arxiv.py --query "machine learning" --max-results 5 -uv run python component_system/run_arxiv.py --id 1605.08386v1 --output json -``` - -**CLI arguments:** `--query` / `-q`, `--id` (one or more arXiv IDs; overrides query), `--max-results` / `-n`, `--sort-by` (relevance | submittedDate | lastUpdatedDate), `--sort-order` (ascending | descending), `--output` / `-o` (text | json), `--download-dir`, `--verbose` / `-v`. - -### Hypothesis from results -1. Read abstracts; pick one concrete change (not just a concept). -2. Map to component: `model`, `optimizer`, or `trainer`. -3. State expected benefit; reduce to one isolated, evaluable improvement. - -## Input -- **results.tsv** in cwd (if present) ? read first to avoid duplicating tried/discarded ideas. -- arXiv via arxiv-search; past failures in `queue/done/`; manual seed files. - -## One-Improvement Rule - -One seed = one hypothesis = one causal change. Do not bundle ideas. If the prompt has several options, pick the single best for this run. Prefer the smallest coherent change that tests the hypothesis. - -**Good:** one optimizer schedule change; one architectural block; one training heuristic. **Bad:** model + optimizer + batch together; multiple paper ideas in one seed; "cleanup + new feature" in one candidate. 
- -## Output Format -Print a summary block for the runner: -```text -AUTORESEARCH_P_SUMMARY_BEGIN -{"idea":"short title","target_component":"model | optimizer | trainer","description":"change details, hypothesis, expected benefit","source_refs":["arXiv:"],"commit_sha":"git sha","completed_at":"YYYY-MM-DD HH:MM:SS"} -AUTORESEARCH_P_SUMMARY_END -``` - -## Runner / worktree -Before each P run, the runner syncs the seed worktree with its baseline branch (merge baseline into seed) so P always starts from the latest baseline. - -## Steps -1. Read `results.tsv` if present. -2. Refine prompt ? one concrete idea ? one isolated improvement; name target component. -3. Implement in worktree (from baseline); commit on seed branch. -4. Print summary block (runner records commit). Description must be enough for DCA. - -## Constraints -- One component, one improvement per seed. Smallest viable implementation. -- No exploratory cleanup or opportunistic refactors unless required for the one change. -- Commit on seed branch; runner does not merge. **P must never merge;** only DCA triggers merge into baseline. +# PD - Seed Planning And Generation + +## Responsibility +Extract exactly one testable improvement hypothesis from the seed prompt, +generate the first implementation in a candidate worktree, and hand the result +to CA through the runner. + +## Workspace and paths +**CWD = seed worktree.** Read and edit only inside it; use relative paths only. + +## arXiv search (CLI) + +Run from **project root** using the Python executable provided in your task prompt when available (for example, `{python_exe} pdca_system/run_arxiv.py ...`). If no Python executable is provided, use your project's normal Python runner. Arxiv is a project dependency. 
+ +### Search (CLI script) + +From project root, use the script in this component with the provided Python executable: + +```bash +{python_exe} pdca_system/run_arxiv.py --query "machine learning" --max-results 5 +{python_exe} pdca_system/run_arxiv.py --id 1605.08386v1 --output json +``` + +If your task prompt does not provide `{python_exe}`, substitute your project's normal Python launcher for the examples above and ensure the project environment is active or `PYTHONPATH` includes the project root. + +**CLI arguments:** `--query` / `-q`, `--id` (one or more arXiv IDs; overrides query), `--max-results` / `-n`, `--sort-by` (relevance | submittedDate | lastUpdatedDate), `--sort-order` (ascending | descending), `--output` / `-o` (text | json), `--download-dir`, `--verbose` / `-v`. + +### Hypothesis from results +1. Read abstracts; pick one concrete change (not just a concept). +2. Map to component: `model`, `optimizer`, or `trainer`. +3. State expected benefit; reduce to one isolated, evaluable improvement. + +## Input +- **results.tsv** in cwd (if present) — read first to avoid duplicating tried/discarded ideas. +- arXiv via arxiv-search; past failures in `queue/done/`; manual seed files. + +## One-Improvement Rule + +One seed = one hypothesis = one causal change. Do not bundle ideas. If the prompt has several options, pick the single best for this run. Prefer the smallest coherent change that tests the hypothesis. + +**Good:** one optimizer schedule change; one architectural block; one training heuristic. **Bad:** model + optimizer + batch together; multiple paper ideas in one seed; "cleanup + new feature" in one candidate. + +## Output Format +Write the summary JSON to the file named `autoresearch_summary.json` in your current working directory (cwd root). Do not print it to stdout or stderr.
Use this shape: + +```json +{"idea":"short title","target_component":"model | optimizer | trainer","description":"change details, hypothesis, expected benefit","source_refs":["arXiv:"],"commit_sha":"git sha","completed_at":"YYYY-MM-DD HH:MM:SS"} +``` + +## Runner / worktree +Before each P run, the runner syncs the seed worktree with its baseline branch (merge baseline into seed) so P always starts from the latest baseline. The CA stage receives the **Python executable** from the daemon (the one running the daemon); the canonical script to run (e.g. train.py) is defined in protocol and PDCA-Check-Action.md. + +## Steps +1. Read `results.tsv` if present. +2. Refine prompt → one concrete idea → one isolated improvement; name target component. +3. Implement in worktree (from baseline); commit on seed branch. +4. Write summary JSON to `autoresearch_summary.json` in cwd (runner records commit). Description must be enough for CA. + +## Constraints +- One component, one improvement per seed. Smallest viable implementation. +- No exploratory cleanup or opportunistic refactors unless required for the one change. +- Commit on seed branch; runner does not merge. **PD must never merge;** only CA triggers merge into baseline. diff --git a/pdca_system/README.md b/pdca_system/README.md new file mode 100644 index 000000000..4a55a3418 --- /dev/null +++ b/pdca_system/README.md @@ -0,0 +1,104 @@ +# PDCA System + +A **portable orchestrator** for an autonomous Plan-Do–Check-Action loop. It drives an external AI agent (Claude, Codex, OpenCode, etc.) through file-based queues: the agent plans and implements in the **Plan-Do** stage, then adapts, runs, and evaluates in the **Check-Action** stage. State, logs, and worktrees live under `pdca_system/` so the host project stays clean. + +**Designed to be copied into any repo.** Add `pdca_system` to your project, point the agent at the protocol, give it a prompt (e.g. “Improve X”, “Fix Y”), and run the daemon.
The system adapts to your project’s layout and conventions via the protocol and stage docs you keep (or edit) inside `pdca_system/`. + +--- + +## Use in a new project + +All commands in this document are run from the **project root**. Use your project’s Python runner for executables (e.g. `uv run` or `python -m`) so the same environment is used everywhere. + +1. **Copy** the `pdca_system` folder into your project root. +2. **Git:** If the project has no `.git` directory (not a git repo yet), run `git init`, then add all project files to git (e.g. `git add .`) so the project’s original files, `pdca_system/`, and `.gitignore` are tracked. If the project is already a repo, add `pdca_system/` and any new or changed project files to git (`git add pdca_system` or `git add .` as appropriate). Add the following to your project’s `.gitignore` so runtime artifacts are not committed: + ``` + pdca_system/history/ + pdca_system/prompt_audit/ + pdca_system/baseline_branches.json + pdca_system/baseline_metrics.json + ``` +3. **Install** PDCA dependencies from the project root (use your project's runner): + ```bash + uv pip install -r pdca_system/requirements.txt + ``` + Or add them to your project’s `pyproject.toml` / `requirements.txt`. The system needs FastAPI, uvicorn, pydantic, jinja2, and python-multipart only. You do **not** need Node/npm to run the dashboard; the CSS is pre-built in `web/static/app.css`. To rebuild CSS after editing Tailwind source: from project root, run `npm install` then `npm run build:css` (from the directory that contains `package.json`). +4. **Optionally adapt** the protocol and stage docs to your project: + - `protocol.md` — overall workflow, constraints, and what “baseline” and “promotion” mean in your context. + - `PDCA-Plan-Do.md` — what the agent should do in the Plan-Do stage (e.g. which files to edit, which command to run). + - `PDCA-Check-Action.md` — what the agent should do in the Check-Action stage (e.g. 
run tests, collect metrics, decide promote/discard). + - `config.py` — promotion threshold, default baseline branch, paths if you change them. +5. **Run** the dashboard (optional) and daemon from the **project root**: + - Dashboard: `uv run uvicorn pdca_system.web.app:app --reload` (or `python -m uvicorn pdca_system.web.app:app --reload`) → http://127.0.0.1:8000/pdca-system + - Daemon: `uv run pdca_system/daemon.py` (or `python -m pdca_system.daemon`; set `PDCA_AGENT=codex` / `PDCA_AGENT=opencode` / `PDCA_AGENT=kimi` for other backends) +6. **Bootstrap:** Have your agent read `pdca_system/protocol.md`, create a seed from your prompt, queue it for Plan-Do, then start the daemon. The daemon will hand off tasks to the agent via the queue; do not run Plan-Do or Check-Action stages manually in the same session. + +Seeds flow: **queue/pd/** → Plan-Do → **queue/ca/** → Check-Action → **state/**. View runs and status in the dashboard. + +--- + +## Concepts + +- **Seed** — One experiment or task, created from a human prompt. Has a branch, optional worktree, and a sequence of runs (Plan-Do then Check-Action). +- **Plan-Do (PD)** — Stage where the agent turns the prompt into a concrete plan and implements it (e.g. edit files, commit on the seed branch). +- **Check-Action (CA)** — Stage where the agent adapts/fixes, runs the canonical command (e.g. tests or training), and reports metrics; the system decides promote / keep / discard. +- **Daemon** — Long-lived process that polls `pdca_system/history/queue/pd/` and `pdca_system/history/queue/ca/`, dispatches tasks to your agent, and moves tasks to done/error. +- **Baseline** — Reference (e.g. a branch and metrics) that Check-Action compares against to decide promotion. Defined and updated by the workflow; see `protocol.md`. 
+ +--- + +## Layout (inside pdca_system) + +``` +pdca_system/ + requirements.txt — Python deps for dashboard + daemon (from project root: uv pip install -r pdca_system/requirements.txt) + protocol.md — Workflow rules and constraints (customize for your project) + PDCA-Plan-Do.md — Plan-Do stage instructions + PDCA-Check-Action.md — Check-Action stage instructions + daemon.py — Entrypoint for the daemon + task.py — Queue and state helpers + config.py — Promotion threshold, target metric (TARGET_METRIC_KEY, TARGET_METRIC_LOWER_IS_BETTER, TARGET_METRIC_LABEL), default branch, paths + web/ — Dashboard (FastAPI + HTMX) + domain/ — Models (Seed, Run, Stage, Status) + services/ — Workflow and git logic + repositories/ — State persistence + history/ — Created at runtime: queue/, state/, logs/, worktrees/ + package.json — Optional: only if you want to rebuild dashboard CSS (Tailwind) + tailwind.config.js — Optional: Tailwind config for npm run build:css + postcss.config.js — Optional: PostCSS config for npm run build:css +``` + +All runtime data (queue, state, logs, worktrees) lives under `pdca_system/history/` so the host repo can stay clean and the system remains self-contained. + +**Shared data across worktrees:** Have the adapt agent modify the project’s data-prep process and path configuration to use `~/.cache//` (e.g. `~/.cache/my_project/`), and commit those changes. Then all worktrees share the same data naturally. + +**npm is not required** to run the dashboard. The built `web/static/app.css` is included; the dashboard uses it as-is. Install Node/npm and run `npm install` + `npm run build:css` only if you edit `web/static/tailwind.input.css` and need to regenerate the CSS. + +--- + +## Making it fit your project + +- **Protocol and stage docs** — Edit `protocol.md`, `PDCA-Plan-Do.md`, and `PDCA-Check-Action.md` to describe your repo’s layout, **canonical run command** (the script or module to run, e.g. `train.py` or `pytest`), success metric, and promotion rules. 
The daemon injects the **Python executable** (the one running the daemon) into CA prompts; the agent uses that Python with the canonical command defined in your protocol/docs. +- **Config** — In `config.py` you can change the promotion threshold, the **target metric** used by Check-Action (`TARGET_METRIC_KEY`, `TARGET_METRIC_LOWER_IS_BETTER`, `TARGET_METRIC_LABEL` — e.g. `val_bpb` vs `val_accuracy`), the default baseline branch name, and (if needed) path overrides. The run must output that metric in stdout (or stderr) so the workflow can parse and record it. The dashboard and daemon prompt examples read these values from config, so editing only `config.py` (and docs) is enough to adapt the metric; no other code changes are required. +- **Agent backend** — Set `PDCA_AGENT=codex`, `PDCA_AGENT=opencode`, or `PDCA_AGENT=kimi` (default is Claude). The daemon invokes the agent per task; ensure the agent can read the worktree and run commands as required by your protocol. + +No changes to the host project are required beyond adding the folder and dependencies; the system is a pure orchestrator and drives work *outside* `pdca_system` (e.g. your `train.py`, tests, or scripts). + +### Prompt to auto-edit the docs for a new project + +Give this to your AI agent (or use it as a checklist) so it rewrites the PDCA docs in the context of the current repo: + +``` +You are in a project that has pdca_system copied in. Absorb pdca_system into the project and adapt the PDCA docs to this project only. + +1. Git: If the project has no `.git` directory (not a git repo yet), run `git init`, then add all project files to git (e.g. `git add .`) so the project's original files, `pdca_system/`, and `.gitignore` are tracked. If the project is already a repo, add `pdca_system/` and any new or changed files (`git add pdca_system` or `git add .` as appropriate). 
Add these lines to the project's `.gitignore` (create the file if it does not exist) so runtime artifacts are not committed: pdca_system/history/, pdca_system/prompt_audit/, pdca_system/baseline_branches.json, pdca_system/baseline_metrics.json
+2. Read the repo: identify the canonical "run" command (run from project root, e.g. `uv run train.py` or `python -m pytest`), which files the agent is allowed to edit, which are read-only, and what the success metric is (e.g. test pass, val loss, benchmark score).
+3. If the project uses local data (datasets, checkpoints, etc.): modify the data-prep process and any path configuration to use `~/.cache/<project_name>/`, and commit those file changes, so worktrees share data naturally.
+4. Edit pdca_system/protocol.md: replace any project-specific examples (paths, commands, metric names) with this project’s. Keep the structure (Seed → PD → CA, queue, baseline, promotion). State the canonical run command (from project root), the metric the Check-Action stage must report.
+5. Edit pdca_system/protocol.md: replace any project-specific examples (paths, commands, metric names) with this project's. Keep the structure (Seed → PD → CA, queue, baseline, promotion). State the canonical run command (from project root) and the metric the Check-Action stage must report.
+6. Edit pdca_system/PDCA-Plan-Do.md: say which files or dirs the agent may change in Plan-Do, what “done” looks like (e.g. commit on seed branch, ready for Check-Action).
+7. Edit pdca_system/PDCA-Check-Action.md: say how to run the canonical command from project root, how to parse the metric from output or logs, and how to decide promote vs keep vs discard (threshold, comparison to baseline).
+8. 
If the project uses a different default branch, promotion threshold, or success metric, update pdca_system/config.py: DEFAULT_BASELINE_BRANCH, PROMOTION_THRESHOLD, and the target metric (TARGET_METRIC_KEY, and optionally TARGET_METRIC_LABEL, TARGET_METRIC_LOWER_IS_BETTER) so the dashboard and promotion logic use the correct key and direction. + +Do not change pdca_system code (daemon, web, services). Only edit the markdown docs, config.py, and the project's .gitignore as above. The adapt agent may also modify the host project's data-prep process and commit those changes (step 3). +``` diff --git a/pdca_system/config.py b/pdca_system/config.py new file mode 100644 index 000000000..83b72838e --- /dev/null +++ b/pdca_system/config.py @@ -0,0 +1,30 @@ +"""Static configuration for the PDCA system. No dynamic or per-run values.""" +from __future__ import annotations + +from pathlib import Path + +PDCA_SYSTEM_ROOT = Path(__file__).resolve().parent + +# Promotion threshold: improve target metric (see TARGET_METRIC_KEY) by at least this much to promote +PROMOTION_THRESHOLD = 0.001 + +# Target metric: key reported by run (e.g. val_bpb, val_accuracy), direction, and display label +TARGET_METRIC_KEY = "val_bpb" +TARGET_METRIC_LOWER_IS_BETTER = True # True = minimize (e.g. bpb, loss), False = maximize (e.g. accuracy) +TARGET_METRIC_LABEL = "Validation BPB" + + +def best_target_metric_key() -> str: + """Key for best value in branch metrics view (e.g. best_val_bpb).""" + return f"best_{TARGET_METRIC_KEY}" + + +def former_target_metric_key() -> str: + """Key for former baseline value in run summary / seed context (e.g. 
former_val_bpb).""" + return f"former_{TARGET_METRIC_KEY}" + +# Worktree root relative to project +WORKTREE_ROOT = "pdca_system/history/worktrees" + +# Default branch name suggested in UI when no branches exist (not a global baseline) +DEFAULT_BASELINE_BRANCH = "master" diff --git a/component_system/run.py b/pdca_system/daemon.py similarity index 63% rename from component_system/run.py rename to pdca_system/daemon.py index b7f989746..8e8f53c99 100644 --- a/component_system/run.py +++ b/pdca_system/daemon.py @@ -1,4 +1,4 @@ -"""Seed -> P -> DCA daemon for the component-system web app.""" +"""Seed -> PD -> CA daemon for the pdca-system web app.""" from __future__ import annotations if __package__ in {None, ""}: @@ -20,17 +20,21 @@ from pathlib import Path from typing import Any -from component_system.domain.models import StageName -from component_system.services.workflow import ( +from pdca_system.config import ( + TARGET_METRIC_KEY, + TARGET_METRIC_LABEL, + TARGET_METRIC_LOWER_IS_BETTER, +) +from pdca_system.services.workflow import ( BASELINE_SEED_ID, DuplicateRunStartError, SyncResolutionQueued, WorkflowService, ) -from component_system.task import ( +from pdca_system.task import ( BASELINE_BRANCHES_PATH, BASELINE_METRICS_PATH, - COMPONENT_SYSTEM_ROOT, + PDCA_SYSTEM_ROOT, claim_pending, DAEMON_HEARTBEAT_PATH, daemon_heartbeat, @@ -42,7 +46,7 @@ restore_in_progress_tasks, ) -PROJECT_ROOT = COMPONENT_SYSTEM_ROOT.parent +PROJECT_ROOT = PDCA_SYSTEM_ROOT.parent LOG_DIR = LOG_ROOT RESULTS_TSV = PROJECT_ROOT / "results.tsv" PROGRESS_PNG = PROJECT_ROOT / "progress.png" @@ -51,20 +55,21 @@ _shutdown = False WORKFLOW = WorkflowService() -DEFAULT_TIMEOUTS = {"p": 900, "dca": 3600, "direct": 3600} +DEFAULT_TIMEOUTS = {"pd": 900, "ca": 3600, "direct": 3600} -# Canonical DCA entrypoint run: require ≥900s so training can complete. Agent must set command/tool timeout ≥ this. -DCA_CANONICAL_RUN_TIMEOUT_SECONDS = 900 +# Canonical CA run: execute project's train.py (≥900s). 
Agent must set command/tool timeout ≥ this. +CA_CANONICAL_RUN_TIMEOUT_SECONDS = 900 STAGE_DOCS = { - "p": ["PDCA-PLAN.md"], - "dca": ["PDCA-DO-CHECK-ACTION.md"], + "pd": ["PDCA-Plan-Do.md"], + "ca": ["PDCA-Check-Action.md"], } AGENT_CONFIGS: dict[str, dict[str, Any]] = { "claude": {"cmd": ["claude", "-p", "--verbose"], "via": "stdin"}, "codex": {"cmd": ["codex", "exec", "-a", "never", "--sandbox", "workspace-write"], "via": "arg"}, "opencode": {"cmd": ["opencode", "run"], "via": "arg"}, + "kimi": {"cmd": ["kimi", "--yolo", "-p"], "via": "arg"}, } @@ -85,6 +90,16 @@ def _build_log_paths(run_id: str) -> tuple[Path, Path]: return stdout_path, stderr_path +# Fixed filename in agent cwd; daemon reads from agent_cwd / SUMMARY_FILENAME. +SUMMARY_FILENAME = "autoresearch_summary.json" + + +def _summary_json_path_in_cwd(worktree_path: str | None) -> Path: + """Path where the agent must write the run summary: fixed filename in agent cwd (worktree root or project root).""" + cwd = _agent_cwd(worktree_path) + return Path(cwd) / SUMMARY_FILENAME + + def _write_prompt_file(run_id: str, prompt: str) -> Path: """Save the agent prompt to a file for debugging. Returns the path.""" LOG_DIR.mkdir(parents=True, exist_ok=True) @@ -93,27 +108,17 @@ def _write_prompt_file(run_id: str, prompt: str) -> Path: return prompt_path -def _is_root_venv_active() -> bool: - expected = (PROJECT_ROOT / ".venv").resolve() - active = os.environ.get("VIRTUAL_ENV") - if not active: - return False - try: - return Path(active).resolve() == expected - except OSError: - return False - - -def _dca_command_guidance() -> tuple[str, str]: - timeout_prefix = f"timeout {DCA_CANONICAL_RUN_TIMEOUT_SECONDS}" - if _is_root_venv_active(): - return ( - f"{timeout_prefix} uv run --active component_system/entrypoint.py", - "Root .venv is active; use --active to reuse it from the worktree.", - ) +def _ca_command_guidance() -> tuple[str, str, str]: + """Return (python_exe, note, runner_label) for the CA prompt. 
Only the Python executable is given; the project's canonical script (e.g. train.py) is defined in protocol/docs.""" + python_exe = sys.executable + if " " in python_exe: + python_exe_quoted = f'"{python_exe}"' + else: + python_exe_quoted = python_exe return ( - f"{timeout_prefix} uv run component_system/entrypoint.py", - "No active root .venv; uv run without --active.", + python_exe_quoted, + "Daemon is running with this Python; use it in the worktree so the same environment is used.", + sys.executable, ) @@ -160,13 +165,20 @@ def _agent_failure_reason(exit_code: int, stdout: str, stderr: str) -> str: return f"Agent exited with code {exit_code}. See stdout/stderr logs for details." -def _should_salvage_completed_dca(stage: str, exit_code: int, output_text: str) -> bool: - """Accept a DCA run when canonical metrics were printed despite agent exit issues.""" - if stage != "dca" or exit_code == 0: +def _should_salvage_completed_ca( + stage: str, exit_code: int, run_id: str, worktree_path: str | None +) -> bool: + """Accept a CA run when the summary file exists in agent cwd and contains the target metric despite agent exit code.""" + if stage != "ca" or exit_code == 0: + return False + path = _summary_json_path_in_cwd(worktree_path) + if not path.exists(): + return False + try: + summary = json.loads(path.read_text(encoding="utf-8")) + return summary.get("metrics", {}).get(TARGET_METRIC_KEY) is not None + except (json.JSONDecodeError, OSError): return False - summary = WORKFLOW.extract_summary(output_text, StageName.dca) or {} - metrics = WORKFLOW.extract_dca_metrics(output_text, summary) - return metrics.get("val_bpb") is not None def _agent_cwd(worktree_path: str | None) -> str: @@ -204,12 +216,12 @@ def _sync_results_tsv_into_worktree(worktree_path: str | None) -> None: def _sync_baseline_json_into_worktree(worktree_path: str | None) -> None: - """Copy baseline_metrics.json and baseline_branches.json from project component_system into the worktree. 
+ """Copy baseline_metrics.json and baseline_branches.json from project pdca_system into the worktree. Worktrees check out from baseline-branch; without this sync the agent would see stale or missing baseline data.""" resolved = _resolve_worktree_path(worktree_path) if resolved is None: return - dest_dir = resolved / "component_system" + dest_dir = resolved / "pdca_system" dest_dir.mkdir(parents=True, exist_ok=True) for src_path, name in [ (BASELINE_METRICS_PATH, "baseline_metrics.json"), @@ -226,7 +238,7 @@ def _sync_baseline_json_into_worktree(worktree_path: str | None) -> None: def _sync_worktree_context(worktree_path: str | None) -> None: """Sync all workflow-managed live data into the worktree so the agent sees current state. - Call before invoking the agent when cwd is a worktree (P or DCA).""" + Call before invoking the agent when cwd is a worktree (PD or CA).""" _sync_results_tsv_into_worktree(worktree_path) _sync_baseline_json_into_worktree(worktree_path) @@ -242,7 +254,7 @@ def _invoke_agent( cmd = list(config["cmd"]) timeout = _get_timeout(stage) cwd = _agent_cwd(worktree_path) - # PYTHONUNBUFFERED=1 so child Python (e.g. uv run entrypoint.py) flushes stdout + # PYTHONUNBUFFERED=1 so child Python (e.g. uv run train.py) flushes stdout # immediately instead of block-buffering when stdout is a pipe; otherwise # stdout log only appears in one shot after the task finishes. 
env = {**os.environ, "PYTHONUNBUFFERED": "1"} @@ -335,7 +347,7 @@ def _invoke_agent( def _build_metrics_recovery_prompt(task: dict[str, Any]) -> str: - """Lightweight prompt for metrics-recovery DCA: no protocol/docs, just task, log paths, report shape.""" + """Lightweight prompt for metrics-recovery CA: no protocol/docs, just task, log paths, report shape.""" task_json = json.dumps(task, indent=2) source_run_id = task.get("source_run_id", "unknown") stdout_log = task.get("source_stdout_log_path", "missing") @@ -346,7 +358,7 @@ def _build_metrics_recovery_prompt(task: dict[str, Any]) -> str: "completed_at": "YYYY-MM-DD HH:MM:SS", "commit_sha": "", "metrics": { - "val_bpb": 1.24, + TARGET_METRIC_KEY: 1.24, "training_seconds": 300.1, "total_seconds": 360.4, "startup_seconds": 25.8, @@ -366,11 +378,11 @@ def _build_metrics_recovery_prompt(task: dict[str, Any]) -> str: f"Inspect logs for source run {source_run_id!r}:\n" f" stdout: {stdout_log}\n" f" stderr: {stderr_log}\n\n" - "Recover canonical metrics from those logs if present, then print the summary block below. " + f"Recover canonical metrics from those logs if present (the metrics object must include the target metric key {TARGET_METRIC_KEY!r}). " "If unrecoverable, use empty \"metrics\": {} and explain in notes.\n\n" - "AUTORESEARCH_DCA_SUMMARY_BEGIN\n" + f"Write the summary JSON to the file named {SUMMARY_FILENAME} in your current working directory (cwd root). " + "Do not print this JSON to stdout or stderr. Use this shape (reference):\n\n" f"{report_json}\n" - "AUTORESEARCH_DCA_SUMMARY_END\n" ) @@ -384,16 +396,15 @@ def _build_sync_resolution_prompt(task: dict[str, Any]) -> str: "Steps:\n" f"1. Merge the baseline branch into the current branch: git merge {baseline_branch!r}\n" "2. Resolve any conflicts, then commit the merge (e.g. git add . && git commit -m 'Merge baseline into seed').\n" - "3. Do not run the training entrypoint.\n" - "4. 
Print the following block so the runner can confirm success:\n\n" - "AUTORESEARCH_DCA_SUMMARY_BEGIN\n" + "3. Do not run the training script (train.py).\n" + f"4. Write the summary JSON to the file named {SUMMARY_FILENAME} in your current working directory (cwd root). " + "Do not print this JSON to stdout or stderr. Use this shape (reference):\n" '{"checks":["sync_resolution"],"notes":"Merged baseline into seed; conflicts resolved.","completed_at":"YYYY-MM-DD HH:MM:SS","commit_sha":"","metrics":{}}\n' - "AUTORESEARCH_DCA_SUMMARY_END\n" ) def _build_merge_resolution_prompt(task: dict[str, Any]) -> str: - """Lightweight prompt for merge-resolution DCA: no protocol/docs, just commit, merge, report.""" + """Lightweight prompt for merge-resolution CA: no protocol/docs, just commit, merge, report.""" task_json = json.dumps(task, indent=2) target_branch = task.get("baseline_branch", "master") # branch we want to merge into (e.g. master) worktree_path = task.get("worktree_path") or "" @@ -426,8 +437,8 @@ def _build_merge_resolution_prompt(task: dict[str, Any]) -> str: f" cd # e.g. main repo\n" f" git merge {BASELINE_SEED_ID!r}\n" " Wrong (do not do this): cd to the __baseline__ worktree and run git merge master — that merges master into __baseline__.\n" - "3. Do not run the training entrypoint; the experiment already completed and metrics exist.\n" - "4. Print the DCA summary block below (same metrics as the previous run). Include the current commit SHA (after committing the merge) in the DCA summary JSON.\n\n" + "3. Do not run the training script (train.py); the experiment already completed and metrics exist.\n" + f"4. Write the summary JSON to the file named {SUMMARY_FILENAME} in your current working directory (same metrics as the previous run; metrics must include the target metric key {TARGET_METRIC_KEY!r}). Include the current commit SHA (after committing the merge). 
Do not print this JSON to stdout or stderr.\n\n" ) else: # Normal seed: we need to merge the SEED branch INTO the baseline branch (so baseline gets the seed's changes). @@ -440,7 +451,7 @@ def _build_merge_resolution_prompt(task: dict[str, Any]) -> str: else: cwd_note = ( "Your working directory is the project root. " - f"The seed worktree is at component_system/history/worktrees/{seed_id!r} (use it only to commit any pending changes).\n\n" + f"The seed worktree is at pdca_system/history/worktrees/{seed_id!r} (use it only to commit any pending changes).\n\n" ) steps = ( "Steps:\n" @@ -449,8 +460,8 @@ def _build_merge_resolution_prompt(task: dict[str, Any]) -> str: f" git checkout {target_branch!r}\n" f" git merge {seed_id!r}\n" " Resolve any conflicts, then commit the merge. The result must be: the baseline branch contains the seed's changes (merge direction: seed → baseline).\n" - "3. Do not run the training entrypoint; the experiment already completed and metrics exist.\n" - "4. Print the DCA summary block below (same metrics as the previous run). Use the merge commit SHA from the baseline branch (after the merge, from project root: git rev-parse HEAD).\n\n" + "3. Do not run the training script (train.py); the experiment already completed and metrics exist.\n" + f"4. Write the summary JSON to the file named {SUMMARY_FILENAME} in your current working directory (same metrics as the previous run; metrics must include the target metric key {TARGET_METRIC_KEY!r}). Use the merge commit SHA from the baseline branch (after the merge, from project root: git rev-parse HEAD). 
Do not print this JSON to stdout or stderr.\n\n" ) return ( @@ -459,45 +470,44 @@ def _build_merge_resolution_prompt(task: dict[str, Any]) -> str: f"{task_json}\n\n" f"{cwd_note}" f"{steps}" - "AUTORESEARCH_DCA_SUMMARY_BEGIN\n" + "Use this shape (reference) for the summary JSON:\n\n" f"{report_json}\n" - "AUTORESEARCH_DCA_SUMMARY_END\n" ) def _build_prompt(stage: str, task: dict[str, Any], task_path: Path) -> str: """Build the agent prompt for a stage. Prompt types (by weight): - - P: full header (protocol, stage doc, baseline files, task) + P workflow. Heavy. - - DCA metrics_recovery: lightweight; task + log paths, report shape (no protocol/docs). Light. - - DCA merge_resolution: lightweight; task + commit, merge, report (no protocol/docs). Light. - - DCA baseline_measurement: full header + baseline retry/OOM/commit/run. Heavy. - - DCA normal: full header + adapt/run/commit/report. Heavy. + - PD: full header (protocol, stage doc, baseline files, task) + PD workflow. Heavy. + - CA metrics_recovery: lightweight; task + log paths, report shape (no protocol/docs). Light. + - CA merge_resolution: lightweight; task + commit, merge, report (no protocol/docs). Light. + - CA baseline_measurement: full header + baseline retry/OOM/commit/run. Heavy. + - CA normal: full header + adapt/run/commit/report. Heavy. """ task_json = json.dumps(task, indent=2) rel_task = task_path.relative_to(PROJECT_ROOT).as_posix() - worktree_path = task.get("worktree_path", "component_system/history/worktrees") + worktree_path = task.get("worktree_path", "pdca_system/history/worktrees") agent_cwd = _agent_cwd(worktree_path) worktree_dir = Path(agent_cwd) # Worktree runs must stay entirely within the copied seed workspace to avoid external_directory requests. 
in_worktree = worktree_dir.resolve() != PROJECT_ROOT.resolve() if in_worktree: - context_protocol = " - component_system/protocol.md" - docs = "\n".join(f" - component_system/{doc}" for doc in STAGE_DOCS[stage]) + context_protocol = " - pdca_system/protocol.md" + docs = "\n".join(f" - pdca_system/{doc}" for doc in STAGE_DOCS[stage]) task_block = ( "Task content (provided inline; do not look up any external task file):\n" f"{task_json}\n\n" ) worktree_note = ( "Your working directory is the assigned workflow worktree (your current directory).\n" - "All required file context is already copied into this worktree under component_system/.\n" + "All required file context is already copied into this worktree under pdca_system/.\n" "Use only paths relative to your current working directory. " "Do not request access to absolute paths, parent-directory paths, or files outside the worktree.\n" ) scope_note = "Do not edit files outside the worktree unless the prompt explicitly requires it.\n\n" else: - context_protocol = " - component_system/protocol.md" - docs = "\n".join(f" - component_system/{doc}" for doc in STAGE_DOCS[stage]) + context_protocol = " - pdca_system/protocol.md" + docs = "\n".join(f" - pdca_system/{doc}" for doc in STAGE_DOCS[stage]) task_path_rel = f" - {rel_task}" task_block = f"Task file:\n{task_path_rel}\n\nTask content:\n{task_json}\n\n" worktree_note = "Your working directory is the project root.\n" @@ -505,17 +515,17 @@ def _build_prompt(stage: str, task: dict[str, Any], task_path: Path) -> str: required_context = ( "Required context (read first; paths relative to your cwd):\n" - f" - component_system/protocol.md\n" + f" - pdca_system/protocol.md\n" f"{docs}\n" ) baseline_files_note = ( "Baseline reference files (workflow-managed; read-only):\n" - " - component_system/baseline_branches.json (per-branch baseline mapping)\n" - " - component_system/baseline_metrics.json (baseline run metrics)\n" + " - pdca_system/baseline_branches.json (per-branch 
baseline mapping)\n" + " - pdca_system/baseline_metrics.json (baseline run metrics)\n" "The workflow writes these; only read them for context.\n\n" ) header = ( - "You are working on the autoresearch component-system workflow.\n\n" + "You are working on the autoresearch pdca-system workflow.\n\n" f"{required_context}\n" f"{baseline_files_note}" f"{task_block}" @@ -523,19 +533,18 @@ def _build_prompt(stage: str, task: dict[str, Any], task_path: Path) -> str: f"{scope_note}" ) - if stage == "p": + if stage == "pd": if in_worktree: p_workflow = ( "Workflow:\n" "1. Refine the seed prompt into a concrete implementation idea.\n" "2. Implement the first generated version of that idea in the provided worktree.\n" "3. Create a git commit in the seed branch (current branch in the worktree).\n" - "4. Print a JSON summary between these exact markers:\n" - "AUTORESEARCH_P_SUMMARY_BEGIN\n" + f"4. Write the summary JSON to the file named {SUMMARY_FILENAME} in your current working directory. " + "Do not print this JSON to stdout or stderr. Use this shape (reference): " '{"idea":"...","target_component":"model|optimizer|trainer","description":"...","source_refs":["..."],"commit_sha":"...","completed_at":"YYYY-MM-DD HH:MM:SS"}\n' - "AUTORESEARCH_P_SUMMARY_END\n" "One branch per seed: you are already on the seed branch in the worktree.\n" - "Do not merge branches; only the DCA stage may trigger a merge into baseline.\n" + "Do not merge branches; only the CA stage may trigger a merge into baseline.\n" ) else: p_workflow = ( @@ -543,22 +552,22 @@ def _build_prompt(stage: str, task: dict[str, Any], task_path: Path) -> str: "1. Refine the seed prompt into a concrete implementation idea.\n" "2. Implement the first generated version of that idea in the current directory (project root).\n" "3. Create a git commit on the current branch.\n" - "4. Print a JSON summary between these exact markers:\n" - "AUTORESEARCH_P_SUMMARY_BEGIN\n" + f"4. 
Write the summary JSON to the file named {SUMMARY_FILENAME} in your current working directory. " + "Do not print this JSON to stdout or stderr. Use this shape (reference): " '{"idea":"...","target_component":"model|optimizer|trainer","description":"...","source_refs":["..."],"commit_sha":"...","completed_at":"YYYY-MM-DD HH:MM:SS"}\n' - "AUTORESEARCH_P_SUMMARY_END\n" "One branch per seed: you are in the project root; use the current branch for your commit.\n" - "Do not merge branches; only the DCA stage may trigger a merge into baseline.\n" + "Do not merge branches; only the Check-Action stage may trigger a merge into baseline.\n" ) return header + ( - "You are the P stage.\n\n" - "## Read results.tsv first (avoid idea duplication)\n" + "You are working on the Plan-Do stage.\n\n" + f"The Check-Action stage will run your code and require the target metric key {TARGET_METRIC_KEY!r} in the summary JSON file it writes.\n\n" + "Read results.tsv first (avoid idea duplication)\n" "Before choosing a hypothesis, read `results.tsv` in your cwd if it exists. " "Use it to avoid proposing ideas already tried or discarded; only repeat an idea if you have a clear new angle (e.g. different implementation or target component). 
" - "See component_system/PDCA-PLAN.md for full guidance.\n\n" + "See pdca_system/PDCA-Plan-Do.md for full guidance.\n\n" f"{p_workflow}" ) - if stage == "dca": + if stage == "ca": sync_resolution = task.get("sync_resolution") is True merge_resolution = task.get("merge_resolution") is True metrics_recovery = task.get("metrics_recovery") is True @@ -568,57 +577,67 @@ def _build_prompt(stage: str, task: dict[str, Any], task_path: Path) -> str: return _build_merge_resolution_prompt(task) if metrics_recovery: return _build_metrics_recovery_prompt(task) - dca_cmd, dca_note = _dca_command_guidance() + python_exe, ca_note, runner_label = _ca_command_guidance() baseline_measurement = task.get("seed_id") == "__baseline__" conflict_block = "" if baseline_measurement: return header + conflict_block + ( "BASELINE MEASUREMENT: establish the first reference metrics in the dedicated baseline worktree.\n" "You must retry until the run completes successfully and you can report real metrics. Do not report empty metrics and stop.\n" - "If training fails with CUDA out of memory (OOM): the default batch size is for H100. Reduce device_batch_size (and if needed total_batch_size) in component_system/components/trainer.py (TrainingSettings) so training fits in available VRAM, then rerun until the baseline run completes. Only trivial execution fixes (e.g. batch size) are allowed; do not change model architecture or training logic.\n" + f"Python runner (use this): {runner_label}. ({ca_note})\n" + "If training fails with CUDA out of memory (OOM): the default batch size is for H100. Reduce DEVICE_BATCH_SIZE (and if needed TOTAL_BATCH_SIZE) in train.py so training fits in available VRAM, then rerun until the baseline run completes. Only trivial execution fixes (e.g. batch size) are allowed; do not change model architecture or training logic.\n" "If you modified any files (e.g. batch size for OOM), you must commit those changes on the baseline branch before reporting. 
An uncommitted worktree causes the follow-up merge to fail.\n" - f"Run the canonical command (≥{DCA_CANONICAL_RUN_TIMEOUT_SECONDS}s): `{dca_cmd} > training.log 2>&1`. Set your command/tool timeout to at least {DCA_CANONICAL_RUN_TIMEOUT_SECONDS} seconds. After the run, inspect training.log to confirm completion and recover or verify metrics.\n" - f"({dca_note})\n" - "Report the final result in JSON between these exact markers once training has completed successfully. Include the current commit SHA in the summary (commit any changes first).\n" - "AUTORESEARCH_DCA_SUMMARY_BEGIN\n" - '{"checks":["baseline_measurement"],"notes":"Measured the current baseline in the dedicated baseline worktree.","completed_at":"YYYY-MM-DD HH:MM:SS","commit_sha":"...","metrics":{"val_bpb":1.239972,"training_seconds":300.1,"total_seconds":360.4,"startup_seconds":25.8,"peak_vram_mb":11967.8,"mfu_percent":2.15,"total_tokens_M":140.5,"num_steps":268,"num_params_M":11.5,"depth":4}}\n' - "AUTORESEARCH_DCA_SUMMARY_END\n" - "If after all retries (including batch size reduction) metrics are still unavailable, only then print the same object with an empty metrics object and explain in notes.\n" + f"Use this Python executable for the canonical run: `{runner_label}`. Run the project's canonical command (see protocol; e.g. train.py or the script your project uses) with it, e.g. `{python_exe} @@ -16,9 +16,9 @@
- Component System + PDCA System -

Seed -> Plan -> Do-Check-Action orchestration with FastAPI, HTMX, Alpine, and Tailwind.

+

Seed -> Plan-Do -> Check-Action orchestration with FastAPI, HTMX, Alpine, and Tailwind.

@@ -62,7 +62,7 @@

Run log · {{ run.run_id }}

-

{{ run.stage.value|upper }} · {{ run.status.value }}

+

{{ run.stage.value|upper }} · {{ run.status.value }}{% if run.agent_type or (run.summary and run.summary.get('agent_type')) %} · {{ run.agent_type or run.summary.get('agent_type', '') }}{% endif %}

+
+ +
+
+
+

Terminate tasks

+

All tasks for this seed that are not yet completed (queued or running) will be terminated immediately and marked as failed. This cannot be undone.

+
+ + +
+
+