diff --git a/docs/260212-personal-server-refactor.md b/docs/260212-personal-server-refactor.md
new file mode 100644
index 0000000..f25d41a
--- /dev/null
+++ b/docs/260212-personal-server-refactor.md
@@ -0,0 +1,615 @@
+# Personal Server + DataBridge Refactor Plan
+
+## 1. Purpose
+
+This document captures a cross-repo investigation of how DataBridge currently bundles and manages Personal Server, why lifecycle/auth/communication issues keep recurring, and a concrete refactor plan.
+
+The goal is to make another engineer productive quickly without re-discovering context across:
+
+- Project root: `/Users/kahtaf/Documents/workspace_vana`
+- `/databridge` -> https://github.com/vana-com/databridge
+  - Also called DataConnect, which is the correct product name
+  - Desktop app that bundles the personal server
+  - Helps procure data through data connectors
+- `/personal-server-ts` -> https://github.com/vana-com/personal-server-ts
+  - Long-running service that ingests data, permissions it via grants, and serves it
+  - Publicly available through a tunnel at ${serverAddress}.server.vana.org
+- `/data-gateway` -> https://github.com/vana-com/data-gateway
+  - A layer in front of the Vana L1, used to speed up onchain operations by batching them
+- `/session-relay` -> https://github.com/vana-com/session-relay
+  - Centralized service that orchestrates third-party apps' data connection requests to a user's locally running DataConnect
+- `/vana-connect` -> https://github.com/vana-com/vana-connect
+  - @opendatalabs/connect SDK for developers of third-party apps
+  - /examples/nextjs-starter: example application to create a session and request data from users
+
+## 2. Confirmed Product Decisions
+
+These decisions are treated as constraints for the plan:
+
+1. Desktop mode lifecycle:
+   - Personal Server starts when DataBridge starts.
+   - It can start unauthenticated first, then transition/restart when the owner signature is available.
+   - It shuts down when DataBridge closes.
+2. 
Always-on behavior: + - Auto-start by default in desktop mode. +3. Local trust boundary: + - DataBridge to bundled Personal Server should not require wallet-signature auth. +4. Backward compatibility: + - No requirement to preserve current localhost REST control contract. +5. Packaging: + - Number of internal packages can change. Follow industry practice. +6. ODL Cloud fallback: + - Optional. +7. Cross-platform UX: + - Choose best user UX. +8. URL convention: + - Keep current server-address-based convention. + +## 3. Source of Truth Docs Used + +- Protocol spec: `databridge/docs/260121-data-portability-protocol-spec.md` +- DataBridge architecture doc: `databridge/docs/architecture.md` +- Grant/connect flow doc: `databridge/docs/260203-grant-connect-flow.md` +- Personal server scaffold/package docs: + - `/personal-server-ts/docs/260127-personal-server-scaffold.md` + - `/personal-server-ts/docs/260202-package-renaming.md` + - `/personal-server-ts/docs/260130-server-signing-delegation.md` + - `/personal-server-ts/docs/260204-frp-server-design.md` +- Session relay flow doc: `/session-relay/SESSION_RELAY_WORKFLOW.md` + +## 4. 
Cross-Repo Flow Today + +```mermaid +sequenceDiagram + participant Builder as Builder App (vana-connect/server) + participant Relay as Session Relay + participant DB as DataBridge + participant PS as Personal Server + participant GW as Data Gateway + + Builder->>Relay: POST /v1/session/init (Web3Signed) + Relay-->>Builder: {sessionId, deepLinkUrl(with secret), expiresAt} + Builder->>DB: open deep link + DB->>Relay: POST /v1/session/claim {sessionId, secret} + DB->>PS: start subprocess (phase 1 unauth) + DB->>DB: user auth flow -> master key signature + DB->>PS: restart/reconfigure (phase 2 auth) + DB->>PS: POST /v1/grants {granteeAddress, scopes} + PS->>GW: POST /v1/grants (EIP-712 signature in auth header) + GW-->>PS: {grantId} + DB->>Relay: POST /v1/session/:id/approve {secret, grantId, userAddress, scopes} + Relay-->>Builder: poll/webhook returns approved grant payload +``` + +## 5. Repo-by-Repo Findings + +## 5.1 DataBridge (desktop app) + +### Core lifecycle and process wiring + +- Tauri start/stop/cleanup commands: + - `databridge/src-tauri/src/commands/server.rs` + - `databridge/src-tauri/src/lib.rs` +- App-exit cleanup currently kills the Personal Server process: + - `cleanup_personal_server()` in `databridge/src-tauri/src/commands/server.rs` + - called on `RunEvent::Exit` in `databridge/src-tauri/src/lib.rs` + +### Frontend lifecycle orchestration + +- Personal Server hook: + - `databridge/src/hooks/usePersonalServer.ts` +- Hook is used in multiple places (global and page-level): + - `databridge/src/App.tsx` + - `databridge/src/pages/home/index.tsx` + - `databridge/src/pages/runs/use-runs-page.ts` + - `databridge/src/pages/settings/use-settings-page.ts` + - `databridge/src/pages/grant/use-grant-flow.ts` + +This creates multiple lifecycle controllers around one underlying process. + +Each of the 5+ hook instances installs its own full set of Tauri event listeners (`personal-server-ready`, `personal-server-error`, `personal-server-tunnel`, etc.) 
without deduplication. This means 5+ copies of each listener fire simultaneously, all updating shared module globals and calling `setStatus()`, causing redundant state updates and potential race conditions. + +### Wrapper and packaging + +- Wrapper used for bundled runtime: + - `databridge/personal-server/index.js` + - `databridge/personal-server/index.cjs` +- Build pipeline and native addon handling: + - `databridge/personal-server/scripts/build.js` + - `databridge/scripts/ensure-personal-server.js` + - `databridge/scripts/build-prod.js` + - `databridge/src-tauri/tauri.conf.json` + +Observed issue: runtime health depends on external `node_modules` being present beside bundled binary. + +The wrapper-spawned frpc subprocess (`fixTunnelProxyName()` in `index.js`) sits outside Tauri's process management. If it crashes, nothing restarts it. `cleanup_personal_server()` on app exit won't kill it. frpc lifecycle should be fully owned by personal-server-ts, not DataBridge. + +### Local service clients + +- Grants client: + - `databridge/src/services/personalServer.ts` +- Ingest client: + - `databridge/src/services/personalServerIngest.ts` +- Session relay client: + - `databridge/src/services/sessionRelay.ts` +- Pending split-failure retry: + - `databridge/src/hooks/usePendingApproval.ts` + +### Auth callback and server registration bridge + +- Auth page signs master key and typed server registration: + - `databridge/src/auth-page/auth.ts` +- Tauri local callback server proxies registration to gateway: + - `databridge/src-tauri/src/commands/auth.rs` + +Notable behavior: gateway registration URL is hardcoded in auth command path today. 
+ +## 5.2 personal-server-ts + +### Package architecture + +- Workspace root: + - `/personal-server-ts/package.json` +- Packages: + - `/personal-server-ts/packages/core/package.json` + - `/personal-server-ts/packages/server/package.json` + - `/personal-server-ts/packages/cli/package.json` + - facade exports in `/personal-server-ts/packages/cli/src/index.ts` + +Current model is 3 internal workspaces. External consumers can still be exposed through a single facade package. + +### Runtime and server composition + +- Bootstrap/runtime: + - `/personal-server-ts/packages/server/src/bootstrap.ts` + - `/personal-server-ts/packages/server/src/index.ts` +- Route composition and CORS: + - `/personal-server-ts/packages/server/src/app.ts` +- Config schema: + - `/personal-server-ts/packages/core/src/schemas/server-config.ts` + +### Auth and access control + +- Web3Signed middleware and dev bypass: + - `/personal-server-ts/packages/server/src/middleware/web3-auth.ts` +- Owner check: + - `/personal-server-ts/packages/server/src/middleware/owner-check.ts` +- Grant check: + - `/personal-server-ts/packages/server/src/middleware/grant-check.ts` + +### Data and grants routes + +- Data routes: + - `/personal-server-ts/packages/server/src/routes/data.ts` +- Grants routes: + - `/personal-server-ts/packages/server/src/routes/grants.ts` +- Health route: + - `/personal-server-ts/packages/server/src/routes/health.ts` + +Design note: `POST /v1/data/:scope` is intentionally unauthenticated — it is a local-only ingest path called by DataBridge. However, the FRP tunnel exposes all HTTP routes without path filtering, meaning remote callers can reach this endpoint through `{address}.server.vana.org`. A local-only enforcement mechanism (e.g. rejecting requests with `X-Forwarded-For`) is needed to enforce the local trust boundary. + +The `devToken` mechanism in `web3-auth.ts` was designed for PS dev UI testing, not DataBridge auth. DataBridge's current use of it is incidental. 
+### Tunnel subsystem
+
+- Tunnel manager/config:
+  - `/personal-server-ts/packages/server/src/tunnel/manager.ts`
+  - `/personal-server-ts/packages/server/src/tunnel/config.ts`
+  - `/personal-server-ts/packages/server/src/tunnel/auth.ts`
+
+Notable issue: generated FRP proxy name is static (`personal-server`), which forces downstream hacks in DataBridge wrapper.
+
+### Gateway client used by Personal Server
+
+- `/personal-server-ts/packages/core/src/gateway/client.ts`
+
+Important detail: gateway POST auth uses `Authorization: Web3Signed <signature>`, where the value is a raw EIP-712 signature string, not the Web3Signed payload format.
+
+## 5.3 data-gateway
+
+### Public endpoint handlers
+
+- Files:
+  - `/data-gateway/api/v1/files.ts`
+- Grants:
+  - `/data-gateway/api/v1/grants.ts`
+  - `/data-gateway/api/v1/grants/[grantId].ts`
+  - `/data-gateway/api/v1/grants/[grantId]/status.ts`
+- Servers:
+  - `/data-gateway/api/v1/servers.ts`
+  - `/data-gateway/api/v1/servers/[address].ts`
+  - `/data-gateway/api/v1/servers/[address]/status.ts`
+
+### Signature and ID logic
+
+- Auth extraction:
+  - `/data-gateway/lib/auth.ts`
+- EIP-712 verifier/recovery and attestation:
+  - `/data-gateway/lib/eip712.ts`
+- Deterministic IDs/hashes:
+  - `/data-gateway/lib/files.ts`
+  - `/data-gateway/lib/grants.ts`
+  - `/data-gateway/lib/servers.ts`
+- Persistence schema:
+  - `/data-gateway/db/schema.ts`
+
+### Critical interoperability details
+
+1. Gateway accepts signer delegation for file/grant operations:
+   - It recovers EIP-712 signer and allows either owner/grantor OR an active registered server for that owner.
+   - Implemented in:
+     - `/data-gateway/api/v1/files.ts`
+     - `/data-gateway/api/v1/grants.ts`
+     - `/data-gateway/api/v1/grants/[grantId].ts`
+2. Authorization format is `Web3Signed <signature>`, where `<signature>` is the raw EIP-712 signature string.
+3. Server records are currently keyed and fetched by `serverAddress` path on `/v1/servers/:address`. 
+ +## 5.4 session-relay + +### Core routes and state machine + +- Routes: + - `/session-relay/src/routes/sessions.ts` +- Validation schemas: + - `/session-relay/src/schemas.ts` +- DB model: + - `/session-relay/src/db/schema.ts` +- Secret generation/hash verification: + - `/session-relay/src/crypto/secrets.ts` +- Web3Signed verification for `/init`: + - `/session-relay/src/auth/web3-signed.ts` + +### Key behavior + +1. `/init` requires full Web3Signed payload signature and verifies builder registration via gateway client: + - `/session-relay/src/gateway/client.ts` +2. `claim/approve/deny` require `secret` token, with atomic status transitions. +3. Approved scopes must be subset of requested scopes. +4. Poll returns `grant` payload after approval, including optional `serverAddress`. + +This service is mostly well-aligned with expected consent session semantics. + +## 5.5 vana-connect + +### Server SDK paths + +- Session relay client: + - `/vana-connect/src/server/session-relay.ts` +- Request signer (Web3Signed payload model): + - `/vana-connect/src/server/request-signer.ts` +- Data client: + - `/vana-connect/src/server/data-client.ts` +- Convenience API: + - `/vana-connect/src/server/connect.ts` +- Constants: + - `/vana-connect/src/core/constants.ts` + +### React client + +- Hook: + - `/vana-connect/src/react/useVanaConnect.ts` + +### Notable interoperability details + +1. Uses Session Relay polling and deep link flow expected by DataBridge. +2. Uses Web3Signed payload signing for Personal Server data reads. +3. Resolves server URL from gateway via `/v1/servers/:address` with `grant.serverAddress ?? grant.userAddress` logic. +4. Constants are pinned to dev gateway/session-relay URLs in source constants (`/vana-connect/src/core/constants.ts`). This means the published SDK directs builders to dev infrastructure — a deployment risk. + +## 6. 
Cross-Repo Mismatches and Root Causes + +## 6.1 Lifecycle ownership is split + +Symptoms: + +- Multiple frontend hook instances coordinate one process. +- Process state is shared via module globals plus event listeners. +- Each hook instance installs its own set of Tauri event listeners (5+ copies of each event), causing parallel state updates and potential race conditions. + +Root cause: + +- No single authoritative lifecycle controller in DataBridge app layer. +- Readiness and spawned state are separate concepts crossing Tauri and frontend. + +Primary files: + +- `databridge/src/hooks/usePersonalServer.ts` +- `databridge/src-tauri/src/commands/server.rs` + +## 6.2 Control plane and data plane are mixed + +Symptoms: + +- DataBridge uses HTTP routes intended for protocol behavior. +- Mixed/no auth behavior on local owner operations. + +Root cause: + +- No dedicated local admin/control transport. Local owner operations piggyback on HTTP protocol routes. + +Primary files: + +- `databridge/src/services/personalServer.ts` +- `databridge/src/services/personalServerIngest.ts` +- `/personal-server-ts/packages/server/src/routes/data.ts` + +## 6.3 Tunnel ownership is layered incorrectly + +Symptoms: + +- DataBridge wrapper rewrites FRP config and spawns replacement frpc. +- Wrapper-spawned frpc process has no crash recovery and is invisible to Tauri's process cleanup on app exit. + +Root cause: + +- Static proxy naming in personal-server-ts tunnel config pushes lifecycle responsibility outward. + +Primary files: + +- `/personal-server-ts/packages/server/src/tunnel/config.ts` +- `databridge/personal-server/index.js` + +## 6.4 Config and contract drift + +Symptoms: + +- Wrapper mutates config keys not in canonical schema. + +Root cause: + +- Wrapper has integration-specific assumptions that drifted from package contracts. 
+ +Primary files: + +- `databridge/personal-server/index.js` +- `/personal-server-ts/packages/core/src/schemas/server-config.ts` + +## 6.5 Packaging fragility + +Symptoms: + +- Bundled binary requires copied runtime tree and native modules. +- Build scripts patch around tool constraints. + +Root cause: + +- `pkg` snapshot limitations plus native addon loading constraints plus Tauri resource flattening. + +Primary files: + +- `databridge/personal-server/scripts/build.js` +- `databridge/scripts/build-prod.js` +- `databridge/src-tauri/tauri.conf.json` + +## 6.6 URL canonical-input drift in grant/connect flow + +Symptoms: + +- Architecture guidance says grant/connect canonical inputs should live in URL. +- Current implementation also passes pre-fetched session data in `location.state`. + +Root cause: + +- Optimization path for latency and duplicate fetches introduced parallel state channel. +- This increases coupling and edge cases when navigation history/state is missing. + +Primary files: + +- `databridge/docs/260203-grant-connect-flow.md` +- `databridge/docs/architecture.md` +- `databridge/src/pages/grant/use-grant-flow.ts` + +## 6.7 Field naming drift across SDK and relay + +Symptoms: + +- `vana-connect` sends `app_user_id` on session init. +- `session-relay` expects `appUserId` in schema. + +Root cause: + +- Snake_case and camelCase naming conventions diverged between client and service contracts. + +Impact: + +- `appUserId` can be silently dropped on init, which breaks correlation features that depend on it. + +Primary files: + +- `/vana-connect/src/server/session-relay.ts` +- `/session-relay/src/schemas.ts` +- `/session-relay/src/routes/sessions.ts` + +## 6.8 Tunnel exposes local-only routes + +Symptoms: + +- FRP tunnel proxies all HTTP traffic to local server without path or method filtering. +- No middleware distinguishes local vs tunneled requests. + +Root cause: + +- Single HTTP listener serves both local admin/ingest traffic and remote protocol traffic. 
+- frpc config has no path restrictions — `type = "http"` with full subdomain proxy.
+
+Impact:
+
+- `POST /v1/data/:scope` (unauthenticated ingest) and owner-only grant routes are reachable remotely through `{address}.server.vana.org`.
+
+Primary files:
+
+- `/personal-server-ts/packages/server/src/tunnel/config.ts`
+- `/personal-server-ts/packages/server/src/app.ts`
+- `/personal-server-ts/packages/server/src/routes/data.ts`
+
+## 7. Recommended Target Architecture
+
+This design is aligned to the confirmed decisions.
+
+## 7.1 Process model
+
+1. DataBridge starts Personal Server automatically on app launch.
+2. Personal Server enters `ready-local-basic` state without owner signature.
+3. After auth, Personal Server transitions to `ready-authenticated` (in-place if possible, controlled restart if needed).
+4. DataBridge stops Personal Server on app close.
+
+## 7.2 Transport split
+
+1. Local control plane:
+   - Use IPC (Unix domain socket on macOS/Linux, named pipe on Windows).
+   - Expose owner/admin operations only on IPC.
+2. Remote data plane:
+   - Keep HTTP routes for builder/protocol traffic.
+   - Keep Web3Signed and grant checks for builder routes.
+
+## 7.3 Auth policy matrix
+
+| Caller                     | Path class                                                                                 | Transport   | Auth model                                                       |
+| -------------------------- | ------------------------------------------------------------------------------------------ | ----------- | ---------------------------------------------------------------- |
+| DataBridge desktop app     | Owner/admin control (`createGrant`, `listGrants`, `revokeGrant`, ingest trigger, sync ops) | IPC         | Local process trust boundary                                     |
+| Builder app/backend        | Protocol read routes                                                                       | HTTP/tunnel | `Authorization: Web3Signed <payload>.<signature>` + grant checks |
+| Personal Server to Gateway | Gateway write routes                                                                       | HTTPS       | `Authorization: Web3Signed <signature>`                          |
+
+## 7.4 Tunnel ownership
+
+1. 
Tunnel manager in personal-server-ts owns: + - proxy naming uniqueness + - claim refresh and restart + - verification state and health transitions +2. DataBridge should consume status events only, not rewrite tunnel config. + +## 7.5 Package strategy + +Use one public consumer package, keep internal modularity: + +1. Internal workspaces: + - `core` (protocol logic) + - `server` (HTTP routes/middleware) + - `runtime` (process lifecycle, tunnel, IPC control API) +2. Public package: + - `@opendatalabs/personal-server-ts` facade exports stable API. +3. External consumers install one package. + +This is a common industry pattern: internal package boundaries for maintainability, single public facade for consumers. + +## 8. Migration Plan + +## Phase 0: Contract and state model (design freeze) + +Deliverables: + +- Runtime state machine spec. +- IPC API spec. +- Endpoint ownership/auth matrix. + +Acceptance: + +- Approved doc used as implementation contract. + +## Phase 1: Stabilize current integration + +Deliverables: + +- Single lifecycle owner in DataBridge. +- Remove config drift in wrapper. +- Normalize local auth behavior for owner paths. + +Acceptance: + +- Deterministic start/readiness behavior and no duplicate lifecycle controllers. + +## Phase 2: Introduce runtime/daemon package + +Deliverables: + +- Personal-server-ts runtime module with process + tunnel lifecycle. +- DataBridge wrapper hacks removed. + +Acceptance: + +- Tunnel config rewrite logic deleted from DataBridge wrapper. + +## Phase 3: Move local owner/admin to IPC + +Deliverables: + +- IPC server in Personal Server runtime. +- DataBridge local clients switched from localhost REST to IPC. + +Acceptance: + +- Owner/admin operations no longer exposed as unauth local HTTP routes. + +## Phase 4: Packaging simplification + +Deliverables: + +- Single facade package for DataBridge consumption. +- Internal package boundaries retained behind facade. 
+ +Acceptance: + +- DataBridge installs one package and uses facade imports only. + +## Phase 5: Hardening and observability + +Deliverables: + +- Cross-repo E2E tests for startup, auth transition, tunnel recovery, grant flow. +- Operational metrics/logging around lifecycle states. + +Acceptance: + +- Reproducible reliability across supported desktop platforms. + +## 9. Cross-Repo Test Plan (must-have) + +1. DataBridge: + - App start -> Personal Server starts unauthenticated. + - Auth complete -> server transitions to authenticated mode. + - App exit -> server stops cleanly. +2. personal-server-ts: + - Tunnel collision test with repeated starts. + - Grant routes and data ingest auth behavior split by caller class. + - IPC owner/admin route coverage. +3. data-gateway: + - Delegated server signer acceptance for file/grant create/revoke. + - Rejection for unauthorized signer. +4. session-relay: + - claim/approve/deny atomic transitions. + - expired and invalid secret behavior. +5. vana-connect: + - Session init + poll + approved grant path against relay and gateway. + - Data fetch uses serverAddress fallback behavior correctly. + +## 10. Risks and Mitigations + +1. Risk: Auth transition race during two-phase startup. + - Mitigation: explicit runtime state gates and idempotent transition API. +2. Risk: IPC migration breaks active flows. + - Mitigation: short compatibility shim window and contract tests. +3. Risk: Tunnel regression after ownership move. + - Mitigation: parity tests before deleting wrapper logic. +4. Risk: Hidden contract drift returns. + - Mitigation: typed contract tests across DataBridge and personal-server-ts. + +## 11. Industry References + +- OAuth 2.0 for Native Apps (RFC 8252): +- Tauri sidecar docs: +- Docker daemon local access model: +- Cloudflared service operation model: +- systemd service restart semantics: +- launchd basics: + +## 12. Suggested Implementation Order + +1. Phase 0 contract doc sign-off. +2. 
Tunnel ownership fix in personal-server-ts. +3. DataBridge lifecycle centralization. +4. IPC control plane implementation. +5. Facade package cleanup and integration. + +This order removes the highest churn/race sources first, then improves long-term structure. + +Note: Section 8 phase numbering (1=stabilize, 2=runtime) does not match this execution order (tunnel fix before lifecycle centralization). This section is the preferred execution order — it removes the highest churn sources first. diff --git a/docs/260213-dataconnect-integration-refactor.md b/docs/260213-dataconnect-integration-refactor.md new file mode 100644 index 0000000..e27a0d8 --- /dev/null +++ b/docs/260213-dataconnect-integration-refactor.md @@ -0,0 +1,711 @@ +# DataConnect (DataBridge) Integration Refactor — Implementation Guide + +## Context + +The personal-server-ts repo has been refactored to support: + +- **IPC transport** (Unix domain socket) for admin/owner operations +- **Dynamic tunnel proxy naming** (no more wrapper hacks) +- **Local-only middleware** (tunnel can't reach ingest routes) +- **Runtime package** with process supervisor, daemonization, PID file discovery +- **Admin app** serving grant CRUD, data ingest/delete, access logs, sync — all over IPC + +DataConnect currently uses `pkg` to compile personal-server-ts into a standalone binary, then patches around its limitations with native module copying, proxy name rewrites, and custom route injection. This guide covers every change needed to adopt the new architecture. + +--- + +## 1. Replace pkg with Node.js + npm Package + +### Why pkg must go + +`pkg` (@yao-pkg/pkg) compiles personal-server into a ~65MB binary, but it's **not actually standalone** — it still requires `node_modules/` alongside it for better-sqlite3 (native `.node` addon). This means pkg's core value proposition (single binary distribution) was never realized. 
All three alternatives evaluated (pkg, Node.js binary, Node SEA) end up at roughly the same on-disk size (~60-85MB) since they all embed a Node.js runtime. + +| Factor | pkg (current) | Node.js binary (recommended) | Node SEA | +| ---------------- | --------------------------------------- | ----------------------------- | ------------------------------- | +| On-disk size | ~60-80 MB | ~65-85 MB | ~55-75 MB | +| Build complexity | High (esbuild→pkg→copy→download) | **Low (npm ci + copy)** | High (bundle→blob→inject→sign) | +| Native modules | `eval('require')` hacks | **Standard npm resolution** | Temp file extraction at runtime | +| Code signing | Sign binary + each .node + re-sign .app | **Sign Node binary once** | Remove sig, inject, re-sign | +| Debugging | Poor (V8 snapshot, no source maps) | **Excellent (stock Node.js)** | Poor (bundled blob) | +| Maintenance risk | Community fork of abandoned project | **Stock Node.js** | Stability 1.1 (not stable) | +| Dev experience | Slow rebuild cycle | **`tsx watch`, no packaging** | Slow rebuild cycle | + +**SEA is not ready:** Node 22 SEA can embed assets, but native `.node` files must be extracted to temp directories at runtime. This introduces antivirus false positives, permission issues, and race conditions. SEA is stability level 1.1 (Active Development), not production-ready. + +**Future path:** Node.js has a built-in `node:sqlite` module (stability 1.1). When it stabilizes, better-sqlite3 can be replaced, eliminating the native addon entirely. At that point SEA becomes viable for true single-binary deployment. This is a 6-12 month horizon. 
+### Current build pipeline (to delete)
+
+```
+esbuild (with eval('require') plugins)
+    → pkg (embeds Node 22 + bundle)
+    → copy native modules (better-sqlite3, bindings, file-uri-to-path)
+    → download prebuilt better-sqlite3 for pkg's Node version
+```
+
+**Files:**
+
+- `personal-server/scripts/build.js` — esbuild plugins + pkg invocation + native module copy
+  - `dynamicNativeRequirePlugin` (line 119-135): replaces `require('better-sqlite3')` with `eval('require')('better-sqlite3')` to hide from pkg
+  - `inlinePackageJsonPlugin` (line 141-162): inlines package.json files at build time
+  - Native module copy loop (line 194-204): `['better-sqlite3', 'bindings', 'file-uri-to-path']`
+  - Prebuilt download (line 209-220): `npx prebuild-install -r node -t ${pkgNodeMajor}.0.0`
+- `personal-server/index.cjs` — CJS entry point required by pkg (pkg doesn't support ESM entry)
+- `scripts/ensure-personal-server.js` — staleness checker that triggers rebuild when source changes
+- `scripts/build-prod.js`:
+  - line 88-94: `npm install && npm run build` in personal-server dir
+  - line 45-58: `copyNativeModulesIntoApp()` — copies `dist/node_modules` into `.app/Contents/Resources`
+- `personal-server/package.json` — `pkg.assets` and `pkg.targets` config (lines 12-27)
+
+### Target: Ship Node.js binary + personal-server-ts as normal npm package
+
+**New Tauri resource layout:**
+
+```
+Resources/
+├── node                      # Node.js 22 binary (~50MB)
+└── personal-server/
+    ├── index.js              # Thin entry point (ESM)
+    ├── package.json
+    └── node_modules/         # Normal npm install output
+        ├── @opendatalabs/...
+        ├── better-sqlite3/   # Native addon, resolved normally
+        └── ... 
+``` + +**Changes to `src-tauri/tauri.conf.json`:** + +Current resource config (line 65): + +```json +"../personal-server/dist/personal-server*": "personal-server/dist/" +``` + +New: + +```json +"../personal-server/": "personal-server/", +"../node/node": "node/" +``` + +**Changes to `src-tauri/src/commands/server.rs`:** + +Current binary resolution (lines 32-39): + +```rust +let candidates = [ + resource_dir.join("personal-server").join("dist").join(binary_name), + resource_dir.join("binaries").join(binary_name), + resource_dir.join("_up_").join("personal-server").join("dist").join(binary_name), +]; +``` + +Current node_modules check (lines 48-55): + +```rust +let dist_dir = candidate.parent().unwrap_or(candidate); +if !dist_dir.join("node_modules").exists() { + log::warn!("Skipping {:?}: node_modules/ not found alongside binary", candidate); + continue; +} +``` + +New approach: + +```rust +// Production: run bundled Node.js with personal-server entry point +let node_binary = resource_dir.join("node").join("node"); +let entry_script = resource_dir.join("personal-server").join("index.js"); + +// Dev mode fallback: use system Node +let (cmd, args) = if node_binary.exists() && entry_script.exists() { + (node_binary, vec![entry_script]) +} else { + // Development: node from PATH + ("node".into(), vec![dev_personal_server_path.join("index.js")]) +}; +``` + +Delete the codesign block (lines 163-177) — only need to sign the Node binary once during release, not per-launch. 
+ +**Changes to `.github/workflows/release.yml`:** + +Delete: + +- Personal-server pkg build steps (lines 114-127) +- All `.node` file signing (lines 183-188) +- `node_modules` copy into `.app` (lines 202-203) +- `.app` re-signing after modification (lines 214-221) +- DMG `node_modules` verification (lines 243) +- Linux AppImage extract/repack for `node_modules` (lines 294-313) + +Add: + +- Download Node.js 22 binary for target platform from nodejs.org +- Place in release resources alongside personal-server folder + +**Files to delete entirely:** + +- `personal-server/scripts/build.js` +- `personal-server/index.cjs` +- `scripts/ensure-personal-server.js` + +--- + +## 2. Eliminate the Wrapper Entirely + +### Current wrapper (`personal-server/index.js`) + +The wrapper currently: + +1. Imports `createServer` from personal-server-ts packages (line 111-112) — **now in index.ts** +2. Adds custom `DELETE /v1/grants/:grantId` route (lines 120-151) — **now in admin-app.ts** +3. Adds custom `GET /status` route (lines 154-158) — **now in health.ts** +4. Calls `startBackgroundServices()` (line 176) — **now in index.ts** +5. Rewrites tunnel proxy name via `fixTunnelProxyName()` (lines 35-85) — **now native via `deriveProxyName()`** +6. Emits JSON-line stdout messages for Tauri to parse (lines 26-28, 163, 166-170, 202, 204, 219) — **no longer needed** + +All 6 responsibilities are now handled natively. **No changes to personal-server-ts are required** to eliminate the wrapper. + +### Why stdout messages are no longer needed + +The current stdout JSON-line protocol (`ready`, `tunnel`, `dev-token`, `error`, `log`) was the only remaining reason for the wrapper. 
Each message now has a better alternative: + +| Current stdout message | Replacement | +| ------------------------------ | -------------------------------------------------------------------------------------------------------------------------------------- | +| `{ type: "ready", port }` | Poll PID file at `{storageRoot}/server.json` — written after HTTP+IPC are up. Contains `{ pid, port, socketPath, version, startedAt }` | +| `{ type: "tunnel", url }` | `GET /health` returns tunnel URL when available | +| `{ type: "dev-token", token }` | Eliminated — devToken auth replaced by IPC socket permissions | +| `{ type: "error", message }` | Process exit code + `daemon.log` in storageRoot | +| `{ type: "tunnel-failed" }` | Absence of tunnel info in `GET /health` response | +| `{ type: "log", message }` | pino logs to stdout/`daemon.log` — no special parsing needed | + +### What to delete in DataConnect + +**Delete entirely:** + +- `personal-server/index.js` +- `personal-server/index.cjs` +- `personal-server/scripts/build.js` +- `personal-server/package.json` (or repurpose as thin dependency spec with only `@opendatalabs/personal-server-ts` dependency) + +### What changes in `server.rs` + +**Replace stdout JSON parsing with PID file polling:** + +Current (lines 281-338): Reads stdout line-by-line, parses JSON, emits Tauri events. 
+ +New: After spawning the Node process, poll for PID file appearance: + +```rust +// Spawn process +let child = Command::new(&node_binary) + .args([&entry_script]) + .envs(env_vars) + .spawn()?; + +// Poll PID file for readiness (replaces stdout "ready" message) +let pid_file = storage_root.join("server.json"); +let metadata = poll_for_file(&pid_file, Duration::from_secs(30))?; +// metadata contains: { pid, port, socketPath, version, startedAt } + +// Emit readiness event with port from PID file +app_handle.emit("personal-server-ready", json!({ "port": metadata.port })); + +// Tunnel URL: poll GET /status periodically or on-demand +// Returns: { status: "running", owner: string | null, port: number } +``` + +**Delete:** The entire stdout reader thread and JSON message parsing (lines 281-338), the `send()` function pattern, and the `dev-token` event handling. + +--- + +## 3. Switch Admin Calls from HTTP to IPC + +### Current HTTP calls + +**`src/services/personalServer.ts`:** + +All calls go through `serverFetch()` (lines 39-82) which uses `@tauri-apps/plugin-http` to make HTTP requests to `localhost:${port}`. + +| Function | Method | URL | Auth | +| -------------------------- | ------ | ----------------------- | -------------------- | +| `createGrant()` (line 84) | POST | `/v1/grants` | `Bearer ${devToken}` | +| `listGrants()` (line 102) | GET | `/v1/grants` | `Bearer ${devToken}` | +| `revokeGrant()` (line 122) | DELETE | `/v1/grants/${grantId}` | none | + +**`src/services/personalServerIngest.ts`:** + +| Function | Method | URL | Auth | +| ------------------------ | ------ | ------------------- | ---- | +| `ingestData()` (line 13) | POST | `/v1/data/${scope}` | none | + +### New IPC calls + +The admin app on IPC serves the same endpoints but with **no auth required** — socket file permissions (chmod 0600) enforce the trust boundary. 
+
+**New client approach — three options:**
+
+**Option A: Use `IpcClient` from personal-server-ts runtime package**
+
+The `IpcClient` class (`packages/runtime/src/ipc-client.ts`) sends HTTP requests over a Unix domain socket:
+
+```typescript
+import { IpcClient, readPidFile } from "@opendatalabs/personal-server-ts";
+
+// Discover socket path from PID file
+const metadata = await readPidFile(storageRoot);
+// metadata = { pid, port, socketPath, version, startedAt }
+
+const client = new IpcClient({ storageRoot });
+
+// Create grant
+const createRes = await client.post("/v1/grants", {
+  granteeAddress: "0x...",
+  scopes: ["chatgpt.conversations"],
+  expiresAt: Math.floor(Date.now() / 1000) + 86400,
+});
+const { grantId } = IpcClient.parseJson(createRes);
+
+// List grants
+const listRes = await client.get("/v1/grants");
+const { grants } = IpcClient.parseJson(listRes);
+
+// Revoke grant
+await client.delete(`/v1/grants/${grantId}`);
+
+// Ingest data
+await client.post(`/v1/data/${scope}`, data);
+```
+
+**Limitation:** `IpcClient` uses Node.js `http.request()` with `socketPath` option. Tauri's frontend (React) runs in a webview and cannot directly use Node.js APIs. This means IPC communication must go through Tauri commands (Rust backend), not the React frontend.
+
+**Option B: Call IPC from Tauri Rust backend**
+
+Add a Tauri command that proxies admin calls over the Unix socket:
+
+```rust
+// src-tauri/src/commands/server.rs
+
+#[tauri::command]
+async fn ipc_request(method: String, path: String, body: Option<String>) -> Result<String, String> {
+  // Connect to Unix domain socket at known path
+  // Send HTTP request
+  // Return response body
+}
+```
+
+The React frontend would then call:
+
+```typescript
+const result = await invoke("ipc_request", {
+  method: "POST",
+  path: "/v1/grants",
+  body: JSON.stringify({ granteeAddress, scopes, expiresAt }),
+});
+```
+
+**Option C: Keep HTTP for now, migrate later**
+
+Admin routes still exist on the HTTP app during the migration period. 
The only behavioral change is that `POST /v1/data/:scope` on HTTP now requires local-only access (rejects tunnel traffic). DataConnect's HTTP calls to localhost still work unchanged.
+
+**Recommended approach:** Start with Option C (keep HTTP), then migrate to Option B (Tauri IPC command) when convenient. Option A is useful for CLI tools but doesn't work in the Tauri webview context.
+
+### What changes in `src/services/personalServer.ts`
+
+If keeping HTTP (Option C), the only change is **removing devToken auth headers**:
+
+Current (line 92-94):
+
+```typescript
+const headers: Record<string, string> = { "Content-Type": "application/json" };
+if (token) headers["Authorization"] = `Bearer ${token}`;
+```
+
+The devToken auth bypass is no longer needed — admin routes on HTTP during migration don't require it when called from localhost (local-only middleware only blocks tunnel traffic, not localhost).
+
+If switching to IPC (Option B), replace all `serverFetch()` calls with `invoke('ipc_request', ...)`.
+
+---
+
+## 4. 
Centralize the Lifecycle Hook + +### Current problem + +`usePersonalServer.ts` is used in 5+ places: + +- `src/App.tsx` +- `src/pages/home/index.tsx` +- `src/pages/runs/use-runs-page.ts` +- `src/pages/settings/use-settings-page.ts` +- `src/pages/grant/use-grant-flow.ts` + +Each hook instance installs its own full set of Tauri event listeners (lines 102-162): + +- `personal-server-ready` +- `personal-server-error` +- `personal-server-exited` +- `personal-server-tunnel` +- `personal-server-log` +- `personal-server-dev-token` + +This means 5+ copies of each listener fire simultaneously, all updating shared module-level globals: + +```typescript +// lines 14-23 +let _sharedPort: number | null = null; +let _sharedStatus: "stopped" | "starting" | "running" | "error" = "stopped"; +let _sharedTunnelUrl: string | null = null; +let _sharedDevToken: string | null = null; +``` + +### Target architecture + +**Single lifecycle controller** (React context or Zustand store) that: + +1. Owns the Tauri process (start/stop/restart) +2. Listens to Tauri events exactly once +3. Manages state centrally +4. 
Exposes read-only state + actions to components
+
+**Implementation using Zustand (recommended — already used in project):**
+
+```typescript
+// src/store/personal-server.ts
+
+interface PersonalServerState {
+  status: "stopped" | "starting" | "running" | "error";
+  port: number | null;
+  tunnelUrl: string | null;
+  socketPath: string | null;
+  error: string | null;
+
+  // Actions
+  start: (opts: StartOptions) => Promise<void>;
+  stop: () => Promise<void>;
+  restart: (opts: StartOptions) => Promise<void>;
+}
+```
+
+**Key changes:**
+
+- Event listeners registered ONCE at app startup (in `App.tsx` or a provider)
+- Components call `usePersonalServerStore()` for read-only state
+- Only `App.tsx` calls `start()` / `stop()`
+- Retry logic moves to Rust (Supervisor) or stays in the store, not in the hook
+
+### Two-phase startup (currently lines 170-192)
+
+Phase 1 (unauthenticated): Start server immediately on app launch
+
+```typescript
+await invoke("start_personal_server", { port: null, masterKeySignature: null });
+```
+
+Phase 2 (authenticated): Restart with wallet credentials
+
+```typescript
+await invoke("start_personal_server", {
+  masterKeySignature: sig,
+  ownerAddress: wallet,
+});
+```
+
+This two-phase model stays the same. The difference is it's triggered from one place.
+
+---
+
+## 5. 
Use PID File for Server Discovery + +### Current approach + +`server.rs` tracks port from stdout JSON message: + +```rust +// line 282-292 +"ready" => { + let port = msg.get("port").and_then(|p| p.as_u64()).unwrap_or(0) as u16; + app_handle.emit("personal-server-ready", json!({ "port": port })); +} +``` + +### New approach + +After starting the server, read the PID file at `{storageRoot}/server.json`: + +```json +{ + "pid": 12345, + "port": 8080, + "socketPath": "/Users/user/.data-connect/personal-server/ipc.sock", + "version": "0.1.0", + "startedAt": "2025-02-12T10:30:00.000Z" +} +``` + +The PID file is written by `packages/server/src/index.ts` (lines 52-59) after both HTTP and IPC listeners are up. + +**Benefits:** + +- Can detect already-running server via `checkRunningServer(storageRoot)` — checks if PID is alive +- Socket path for IPC is discoverable +- Version and start time available for diagnostics + +**Where to use in DataConnect:** + +- `server.rs` can poll for PID file after spawning process (instead of relying solely on stdout `ready` message) +- `usePersonalServer` can read PID file to reconnect to existing server on app restart + +--- + +## 6. Adopt Supervisor for Crash Recovery + +### Current retry logic (`usePersonalServer.ts` lines 131-142) + +```typescript +// On personal-server-exited event: +if (_restartCount < MAX_RESTART_ATTEMPTS) { + _restartCount++; + const delay = Math.pow(2, _restartCount) * 1000; // 2s, 4s, 8s + setTimeout(() => startServer(), delay); +} +``` + +- Max 3 attempts +- Exponential backoff: 2s, 4s, 8s +- Lives in React hook (fragile, unmount = restart logic lost) + +### New: Supervisor class (`packages/runtime/src/supervisor.ts`) + +```typescript +const supervisor = new Supervisor({ + command: nodePath, + args: [entryScript], + spawnOptions: { env: { PORT, CONFIG_DIR, ... 
} }, + baseDelayMs: 1000, // First retry after 1s + maxDelayMs: 60000, // Cap at 60s + maxJitterMs: 1000, // Random jitter 0-1s + maxRetries: 10, // 10 attempts before giving up + resetAfterMs: 30000, // Reset counter if alive > 30s +}); + +supervisor.on('start', (pid) => { /* emit ready event */ }); +supervisor.on('exit', (code, signal) => { /* log */ }); +supervisor.on('restart', (attempt, delayMs) => { /* log */ }); +supervisor.on('max-retries', () => { /* give up, notify user */ }); + +supervisor.start(); +// Later: +await supervisor.stop(); // SIGTERM, 5s timeout, then SIGKILL +``` + +**Where to integrate:** The Supervisor can be used either: + +- **In Rust (`server.rs`):** Rust spawns Node process and handles restart logic natively +- **In the wrapper (`index.js`):** Wrapper uses Supervisor to manage the server process +- **In Tauri frontend:** Less ideal — React shouldn't own process lifecycle + +**Recommended:** Use Supervisor from the wrapper if keeping a wrapper, or implement equivalent restart logic in Rust. The key is moving crash recovery out of the React hook. + +--- + +## 7. Release Workflow Simplification + +### Current release steps for personal-server (`.github/workflows/release.yml`) + +1. `npm ci` in `personal-server/` (line 115-116) +2. `npm run build` — runs pkg (line 118-120) +3. Code sign the binary (line 122-127) +4. Tauri build (creates `.app`) +5. Find all `.node` files, sign each individually (line 183-188) +6. Copy `node_modules/` into `.app/Contents/Resources/personal-server/dist/` (line 202-203) +7. Re-sign entire `.app` (line 214-221) +8. Create DMG with node_modules verification (line 228-265) +9. Notarize and staple (line 268-290) +10. For Linux: extract AppImage → inject node_modules → repack (line 294-313) + +### New release steps + +1. Download Node.js 22 binary for target platform +2. `npm ci` in `personal-server/` (dependencies only, no build step) +3. Tauri build (bundles Node binary + personal-server folder as resources) +4. 
Sign `.app` once (includes Node binary) +5. Create DMG +6. Notarize and staple +7. For Linux: standard AppImage (no repack needed) + +Steps 3-7 of the current workflow are eliminated entirely. + +--- + +## 8. Environment Variables and Config + +### Current env vars passed by server.rs (lines 138-156) + +| Variable | Purpose | Still needed? | +| --------------------------- | ---------------------- | ---------------------------------------- | +| `PORT` | HTTP listen port | Yes | +| `NODE_ENV` | Always `production` | Yes | +| `VANA_MASTER_KEY_SIGNATURE` | EIP-712 key derivation | Yes | +| `GATEWAY_URL` | Data gateway endpoint | Yes | +| `OWNER_ADDRESS` | Owner wallet address | Yes | +| `CONFIG_DIR` | Storage root path | Yes, maps to `PERSONAL_SERVER_ROOT_PATH` | + +Note: personal-server-ts `index.ts` reads `PERSONAL_SERVER_ROOT_PATH` (line 18), not `CONFIG_DIR`. The wrapper maps `CONFIG_DIR` → config loading. If eliminating the wrapper, `server.rs` should set `PERSONAL_SERVER_ROOT_PATH` instead. + +### Config directory + +Current default: `~/.data-connect/personal-server` (set in `server.rs` lines 153-156) + +PID file will be at: `~/.data-connect/personal-server/server.json` +IPC socket will be at: `~/.data-connect/personal-server/ipc.sock` (or `/tmp/vana-ps-{hash}.sock` if path > 100 bytes on macOS) + +--- + +## 9. Endpoint Reference (Admin App over IPC) + +These are the admin routes available on the IPC socket (`packages/server/src/admin-app.ts`). No auth headers required — socket permissions enforce trust boundary. 
+ +### Data Ingest + +``` +POST /v1/data/:scope +Body: { ...data } +Response: 201 { scope, collectedAt, status: "stored" | "syncing" } +``` + +### Data Delete + +``` +DELETE /v1/data/:scope +Response: 204 +``` + +### List Grants + +``` +GET /v1/grants +Response: 200 { grants: Grant[] } +``` + +### Create Grant + +``` +POST /v1/grants +Body: { granteeAddress: "0x...", scopes: string[], expiresAt?: number, nonce?: number } +Response: 201 { grantId: string } +Error 404: { error: { errorCode: "BUILDER_NOT_REGISTERED", ... } } +``` + +### Revoke Grant + +``` +DELETE /v1/grants/:grantId +Response: 200 { revoked: true } +``` + +### Access Logs + +``` +GET /v1/access-logs?limit=50&offset=0 +Response: 200 { logs: AccessLog[], total: number } +``` + +### Sync Status + +``` +GET /v1/sync/status +Response: 200 { enabled, running, lastSync, lastProcessedTimestamp, pendingFiles, errors } +``` + +### Trigger Sync + +``` +POST /v1/sync/trigger +Response: 202 { status: "started", message: "Sync triggered" } +``` + +--- + +## 10. Readiness & Health Detection (Replaces Stdout Protocol) + +The wrapper's stdout JSON-line protocol is eliminated. DataConnect uses file-based and HTTP-based detection instead. + +### Readiness detection + +After spawning the Node process, `server.rs` polls for the PID file: + +**PID file location:** `{storageRoot}/server.json` (written by `packages/server/src/index.ts` lines 52-59) + +```json +{ + "pid": 12345, + "port": 8080, + "socketPath": "/Users/user/.data-connect/personal-server/ipc.sock", + "version": "0.1.0", + "startedAt": "2026-02-12T10:30:00.000Z" +} +``` + +Poll strategy: check file existence every 250ms, timeout after 30s. File appears only after both HTTP and IPC listeners are ready. + +### Health and tunnel URL + +**`GET /status`** (on HTTP, no auth): + +```json +{ "status": "running", "owner": "0x...", "port": 8080 } +``` + +**`GET /health`** (on HTTP, no auth): Returns full server info including tunnel URL, identity, and connected services. 
+ +### Error detection + +- **Process exit:** `server.rs` already handles `child.wait()` and emits `personal-server-exited` event +- **Startup failure:** PID file never appears within timeout → emit `personal-server-error` +- **Logs:** pino writes to stdout (captured by `server.rs` as raw text) and to `{storageRoot}/daemon.log` + +--- + +## Implementation Order + +**No changes to personal-server-ts are required.** All work is in DataConnect. + +### Phase A: Eliminate pkg + wrapper + stdout protocol (do together) + +| # | Change | Files to modify/delete | +| --- | ------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| A1 | Delete wrapper + pkg pipeline | `personal-server/index.js` (delete), `personal-server/index.cjs` (delete), `personal-server/scripts/build.js` (delete), `scripts/ensure-personal-server.js` (delete), `personal-server/package.json` (simplify to thin dep spec) | +| A2 | Ship Node.js binary, run PS entry point directly | `src-tauri/tauri.conf.json`, `src-tauri/src/commands/server.rs`, `scripts/build-prod.js` | +| A3 | Replace stdout JSON parsing with PID file polling | `src-tauri/src/commands/server.rs` — delete stdout reader thread, add PID file poll | +| A4 | Simplify release workflow | `.github/workflows/release.yml` — delete pkg build, .node signing, node_modules injection, AppImage repack; add Node.js binary download | + +### Phase B: Auth + lifecycle cleanup (independent items, can parallel with A) + +| # | Change | Files | +| --- | -------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| B1 | Remove devToken auth from admin calls | `src/services/personalServer.ts` 
— remove `Authorization: Bearer` headers | +| B2 | Centralize lifecycle hook into Zustand store | `src/hooks/usePersonalServer.ts` → `src/store/personal-server.ts`, update `App.tsx`, `home/index.tsx`, `use-runs-page.ts`, `use-settings-page.ts`, `use-grant-flow.ts` | + +### Phase C: IPC migration (sequential, after Phase A) + +| # | Change | Files | +| --- | --------------------------- | ------------------------------------------------------------------------------------------------------------------------------- | +| C1 | Add IPC proxy Tauri command | `src-tauri/src/commands/server.rs` — new `ipc_request` command using UDS | +| C2 | Switch service calls to IPC | `src/services/personalServer.ts`, `src/services/personalServerIngest.ts` — replace `serverFetch()` with `invoke('ipc_request')` | + +### Phase D: Process management (independent, after Phase A) + +| # | Change | Files | +| --- | ----------------------------------- | --------------------------------------------------------------------------------------------- | +| D1 | Adopt Supervisor for crash recovery | `src-tauri/src/commands/server.rs` — implement restart logic in Rust, remove React hook retry | + +**Dependencies:** + +- Phase A has no prerequisites — personal-server-ts changes are already landed +- Phase B is independent and can be done in parallel with Phase A +- Phase C requires Phase A (PID file provides socket path for IPC) +- Phase D requires Phase A (Node.js binary spawning must work first) +- C1 must complete before C2 diff --git a/package-lock.json b/package-lock.json index d8829d3..0243397 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1296,6 +1296,10 @@ "resolved": "packages/core", "link": true }, + "node_modules/@opendatalabs/personal-server-ts-runtime": { + "resolved": "packages/runtime", + "link": true + }, "node_modules/@opendatalabs/personal-server-ts-server": { "resolved": "packages/server", "link": true @@ -10841,6 +10845,7 @@ "license": "MIT", "dependencies": { 
"@opendatalabs/personal-server-ts-core": "*", + "@opendatalabs/personal-server-ts-runtime": "*", "@opendatalabs/personal-server-ts-server": "*" } }, @@ -10873,6 +10878,15 @@ "url": "https://paulmillr.com/funding/" } }, + "packages/runtime": { + "name": "@opendatalabs/personal-server-ts-runtime", + "version": "0.0.1", + "license": "MIT", + "dependencies": { + "@hono/node-server": "^1.14.0", + "@opendatalabs/personal-server-ts-core": "*" + } + }, "packages/server": { "name": "@opendatalabs/personal-server-ts-server", "version": "0.0.1", @@ -10880,6 +10894,7 @@ "dependencies": { "@hono/node-server": "^1.14.0", "@opendatalabs/personal-server-ts-core": "*", + "@opendatalabs/personal-server-ts-runtime": "*", "hono": "^4.7.0" } } diff --git a/packages/cli/package.json b/packages/cli/package.json index 76bb25e..30762ca 100644 --- a/packages/cli/package.json +++ b/packages/cli/package.json @@ -32,6 +32,7 @@ }, "dependencies": { "@opendatalabs/personal-server-ts-core": "*", + "@opendatalabs/personal-server-ts-runtime": "*", "@opendatalabs/personal-server-ts-server": "*" } } diff --git a/packages/cli/src/index.ts b/packages/cli/src/index.ts index 595b5e9..9215090 100644 --- a/packages/cli/src/index.ts +++ b/packages/cli/src/index.ts @@ -10,8 +10,29 @@ export { type LoadConfigOptions, } from "@opendatalabs/personal-server-ts-core/config"; export type { ServerConfig } from "@opendatalabs/personal-server-ts-core/schemas"; +export { + RuntimeStateMachine, + type RuntimeState, + type StateTransitionEvent, +} from "@opendatalabs/personal-server-ts-core/lifecycle"; export { createServer, type CreateServerOptions, type ServerContext, } from "@opendatalabs/personal-server-ts-server"; +export { + writePidFile, + readPidFile, + removePidFile, + checkRunningServer, + Supervisor, + daemonize, + resolveSocketPath, + createIpcServer, + IpcClient, + type ServerMetadata, + type SupervisorOptions, + type DaemonizeOptions, + type IpcClientOptions, + type IpcResponse, +} from 
"@opendatalabs/personal-server-ts-runtime"; diff --git a/packages/cli/tsconfig.json b/packages/cli/tsconfig.json index 27bab49..c822f05 100644 --- a/packages/cli/tsconfig.json +++ b/packages/cli/tsconfig.json @@ -10,5 +10,9 @@ }, "include": ["src/**/*.ts"], "exclude": ["src/**/*.test.ts"], - "references": [{ "path": "../core" }, { "path": "../server" }] + "references": [ + { "path": "../core" }, + { "path": "../runtime" }, + { "path": "../server" } + ] } diff --git a/packages/core/package.json b/packages/core/package.json index 3eedbf7..d0f1c99 100644 --- a/packages/core/package.json +++ b/packages/core/package.json @@ -95,6 +95,10 @@ "./test-utils": { "types": "./dist/test-utils/index.d.ts", "import": "./dist/test-utils/index.js" + }, + "./lifecycle": { + "types": "./dist/lifecycle/index.d.ts", + "import": "./dist/lifecycle/index.js" } }, "scripts": { diff --git a/packages/core/src/lifecycle/endpoint-ownership.ts b/packages/core/src/lifecycle/endpoint-ownership.ts new file mode 100644 index 0000000..413ad92 --- /dev/null +++ b/packages/core/src/lifecycle/endpoint-ownership.ts @@ -0,0 +1,120 @@ +/** + * Endpoint ownership matrix. + * + * Defines which routes are served on which transport (HTTP vs IPC) + * and what auth model applies to each. + */ + +export type Transport = "http" | "ipc" | "both"; + +export type AuthModel = + | "none" + | "local-only" + | "web3-signed" + | "web3-signed+owner" + | "web3-signed+builder" + | "web3-signed+builder+grant"; + +export interface EndpointSpec { + method: "GET" | "POST" | "PUT" | "DELETE"; + path: string; + transport: Transport; + auth: AuthModel; + description: string; +} + +/** + * Canonical endpoint ownership matrix. + * + * HTTP transport: routes reachable through FRP tunnel by builders. + * IPC transport: routes reachable only via UDS by local clients (DataBridge/CLI). 
+ */ +export const ENDPOINT_MATRIX: readonly EndpointSpec[] = [ + // --- Protocol routes (HTTP) --- + { + method: "GET", + path: "/health", + transport: "http", + auth: "none", + description: "Health check and server info", + }, + { + method: "GET", + path: "/v1/data", + transport: "http", + auth: "web3-signed+builder", + description: "List available scopes", + }, + { + method: "GET", + path: "/v1/data/:scope/versions", + transport: "http", + auth: "web3-signed+builder", + description: "List versions for a scope", + }, + { + method: "GET", + path: "/v1/data/:scope", + transport: "http", + auth: "web3-signed+builder+grant", + description: "Read data for a scope (grant required)", + }, + { + method: "POST", + path: "/v1/grants/verify", + transport: "http", + auth: "none", + description: "Verify a grant EIP-712 signature", + }, + + // --- Admin routes (IPC) --- + { + method: "POST", + path: "/v1/data/:scope", + transport: "ipc", + auth: "none", + description: "Ingest data (local-only)", + }, + { + method: "DELETE", + path: "/v1/data/:scope", + transport: "ipc", + auth: "none", + description: "Delete scope data (local-only)", + }, + { + method: "GET", + path: "/v1/grants", + transport: "ipc", + auth: "none", + description: "List grants (local-only)", + }, + { + method: "POST", + path: "/v1/grants", + transport: "ipc", + auth: "none", + description: "Create grant (local-only)", + }, + { + method: "GET", + path: "/v1/access-logs", + transport: "ipc", + auth: "none", + description: "Read access logs (local-only)", + }, + { + method: "GET", + path: "/v1/sync", + transport: "ipc", + auth: "none", + description: "Get sync status (local-only)", + }, + { + method: "POST", + path: "/v1/sync", + transport: "ipc", + auth: "none", + description: "Trigger sync (local-only)", + }, +] as const; diff --git a/packages/core/src/lifecycle/index.ts b/packages/core/src/lifecycle/index.ts new file mode 100644 index 0000000..4681f78 --- /dev/null +++ b/packages/core/src/lifecycle/index.ts @@ 
-0,0 +1,12 @@ +export { + RuntimeStateMachine, + type RuntimeState, + type StateTransitionEvent, + type StateChangeListener, +} from "./state-machine.js"; +export { + ENDPOINT_MATRIX, + type Transport, + type AuthModel, + type EndpointSpec, +} from "./endpoint-ownership.js"; diff --git a/packages/core/src/lifecycle/state-machine.test.ts b/packages/core/src/lifecycle/state-machine.test.ts new file mode 100644 index 0000000..2361cd8 --- /dev/null +++ b/packages/core/src/lifecycle/state-machine.test.ts @@ -0,0 +1,134 @@ +import { describe, it, expect, vi } from "vitest"; +import { + RuntimeStateMachine, + type StateTransitionEvent, +} from "./state-machine.js"; + +describe("RuntimeStateMachine", () => { + it("starts in uninitialized state", () => { + const sm = new RuntimeStateMachine(); + expect(sm.getState()).toBe("uninitialized"); + }); + + it("transitions through the happy path", () => { + const sm = new RuntimeStateMachine(); + sm.transition("starting"); + expect(sm.getState()).toBe("starting"); + + sm.transition("ready-local"); + expect(sm.getState()).toBe("ready-local"); + + sm.transition("ready-authenticated"); + expect(sm.getState()).toBe("ready-authenticated"); + + sm.transition("shutting-down"); + expect(sm.getState()).toBe("shutting-down"); + + sm.transition("stopped"); + expect(sm.getState()).toBe("stopped"); + }); + + it("allows transition to error from any non-terminal state", () => { + for (const state of [ + "uninitialized", + "starting", + "ready-local", + "ready-authenticated", + "shutting-down", + ] as const) { + const sm = new RuntimeStateMachine(); + // Walk to the target state + if (state === "starting") sm.transition("starting"); + if (state === "ready-local") { + sm.transition("starting"); + sm.transition("ready-local"); + } + if (state === "ready-authenticated") { + sm.transition("starting"); + sm.transition("ready-local"); + sm.transition("ready-authenticated"); + } + if (state === "shutting-down") { + sm.transition("starting"); + 
sm.transition("ready-local"); + sm.transition("shutting-down"); + } + + expect(sm.canTransition("error")).toBe(true); + sm.transition("error"); + expect(sm.getState()).toBe("error"); + } + }); + + it("rejects invalid transitions", () => { + const sm = new RuntimeStateMachine(); + expect(sm.canTransition("ready-authenticated")).toBe(false); + expect(() => sm.transition("ready-authenticated")).toThrow( + "Invalid state transition: uninitialized -> ready-authenticated", + ); + }); + + it("does not allow transitions from stopped", () => { + const sm = new RuntimeStateMachine(); + sm.transition("starting"); + sm.transition("ready-local"); + sm.transition("shutting-down"); + sm.transition("stopped"); + + expect(sm.canTransition("starting")).toBe(false); + expect(sm.canTransition("error")).toBe(false); + }); + + it("allows recovery from error to starting", () => { + const sm = new RuntimeStateMachine(); + sm.transition("error"); + expect(sm.canTransition("starting")).toBe(true); + sm.transition("starting"); + expect(sm.getState()).toBe("starting"); + }); + + it("allows transition from error to stopped", () => { + const sm = new RuntimeStateMachine(); + sm.transition("error"); + sm.transition("stopped"); + expect(sm.getState()).toBe("stopped"); + }); + + it("notifies listeners on state change", () => { + const sm = new RuntimeStateMachine(); + const events: StateTransitionEvent[] = []; + sm.onStateChange((e) => events.push(e)); + + sm.transition("starting", "boot"); + sm.transition("ready-local"); + + expect(events).toHaveLength(2); + expect(events[0].from).toBe("uninitialized"); + expect(events[0].to).toBe("starting"); + expect(events[0].reason).toBe("boot"); + expect(events[1].from).toBe("starting"); + expect(events[1].to).toBe("ready-local"); + expect(events[1].reason).toBeUndefined(); + }); + + it("unsubscribes listener", () => { + const sm = new RuntimeStateMachine(); + const listener = vi.fn(); + const unsub = sm.onStateChange(listener); + + 
sm.transition("starting"); + expect(listener).toHaveBeenCalledTimes(1); + + unsub(); + sm.transition("ready-local"); + expect(listener).toHaveBeenCalledTimes(1); + }); + + it("allows shutting down from ready-local (skipping auth)", () => { + const sm = new RuntimeStateMachine(); + sm.transition("starting"); + sm.transition("ready-local"); + sm.transition("shutting-down"); + expect(sm.getState()).toBe("shutting-down"); + }); +}); diff --git a/packages/core/src/lifecycle/state-machine.ts b/packages/core/src/lifecycle/state-machine.ts new file mode 100644 index 0000000..1b5bc0c --- /dev/null +++ b/packages/core/src/lifecycle/state-machine.ts @@ -0,0 +1,88 @@ +/** + * Runtime state machine for Personal Server lifecycle. + * + * States: + * - uninitialized: Process started, nothing configured + * - starting: Config loaded, dependencies initializing + * - ready-local: HTTP listener up, no owner auth yet + * - ready-authenticated: Owner key derived, tunnel + sync active + * - shutting-down: Graceful shutdown in progress + * - stopped: All resources released + * - error: Unrecoverable error from any state + */ + +export type RuntimeState = + | "uninitialized" + | "starting" + | "ready-local" + | "ready-authenticated" + | "shutting-down" + | "stopped" + | "error"; + +/** Valid state transitions. Each key maps to the set of states it can transition to. 
*/
+const VALID_TRANSITIONS: Record<RuntimeState, Set<RuntimeState>> = {
+  uninitialized: new Set(["starting", "error"]),
+  starting: new Set(["ready-local", "error"]),
+  "ready-local": new Set(["ready-authenticated", "shutting-down", "error"]),
+  "ready-authenticated": new Set(["shutting-down", "error"]),
+  "shutting-down": new Set(["stopped", "error"]),
+  stopped: new Set(),
+  error: new Set(["starting", "stopped"]),
+};
+
+export interface StateTransitionEvent {
+  from: RuntimeState;
+  to: RuntimeState;
+  timestamp: Date;
+  reason?: string;
+}
+
+export type StateChangeListener = (event: StateTransitionEvent) => void;
+
+export class RuntimeStateMachine {
+  private state: RuntimeState = "uninitialized";
+  private listeners: StateChangeListener[] = [];
+
+  /** Get the current state. */
+  getState(): RuntimeState {
+    return this.state;
+  }
+
+  /** Check whether a transition to the target state is valid. */
+  canTransition(to: RuntimeState): boolean {
+    return VALID_TRANSITIONS[this.state].has(to);
+  }
+
+  /**
+   * Transition to a new state.
+   * Throws if the transition is not valid.
+   */
+  transition(to: RuntimeState, reason?: string): void {
+    if (!this.canTransition(to)) {
+      throw new Error(`Invalid state transition: ${this.state} -> ${to}`);
+    }
+
+    const event: StateTransitionEvent = {
+      from: this.state,
+      to,
+      timestamp: new Date(),
+      reason,
+    };
+
+    this.state = to;
+
+    for (const listener of this.listeners) {
+      listener(event);
+    }
+  }
+
+  /** Register a listener for state changes. Returns an unsubscribe function. 
*/ + onStateChange(listener: StateChangeListener): () => void { + this.listeners.push(listener); + return () => { + const idx = this.listeners.indexOf(listener); + if (idx >= 0) this.listeners.splice(idx, 1); + }; + } +} diff --git a/packages/core/src/schemas/server-config.test.ts b/packages/core/src/schemas/server-config.test.ts index 6144a0f..70f063d 100644 --- a/packages/core/src/schemas/server-config.test.ts +++ b/packages/core/src/schemas/server-config.test.ts @@ -39,6 +39,20 @@ describe("ServerConfigSchema — sync fields", () => { expect(config.sync.lastProcessedTimestamp).toBe("2026-01-21T10:00:00Z"); }); + it("server.address is optional and accepts 0x-prefixed strings", () => { + const config = ServerConfigSchema.parse({ + server: { address: "0x1234567890abcdef1234567890abcdef12345678" }, + }); + expect(config.server.address).toBe( + "0x1234567890abcdef1234567890abcdef12345678", + ); + }); + + it("server.address defaults to undefined when not provided", () => { + const config = ServerConfigSchema.parse({}); + expect(config.server.address).toBeUndefined(); + }); + it("storage.config.vana.apiUrl defaults to https://storage.vana.com", () => { const config = ServerConfigSchema.parse({ storage: { config: { vana: {} } }, diff --git a/packages/core/src/schemas/server-config.ts b/packages/core/src/schemas/server-config.ts index debf52e..408cfae 100644 --- a/packages/core/src/schemas/server-config.ts +++ b/packages/core/src/schemas/server-config.ts @@ -58,6 +58,11 @@ export const ServerConfigSchema = z.object({ .object({ port: z.number().int().min(1).max(65535).default(DEFAULTS.server.port), origin: z.url().default(DEFAULTS.server.origin), + address: z + .string() + .startsWith("0x") + .optional() + .describe("Owner wallet address (set by DataBridge wrapper)"), }) .default(DEFAULTS.server), logging: z diff --git a/packages/runtime/package.json b/packages/runtime/package.json new file mode 100644 index 0000000..4603269 --- /dev/null +++ b/packages/runtime/package.json 
@@ -0,0 +1,37 @@
+{
+  "name": "@opendatalabs/personal-server-ts-runtime",
+  "version": "0.0.1",
+  "description": "Runtime lifecycle for the Vana Personal Server — daemon, IPC, supervisor, tunnel lifecycle",
+  "license": "MIT",
+  "repository": {
+    "type": "git",
+    "url": "https://github.com/vana-com/personal-server-ts.git",
+    "directory": "packages/runtime"
+  },
+  "homepage": "https://github.com/vana-com/personal-server-ts#readme",
+  "bugs": {
+    "url": "https://github.com/vana-com/personal-server-ts/issues"
+  },
+  "type": "module",
+  "main": "./dist/index.js",
+  "types": "./dist/index.d.ts",
+  "exports": {
+    ".": {
+      "types": "./dist/index.d.ts",
+      "import": "./dist/index.js"
+    }
+  },
+  "publishConfig": {
+    "access": "public"
+  },
+  "files": [
+    "dist"
+  ],
+  "scripts": {
+    "build": "tsc --build"
+  },
+  "dependencies": {
+    "@opendatalabs/personal-server-ts-core": "*",
+    "@hono/node-server": "^1.14.0"
+  }
+}
diff --git a/packages/runtime/src/daemon.ts b/packages/runtime/src/daemon.ts
new file mode 100644
index 0000000..d0ee02a
--- /dev/null
+++ b/packages/runtime/src/daemon.ts
@@ -0,0 +1,64 @@
+/**
+ * Self-daemonization for Personal Server.
+ *
+ * Uses `spawn()` with `{ detached: true, stdio: 'ignore' }` + `unref()`
+ * to run the server as a background process.
+ *
+ * Cross-platform: uses `spawn()` (not `fork()`) because `fork()`
+ * is broken on Windows for detached processes (nodejs/node#36808).
+ */
+
+import { spawn } from "node:child_process";
+import { openSync, closeSync } from "node:fs";
+import { join } from "node:path";
+
+export interface DaemonizeOptions {
+  /** Path to the script to run (e.g. packages/server/dist/index.js). */
+  scriptPath: string;
+  /** Environment variables to pass to the child. */
+  env?: Record<string, string>;
+  /** Storage root for log output. */
+  storageRoot: string;
+  /** Path to the node binary.
Default: process.execPath */ + nodePath?: string; +} + +export interface DaemonizeResult { + pid: number; + logPath: string; +} + +/** + * Spawn the server as a detached background process. + * + * Stdout and stderr are redirected to a log file at + * `{storageRoot}/daemon.log`. The parent process can exit + * immediately after calling this. + */ +export function daemonize(options: DaemonizeOptions): DaemonizeResult { + const { + scriptPath, + env = {}, + storageRoot, + nodePath = process.execPath, + } = options; + + const logPath = join(storageRoot, "daemon.log"); + + // Open log file for stdout/stderr + const logFd = openSync(logPath, "a"); + + const child = spawn(nodePath, [scriptPath], { + detached: true, + stdio: ["ignore", logFd, logFd], + env: { ...process.env, ...env }, + }); + + child.unref(); + closeSync(logFd); + + return { + pid: child.pid!, + logPath, + }; +} diff --git a/packages/runtime/src/index.ts b/packages/runtime/src/index.ts new file mode 100644 index 0000000..9446217 --- /dev/null +++ b/packages/runtime/src/index.ts @@ -0,0 +1,25 @@ +export { + writePidFile, + readPidFile, + removePidFile, + checkRunningServer, + pidFilePath, + type ServerMetadata, +} from "./pid.js"; +export { + Supervisor, + type SupervisorOptions, + type SupervisorEvents, +} from "./supervisor.js"; +export { + daemonize, + type DaemonizeOptions, + type DaemonizeResult, +} from "./daemon.js"; +export { resolveSocketPath } from "./ipc-path.js"; +export { createIpcServer, type IpcServerOptions } from "./ipc-server.js"; +export { + IpcClient, + type IpcClientOptions, + type IpcResponse, +} from "./ipc-client.js"; diff --git a/packages/runtime/src/ipc-client.ts b/packages/runtime/src/ipc-client.ts new file mode 100644 index 0000000..c6404e9 --- /dev/null +++ b/packages/runtime/src/ipc-client.ts @@ -0,0 +1,97 @@ +/** + * IPC client for communicating with Personal Server over UDS. + * + * Sends standard HTTP requests over Unix domain socket. 
+ * Used by CLI commands (start, stop, status) and by Tauri backend.
+ */
+
+import { request as httpRequest, type IncomingMessage } from "node:http";
+import { resolveSocketPath } from "./ipc-path.js";
+
+export interface IpcClientOptions {
+  /** Storage root for socket path resolution. */
+  storageRoot: string;
+  /** Request timeout in ms. Default: 5000 */
+  timeoutMs?: number;
+}
+
+export interface IpcResponse {
+  status: number;
+  body: string;
+}
+
+export class IpcClient {
+  private readonly socketPath: string;
+  private readonly timeoutMs: number;
+
+  constructor(options: IpcClientOptions) {
+    this.socketPath = resolveSocketPath(options.storageRoot);
+    this.timeoutMs = options.timeoutMs ?? 5_000;
+  }
+
+  /** Send a GET request to the IPC server. */
+  async get(path: string): Promise<IpcResponse> {
+    return this.send("GET", path);
+  }
+
+  /** Send a POST request to the IPC server with a JSON body. */
+  async post(path: string, body?: unknown): Promise<IpcResponse> {
+    return this.send("POST", path, body);
+  }
+
+  /** Send a DELETE request to the IPC server. */
+  async delete(path: string): Promise<IpcResponse> {
+    return this.send("DELETE", path);
+  }
+
+  /** Parse response body as JSON. */
+  static parseJson<T>(response: IpcResponse): T {
+    return JSON.parse(response.body) as T;
+  }
+
+  private send(
+    method: string,
+    path: string,
+    body?: unknown,
+  ): Promise<IpcResponse> {
+    return new Promise<IpcResponse>((resolve, reject) => {
+      const jsonBody = body !== undefined ? JSON.stringify(body) : undefined;
+
+      const req = httpRequest(
+        {
+          socketPath: this.socketPath,
+          method,
+          path,
+          headers: {
+            ...(jsonBody
+              ? {
+                  "Content-Type": "application/json",
+                  "Content-Length": Buffer.byteLength(jsonBody),
+                }
+              : {}),
+          },
+          timeout: this.timeoutMs,
+        },
+        (res: IncomingMessage) => {
+          let data = "";
+          res.on("data", (chunk: Buffer) => {
+            data += chunk.toString();
+          });
+          res.on("end", () => {
+            resolve({ status: res.statusCode ??
0, body: data }); + }); + }, + ); + + req.on("error", reject); + req.on("timeout", () => { + req.destroy(new Error("IPC request timed out")); + }); + + if (jsonBody) { + req.write(jsonBody); + } + req.end(); + }); + } +} diff --git a/packages/runtime/src/ipc-path.test.ts b/packages/runtime/src/ipc-path.test.ts new file mode 100644 index 0000000..e6b7efa --- /dev/null +++ b/packages/runtime/src/ipc-path.test.ts @@ -0,0 +1,29 @@ +import { describe, it, expect } from "vitest"; +import { resolveSocketPath } from "./ipc-path.js"; + +describe("resolveSocketPath", () => { + it("returns path within storage root for short paths", () => { + const result = resolveSocketPath("/tmp/ps"); + expect(result).toBe("/tmp/ps/ipc.sock"); + }); + + it("falls back to /tmp for paths exceeding 100 bytes", () => { + // Create a very long path that exceeds the macOS 103-byte limit + const longRoot = "/" + "a".repeat(100); + const result = resolveSocketPath(longRoot); + expect(result).toMatch(/^\/tmp\/vana-ps-[a-f0-9]{12}\.sock$/); + }); + + it("produces deterministic fallback paths for same input", () => { + const longRoot = "/" + "b".repeat(100); + const path1 = resolveSocketPath(longRoot); + const path2 = resolveSocketPath(longRoot); + expect(path1).toBe(path2); + }); + + it("produces different fallback paths for different inputs", () => { + const root1 = "/" + "c".repeat(100); + const root2 = "/" + "d".repeat(100); + expect(resolveSocketPath(root1)).not.toBe(resolveSocketPath(root2)); + }); +}); diff --git a/packages/runtime/src/ipc-path.ts b/packages/runtime/src/ipc-path.ts new file mode 100644 index 0000000..00c4c01 --- /dev/null +++ b/packages/runtime/src/ipc-path.ts @@ -0,0 +1,42 @@ +/** + * IPC socket path resolution. 
+ * + * | Platform | Path | + * |-------------|-----------------------------------------------| + * | macOS/Linux | {storageRoot}/ipc.sock (fallback if >100 bytes)| + * | Windows | \\.\pipe\vana-personal-server | + * + * macOS has a 103-byte limit on Unix domain socket paths. + * If the preferred path exceeds 100 bytes, falls back to + * /tmp/vana-ps-{hash}.sock using a hash of the storage root. + */ + +import { join } from "node:path"; +import { createHash } from "node:crypto"; + +/** Max safe socket path length (macOS limit is 103, use 100 for safety). */ +const MAX_SOCKET_PATH_LENGTH = 100; + +const WINDOWS_PIPE_NAME = "\\\\.\\pipe\\vana-personal-server"; + +/** + * Resolve the IPC socket/pipe path for the current platform. + */ +export function resolveSocketPath(storageRoot: string): string { + if (process.platform === "win32") { + return WINDOWS_PIPE_NAME; + } + + const preferred = join(storageRoot, "ipc.sock"); + + if (Buffer.byteLength(preferred, "utf-8") <= MAX_SOCKET_PATH_LENGTH) { + return preferred; + } + + // Fallback: hash the storage root to create a short path in /tmp + const hash = createHash("sha256") + .update(storageRoot) + .digest("hex") + .slice(0, 12); + return `/tmp/vana-ps-${hash}.sock`; +} diff --git a/packages/runtime/src/ipc-server.ts b/packages/runtime/src/ipc-server.ts new file mode 100644 index 0000000..70a4228 --- /dev/null +++ b/packages/runtime/src/ipc-server.ts @@ -0,0 +1,83 @@ +/** + * IPC server using HTTP-over-UDS. + * + * Uses @hono/node-server's createAdaptorServer() to create an + * http.Server that listens on a Unix domain socket (or named pipe + * on Windows) instead of a TCP port. + * + * Security: socket file permissions set to 0600 (owner-only access). + */ + +import { createServer, type Server } from "node:http"; +import { unlink, chmod } from "node:fs/promises"; +import { resolveSocketPath } from "./ipc-path.js"; + +export interface IpcServerOptions { + /** Storage root for socket path resolution. 
*/
+  storageRoot: string;
+  /** The request handler function (Hono's fetch adapter). */
+  fetch: (request: Request) => Response | Promise<Response>;
+}
+
+/**
+ * Create and start an IPC server listening on a Unix domain socket.
+ *
+ * Returns an object with the server instance, socket path, and
+ * a close function for graceful shutdown.
+ */
+export async function createIpcServer(options: IpcServerOptions): Promise<{
+  server: Server;
+  socketPath: string;
+  close: () => Promise<void>;
+}> {
+  const socketPath = resolveSocketPath(options.storageRoot);
+
+  // Remove stale socket file if it exists
+  try {
+    await unlink(socketPath);
+  } catch {
+    // File doesn't exist — that's fine
+  }
+
+  // We use the Hono node adapter's fetch handler with a raw http.Server
+  // since createAdaptorServer hardcodes listen behavior we don't want.
+  // Instead, we import the request-to-fetch adapter ourselves.
+  const { getRequestListener } = await import("@hono/node-server");
+  const listener = getRequestListener(options.fetch);
+
+  const server = createServer(listener);
+
+  return new Promise((resolve, reject) => {
+    server.on("error", reject);
+
+    server.listen(socketPath, async () => {
+      server.removeListener("error", reject);
+
+      // Set socket permissions to owner-only on Unix
+      if (process.platform !== "win32") {
+        try {
+          await chmod(socketPath, 0o600);
+        } catch {
+          // Best effort — some filesystems may not support this
+        }
+      }
+
+      resolve({
+        server,
+        socketPath,
+        close: () =>
+          new Promise<void>((res, rej) => {
+            server.close((err) => {
+              if (err) rej(err);
+              else {
+                // Clean up socket file
+                unlink(socketPath)
+                  .catch(() => {})
+                  .then(res);
+              }
+            });
+          }),
+      });
+    });
+  });
+}
diff --git a/packages/runtime/src/pid.test.ts b/packages/runtime/src/pid.test.ts
new file mode 100644
index 0000000..c8cccfc
--- /dev/null
+++ b/packages/runtime/src/pid.test.ts
@@ -0,0 +1,88 @@
+import { describe, it, expect, afterEach } from "vitest";
+import { mkdtemp, rm } from "node:fs/promises";
+import {
tmpdir } from "node:os";
+import { join } from "node:path";
+import {
+  writePidFile,
+  readPidFile,
+  removePidFile,
+  checkRunningServer,
+  pidFilePath,
+  type ServerMetadata,
+} from "./pid.js";
+
+describe("PID file management", () => {
+  let tempDir: string;
+
+  afterEach(async () => {
+    if (tempDir) {
+      await rm(tempDir, { recursive: true, force: true });
+    }
+  });
+
+  async function setup(): Promise<string> {
+    tempDir = await mkdtemp(join(tmpdir(), "pid-test-"));
+    return tempDir;
+  }
+
+  const sampleMetadata: ServerMetadata = {
+    pid: process.pid,
+    port: 8080,
+    socketPath: "/tmp/ipc.sock",
+    version: "1.0.0",
+    startedAt: "2026-01-01T00:00:00.000Z",
+  };
+
+  it("resolves PID file path", async () => {
+    const root = await setup();
+    expect(pidFilePath(root)).toBe(join(root, "server.json"));
+  });
+
+  it("writes and reads PID file", async () => {
+    const root = await setup();
+    await writePidFile(root, sampleMetadata);
+    const read = await readPidFile(root);
+    expect(read).toEqual(sampleMetadata);
+  });
+
+  it("returns null when PID file does not exist", async () => {
+    const root = await setup();
+    const result = await readPidFile(root);
+    expect(result).toBeNull();
+  });
+
+  it("removes PID file", async () => {
+    const root = await setup();
+    await writePidFile(root, sampleMetadata);
+    await removePidFile(root);
+    const result = await readPidFile(root);
+    expect(result).toBeNull();
+  });
+
+  it("removePidFile does not throw when file missing", async () => {
+    const root = await setup();
+    await expect(removePidFile(root)).resolves.toBeUndefined();
+  });
+
+  it("checkRunningServer returns metadata for current process", async () => {
+    const root = await setup();
+    await writePidFile(root, sampleMetadata);
+    const result = await checkRunningServer(root);
+    expect(result).toEqual(sampleMetadata);
+  });
+
+  it("checkRunningServer cleans up stale PID file for dead process", async () => {
+    const root = await setup();
+    const staleMeta: ServerMetadata = {
...sampleMetadata,
+      pid: 999999, // Very likely not running
+    };
+    await writePidFile(root, staleMeta);
+    const result = await checkRunningServer(root);
+    expect(result).toBeNull();
+
+    // PID file should be cleaned up
+    const afterRead = await readPidFile(root);
+    expect(afterRead).toBeNull();
+  });
+});
diff --git a/packages/runtime/src/pid.ts b/packages/runtime/src/pid.ts
new file mode 100644
index 0000000..221992f
--- /dev/null
+++ b/packages/runtime/src/pid.ts
@@ -0,0 +1,76 @@
+/**
+ * PID file management for Personal Server.
+ *
+ * Writes a `server.json` file containing PID, port, socket path, version,
+ * and start time. Used by CLI and DataBridge to detect running instances.
+ */
+
+import { writeFile, readFile, unlink } from "node:fs/promises";
+import { join } from "node:path";
+
+export interface ServerMetadata {
+  pid: number;
+  port: number;
+  socketPath: string | null;
+  version: string;
+  startedAt: string;
+}
+
+const PID_FILENAME = "server.json";
+
+/** Resolve the PID file path within the storage root. */
+export function pidFilePath(storageRoot: string): string {
+  return join(storageRoot, PID_FILENAME);
+}
+
+/** Write server metadata to the PID file. */
+export async function writePidFile(
+  storageRoot: string,
+  metadata: ServerMetadata,
+): Promise<void> {
+  const filePath = pidFilePath(storageRoot);
+  await writeFile(filePath, JSON.stringify(metadata, null, 2), "utf-8");
+}
+
+/** Read server metadata from the PID file. Returns null if file doesn't exist. */
+export async function readPidFile(
+  storageRoot: string,
+): Promise<ServerMetadata | null> {
+  try {
+    const raw = await readFile(pidFilePath(storageRoot), "utf-8");
+    return JSON.parse(raw) as ServerMetadata;
+  } catch {
+    return null;
+  }
+}
+
+/** Remove the PID file. Ignores errors if the file doesn't exist.
*/
+export async function removePidFile(storageRoot: string): Promise<void> {
+  try {
+    await unlink(pidFilePath(storageRoot));
+  } catch {
+    // File may not exist — that's fine
+  }
+}
+
+/**
+ * Check if the process recorded in the PID file is still running.
+ * Returns the metadata if alive, null otherwise.
+ * Cleans up stale PID files.
+ */
+export async function checkRunningServer(
+  storageRoot: string,
+): Promise<ServerMetadata | null> {
+  const metadata = await readPidFile(storageRoot);
+  if (!metadata) return null;
+
+  try {
+    // signal 0 tests if the process exists without actually sending a signal
+    process.kill(metadata.pid, 0);
+    return metadata;
+  } catch {
+    // Process is dead — clean up stale PID file
+    await removePidFile(storageRoot);
+    return null;
+  }
+}
diff --git a/packages/runtime/src/supervisor.test.ts b/packages/runtime/src/supervisor.test.ts
new file mode 100644
index 0000000..da289c7
--- /dev/null
+++ b/packages/runtime/src/supervisor.test.ts
@@ -0,0 +1,152 @@
+import { describe, it, expect, afterEach } from "vitest";
+import { Supervisor } from "./supervisor.js";
+
+describe("Supervisor", () => {
+  let supervisor: Supervisor | null = null;
+
+  afterEach(async () => {
+    if (supervisor) {
+      await supervisor.stop();
+      supervisor = null;
+    }
+  });
+
+  it("starts a child process and emits start event", async () => {
+    supervisor = new Supervisor({
+      command: process.execPath,
+      args: ["-e", "setTimeout(() => {}, 60000)"],
+    });
+
+    const started = new Promise<number>((resolve) => {
+      supervisor!.on("start", resolve);
+    });
+
+    supervisor.start();
+    const pid = await started;
+    expect(pid).toBeGreaterThan(0);
+    expect(supervisor.isRunning()).toBe(true);
+  });
+
+  it("stops the child process", async () => {
+    supervisor = new Supervisor({
+      command: process.execPath,
+      args: ["-e", "setTimeout(() => {}, 60000)"],
+    });
+
+    // Register listener before starting
+    const started = new Promise<void>((resolve) => {
+      supervisor!.on("start", () => resolve());
+    });
+    supervisor.start();
await started;
+
+    await supervisor.stop();
+    expect(supervisor.getProcess()).toBeNull();
+    expect(supervisor.isRunning()).toBe(false);
+  });
+
+  it("restarts on process exit with backoff", async () => {
+    supervisor = new Supervisor({
+      command: process.execPath,
+      args: ["-e", "process.exit(1)"],
+      baseDelayMs: 50,
+      maxDelayMs: 200,
+      maxJitterMs: 0, // Disable jitter for deterministic tests
+      maxRetries: 2,
+    });
+
+    const restarts: number[] = [];
+    supervisor.on("restart", (attempt) => restarts.push(attempt));
+
+    const maxRetried = new Promise<void>((resolve) => {
+      supervisor!.on("max-retries", resolve);
+    });
+
+    supervisor.start();
+    await maxRetried;
+
+    expect(restarts).toEqual([1, 2]);
+    expect(supervisor.isRunning()).toBe(false);
+  });
+
+  it("emits exit event with code", async () => {
+    supervisor = new Supervisor({
+      command: process.execPath,
+      args: ["-e", "process.exit(42)"],
+      maxRetries: 0,
+    });
+
+    const exitPromise = new Promise<[number | null, string | null]>(
+      (resolve) => {
+        supervisor!.on("exit", (code, signal) => resolve([code, signal]));
+      },
+    );
+
+    supervisor.start();
+    const [code] = await exitPromise;
+    expect(code).toBe(42);
+  });
+
+  it("throws when starting while already running", async () => {
+    supervisor = new Supervisor({
+      command: process.execPath,
+      args: ["-e", "setTimeout(() => {}, 60000)"],
+    });
+
+    const started = new Promise<void>((r) => {
+      supervisor!.on("start", () => r());
+    });
+    supervisor.start();
+    await started;
+
+    expect(() => supervisor!.start()).toThrow("Supervisor already running");
+  });
+
+  it("calculates exponential backoff delay", async () => {
+    supervisor = new Supervisor({
+      command: process.execPath,
+      args: ["-e", "process.exit(1)"],
+      baseDelayMs: 100,
+      maxDelayMs: 1000,
+      maxJitterMs: 0,
+      maxRetries: 3,
+    });
+
+    const delays: number[] = [];
+    supervisor.on("restart", (_attempt, delay) => delays.push(delay));
+
+    const maxRetried = new Promise<void>((resolve) => {
+      supervisor!.on("max-retries", resolve);
+    });
+
+    supervisor.start();
+    await maxRetried;
+
+    // Exponential: 100 * 2^0 = 100, 100 * 2^1 = 200, 100 * 2^2 = 400
+    expect(delays).toEqual([100, 200, 400]);
+  });
+
+  it("caps delay at maxDelayMs", async () => {
+    supervisor = new Supervisor({
+      command: process.execPath,
+      args: ["-e", "process.exit(1)"],
+      baseDelayMs: 100,
+      maxDelayMs: 150,
+      maxJitterMs: 0,
+      maxRetries: 3,
+    });
+
+    const delays: number[] = [];
+    supervisor.on("restart", (_attempt, delay) => delays.push(delay));
+
+    const maxRetried = new Promise<void>((resolve) => {
+      supervisor!.on("max-retries", resolve);
+    });
+
+    supervisor.start();
+    await maxRetried;
+
+    // 100, 150 (capped from 200), 150 (capped from 400)
+    expect(delays).toEqual([100, 150, 150]);
+  });
+});
diff --git a/packages/runtime/src/supervisor.ts b/packages/runtime/src/supervisor.ts
new file mode 100644
index 0000000..5e949a5
--- /dev/null
+++ b/packages/runtime/src/supervisor.ts
@@ -0,0 +1,184 @@
+/**
+ * Generic process supervisor with exponential backoff.
+ *
+ * Restarts a child process on crash with configurable backoff:
+ * - Base delay: 1 second
+ * - Max delay: 60 seconds
+ * - Jitter: 0-1 second (random)
+ * - Max retries: 10 (then gives up and emits "max-retries" event)
+ *
+ * Resets retry count on successful runs (process alive > resetAfterMs).
+ */
+
+import {
+  spawn,
+  type ChildProcess,
+  type SpawnOptions,
+} from "node:child_process";
+import { EventEmitter } from "node:events";
+
+export interface SupervisorOptions {
+  /** Command to execute. */
+  command: string;
+  /** Arguments for the command. */
+  args: string[];
+  /** Spawn options (env, cwd, stdio, etc). */
+  spawnOptions?: SpawnOptions;
+  /** Base delay in ms before first retry. Default: 1000 */
+  baseDelayMs?: number;
+  /** Max delay in ms between retries. Default: 60000 */
+  maxDelayMs?: number;
+  /** Max jitter in ms added to delay. Default: 1000 */
+  maxJitterMs?: number;
+  /** Max consecutive retries before giving up.
Default: 10 */
+  maxRetries?: number;
+  /** Time in ms after which a running process resets retry count. Default: 30000 */
+  resetAfterMs?: number;
+}
+
+export interface SupervisorEvents {
+  start: [pid: number];
+  exit: [code: number | null, signal: string | null];
+  restart: [attempt: number, delayMs: number];
+  "max-retries": [];
+  error: [error: Error];
+}
+
+export class Supervisor extends EventEmitter<SupervisorEvents> {
+  private process: ChildProcess | null = null;
+  private retryCount = 0;
+  private retryTimer: ReturnType<typeof setTimeout> | null = null;
+  private startTime: number | null = null;
+  private stopped = true;
+
+  private readonly command: string;
+  private readonly args: string[];
+  private readonly spawnOptions: SpawnOptions;
+  private readonly baseDelayMs: number;
+  private readonly maxDelayMs: number;
+  private readonly maxJitterMs: number;
+  private readonly maxRetries: number;
+  private readonly resetAfterMs: number;
+
+  constructor(options: SupervisorOptions) {
+    super();
+    this.command = options.command;
+    this.args = options.args;
+    this.spawnOptions = options.spawnOptions ?? {};
+    this.baseDelayMs = options.baseDelayMs ?? 1_000;
+    this.maxDelayMs = options.maxDelayMs ?? 60_000;
+    this.maxJitterMs = options.maxJitterMs ?? 1_000;
+    this.maxRetries = options.maxRetries ?? 10;
+    this.resetAfterMs = options.resetAfterMs ?? 30_000;
+  }
+
+  /** Start the supervised process. */
+  start(): ChildProcess {
+    if (!this.stopped) {
+      throw new Error("Supervisor already running");
+    }
+
+    this.stopped = false;
+    this.retryCount = 0;
+    return this.spawnChild();
+  }
+
+  /** Stop the supervised process. Does not restart.
*/
+  async stop(): Promise<void> {
+    this.stopped = true;
+    this.clearRetryTimer();
+
+    if (!this.process) return;
+
+    return new Promise<void>((resolve) => {
+      const proc = this.process!;
+
+      const timeout = setTimeout(() => {
+        proc.kill("SIGKILL");
+        this.process = null;
+        resolve();
+      }, 5_000);
+
+      proc.once("exit", () => {
+        clearTimeout(timeout);
+        this.process = null;
+        resolve();
+      });
+
+      proc.kill("SIGTERM");
+    });
+  }
+
+  /** Get the current child process (if running). */
+  getProcess(): ChildProcess | null {
+    return this.process;
+  }
+
+  /** Whether the supervisor is actively managing a process. */
+  isRunning(): boolean {
+    return !this.stopped;
+  }
+
+  private spawnChild(): ChildProcess {
+    const proc = spawn(this.command, this.args, this.spawnOptions);
+    this.process = proc;
+    this.startTime = Date.now();
+
+    this.emit("start", proc.pid!);
+
+    proc.on("error", (err) => {
+      this.emit("error", err);
+    });
+
+    proc.on("exit", (code, signal) => {
+      this.process = null;
+      this.emit("exit", code, signal);
+
+      if (this.stopped) return;
+
+      // Reset retry count if process ran long enough
+      if (this.startTime && Date.now() - this.startTime >= this.resetAfterMs) {
+        this.retryCount = 0;
+      }
+
+      this.scheduleRestart();
+    });
+
+    return proc;
+  }
+
+  private scheduleRestart(): void {
+    if (this.retryCount >= this.maxRetries) {
+      this.stopped = true;
+      this.emit("max-retries");
+      return;
+    }
+
+    const delay = this.calculateDelay();
+    this.retryCount++;
+
+    this.emit("restart", this.retryCount, delay);
+
+    this.retryTimer = setTimeout(() => {
+      this.retryTimer = null;
+      if (!this.stopped) {
+        this.spawnChild();
+      }
+    }, delay);
+  }
+
+  /** Exponential backoff with jitter.
*/ + private calculateDelay(): number { + const exponential = this.baseDelayMs * Math.pow(2, this.retryCount); + const capped = Math.min(exponential, this.maxDelayMs); + const jitter = Math.random() * this.maxJitterMs; + return capped + jitter; + } + + private clearRetryTimer(): void { + if (this.retryTimer) { + clearTimeout(this.retryTimer); + this.retryTimer = null; + } + } +} diff --git a/packages/runtime/tsconfig.json b/packages/runtime/tsconfig.json new file mode 100644 index 0000000..49b37cc --- /dev/null +++ b/packages/runtime/tsconfig.json @@ -0,0 +1,14 @@ +{ + "extends": "../../tsconfig.base.json", + "compilerOptions": { + "outDir": "dist", + "rootDir": "src", + "composite": true, + "declaration": true, + "declarationMap": true, + "sourceMap": true + }, + "include": ["src/**/*.ts"], + "exclude": ["src/**/*.test.ts"], + "references": [{ "path": "../core" }] +} diff --git a/packages/server/package.json b/packages/server/package.json index 616a62f..0400c87 100644 --- a/packages/server/package.json +++ b/packages/server/package.json @@ -39,6 +39,7 @@ }, "dependencies": { "@opendatalabs/personal-server-ts-core": "*", + "@opendatalabs/personal-server-ts-runtime": "*", "hono": "^4.7.0", "@hono/node-server": "^1.14.0" } diff --git a/packages/server/src/admin-app.ts b/packages/server/src/admin-app.ts new file mode 100644 index 0000000..9339dd1 --- /dev/null +++ b/packages/server/src/admin-app.ts @@ -0,0 +1,409 @@ +/** + * Admin Hono app for IPC (Unix domain socket) transport. + * + * Mounts admin/owner routes WITHOUT auth middleware — socket file + * permissions (chmod 0600) enforce the local trust boundary instead. + * + * During the migration period, both the HTTP app and this admin app + * coexist. Once DataBridge switches to IPC, admin routes will be + * removed from the HTTP app. 
+ */ + +import { Hono } from "hono"; +import { ProtocolError } from "@opendatalabs/personal-server-ts-core/errors"; +import { ScopeSchema } from "@opendatalabs/personal-server-ts-core/scopes"; +import { createDataFileEnvelope } from "@opendatalabs/personal-server-ts-core/schemas/data-file"; +import { + generateCollectedAt, + writeDataFile, + deleteAllForScope, +} from "@opendatalabs/personal-server-ts-core/storage/hierarchy"; +import type { HierarchyManagerOptions } from "@opendatalabs/personal-server-ts-core/storage/hierarchy"; +import type { IndexManager } from "@opendatalabs/personal-server-ts-core/storage/index"; +import type { GatewayClient } from "@opendatalabs/personal-server-ts-core/gateway"; +import type { AccessLogReader } from "@opendatalabs/personal-server-ts-core/logging/access-reader"; +import type { SyncManager } from "@opendatalabs/personal-server-ts-core/sync"; +import type { ServerSigner } from "@opendatalabs/personal-server-ts-core/signing"; +import type { Logger } from "pino"; +import { + createBodyLimit, + DATA_INGEST_MAX_SIZE, +} from "./middleware/body-limit.js"; + +export interface AdminAppDeps { + logger: Logger; + indexManager: IndexManager; + hierarchyOptions: HierarchyManagerOptions; + gateway: GatewayClient; + accessLogReader: AccessLogReader; + serverOwner?: `0x${string}`; + syncManager?: SyncManager | null; + serverSigner?: ServerSigner; +} + +export function createAdminApp(deps: AdminAppDeps): Hono { + const app = new Hono(); + + // --- Data ingest (POST /v1/data/:scope) --- + app.use("/v1/data/:scope", createBodyLimit(DATA_INGEST_MAX_SIZE)); + + app.post("/v1/data/:scope", async (c) => { + const scopeParam = c.req.param("scope"); + const scopeResult = ScopeSchema.safeParse(scopeParam); + if (!scopeResult.success) { + return c.json( + { + error: "INVALID_SCOPE", + message: scopeResult.error.issues[0].message, + }, + 400, + ); + } + const scope = scopeResult.data; + + let body: unknown; + try { + body = await c.req.json(); + } catch { 
+      return c.json(
+        { error: "INVALID_BODY", message: "Request body must be valid JSON" },
+        400,
+      );
+    }
+
+    if (body === null || typeof body !== "object" || Array.isArray(body)) {
+      return c.json(
+        {
+          error: "INVALID_BODY",
+          message: "Request body must be a JSON object",
+        },
+        400,
+      );
+    }
+
+    let schemaUrl: string | undefined;
+    try {
+      const schema = await deps.gateway.getSchemaForScope(scope);
+      if (!schema) {
+        return c.json(
+          {
+            error: "NO_SCHEMA",
+            message: `No schema registered for scope: ${scope}`,
+          },
+          400,
+        );
+      }
+      schemaUrl = schema.definitionUrl;
+    } catch (err) {
+      deps.logger.error({ err, scope }, "Gateway schema lookup failed");
+      return c.json(
+        {
+          error: "GATEWAY_ERROR",
+          message: "Failed to look up schema for scope",
+        },
+        502,
+      );
+    }
+
+    const collectedAt = generateCollectedAt();
+    const envelope = createDataFileEnvelope(
+      scope,
+      collectedAt,
+      body as Record<string, unknown>,
+      schemaUrl,
+    );
+
+    const writeResult = await writeDataFile(deps.hierarchyOptions, envelope);
+
+    deps.indexManager.insert({
+      fileId: null,
+      path: writeResult.relativePath,
+      scope,
+      collectedAt,
+      sizeBytes: writeResult.sizeBytes,
+    });
+
+    deps.logger.info(
+      { scope, collectedAt, path: writeResult.relativePath },
+      "Data file ingested (IPC)",
+    );
+
+    let status: "stored" | "syncing" = "stored";
+    if (deps.syncManager) {
+      deps.syncManager.notifyNewData();
+      status = "syncing";
+    }
+
+    return c.json({ scope, collectedAt, status }, 201);
+  });
+
+  // --- Data delete (DELETE /v1/data/:scope) ---
+  app.delete("/v1/data/:scope", async (c) => {
+    const scopeParam = c.req.param("scope");
+    const scopeResult = ScopeSchema.safeParse(scopeParam);
+    if (!scopeResult.success) {
+      return c.json(
+        {
+          error: "INVALID_SCOPE",
+          message: scopeResult.error.issues[0].message,
+        },
+        400,
+      );
+    }
+    const scope = scopeResult.data;
+
+    const deletedCount = deps.indexManager.deleteByScope(scope);
+    await deleteAllForScope(deps.hierarchyOptions, scope);
+
+    deps.logger.info({
scope, deletedCount }, "Scope deleted (IPC)");
+    return c.body(null, 204);
+  });
+
+  // --- Grants (GET + POST /v1/grants) ---
+  app.get("/v1/grants", async (c) => {
+    if (!deps.serverOwner) {
+      return c.json(
+        {
+          error: {
+            code: 500,
+            errorCode: "SERVER_NOT_CONFIGURED",
+            message: "Server owner address not configured",
+          },
+        },
+        500,
+      );
+    }
+    const grants = await deps.gateway.listGrantsByUser(deps.serverOwner);
+    return c.json({ grants });
+  });
+
+  app.post("/v1/grants", async (c) => {
+    if (!deps.serverOwner) {
+      return c.json(
+        {
+          error: {
+            code: 500,
+            errorCode: "SERVER_NOT_CONFIGURED",
+            message: "Server owner not configured",
+          },
+        },
+        500,
+      );
+    }
+    if (!deps.serverSigner) {
+      return c.json(
+        {
+          error: {
+            code: 500,
+            errorCode: "SERVER_SIGNER_NOT_CONFIGURED",
+            message: "Server signer not configured",
+          },
+        },
+        500,
+      );
+    }
+
+    let body: unknown;
+    try {
+      body = await c.req.json();
+    } catch {
+      return c.json(
+        { error: "INVALID_BODY", message: "Invalid JSON body" },
+        400,
+      );
+    }
+
+    const b = body as Record<string, unknown>;
+    if (
+      !b ||
+      typeof b.granteeAddress !== "string" ||
+      !b.granteeAddress.startsWith("0x") ||
+      !Array.isArray(b.scopes) ||
+      b.scopes.length === 0
+    ) {
+      return c.json(
+        {
+          error: "INVALID_BODY",
+          message:
+            "Body must include granteeAddress (0x string) and scopes (non-empty string array)",
+        },
+        400,
+      );
+    }
+
+    const { granteeAddress, scopes, expiresAt, nonce } = b as {
+      granteeAddress: `0x${string}`;
+      scopes: string[];
+      expiresAt?: number;
+      nonce?: number;
+    };
+
+    const builder = await deps.gateway.getBuilder(granteeAddress);
+    if (!builder) {
+      return c.json(
+        {
+          error: {
+            code: 404,
+            errorCode: "BUILDER_NOT_REGISTERED",
+            message: `Builder ${granteeAddress} is not registered on-chain`,
+          },
+        },
+        404,
+      );
+    }
+
+    const grantPayload = JSON.stringify({
+      user: deps.serverOwner,
+      builder: granteeAddress,
+      scopes,
+      expiresAt: expiresAt ?? 0,
+      nonce: nonce ??
Date.now(), + }); + + const signature = await deps.serverSigner.signGrantRegistration({ + grantorAddress: deps.serverOwner, + granteeId: builder.id as `0x${string}`, + grant: grantPayload, + fileIds: [], + }); + + const result = await deps.gateway.createGrant({ + grantorAddress: deps.serverOwner, + granteeId: builder.id, + grant: grantPayload, + fileIds: [], + signature, + }); + + return c.json({ grantId: result.grantId }, 201); + }); + + // --- Grant revoke (DELETE /v1/grants/:grantId) --- + app.delete("/v1/grants/:grantId", async (c) => { + if (!deps.serverOwner) { + return c.json( + { + error: { + code: 500, + errorCode: "SERVER_NOT_CONFIGURED", + message: "Server owner not configured", + }, + }, + 500, + ); + } + if (!deps.serverSigner) { + return c.json( + { + error: { + code: 500, + errorCode: "SERVER_SIGNER_NOT_CONFIGURED", + message: "Server signer not configured", + }, + }, + 500, + ); + } + + const grantId = c.req.param("grantId"); + if (!grantId || !grantId.startsWith("0x")) { + return c.json( + { + error: "INVALID_GRANT_ID", + message: "grantId must be a 0x-prefixed hex string", + }, + 400, + ); + } + + const signature = await deps.serverSigner.signGrantRevocation({ + grantorAddress: deps.serverOwner, + grantId: grantId as `0x${string}`, + }); + + await deps.gateway.revokeGrant({ + grantId, + grantorAddress: deps.serverOwner, + signature, + }); + + return c.json({ revoked: true }); + }); + + // --- Access logs (GET /v1/access-logs) --- + app.get("/v1/access-logs", async (c) => { + const limitParam = c.req.query("limit"); + const offsetParam = c.req.query("offset"); + + const limit = limitParam !== undefined ? parseInt(limitParam, 10) : 50; + const offset = offsetParam !== undefined ? parseInt(offsetParam, 10) : 0; + + const result = await deps.accessLogReader.read({ + limit: Number.isNaN(limit) ? 50 : limit, + offset: Number.isNaN(offset) ? 
0 : offset, + }); + + return c.json(result); + }); + + // --- Sync (GET + POST /v1/sync) --- + app.get("/v1/sync/status", async (c) => { + if (!deps.syncManager) { + return c.json({ + enabled: false, + running: false, + lastSync: null, + lastProcessedTimestamp: null, + pendingFiles: 0, + errors: [], + }); + } + return c.json(deps.syncManager.getStatus()); + }); + + app.post("/v1/sync/trigger", async (c) => { + if (!deps.syncManager) { + return c.json( + { status: "disabled", message: "Sync is not enabled" }, + 200, + ); + } + await deps.syncManager.trigger(); + return c.json({ status: "started", message: "Sync triggered" }, 202); + }); + + // --- Error handler --- + app.onError((err, c) => { + if (err instanceof ProtocolError) { + deps.logger.warn({ err }, err.message); + return c.json(err.toJSON(), err.code as 400 | 500); + } + + deps.logger.error({ err }, "Unhandled error (admin)"); + return c.json( + { + error: { + code: 500, + errorCode: "INTERNAL_ERROR", + message: "Internal server error", + }, + }, + 500, + ); + }); + + app.notFound((c) => { + return c.json( + { + error: { + code: 404, + errorCode: "NOT_FOUND", + message: "Not found", + }, + }, + 404, + ); + }); + + return app; +} diff --git a/packages/server/src/api.ts b/packages/server/src/api.ts index 7c9c6f2..df966de 100644 --- a/packages/server/src/api.ts +++ b/packages/server/src/api.ts @@ -3,3 +3,4 @@ export { type CreateServerOptions, type ServerContext, } from "./bootstrap.js"; +export { createAdminApp, type AdminAppDeps } from "./admin-app.js"; diff --git a/packages/server/src/app.test.ts b/packages/server/src/app.test.ts index 355e717..7593e30 100644 --- a/packages/server/src/app.test.ts +++ b/packages/server/src/app.test.ts @@ -97,6 +97,7 @@ describe("createApp", () => { logger, version: "0.0.1", startedAt: new Date(), + port: 8080, indexManager, hierarchyOptions: { dataDir: join(tempDir, "data") }, serverOrigin: SERVER_ORIGIN, @@ -251,6 +252,7 @@ describe("createApp", () => { logger, version: 
"0.0.1", startedAt: new Date(), + port: 8080, indexManager, hierarchyOptions: { dataDir: join(tempDir, "data") }, serverOrigin: SERVER_ORIGIN, @@ -285,6 +287,7 @@ describe("createApp", () => { logger, version: "0.0.1", startedAt: new Date(), + port: 8080, indexManager, hierarchyOptions: { dataDir: join(tempDir, "data") }, serverOrigin: SERVER_ORIGIN, @@ -313,6 +316,7 @@ describe("createApp", () => { logger, version: "0.0.1", startedAt: new Date(), + port: 8080, indexManager, hierarchyOptions: { dataDir: join(tempDir, "data") }, serverOrigin: SERVER_ORIGIN, diff --git a/packages/server/src/app.ts b/packages/server/src/app.ts index 16e5d7e..c05dfd0 100644 --- a/packages/server/src/app.ts +++ b/packages/server/src/app.ts @@ -27,6 +27,7 @@ export interface AppDeps { logger: Logger; version: string; startedAt: Date; + port: number; indexManager: IndexManager; hierarchyOptions: HierarchyManagerOptions; serverOrigin: string | (() => string); @@ -62,6 +63,7 @@ export function createApp(deps: AppDeps): Hono { healthRoute({ version: deps.version, startedAt: deps.startedAt, + port: deps.port, serverOwner: deps.serverOwner, identity: deps.identity, gateway: deps.gateway, diff --git a/packages/server/src/bootstrap.ts b/packages/server/src/bootstrap.ts index 659f1f8..3b6b4bc 100644 --- a/packages/server/src/bootstrap.ts +++ b/packages/server/src/bootstrap.ts @@ -44,14 +44,17 @@ import { import { createVanaStorageAdapter } from "@opendatalabs/personal-server-ts-core/storage/adapters"; import type { Hono } from "hono"; import { createApp, type IdentityInfo } from "./app.js"; +import { createAdminApp } from "./admin-app.js"; import { generateDevToken } from "./dev-token.js"; import { TunnelManager, ensureFrpcBinary } from "./tunnel/index.js"; export interface ServerContext { app: Hono; + adminApp: Hono; logger: Logger; config: ServerConfig; startedAt: Date; + storageRoot: string; indexManager: IndexManager; gatewayClient: GatewayClient; accessLogReader: AccessLogReader; @@ -220,6 
+223,7 @@ export async function createServer( logger, version: pkg.version, startedAt, + port: config.server.port, indexManager, hierarchyOptions, serverOrigin: () => effectiveOrigin, @@ -235,6 +239,17 @@ export async function createServer( getTunnelStatus: () => tunnelManager?.getStatus() ?? null, }); + const adminApp = createAdminApp({ + logger, + indexManager, + hierarchyOptions, + gateway: gatewayClient, + accessLogReader, + serverOwner, + syncManager, + serverSigner, + }); + const cleanup = async () => { if (tunnelManager) { await tunnelManager.stop(); @@ -247,9 +262,11 @@ export async function createServer( const context: ServerContext = { app, + adminApp, logger, config, startedAt, + storageRoot, indexManager, gatewayClient, accessLogReader, diff --git a/packages/server/src/index.ts index 437fb8d..a7ecd36 100644 --- a/packages/server/src/index.ts +++ b/packages/server/src/index.ts @@ -1,6 +1,11 @@ import { serve } from "@hono/node-server"; import { createRequire } from "node:module"; import { loadConfig } from "@opendatalabs/personal-server-ts-core/config"; +import { + createIpcServer, + writePidFile, + removePidFile, +} from "@opendatalabs/personal-server-ts-runtime"; import { createServer } from "./bootstrap.js"; import { verifyTunnelUrl } from "./tunnel/index.js"; @@ -13,12 +18,16 @@ async function main(): Promise<void> { const rootPath = process.env.PERSONAL_SERVER_ROOT_PATH; const config = await loadConfig({ rootPath }); const context = await createServer(config, { rootPath }); - const { app, logger, devToken } = context; + const { app, adminApp, logger, devToken, storageRoot } = context; + // --- HTTP listener (protocol routes) --- const server = serve( { fetch: app.fetch, port: config.server.port }, (info) => { - logger.info({ port: info.port, version: pkg.version }, "Server started"); + logger.info( + { port: info.port, version: pkg.version }, + "HTTP server started", + ); if (devToken) { logger.info( @@ -30,6 +39,31 @@ async 
function main(): Promise<void> { }, ); + // --- IPC listener (admin routes via Unix domain socket) --- + let closeIpc: (() => Promise<void>) | undefined; + try { + const ipc = await createIpcServer({ + storageRoot, + fetch: adminApp.fetch, + }); + closeIpc = ipc.close; + logger.info({ socketPath: ipc.socketPath }, "IPC server started"); + + // Write PID file after both listeners are up + await writePidFile(storageRoot, { + pid: process.pid, + port: config.server.port, + socketPath: ipc.socketPath, + version: pkg.version, + startedAt: new Date().toISOString(), + }); + } catch (err) { + logger.warn( + { err }, + "IPC server failed to start — admin routes unavailable via socket", + ); + } + // Fire-and-forget: gateway check + tunnel connect (slow operations) // HTTP server is already listening so POST /v1/data/:scope works immediately context.startBackgroundServices().then(() => { @@ -58,9 +92,24 @@ async function main(): Promise<void> { } }); - function shutdown(signal: string): void { + async function shutdown(signal: string): Promise<void> { logger.info({ signal }, "Shutdown signal received, draining connections"); + // Clean up PID file + await removePidFile(storageRoot); + + // Clean up IPC server + if (closeIpc) { + try { + await closeIpc(); + } catch { + // Best effort + } + } + + // Clean up server context (tunnel, sync, db) + await context.cleanup(); + server.close(() => { logger.info("Server stopped"); process.exit(0); }); @@ -73,8 +122,8 @@ async function main(): Promise<void> { }, DRAIN_TIMEOUT_MS).unref(); } - process.on("SIGTERM", () => shutdown("SIGTERM")); - process.on("SIGINT", () => shutdown("SIGINT")); + process.on("SIGTERM", () => void shutdown("SIGTERM")); + process.on("SIGINT", () => void shutdown("SIGINT")); } main().catch((err) => { diff --git a/packages/server/src/middleware/local-only.test.ts b/packages/server/src/middleware/local-only.test.ts new file mode 100644 index 0000000..2c8f410 --- /dev/null +++ b/packages/server/src/middleware/local-only.test.ts @@ -0,0 +1,65 @@ 
+import { describe, it, expect } from "vitest"; +import { Hono } from "hono"; +import { createLocalOnlyMiddleware } from "./local-only.js"; + +function createTestApp() { + const app = new Hono(); + const localOnly = createLocalOnlyMiddleware(); + + app.post("/data", localOnly, (c) => c.json({ ok: true })); + + return app; +} + +describe("createLocalOnlyMiddleware", () => { + it("allows requests without tunnel headers", async () => { + const app = createTestApp(); + const res = await app.request("/data", { method: "POST" }); + expect(res.status).toBe(200); + const body = await res.json(); + expect(body).toEqual({ ok: true }); + }); + + it("rejects requests with X-Forwarded-For header", async () => { + const app = createTestApp(); + const res = await app.request("/data", { + method: "POST", + headers: { "X-Forwarded-For": "1.2.3.4" }, + }); + expect(res.status).toBe(403); + const body = await res.json(); + expect(body.error.errorCode).toBe("LOCAL_ONLY"); + }); + + it("rejects requests with x-ps-transport: tunnel header", async () => { + const app = createTestApp(); + const res = await app.request("/data", { + method: "POST", + headers: { "x-ps-transport": "tunnel" }, + }); + expect(res.status).toBe(403); + const body = await res.json(); + expect(body.error.errorCode).toBe("LOCAL_ONLY"); + }); + + it("allows requests with x-ps-transport set to non-tunnel value", async () => { + const app = createTestApp(); + const res = await app.request("/data", { + method: "POST", + headers: { "x-ps-transport": "direct" }, + }); + expect(res.status).toBe(200); + }); + + it("rejects when both tunnel headers are present", async () => { + const app = createTestApp(); + const res = await app.request("/data", { + method: "POST", + headers: { + "X-Forwarded-For": "10.0.0.1", + "x-ps-transport": "tunnel", + }, + }); + expect(res.status).toBe(403); + }); +}); diff --git a/packages/server/src/middleware/local-only.ts b/packages/server/src/middleware/local-only.ts new file mode 100644 index 
0000000..d29d571 --- /dev/null +++ b/packages/server/src/middleware/local-only.ts @@ -0,0 +1,32 @@ +/** + * Middleware that rejects requests arriving through the FRP tunnel. + * + * Detection: + * - X-Forwarded-For header (added automatically by frps) + * - x-ps-transport: "tunnel" header (set in frpc config) + * + * Apply to routes that must only be called locally (e.g. data ingest). + */ + +import type { MiddlewareHandler } from "hono"; + +export function createLocalOnlyMiddleware(): MiddlewareHandler { + return async (c, next) => { + if ( + c.req.header("x-forwarded-for") || + c.req.header("x-ps-transport") === "tunnel" + ) { + return c.json( + { + error: { + code: 403, + errorCode: "LOCAL_ONLY", + message: "This endpoint is only accessible locally", + }, + }, + 403, + ); + } + await next(); + }; +} diff --git a/packages/server/src/routes/data.ts b/packages/server/src/routes/data.ts index 60cefea..559738f 100644 --- a/packages/server/src/routes/data.ts +++ b/packages/server/src/routes/data.ts @@ -22,6 +22,7 @@ import { createBuilderCheckMiddleware } from "../middleware/builder-check.js"; import { createGrantCheckMiddleware } from "../middleware/grant-check.js"; import { createAccessLogMiddleware } from "../middleware/access-log.js"; import { createOwnerCheckMiddleware } from "../middleware/owner-check.js"; +import { createLocalOnlyMiddleware } from "../middleware/local-only.js"; export interface DataRouteDeps { indexManager: IndexManager; @@ -50,6 +51,7 @@ export function dataRoutes(deps: DataRouteDeps): Hono { serverOwner: deps.serverOwner, }); const accessLog = createAccessLogMiddleware(deps.accessLogWriter); + const localOnly = createLocalOnlyMiddleware(); // GET /v1/data/:scope/versions — list versions for a scope (requires auth + builder, no grant) app.get("/:scope/versions", web3Auth, builderCheck, async (c) => { @@ -176,7 +178,7 @@ export function dataRoutes(deps: DataRouteDeps): Hono { app.use("/:scope", createBodyLimit(DATA_INGEST_MAX_SIZE)); - 
app.post("/:scope", async (c) => { + app.post("/:scope", localOnly, async (c) => { // 1. Parse & validate scope const scopeParam = c.req.param("scope"); const scopeResult = ScopeSchema.safeParse(scopeParam); diff --git a/packages/server/src/routes/grants.test.ts index b7836e8..8e459ed 100644 --- a/packages/server/src/routes/grants.test.ts +++ b/packages/server/src/routes/grants.test.ts @@ -310,6 +310,83 @@ describe("POST /verify", () => { }); }); +describe("DELETE /:grantId", () => { + async function deleteWithOwnerAuth( + app: ReturnType<typeof createApp>, + grantId: string, + ) { + const auth = await buildWeb3SignedHeader({ + wallet: owner, + aud: SERVER_ORIGIN, + method: "DELETE", + uri: `/${grantId}`, + }); + return app.request(`/${grantId}`, { + method: "DELETE", + headers: { authorization: auth }, + }); + } + + it("revokes grant via gateway and returns { revoked: true }", async () => { + const mockGateway = createMockGateway(); + const mockSigner = createMockServerSigner(); + + const app = createApp({ gateway: mockGateway, serverSigner: mockSigner }); + const res = await deleteWithOwnerAuth(app, "0xgrant1"); + + expect(res.status).toBe(200); + const json = await res.json(); + expect(json.revoked).toBe(true); + + expect(mockSigner.signGrantRevocation).toHaveBeenCalledWith({ + grantorAddress: owner.address, + grantId: "0xgrant1", + }); + + expect(mockGateway.revokeGrant).toHaveBeenCalledWith({ + grantId: "0xgrant1", + grantorAddress: owner.address, + signature: "0xrevokesig", + }); + }); + + it("returns 400 for invalid grantId (no 0x prefix)", async () => { + const app = createApp(); + const res = await deleteWithOwnerAuth(app, "invalid-grant-id"); + + expect(res.status).toBe(400); + const json = await res.json(); + expect(json.error).toBe("INVALID_GRANT_ID"); + }); + + it("returns 500 when serverSigner is not configured", async () => { + const app = grantsRoutes({ + logger, + gateway: createMockGateway(), + serverOwner: owner.address, 
+ serverOrigin: SERVER_ORIGIN, + }); + + const res = await deleteWithOwnerAuth(app, "0xgrant1"); + + expect(res.status).toBe(500); + const json = await res.json(); + expect(json.error.errorCode).toBe("SERVER_SIGNER_NOT_CONFIGURED"); + }); + + it("propagates gateway errors", async () => { + const mockGateway = createMockGateway(); + vi.mocked(mockGateway.revokeGrant).mockRejectedValue( + new Error("Gateway down"), + ); + + const app = createApp({ gateway: mockGateway }); + const res = await deleteWithOwnerAuth(app, "0xgrant1"); + + expect(res.status).toBe(500); + }); +}); + describe("POST /", () => { async function postWithOwnerAuth( app: ReturnType<typeof createApp>, diff --git a/packages/server/src/routes/grants.ts index a038a35..4eadb3c 100644 --- a/packages/server/src/routes/grants.ts +++ b/packages/server/src/routes/grants.ts @@ -215,6 +215,61 @@ export function grantsRoutes(deps: GrantsRouteDeps): Hono { return c.json({ grantId: result.grantId }, 201); }); + // DELETE /:grantId — revoke a grant (owner-only) + app.delete("/:grantId", web3Auth, ownerCheck, async (c) => { + if (!deps.serverOwner) { + return c.json( + { + error: { + code: 500, + errorCode: "SERVER_NOT_CONFIGURED", + message: + "Server owner address not configured. Set VANA_MASTER_KEY_SIGNATURE environment variable.", + }, + }, + 500, + ); + } + + if (!deps.serverSigner) { + return c.json( + { + error: { + code: 500, + errorCode: "SERVER_SIGNER_NOT_CONFIGURED", + message: + "Server signer not configured. 
Set VANA_MASTER_KEY_SIGNATURE environment variable.", + }, + }, + 500, + ); + } + + const grantId = c.req.param("grantId"); + if (!grantId || !grantId.startsWith("0x")) { + return c.json( + { + error: "INVALID_GRANT_ID", + message: "grantId must be a 0x-prefixed hex string", + }, + 400, + ); + } + + const signature = await deps.serverSigner.signGrantRevocation({ + grantorAddress: deps.serverOwner, + grantId: grantId as `0x${string}`, + }); + + await deps.gateway.revokeGrant({ + grantId, + grantorAddress: deps.serverOwner, + signature, + }); + + return c.json({ revoked: true }); + }); + // POST /verify — public endpoint, no auth required app.post("/verify", async (c) => { let body: unknown; diff --git a/packages/server/src/routes/health.test.ts index 09abd94..0afabe1 100644 --- a/packages/server/src/routes/health.test.ts +++ b/packages/server/src/routes/health.test.ts @@ -3,7 +3,7 @@ import { describe, it, expect, vi } from "vitest"; import { healthRoute } from "./health.js"; describe("healthRoute", () => { - const deps = { version: "0.0.1", startedAt: new Date() }; + const deps = { version: "0.0.1", startedAt: new Date(), port: 8080 }; function createMockGateway( overrides?: Partial<GatewayClient>, ) { @@ -49,6 +49,7 @@ describe("healthRoute", () => { const app = healthRoute({ version: "0.0.1", startedAt: new Date(), + port: 8080, serverOwner: "0x1234567890abcdef1234567890abcdef12345678", }); const res = await app.request("/health"); @@ -58,7 +59,11 @@ }); it("owner is null when serverOwner is not set", async () => { - const app = healthRoute({ version: "0.0.1", startedAt: new Date() }); + const app = healthRoute({ + version: "0.0.1", + startedAt: new Date(), + port: 8080, + }); const res = await app.request("/health"); const body = await res.json(); @@ -67,7 +72,7 @@ it("uptime increases over time", async () => { const past = new Date(Date.now() - 5000); - const app = 
healthRoute({ version: "0.0.1", startedAt: past }); + const app = healthRoute({ version: "0.0.1", startedAt: past, port: 8080 }); const res = await app.request("/health"); const body = await res.json(); @@ -81,7 +86,11 @@ describe("healthRoute", () => { }); it("identity is null when not configured", async () => { - const app = healthRoute({ version: "0.0.1", startedAt: new Date() }); + const app = healthRoute({ + version: "0.0.1", + startedAt: new Date(), + port: 8080, + }); const res = await app.request("/health"); const body = await res.json(); expect(body.identity).toBeNull(); @@ -91,6 +100,7 @@ describe("healthRoute", () => { const app = healthRoute({ version: "0.0.1", startedAt: new Date(), + port: 8080, identity: { address: "0xServerAddr", publicKey: "0x04PubKey", @@ -110,6 +120,7 @@ describe("healthRoute", () => { const app = healthRoute({ version: "0.0.1", startedAt: new Date(), + port: 8080, identity: { address: "0xServerAddr", publicKey: "0x04PubKey", @@ -140,6 +151,7 @@ describe("healthRoute", () => { const app = healthRoute({ version: "0.0.1", startedAt: new Date(), + port: 8080, identity: { address: "0xServerAddr", publicKey: "0x04PubKey", @@ -160,7 +172,11 @@ describe("healthRoute", () => { }); it("tunnel is null when getTunnelStatus is not provided", async () => { - const app = healthRoute({ version: "0.0.1", startedAt: new Date() }); + const app = healthRoute({ + version: "0.0.1", + startedAt: new Date(), + port: 8080, + }); const res = await app.request("/health"); const body = await res.json(); expect(body.tunnel).toBeNull(); @@ -170,6 +186,7 @@ describe("healthRoute", () => { const app = healthRoute({ version: "0.0.1", startedAt: new Date(), + port: 8080, getTunnelStatus: () => ({ enabled: true, status: "connected", @@ -191,6 +208,7 @@ describe("healthRoute", () => { const app = healthRoute({ version: "0.0.1", startedAt: new Date(), + port: 8080, getTunnelStatus: () => ({ enabled: true, status: "disconnected", @@ -206,4 +224,32 @@ 
describe("healthRoute", () => { expect(body.tunnel.publicUrl).toBeNull(); expect(body.tunnel.error).toBe("Connection lost"); }); + + describe("GET /status", () => { + it("returns status, owner, and port", async () => { + const app = healthRoute({ + version: "0.0.1", + startedAt: new Date(), + port: 9090, + serverOwner: "0x1234567890abcdef1234567890abcdef12345678", + }); + const res = await app.request("/status"); + expect(res.status).toBe(200); + const body = await res.json(); + expect(body.status).toBe("running"); + expect(body.owner).toBe("0x1234567890abcdef1234567890abcdef12345678"); + expect(body.port).toBe(9090); + }); + + it("returns null owner when not configured", async () => { + const app = healthRoute({ + version: "0.0.1", + startedAt: new Date(), + port: 8080, + }); + const res = await app.request("/status"); + const body = await res.json(); + expect(body.owner).toBeNull(); + }); + }); }); diff --git a/packages/server/src/routes/health.ts b/packages/server/src/routes/health.ts index a01278c..4e19a84 100644 --- a/packages/server/src/routes/health.ts +++ b/packages/server/src/routes/health.ts @@ -8,6 +8,7 @@ import type { TunnelStatusInfo } from "../tunnel/index.js"; export interface HealthDeps { version: string; startedAt: Date; + port: number; serverOwner?: `0x${string}`; identity?: IdentityInfo; gateway?: GatewayClient; @@ -56,5 +57,14 @@ export function healthRoute(deps: HealthDeps): Hono { }); }); + // GET /status — lightweight status for DataBridge wrapper + app.get("/status", (c) => { + return c.json({ + status: "running", + owner: deps.serverOwner ?? 
null, + port: deps.port, + }); + }); + return app; } diff --git a/packages/server/src/tunnel/config.test.ts b/packages/server/src/tunnel/config.test.ts index bc1d429..f5f8636 100644 --- a/packages/server/src/tunnel/config.test.ts +++ b/packages/server/src/tunnel/config.test.ts @@ -1,5 +1,5 @@ import { describe, it, expect } from "vitest"; -import { generateFrpcConfig } from "./config.js"; +import { generateFrpcConfig, deriveProxyName } from "./config.js"; describe("tunnel/config", () => { describe("generateFrpcConfig", () => { @@ -85,5 +85,33 @@ describe("tunnel/config", () => { expect(config).toContain("localPort = 3000"); }); + + it("uses unique proxy name derived from runId", () => { + const config = generateFrpcConfig(defaultOptions); + expect(config).toContain('name = "ps-run-123"'); + expect(config).not.toContain('name = "personal-server"'); + }); + + it("includes x-ps-transport tunnel header in proxy config", () => { + const config = generateFrpcConfig(defaultOptions); + expect(config).toContain('x-ps-transport = "tunnel"'); + expect(config).toContain("[proxies.requestHeaders.set]"); + }); + }); + + describe("deriveProxyName", () => { + it("prefixes with ps- and takes first 8 chars of runId", () => { + expect(deriveProxyName("abcdef12-3456-7890")).toBe("ps-abcdef12"); + }); + + it("handles short runIds", () => { + expect(deriveProxyName("abc")).toBe("ps-abc"); + }); + + it("handles UUID format", () => { + expect(deriveProxyName("a1b2c3d4-e5f6-7890-abcd-ef0123456789")).toBe( + "ps-a1b2c3d4", + ); + }); }); }); diff --git a/packages/server/src/tunnel/config.ts b/packages/server/src/tunnel/config.ts index f25c045..82a044d 100644 --- a/packages/server/src/tunnel/config.ts +++ b/packages/server/src/tunnel/config.ts @@ -16,6 +16,15 @@ export interface FrpcConfigOptions { authSig: string; } +/** + * Derive a unique proxy name from the runId. + * Uses the first 8 characters to avoid FRP proxy name collisions + * between concurrent or stale sessions. 
+ */ +export function deriveProxyName(runId: string): string { + return `ps-${runId.slice(0, 8)}`; +} + /** * Generate frpc.toml configuration content. * @@ -26,6 +35,8 @@ export interface FrpcConfigOptions { * - Root-level metadatas for Auth Plugin validation (required by frps server) */ export function generateFrpcConfig(options: FrpcConfigOptions): string { + const proxyName = deriveProxyName(options.runId); + return `# Auto-generated frpc configuration # Do not edit - regenerated on each server start @@ -41,10 +52,12 @@ metadatas.auth_claim = "${options.authClaim}" metadatas.auth_sig = "${options.authSig}" [[proxies]] -name = "personal-server" +name = "${proxyName}" type = "http" localIP = "127.0.0.1" localPort = ${options.localPort} subdomain = "${options.subdomain}" +[proxies.requestHeaders.set] +x-ps-transport = "tunnel" `; } diff --git a/packages/server/tsconfig.json b/packages/server/tsconfig.json index 49b37cc..d7f9f5c 100644 --- a/packages/server/tsconfig.json +++ b/packages/server/tsconfig.json @@ -10,5 +10,5 @@ }, "include": ["src/**/*.ts"], "exclude": ["src/**/*.test.ts"], - "references": [{ "path": "../core" }] + "references": [{ "path": "../core" }, { "path": "../runtime" }] } diff --git a/tsconfig.json b/tsconfig.json index 86663e6..62b5dab 100644 --- a/tsconfig.json +++ b/tsconfig.json @@ -1,6 +1,7 @@ { "references": [ { "path": "packages/core" }, + { "path": "packages/runtime" }, { "path": "packages/server" }, { "path": "packages/cli" } ],