diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 00000000..fa8d6e23 --- /dev/null +++ b/.dockerignore @@ -0,0 +1,35 @@ +**/node_modules/ +**/build/ +**/.svelte-kit/ +**/.vite-cache/ +frontend/build/ +frontend/tests/ +backend/data/ +backend/tests/ +data/ +app/ +cli/ +docker/config/ +docker/storage/ +docker/certs/ +*.log +*.stackdump +*.bun-build +**/*.lish +**/*.lishnet +**/settings.json +**/peer-id.json +**/datastore/ +.git/ +.github/ +.vscode/ +.idea/ +**/.env +**/.env.* +**/*.local +**/*.local.json +**/*.local.neon +**/*.local.yaml +**/*.local.toml +coverage/ +target/ diff --git a/backend/src/api/api.ts b/backend/src/api/api.ts index 00570376..ace99700 100644 --- a/backend/src/api/api.ts +++ b/backend/src/api/api.ts @@ -36,6 +36,19 @@ export interface APIServerOptions { apiToken?: string | undefined; } +/** + * Liveness probe handler used by the docker-compose healthcheck and external + * orchestrators. Returns a 200 plain-text response when the URL pathname is + * exactly `/health`, or `null` to let the caller fall through to other + * routing (WebSocket upgrade, 400 fallback). Pure so it stays unit-testable + * without spinning up the full APIServer dependency graph. + */ +export function handleHealthProbe(req: globalThis.Request): Response | null { + const url = new URL(req.url); + if (url.pathname === '/health') return new Response('ok\n', { status: 200, headers: { 'content-type': 'text/plain' } }); + return null; +} + export class APIServer { private clients: Set = new Set(); private server: ReturnType> | null = null; @@ -201,6 +214,11 @@ export class APIServer { hostname: this.host, fetch(req, server): Response | undefined { const url = new URL(req.url); + // Liveness probe used by docker-compose healthcheck and external + // orchestrators. Placed before auth + per-request log so probes + // don't need a token and don't pollute traces at probe cadence. 
+ const probe = handleHealthProbe(req); + if (probe) return probe; console.log(`[API] Incoming request: ${req.method} ${url.pathname}`); if (req.method === 'OPTIONS' && url.pathname === '/status') return self.statusOptionsResponse(); if (url.pathname === '/status') return self.statusResponse(url); diff --git a/backend/src/app.ts b/backend/src/app.ts index 5d543ad6..c8585c14 100644 --- a/backend/src/app.ts +++ b/backend/src/app.ts @@ -1,5 +1,6 @@ import { dirname, join } from 'path'; import { productName, productVersion } from '@shared'; +import { resolveHealthcheckPort } from './healthcheck.ts'; import { setupLogger, type LogLevel } from './logger.ts'; import { Networks } from './lishnet/lishnets.ts'; import { DataServer } from './lish/data-server.ts'; @@ -53,6 +54,30 @@ for (let i = 0; i < args.length; i++) { } } +// Self-healthcheck mode used by docker-compose / orchestrators. Performs a +// single HTTP GET against the running instance's `/health` endpoint and exits +// 0 on 2xx, 1 otherwise — no logger setup, no DB open, no libp2p init. +if (args.includes('--healthcheck')) { + const decision = resolveHealthcheckPort(apiPort, process.env['BACKEND_PORT']); + if (decision.exit !== undefined) { + if (decision.message) console.error(decision.message); + process.exit(decision.exit); + } + // Try IPv4 first, then IPv6 — `--host localhost` on Windows binds only to + // `[::1]` while the same flag in a Docker container binds to `127.0.0.1`. + // Probing both addresses keeps the self-flag portable across deployments. + const targets = [`http://127.0.0.1:${decision.port}/health`, `http://[::1]:${decision.port}/health`]; + for (const target of targets) { + try { + const res = await fetch(target, { signal: AbortSignal.timeout(2500) }); + if (res.ok) process.exit(0); + } catch { + // Try the next address. + } + } + process.exit(1); +} + setupLogger(logLevel, logFile ?? 
join(dataDir, 'libershare.log')); const header = `${productName} v${productVersion}`; console.log('='.repeat(header.length)); diff --git a/backend/src/healthcheck.ts b/backend/src/healthcheck.ts new file mode 100644 index 00000000..00641479 --- /dev/null +++ b/backend/src/healthcheck.ts @@ -0,0 +1,43 @@ +/** + * Decision returned by {@link resolveHealthcheckPort}. The probe should + * exit immediately with `exit` if it is set; otherwise `port` carries the + * resolved target for the localhost HTTP probe. + */ +export interface HealthcheckPortDecision { + port: number; + exit?: number; + message?: string; +} + +/** + * Resolve the port the `--healthcheck` self-flag should probe. + * + * Priority: + * 1. Explicit `--port` argument (already parsed into a number by app.ts). + * 2. `BACKEND_PORT` environment variable, if it parses as a positive integer. + * 3. Fallback to 1158 — the binary's own default elsewhere. + * + * A `--port 0` (random-port) configuration without a `BACKEND_PORT` env var + * cannot be probed from a separate process: there is no way to discover the + * actual bound port without intervening file/IPC. Returning a non-zero exit + * code instead of guessing 1158 surfaces the misconfiguration to the + * orchestrator instead of silently flapping the container. + */ +export function resolveHealthcheckPort(apiPort: number, backendPortEnv: string | undefined): HealthcheckPortDecision { + if (apiPort > 0) return { port: apiPort }; + if (backendPortEnv !== undefined && backendPortEnv.length > 0) { + const envPort = Number(backendPortEnv); + if (Number.isFinite(envPort) && envPort > 0) return { port: envPort }; + // User explicitly set BACKEND_PORT but it didn't parse — surface that + // rather than silently falling back to a default port the operator + // might not be expecting. 
+ return { + port: 0, + exit: 2, + message: `[Healthcheck] BACKEND_PORT="${backendPortEnv}" is not a positive integer; cannot probe`, + }; + } + // No explicit configuration — fall back to the binary's documented + // default. Caller is presumed to also be running the server on 1158. + return { port: 1158 }; +} diff --git a/backend/src/settings.ts b/backend/src/settings.ts index ce1fa90c..10b99e82 100644 --- a/backend/src/settings.ts +++ b/backend/src/settings.ts @@ -119,6 +119,14 @@ export interface SettingsData { }; } +function storagePath(envName: string, defaultRelative: string, fallback: string): string { + const explicit = process.env[envName]; + if (explicit) return explicit; + const root = process.env['LIBERSHARE_STORAGE_ROOT']; + if (!root) return fallback; + return `${root.replace(/[\\/]+$/, '')}/${defaultRelative}/`; +} + const DEFAULT_SETTINGS: SettingsData = { language: '', ui: { @@ -147,11 +155,11 @@ const DEFAULT_SETTINGS: SettingsData = { volume: 50, }, storage: { - downloadPath: '~/LiberShare/finished/', - tempPath: '~/LiberShare/temp/', - lishPath: '~/LiberShare/lish/', - lishnetPath: '~/LiberShare/lishnet/', - backupPath: '~/LiberShare/backup/', + downloadPath: storagePath('LIBERSHARE_DOWNLOAD_PATH', 'finished', '~/LiberShare/finished/'), + tempPath: storagePath('LIBERSHARE_TEMP_PATH', 'temp', '~/LiberShare/temp/'), + lishPath: storagePath('LIBERSHARE_LISH_PATH', 'lish', '~/LiberShare/lish/'), + lishnetPath: storagePath('LIBERSHARE_LISHNET_PATH', 'lishnet', '~/LiberShare/lishnet/'), + backupPath: storagePath('LIBERSHARE_BACKUP_PATH', 'backup', '~/LiberShare/backup/'), }, network: { incomingPort: 9090, diff --git a/backend/src/storage.ts b/backend/src/storage.ts index 9ed54ef2..13832d68 100644 --- a/backend/src/storage.ts +++ b/backend/src/storage.ts @@ -1,5 +1,37 @@ import { join } from 'path'; +/** + * Filesystem error codes that signal the data directory cannot be written — + * persisting state would silently disappear, so the caller fails fast 
instead + * of limping along with non-persistent in-memory state. Exported so unit tests + * can drive the same set the production code uses. + */ +export const FATAL_STORAGE_CODES = ['EACCES', 'EROFS', 'EPERM', 'ENOSPC', 'EISDIR'] as const; +export type FatalStorageCode = (typeof FATAL_STORAGE_CODES)[number]; + +export function isFatalStorageError(error: unknown): error is NodeJS.ErrnoException & { code: FatalStorageCode } { + const code = (error as NodeJS.ErrnoException | null)?.code; + return typeof code === 'string' && (FATAL_STORAGE_CODES as readonly string[]).includes(code); +} + +/** + * Build the operator-facing message for a fatal storage error. Pure function + * so unit tests can assert the exact wording without spawning a real process. + */ +export function fatalStorageMessage(filePath: string, code: FatalStorageCode): string[] { + const lines = [`[Storage] FATAL: cannot persist ${filePath} (${code}).`]; + if (code === 'ENOSPC') { + lines.push(`[Storage] The filesystem hosting the data directory is full.`); + } else if (code === 'EISDIR') { + lines.push(`[Storage] A directory exists where a file is expected — remove it before restart.`); + } else { + lines.push(`[Storage] If running in Docker with cap_drop:ALL, the container loses CAP_DAC_OVERRIDE and`); + lines.push(`[Storage] cannot write to a host bind-mount unless its owner matches the container UID.`); + lines.push(`[Storage] Fix on the host: chown 0:0 && chmod 0700 , then restart.`); + } + return lines; +} + /** * Base class for JSON file storage. */ @@ -30,6 +62,18 @@ abstract class BaseStorage { try { await Bun.write(this.filePath, JSON.stringify(data, null, '\t')); } catch (error) { + // Permission / read-only filesystem errors at this layer mean every + // subsequent write to settings.json (peer identity, joined networks, + // user preferences) would silently disappear and the next restart + // would regenerate state from defaults. 
That is much worse than + // crashing — fail fast with an operator-actionable hint instead of + // limping along. The most common trigger in container deployments is + // `cap_drop: ALL` stripping CAP_DAC_OVERRIDE while the bind-mount on + // the host is owned by a non-root user. + if (isFatalStorageError(error)) { + for (const line of fatalStorageMessage(this.filePath, error.code!)) console.error(line); + process.exit(74); // sysexits.h EX_IOERR + } console.error(`[Storage] Error saving ${this.filePath}:`, error); } } diff --git a/backend/tests/unit/api/health.test.ts b/backend/tests/unit/api/health.test.ts new file mode 100644 index 00000000..e6303e13 --- /dev/null +++ b/backend/tests/unit/api/health.test.ts @@ -0,0 +1,32 @@ +import { describe, expect, it } from 'bun:test'; +import { handleHealthProbe } from '../../../src/api/api.ts'; + +describe('handleHealthProbe', () => { + it('returns 200 with plain text body for /health', async () => { + const res = handleHealthProbe(new Request('http://localhost:1158/health')); + expect(res).not.toBeNull(); + expect(res!.status).toBe(200); + expect(res!.headers.get('content-type')).toContain('text/plain'); + expect(await res!.text()).toBe('ok\n'); + }); + + it('returns null for the WebSocket-upgrade path', () => { + expect(handleHealthProbe(new Request('http://localhost:1158/'))).toBeNull(); + }); + + it('returns null for arbitrary paths', () => { + expect(handleHealthProbe(new Request('http://localhost:1158/api/method'))).toBeNull(); + expect(handleHealthProbe(new Request('http://localhost:1158/healthcheck'))).toBeNull(); + expect(handleHealthProbe(new Request('http://localhost:1158/HEALTH'))).toBeNull(); + }); + + it('matches exact path even with query string', async () => { + const res = handleHealthProbe(new Request('http://localhost:1158/health?probe=1')); + expect(res).not.toBeNull(); + expect(res!.status).toBe(200); + }); + + it('does not match nested paths', () => { + expect(handleHealthProbe(new 
Request('http://localhost:1158/health/extra'))).toBeNull(); + }); +}); diff --git a/backend/tests/unit/healthcheck.test.ts b/backend/tests/unit/healthcheck.test.ts new file mode 100644 index 00000000..5069cf3e --- /dev/null +++ b/backend/tests/unit/healthcheck.test.ts @@ -0,0 +1,50 @@ +import { describe, expect, it } from 'bun:test'; +import { resolveHealthcheckPort } from '../../src/healthcheck.ts'; + +describe('resolveHealthcheckPort', () => { + it('uses explicit --port when positive', () => { + expect(resolveHealthcheckPort(2200, undefined)).toEqual({ port: 2200 }); + }); + + it('explicit --port wins over BACKEND_PORT env', () => { + expect(resolveHealthcheckPort(2200, '9999')).toEqual({ port: 2200 }); + }); + + it('falls back to BACKEND_PORT when --port is unset', () => { + const decision = resolveHealthcheckPort(0, '2200'); + expect(decision.port).toBe(2200); + expect(decision.exit).toBeUndefined(); + }); + + it('falls back to 1158 when neither --port nor BACKEND_PORT is set', () => { + expect(resolveHealthcheckPort(0, undefined)).toEqual({ port: 1158 }); + }); + + it('falls back to 1158 when BACKEND_PORT is empty string', () => { + expect(resolveHealthcheckPort(0, '')).toEqual({ port: 1158 }); + }); + + it('exits with code 2 when BACKEND_PORT is non-numeric', () => { + const decision = resolveHealthcheckPort(0, 'abc'); + expect(decision.exit).toBe(2); + expect(decision.message).toContain('BACKEND_PORT'); + expect(decision.message).toContain('"abc"'); + }); + + it('exits with code 2 when BACKEND_PORT is zero', () => { + const decision = resolveHealthcheckPort(0, '0'); + expect(decision.exit).toBe(2); + }); + + it('exits with code 2 when BACKEND_PORT is negative', () => { + const decision = resolveHealthcheckPort(0, '-1'); + expect(decision.exit).toBe(2); + }); + + it('returns the parsed port even when --port=0', () => { + // Random-port mode (--port 0) is allowed if the operator wires + // BACKEND_PORT to point at the actual bound port. 
+ const decision = resolveHealthcheckPort(0, '54321'); + expect(decision.port).toBe(54321); + }); +}); diff --git a/backend/tests/unit/storage.test.ts b/backend/tests/unit/storage.test.ts new file mode 100644 index 00000000..2b609f63 --- /dev/null +++ b/backend/tests/unit/storage.test.ts @@ -0,0 +1,56 @@ +import { describe, expect, it } from 'bun:test'; +import { isFatalStorageError, fatalStorageMessage, FATAL_STORAGE_CODES } from '../../src/storage.ts'; + +describe('storage fatal-error classifier', () => { + for (const code of FATAL_STORAGE_CODES) { + it(`classifies ${code} as fatal`, () => { + const err = Object.assign(new Error('boom'), { code }); + expect(isFatalStorageError(err)).toBe(true); + }); + } + + it('does not classify ENOENT as fatal', () => { + const err = Object.assign(new Error('not found'), { code: 'ENOENT' }); + expect(isFatalStorageError(err)).toBe(false); + }); + + it('does not classify a plain Error as fatal', () => { + expect(isFatalStorageError(new Error('plain'))).toBe(false); + }); + + it('does not classify null/undefined as fatal', () => { + expect(isFatalStorageError(null)).toBe(false); + expect(isFatalStorageError(undefined)).toBe(false); + }); +}); + +describe('storage fatal-error message', () => { + const fixture = '/app/config/settings.json'; + + it('mentions the file path and code on every line block', () => { + const lines = fatalStorageMessage(fixture, 'EACCES'); + expect(lines[0]).toContain(fixture); + expect(lines[0]).toContain('EACCES'); + expect(lines.length).toBeGreaterThan(1); + }); + + it('includes the chown remediation hint for permission codes', () => { + for (const code of ['EACCES', 'EROFS', 'EPERM'] as const) { + const joined = fatalStorageMessage(fixture, code).join('\n'); + expect(joined).toContain('chown 0:0'); + expect(joined).toContain('cap_drop'); + } + }); + + it('uses a disk-full hint for ENOSPC instead of the chown hint', () => { + const joined = fatalStorageMessage(fixture, 'ENOSPC').join('\n'); + 
expect(joined).toContain('full'); + expect(joined).not.toContain('chown 0:0'); + }); + + it('uses a directory-clash hint for EISDIR', () => { + const joined = fatalStorageMessage(fixture, 'EISDIR').join('\n'); + expect(joined).toContain('directory'); + expect(joined).not.toContain('chown 0:0'); + }); +}); diff --git a/docker/Dockerfile b/docker/Dockerfile new file mode 100644 index 00000000..6496bee7 --- /dev/null +++ b/docker/Dockerfile @@ -0,0 +1,40 @@ +# syntax=docker/dockerfile:1 + +FROM oven/bun:1.3.13-debian AS build + +WORKDIR /src + +COPY backend/package.json backend/bun.lock ./backend/ +COPY shared/package.json ./shared/ + +WORKDIR /src/backend +RUN bun install --frozen-lockfile + +WORKDIR /src +COPY backend ./backend +COPY shared ./shared + +ARG TARGETARCH +RUN set -eu; \ + case "$TARGETARCH" in \ + amd64) BUN_TARGET="bun-linux-x64" ;; \ + arm64) BUN_TARGET="bun-linux-arm64" ;; \ + *) echo "Unsupported Docker target architecture: $TARGETARCH" >&2; exit 1 ;; \ + esac; \ + cd backend; \ + bun build --compile --target "$BUN_TARGET" src/app.ts --outfile build/lish-backend + +FROM debian:trixie-slim AS runtime + +RUN apt-get update \ + && apt-get install -y --no-install-recommends ca-certificates \ + && rm -rf /var/lib/apt/lists/* + +WORKDIR /app +COPY --from=build /src/backend/build/lish-backend /app/lish-backend +COPY docker/backend-entrypoint.sh /app/backend-entrypoint.sh +RUN chmod 0755 /app/lish-backend /app/backend-entrypoint.sh + +EXPOSE 1158/tcp 9090/tcp +ENTRYPOINT ["/app/backend-entrypoint.sh"] +CMD ["--datadir", "/app/data", "--host", "0.0.0.0", "--port", "1158"] diff --git a/docker/README.md b/docker/README.md new file mode 100644 index 00000000..7aa70d42 --- /dev/null +++ b/docker/README.md @@ -0,0 +1,281 @@ +# LiberShare Docker + +This compose setup runs LiberShare as two containers: + +- `libershare-backend`: Bun-compiled backend, WebSocket API, libp2p node +- `libershare-frontend`: static Svelte frontend served by a small Bun HTTPS server + 
+Run commands from this `docker/` directory. + +## Defaults + +- Compose project name: `libershare` +- Backend API/WebSocket: `127.0.0.1:${BACKEND_PORT:-1158}` (host-bound to loopback by default) +- libp2p TCP: `9091:9090` (LAN-bound — peers must reach it externally) +- Frontend HTTPS: `6003:6003` +- Browser URL: `https://:6003/` +- Docker network: `libershare-net`, created automatically by compose + +The frontend container reaches the backend over the internal Docker network +(`ws://backend:${BACKEND_PORT}`), so the API does not need to be published on +the host's public interface. Override with `BACKEND_BIND=0.0.0.0` only when a +non-Docker frontend or the CLI client running on another machine needs direct +access. + +Default writable paths are local directories next to `docker-compose.yml`: + +- `./config` -> `/app/config` +- `./storage` -> `/app/storage` +- `./certs` -> `/app/certs` + +`./config` contains backend runtime state: `settings.json`, `libershare.db`, +`libershare.log`, and libp2p datastore. `./storage` is used by default for +finished downloads, temp files, LISH files, LISH network files, and backups. +`./certs` contains frontend TLS certificate files. + +## First-run permissions + +Both services run with `cap_drop: ALL` and `read_only: true` rootfs, but each +keeps `cap_add: CHOWN` so its entrypoint can re-own the bind-mounted state +directories (`/app/config`, `/app/storage`, `/app/certs`) to UID 0 at startup. +The deploy is therefore independent of who runs `mkdir` on the host: + +```sh +mkdir -p config storage certs +docker compose up -d --build +``` + +If the entrypoints are bypassed (e.g. somebody removes `cap_add: CHOWN`) the +backend still fails fast with an actionable message instead of silently +losing writes: + +``` +[Storage] FATAL: cannot persist /app/config/settings.json (EACCES). +[Storage] Fix on the host: chown 0:0 && chmod 0700 , then restart. 
+``` + +Docker named volumes (`LIBERSHARE_CONFIG_SOURCE=my-libershare-config`) work +out of the box without any host-side `mkdir` — the daemon creates the volume +root-owned. + +## Start + +```sh +docker compose config +docker compose up -d --build +docker compose logs -f +``` + +The frontend `depends_on.backend.condition: service_healthy` waits until the +backend healthcheck reports `healthy` before the frontend container is +started, so the WebSocket proxy never races a backend that's still booting. + +## Storage + +For a fresh config, backend storage settings default to: + +- `/app/storage/finished/` +- `/app/storage/temp/` +- `/app/storage/lish/` +- `/app/storage/lishnet/` +- `/app/storage/backup/` + +To put config and storage on specific host disks: + +```sh +mkdir -p /mnt/ssd/libershare-config /mnt/big/libershare-storage +LIBERSHARE_CONFIG_SOURCE=/mnt/ssd/libershare-config \ +LIBERSHARE_STORAGE_SOURCE=/mnt/big/libershare-storage \ +docker compose up -d +``` + +To use Docker named volumes instead of local directories: + +```sh +LIBERSHARE_CONFIG_SOURCE=my-libershare-config \ +LIBERSHARE_STORAGE_SOURCE=my-libershare-storage \ +docker compose up -d +``` + +When migrating an existing node, keep its old config/datastore/database mounted +as `/app/config`; otherwise the backend generates a new peer identity and starts +as a different node. + +## Ports + +Set `BACKEND_PORT` to run the backend API/WebSocket on a different port: + +```sh +BACKEND_PORT=2158 docker compose up -d +``` + +The frontend never hardcodes the backend browser port. Browser WebSocket traffic +goes to same-origin `/ws`, and the frontend container proxies it to: + +```sh +BACKEND_WS_URL=ws://backend:${BACKEND_PORT:-1158} +``` + +By default the host-side publication of the backend API/WebSocket port is +bound to `127.0.0.1`, so only the local machine (and the in-network frontend +container) can reach it. 
Set `BACKEND_BIND=0.0.0.0` to expose the API to the +LAN — for example when the CLI client or a non-Docker frontend runs on a +different host: + +```sh +BACKEND_BIND=0.0.0.0 docker compose up -d +``` + +Combine `BACKEND_BIND=0.0.0.0` with `LISH_TOKEN=...` (see *Authentication* +below) so the exposed port still requires a shared secret. + +## Authentication + +The backend reads `LISH_TOKEN` from the environment. Set it in `.env` next +to `docker-compose.yml`: + +```sh +LISH_TOKEN=$(openssl rand -hex 32) +``` + +When `LISH_TOKEN` is non-empty, every WebSocket and REST request must carry +the same value as `?token=` in the URL — the only exceptions are the +liveness probe `/health` and the auth-state endpoint `/status`, which stay +public so orchestrators and the frontend can detect the auth state without +already knowing the token. + +Browser side: the SvelteKit frontend stores the token (set on the auth +prompt or injected as `__BACKEND_TOKEN__` by the Tauri shell) and passes it +on every WebSocket reconnect. The Docker frontend proxy forwards URLs as-is, +so the same `?token=` query string is preserved when the browser connects to +same-origin `/ws`. + +CLI / curl: + +```sh +curl -fsS "http://localhost:${BACKEND_PORT:-1158}/status?token=$LISH_TOKEN" +``` + +Leave `LISH_TOKEN` unset (the default) to disable authentication entirely. + +## TLS + +The frontend serves HTTPS. On first start it generates a self-signed certificate +in `./certs` unless `TLS_CERT_FILE` and `TLS_KEY_FILE` point to existing files. 
+ +Default self-signed SAN: + +```sh +DNS:localhost,IP:127.0.0.1 +``` + +Set `TLS_CERT_SAN` before the first frontend start when the self-signed +certificate must be valid for a LAN IP or DNS name: + +```sh +TLS_CERT_SAN=DNS:localhost,IP:127.0.0.1,IP:192.168.2.9 docker compose up -d +``` + +To use a real certificate for a hostname, mount a cert directory and point the +container paths at the cert/key: + +```sh +TLS_CERT_SOURCE=/etc/libershare/certs \ +TLS_CERT_FILE=/app/certs/fullchain.pem \ +TLS_KEY_FILE=/app/certs/privkey.pem \ +docker compose up -d +``` + +The browser hostname must match the certificate SAN, for example +`https://lish.example.net:6003/`. For Let's Encrypt live directories, mount or +copy real files; symlinks under `/etc/letsencrypt/live/...` also need their +`archive` target available inside the container. + +## Logs + +Backend application logs are written to: + +```sh +./config/libershare.log +``` + +The app rotates `libershare.log` at 10 MB and keeps 3 rotated files. + +Docker stdout/stderr logs are rotated by compose: + +```sh +LOG_MAX_SIZE=10m +LOG_MAX_FILE=3 +``` + +Backend memory tracing is disabled by default: + +```sh +LIBERSHARE_MEMTRACE=0 +``` + +Set `LIBERSHARE_MEMTRACE=1` only while collecting diagnostics. Memory trace +output is an application file, not a Docker log, so Docker log rotation does not +rotate `memory-trace.jsonl`. + +## Hardening + +Both services run with: + +- `no-new-privileges` +- all Linux capabilities dropped +- read-only root filesystem +- writable state only through explicit mounts and `/tmp` + +## Healthcheck + +The backend exposes an unauthenticated `GET /health` endpoint that returns +`200 ok` once the API server has bound. 
The compose healthcheck reuses the +same binary as a self-probe: + +```yaml +healthcheck: + test: ["CMD", "/app/lish-backend", "--healthcheck"] + interval: 10s + timeout: 3s + retries: 5 + start_period: 20s +``` + +`--healthcheck` does no logger setup, no DB open, no libp2p init — it just +performs one HTTP `GET http://127.0.0.1:$BACKEND_PORT/health` and exits 0 on +2xx, 1 on any other response, 2 on a misconfigured `BACKEND_PORT` env. Probe +the endpoint manually with: + +```sh +curl -fsS http://localhost:${BACKEND_PORT:-1158}/health +``` + +## WebSocket proxy resilience + +The frontend container terminates the browser WebSocket and forwards it to +`ws://backend:$BACKEND_PORT`. If the backend goes away (rolling restart, +crash) the proxy keeps the browser-side socket alive while it reconnects the +upstream with exponential backoff (250 ms → 5 s, capped). It buffers up to +1 MiB of in-flight messages during the outage; if that ceiling is exceeded +the client is closed with code 1011 to force a fresh handshake instead of +silently dropping subscribe messages. + +A single warning is logged after 10 consecutive reconnect attempts; further +retries continue silently to avoid filling the proxy log when a tab is left +open across a long backend outage. + +## Verification + +Run project checks inside Docker/Bun instead of relying on host Node/Bun: + +```sh +docker run --rm -v "$PWD/..:/src" -w /src/frontend oven/bun:1.3.13-debian \ + sh -lc "bun install --frozen-lockfile && bun test tests/api-url.test.ts && bun --bun run check && bun --bun run build" +``` + +Build both images: + +```sh +docker compose build backend frontend +``` diff --git a/docker/backend-entrypoint.sh b/docker/backend-entrypoint.sh new file mode 100644 index 00000000..ce5456b7 --- /dev/null +++ b/docker/backend-entrypoint.sh @@ -0,0 +1,19 @@ +#!/bin/sh +set -eu + +# Repair ownership of bind-mounted state directories before dropping into the +# backend binary. 
With `cap_drop: ALL` the container loses CAP_DAC_OVERRIDE, +# so a host-side `mkdir ./config ./storage` performed by an unprivileged +# user (UID != 0) would otherwise produce EACCES on the first write inside +# the container. CAP_CHOWN is granted back via `cap_add` in compose so the +# chown succeeds without re-introducing CAP_DAC_OVERRIDE. +# +# Already-correct ownership is a no-op for chown — there is no I/O cost +# beyond a stat() per entry, and -R only descends if directories exist. +for dir in /app/config /app/storage; do + if [ -d "$dir" ]; then + chown -R 0:0 "$dir" 2>/dev/null || true + fi +done + +exec /app/lish-backend "$@" diff --git a/docker/certs/.gitignore b/docker/certs/.gitignore new file mode 100644 index 00000000..d6b7ef32 --- /dev/null +++ b/docker/certs/.gitignore @@ -0,0 +1,2 @@ +* +!.gitignore diff --git a/docker/config/.gitignore b/docker/config/.gitignore new file mode 100644 index 00000000..d6b7ef32 --- /dev/null +++ b/docker/config/.gitignore @@ -0,0 +1,2 @@ +* +!.gitignore diff --git a/docker/docker-compose.yml b/docker/docker-compose.yml new file mode 100644 index 00000000..7e46a6ea --- /dev/null +++ b/docker/docker-compose.yml @@ -0,0 +1,112 @@ +name: libershare + +services: + backend: + build: + context: .. + dockerfile: docker/Dockerfile + image: docker-libershare-backend:local + container_name: libershare-backend + restart: unless-stopped + init: true + read_only: true + security_opt: + - no-new-privileges:true + cap_drop: + - ALL + # Granted back so backend-entrypoint.sh can chown the bind-mounted + # config/storage dirs at startup. This makes the deploy independent of + # who created the dirs on the host (root vs. an unprivileged user) and + # of the mode bits the host user picked (e.g. an opinionated 0700). + # CHOWN — change file ownership. 
+ # DAC_READ_SEARCH — traverse and read dirs the container does not own, + # so chown(2) can reach files inside a 0700 host dir without needing + # the broader DAC_OVERRIDE that also bypasses write checks. + cap_add: + - CHOWN + - DAC_READ_SEARCH + command: ["--datadir", "/app/config", "--host", "0.0.0.0", "--port", "${BACKEND_PORT:-1158}"] + environment: + LIBERSHARE_MEMTRACE: ${LIBERSHARE_MEMTRACE:-0} + LIBERSHARE_STORAGE_ROOT: /app/storage + # Read by `--healthcheck` self-flag so the probe targets the same port as + # the live API server when BACKEND_PORT is overridden via .env. + BACKEND_PORT: ${BACKEND_PORT:-1158} + # Optional shared secret. If set, every WebSocket and REST request must + # carry `?token=` (the frontend proxy forwards the URL untouched, + # so the browser-side WsClient handles it). Empty default preserves the + # current "no auth" behaviour for upgraders. + LISH_TOKEN: ${LISH_TOKEN:-} + healthcheck: + test: ["CMD", "/app/lish-backend", "--healthcheck"] + interval: 10s + timeout: 3s + retries: 5 + start_period: 20s + logging: + driver: json-file + options: + max-size: ${LOG_MAX_SIZE:-10m} + max-file: ${LOG_MAX_FILE:-3} + # Backend WebSocket/API is reached by the frontend container over the + # internal Docker network, not by the host browser, so by default we bind + # the host-side publication to localhost (`127.0.0.1`). Set + # `BACKEND_BIND=0.0.0.0` to expose the API to the LAN (e.g. for the CLI + # client or a non-Docker frontend on another host). The libp2p TCP port + # always listens on every interface — peers must reach it from outside. + ports: + - "${BACKEND_BIND:-127.0.0.1}:${BACKEND_PORT:-1158}:${BACKEND_PORT:-1158}" + - "9091:9090" + volumes: + - ${LIBERSHARE_CONFIG_SOURCE:-./config}:/app/config + - ${LIBERSHARE_STORAGE_SOURCE:-./storage}:/app/storage + tmpfs: + - /tmp + networks: + - libershare-net + + frontend: + build: + context: .. 
+ dockerfile: docker/frontend.Dockerfile + image: docker-libershare-frontend:local + container_name: libershare-frontend + restart: unless-stopped + init: true + read_only: true + security_opt: + - no-new-privileges:true + cap_drop: + - ALL + # Granted back so frontend-entrypoint.sh can chown the bind-mounted + # certs dir before openssl writes the self-signed cert/key. Same + # rationale (and same minimal cap set) as the backend service above. + cap_add: + - CHOWN + - DAC_READ_SEARCH + environment: + BACKEND_WS_URL: ws://backend:${BACKEND_PORT:-1158} + TLS_CERT_DIR: /app/certs + TLS_CERT_FILE: ${TLS_CERT_FILE:-/app/certs/pubkey.pem} + TLS_KEY_FILE: ${TLS_KEY_FILE:-/app/certs/privkey.pem} + TLS_CERT_SAN: ${TLS_CERT_SAN:-DNS:localhost,IP:127.0.0.1} + logging: + driver: json-file + options: + max-size: ${LOG_MAX_SIZE:-10m} + max-file: ${LOG_MAX_FILE:-3} + ports: + - "6003:6003" + depends_on: + backend: + condition: service_healthy + volumes: + - ${TLS_CERT_SOURCE:-./certs}:/app/certs + tmpfs: + - /tmp + networks: + - libershare-net + +networks: + libershare-net: + name: libershare-net diff --git a/docker/frontend-entrypoint.sh b/docker/frontend-entrypoint.sh new file mode 100644 index 00000000..7f16b1e7 --- /dev/null +++ b/docker/frontend-entrypoint.sh @@ -0,0 +1,39 @@ +#!/bin/sh +set -eu + +cert_dir="${TLS_CERT_DIR:-/app/certs}" +key_file="${TLS_KEY_FILE:-$cert_dir/privkey.pem}" +cert_file="${TLS_CERT_FILE:-$cert_dir/pubkey.pem}" +cert_days="${TLS_CERT_DAYS:-3650}" +cert_subject="${TLS_CERT_SUBJECT:-/CN=libershare.local}" +cert_san="${TLS_CERT_SAN:-DNS:localhost,IP:127.0.0.1}" + +# Repair ownership of the bind-mounted certs dir so openssl below can write +# the freshly generated private key and certificate. Without this, a host +# `mkdir ./certs` performed by an unprivileged user (UID != 0) would fail +# with EACCES inside the container — the `cap_drop: ALL` setup strips +# CAP_DAC_OVERRIDE, so root inside the container cannot bypass DAC. 
The +# CAP_CHOWN granted back via compose lets this chown succeed without +# re-introducing CAP_DAC_OVERRIDE. +if [ -d "$cert_dir" ]; then + chown -R 0:0 "$cert_dir" 2>/dev/null || true +fi + +if [ ! -s "$key_file" ] || [ ! -s "$cert_file" ]; then + mkdir -p "$cert_dir" + openssl req \ + -x509 \ + -nodes \ + -newkey rsa:2048 \ + -sha256 \ + -days "$cert_days" \ + -keyout "$key_file" \ + -out "$cert_file" \ + -subj "$cert_subject" \ + -addext "subjectAltName=$cert_san" +fi + +export TLS_KEY_FILE="$key_file" +export TLS_CERT_FILE="$cert_file" + +exec bun frontend-server.ts diff --git a/docker/frontend-server.ts b/docker/frontend-server.ts new file mode 100644 index 00000000..f8dd71b0 --- /dev/null +++ b/docker/frontend-server.ts @@ -0,0 +1,169 @@ +import { join } from 'node:path'; + +const root = '/app/build'; +const port = Number(process.env['PORT'] ?? 6003); +const backendWsUrl = process.env['BACKEND_WS_URL']; +const keyFile = process.env['TLS_KEY_FILE']; +const certFile = process.env['TLS_CERT_FILE']; +const tlsEnabled = Boolean(keyFile && certFile); + +if (!backendWsUrl) throw new Error('BACKEND_WS_URL is required'); + +const contentTypes: Record<string, string> = { + '.css': 'text/css; charset=utf-8', + '.html': 'text/html; charset=utf-8', + '.ico': 'image/x-icon', + '.js': 'application/javascript; charset=utf-8', + '.json': 'application/json; charset=utf-8', + '.svg': 'image/svg+xml', + '.txt': 'text/plain; charset=utf-8', + '.webp': 'image/webp', +}; + +function contentType(path: string): string | undefined { + const dot = path.lastIndexOf('.'); + return dot >= 0 ? contentTypes[path.slice(dot).toLowerCase()] : undefined; +} + +function fileForPath(pathname: string): string { + const decoded = decodeURIComponent(pathname); + const parts = decoded.split('/').filter(part => part && part !== '.' && part !== '..'); + return parts.length > 0 ? 
join(root, ...parts) : join(root, 'index.html'); +} + +type ClientData = { + upstream?: WebSocket; + pending: Array<string | Uint8Array>; + closed: boolean; + reconnectAttempt: number; + reconnectTimer?: ReturnType<typeof setTimeout>; + /** + * Upstream URL with the client's original query string preserved so the + * backend sees `?token=…` (and any future query params) when + * authentication is enabled. Computed at upgrade time and reused on + * every reconnect attempt. + */ + upstreamUrl: string; +}; + +/** + * Build the upstream WebSocket URL for one client connection by copying any + * query params from the incoming `/ws` URL onto the configured + * `BACKEND_WS_URL`. The backend's `isAuthorized` middleware reads `?token=…` + * from the URL it receives, so without this the token a client carries on + * `wss://frontend/ws?token=…` would be lost at the proxy boundary. + */ +function buildUpstreamUrl(clientUrl: URL): string { + const upstream = new URL(backendWsUrl!); + for (const [k, v] of clientUrl.searchParams) upstream.searchParams.set(k, v); + return upstream.toString(); +} + +const MAX_PENDING_BYTES = 1 * 1024 * 1024; // 1 MiB cap so a long backend outage does not exhaust container memory +const MAX_RECONNECT_DELAY_MS = 5000; +const BASE_RECONNECT_DELAY_MS = 250; +// Log a single warning after this many consecutive upstream-reconnect attempts +// fail; reconnects continue silently afterwards so a tab left open over a +// weekend doesn't fill the proxy log with retries. +const RECONNECT_WARN_AFTER_ATTEMPTS = 10; + +function pendingByteSize(pending: ClientData['pending']): number { + let total = 0; + for (const m of pending) total += typeof m === 'string' ? 
Buffer.byteLength(m, 'utf8') : m.byteLength; + return total; +} + +// Open (or re-open) the proxy's upstream backend socket for one client +// connection: replays messages buffered while the upstream was down and +// schedules exponential-backoff reconnects when the upstream drops. +function connectUpstream(ws: import('bun').ServerWebSocket<ClientData>): void { + if (ws.data.closed) return; + const upstream = new WebSocket(ws.data.upstreamUrl); + ws.data.upstream = upstream; + upstream.onopen = () => { + ws.data.reconnectAttempt = 0; + for (const message of ws.data.pending.splice(0)) upstream.send(message); + }; + upstream.onmessage = event => { + if (ws.readyState === WebSocket.OPEN) ws.send(event.data); + }; + // Both `onclose` and `onerror` point at handleDrop, and a failed connection + // fires `error` followed by `close` — the `dropped` guard ensures exactly + // one reconnect timer is scheduled per upstream socket. Without it each + // drop would schedule two timers (only one tracked in `reconnectTimer`), + // doubling the number of concurrent reconnect attempts every cycle and + // double-incrementing the backoff counter. + let dropped = false; + const handleDrop = (): void => { + if (dropped || ws.data.closed) return; + dropped = true; + // Exponential backoff capped at MAX_RECONNECT_DELAY_MS. The browser + // tab stays connected to this proxy while we retry the upstream — so + // a backend rolling restart no longer forces every open page to + // reload to recover its WebSocket session. + const attempt = ws.data.reconnectAttempt++; + if (attempt === RECONNECT_WARN_AFTER_ATTEMPTS) { + console.warn(`[proxy] upstream still unreachable after ${attempt} attempts; will keep retrying every ${MAX_RECONNECT_DELAY_MS}ms`); + } + const delay = Math.min(MAX_RECONNECT_DELAY_MS, BASE_RECONNECT_DELAY_MS * 2 ** attempt); + ws.data.reconnectTimer = setTimeout(() => connectUpstream(ws), delay); + }; + upstream.onclose = handleDrop; + upstream.onerror = handleDrop; +} + +Bun.serve({ + port, + tls: tlsEnabled + ? { + key: Bun.file(keyFile!), + cert: Bun.file(certFile!), + } + : undefined, + async fetch(request, server) { + const url = new URL(request.url); + if (url.pathname === '/ws') { + const upgraded = server.upgrade(request, { + data: { pending: [], closed: false, reconnectAttempt: 0, upstreamUrl: buildUpstreamUrl(url) }, + }); + if (upgraded) return undefined; + return new Response('Expected WebSocket', { status: 400 }); + } + + const filePath = fileForPath(url.pathname); + let file = Bun.file(filePath); + + if (!(await file.exists())) { + file = Bun.file(join(root, 'index.html')); + } + + return new Response(file, { + headers: contentType(file.name ?? filePath) ? 
{ 'content-type': contentType(file.name ?? filePath)! } : undefined, + }); + }, + websocket: { + open(ws) { + connectUpstream(ws); + }, + message(ws, message) { + const upstream = ws.data.upstream; + if (upstream?.readyState === WebSocket.OPEN) { + upstream.send(message); + return; + } + // Buffer messages while upstream is reconnecting. The LISH protocol + // is stateful (subscribe → receive events) — silently dropping the + // oldest queued message would let the subscribe handshake disappear + // while later events survive, leaving the FE wired to a topic the + // backend never registered. Closing the client with a non-normal + // code instead forces the browser-side WsClient to reconnect and + // re-run its full handshake from scratch. + ws.data.pending.push(message); + if (pendingByteSize(ws.data.pending) > MAX_PENDING_BYTES) { + console.warn(`[proxy] pending queue exceeded ${MAX_PENDING_BYTES} bytes during upstream outage; closing client to force re-handshake`); + ws.data.pending.length = 0; + ws.close(1011, 'upstream backlog overflow'); + } + }, + close(ws) { + ws.data.closed = true; + if (ws.data.reconnectTimer) { + clearTimeout(ws.data.reconnectTimer); + ws.data.reconnectTimer = undefined; + } + ws.data.upstream?.close(); + }, + }, +}); + +const protocol = tlsEnabled ? 
'https' : 'http'; +console.log(`LiberShare frontend listening on ${protocol}://0.0.0.0:${port}`); diff --git a/docker/frontend.Dockerfile b/docker/frontend.Dockerfile new file mode 100644 index 00000000..d2357e5d --- /dev/null +++ b/docker/frontend.Dockerfile @@ -0,0 +1,31 @@ +# syntax=docker/dockerfile:1 + +FROM oven/bun:1.3.13-debian AS build + +WORKDIR /src + +COPY frontend/package.json frontend/bun.lock ./frontend/ +COPY shared/package.json ./shared/ + +WORKDIR /src/frontend +RUN bun install --frozen-lockfile + +WORKDIR /src +COPY frontend ./frontend +COPY shared ./shared + +WORKDIR /src/frontend +RUN bun --bun run build + +FROM oven/bun:1.3.13-alpine AS runtime + +RUN apk add --no-cache openssl + +WORKDIR /app +COPY --from=build /src/frontend/build ./build +COPY docker/frontend-server.ts ./frontend-server.ts +COPY docker/frontend-entrypoint.sh ./frontend-entrypoint.sh +RUN chmod 0755 ./frontend-entrypoint.sh + +EXPOSE 6003/tcp +ENTRYPOINT ["./frontend-entrypoint.sh"] diff --git a/docker/storage/.gitignore b/docker/storage/.gitignore new file mode 100644 index 00000000..d6b7ef32 --- /dev/null +++ b/docker/storage/.gitignore @@ -0,0 +1,2 @@ +* +!.gitignore diff --git a/frontend/src/scripts/api-url.ts b/frontend/src/scripts/api-url.ts new file mode 100644 index 00000000..82c17202 --- /dev/null +++ b/frontend/src/scripts/api-url.ts @@ -0,0 +1,46 @@ +import { DEFAULT_API_URL } from '@shared'; + +type ApiWindow = { + location: { + protocol: string; + host: string; + search: string; + }; + __BACKEND_PORT__?: number | string; +}; + +type ApiUrlOptions = { + window?: ApiWindow | undefined; + viteBackendUrl?: string | undefined; + dev?: boolean | undefined; +}; + +function viteBackendUrl(): string | undefined { + return (import.meta as { env?: Record<string, string> }).env?.['VITE_BACKEND_URL']; +} + +function viteDev(): boolean { + return Boolean((import.meta as { env?: { DEV?: boolean } }).env?.DEV); +} + +export function getAPIURL(options: ApiUrlOptions = {}): string { + const 
browserWindow = options.window ?? (typeof window !== 'undefined' ? (window as unknown as ApiWindow) : undefined); + const configuredBackendUrl = options.viteBackendUrl ?? viteBackendUrl(); + const isDev = options.dev ?? viteDev(); + + if (browserWindow) { + // URL param override for multi-node dev testing (e.g. ?backend=ws://localhost:1159) + if (isDev) { + const param = new URLSearchParams(browserWindow.location.search).get('backend'); + if (param) return param; + } + // When running inside Tauri, the backend port is passed via initialization script. + if (browserWindow.__BACKEND_PORT__) return `ws://localhost:${browserWindow.__BACKEND_PORT__}`; + if (!configuredBackendUrl) { + const protocol = browserWindow.location.protocol === 'https:' ? 'wss:' : 'ws:'; + return `${protocol}//${browserWindow.location.host}/ws`; + } + } + + return configuredBackendUrl || DEFAULT_API_URL; +} diff --git a/frontend/src/scripts/ws-client.ts b/frontend/src/scripts/ws-client.ts index 627f114b..44cbc2a7 100644 --- a/frontend/src/scripts/ws-client.ts +++ b/frontend/src/scripts/ws-client.ts @@ -1,7 +1,8 @@ import { get, writable } from 'svelte/store'; -import { WsClient, DEFAULT_API_URL } from '@shared'; +import { WsClient } from '@shared'; import { addNotification } from './notifications.ts'; import { tt } from './language.ts'; +import { getAPIURL } from './api-url.ts'; export type BackendConnectionStatus = 'connecting' | 'connected' | 'disconnected' | 'auth-required' | 'auth-failed'; @@ -12,19 +13,6 @@ interface BackendStatusResponse { error?: string; } -function getAPIURL(): string { - if (typeof window !== 'undefined') { - // URL param override for multi-node dev testing (e.g. 
?backend=ws://localhost:1159) - if (import.meta.env.DEV) { - const param = new URLSearchParams(window.location.search).get('backend'); - if (param) return param; - } - // When running inside Tauri, the backend port is passed via initialization script - if ((window as any).__BACKEND_PORT__) return `ws://localhost:${(window as any).__BACKEND_PORT__}`; - } - return import.meta.env['VITE_BACKEND_URL'] || DEFAULT_API_URL; -} - function getInitialBackendToken(): string { const envToken = import.meta.env['VITE_LISH_TOKEN']; if (typeof envToken === 'string' && envToken) return envToken; diff --git a/frontend/tests/api-url.test.ts b/frontend/tests/api-url.test.ts new file mode 100644 index 00000000..a709f7af --- /dev/null +++ b/frontend/tests/api-url.test.ts @@ -0,0 +1,34 @@ +import { describe, expect, test } from 'bun:test'; +import { getAPIURL } from '../src/scripts/api-url.ts'; + +function browserWindow(protocol: string, host: string, search = '') { + return { + location: { + protocol, + host, + search, + }, + }; +} + +describe('getAPIURL', () => { + test('uses same-origin WSS endpoint for HTTPS static frontend', () => { + expect(getAPIURL({ window: browserWindow('https:', '192.168.2.9:6003') })).toBe('wss://192.168.2.9:6003/ws'); + }); + + test('uses same-origin WS endpoint for HTTP static frontend', () => { + expect(getAPIURL({ window: browserWindow('http:', 'localhost:6003') })).toBe('ws://localhost:6003/ws'); + }); + + test('uses the Tauri injected backend port before static fallback', () => { + expect(getAPIURL({ window: { ...browserWindow('https:', 'app.local:6003'), __BACKEND_PORT__: 23145 } })).toBe('ws://localhost:23145'); + }); + + test('keeps the dev backend query override', () => { + expect(getAPIURL({ window: browserWindow('https:', 'localhost:6003', '?backend=ws://localhost:1159'), dev: true })).toBe('ws://localhost:1159'); + }); + + test('uses VITE_BACKEND_URL when configured', () => { + expect(getAPIURL({ window: browserWindow('https:', 'localhost:6003'), 
viteBackendUrl: 'wss://api.example/ws' })).toBe('wss://api.example/ws'); + }); +}); diff --git a/frontend/tsconfig.json b/frontend/tsconfig.json index 0d8ff9fb..2a5e688e 100644 --- a/frontend/tsconfig.json +++ b/frontend/tsconfig.json @@ -21,7 +21,8 @@ "verbatimModuleSyntax": true, "moduleResolution": "bundler", "allowImportingTsExtensions": true - } + }, + "exclude": ["tests/**/*.test.ts"] // Path aliases are handled by https://svelte.dev/docs/kit/configuration#alias // except $lib which is handled by https://svelte.dev/docs/kit/configuration#files // diff --git a/frontend/vite.config.ts b/frontend/vite.config.ts index 13ad9f23..1f12b4bb 100644 --- a/frontend/vite.config.ts +++ b/frontend/vite.config.ts @@ -6,6 +6,10 @@ import path from 'path'; import { fileURLToPath } from 'url'; const __dirname = path.dirname(fileURLToPath(import.meta.url)); +function getBackendProxyTarget(): string { + return process.env['VITE_BACKEND_URL'] || 'ws://localhost:1158'; +} + function getCommitHash(): string { try { return execSync('git rev-parse --short HEAD').toString().trim(); @@ -67,6 +71,12 @@ export default defineConfig({ allowedHosts: true, host: true, port: 6003, + proxy: { + '/ws': { + target: getBackendProxyTarget(), + ws: true, + }, + }, fs: { allow: [__dirname, path.resolve(__dirname, '..')], },