diff --git a/.gitignore b/.gitignore
index 9c58933ad..bbff03815 100644
--- a/.gitignore
+++ b/.gitignore
@@ -68,6 +68,7 @@ src/qt/interchained-qt.includes
 *.rej
 *.orig
 *.pyc
+__pycache__/
 *.o
 *.o-*
 *.a
diff --git a/ambassadors/interchained-node-operator-portal/README.md b/ambassadors/interchained-node-operator-portal/README.md
new file mode 100644
index 000000000..0f4519c08
--- /dev/null
+++ b/ambassadors/interchained-node-operator-portal/README.md
@@ -0,0 +1,97 @@
+# Interchained Operator Control Plane
+
+An enterprise-grade SaaS portal for managing Interchained node operators. The
+platform combines a FastAPI backend, Redis DB 6 telemetry store, and a Next.js
+frontend to deliver RBAC, analytics, automated rewards, and compliance tooling
+for distributed infrastructure teams.
+
+## Backend
+
+* Framework: FastAPI
+* Data store: Redis DB 6
+* Location: `backend/`
+
+### Highlights
+
+* **Multi-tenant RBAC** – Super admins provision organisations, org admins manage
+  operators/auditors through invite workflows, and all access is tokenised.
+* **Node lifecycle management** – Register, tag, and flag nodes per
+  organisation. Background monitors capture uptime, latency, and block height
+  every 60 seconds.
+* **Reward distribution** – Daily payouts weight uptime scores with service plan
+  multipliers, taper shares for P2P-only seed nodes, and persist
+  organisation-level history plus lifetime accruals.
+* **Compliance exports** – Org and super admins can download CSV reward ledgers
+  with wallet destinations, daily shares, and pending balances for payout ops.
+* **Analytics & billing** – API endpoints expose fleet metrics (MRR, plan
+  distribution, uptime trends) and tenant billing summaries.
+* **Audit logging** – Immutable audit events for invitations, role updates,
+  organisation changes, and node flagging.
+
+### Running locally
+
+```bash
+cd backend
+python -m venv .venv
+source .venv/bin/activate
+pip install -r requirements.txt
+uvicorn main:app --reload
+```
+
+Set `PORTAL_REDIS_URL` if Redis is not running at `redis://localhost:6379/6`.
+
+## Frontend
+
+* Framework: Next.js + TailwindCSS
+* Location: `frontend/`
+
+### Key screens
+
+```
+frontend/
+├─ pages/
+│  ├─ index.js             # Marketing / landing
+│  ├─ login.js             # Auth & invite flow
+│  ├─ dashboard.js         # Executive overview
+│  ├─ nodes.js             # Node inventory & provisioning
+│  ├─ rewards.js           # Reward analytics
+│  └─ admin/
+│     ├─ audit.js          # Audit trail browser
+│     ├─ billing.js        # Billing summary
+│     ├─ organizations.js  # Super-admin tenant management
+│     └─ users.js          # Team access & invites
+├─ components/
+│  ├─ layout/AdminShell.js
+│  ├─ navigation/SidebarNav.js
+│  ├─ cards/MetricCard.js
+│  ├─ charts/UptimeTrend.js
+│  ├─ tables/AuditTable.js
+│  ├─ tables/OrganizationTable.js
+│  ├─ NodeTable.js
+│  └─ RewardGraph.js
+└─ lib/api.js
+```
+
+### Running locally
+
+```bash
+cd frontend
+npm install
+npm run dev
+```
+
+The frontend targets `http://localhost:8000/api/v1` by default. Override with
+`NEXT_PUBLIC_API_BASE` when deploying. Both tiers share the same Redis instance
+for telemetry and session storage.
+
+## Architectural Notes
+
+* **Lifespan-managed services** – `NodeMonitor` and `RewardDistributor` start
+  and stop with FastAPI, ensuring clean shutdown of background tasks.
+* **Analytics service layer** – `services/analytics.py` aggregates plan
+  distribution, uptime averages, and billing information for dashboards.
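+
+  A minimal sketch of the same aggregation, computed straight from the
+  telemetry keys the backend maintains (`node:index` and `uptime:{node_id}`;
+  the helper below is illustrative, not part of the service layer):
+
+  ```python
+  import asyncio
+
+  import redis.asyncio as aioredis
+
+
+  async def average_uptime(url: str = "redis://localhost:6379/6") -> float:
+      """Average uptime_score across all registered nodes."""
+      r = aioredis.from_url(url, decode_responses=True)
+      scores = []
+      for node_id in await r.smembers("node:index"):
+          raw = await r.hget(f"uptime:{node_id}", "uptime_score")
+          if raw is not None:
+              scores.append(float(raw))
+      await r.aclose()
+      return sum(scores) / len(scores) if scores else 0.0
+
+
+  print(asyncio.run(average_uptime()))
+  ```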
+* **Storage layout** – Redis hashes and sets are namespaced per organisation
+  (`org:{id}:*`) to simplify tenant isolation and analytics fan-out.
+* **Frontend auth context** – `AuthProvider` persists bearer sessions, injects
+  tokens into API calls, and gates protected routes via `AdminShell`.
+
diff --git a/ambassadors/interchained-node-operator-portal/backend/__init__.py b/ambassadors/interchained-node-operator-portal/backend/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/ambassadors/interchained-node-operator-portal/backend/auth.py b/ambassadors/interchained-node-operator-portal/backend/auth.py
new file mode 100644
index 000000000..45638e8f1
--- /dev/null
+++ b/ambassadors/interchained-node-operator-portal/backend/auth.py
@@ -0,0 +1,316 @@
+"""Authentication utilities for the Node Operator portal."""
+from __future__ import annotations
+
+import json
+from datetime import datetime, timedelta
+from typing import Any, Optional
+
+from fastapi import Depends, HTTPException, status
+from fastapi.security import HTTPAuthorizationCredentials, HTTPBearer
+from passlib.context import CryptContext
+
+from .config import get_settings
+from .models import (
+    AdminUserCreate,
+    InviteCreate,
+    InvitePublic,
+    OrganizationCreate,
+    ServicePlanTier,
+    SessionToken,
+    UserCreate,
+    UserPublic,
+    UserRole,
+)
+from .services.audit import record_audit_event
+from .services.organizations import create_organization, get_organization
+from .utils.ids import random_token
+from .utils.redis_client import get_redis
+
+pwd_context = CryptContext(schemes=["bcrypt"], deprecated="auto")
+security = HTTPBearer(auto_error=False)
+
+
+async def get_user(email: str) -> Optional[dict[str, str]]:
+    redis = await get_redis()
+    user_key = f"users:{email}"
+    user = await redis.hgetall(user_key)
+    return user or None
+
+
+async def create_user(payload: UserCreate) -> UserPublic:
+    """Register a user using an invite or bootstrap the first super admin."""
+
+    redis = await get_redis()
+    existing = await redis.hgetall(f"users:{payload.email}")
+    if existing:
+        raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail="User already exists")
+
+    invite_details = await _consume_invite(payload.invite_code) if payload.invite_code else None
+    total_users = int(await redis.get("meta:user_count") or 0)
+
+    if invite_details:
+        organization_id = invite_details["organization_id"]
+        role = UserRole(invite_details["role"])
+    elif total_users == 0:
+        org = await create_organization(
+            OrganizationCreate(name="Interchained Core", billing_email=payload.email, plan=ServicePlanTier.ENTERPRISE),
+            owner_email=payload.email,
+        )
+        organization_id = org.id
+        role = UserRole.SUPER_ADMIN
+    else:
+        raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail="Registration requires an invitation")
+
+    created_at = datetime.utcnow()
+    await redis.hset(
+        f"users:{payload.email}",
+        mapping={
+            "email": payload.email,
+            "password": pwd_context.hash(payload.password),
+            "full_name": payload.full_name,
+            "created_at": created_at.isoformat(),
+            "organization_id": organization_id,
+            "role": role.value,
+            "is_active": 1,
+        },
+    )
+    await redis.sadd(f"org:{organization_id}:members", payload.email)
+    await redis.incr("meta:user_count")
+    await record_audit_event(
+        actor_email=payload.email,
+        action="user.registered",
+        organization_id=organization_id,
+        metadata={"role": role.value},
+    )
+    return UserPublic(
+        email=payload.email,
+        full_name=payload.full_name,
+        organization_id=organization_id,
+        role=role,
created_at=created_at, + ) + + +async def create_admin_user(payload: AdminUserCreate, actor: UserPublic) -> UserPublic: + redis = await get_redis() + organization = await get_organization(payload.organization_id) + if not organization: + raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="Organization not found") + if actor.role != UserRole.SUPER_ADMIN and payload.organization_id != actor.organization_id: + raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail="Cannot manage another organization") + if actor.role != UserRole.SUPER_ADMIN and payload.role == UserRole.SUPER_ADMIN: + raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail="Only super admins can grant super admin access") + if await redis.exists(f"users:{payload.email}"): + raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail="User already exists") + created_at = datetime.utcnow() + await redis.hset( + f"users:{payload.email}", + mapping={ + "email": payload.email, + "password": pwd_context.hash(payload.password), + "full_name": payload.full_name, + "created_at": created_at.isoformat(), + "organization_id": payload.organization_id, + "role": payload.role.value, + "is_active": 1, + }, + ) + await redis.sadd(f"org:{payload.organization_id}:members", payload.email) + await redis.incr("meta:user_count") + await record_audit_event( + actor_email=actor.email, + action="user.invited", + organization_id=payload.organization_id, + target=payload.email, + metadata={"role": payload.role.value}, + ) + return UserPublic( + email=payload.email, + full_name=payload.full_name, + organization_id=payload.organization_id, + role=payload.role, + created_at=created_at, + ) + + +async def list_org_users(organization_id: str) -> list[UserPublic]: + redis = await get_redis() + members = await redis.smembers(f"org:{organization_id}:members") + results: list[UserPublic] = [] + for email in members: + data = await redis.hgetall(f"users:{email}") + if not data or not int(data.get("is_active", 1)): + continue + results.append(_deserialize_user(data)) + return sorted(results, key=lambda u: u.created_at) + + +async def update_user_role(email: str, role: UserRole, actor: UserPublic) -> UserPublic: + redis = await get_redis() + user_data = await redis.hgetall(f"users:{email}") + if not user_data: + raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="User not found") + if actor.role != UserRole.SUPER_ADMIN and user_data.get("organization_id") != actor.organization_id: + raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail="Cannot manage another organization") + if actor.role != UserRole.SUPER_ADMIN and role == UserRole.SUPER_ADMIN: + raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail="Only super admins can grant super admin access") + await redis.hset(f"users:{email}", mapping={"role": role.value}) + await record_audit_event( + actor_email=actor.email, + action="user.role.updated", + organization_id=user_data.get("organization_id"), + target=email, + metadata={"role": role.value}, + ) + user_data["role"] = role.value + return _deserialize_user(user_data) + + +async def deactivate_user(email: str, actor: UserPublic) -> None: + redis = await get_redis() + user_key = f"users:{email}" + user_data = await redis.hgetall(user_key) + if not user_data: + raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="User not found") + if actor.role != UserRole.SUPER_ADMIN and user_data.get("organization_id") != actor.organization_id: + raise 
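The password handling above relies on passlib's `CryptContext`; here is a self-contained sketch of the same hash/verify round trip (the sample passphrase is illustrative and satisfies the 12-character minimum that `UserCreate` enforces):

```python
from passlib.context import CryptContext

# Same configuration as backend/auth.py.
pwd_context = CryptContext(schemes=["bcrypt"], deprecated="auto")

hashed = pwd_context.hash("correct horse battery staple")
assert pwd_context.verify("correct horse battery staple", hashed)
assert not pwd_context.verify("wrong password", hashed)
print(hashed[:7])  # bcrypt hashes are self-describing: $2b$12$...
```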
HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail="Cannot manage another organization") + await redis.hset(user_key, mapping={"is_active": 0}) + await redis.srem(f"org:{user_data['organization_id']}:members", email) + await record_audit_event( + actor_email=actor.email, + action="user.deactivated", + organization_id=user_data.get("organization_id"), + target=email, + ) + + +async def authenticate_user(email: str, password: str) -> Optional[UserPublic]: + user = await get_user(email) + if not user or not int(user.get("is_active", 1)): + return None + if not pwd_context.verify(password, user.get("password", "")): + return None + return _deserialize_user(user) + + +async def create_session_token(user: UserPublic) -> SessionToken: + redis = await get_redis() + settings = get_settings() + token = random_token() + payload = { + "email": user.email, + "role": user.role.value, + "organization_id": user.organization_id, + } + await redis.setex(f"sessions:{token}", settings.session_ttl_seconds, json.dumps(payload)) + return SessionToken(access_token=token, expires_in=settings.session_ttl_seconds, user=user) + + +async def resolve_token(credentials: Optional[HTTPAuthorizationCredentials] = Depends(security)) -> UserPublic: + if credentials is None or credentials.scheme.lower() != "bearer": + raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED, detail="Missing credentials") + redis = await get_redis() + raw = await redis.get(f"sessions:{credentials.credentials}") + if not raw: + raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED, detail="Invalid token") + payload = json.loads(raw) + user = await get_user(payload["email"]) + if not user: + raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED, detail="Inactive user") + return _deserialize_user(user) + + +async def get_current_user(user: UserPublic = Depends(resolve_token)) -> UserPublic: + return user + + +async def create_invite(payload: InviteCreate, actor: UserPublic) -> InvitePublic: + redis = await get_redis() + organization = await get_organization(payload.organization_id) + if not organization: + raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="Organization not found") + if actor.role != UserRole.SUPER_ADMIN and organization.id != actor.organization_id: + raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail="Cannot invite to another organization") + if actor.role != UserRole.SUPER_ADMIN and payload.role == UserRole.SUPER_ADMIN: + raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail="Only super admins can invite super admins") + code = random_token(20) + expires_at = datetime.utcnow() + timedelta(hours=payload.expires_in_hours) + await redis.hset( + f"invites:{code}", + mapping={ + "code": code, + "organization_id": payload.organization_id, + "role": payload.role.value, + "expires_at": expires_at.isoformat(), + "created_by": actor.email, + "note": payload.note or "", + }, + ) + await redis.sadd("invites:index", code) + await record_audit_event( + actor_email=actor.email, + action="invite.created", + organization_id=payload.organization_id, + metadata={"code": code, "role": payload.role.value}, + ) + return InvitePublic( + code=code, + organization_id=payload.organization_id, + role=payload.role, + expires_at=expires_at, + created_by=actor.email, + note=payload.note, + ) + + +async def list_invites(organization_id: str) -> list[InvitePublic]: + redis = await get_redis() + codes = await redis.smembers("invites:index") + invites: list[InvitePublic] = [] + for code in 
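Sessions here are opaque random tokens stored under `sessions:{token}` with a TTL, so expiry and revocation are plain Redis operations rather than JWT logic. The same pattern in isolation (the helper names are ours, not the module's):

```python
import json
import secrets

import redis.asyncio as aioredis


async def issue_session(r: aioredis.Redis, email: str, ttl_seconds: int) -> str:
    # Opaque token: no claims inside, nothing to forge offline.
    token = secrets.token_urlsafe(32)
    await r.setex(f"sessions:{token}", ttl_seconds, json.dumps({"email": email}))
    return token


async def resolve_session(r: aioredis.Redis, token: str) -> dict | None:
    raw = await r.get(f"sessions:{token}")
    return json.loads(raw) if raw else None  # None once the TTL lapses
```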
codes: + data = await redis.hgetall(f"invites:{code}") + if not data or data.get("organization_id") != organization_id: + continue + expires_at = datetime.fromisoformat(data["expires_at"]) + if expires_at < datetime.utcnow(): + await redis.delete(f"invites:{code}") + await redis.srem("invites:index", code) + continue + invites.append( + InvitePublic( + code=data["code"], + organization_id=data["organization_id"], + role=UserRole(data["role"]), + expires_at=expires_at, + created_by=data["created_by"], + note=data.get("note") or None, + ) + ) + return invites + + +async def _consume_invite(code: str | None) -> dict[str, Any] | None: + if not code: + return None + redis = await get_redis() + data = await redis.hgetall(f"invites:{code}") + if not data: + raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail="Invalid invitation code") + expires_at = datetime.fromisoformat(data["expires_at"]) + if expires_at < datetime.utcnow(): + await redis.delete(f"invites:{code}") + await redis.srem("invites:index", code) + raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail="Invitation expired") + await redis.delete(f"invites:{code}") + await redis.srem("invites:index", code) + return data + + +def _deserialize_user(data: dict[str, str]) -> UserPublic: + return UserPublic( + email=data["email"], + full_name=data.get("full_name", data["email"]), + organization_id=data.get("organization_id", ""), + role=UserRole(data.get("role", UserRole.OPERATOR.value)), + created_at=datetime.fromisoformat(data["created_at"]), + ) diff --git a/ambassadors/interchained-node-operator-portal/backend/config.py b/ambassadors/interchained-node-operator-portal/backend/config.py new file mode 100644 index 000000000..452c54a08 --- /dev/null +++ b/ambassadors/interchained-node-operator-portal/backend/config.py @@ -0,0 +1,31 @@ +"""Application configuration via environment variables.""" +from __future__ import annotations + +from functools import lru_cache +from typing import List + +from pydantic import AnyUrl, Field +from pydantic_settings import BaseSettings, SettingsConfigDict + + +class Settings(BaseSettings): + """Strongly typed application settings loaded from the environment.""" + + app_name: str = "Interchained Operator Control Plane" + environment: str = Field(default="development", description="Deployment environment name") + redis_url: AnyUrl = Field(default="redis://localhost:6379/6", description="Redis connection URL") + session_ttl_seconds: int = Field(default=60 * 60 * 24 * 7, ge=3600) + monitor_interval_seconds: int = Field(default=60, ge=15) + reward_distribution_hour_utc: int = Field(default=0, ge=0, le=23) + audit_log_retention_days: int = Field(default=90, ge=7) + allowed_cors_origins: List[str] = Field(default_factory=lambda: ["*"]) + default_plan: str = Field(default="enterprise") + + model_config = SettingsConfigDict(env_prefix="PORTAL_", env_file=".env", extra="ignore") + + +@lru_cache(maxsize=1) +def get_settings() -> Settings: + """Return cached application settings.""" + + return Settings() diff --git a/ambassadors/interchained-node-operator-portal/backend/dependencies.py b/ambassadors/interchained-node-operator-portal/backend/dependencies.py new file mode 100644 index 000000000..535d7caa8 --- /dev/null +++ b/ambassadors/interchained-node-operator-portal/backend/dependencies.py @@ -0,0 +1,34 @@ +"""FastAPI dependency helpers.""" +from __future__ import annotations + +from collections.abc import Callable + +from fastapi import Depends, HTTPException, status + +from .auth import 
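`config.py` above reads every field from the environment with a `PORTAL_` prefix, which is what makes the `PORTAL_REDIS_URL` override in the README work. A stripped-down illustration (`DemoSettings` is hypothetical and mirrors the real `Settings`):

```python
import os

from pydantic import Field
from pydantic_settings import BaseSettings, SettingsConfigDict


class DemoSettings(BaseSettings):
    redis_url: str = Field(default="redis://localhost:6379/6")

    model_config = SettingsConfigDict(env_prefix="PORTAL_")


os.environ["PORTAL_REDIS_URL"] = "redis://redis.internal:6379/6"
print(DemoSettings().redis_url)  # -> redis://redis.internal:6379/6
```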
get_current_user +from .models import UserPublic, UserRole + + +def require_roles(*roles: UserRole) -> Callable[[UserPublic], UserPublic]: + """FastAPI dependency enforcing that the current user has one of the roles.""" + + allowed_roles: set[UserRole] = set(roles) + + async def dependency(current_user: UserPublic = Depends(get_current_user)) -> UserPublic: + if current_user.role not in allowed_roles: + raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail="Insufficient permissions") + return current_user + + return dependency + + +def require_super_admin() -> Callable[[UserPublic], UserPublic]: + """Shortcut dependency for super-admin protected endpoints.""" + + return require_roles(UserRole.SUPER_ADMIN) + + +def require_org_admin() -> Callable[[UserPublic], UserPublic]: + """Shortcut dependency for org-admin and super-admin roles.""" + + return require_roles(UserRole.SUPER_ADMIN, UserRole.ORG_ADMIN) diff --git a/ambassadors/interchained-node-operator-portal/backend/main.py b/ambassadors/interchained-node-operator-portal/backend/main.py new file mode 100644 index 000000000..e9b7ac918 --- /dev/null +++ b/ambassadors/interchained-node-operator-portal/backend/main.py @@ -0,0 +1,54 @@ +"""FastAPI entry point for the Node Operator Rewards Portal backend.""" +from __future__ import annotations + +import asyncio +from contextlib import asynccontextmanager +from typing import AsyncIterator + +from fastapi import FastAPI +from fastapi.middleware.cors import CORSMiddleware + +from .config import get_settings +from .node_monitor import NodeMonitor +from .rewards import RewardDistributor +from .routes import admin, analytics, audit, nodes, rewards, users +from .utils.redis_client import close_redis + + +@asynccontextmanager +async def lifespan(_: FastAPI) -> AsyncIterator[None]: + monitor = NodeMonitor() + rewards_job = RewardDistributor() + await asyncio.gather(monitor.start(), rewards_job.start()) + try: + yield + finally: + await asyncio.gather(monitor.stop(), rewards_job.stop()) + await close_redis() + + +settings = get_settings() + +app = FastAPI(title=settings.app_name, lifespan=lifespan) + +app.add_middleware( + CORSMiddleware, + allow_origins=settings.allowed_cors_origins, + allow_credentials=True, + allow_methods=["*"], + allow_headers=["*"], +) + +api_prefix = "/api/v1" + +app.include_router(users.router, prefix=api_prefix) +app.include_router(admin.router, prefix=api_prefix) +app.include_router(nodes.router, prefix=api_prefix) +app.include_router(rewards.router, prefix=api_prefix) +app.include_router(analytics.router, prefix=api_prefix) +app.include_router(audit.router, prefix=api_prefix) + + +@app.get("/") +async def root() -> dict[str, str]: + return {"status": "ok", "environment": settings.environment} diff --git a/ambassadors/interchained-node-operator-portal/backend/models.py b/ambassadors/interchained-node-operator-portal/backend/models.py new file mode 100644 index 000000000..de5f1f45a --- /dev/null +++ b/ambassadors/interchained-node-operator-portal/backend/models.py @@ -0,0 +1,214 @@ +"""Pydantic models used across the FastAPI application.""" +from __future__ import annotations + +from datetime import datetime +from enum import Enum +from typing import Any, Optional + +from pydantic import BaseModel, EmailStr, Field, HttpUrl + + +class UserRole(str, Enum): + """Supported authorization roles.""" + + SUPER_ADMIN = "super_admin" + ORG_ADMIN = "org_admin" + OPERATOR = "operator" + AUDITOR = "auditor" + + +class ServicePlanTier(str, Enum): + """Service plan tiers for tenant 
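`require_roles` builds a dependency closure, so routers can gate handlers declaratively; the admin, analytics, and audit routes later in this PR consume it exactly this way. A hypothetical endpoint for illustration (the `/reports` route does not exist in this PR):

```python
from fastapi import APIRouter, Depends

# Real imports from this codebase; only the route itself is hypothetical.
from backend.dependencies import require_roles
from backend.models import UserPublic, UserRole

router = APIRouter(prefix="/reports", tags=["reports"])


@router.get("")
async def list_reports(
    current_user: UserPublic = Depends(require_roles(UserRole.ORG_ADMIN, UserRole.AUDITOR)),
) -> dict[str, str]:
    # Anyone outside the two allowed roles receives a 403 before this body runs.
    return {"requested_by": current_user.email}
```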
organisations.""" + + LAUNCH = "launch" + GROWTH = "growth" + ENTERPRISE = "enterprise" + + +class FeatureSettings(BaseModel): + """Feature toggles that can be attached to an organisation.""" + + realtime_alerting: bool = True + automated_payouts: bool = False + ai_insights: bool = False + gamified_badges: bool = True + compliance_reporting: bool = True + unlimited_seats: bool = False + + +class OrganizationCreate(BaseModel): + name: str = Field(min_length=2, max_length=120) + billing_email: EmailStr + plan: ServicePlanTier = ServicePlanTier.ENTERPRISE + feature_overrides: Optional[FeatureSettings] = None + slug: Optional[str] = Field(default=None, description="Custom slug for vanity URLs") + + +class Organization(BaseModel): + id: str + name: str + slug: str + billing_email: EmailStr + plan: ServicePlanTier + features: FeatureSettings + created_at: datetime + updated_at: datetime + owner_email: EmailStr + is_active: bool = True + + +class OrganizationUpdate(BaseModel): + name: Optional[str] = None + billing_email: Optional[EmailStr] = None + plan: Optional[ServicePlanTier] = None + feature_overrides: Optional[FeatureSettings] = None + is_active: Optional[bool] = None + + +class UserCreate(BaseModel): + email: EmailStr + password: str = Field(min_length=12) + full_name: str = Field(min_length=2) + invite_code: Optional[str] = Field(default=None, description="Invitation code when registering") + + +class AdminUserCreate(BaseModel): + email: EmailStr + password: str = Field(min_length=12) + full_name: str + role: UserRole = UserRole.OPERATOR + organization_id: str + + +class UserLogin(BaseModel): + email: EmailStr + password: str + + +class UserPublic(BaseModel): + email: EmailStr + full_name: str + organization_id: str + role: UserRole + created_at: datetime + + +class SessionToken(BaseModel): + access_token: str + token_type: str = "bearer" + expires_in: int + user: UserPublic + + +class InviteCreate(BaseModel): + organization_id: str + role: UserRole = UserRole.OPERATOR + expires_in_hours: int = Field(default=72, ge=1, le=24 * 14) + note: Optional[str] = None + + +class InvitePublic(BaseModel): + code: str + organization_id: str + role: UserRole + expires_at: datetime + created_by: EmailStr + note: Optional[str] = None + + +class NodeRegistration(BaseModel): + name: str = Field(min_length=3, max_length=80) + p2p_address: str = Field(description="IP or hostname with port, e.g. 
node.example:18080") + rpc_url: HttpUrl + wallet_address: str + owner_email: Optional[EmailStr] = None + tags: list[str] = Field(default_factory=list) + + +class NodeUpdate(BaseModel): + name: Optional[str] = None + p2p_address: Optional[str] = None + rpc_url: Optional[HttpUrl] = None + wallet_address: Optional[str] = None + owner_email: Optional[EmailStr] = None + tags: Optional[list[str]] = None + is_flagged: Optional[bool] = None + + +class NodeStatus(BaseModel): + id: str + organization_id: str + name: str + p2p_address: str + rpc_url: HttpUrl + wallet_address: str + owner_email: Optional[EmailStr] = None + tags: list[str] = Field(default_factory=list) + last_seen: Optional[datetime] = None + uptime_score: float = 0.0 + total_checks: int = 0 + successful_checks: int = 0 + latency_ms: Optional[float] = None + block_height: Optional[int] = None + is_flagged: bool = False + p2p_online: bool = False + rpc_responding: bool = False + fully_online: bool = False + + +class RewardSummary(BaseModel): + date: datetime + rewards: dict[str, float] + pool_balance: float + + +class RewardHistoryItem(BaseModel): + date: datetime + amount: float + node_id: str + + +class RewardHistory(BaseModel): + organization_id: str + history: list[RewardHistoryItem] + + +class PoolBalance(BaseModel): + balance: float + + +class PoolTopUpRequest(BaseModel): + amount: float = Field(gt=0, description="Amount to add to the reward pool") + + +class AuditEvent(BaseModel): + id: str + actor_email: EmailStr + organization_id: Optional[str] + action: str + target: Optional[str] = None + metadata: dict[str, Any] = Field(default_factory=dict) + created_at: datetime + + +class MetricTimeseriesPoint(BaseModel): + timestamp: datetime + value: float + + +class AdminDashboardMetrics(BaseModel): + total_active_nodes: int + avg_uptime: float + flagged_nodes: int + mrr: float + plan_distribution: dict[str, int] + uptime_timeseries: list[MetricTimeseriesPoint] + + +class BillingSummary(BaseModel): + organization_id: str + plan: ServicePlanTier + monthly_cost: float + included_nodes: int + additional_node_price: float + current_month_usage: int diff --git a/ambassadors/interchained-node-operator-portal/backend/node_monitor.py b/ambassadors/interchained-node-operator-portal/backend/node_monitor.py new file mode 100644 index 000000000..ba995ddad --- /dev/null +++ b/ambassadors/interchained-node-operator-portal/backend/node_monitor.py @@ -0,0 +1,81 @@ +"""Background task that periodically checks node health metrics.""" +from __future__ import annotations + +import asyncio +import json +from datetime import datetime, timedelta +from typing import Any + +from .config import get_settings +from .utils.bitcoin_rpc import check_node_health +from .utils.redis_client import get_redis + + +class NodeMonitor: + """Continuously check registered nodes and persist uptime metrics.""" + + def __init__(self, interval_seconds: int | None = None) -> None: + settings = get_settings() + self._interval = interval_seconds or settings.monitor_interval_seconds + self._task: asyncio.Task[Any] | None = None + self._stop_event = asyncio.Event() + + async def start(self) -> None: + if self._task is None: + self._stop_event.clear() + self._task = asyncio.create_task(self._run()) + + async def stop(self) -> None: + if self._task: + self._stop_event.set() + await self._task + self._task = None + + async def _run(self) -> None: + while not self._stop_event.is_set(): + await self._check_all_nodes() + try: + await asyncio.wait_for(self._stop_event.wait(), 
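The monitor's loop sleeps by awaiting the stop event with a timeout, so `stop()` interrupts the wait immediately instead of blocking shutdown for up to a full interval. The pattern in isolation, runnable on its own:

```python
import asyncio


async def periodic(stop: asyncio.Event, interval: float) -> None:
    while not stop.is_set():
        print("tick")  # stand-in for the real health-check pass
        try:
            await asyncio.wait_for(stop.wait(), timeout=interval)
        except asyncio.TimeoutError:
            continue  # interval elapsed; run the next pass


async def main() -> None:
    stop = asyncio.Event()
    task = asyncio.create_task(periodic(stop, 0.1))
    await asyncio.sleep(0.35)
    stop.set()  # unblocks the wait immediately
    await task


asyncio.run(main())
```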
timeout=self._interval) + except asyncio.TimeoutError: + continue + + async def _check_all_nodes(self) -> None: + redis = await get_redis() + node_ids = await redis.smembers("node:index") + now = datetime.utcnow() + for node_id in node_ids: + node = await redis.hgetall(f"node:{node_id}") + if not node: + continue + health = await check_node_health(node.get("p2p_address", ""), node.get("rpc_url", "")) + stats_key = f"uptime:{node_id}" + total_checks = await redis.hincrby(stats_key, "total_checks", 1) + if health.is_online: + successful_checks = await redis.hincrby(stats_key, "successful_checks", 1) + else: + successful_checks = int(await redis.hget(stats_key, "successful_checks") or 0) + uptime_score = successful_checks / total_checks if total_checks else 0.0 + await redis.hset( + stats_key, + mapping={ + "total_checks": total_checks, + "successful_checks": successful_checks, + "uptime_score": uptime_score, + "last_seen": now.isoformat(), + "latency_ms": health.latency_ms, + "block_height": health.block_height or 0, + "rpc_responding": int(health.rpc_responding), + "p2p_online": int(health.p2p_online), + "fully_online": int(health.is_fully_online), + }, + ) + timeseries_entry = json.dumps( + { + "timestamp": now.isoformat(), + "node_id": node_id, + "uptime": uptime_score, + } + ) + await redis.zadd("metrics:uptime:global", {timeseries_entry: now.timestamp()}) + cutoff = (datetime.utcnow() - timedelta(days=14)).timestamp() + await redis.zremrangebyscore("metrics:uptime:global", 0, cutoff) diff --git a/ambassadors/interchained-node-operator-portal/backend/requirements.txt b/ambassadors/interchained-node-operator-portal/backend/requirements.txt new file mode 100644 index 000000000..796540bb1 --- /dev/null +++ b/ambassadors/interchained-node-operator-portal/backend/requirements.txt @@ -0,0 +1,8 @@ +fastapi==0.110.0 +uvicorn[standard]==0.27.1 +redis==5.0.4 +httpx==0.27.0 +passlib[bcrypt]==1.7.4 +python-multipart==0.0.9 +pydantic-settings==2.2.1 +python-slugify==8.0.4 diff --git a/ambassadors/interchained-node-operator-portal/backend/rewards.py b/ambassadors/interchained-node-operator-portal/backend/rewards.py new file mode 100644 index 000000000..e6bbd1a9a --- /dev/null +++ b/ambassadors/interchained-node-operator-portal/backend/rewards.py @@ -0,0 +1,134 @@ +"""Reward distribution utilities.""" +from __future__ import annotations + +import asyncio +from datetime import datetime, timedelta +from typing import Any + +from .config import get_settings +from .models import ServicePlanTier +from .utils.redis_client import get_redis + + +REWARD_PREFIX = "rewards:" +REWARD_HISTORY_PREFIX = "rewards:history:" +PLAN_MULTIPLIER = { + ServicePlanTier.LAUNCH: 1.0, + ServicePlanTier.GROWTH: 1.15, + ServicePlanTier.ENTERPRISE: 1.3, +} + +P2P_ONLY_REWARD_FACTOR = 0.6 + + +async def distribute_daily_rewards(date: datetime | None = None) -> dict[str, float]: + redis = await get_redis() + snapshot_date = (date or datetime.utcnow()).date() + date_key = f"{REWARD_PREFIX}{snapshot_date.isoformat()}" + + total_pool_raw = await redis.get("pool:balance") + total_pool = float(total_pool_raw or 0) + if total_pool <= 0: + return {} + + node_ids = await redis.smembers("node:index") + active_nodes: list[tuple[str, float, str]] = [] + for node_id in node_ids: + stats = await redis.hgetall(f"uptime:{node_id}") + score = float(stats.get("uptime_score", 0)) + if score < 0.9: + continue + node = await redis.hgetall(f"node:{node_id}") + if not node: + continue + rpc_responding = bool(int(stats.get("rpc_responding", "0") or 0)) + 
p2p_online = bool(int(stats.get("p2p_online", "0") or 0)) + if not p2p_online: + continue + org_id = node.get("organization_id") or "" + plan_value = await redis.hget(f"org:{org_id}", "plan") or ServicePlanTier.LAUNCH.value + try: + plan_enum = ServicePlanTier(plan_value) + except ValueError: + plan_enum = ServicePlanTier.LAUNCH + multiplier = PLAN_MULTIPLIER.get(plan_enum, 1.0) + interface_multiplier = 1.0 if rpc_responding else P2P_ONLY_REWARD_FACTOR + weight = score * multiplier * interface_multiplier + active_nodes.append((node_id, weight, org_id)) + + if not active_nodes: + return {} + + total_weight = sum(weight for _, weight, _ in active_nodes) + if total_weight == 0: + return {} + + rewards: dict[str, float] = {} + org_rewards: dict[str, float] = {} + for node_id, weight, org_id in active_nodes: + share = (weight / total_weight) * total_pool + rewards[node_id] = round(share, 8) + org_rewards[org_id] = org_rewards.get(org_id, 0.0) + share + + if rewards: + await redis.hset(date_key, mapping={node_id: str(amount) for node_id, amount in rewards.items()}) + await redis.set("pool:balance", 0) + for node_id, amount in rewards.items(): + node = await redis.hgetall(f"node:{node_id}") + org_id = node.get("organization_id", "") + ledger_key = f"node:{node_id}:rewards" + await redis.hincrbyfloat(ledger_key, "pending", amount) + await redis.hincrbyfloat(ledger_key, "lifetime", amount) + await redis.hset( + ledger_key, + mapping={ + "last_share": str(amount), + "last_rewarded_at": snapshot_date.isoformat(), + }, + ) + await redis.lpush( + f"{REWARD_HISTORY_PREFIX}{org_id}", + f"{snapshot_date.isoformat()}:{amount}:{node_id}", + ) + for org_id, total_amount in org_rewards.items(): + await redis.hincrbyfloat(f"org:{org_id}:rewards", "lifetime", total_amount) + await redis.hset(f"org:{org_id}:rewards", mapping={"last_payout": snapshot_date.isoformat()}) + return rewards + + +class RewardDistributor: + """Background job that triggers a distribution once every 24 hours.""" + + def __init__(self, run_at_hour: int | None = None, run_at_minute: int = 5) -> None: + settings = get_settings() + self._run_at_hour = run_at_hour if run_at_hour is not None else settings.reward_distribution_hour_utc + self._run_at_minute = run_at_minute + self._task: asyncio.Task[Any] | None = None + self._stop_event = asyncio.Event() + + async def start(self) -> None: + if self._task is None: + self._stop_event.clear() + self._task = asyncio.create_task(self._loop()) + + async def stop(self) -> None: + if self._task: + self._stop_event.set() + await self._task + self._task = None + + async def _loop(self) -> None: + while not self._stop_event.is_set(): + seconds_until_run = self._seconds_until_next_run() + try: + await asyncio.wait_for(self._stop_event.wait(), timeout=seconds_until_run) + continue + except asyncio.TimeoutError: + await distribute_daily_rewards() + + def _seconds_until_next_run(self) -> float: + now = datetime.utcnow() + next_run = now.replace(hour=self._run_at_hour, minute=self._run_at_minute, second=0, microsecond=0) + if next_run <= now: + next_run = next_run + timedelta(days=1) + return (next_run - now).total_seconds() diff --git a/ambassadors/interchained-node-operator-portal/backend/routes/__init__.py b/ambassadors/interchained-node-operator-portal/backend/routes/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/ambassadors/interchained-node-operator-portal/backend/routes/admin.py b/ambassadors/interchained-node-operator-portal/backend/routes/admin.py new file mode 100644 index 
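To make the weighting concrete: each node's weight is `uptime_score × plan multiplier × interface multiplier`, and its payout is its weight's share of the pool. A worked example with the constants from this module (the node data is illustrative):

```python
PLAN_MULTIPLIER = {"launch": 1.0, "growth": 1.15, "enterprise": 1.3}
P2P_ONLY_REWARD_FACTOR = 0.6

pool = 100.0
nodes = [
    # (node_id, uptime_score, plan, rpc_responding)
    ("node-a", 0.99, "enterprise", True),
    ("node-b", 0.95, "launch", True),
    ("node-c", 0.92, "launch", False),  # P2P-only seed node, tapered share
]

weights = {
    node_id: score * PLAN_MULTIPLIER[plan] * (1.0 if rpc else P2P_ONLY_REWARD_FACTOR)
    for node_id, score, plan, rpc in nodes
}
total = sum(weights.values())
shares = {node_id: round(w / total * pool, 8) for node_id, w in weights.items()}
print(shares)  # node-a earns the largest share; node-c is tapered by 0.6
```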
000000000..113e01737 --- /dev/null +++ b/ambassadors/interchained-node-operator-portal/backend/routes/admin.py @@ -0,0 +1,91 @@ +"""Administrative API endpoints for the control panel.""" +from __future__ import annotations + +from fastapi import APIRouter, Depends, HTTPException, status + +from .. import auth +from ..dependencies import require_org_admin, require_super_admin +from ..models import ( + AdminUserCreate, + Organization, + OrganizationCreate, + OrganizationUpdate, + UserPublic, + UserRole, +) +from ..services import organizations +from ..services.audit import record_audit_event + + +router = APIRouter(prefix="/admin", tags=["admin"]) + + +@router.get("/organizations", response_model=list[Organization]) +async def list_organizations_route(_: UserPublic = Depends(require_super_admin())) -> list[Organization]: + return await organizations.list_organizations() + + +@router.post("/organizations", response_model=Organization, status_code=status.HTTP_201_CREATED) +async def create_organization_route( + payload: OrganizationCreate, + current_user: UserPublic = Depends(require_super_admin()), +) -> Organization: + org = await organizations.create_organization(payload, owner_email=current_user.email) + await record_audit_event(actor_email=current_user.email, action="organization.created", organization_id=org.id) + return org + + +@router.patch("/organizations/{organization_id}", response_model=Organization) +async def update_organization_route( + organization_id: str, + payload: OrganizationUpdate, + current_user: UserPublic = Depends(require_super_admin()), +) -> Organization: + org = await organizations.update_organization(organization_id, payload) + if not org: + raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="Organization not found") + changed_fields = list(payload.model_dump(exclude_none=True).keys()) + await record_audit_event( + actor_email=current_user.email, + action="organization.updated", + organization_id=org.id, + metadata={"fields": changed_fields}, + ) + return org + + +@router.post("/users", response_model=UserPublic, status_code=status.HTTP_201_CREATED) +async def create_user_route( + payload: AdminUserCreate, + current_user: UserPublic = Depends(require_org_admin()), +) -> UserPublic: + if current_user.role != UserRole.SUPER_ADMIN and payload.organization_id != current_user.organization_id: + raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail="Cannot manage another organization") + return await auth.create_admin_user(payload, current_user) + + +@router.get("/organizations/{organization_id}/users", response_model=list[UserPublic]) +async def list_org_users_route( + organization_id: str, + current_user: UserPublic = Depends(require_org_admin()), +) -> list[UserPublic]: + if current_user.role != UserRole.SUPER_ADMIN and current_user.organization_id != organization_id: + raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail="Cannot view another organization") + return await auth.list_org_users(organization_id) + + +@router.post("/users/{email}/role", response_model=UserPublic) +async def update_user_role_route( + email: str, + role: UserRole, + current_user: UserPublic = Depends(require_org_admin()), +) -> UserPublic: + return await auth.update_user_role(email, role, current_user) + + +@router.post("/users/{email}/deactivate", status_code=status.HTTP_204_NO_CONTENT) +async def deactivate_user_route( + email: str, + current_user: UserPublic = Depends(require_org_admin()), +) -> None: + await auth.deactivate_user(email, 
current_user) diff --git a/ambassadors/interchained-node-operator-portal/backend/routes/analytics.py b/ambassadors/interchained-node-operator-portal/backend/routes/analytics.py new file mode 100644 index 000000000..5f9fddf52 --- /dev/null +++ b/ambassadors/interchained-node-operator-portal/backend/routes/analytics.py @@ -0,0 +1,26 @@ +"""Analytics API endpoints.""" +from __future__ import annotations + +from fastapi import APIRouter, Depends, HTTPException, status + +from ..dependencies import require_org_admin, require_super_admin +from ..models import AdminDashboardMetrics, BillingSummary, UserPublic, UserRole +from ..services import analytics + + +router = APIRouter(prefix="/analytics", tags=["analytics"]) + + +@router.get("/dashboard", response_model=AdminDashboardMetrics) +async def get_dashboard_metrics(_: UserPublic = Depends(require_super_admin())) -> AdminDashboardMetrics: + return await analytics.get_dashboard_metrics() + + +@router.get("/billing/{organization_id}", response_model=BillingSummary) +async def get_billing_summary( + organization_id: str, + current_user: UserPublic = Depends(require_org_admin()), +) -> BillingSummary: + if current_user.role != UserRole.SUPER_ADMIN and current_user.organization_id != organization_id: + raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail="Cannot access another organization") + return await analytics.get_billing_summary(organization_id) diff --git a/ambassadors/interchained-node-operator-portal/backend/routes/audit.py b/ambassadors/interchained-node-operator-portal/backend/routes/audit.py new file mode 100644 index 000000000..7ab013bee --- /dev/null +++ b/ambassadors/interchained-node-operator-portal/backend/routes/audit.py @@ -0,0 +1,21 @@ +"""Audit trail endpoints.""" +from __future__ import annotations + +from fastapi import APIRouter, Depends + +from ..dependencies import require_org_admin +from ..models import AuditEvent, UserPublic, UserRole +from ..services.audit import fetch_audit_events + + +router = APIRouter(prefix="/audit", tags=["audit"]) + + +@router.get("", response_model=list[AuditEvent]) +async def get_audit_trail( + limit: int = 100, + current_user: UserPublic = Depends(require_org_admin()), +) -> list[AuditEvent]: + if current_user.role == UserRole.SUPER_ADMIN: + return await fetch_audit_events(limit=limit) + return await fetch_audit_events(limit=limit, organization_id=current_user.organization_id) diff --git a/ambassadors/interchained-node-operator-portal/backend/routes/nodes.py b/ambassadors/interchained-node-operator-portal/backend/routes/nodes.py new file mode 100644 index 000000000..ad43bb83e --- /dev/null +++ b/ambassadors/interchained-node-operator-portal/backend/routes/nodes.py @@ -0,0 +1,158 @@ +"""Node management endpoints.""" +from __future__ import annotations + +from datetime import datetime + +from fastapi import APIRouter, Depends, HTTPException, status + +from ..dependencies import require_org_admin, require_roles +from ..models import NodeRegistration, NodeStatus, NodeUpdate, UserPublic, UserRole +from ..services.audit import record_audit_event +from ..utils.ids import short_ulid +from ..utils.redis_client import get_redis + + +router = APIRouter(prefix="/nodes", tags=["nodes"]) + + +NodeWriter = require_roles(UserRole.SUPER_ADMIN, UserRole.ORG_ADMIN, UserRole.OPERATOR) + + +@router.post("", response_model=NodeStatus, status_code=status.HTTP_201_CREATED) +async def create_node( + payload: NodeRegistration, + current_user: UserPublic = Depends(NodeWriter), +) -> NodeStatus: + 
organization_id = current_user.organization_id + owner_email = payload.owner_email or current_user.email + if current_user.role in {UserRole.OPERATOR, UserRole.AUDITOR} and owner_email != current_user.email: + raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail="Cannot assign nodes to other members") + + redis = await get_redis() + node_id = short_ulid("node") + now = datetime.utcnow() + await redis.hset( + f"node:{node_id}", + mapping={ + "id": node_id, + "organization_id": organization_id, + "name": payload.name, + "p2p_address": payload.p2p_address, + "rpc_url": str(payload.rpc_url), + "wallet_address": payload.wallet_address, + "owner_email": owner_email, + "tags": ",".join(payload.tags), + "created_at": now.isoformat(), + "updated_at": now.isoformat(), + "is_flagged": 0, + }, + ) + await redis.sadd("node:index", node_id) + await redis.sadd(f"org:{organization_id}:nodes", node_id) + await redis.sadd(f"user:{owner_email}:nodes", node_id) + await record_audit_event( + actor_email=current_user.email, + action="node.created", + organization_id=organization_id, + target=node_id, + metadata={"name": payload.name}, + ) + return await _build_node_status(node_id) + + +@router.post("/register", response_model=NodeStatus, status_code=status.HTTP_201_CREATED) +async def register_legacy_node( + payload: NodeRegistration, + current_user: UserPublic = Depends(NodeWriter), +) -> NodeStatus: + return await create_node(payload, current_user) + + +@router.get("", response_model=list[NodeStatus]) +async def list_nodes(current_user: UserPublic = Depends(require_org_admin())) -> list[NodeStatus]: + redis = await get_redis() + if current_user.role == UserRole.SUPER_ADMIN: + node_ids = await redis.smembers("node:index") + else: + node_ids = await redis.smembers(f"org:{current_user.organization_id}:nodes") + results: list[NodeStatus] = [] + for node_id in node_ids: + try: + results.append(await _build_node_status(node_id)) + except HTTPException: + continue + return results + + +@router.get("/{node_id}", response_model=NodeStatus) +async def get_node(node_id: str, current_user: UserPublic = Depends(require_org_admin())) -> NodeStatus: + node = await _build_node_status(node_id) + if current_user.role != UserRole.SUPER_ADMIN and node.organization_id != current_user.organization_id: + raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail="Node does not belong to your organization") + return node + + +@router.patch("/{node_id}", response_model=NodeStatus) +async def update_node( + node_id: str, + payload: NodeUpdate, + current_user: UserPublic = Depends(NodeWriter), +) -> NodeStatus: + redis = await get_redis() + node_key = f"node:{node_id}" + node_data = await redis.hgetall(node_key) + if not node_data: + raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="Node not found") + if current_user.role != UserRole.SUPER_ADMIN and node_data.get("organization_id") != current_user.organization_id: + raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail="Node does not belong to your organization") + updates = payload.model_dump(exclude_none=True) + if "tags" in updates: + updates["tags"] = ",".join(updates["tags"]) + if "is_flagged" in updates: + updates["is_flagged"] = 1 if bool(updates["is_flagged"]) else 0 + if updates: + updates["updated_at"] = datetime.utcnow().isoformat() + await redis.hset(node_key, mapping=updates) + if "owner_email" in updates: + await redis.srem(f"user:{node_data.get('owner_email')}:nodes", node_id) + await 
redis.sadd(f"user:{updates['owner_email']}:nodes", node_id) + if "is_flagged" in updates: + await record_audit_event( + actor_email=current_user.email, + action="node.flagged" if updates["is_flagged"] else "node.unflagged", + organization_id=node_data.get("organization_id"), + target=node_id, + ) + return await _build_node_status(node_id) + + +async def _build_node_status(node_id: str) -> NodeStatus: + redis = await get_redis() + node = await redis.hgetall(f"node:{node_id}") + if not node: + raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="Node not found") + stats = await redis.hgetall(f"uptime:{node_id}") + latency_raw = float(stats.get("latency_ms", 0)) if stats.get("latency_ms") else None + if latency_raw is not None and latency_raw < 0: + latency_raw = None + tags = node.get("tags", "") + return NodeStatus( + id=node_id, + organization_id=node.get("organization_id", ""), + name=node.get("name", ""), + p2p_address=node.get("p2p_address", ""), + rpc_url=node.get("rpc_url", ""), + wallet_address=node.get("wallet_address", ""), + owner_email=node.get("owner_email") or None, + tags=[tag for tag in tags.split(",") if tag], + last_seen=datetime.fromisoformat(stats["last_seen"]) if stats.get("last_seen") else None, + uptime_score=float(stats.get("uptime_score", 0.0)), + total_checks=int(stats.get("total_checks", 0)), + successful_checks=int(stats.get("successful_checks", 0)), + latency_ms=latency_raw, + block_height=int(stats.get("block_height", 0)) if stats.get("block_height") else None, + is_flagged=bool(int(node.get("is_flagged", 0))), + p2p_online=bool(int(stats.get("p2p_online", "0") or 0)), + rpc_responding=bool(int(stats.get("rpc_responding", "0") or 0)), + fully_online=bool(int(stats.get("fully_online", "0") or 0)), + ) diff --git a/ambassadors/interchained-node-operator-portal/backend/routes/rewards.py b/ambassadors/interchained-node-operator-portal/backend/routes/rewards.py new file mode 100644 index 000000000..866ba380e --- /dev/null +++ b/ambassadors/interchained-node-operator-portal/backend/routes/rewards.py @@ -0,0 +1,174 @@ +"""Reward data endpoints.""" +from __future__ import annotations + +import csv +from datetime import date, datetime +from io import StringIO + +from fastapi import APIRouter, Depends, HTTPException, Query +from fastapi.responses import StreamingResponse + +from .. 
import auth +from ..dependencies import require_org_admin, require_super_admin +from ..models import ( + PoolBalance, + PoolTopUpRequest, + RewardHistory, + RewardHistoryItem, + RewardSummary, + UserPublic, + UserRole, +) +from ..rewards import distribute_daily_rewards +from ..services.audit import record_audit_event +from ..utils.redis_client import get_redis + + +def _as_float(value: str | None) -> float: + try: + return float(value) if value is not None else 0.0 + except (TypeError, ValueError): + return 0.0 + +router = APIRouter(prefix="/rewards", tags=["rewards"]) + + +@router.post("/run", response_model=RewardSummary) +async def trigger_rewards(_: UserPublic = Depends(require_super_admin())) -> RewardSummary: + rewards = await distribute_daily_rewards() + snapshot = datetime.utcnow() + redis = await get_redis() + pool_balance = float(await redis.get("pool:balance") or 0) + return RewardSummary(date=snapshot, rewards=rewards, pool_balance=pool_balance) + + +@router.post("/pool/top-up", response_model=PoolBalance) +async def top_up_reward_pool( + payload: PoolTopUpRequest, + current_user: UserPublic = Depends(require_super_admin()), +) -> PoolBalance: + redis = await get_redis() + new_balance = await redis.incrbyfloat("pool:balance", payload.amount) + await record_audit_event( + actor_email=current_user.email, + action="rewards.pool.top_up", + metadata={"amount": f"{payload.amount:.8f}", "balance": f"{new_balance:.8f}"}, + ) + return PoolBalance(balance=float(new_balance)) + + +@router.get("/today", response_model=RewardSummary) +async def rewards_today(current_user: UserPublic = Depends(auth.get_current_user)) -> RewardSummary: + today = date.today().isoformat() + redis = await get_redis() + rewards = await redis.hgetall(f"rewards:{today}") + rewards_float: dict[str, float] = {} + for node_id, amount in rewards.items(): + if current_user.role != UserRole.SUPER_ADMIN: + node = await redis.hgetall(f"node:{node_id}") + if node.get("organization_id") != current_user.organization_id: + continue + rewards_float[node_id] = float(amount) + pool_balance = float(await redis.get("pool:balance") or 0) + return RewardSummary(date=datetime.utcnow(), rewards=rewards_float, pool_balance=pool_balance) + + +@router.get("/history", response_model=RewardHistory) +async def reward_history(current_user: UserPublic = Depends(require_org_admin())) -> RewardHistory: + redis = await get_redis() + entries = await redis.lrange(f"rewards:history:{current_user.organization_id}", 0, 50) + history: list[RewardHistoryItem] = [] + for entry in entries: + try: + date_str, amount_str, node_id = entry.split(":", 2) + history.append( + RewardHistoryItem(date=datetime.fromisoformat(date_str), amount=float(amount_str), node_id=node_id) + ) + except ValueError: + continue + return RewardHistory(organization_id=current_user.organization_id, history=list(reversed(history))) + + +@router.get("/export", response_class=StreamingResponse) +async def export_rewards_csv( + report_date: date | None = Query(default=None, alias="date"), + organization_id: str | None = Query(default=None, alias="organizationId"), + current_user: UserPublic = Depends(require_org_admin()), +) -> StreamingResponse: + if organization_id and current_user.role != UserRole.SUPER_ADMIN: + raise HTTPException(status_code=403, detail="Cannot export other organizations") + + target_date = report_date or date.today() + redis = await get_redis() + rewards_key = f"rewards:{target_date.isoformat()}" + reward_map = await redis.hgetall(rewards_key) + + node_ids: 
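The history list consumed above stores each payout as a compact `date:amount:node_id` string, which is why the parser splits on at most two colons. A quick round trip (values illustrative):

```python
from datetime import datetime

entry = "2024-05-01:0.12345678:node_abc123"  # as written by distribute_daily_rewards
date_str, amount_str, node_id = entry.split(":", 2)
print(datetime.fromisoformat(date_str).date(), float(amount_str), node_id)
# -> 2024-05-01 0.12345678 node_abc123
```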
set[str] = set(reward_map.keys()) + if organization_id: + node_ids.update(await redis.smembers(f"org:{organization_id}:nodes")) + elif current_user.role == UserRole.SUPER_ADMIN: + node_ids.update(await redis.smembers("node:index")) + else: + organization_id = current_user.organization_id + node_ids.update(await redis.smembers(f"org:{organization_id}:nodes")) + + rows: list[dict[str, str]] = [] + org_cache: dict[str, dict[str, str]] = {} + + for node_id in sorted(node_ids): + node = await redis.hgetall(f"node:{node_id}") + if not node: + continue + org_id = node.get("organization_id") or "" + if current_user.role != UserRole.SUPER_ADMIN and org_id != current_user.organization_id: + continue + if organization_id and org_id != organization_id: + continue + + ledger = await redis.hgetall(f"node:{node_id}:rewards") + org_data = org_cache.get(org_id) + if org_data is None: + org_data = await redis.hgetall(f"org:{org_id}") if org_id else {} + org_cache[org_id] = org_data + + rows.append( + { + "organization_id": org_id, + "organization_name": org_data.get("name", ""), + "node_id": node_id, + "node_name": node.get("name", ""), + "wallet_address": node.get("wallet_address", ""), + "owner_email": node.get("owner_email", ""), + "today_share": f"{_as_float(reward_map.get(node_id)):.8f}", + "pending_rewards": f"{_as_float(ledger.get('pending')):.8f}", + "lifetime_rewards": f"{_as_float(ledger.get('lifetime')):.8f}", + "last_rewarded_at": ledger.get("last_rewarded_at", ""), + } + ) + + output = StringIO() + fieldnames = [ + "organization_id", + "organization_name", + "node_id", + "node_name", + "wallet_address", + "owner_email", + "today_share", + "pending_rewards", + "lifetime_rewards", + "last_rewarded_at", + ] + writer = csv.DictWriter(output, fieldnames=fieldnames) + writer.writeheader() + writer.writerows(rows) + + filename = f"reward-export-{target_date.isoformat()}" + if organization_id: + filename += f"-{organization_id}" + output.seek(0) + csv_bytes = output.getvalue().encode("utf-8") + headers = { + "Content-Disposition": f'attachment; filename="{filename}.csv"', + } + return StreamingResponse(iter([csv_bytes]), media_type="text/csv", headers=headers) diff --git a/ambassadors/interchained-node-operator-portal/backend/routes/users.py b/ambassadors/interchained-node-operator-portal/backend/routes/users.py new file mode 100644 index 000000000..9d97450a1 --- /dev/null +++ b/ambassadors/interchained-node-operator-portal/backend/routes/users.py @@ -0,0 +1,41 @@ +"""User authentication routes.""" +from __future__ import annotations + +from fastapi import APIRouter, Depends, HTTPException, status + +from .. 
import auth +from ..dependencies import require_org_admin +from ..models import InviteCreate, InvitePublic, SessionToken, UserCreate, UserLogin, UserPublic + +router = APIRouter(prefix="/users", tags=["users"]) + + +@router.post("/register", response_model=UserPublic, status_code=status.HTTP_201_CREATED) +async def register_user(payload: UserCreate) -> UserPublic: + return await auth.create_user(payload) + + +@router.post("/login", response_model=SessionToken) +async def login_user(payload: UserLogin) -> SessionToken: + user = await auth.authenticate_user(payload.email, payload.password) + if not user: + raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED, detail="Invalid credentials") + return await auth.create_session_token(user) + + +@router.get("/me", response_model=UserPublic) +async def get_me(current_user: UserPublic = Depends(auth.get_current_user)) -> UserPublic: + return current_user + + +@router.post("/invites", response_model=InvitePublic) +async def create_invite( + payload: InviteCreate, + current_user: UserPublic = Depends(require_org_admin()), +) -> InvitePublic: + return await auth.create_invite(payload, current_user) + + +@router.get("/invites", response_model=list[InvitePublic]) +async def list_invites(current_user: UserPublic = Depends(require_org_admin())) -> list[InvitePublic]: + return await auth.list_invites(current_user.organization_id) diff --git a/ambassadors/interchained-node-operator-portal/backend/services/__init__.py b/ambassadors/interchained-node-operator-portal/backend/services/__init__.py new file mode 100644 index 000000000..b9182a1a0 --- /dev/null +++ b/ambassadors/interchained-node-operator-portal/backend/services/__init__.py @@ -0,0 +1 @@ +"""Service layer modules for the control plane backend.""" diff --git a/ambassadors/interchained-node-operator-portal/backend/services/analytics.py b/ambassadors/interchained-node-operator-portal/backend/services/analytics.py new file mode 100644 index 000000000..2bb069a20 --- /dev/null +++ b/ambassadors/interchained-node-operator-portal/backend/services/analytics.py @@ -0,0 +1,112 @@ +"""Analytics services supporting the admin dashboard.""" +from __future__ import annotations + +import json +from collections import defaultdict +from datetime import date, datetime, timedelta + +from ..models import AdminDashboardMetrics, BillingSummary, MetricTimeseriesPoint, ServicePlanTier +from ..utils.redis_client import get_redis + + +PLAN_PRICING = { + ServicePlanTier.LAUNCH: {"price": 99.0, "included": 5, "overage": 20.0}, + ServicePlanTier.GROWTH: {"price": 299.0, "included": 20, "overage": 15.0}, + ServicePlanTier.ENTERPRISE: {"price": 799.0, "included": 100, "overage": 10.0}, +} + + +async def get_dashboard_metrics() -> AdminDashboardMetrics: + redis = await get_redis() + node_ids = await redis.smembers("node:index") + total_active_nodes = 0 + uptime_scores: list[float] = [] + flagged_nodes = 0 + + for node_id in node_ids: + node = await redis.hgetall(f"node:{node_id}") + if not node: + continue + stats = await redis.hgetall(f"uptime:{node_id}") + score = float(stats.get("uptime_score", 0.0)) if stats else 0.0 + if score >= 0.9: + total_active_nodes += 1 + uptime_scores.append(score) + if int(node.get("is_flagged", 0)): + flagged_nodes += 1 + + avg_uptime = sum(uptime_scores) / len(uptime_scores) if uptime_scores else 0.0 + + plan_distribution: dict[str, int] = {} + org_ids = await redis.smembers("org:index") + for org_id in org_ids: + raw_plan = await redis.hget(f"org:{org_id}", "plan") or 
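Putting the user routes above together, a typical client session is register (invite-gated), login, then bearer-authenticated calls. A sketch using httpx, which is already a backend dependency (the credentials and base URL are placeholders):

```python
import httpx

BASE = "http://localhost:8000/api/v1"

with httpx.Client(base_url=BASE) as client:
    login = client.post(
        "/users/login",
        json={"email": "admin@example.com", "password": "a-long-passphrase"},
    )
    login.raise_for_status()
    token = login.json()["access_token"]

    me = client.get("/users/me", headers={"Authorization": f"Bearer {token}"})
    print(me.json())  # -> the UserPublic payload for the session holder
```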
ServicePlanTier.LAUNCH.value + try: + plan_enum = ServicePlanTier(raw_plan) + except ValueError: + plan_enum = ServicePlanTier.LAUNCH + plan_distribution[plan_enum.value] = plan_distribution.get(plan_enum.value, 0) + 1 + + mrr = 0.0 + for plan, count in plan_distribution.items(): + try: + plan_enum = ServicePlanTier(plan) + except ValueError: + continue + mrr += PLAN_PRICING.get(plan_enum, {"price": 0})["price"] * count + + now = datetime.utcnow() + window_start = now - timedelta(days=6) + raw_entries = await redis.zrangebyscore("metrics:uptime:global", window_start.timestamp(), "+inf") + buckets: dict[date, list[float]] = defaultdict(list) + for entry in raw_entries: + try: + payload = json.loads(entry) + timestamp = datetime.fromisoformat(payload["timestamp"]) + uptime = float(payload.get("uptime", 0.0)) + except (KeyError, TypeError, ValueError, json.JSONDecodeError): + continue + buckets[timestamp.date()].append(uptime) + + points: list[MetricTimeseriesPoint] = [] + for offset in range(6, -1, -1): + day = (now - timedelta(days=offset)).date() + samples = buckets.get(day, []) + average = sum(samples) / len(samples) if samples else 0.0 + points.append( + MetricTimeseriesPoint( + timestamp=datetime.combine(day, datetime.min.time()), + value=round(average * 100, 2), + ) + ) + + return AdminDashboardMetrics( + total_active_nodes=total_active_nodes, + avg_uptime=round(avg_uptime, 4), + flagged_nodes=flagged_nodes, + mrr=float(mrr), + plan_distribution=plan_distribution, + uptime_timeseries=points, + ) + + +async def get_billing_summary(organization_id: str) -> BillingSummary: + redis = await get_redis() + plan_value = await redis.hget(f"org:{organization_id}", "plan") or ServicePlanTier.LAUNCH.value + try: + plan = ServicePlanTier(plan_value) + except ValueError: + plan = ServicePlanTier.LAUNCH + pricing = PLAN_PRICING[plan] + current_usage = await redis.scard(f"org:{organization_id}:nodes") + included = pricing["included"] + overage_nodes = max(current_usage - included, 0) + monthly_cost = pricing["price"] + overage_nodes * pricing["overage"] + return BillingSummary( + organization_id=organization_id, + plan=plan, + monthly_cost=monthly_cost, + included_nodes=included, + additional_node_price=pricing["overage"], + current_month_usage=current_usage, + ) diff --git a/ambassadors/interchained-node-operator-portal/backend/services/audit.py b/ambassadors/interchained-node-operator-portal/backend/services/audit.py new file mode 100644 index 000000000..8cfb52bb7 --- /dev/null +++ b/ambassadors/interchained-node-operator-portal/backend/services/audit.py @@ -0,0 +1,79 @@ +"""Audit logging services.""" +from __future__ import annotations + +import json +from datetime import datetime, timedelta + +from ..config import get_settings +from ..models import AuditEvent +from ..utils.ids import short_ulid +from ..utils.redis_client import get_redis + + +async def record_audit_event( + *, + actor_email: str, + action: str, + organization_id: str | None = None, + target: str | None = None, + metadata: dict[str, str] | None = None, +) -> AuditEvent: + """Persist an immutable audit event.""" + + redis = await get_redis() + now = datetime.utcnow() + event_id = short_ulid("audit") + event = AuditEvent( + id=event_id, + actor_email=actor_email, + organization_id=organization_id, + action=action, + target=target, + metadata=metadata or {}, + created_at=now, + ) + await redis.hset( + f"audit:{event_id}", + mapping={ + "id": event.id, + "actor_email": event.actor_email, + "organization_id": event.organization_id or 
"", + "action": event.action, + "target": event.target or "", + "metadata": json.dumps(event.metadata), + "created_at": event.created_at.isoformat(), + }, + ) + await redis.zadd("audit:index", {event_id: now.timestamp()}) + await _enforce_retention() + return event + + +async def fetch_audit_events(*, limit: int = 100, organization_id: str | None = None) -> list[AuditEvent]: + redis = await get_redis() + event_ids = await redis.zrevrange("audit:index", 0, limit - 1) + events: list[AuditEvent] = [] + for event_id in event_ids: + data = await redis.hgetall(f"audit:{event_id}") + if not data: + continue + event = AuditEvent( + id=data["id"], + actor_email=data["actor_email"], + organization_id=data["organization_id"] or None, + action=data["action"], + target=data["target"] or None, + metadata=json.loads(data.get("metadata", "{}")), + created_at=datetime.fromisoformat(data["created_at"]), + ) + if organization_id and event.organization_id != organization_id: + continue + events.append(event) + return events + + +async def _enforce_retention() -> None: + settings = get_settings() + redis = await get_redis() + cutoff = datetime.utcnow() - timedelta(days=settings.audit_log_retention_days) + await redis.zremrangebyscore("audit:index", 0, cutoff.timestamp()) diff --git a/ambassadors/interchained-node-operator-portal/backend/services/organizations.py b/ambassadors/interchained-node-operator-portal/backend/services/organizations.py new file mode 100644 index 000000000..497c07428 --- /dev/null +++ b/ambassadors/interchained-node-operator-portal/backend/services/organizations.py @@ -0,0 +1,150 @@ +"""Organisation management services.""" +from __future__ import annotations + +from datetime import datetime + +from slugify import slugify + +from ..models import FeatureSettings, Organization, OrganizationCreate, OrganizationUpdate, ServicePlanTier +from ..utils.ids import short_ulid +from ..utils.redis_client import get_redis + + +def _plan_defaults(plan: ServicePlanTier) -> FeatureSettings: + if plan == ServicePlanTier.LAUNCH: + return FeatureSettings( + realtime_alerting=False, + gamified_badges=True, + compliance_reporting=False, + ) + if plan == ServicePlanTier.GROWTH: + return FeatureSettings( + realtime_alerting=True, + gamified_badges=True, + automated_payouts=False, + ai_insights=False, + ) + return FeatureSettings( + realtime_alerting=True, + automated_payouts=True, + ai_insights=True, + gamified_badges=True, + compliance_reporting=True, + unlimited_seats=True, + ) + + +async def create_organization(payload: OrganizationCreate, owner_email: str) -> Organization: + redis = await get_redis() + org_id = short_ulid("org") + now = datetime.utcnow() + slug = payload.slug or slugify(payload.name) + features = payload.feature_overrides or _plan_defaults(payload.plan) + org_key = f"org:{org_id}" + await redis.hset( + org_key, + mapping={ + "id": org_id, + "name": payload.name, + "slug": slug, + "billing_email": payload.billing_email, + "plan": payload.plan.value, + "features": features.model_dump_json(), + "created_at": now.isoformat(), + "updated_at": now.isoformat(), + "owner_email": owner_email, + "is_active": 1, + }, + ) + await redis.hset("org:slugs", slug, org_id) + await redis.sadd("org:index", org_id) + return Organization( + id=org_id, + name=payload.name, + slug=slug, + billing_email=payload.billing_email, + plan=payload.plan, + features=features, + created_at=now, + updated_at=now, + owner_email=owner_email, + is_active=True, + ) + + +async def get_organization(org_id: str) -> Organization | 
None: + redis = await get_redis() + data = await redis.hgetall(f"org:{org_id}") + if not data: + return None + return _deserialize_org(data) + + +async def get_organization_by_slug(slug: str) -> Organization | None: + redis = await get_redis() + org_id = await redis.hget("org:slugs", slug) + if not org_id: + return None + return await get_organization(org_id) + + +async def update_organization(org_id: str, payload: OrganizationUpdate) -> Organization | None: + redis = await get_redis() + org_key = f"org:{org_id}" + data = await redis.hgetall(org_key) + if not data: + return None + updates: dict[str, str | int] = {} + if payload.name: + updates["name"] = payload.name + new_slug = slugify(payload.name) + current_slug = data.get("slug") + if current_slug and new_slug != current_slug: + await redis.hdel("org:slugs", current_slug) + await redis.hset("org:slugs", new_slug, org_id) + updates["slug"] = new_slug + if payload.billing_email: + updates["billing_email"] = payload.billing_email + if payload.plan: + updates["plan"] = payload.plan.value + if payload.feature_overrides: + updates["features"] = payload.feature_overrides.model_dump_json() + if payload.is_active is not None: + updates["is_active"] = 1 if payload.is_active else 0 + if updates: + updates["updated_at"] = datetime.utcnow().isoformat() + await redis.hset(org_key, mapping=updates) + data.update({k: str(v) for k, v in updates.items()}) + return _deserialize_org(data) + + +async def list_organizations() -> list[Organization]: + redis = await get_redis() + org_ids = await redis.smembers("org:index") + results: list[Organization] = [] + for org_id in org_ids: + data = await redis.hgetall(f"org:{org_id}") + if data: + org = _deserialize_org(data) + results.append(org) + return sorted(results, key=lambda org: org.created_at) + + +async def count_organizations() -> int: + redis = await get_redis() + return await redis.scard("org:index") + + +def _deserialize_org(data: dict[str, str]) -> Organization: + return Organization( + id=data["id"], + name=data["name"], + slug=data["slug"], + billing_email=data["billing_email"], + plan=ServicePlanTier(data.get("plan", ServicePlanTier.LAUNCH.value)), + features=FeatureSettings.model_validate_json(data.get("features", FeatureSettings().model_dump_json())), + created_at=datetime.fromisoformat(data["created_at"]), + updated_at=datetime.fromisoformat(data["updated_at"]), + owner_email=data.get("owner_email", ""), + is_active=bool(int(data.get("is_active", "1"))), + ) diff --git a/ambassadors/interchained-node-operator-portal/backend/utils/__init__.py b/ambassadors/interchained-node-operator-portal/backend/utils/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/ambassadors/interchained-node-operator-portal/backend/utils/bitcoin_rpc.py b/ambassadors/interchained-node-operator-portal/backend/utils/bitcoin_rpc.py new file mode 100644 index 000000000..8a7b1c694 --- /dev/null +++ b/ambassadors/interchained-node-operator-portal/backend/utils/bitcoin_rpc.py @@ -0,0 +1,87 @@ +"""Helpers for performing lightweight node health checks.""" +from __future__ import annotations + +import asyncio +import json +import time +from dataclasses import dataclass +from typing import Optional +from urllib.parse import urlparse + +import httpx + + +@dataclass +class NodeHealth: + is_online: bool + latency_ms: float + block_height: Optional[int] + rpc_responding: bool + p2p_online: bool = False + is_fully_online: bool = False + + +async def _check_tcp_connectivity(host: str, port: int, timeout: float = 2.0) 
-> float: + start = time.perf_counter() + try: + reader, writer = await asyncio.wait_for( + asyncio.open_connection(host, port), timeout=timeout + ) + except (OSError, asyncio.TimeoutError): + return -1.0 + else: + writer.close() + await writer.wait_closed() + return (time.perf_counter() - start) * 1000 + + +async def _fetch_rpc_height(rpc_url: str, timeout: float = 2.0) -> tuple[bool, Optional[int]]: + try: + async with httpx.AsyncClient(timeout=timeout) as client: + response = await client.post( + rpc_url, + json={"jsonrpc": "1.0", "id": "health", "method": "getblockcount", "params": []}, + ) + response.raise_for_status() + payload = response.json() + except (httpx.HTTPError, json.JSONDecodeError): + return False, None + + height = None + if isinstance(payload, dict): + height = payload.get("result") + if isinstance(height, str) and height.isdigit(): + height = int(height) + return True, height if isinstance(height, int) else None + + +async def check_node_health(p2p_address: str, rpc_url: str) -> NodeHealth: + """Check whether a node responds over P2P and RPC interfaces.""" + + latency_ms = -1.0 + rpc_ok = False + block_height: Optional[int] = None + + if ":" in p2p_address: + host, port_str = p2p_address.rsplit(":", 1) + try: + port = int(port_str) + except ValueError: + port = 0 + if host and port: + latency_ms = await _check_tcp_connectivity(host, port) + + parsed = urlparse(rpc_url) + if parsed.scheme and parsed.netloc: + rpc_ok, block_height = await _fetch_rpc_height(rpc_url) + + p2p_ok = latency_ms >= 0 + is_online = p2p_ok + return NodeHealth( + is_online=is_online, + latency_ms=latency_ms if latency_ms >= 0 else -1, + block_height=block_height, + rpc_responding=rpc_ok, + p2p_online=p2p_ok, + is_fully_online=p2p_ok and rpc_ok, + ) diff --git a/ambassadors/interchained-node-operator-portal/backend/utils/ids.py b/ambassadors/interchained-node-operator-portal/backend/utils/ids.py new file mode 100644 index 000000000..764c34a0e --- /dev/null +++ b/ambassadors/interchained-node-operator-portal/backend/utils/ids.py @@ -0,0 +1,20 @@ +"""Helpers for generating human friendly identifiers.""" +from __future__ import annotations + +import secrets +import string +from uuid import uuid4 + + +def short_ulid(prefix: str) -> str: + """Return a short identifier prefixed with the provided namespace.""" + + token = uuid4().hex[:12] + return f"{prefix}_{token}" + + +def random_token(length: int = 32) -> str: + """Generate a secure random token suitable for API keys or invites.""" + + alphabet = string.ascii_letters + string.digits + return "".join(secrets.choice(alphabet) for _ in range(length)) diff --git a/ambassadors/interchained-node-operator-portal/backend/utils/redis_client.py b/ambassadors/interchained-node-operator-portal/backend/utils/redis_client.py new file mode 100644 index 000000000..08c97e632 --- /dev/null +++ b/ambassadors/interchained-node-operator-portal/backend/utils/redis_client.py @@ -0,0 +1,44 @@ +"""Utility helpers for working with Redis connections.""" +from __future__ import annotations + +from functools import lru_cache +from typing import AsyncIterator + +from redis.asyncio import Redis + +from ..config import get_settings + + +@lru_cache +def _build_redis_client() -> Redis: + """Instantiate a Redis client using the configured connection URL.""" + + settings = get_settings() + return Redis.from_url(str(settings.redis_url), decode_responses=True) + + +async def get_redis() -> Redis: + """Return a cached Redis client instance.""" + + return _build_redis_client() + + +async 
def close_redis() -> None: + """Close the cached Redis connection pool, if one has been instantiated.""" + + client = _build_redis_client() + await client.close() + + +async def iter_hash_keys(prefix: str) -> AsyncIterator[str]: + """Iterate over keys that match the provided hash prefix. + + Parameters + ---------- + prefix: + A glob-compatible prefix, e.g. ``"node:*"``. + """ + + client = await get_redis() + async for key in client.scan_iter(match=prefix): + yield key diff --git a/ambassadors/interchained-node-operator-portal/frontend/.eslintrc.json b/ambassadors/interchained-node-operator-portal/frontend/.eslintrc.json new file mode 100644 index 000000000..97a2bb84e --- /dev/null +++ b/ambassadors/interchained-node-operator-portal/frontend/.eslintrc.json @@ -0,0 +1,3 @@ +{ + "extends": ["next", "next/core-web-vitals"] +} diff --git a/ambassadors/interchained-node-operator-portal/frontend/components/GlassContainer.js b/ambassadors/interchained-node-operator-portal/frontend/components/GlassContainer.js new file mode 100644 index 000000000..7d898831a --- /dev/null +++ b/ambassadors/interchained-node-operator-portal/frontend/components/GlassContainer.js @@ -0,0 +1,7 @@ +export default function GlassContainer({ children, className = '' }) { + return ( +
+    <div className={`glass ${className}`}>
+      {/* NOTE: original markup lost in extraction; class names here are illustrative */}
+      {children}
+    </div>
+  );
+}
[frontend/components/cards/MetricCard.js — metric card rendering {title}, {value}, and an optional {footer} caption.]
[frontend/components/NodeTable.js — node inventory table. Empty state: "No nodes registered yet." Columns: Node, RPC URL, Latency, Block Height, Uptime, Tags, Status. Rows render {node.name} with {node.p2p_address} and {node.owner_email || 'Unassigned'}, {node.rpc_url}, {node.latency_ms ? `${node.latency_ms.toFixed(0)} ms` : '–'}, {node.block_height || '–'}, {uptime.toFixed(1)}%, optional #{tag} chips, and an {interfaceLabel} status badge.]
[frontend/components/RewardGraph.js — reward distribution chart. Empty state: "No rewards distributed yet." Legend rows render {label} with a {helper} caption.]
[frontend/components/charts/UptimeTrend.js — seven-day uptime chart. Empty state: "No uptime data available." Recovered data mapping:]
+  const formatted = data.map((point) => ({
+    date: new Date(point.timestamp).toLocaleDateString(),
+    value: Math.round(point.value * 100) / 100,
+  }));
; + } + + const formatted = data.map((point) => ({ + date: new Date(point.timestamp).toLocaleDateString(), + value: Math.round(point.value * 100) / 100, + })); + + return ( +{user.organization_id}
+{user.full_name || user.email}
+{user.role.replace('_', ' ')}
[frontend/components/tables/AuditTable.js — audit trail table. Empty state: "No audit events captured yet." Columns: Timestamp, Actor, Action, Target, Metadata. Rows render {new Date(event.created_at).toLocaleString()}, {event.actor_email}, {event.action}, {event.target || '—'}, and pretty-printed {JSON.stringify(event.metadata, null, 2)} when metadata is non-empty, '—' otherwise.]
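The audit table above renders events produced by `services/audit.py` from earlier in this diff. A minimal sketch of writing and reading events through that service, assuming the backend package imports as `backend`, Redis is reachable at the configured URL, and the actor/organisation values are hypothetical:

```python
import asyncio

from backend.services.audit import fetch_audit_events, record_audit_event


async def main() -> None:
    # Write an immutable event; retention is enforced on every write.
    await record_audit_event(
        actor_email="admin@example.com",  # hypothetical actor
        action="node.flagged",
        organization_id="org_abc123",  # hypothetical organisation id
        metadata={"reason": "latency"},
    )

    # Newest-first read. Note the org filter is applied *after* the fetch,
    # so an org-scoped call can return fewer than `limit` events.
    for event in await fetch_audit_events(limit=10, organization_id="org_abc123"):
        print(event.created_at.isoformat(), event.action, event.metadata)


asyncio.run(main())
```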
[frontend/components/tables/OrganizationTable.js — tenant table. Empty state: "No organizations found." Columns: Name, Plan, Billing Email, Members, Created. Rows render {org.name} with {org.slug}, {org.plan}, {org.billing_email}, {org.member_count ?? '—'}, and {new Date(org.created_at).toLocaleDateString()}.]
[frontend/pages/admin/audit.js — audit trail browser: loading state "Fetching compliance history…", an {error} banner, otherwise the audit table.]
[frontend/pages/admin/billing.js — billing summary: loading state "Calculating utilisation…", an {error} banner, a plan summary captioned "Monthly recurring charge calculated on active node usage.", and the fallback "No billing data available."]
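The caption recovered above matches the pricing rule in `services/analytics.py`: a plan's base price plus per-node overage beyond its included quota. A standalone mirror of that arithmetic with a worked example:

```python
# Plan figures copied from services/analytics.py.
PLAN_PRICING = {
    "launch": {"price": 99.0, "included": 5, "overage": 20.0},
    "growth": {"price": 299.0, "included": 20, "overage": 15.0},
    "enterprise": {"price": 799.0, "included": 100, "overage": 10.0},
}


def monthly_cost(plan: str, active_nodes: int) -> float:
    """Base price plus per-node overage beyond the plan's included quota."""
    pricing = PLAN_PRICING[plan]
    overage_nodes = max(active_nodes - pricing["included"], 0)
    return pricing["price"] + overage_nodes * pricing["overage"]


# A Growth tenant running 26 nodes: 299 + (26 - 20) * 15 = 389 per month.
assert monthly_cost("growth", 26) == 389.0
# Usage at or below the included quota costs the base price alone.
assert monthly_cost("launch", 5) == 99.0
```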
[frontend/pages/admin/organizations.js — super-admin tenant management: guard message "Super admin permissions required.", loading state "Loading tenants…", otherwise the organization table.]
[frontend/pages/admin/users.js — team access & invites: {message} and {error} banners, loading state "Loading access roster…", invite-list empty state "No active invites.", and the recovered copy: "Generate time-bound invites to onboard additional operators or auditors. Invites expire automatically and can be rescinded by regenerating the link."]
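The invite copy above pairs with the `/users/invites` routes near the top of this diff, which are gated by `require_org_admin`. A hedged HTTP sketch with httpx, assuming the README's default API base; the `access_token` field and the `InviteCreate` payload shape are assumptions, as those models are not shown in this excerpt:

```python
import httpx

BASE = "http://localhost:8000/api/v1"  # default base from the README

with httpx.Client(base_url=BASE) as client:
    # Log in as an org admin to obtain a bearer session token.
    login = client.post(
        "/users/login",
        json={"email": "admin@example.com", "password": "s3cret"},  # demo values
    )
    login.raise_for_status()
    token = login.json()["access_token"]  # field name assumed from SessionToken

    headers = {"Authorization": f"Bearer {token}"}

    # Issue an invite; the InviteCreate fields shown here are assumptions.
    created = client.post(
        "/users/invites",
        json={"email": "operator@example.com", "role": "operator"},
        headers=headers,
    )
    created.raise_for_status()
    print("invite:", created.json())

    # Enumerate outstanding invites for the admin's organisation.
    print(client.get("/users/invites", headers=headers).json())
```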
[frontend/pages/dashboard.js — executive overview: {message} and {error} banners, loading state "Synthesising live telemetry…", empty state "No organisations tracked yet."]
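The dashboard's headline figures come from `services/analytics.get_dashboard_metrics`, which scans `node:index`, counts nodes at or above a 0.9 uptime score as active, prices MRR from the plan distribution, and buckets the global uptime zset into seven daily averages. A direct call, assuming the backend package imports as `backend` and Redis is reachable:

```python
import asyncio

from backend.services.analytics import get_dashboard_metrics


async def main() -> None:
    metrics = await get_dashboard_metrics()
    print("active nodes:", metrics.total_active_nodes)
    print("avg uptime:", metrics.avg_uptime)
    print("flagged:", metrics.flagged_nodes)
    print("MRR:", metrics.mrr)
    # Seven daily points, oldest first; values are percentages (2 dp).
    for point in metrics.uptime_timeseries:
        print(point.timestamp.date(), point.value)


asyncio.run(main())
```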
[frontend/pages/index.js — marketing landing: feature cards rendering {item.body} and the hero copy: "Operate mission-critical Interchained infrastructure with SLO-driven monitoring, weighted reward distribution, and enterprise-grade access controls."]
[frontend/pages/nodes.js — node inventory & provisioning: loading state "Querying fleet telemetry…", flagged-nodes empty state "No nodes currently flagged for review.", {message} and {error} banners, and the recovered registration copy: "Register a node with the control plane to start receiving uptime scoring and reward attribution. Tags are optional and help categorise infrastructure (e.g. edge, validator, apac)."]
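Once registered, nodes are probed by `utils/bitcoin_rpc.check_node_health`, which opens a TCP connection to the P2P port and issues a `getblockcount` RPC. A standalone probe using that helper, assuming the backend package imports as `backend`; the addresses are placeholders:

```python
import asyncio

from backend.utils.bitcoin_rpc import check_node_health


async def main() -> None:
    health = await check_node_health(
        p2p_address="203.0.113.10:9333",  # placeholder host:port
        rpc_url="http://203.0.113.10:9332/",  # placeholder RPC endpoint
    )
    # "Online" means the P2P socket accepted a TCP connection; "fully online"
    # additionally requires a getblockcount response over RPC.
    print("p2p:", health.p2p_online, "latency(ms):", health.latency_ms)  # -1 when unreachable
    print("rpc:", health.rpc_responding, "height:", health.block_height)
    print("fully online:", health.is_fully_online)


asyncio.run(main())
```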
[frontend/pages/rewards.js — reward analytics: loading state "Aggregating distribution records…", {error} and {exportError} banners, a "Daily distributions" heading, a CSV export control gated on user?.role === 'SUPER_ADMIN', and the footer "Total pool distributed: {totalToday.toFixed(4)} ITC".]
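The super-admin export control above drives the streaming CSV endpoint whose handler appears near the top of this diff. A hedged download sketch — the route's mount path is not visible in this excerpt, so `/rewards/export` and the query parameters are assumptions:

```python
import httpx

BASE = "http://localhost:8000/api/v1"  # default base from the README
TOKEN = "..."  # bearer token obtained via /users/login

# NOTE: the mount path and query parameters are assumptions; the handler
# filters by an optional organization_id and streams text/csv back.
response = httpx.get(
    f"{BASE}/rewards/export",
    params={"organization_id": "org_abc123"},  # hypothetical organisation id
    headers={"Authorization": f"Bearer {TOKEN}"},
)
response.raise_for_status()

with open("reward-export.csv", "wb") as fh:
    fh.write(response.content)
print(response.headers.get("content-disposition"))  # reward-export-<date>[-org].csv
```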
[Static demo preview (exact page lost): session chip "Demo Admin · Super Admin · org-enterprise"; metric cards Active Nodes ("Nodes responding to orchestration"), Avg Uptime ("Weighted across fleet"), Flagged Nodes ("Requires operator attention"), Rewards Today ("Distributed during last cycle"); weekday uptime bars labelled Mon–Sun.]
[Demo fleet table:]

| Node       | Location      | Uptime | Latency | Status   |
|------------|---------------|--------|---------|----------|
| ic-node-12 | New York, US  | 99.82% | 42 ms   | Healthy  |
| ic-node-44 | Frankfurt, DE | 99.64% | 57 ms   | Boosted  |
| ic-node-08 | Tokyo, JP     | 96.04% | 81 ms   | Flagged  |
| ic-node-31 | São Paulo, BR | 91.24% | 114 ms  | Degraded |