diff --git a/Makefile b/Makefile index d69743f0d..9ae678384 100644 --- a/Makefile +++ b/Makefile @@ -669,3 +669,73 @@ ci: lint test-all test-acceptance docs-validate ## Run full CI pipeline (lint, t pre-commit: fmt lint test ## Run pre-commit checks (format, lint, test) $(call print_green,Pre-commit checks passed) + +##@ Quick Development Iteration + +quick-check: lint-parallel-critical build test-unit-fast ## Quick pre-commit check (critical lint + build + fast unit tests) ~30s + $(call print_green,Quick check complete - ready to commit!) + +test-unit-fast: ## Run only unit tests (no acceptance, no testcontainers) ~10s + $(call print_blue,Running fast unit tests...) + @uv run tooling/parallel_test.py --categories unit,lint --log-dir $(TEST_LOGS_DIR) + $(call print_green,Fast unit tests complete) + +quick-pattern: ## Quick test for current pattern directory (auto-detects pattern) + $(call print_blue,Quick pattern test...) + @if [ -f "go.mod" ]; then \ + go test -v -short -timeout 30s ./...; \ + elif [ -f "../../go.mod" ] && [ -d "cmd" ]; then \ + cd cmd && go build && cd .. && go test -v -short -timeout 30s ./...; \ + else \ + echo "Not in a pattern directory"; \ + exit 1; \ + fi + $(call print_green,Pattern quick test complete) + +quick-run-pattern: ## Quick build and run current pattern runner + $(call print_blue,Building and running pattern...) + @if [ -d "cmd" ]; then \ + RUNNER_NAME=$$(basename $(CURDIR))-runner; \ + echo "Building $$RUNNER_NAME..."; \ + cd cmd/$$RUNNER_NAME && go build -o $(BINARIES_DIR)/$$RUNNER_NAME . && \ + echo "✓ Built: $(BINARIES_DIR)/$$RUNNER_NAME" && \ + echo "Run with: ./$(BINARIES_DIR)/$$RUNNER_NAME --help"; \ + else \ + echo "Not in a pattern directory with cmd/"; \ + exit 1; \ + fi + +test-this: ## Test current directory (auto-detects Go, Rust, or Python) + $(call print_blue,Testing current directory...) 
+ @if [ -f "Cargo.toml" ]; then \ + cargo test; \ + elif [ -f "go.mod" ]; then \ + go test -v -timeout 30s ./...; \ + elif [ -f "pyproject.toml" ]; then \ + uv run pytest; \ + else \ + echo "No recognized test framework in current directory"; \ + exit 1; \ + fi + +build-this: ## Build current directory (auto-detects Go, Rust) + $(call print_blue,Building current directory...) + @if [ -f "Cargo.toml" ]; then \ + cargo build; \ + elif [ -f "go.mod" ]; then \ + go build ./...; \ + elif [ -d "cmd" ] && [ -f "../go.mod" ]; then \ + for d in cmd/*; do \ + if [ -d "$$d" ]; then \ + echo "Building $$d..."; \ + cd $$d && go build && cd ../..; \ + fi \ + done; \ + else \ + echo "No recognized build system in current directory"; \ + exit 1; \ + fi + +quick-verify-pattern: quick-pattern quick-run-pattern ## Verify pattern (test + build + show how to run) + $(call print_green,Pattern verified and ready!) + diff --git a/cmd/pattern-launcher/main.go b/cmd/pattern-launcher/main.go index 7f582be41..7c9396ea5 100644 --- a/cmd/pattern-launcher/main.go +++ b/cmd/pattern-launcher/main.go @@ -20,22 +20,30 @@ import ( ) var ( - grpcPort = flag.Int("grpc-port", 8982, "gRPC server port") - metricsPort = flag.Int("metrics-port", 9092, "Metrics server port") - healthPort = flag.Int("health-port", 9093, "Health server port") - patternsDir = flag.String("patterns-dir", "./patterns", "Patterns directory") - isolationStr = flag.String("isolation", "namespace", "Default isolation level (none, namespace, session)") + grpcPort = flag.Int("grpc-port", 8982, "gRPC server port") + metricsPort = flag.Int("metrics-port", 9092, "Metrics server port") + healthPort = flag.Int("health-port", 9093, "Health server port") + patternsDir = flag.String("patterns-dir", "./patterns", "Patterns directory") + isolationStr = flag.String("isolation", "namespace", "Default isolation level (none, namespace, session)") + adminEndpoint = flag.String("admin-endpoint", "", "Admin control plane endpoint (e.g., 
localhost:8981)") + launcherID = flag.String("launcher-id", "launcher-01", "Unique launcher identifier") + region = flag.String("region", "local", "Deployment region") + maxProcs = flag.Int("max-processes", 20, "Maximum concurrent processes") ) func main() { flag.Parse() log.Printf("Starting Pattern Launcher") + log.Printf(" Launcher ID: %s", *launcherID) + log.Printf(" Region: %s", *region) + log.Printf(" Admin endpoint: %s", *adminEndpoint) log.Printf(" gRPC port: %d", *grpcPort) log.Printf(" Metrics port: %d", *metricsPort) log.Printf(" Health port: %d", *healthPort) log.Printf(" Patterns directory: %s", *patternsDir) log.Printf(" Default isolation: %s", *isolationStr) + log.Printf(" Max processes: %d", *maxProcs) // Parse isolation level isolationLevel := parseIsolationLevel(*isolationStr) @@ -56,6 +64,41 @@ func main() { log.Fatalf("Failed to create launcher service: %v", err) } + // Connect to admin control plane if endpoint provided + var adminClient *launcher.AdminClient + if *adminEndpoint != "" { + adminCfg := &launcher.AdminClientConfig{ + AdminEndpoint: *adminEndpoint, + LauncherID: *launcherID, + Address: fmt.Sprintf("localhost:%d", *grpcPort), + Region: *region, + MaxProcesses: int32(*maxProcs), + } + + adminClient, err = launcher.NewAdminClient(adminCfg) + if err != nil { + log.Fatalf("Failed to create admin client: %v", err) + } + defer adminClient.Close() + + // Register with admin + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + ack, err := adminClient.Register(ctx) + cancel() + + if err != nil { + log.Printf("Warning: Failed to register with admin: %v", err) + log.Printf("Continuing without admin connectivity...") + } else { + log.Printf("Successfully registered with admin: %s", ack.Message) + + // Start heartbeat loop + go adminClient.StartHeartbeatLoop(context.Background(), 30*time.Second) + } + } else { + log.Printf("No admin endpoint configured, running standalone") + } + // Create gRPC server grpcServer := 
grpc.NewServer() pb.RegisterPatternLauncherServer(grpcServer, service) diff --git a/cmd/prism-admin/control_plane.go b/cmd/prism-admin/control_plane.go new file mode 100644 index 000000000..7d0626732 --- /dev/null +++ b/cmd/prism-admin/control_plane.go @@ -0,0 +1,424 @@ +package main + +import ( + "context" + "encoding/json" + "fmt" + "hash/crc32" + "sync" + "time" + + pb "github.com/jrepp/prism-data-layer/pkg/plugin/gen/prism" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +// ControlPlaneService implements the ControlPlane gRPC service +type ControlPlaneService struct { + pb.UnimplementedControlPlaneServer + storage *Storage + partitions *PartitionManager + mu sync.RWMutex +} + +// NewControlPlaneService creates a new control plane service +func NewControlPlaneService(storage *Storage) *ControlPlaneService { + return &ControlPlaneService{ + storage: storage, + partitions: NewPartitionManager(), + } +} + +// ==================================================================== +// Proxy RPCs (ADR-055) +// ==================================================================== + +// RegisterProxy registers a proxy instance with admin on startup +func (s *ControlPlaneService) RegisterProxy( + ctx context.Context, + req *pb.ProxyRegistration, +) (*pb.ProxyRegistrationAck, error) { + s.mu.Lock() + defer s.mu.Unlock() + + fmt.Printf("[ControlPlane] RegisterProxy: proxy_id=%s, address=%s, region=%s, version=%s\n", + req.ProxyId, req.Address, req.Region, req.Version) + + // Record proxy in storage + now := time.Now() + proxy := &Proxy{ + ProxyID: req.ProxyId, + Address: req.Address, + Version: req.Version, + Status: "healthy", + LastSeen: &now, + } + + if err := s.storage.UpsertProxy(ctx, proxy); err != nil { + return nil, status.Errorf(codes.Internal, "failed to register proxy: %v", err) + } + + // Assign partition ranges + ranges := s.partitions.AssignRanges(req.ProxyId) + + // Get initial namespace assignments for this proxy's partitions + 
namespaces, err := s.getNamespacesForRanges(ctx, ranges) + if err != nil { + fmt.Printf("[ControlPlane] Warning: failed to get namespaces for ranges: %v\n", err) + namespaces = []*pb.NamespaceAssignment{} // Continue with empty list + } + + fmt.Printf("[ControlPlane] Proxy registered: %d partition ranges, %d initial namespaces\n", + len(ranges), len(namespaces)) + + return &pb.ProxyRegistrationAck{ + Success: true, + Message: "Proxy registered successfully", + InitialNamespaces: namespaces, + PartitionRanges: ranges, + }, nil +} + +// AssignNamespace pushes namespace configuration from admin to proxy +func (s *ControlPlaneService) AssignNamespace( + ctx context.Context, + req *pb.NamespaceAssignment, +) (*pb.NamespaceAssignmentAck, error) { + fmt.Printf("[ControlPlane] AssignNamespace: namespace=%s, partition=%d, version=%d\n", + req.Namespace, req.PartitionId, req.Version) + + // TODO: Implement namespace assignment logic + // This would be called by admin when pushing config to proxy + + return &pb.NamespaceAssignmentAck{ + Success: true, + Message: "Namespace assigned successfully", + }, nil +} + +// CreateNamespace handles client-initiated namespace creation requests +func (s *ControlPlaneService) CreateNamespace( + ctx context.Context, + req *pb.CreateNamespaceRequest, +) (*pb.CreateNamespaceResponse, error) { + s.mu.Lock() + defer s.mu.Unlock() + + fmt.Printf("[ControlPlane] CreateNamespace: namespace=%s, requesting_proxy=%s, principal=%s\n", + req.Namespace, req.RequestingProxy, req.Principal) + + // Calculate partition ID for this namespace + partitionID := s.partitions.HashNamespace(req.Namespace) + + // Find proxy assigned to this partition + proxyID, err := s.partitions.GetProxyForPartition(partitionID) + if err != nil { + return nil, status.Errorf(codes.FailedPrecondition, + "no proxy assigned to partition %d: %v", partitionID, err) + } + + // Persist namespace in storage + ns := &Namespace{ + Name: req.Namespace, + Description: fmt.Sprintf("Created via 
%s by %s", req.RequestingProxy, req.Principal), + } + + if err := s.storage.CreateNamespace(ctx, ns); err != nil { + return nil, status.Errorf(codes.Internal, "failed to create namespace: %v", err) + } + + fmt.Printf("[ControlPlane] Namespace created: %s → partition %d → proxy %s\n", + req.Namespace, partitionID, proxyID) + + // TODO: Send NamespaceAssignment to the assigned proxy + + return &pb.CreateNamespaceResponse{ + Success: true, + Message: "Namespace created successfully", + AssignedPartition: partitionID, + AssignedProxy: proxyID, + }, nil +} + +// Heartbeat receives periodic health updates from proxies +func (s *ControlPlaneService) Heartbeat( + ctx context.Context, + req *pb.ProxyHeartbeat, +) (*pb.HeartbeatAck, error) { + fmt.Printf("[ControlPlane] Heartbeat from proxy %s: %d namespaces, cpu=%.1f%%, mem=%dMB\n", + req.ProxyId, + len(req.NamespaceHealth), + req.Resources.CpuPercent, + req.Resources.MemoryMb) + + // Update proxy last_seen timestamp + now := time.Now() + proxy := &Proxy{ + ProxyID: req.ProxyId, + LastSeen: &now, + Status: "healthy", + } + + if err := s.storage.UpsertProxy(ctx, proxy); err != nil { + fmt.Printf("[ControlPlane] Warning: failed to update proxy heartbeat: %v\n", err) + } + + // TODO: Update namespace health metrics in storage + + return &pb.HeartbeatAck{ + Success: true, + Message: "Heartbeat received", + ServerTimestamp: time.Now().Unix(), + }, nil +} + +// RevokeNamespace removes namespace assignment from proxy +func (s *ControlPlaneService) RevokeNamespace( + ctx context.Context, + req *pb.NamespaceRevocation, +) (*pb.NamespaceRevocationAck, error) { + fmt.Printf("[ControlPlane] RevokeNamespace: proxy=%s, namespace=%s, graceful_timeout=%ds\n", + req.ProxyId, req.Namespace, req.GracefulTimeoutSeconds) + + // TODO: Implement namespace revocation logic + + return &pb.NamespaceRevocationAck{ + Success: true, + Message: "Namespace revoked successfully", + RevokedAt: time.Now().Unix(), + }, nil +} + +// 
==================================================================== +// Launcher RPCs (ADR-056, ADR-057) +// ==================================================================== + +// RegisterLauncher registers a launcher instance with admin on startup +func (s *ControlPlaneService) RegisterLauncher( + ctx context.Context, + req *pb.LauncherRegistration, +) (*pb.LauncherRegistrationAck, error) { + s.mu.Lock() + defer s.mu.Unlock() + + fmt.Printf("[ControlPlane] RegisterLauncher: launcher_id=%s, address=%s, region=%s, max_processes=%d, capabilities=%v\n", + req.LauncherId, req.Address, req.Region, req.MaxProcesses, req.Capabilities) + + // Persist launcher in storage + now := time.Now() + capabilitiesJSON, _ := json.Marshal(req.Capabilities) + launcher := &Launcher{ + LauncherID: req.LauncherId, + Address: req.Address, + Region: req.Region, + Version: req.Version, + Status: "healthy", + MaxProcesses: req.MaxProcesses, + AvailableSlots: req.MaxProcesses, // Initially all slots available + Capabilities: capabilitiesJSON, + LastSeen: &now, + } + + if err := s.storage.UpsertLauncher(ctx, launcher); err != nil { + return nil, status.Errorf(codes.Internal, "failed to register launcher: %v", err) + } + + // TODO: Get initial process assignments for this launcher + + fmt.Printf("[ControlPlane] Launcher registered successfully\n") + + return &pb.LauncherRegistrationAck{ + Success: true, + Message: "Launcher registered successfully", + InitialProcesses: []*pb.ProcessAssignment{}, // No initial processes for now + AssignedCapacity: 0, + }, nil +} + +// AssignProcess pushes process assignment from admin to launcher +func (s *ControlPlaneService) AssignProcess( + ctx context.Context, + req *pb.ProcessAssignment, +) (*pb.ProcessAssignmentAck, error) { + fmt.Printf("[ControlPlane] AssignProcess: process_id=%s, type=%s, namespace=%s\n", + req.ProcessId, req.ProcessType, req.Namespace) + + // TODO: Implement process assignment logic + + return &pb.ProcessAssignmentAck{ + Success: 
true, + Message: "Process assigned successfully", + }, nil +} + +// LauncherHeartbeat receives periodic health updates from launchers +func (s *ControlPlaneService) LauncherHeartbeat( + ctx context.Context, + req *pb.LauncherHeartbeatRequest, +) (*pb.HeartbeatAck, error) { + fmt.Printf("[ControlPlane] LauncherHeartbeat from %s: %d processes, available_slots=%d, cpu=%.1f%%, mem=%dMB\n", + req.LauncherId, + req.Resources.ProcessCount, + req.Resources.AvailableSlots, + req.Resources.CpuPercent, + req.Resources.TotalMemoryMb) + + // Update launcher last_seen timestamp and resource info in storage + now := time.Now() + launcher := &Launcher{ + LauncherID: req.LauncherId, + LastSeen: &now, + Status: "healthy", + AvailableSlots: req.Resources.AvailableSlots, + } + + if err := s.storage.UpsertLauncher(ctx, launcher); err != nil { + fmt.Printf("[ControlPlane] Warning: failed to update launcher heartbeat: %v\n", err) + } + + // TODO: Update process health metrics in storage + + return &pb.HeartbeatAck{ + Success: true, + Message: "Heartbeat received", + ServerTimestamp: time.Now().Unix(), + }, nil +} + +// RevokeProcess removes process assignment from launcher +func (s *ControlPlaneService) RevokeProcess( + ctx context.Context, + req *pb.ProcessRevocation, +) (*pb.ProcessRevocationAck, error) { + fmt.Printf("[ControlPlane] RevokeProcess: launcher=%s, process_id=%s, graceful_timeout=%ds\n", + req.LauncherId, req.ProcessId, req.GracefulTimeoutSeconds) + + // TODO: Implement process revocation logic + + return &pb.ProcessRevocationAck{ + Success: true, + Message: "Process revoked successfully", + StoppedAt: time.Now().Unix(), + ExitCode: 0, + }, nil +} + +// ==================================================================== +// Helper Methods +// ==================================================================== + +// getNamespacesForRanges retrieves namespace assignments for given partition ranges +func (s *ControlPlaneService) getNamespacesForRanges( + ctx 
context.Context, + ranges []*pb.PartitionRange, +) ([]*pb.NamespaceAssignment, error) { + // Get all namespaces from storage + namespaces, err := s.storage.ListNamespaces(ctx) + if err != nil { + return nil, err + } + + // Filter namespaces that belong to these partition ranges + var assignments []*pb.NamespaceAssignment + for _, ns := range namespaces { + partitionID := s.partitions.HashNamespace(ns.Name) + + // Check if partition falls within any of our ranges + for _, r := range ranges { + if partitionID >= r.Start && partitionID <= r.End { + // TODO: Load actual namespace config from storage + assignments = append(assignments, &pb.NamespaceAssignment{ + Namespace: ns.Name, + PartitionId: partitionID, + Config: &pb.NamespaceConfig{ + Backends: map[string]*pb.BackendConfig{}, + Patterns: map[string]*pb.PatternConfig{}, + Metadata: map[string]string{}, + }, + Version: 1, + }) + break + } + } + } + + return assignments, nil +} + +// ==================================================================== +// Partition Manager +// ==================================================================== + +// PartitionManager handles partition distribution across proxies +type PartitionManager struct { + mu sync.RWMutex + proxies map[string][]*pb.PartitionRange // proxy_id → partition ranges + partitionMap map[int32]string // partition_id → proxy_id +} + +// NewPartitionManager creates a new partition manager +func NewPartitionManager() *PartitionManager { + return &PartitionManager{ + proxies: make(map[string][]*pb.PartitionRange), + partitionMap: make(map[int32]string), + } +} + +// HashNamespace calculates partition ID for a namespace using consistent hashing +func (pm *PartitionManager) HashNamespace(namespace string) int32 { + hash := crc32.ChecksumIEEE([]byte(namespace)) + return int32(hash % 256) // 256 partitions (0-255) +} + +// AssignRanges assigns partition ranges to a proxy using round-robin distribution +func (pm *PartitionManager) AssignRanges(proxyID string) 
[]*pb.PartitionRange { + pm.mu.Lock() + defer pm.mu.Unlock() + + // Check if proxy already has assignments + if existing, ok := pm.proxies[proxyID]; ok { + return existing + } + + // Calculate range size based on number of proxies + proxyCount := len(pm.proxies) + 1 // +1 for new proxy + rangeSize := 256 / proxyCount + + // Calculate start/end for this proxy + proxyIndex := len(pm.proxies) + start := int32(proxyIndex * rangeSize) + end := int32(start + int32(rangeSize) - 1) + + // Last proxy gets remaining partitions + if end > 255 { + end = 255 + } + if proxyIndex == proxyCount-1 { + end = 255 + } + + ranges := []*pb.PartitionRange{{Start: start, End: end}} + pm.proxies[proxyID] = ranges + + // Update partition map + for i := start; i <= end; i++ { + pm.partitionMap[i] = proxyID + } + + fmt.Printf("[PartitionManager] Assigned partitions [%d-%d] to proxy %s\n", start, end, proxyID) + + return ranges +} + +// GetProxyForPartition returns the proxy ID assigned to a partition +func (pm *PartitionManager) GetProxyForPartition(partitionID int32) (string, error) { + pm.mu.RLock() + defer pm.mu.RUnlock() + + proxyID, ok := pm.partitionMap[partitionID] + if !ok { + return "", fmt.Errorf("no proxy assigned to partition %d", partitionID) + } + + return proxyID, nil +} diff --git a/cmd/prism-admin/go.mod b/cmd/prism-admin/go.mod index b2fda8d92..f11986d5a 100644 --- a/cmd/prism-admin/go.mod +++ b/cmd/prism-admin/go.mod @@ -3,26 +3,44 @@ module github.com/jrepp/prism-data-layer/cmd/prism-admin go 1.24.0 require ( + github.com/golang-migrate/migrate/v4 v4.18.2 github.com/spf13/cobra v1.10.1 github.com/spf13/viper v1.21.0 google.golang.org/grpc v1.76.0 + modernc.org/sqlite v1.36.3 ) require ( + github.com/dustin/go-humanize v1.0.1 // indirect github.com/fsnotify/fsnotify v1.9.0 // indirect github.com/go-viper/mapstructure/v2 v2.4.0 // indirect + github.com/google/uuid v1.6.0 // indirect + github.com/hashicorp/errwrap v1.1.0 // indirect + github.com/hashicorp/go-multierror v1.1.1 
// indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect + github.com/jrepp/prism-data-layer/pkg/plugin v0.0.0 + github.com/mattn/go-isatty v0.0.20 // indirect + github.com/mattn/go-sqlite3 v1.14.22 // indirect + github.com/ncruces/go-strftime v0.1.9 // indirect github.com/pelletier/go-toml/v2 v2.2.4 // indirect + github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect github.com/sagikazarmark/locafero v0.11.0 // indirect github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8 // indirect github.com/spf13/afero v1.15.0 // indirect github.com/spf13/cast v1.10.0 // indirect github.com/spf13/pflag v1.0.10 // indirect github.com/subosito/gotenv v1.6.0 // indirect + go.uber.org/atomic v1.7.0 // indirect go.yaml.in/yaml/v3 v3.0.4 // indirect + golang.org/x/exp v0.0.0-20230315142452-642cacee5cc0 // indirect golang.org/x/net v0.42.0 // indirect golang.org/x/sys v0.34.0 // indirect golang.org/x/text v0.28.0 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20250804133106-a7a43d27e69b // indirect google.golang.org/protobuf v1.36.6 // indirect + modernc.org/libc v1.61.13 // indirect + modernc.org/mathutil v1.7.1 // indirect + modernc.org/memory v1.8.2 // indirect ) + +replace github.com/jrepp/prism-data-layer/pkg/plugin => ../../pkg/plugin diff --git a/cmd/prism-admin/go.sum b/cmd/prism-admin/go.sum index 8551945de..314076d94 100644 --- a/cmd/prism-admin/go.sum +++ b/cmd/prism-admin/go.sum @@ -1,6 +1,9 @@ github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= +github.com/dustin/go-humanize v1.0.1/go.mod 
h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k= @@ -11,24 +14,43 @@ github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-viper/mapstructure/v2 v2.4.0 h1:EBsztssimR/CONLSZZ04E8qAkxNYq4Qp9LvH92wZUgs= github.com/go-viper/mapstructure/v2 v2.4.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= +github.com/golang-migrate/migrate/v4 v4.18.2 h1:2VSCMz7x7mjyTXx3m2zPokOY82LTRgxK1yQYKo6wWQ8= +github.com/golang-migrate/migrate/v4 v4.18.2/go.mod h1:2CM6tJvn2kqPXwnXO/d3rAQYiyoIm180VsO8PRX6Rpk= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= +github.com/google/pprof v0.0.0-20250317173921-a4b03ec1a45e h1:ijClszYn+mADRFY17kjQEVQ1XRhq2/JR1M3sGqeJoxs= +github.com/google/pprof v0.0.0-20250317173921-a4b03ec1a45e/go.mod h1:boTsfXsheKC2y+lKOCMpSfarhxDeIzfZG1jqGcPl3cA= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= +github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= +github.com/hashicorp/go-multierror 
v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= +github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/go-sqlite3 v1.14.22 h1:2gZY6PC6kBnID23Tichd1K+Z0oS6nE/XwU+Vz/5o4kU= +github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= +github.com/ncruces/go-strftime v0.1.9 h1:bY0MQC28UADQmHmaF5dgpLmImcShSi2kHU9XLdhx/f4= +github.com/ncruces/go-strftime v0.1.9/go.mod h1:Fwc5htZGVVkseilnfgOVb9mKy6w1naJmn9CehxcKcls= github.com/pelletier/go-toml/v2 v2.2.4 h1:mye9XuhQ6gvn5h28+VilKrrPoQVanw5PMw/TB0t5Ec4= github.com/pelletier/go-toml/v2 v2.2.4/go.mod h1:2gIqNv+qfxSVS7cM2xJQKtLSTLUE9V8t9Stt+h56mCY= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8= -github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= +github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE= +github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod 
h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= +github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= +github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/sagikazarmark/locafero v0.11.0 h1:1iurJgmM9G3PA/I+wWYIOw/5SyBtxapeHDcg+AAIFXc= github.com/sagikazarmark/locafero v0.11.0/go.mod h1:nVIGvgyzw595SUSUE6tvCp3YYTeHs15MvlmU87WwIik= @@ -45,6 +67,8 @@ github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk= github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.21.0 h1:x5S+0EU27Lbphp4UKm1C+1oQO+rKx36vfCoaVebLFSU= github.com/spf13/viper v1.21.0/go.mod h1:P0lhsswPGWD/1lZJ9ny3fYnVqxiegrlNrEmgLjbTCAY= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= @@ -61,14 +85,25 @@ go.opentelemetry.io/otel/sdk/metric v1.37.0 h1:90lI228XrB9jCMuSdA0673aubgRobVZFh go.opentelemetry.io/otel/sdk/metric v1.37.0/go.mod h1:cNen4ZWfiD37l5NhS+Keb5RXVWZWpRE+9WyVCpbo5ps= go.opentelemetry.io/otel/trace v1.37.0 h1:HLdcFNbRQBE2imdSEgm/kwqmQj1Or1l/7bW6mxVK7z4= go.opentelemetry.io/otel/trace v1.37.0/go.mod h1:TlgrlQ+PtQO5XFerSPUYG0JSgGyryXewPGyayAWSBS0= +go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= +go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc= go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= 
+golang.org/x/exp v0.0.0-20230315142452-642cacee5cc0 h1:pVgRXcIictcr+lBQIFeiwuwtDIs4eL21OuM9nyAADmo= +golang.org/x/exp v0.0.0-20230315142452-642cacee5cc0/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= +golang.org/x/mod v0.26.0 h1:EGMPT//Ezu+ylkCijjPc+f4Aih7sZvaAr+O3EHBxvZg= +golang.org/x/mod v0.26.0/go.mod h1:/j6NAhSk8iQ723BGAUyoAcn7SlD7s15Dp9Nd/SfeaFQ= golang.org/x/net v0.42.0 h1:jzkYrhi3YQWD6MLBJcsklgQsoAcw89EcZbJw8Z614hs= golang.org/x/net v0.42.0/go.mod h1:FF1RA5d3u7nAYA4z2TkclSCKh68eSXtiFwcWQpPXdt8= +golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw= +golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.34.0 h1:H5Y5sJ2L2JRdyv7ROF1he/lPdvFsd0mJHFw2ThKHxLA= golang.org/x/sys v0.34.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= golang.org/x/text v0.28.0 h1:rhazDwis8INMIwQ4tpjLDzUhx6RlXqZNPEM0huQojng= golang.org/x/text v0.28.0/go.mod h1:U8nCwOR8jO/marOQ0QbDiOngZVEBB7MAiitBuMjXiNU= +golang.org/x/tools v0.35.0 h1:mBffYraMEf7aa0sB+NuKnuCy8qI/9Bughn8dC2Gu5r0= +golang.org/x/tools v0.35.0/go.mod h1:NKdj5HkL/73byiZSJjqJgKn3ep7KjFkBOkR/Hps3VPw= gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk= gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E= google.golang.org/genproto/googleapis/rpc v0.0.0-20250804133106-a7a43d27e69b h1:zPKJod4w6F1+nRGDI9ubnXYhU9NSWoFAijkHkUXeTK8= @@ -82,3 +117,27 @@ gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogR gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +modernc.org/cc/v4 v4.24.4 h1:TFkx1s6dCkQpd6dKurBNmpo+G8Zl4Sq/ztJ+2+DEsh0= +modernc.org/cc/v4 v4.24.4/go.mod 
h1:uVtb5OGqUKpoLWhqwNQo/8LwvoiEBLvZXIQ/SmO6mL0= +modernc.org/ccgo/v4 v4.23.16 h1:Z2N+kk38b7SfySC1ZkpGLN2vthNJP1+ZzGZIlH7uBxo= +modernc.org/ccgo/v4 v4.23.16/go.mod h1:nNma8goMTY7aQZQNTyN9AIoJfxav4nvTnvKThAeMDdo= +modernc.org/fileutil v1.3.0 h1:gQ5SIzK3H9kdfai/5x41oQiKValumqNTDXMvKo62HvE= +modernc.org/fileutil v1.3.0/go.mod h1:XatxS8fZi3pS8/hKG2GH/ArUogfxjpEKs3Ku3aK4JyQ= +modernc.org/gc/v2 v2.6.3 h1:aJVhcqAte49LF+mGveZ5KPlsp4tdGdAOT4sipJXADjw= +modernc.org/gc/v2 v2.6.3/go.mod h1:YgIahr1ypgfe7chRuJi2gD7DBQiKSLMPgBQe9oIiito= +modernc.org/libc v1.61.13 h1:3LRd6ZO1ezsFiX1y+bHd1ipyEHIJKvuprv0sLTBwLW8= +modernc.org/libc v1.61.13/go.mod h1:8F/uJWL/3nNil0Lgt1Dpz+GgkApWh04N3el3hxJcA6E= +modernc.org/mathutil v1.7.1 h1:GCZVGXdaN8gTqB1Mf/usp1Y/hSqgI2vAGGP4jZMCxOU= +modernc.org/mathutil v1.7.1/go.mod h1:4p5IwJITfppl0G4sUEDtCr4DthTaT47/N3aT6MhfgJg= +modernc.org/memory v1.8.2 h1:cL9L4bcoAObu4NkxOlKWBWtNHIsnnACGF/TbqQ6sbcI= +modernc.org/memory v1.8.2/go.mod h1:ZbjSvMO5NQ1A2i3bWeDiVMxIorXwdClKE/0SZ+BMotU= +modernc.org/opt v0.1.4 h1:2kNGMRiUjrp4LcaPuLY2PzUfqM/w9N23quVwhKt5Qm8= +modernc.org/opt v0.1.4/go.mod h1:03fq9lsNfvkYSfxrfUhZCWPk1lm4cq4N+Bh//bEtgns= +modernc.org/sortutil v1.2.1 h1:+xyoGf15mM3NMlPDnFqrteY07klSFxLElE2PVuWIJ7w= +modernc.org/sortutil v1.2.1/go.mod h1:7ZI3a3REbai7gzCLcotuw9AC4VZVpYMjDzETGsSMqJE= +modernc.org/sqlite v1.36.3 h1:qYMYlFR+rtLDUzuXoST1SDIdEPbX8xzuhdF90WsX1ss= +modernc.org/sqlite v1.36.3/go.mod h1:ADySlx7K4FdY5MaJcEv86hTJ0PjedAloTUuif0YS3ws= +modernc.org/strutil v1.2.1 h1:UneZBkQA+DX2Rp35KcM69cSsNES9ly8mQWD71HKlOA0= +modernc.org/strutil v1.2.1/go.mod h1:EHkiggD70koQxjVdSBM3JKM7k6L0FbGE5eymy9i3B9A= +modernc.org/token v1.1.0 h1:Xl7Ap9dKaEs5kLoOQeQmPWevfnk/DM5qcLcYlA8ys6Y= +modernc.org/token v1.1.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= diff --git a/cmd/prism-admin/migrations/000001_initial_schema.down.sql b/cmd/prism-admin/migrations/000001_initial_schema.down.sql new file mode 100644 index 000000000..1f6bc1813 --- /dev/null +++ 
b/cmd/prism-admin/migrations/000001_initial_schema.down.sql @@ -0,0 +1,22 @@ +-- Rollback initial schema + +DROP INDEX IF EXISTS idx_audit_logs_user; +DROP INDEX IF EXISTS idx_audit_logs_action; +DROP INDEX IF EXISTS idx_audit_logs_resource; +DROP INDEX IF EXISTS idx_audit_logs_namespace; +DROP INDEX IF EXISTS idx_audit_logs_timestamp; +DROP TABLE IF EXISTS audit_logs; + +DROP INDEX IF EXISTS idx_patterns_pattern_id; +DROP INDEX IF EXISTS idx_patterns_proxy; +DROP INDEX IF EXISTS idx_patterns_namespace; +DROP TABLE IF EXISTS patterns; + +DROP INDEX IF EXISTS idx_proxies_status; +DROP INDEX IF EXISTS idx_proxies_proxy_id; +DROP TABLE IF EXISTS proxies; + +DROP INDEX IF EXISTS idx_namespaces_name; +DROP TABLE IF EXISTS namespaces; + +DROP TABLE IF EXISTS schema_version; diff --git a/cmd/prism-admin/migrations/000001_initial_schema.up.sql b/cmd/prism-admin/migrations/000001_initial_schema.up.sql new file mode 100644 index 000000000..b4a752048 --- /dev/null +++ b/cmd/prism-admin/migrations/000001_initial_schema.up.sql @@ -0,0 +1,83 @@ +-- Initial schema for prism-admin storage + +-- Namespaces table +CREATE TABLE IF NOT EXISTS namespaces ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + name TEXT NOT NULL UNIQUE, + description TEXT, + created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP, + updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP, + metadata TEXT -- JSON stored as TEXT for SQLite compatibility +); + +CREATE INDEX IF NOT EXISTS idx_namespaces_name ON namespaces(name); + +-- Proxies table (last known state) +CREATE TABLE IF NOT EXISTS proxies ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + proxy_id TEXT NOT NULL UNIQUE, + address TEXT NOT NULL, + version TEXT, + status TEXT CHECK(status IN ('healthy', 'unhealthy', 'unknown')) NOT NULL DEFAULT 'unknown', + last_seen TIMESTAMP, + created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP, + updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP, + metadata TEXT -- JSON stored as TEXT +); + +CREATE INDEX IF NOT EXISTS idx_proxies_proxy_id ON 
proxies(proxy_id); +CREATE INDEX IF NOT EXISTS idx_proxies_status ON proxies(status, last_seen); + +-- Patterns table (active connections) +CREATE TABLE IF NOT EXISTS patterns ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + pattern_id TEXT NOT NULL, + pattern_type TEXT NOT NULL, + proxy_id TEXT NOT NULL, + namespace TEXT NOT NULL, + status TEXT CHECK(status IN ('active', 'stopped', 'error')) NOT NULL DEFAULT 'active', + config TEXT, -- JSON stored as TEXT + created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP, + updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP, + FOREIGN KEY (proxy_id) REFERENCES proxies(proxy_id) ON DELETE CASCADE, + FOREIGN KEY (namespace) REFERENCES namespaces(name) ON DELETE CASCADE +); + +CREATE INDEX IF NOT EXISTS idx_patterns_namespace ON patterns(namespace); +CREATE INDEX IF NOT EXISTS idx_patterns_proxy ON patterns(proxy_id); +CREATE INDEX IF NOT EXISTS idx_patterns_pattern_id ON patterns(pattern_id); + +-- Audit log table +CREATE TABLE IF NOT EXISTS audit_logs ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + timestamp TIMESTAMP DEFAULT CURRENT_TIMESTAMP, + user TEXT, + action TEXT NOT NULL, + resource_type TEXT NOT NULL, + resource_id TEXT, + namespace TEXT, + method TEXT, + path TEXT, + status_code INTEGER, + request_body TEXT, -- JSON stored as TEXT + response_body TEXT, -- JSON stored as TEXT + error TEXT, + duration_ms INTEGER, + client_ip TEXT, + user_agent TEXT +); + +CREATE INDEX IF NOT EXISTS idx_audit_logs_timestamp ON audit_logs(timestamp); +CREATE INDEX IF NOT EXISTS idx_audit_logs_namespace ON audit_logs(namespace); +CREATE INDEX IF NOT EXISTS idx_audit_logs_resource ON audit_logs(resource_type, resource_id); +CREATE INDEX IF NOT EXISTS idx_audit_logs_action ON audit_logs(action); +CREATE INDEX IF NOT EXISTS idx_audit_logs_user ON audit_logs(user); + +-- Schema version tracking +CREATE TABLE IF NOT EXISTS schema_version ( + version INTEGER PRIMARY KEY, + applied_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP, + description TEXT +); + +INSERT INTO 
schema_version (version, description) VALUES (1, 'Initial schema with namespaces, proxies, patterns, and audit logs'); diff --git a/cmd/prism-admin/migrations/000002_add_launchers.down.sql b/cmd/prism-admin/migrations/000002_add_launchers.down.sql new file mode 100644 index 000000000..27460166b --- /dev/null +++ b/cmd/prism-admin/migrations/000002_add_launchers.down.sql @@ -0,0 +1,8 @@ +-- Rollback launchers table + +DROP INDEX IF EXISTS idx_launchers_region; +DROP INDEX IF EXISTS idx_launchers_status; +DROP INDEX IF EXISTS idx_launchers_launcher_id; +DROP TABLE IF EXISTS launchers; + +DELETE FROM schema_version WHERE version = 2; diff --git a/cmd/prism-admin/migrations/000002_add_launchers.up.sql b/cmd/prism-admin/migrations/000002_add_launchers.up.sql new file mode 100644 index 000000000..cd19cdb15 --- /dev/null +++ b/cmd/prism-admin/migrations/000002_add_launchers.up.sql @@ -0,0 +1,24 @@ +-- Add launchers table for tracking pattern launcher instances + +CREATE TABLE IF NOT EXISTS launchers ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + launcher_id TEXT NOT NULL UNIQUE, + address TEXT NOT NULL, + region TEXT, + version TEXT, + status TEXT CHECK(status IN ('healthy', 'unhealthy', 'unknown')) NOT NULL DEFAULT 'unknown', + max_processes INTEGER DEFAULT 0, + available_slots INTEGER DEFAULT 0, + capabilities TEXT, -- JSON array stored as TEXT + last_seen TIMESTAMP, + created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP, + updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP, + metadata TEXT -- JSON stored as TEXT +); + +CREATE INDEX IF NOT EXISTS idx_launchers_launcher_id ON launchers(launcher_id); +CREATE INDEX IF NOT EXISTS idx_launchers_status ON launchers(status, last_seen); +CREATE INDEX IF NOT EXISTS idx_launchers_region ON launchers(region); + +-- Update schema version +INSERT INTO schema_version (version, description) VALUES (2, 'Add launchers table for pattern launcher tracking'); diff --git a/cmd/prism-admin/root.go b/cmd/prism-admin/root.go index aecb0255c..557c4d7c2 
100644 --- a/cmd/prism-admin/root.go +++ b/cmd/prism-admin/root.go @@ -23,12 +23,15 @@ func init() { rootCmd.PersistentFlags().StringP("endpoint", "e", "localhost:8981", "Admin API endpoint") rootCmd.PersistentFlags().StringP("config", "c", "", "Config file (default: ~/.prism.yaml)") rootCmd.PersistentFlags().String("log-level", "info", "Log level (debug, info, warn, error)") + rootCmd.PersistentFlags().String("db", "", "Database URN (default: sqlite://~/.prism/admin.db, supports: sqlite://, postgresql://)") // Bind flags to viper viper.BindPFlag("admin.endpoint", rootCmd.PersistentFlags().Lookup("endpoint")) viper.BindPFlag("logging.level", rootCmd.PersistentFlags().Lookup("log-level")) + viper.BindPFlag("storage.db", rootCmd.PersistentFlags().Lookup("db")) // Add subcommands + rootCmd.AddCommand(serveCmd) rootCmd.AddCommand(namespaceCmd) rootCmd.AddCommand(healthCmd) } diff --git a/cmd/prism-admin/serve.go b/cmd/prism-admin/serve.go new file mode 100644 index 000000000..e6e232a49 --- /dev/null +++ b/cmd/prism-admin/serve.go @@ -0,0 +1,110 @@ +package main + +import ( + "context" + "fmt" + "net" + "os" + "os/signal" + "syscall" + + pb "github.com/jrepp/prism-data-layer/pkg/plugin/gen/prism" + "github.com/spf13/cobra" + "github.com/spf13/viper" + "google.golang.org/grpc" +) + +var serveCmd = &cobra.Command{ + Use: "serve", + Short: "Start the prism-admin control plane server", + Long: `Start the prism-admin control plane gRPC server on port 8981. 
+ +The control plane server accepts connections from: +- prism-proxy instances (proxy registration, namespace management) +- prism-launcher instances (process lifecycle management) + +Example: + prism-admin serve + prism-admin serve --port 8981 --db sqlite://~/.prism/admin.db +`, + RunE: runServe, +} + +func init() { + serveCmd.Flags().IntP("port", "p", 8981, "Control plane gRPC port") + serveCmd.Flags().String("listen", "0.0.0.0", "Listen address") + viper.BindPFlag("server.port", serveCmd.Flags().Lookup("port")) + viper.BindPFlag("server.listen", serveCmd.Flags().Lookup("listen")) +} + +func runServe(cmd *cobra.Command, args []string) error { + ctx := context.Background() + + // Parse database configuration + dbURN := viper.GetString("storage.db") + dbCfg, err := ParseDatabaseURN(dbURN) + if err != nil { + return fmt.Errorf("invalid database URN: %w", err) + } + + // Initialize storage + fmt.Printf("[INFO] Initializing storage: %s (%s)\n", dbCfg.Type, dbCfg.Path) + storage, err := NewStorage(ctx, dbCfg) + if err != nil { + return fmt.Errorf("failed to initialize storage: %w", err) + } + defer storage.Close() + fmt.Printf("[INFO] Storage initialized successfully\n") + + // Create control plane service + controlPlane := NewControlPlaneService(storage) + fmt.Printf("[INFO] Control plane service created\n") + + // Start gRPC server + listenAddr := viper.GetString("server.listen") + port := viper.GetInt("server.port") + address := fmt.Sprintf("%s:%d", listenAddr, port) + + lis, err := net.Listen("tcp", address) + if err != nil { + return fmt.Errorf("failed to listen on %s: %w", address, err) + } + + grpcServer := grpc.NewServer() + pb.RegisterControlPlaneServer(grpcServer, controlPlane) + fmt.Printf("[INFO] gRPC server configured\n\n") + + fmt.Printf("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\n") + fmt.Printf("🚀 Prism Admin Control Plane Server\n") + fmt.Printf("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\n") + fmt.Printf(" Listening: %s\n", address) + 
fmt.Printf(" Database: %s (%s)\n", dbCfg.Type, dbCfg.Path) + fmt.Printf(" Status: ✅ Ready\n") + fmt.Printf("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\n") + fmt.Printf(" Accepting connections from:\n") + fmt.Printf(" • Proxies (registration, heartbeats, namespace mgmt)\n") + fmt.Printf(" • Launchers (registration, heartbeats, process mgmt)\n") + fmt.Printf(" • Clients (namespace provisioning via proxy)\n") + fmt.Printf("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\n\n") + + // Handle graceful shutdown + sigChan := make(chan os.Signal, 1) + signal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM) + + errChan := make(chan error, 1) + go func() { + if err := grpcServer.Serve(lis); err != nil { + errChan <- err + } + }() + + // Wait for signal or error + select { + case sig := <-sigChan: + fmt.Printf("\nReceived signal %v, shutting down gracefully...\n", sig) + grpcServer.GracefulStop() + return nil + case err := <-errChan: + return fmt.Errorf("server error: %w", err) + } +} diff --git a/cmd/prism-admin/storage.go b/cmd/prism-admin/storage.go new file mode 100644 index 000000000..8fa14174e --- /dev/null +++ b/cmd/prism-admin/storage.go @@ -0,0 +1,604 @@ +package main + +import ( + "context" + "database/sql" + "embed" + "encoding/json" + "fmt" + "os" + "path/filepath" + "strings" + "time" + + "github.com/golang-migrate/migrate/v4" + "github.com/golang-migrate/migrate/v4/database/sqlite3" + "github.com/golang-migrate/migrate/v4/source/iofs" + _ "modernc.org/sqlite" // Pure Go SQLite driver +) + +//go:embed migrations/*.sql +var migrationsFS embed.FS + +// DatabaseConfig holds database connection configuration +type DatabaseConfig struct { + Type string // "sqlite" or "postgresql" + Path string // For SQLite + URN string // For PostgreSQL +} + +// Storage provides database operations for prism-admin +type Storage struct { + db *sql.DB + cfg *DatabaseConfig +} + +// Models + +type Namespace struct { + ID int64 + Name string + Description string + CreatedAt 
// DatabaseConfig holds parsed database connection settings.
// Exactly one of Path (sqlite) or URN (postgresql) is populated.
type DatabaseConfig struct {
	Type string // "sqlite" or "postgresql"
	Path string // filesystem path, set when Type == "sqlite"
	URN  string // full connection URN, set when Type == "postgresql"
}

// Namespace is a logical tenant boundary stored in the namespaces table.
type Namespace struct {
	ID          int64
	Name        string
	Description string
	CreatedAt   time.Time
	UpdatedAt   time.Time
	Metadata    json.RawMessage // arbitrary JSON, stored as TEXT
}

// Proxy is the last known state of a registered prism-proxy instance.
type Proxy struct {
	ID        int64
	ProxyID   string
	Address   string
	Version   string
	Status    string // "healthy", "unhealthy", "unknown"
	LastSeen  *time.Time
	CreatedAt time.Time
	UpdatedAt time.Time
	Metadata  json.RawMessage
}

// Pattern is an active pattern connection owned by a proxy in a namespace.
type Pattern struct {
	ID          int64
	PatternID   string
	PatternType string
	ProxyID     string
	Namespace   string
	Status      string // "active", "stopped", "error"
	Config      json.RawMessage
	CreatedAt   time.Time
	UpdatedAt   time.Time
}

// Launcher is the last known state of a pattern-launcher instance.
type Launcher struct {
	ID             int64
	LauncherID     string
	Address        string
	Region         string
	Version        string
	Status         string // "healthy", "unhealthy", "unknown"
	MaxProcesses   int32
	AvailableSlots int32
	Capabilities   json.RawMessage // JSON array, stored as TEXT
	LastSeen       *time.Time
	CreatedAt      time.Time
	UpdatedAt      time.Time
	Metadata       json.RawMessage
}

// AuditLog is one row of the append-only audit_logs table.
type AuditLog struct {
	ID           int64
	Timestamp    time.Time
	User         string
	Action       string
	ResourceType string
	ResourceID   string
	Namespace    string
	Method       string
	Path         string
	StatusCode   int
	RequestBody  json.RawMessage
	ResponseBody json.RawMessage
	Error        string
	DurationMs   int64
	ClientIP     string
	UserAgent    string
}

// ParseDatabaseURN parses a database URN into a DatabaseConfig.
// An empty URN selects the default on-disk SQLite database.
// Supported schemes: sqlite:// (relative or absolute path) and
// postgres:// / postgresql:// (connection URN passed through verbatim).
func ParseDatabaseURN(urn string) (*DatabaseConfig, error) {
	switch {
	case urn == "":
		return &DatabaseConfig{Type: "sqlite", Path: defaultDatabasePath()}, nil
	case strings.HasPrefix(urn, "sqlite://"):
		// sqlite:///abs/path (three slashes) yields "/abs/path";
		// sqlite://rel/path (two slashes) yields "rel/path".
		return &DatabaseConfig{Type: "sqlite", Path: strings.TrimPrefix(urn, "sqlite://")}, nil
	case strings.HasPrefix(urn, "postgresql://"), strings.HasPrefix(urn, "postgres://"):
		// Require a full scheme separator: a bare "postgres" prefix would
		// also accept malformed schemes such as "postgresx://".
		return &DatabaseConfig{Type: "postgresql", URN: urn}, nil
	default:
		return nil, fmt.Errorf("unsupported database URN: %s (supported: sqlite://, postgresql://)", urn)
	}
}

// defaultDatabasePath returns the default SQLite database location
// (~/.prism/admin.db), falling back to the working directory when the
// home directory cannot be resolved or created.
func defaultDatabasePath() string {
	homeDir, err := os.UserHomeDir()
	if err != nil {
		return "./prism-admin.db"
	}
	prismDir := filepath.Join(homeDir, ".prism")
	if err := os.MkdirAll(prismDir, 0700); err != nil {
		return "./prism-admin.db"
	}
	return filepath.Join(prismDir, "admin.db")
}
err) + } + + dbDriver, err := sqlite3.WithInstance(s.db, &sqlite3.Config{}) + if err != nil { + return fmt.Errorf("failed to create database driver: %w", err) + } + + m, err := migrate.NewWithInstance("iofs", sourceDriver, "sqlite3", dbDriver) + if err != nil { + return fmt.Errorf("failed to create migrator: %w", err) + } + + if err := m.Up(); err != nil && err != migrate.ErrNoChange { + return fmt.Errorf("migration failed: %w", err) + } + + return nil +} + +// Close closes the database connection +func (s *Storage) Close() error { + if s.db != nil { + return s.db.Close() + } + return nil +} + +// Namespace operations + +func (s *Storage) CreateNamespace(ctx context.Context, ns *Namespace) error { + metadataJSON, _ := json.Marshal(ns.Metadata) + + result, err := s.db.ExecContext(ctx, ` + INSERT INTO namespaces (name, description, metadata) + VALUES (?, ?, ?) + `, ns.Name, ns.Description, string(metadataJSON)) + + if err != nil { + return fmt.Errorf("failed to create namespace: %w", err) + } + + id, _ := result.LastInsertId() + ns.ID = id + return nil +} + +func (s *Storage) GetNamespace(ctx context.Context, name string) (*Namespace, error) { + var ns Namespace + var metadataStr string + + err := s.db.QueryRowContext(ctx, ` + SELECT id, name, description, created_at, updated_at, metadata + FROM namespaces WHERE name = ? 
+ `, name).Scan(&ns.ID, &ns.Name, &ns.Description, &ns.CreatedAt, &ns.UpdatedAt, &metadataStr) + + if err == sql.ErrNoRows { + return nil, fmt.Errorf("namespace not found: %s", name) + } + if err != nil { + return nil, fmt.Errorf("failed to get namespace: %w", err) + } + + if metadataStr != "" { + ns.Metadata = json.RawMessage(metadataStr) + } + + return &ns, nil +} + +func (s *Storage) ListNamespaces(ctx context.Context) ([]*Namespace, error) { + rows, err := s.db.QueryContext(ctx, ` + SELECT id, name, description, created_at, updated_at, metadata + FROM namespaces + ORDER BY name + `) + if err != nil { + return nil, fmt.Errorf("failed to list namespaces: %w", err) + } + defer rows.Close() + + var namespaces []*Namespace + for rows.Next() { + var ns Namespace + var metadataStr string + + if err := rows.Scan(&ns.ID, &ns.Name, &ns.Description, &ns.CreatedAt, &ns.UpdatedAt, &metadataStr); err != nil { + return nil, fmt.Errorf("failed to scan namespace: %w", err) + } + + if metadataStr != "" { + ns.Metadata = json.RawMessage(metadataStr) + } + + namespaces = append(namespaces, &ns) + } + + return namespaces, rows.Err() +} + +// Proxy operations + +func (s *Storage) UpsertProxy(ctx context.Context, p *Proxy) error { + metadataJSON, _ := json.Marshal(p.Metadata) + + _, err := s.db.ExecContext(ctx, ` + INSERT INTO proxies (proxy_id, address, version, status, last_seen, metadata, updated_at) + VALUES (?, ?, ?, ?, ?, ?, CURRENT_TIMESTAMP) + ON CONFLICT(proxy_id) DO UPDATE SET + address = excluded.address, + version = excluded.version, + status = excluded.status, + last_seen = excluded.last_seen, + metadata = excluded.metadata, + updated_at = CURRENT_TIMESTAMP + `, p.ProxyID, p.Address, p.Version, p.Status, p.LastSeen, string(metadataJSON)) + + return err +} + +func (s *Storage) GetProxy(ctx context.Context, proxyID string) (*Proxy, error) { + var p Proxy + var metadataStr string + + err := s.db.QueryRowContext(ctx, ` + SELECT id, proxy_id, address, version, status, 
last_seen, created_at, updated_at, metadata + FROM proxies WHERE proxy_id = ? + `, proxyID).Scan(&p.ID, &p.ProxyID, &p.Address, &p.Version, &p.Status, &p.LastSeen, + &p.CreatedAt, &p.UpdatedAt, &metadataStr) + + if err == sql.ErrNoRows { + return nil, fmt.Errorf("proxy not found: %s", proxyID) + } + if err != nil { + return nil, fmt.Errorf("failed to get proxy: %w", err) + } + + if metadataStr != "" { + p.Metadata = json.RawMessage(metadataStr) + } + + return &p, nil +} + +func (s *Storage) ListProxies(ctx context.Context) ([]*Proxy, error) { + rows, err := s.db.QueryContext(ctx, ` + SELECT id, proxy_id, address, version, status, last_seen, created_at, updated_at, metadata + FROM proxies + ORDER BY last_seen DESC + `) + if err != nil { + return nil, fmt.Errorf("failed to list proxies: %w", err) + } + defer rows.Close() + + var proxies []*Proxy + for rows.Next() { + var p Proxy + var metadataStr string + + if err := rows.Scan(&p.ID, &p.ProxyID, &p.Address, &p.Version, &p.Status, &p.LastSeen, + &p.CreatedAt, &p.UpdatedAt, &metadataStr); err != nil { + return nil, fmt.Errorf("failed to scan proxy: %w", err) + } + + if metadataStr != "" { + p.Metadata = json.RawMessage(metadataStr) + } + + proxies = append(proxies, &p) + } + + return proxies, rows.Err() +} + +// Launcher operations + +func (s *Storage) UpsertLauncher(ctx context.Context, l *Launcher) error { + metadataJSON, _ := json.Marshal(l.Metadata) + capabilitiesJSON, _ := json.Marshal(l.Capabilities) + + _, err := s.db.ExecContext(ctx, ` + INSERT INTO launchers (launcher_id, address, region, version, status, max_processes, available_slots, capabilities, last_seen, metadata, updated_at) + VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, CURRENT_TIMESTAMP) + ON CONFLICT(launcher_id) DO UPDATE SET + address = excluded.address, + region = excluded.region, + version = excluded.version, + status = excluded.status, + max_processes = excluded.max_processes, + available_slots = excluded.available_slots, + capabilities = 
excluded.capabilities, + last_seen = excluded.last_seen, + metadata = excluded.metadata, + updated_at = CURRENT_TIMESTAMP + `, l.LauncherID, l.Address, l.Region, l.Version, l.Status, l.MaxProcesses, l.AvailableSlots, string(capabilitiesJSON), l.LastSeen, string(metadataJSON)) + + return err +} + +func (s *Storage) GetLauncher(ctx context.Context, launcherID string) (*Launcher, error) { + var l Launcher + var metadataStr, capabilitiesStr string + + err := s.db.QueryRowContext(ctx, ` + SELECT id, launcher_id, address, region, version, status, max_processes, available_slots, capabilities, last_seen, created_at, updated_at, metadata + FROM launchers WHERE launcher_id = ? + `, launcherID).Scan(&l.ID, &l.LauncherID, &l.Address, &l.Region, &l.Version, &l.Status, &l.MaxProcesses, &l.AvailableSlots, + &capabilitiesStr, &l.LastSeen, &l.CreatedAt, &l.UpdatedAt, &metadataStr) + + if err == sql.ErrNoRows { + return nil, fmt.Errorf("launcher not found: %s", launcherID) + } + if err != nil { + return nil, fmt.Errorf("failed to get launcher: %w", err) + } + + if metadataStr != "" { + l.Metadata = json.RawMessage(metadataStr) + } + if capabilitiesStr != "" { + l.Capabilities = json.RawMessage(capabilitiesStr) + } + + return &l, nil +} + +func (s *Storage) ListLaunchers(ctx context.Context) ([]*Launcher, error) { + rows, err := s.db.QueryContext(ctx, ` + SELECT id, launcher_id, address, region, version, status, max_processes, available_slots, capabilities, last_seen, created_at, updated_at, metadata + FROM launchers + ORDER BY last_seen DESC + `) + if err != nil { + return nil, fmt.Errorf("failed to list launchers: %w", err) + } + defer rows.Close() + + var launchers []*Launcher + for rows.Next() { + var l Launcher + var metadataStr, capabilitiesStr string + + if err := rows.Scan(&l.ID, &l.LauncherID, &l.Address, &l.Region, &l.Version, &l.Status, &l.MaxProcesses, &l.AvailableSlots, + &capabilitiesStr, &l.LastSeen, &l.CreatedAt, &l.UpdatedAt, &metadataStr); err != nil { + return nil, 
fmt.Errorf("failed to scan launcher: %w", err) + } + + if metadataStr != "" { + l.Metadata = json.RawMessage(metadataStr) + } + if capabilitiesStr != "" { + l.Capabilities = json.RawMessage(capabilitiesStr) + } + + launchers = append(launchers, &l) + } + + return launchers, rows.Err() +} + +// Pattern operations + +func (s *Storage) CreatePattern(ctx context.Context, p *Pattern) error { + configJSON, _ := json.Marshal(p.Config) + + result, err := s.db.ExecContext(ctx, ` + INSERT INTO patterns (pattern_id, pattern_type, proxy_id, namespace, status, config) + VALUES (?, ?, ?, ?, ?, ?) + `, p.PatternID, p.PatternType, p.ProxyID, p.Namespace, p.Status, string(configJSON)) + + if err != nil { + return fmt.Errorf("failed to create pattern: %w", err) + } + + id, _ := result.LastInsertId() + p.ID = id + return nil +} + +func (s *Storage) ListPatternsByNamespace(ctx context.Context, namespace string) ([]*Pattern, error) { + rows, err := s.db.QueryContext(ctx, ` + SELECT id, pattern_id, pattern_type, proxy_id, namespace, status, config, created_at, updated_at + FROM patterns + WHERE namespace = ? 
+ ORDER BY created_at DESC + `, namespace) + if err != nil { + return nil, fmt.Errorf("failed to list patterns: %w", err) + } + defer rows.Close() + + var patterns []*Pattern + for rows.Next() { + var p Pattern + var configStr string + + if err := rows.Scan(&p.ID, &p.PatternID, &p.PatternType, &p.ProxyID, &p.Namespace, + &p.Status, &configStr, &p.CreatedAt, &p.UpdatedAt); err != nil { + return nil, fmt.Errorf("failed to scan pattern: %w", err) + } + + if configStr != "" { + p.Config = json.RawMessage(configStr) + } + + patterns = append(patterns, &p) + } + + return patterns, rows.Err() +} + +// Audit log operations + +func (s *Storage) LogAudit(ctx context.Context, log *AuditLog) error { + requestJSON, _ := json.Marshal(log.RequestBody) + responseJSON, _ := json.Marshal(log.ResponseBody) + + _, err := s.db.ExecContext(ctx, ` + INSERT INTO audit_logs ( + timestamp, user, action, resource_type, resource_id, namespace, + method, path, status_code, request_body, response_body, error, + duration_ms, client_ip, user_agent + ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?) + `, log.Timestamp, log.User, log.Action, log.ResourceType, log.ResourceID, log.Namespace, + log.Method, log.Path, log.StatusCode, string(requestJSON), string(responseJSON), log.Error, + log.DurationMs, log.ClientIP, log.UserAgent) + + return err +} + +func (s *Storage) QueryAuditLogs(ctx context.Context, opts AuditQueryOptions) ([]*AuditLog, error) { + query := ` + SELECT id, timestamp, user, action, resource_type, resource_id, namespace, + method, path, status_code, request_body, response_body, error, + duration_ms, client_ip, user_agent + FROM audit_logs + WHERE 1=1 + ` + args := []interface{}{} + + if opts.Namespace != "" { + query += " AND namespace = ?" + args = append(args, opts.Namespace) + } + if opts.User != "" { + query += " AND user = ?" + args = append(args, opts.User) + } + if !opts.StartTime.IsZero() { + query += " AND timestamp >= ?" 
+ args = append(args, opts.StartTime) + } + if !opts.EndTime.IsZero() { + query += " AND timestamp <= ?" + args = append(args, opts.EndTime) + } + + query += " ORDER BY timestamp DESC" + + if opts.Limit > 0 { + query += " LIMIT ?" + args = append(args, opts.Limit) + } + + rows, err := s.db.QueryContext(ctx, query, args...) + if err != nil { + return nil, fmt.Errorf("failed to query audit logs: %w", err) + } + defer rows.Close() + + var logs []*AuditLog + for rows.Next() { + var log AuditLog + var requestStr, responseStr string + + if err := rows.Scan(&log.ID, &log.Timestamp, &log.User, &log.Action, &log.ResourceType, + &log.ResourceID, &log.Namespace, &log.Method, &log.Path, &log.StatusCode, + &requestStr, &responseStr, &log.Error, &log.DurationMs, &log.ClientIP, &log.UserAgent); err != nil { + return nil, fmt.Errorf("failed to scan audit log: %w", err) + } + + if requestStr != "" { + log.RequestBody = json.RawMessage(requestStr) + } + if responseStr != "" { + log.ResponseBody = json.RawMessage(responseStr) + } + + logs = append(logs, &log) + } + + return logs, rows.Err() +} + +// AuditQueryOptions specifies filters for querying audit logs +type AuditQueryOptions struct { + Namespace string + User string + StartTime time.Time + EndTime time.Time + Limit int +} diff --git a/cmd/prism-admin/storage_test.go b/cmd/prism-admin/storage_test.go new file mode 100644 index 000000000..8c7e9853d --- /dev/null +++ b/cmd/prism-admin/storage_test.go @@ -0,0 +1,266 @@ +package main + +import ( + "context" + "os" + "path/filepath" + "testing" + "time" +) + +func TestStorageInitialization(t *testing.T) { + // Create temp directory + tmpDir := t.TempDir() + dbPath := filepath.Join(tmpDir, "test.db") + + cfg := &DatabaseConfig{ + Type: "sqlite", + Path: dbPath, + } + + ctx := context.Background() + storage, err := NewStorage(ctx, cfg) + if err != nil { + t.Fatalf("Failed to create storage: %v", err) + } + defer storage.Close() + + // Verify database file was created + if _, err := 
os.Stat(dbPath); os.IsNotExist(err) { + t.Errorf("Database file was not created at %s", dbPath) + } +} + +func TestNamespaceOperations(t *testing.T) { + tmpDir := t.TempDir() + cfg := &DatabaseConfig{ + Type: "sqlite", + Path: filepath.Join(tmpDir, "test.db"), + } + + ctx := context.Background() + storage, err := NewStorage(ctx, cfg) + if err != nil { + t.Fatalf("Failed to create storage: %v", err) + } + defer storage.Close() + + // Create namespace + ns := &Namespace{ + Name: "test-namespace", + Description: "Test namespace description", + } + + if err := storage.CreateNamespace(ctx, ns); err != nil { + t.Fatalf("Failed to create namespace: %v", err) + } + + if ns.ID == 0 { + t.Error("Namespace ID was not set after creation") + } + + // Get namespace + retrieved, err := storage.GetNamespace(ctx, "test-namespace") + if err != nil { + t.Fatalf("Failed to get namespace: %v", err) + } + + if retrieved.Name != ns.Name { + t.Errorf("Expected name %s, got %s", ns.Name, retrieved.Name) + } + + if retrieved.Description != ns.Description { + t.Errorf("Expected description %s, got %s", ns.Description, retrieved.Description) + } + + // List namespaces + namespaces, err := storage.ListNamespaces(ctx) + if err != nil { + t.Fatalf("Failed to list namespaces: %v", err) + } + + if len(namespaces) != 1 { + t.Errorf("Expected 1 namespace, got %d", len(namespaces)) + } +} + +func TestProxyOperations(t *testing.T) { + tmpDir := t.TempDir() + cfg := &DatabaseConfig{ + Type: "sqlite", + Path: filepath.Join(tmpDir, "test.db"), + } + + ctx := context.Background() + storage, err := NewStorage(ctx, cfg) + if err != nil { + t.Fatalf("Failed to create storage: %v", err) + } + defer storage.Close() + + now := time.Now() + proxy := &Proxy{ + ProxyID: "proxy-001", + Address: "localhost:8980", + Version: "0.1.0", + Status: "healthy", + LastSeen: &now, + } + + // Upsert proxy + if err := storage.UpsertProxy(ctx, proxy); err != nil { + t.Fatalf("Failed to upsert proxy: %v", err) + } + + // Get 
proxy + retrieved, err := storage.GetProxy(ctx, "proxy-001") + if err != nil { + t.Fatalf("Failed to get proxy: %v", err) + } + + if retrieved.ProxyID != proxy.ProxyID { + t.Errorf("Expected proxy_id %s, got %s", proxy.ProxyID, retrieved.ProxyID) + } + + if retrieved.Status != proxy.Status { + t.Errorf("Expected status %s, got %s", proxy.Status, retrieved.Status) + } + + // Update proxy status + proxy.Status = "unhealthy" + if err := storage.UpsertProxy(ctx, proxy); err != nil { + t.Fatalf("Failed to update proxy: %v", err) + } + + updated, err := storage.GetProxy(ctx, "proxy-001") + if err != nil { + t.Fatalf("Failed to get updated proxy: %v", err) + } + + if updated.Status != "unhealthy" { + t.Errorf("Expected status unhealthy, got %s", updated.Status) + } + + // List proxies + proxies, err := storage.ListProxies(ctx) + if err != nil { + t.Fatalf("Failed to list proxies: %v", err) + } + + if len(proxies) != 1 { + t.Errorf("Expected 1 proxy, got %d", len(proxies)) + } +} + +func TestAuditLogOperations(t *testing.T) { + tmpDir := t.TempDir() + cfg := &DatabaseConfig{ + Type: "sqlite", + Path: filepath.Join(tmpDir, "test.db"), + } + + ctx := context.Background() + storage, err := NewStorage(ctx, cfg) + if err != nil { + t.Fatalf("Failed to create storage: %v", err) + } + defer storage.Close() + + // Log audit entry + log := &AuditLog{ + Timestamp: time.Now(), + User: "admin", + Action: "CREATE_NAMESPACE", + ResourceType: "namespace", + ResourceID: "test-ns", + Namespace: "test-ns", + Method: "POST", + Path: "/api/namespaces", + StatusCode: 201, + DurationMs: 15, + ClientIP: "127.0.0.1", + UserAgent: "prism-admin/0.1.0", + } + + if err := storage.LogAudit(ctx, log); err != nil { + t.Fatalf("Failed to log audit entry: %v", err) + } + + // Query audit logs + logs, err := storage.QueryAuditLogs(ctx, AuditQueryOptions{ + Limit: 10, + }) + if err != nil { + t.Fatalf("Failed to query audit logs: %v", err) + } + + if len(logs) != 1 { + t.Errorf("Expected 1 audit log, got 
%d", len(logs)) + } + + if logs[0].User != "admin" { + t.Errorf("Expected user admin, got %s", logs[0].User) + } + + // Query by namespace + logs, err = storage.QueryAuditLogs(ctx, AuditQueryOptions{ + Namespace: "test-ns", + Limit: 10, + }) + if err != nil { + t.Fatalf("Failed to query audit logs by namespace: %v", err) + } + + if len(logs) != 1 { + t.Errorf("Expected 1 audit log for namespace, got %d", len(logs)) + } +} + +func TestParseDatabaseURN(t *testing.T) { + tests := []struct { + name string + urn string + wantErr bool + dbType string + }{ + { + name: "empty URN uses default", + urn: "", + dbType: "sqlite", + }, + { + name: "sqlite with relative path", + urn: "sqlite://test.db", + dbType: "sqlite", + }, + { + name: "sqlite with absolute path", + urn: "sqlite:///tmp/test.db", + dbType: "sqlite", + }, + { + name: "postgresql URN", + urn: "postgresql://user:pass@localhost:5432/prism", + dbType: "postgresql", + }, + { + name: "unsupported URN", + urn: "mysql://localhost", + wantErr: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + cfg, err := ParseDatabaseURN(tt.urn) + if (err != nil) != tt.wantErr { + t.Errorf("ParseDatabaseURN() error = %v, wantErr %v", err, tt.wantErr) + return + } + + if !tt.wantErr && cfg.Type != tt.dbType { + t.Errorf("Expected type %s, got %s", tt.dbType, cfg.Type) + } + }) + } +} diff --git a/cmd/prismctl/cmd/admin.go b/cmd/prismctl/cmd/admin.go new file mode 100644 index 000000000..39f4c4c95 --- /dev/null +++ b/cmd/prismctl/cmd/admin.go @@ -0,0 +1,242 @@ +package cmd + +import ( + "context" + "fmt" + "time" + + "github.com/spf13/cobra" +) + +func init() { + rootCmd.AddCommand(adminCmd) + adminCmd.AddCommand(adminStatusCmd) +} + +// adminCmd represents the admin command +var adminCmd = &cobra.Command{ + Use: "admin", + Short: "Interact with prism-admin control plane", + Long: `Commands for interacting with the prism-admin control plane. 
+ +View status, inspect resources, and manage the admin server.`, +} + +// adminStatusCmd shows the status of admin resources +var adminStatusCmd = &cobra.Command{ + Use: "status", + Short: "Show admin control plane status and resources", + Long: `Display a comprehensive view of the prism-admin control plane including: +- Namespaces +- Proxy connections +- Launcher connections +- Recent audit logs + +Example: + prismctl admin status + prismctl admin status --endpoint localhost:8981`, + RunE: func(cmd *cobra.Command, args []string) error { + endpoint := cmd.Flag("endpoint").Value.String() + if endpoint == "" { + endpoint = "localhost:8981" + } + return showAdminStatus(endpoint) + }, +} + +func init() { + adminStatusCmd.Flags().StringP("endpoint", "e", "localhost:8981", "Admin control plane endpoint") +} + +// showAdminStatus displays the admin control plane status +func showAdminStatus(endpoint string) error { + fmt.Printf("🔍 Prism Admin Control Plane Status\n") + fmt.Printf(" Endpoint: %s\n\n", endpoint) + + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + // Connect to admin's storage directly for now + // In a production system, this would be a gRPC Admin API call + + // For POC, we'll read directly from the SQLite database + storage, err := connectToAdminStorage() + if err != nil { + return fmt.Errorf("failed to connect to admin storage: %w", err) + } + defer storage.Close() + + // Display Namespaces + fmt.Println("📦 Namespaces") + namespaces, err := storage.ListNamespaces(ctx) + if err != nil { + fmt.Printf(" ⚠️ Error listing namespaces: %v\n", err) + } else if len(namespaces) == 0 { + fmt.Println(" (none)") + } else { + for _, ns := range namespaces { + fmt.Printf(" • %s\n", ns.Name) + if ns.Description != "" { + fmt.Printf(" Description: %s\n", ns.Description) + } + fmt.Printf(" Created: %s\n", ns.CreatedAt.Format("2006-01-02 15:04:05")) + } + } + fmt.Println() + + // Display Proxies + fmt.Println("🔌 Proxy 
Connections") + proxies, err := storage.ListProxies(ctx) + if err != nil { + fmt.Printf(" ⚠️ Error listing proxies: %v\n", err) + } else if len(proxies) == 0 { + fmt.Println(" (none)") + } else { + for _, proxy := range proxies { + statusIcon := "✅" + if proxy.Status != "healthy" { + statusIcon = "❌" + } + + lastSeenStr := "never" + if proxy.LastSeen != nil { + elapsed := time.Since(*proxy.LastSeen) + if elapsed < time.Minute { + lastSeenStr = fmt.Sprintf("%ds ago", int(elapsed.Seconds())) + } else if elapsed < time.Hour { + lastSeenStr = fmt.Sprintf("%dm ago", int(elapsed.Minutes())) + } else { + lastSeenStr = proxy.LastSeen.Format("2006-01-02 15:04:05") + } + } + + fmt.Printf(" %s %s (%s)\n", statusIcon, proxy.ProxyID, proxy.Status) + fmt.Printf(" Address: %s\n", proxy.Address) + fmt.Printf(" Version: %s\n", proxy.Version) + fmt.Printf(" Last Seen: %s\n", lastSeenStr) + } + } + fmt.Println() + + // Display Launchers + fmt.Println("🚀 Launcher Connections") + launchers, err := storage.ListLaunchers(ctx) + if err != nil { + fmt.Printf(" ⚠️ Error listing launchers: %v\n", err) + } else if len(launchers) == 0 { + fmt.Println(" (none)") + } else { + for _, launcher := range launchers { + statusIcon := "✅" + if launcher.Status != "healthy" { + statusIcon = "❌" + } + + lastSeenStr := "never" + if launcher.LastSeen != nil { + elapsed := time.Since(*launcher.LastSeen) + if elapsed < time.Minute { + lastSeenStr = fmt.Sprintf("%ds ago", int(elapsed.Seconds())) + } else if elapsed < time.Hour { + lastSeenStr = fmt.Sprintf("%dm ago", int(elapsed.Minutes())) + } else { + lastSeenStr = launcher.LastSeen.Format("2006-01-02 15:04:05") + } + } + + fmt.Printf(" %s %s (%s)\n", statusIcon, launcher.LauncherID, launcher.Status) + fmt.Printf(" Address: %s\n", launcher.Address) + fmt.Printf(" Region: %s\n", launcher.Region) + fmt.Printf(" Version: %s\n", launcher.Version) + fmt.Printf(" Capacity: %d/%d slots available\n", launcher.AvailableSlots, launcher.MaxProcesses) + fmt.Printf(" 
Last Seen: %s\n", lastSeenStr) + } + } + fmt.Println() + + // Display Recent Audit Logs + fmt.Println("📋 Recent Audit Logs (last 10)") + logs, err := storage.QueryAuditLogs(ctx, AuditQueryOptions{ + Limit: 10, + }) + if err != nil { + fmt.Printf(" ⚠️ Error listing audit logs: %v\n", err) + } else if len(logs) == 0 { + fmt.Println(" (none)") + } else { + for _, log := range logs { + timestamp := log.Timestamp.Format("15:04:05") + statusIcon := "✅" + if log.StatusCode >= 400 { + statusIcon = "❌" + } + + fmt.Printf(" %s [%s] %s %s\n", statusIcon, timestamp, log.Method, log.Path) + if log.User != "" { + fmt.Printf(" User: %s\n", log.User) + } + if log.Namespace != "" { + fmt.Printf(" Namespace: %s\n", log.Namespace) + } + if log.Error != "" { + fmt.Printf(" Error: %s\n", log.Error) + } + fmt.Printf(" Duration: %dms\n", log.DurationMs) + } + } + fmt.Println() + + // Display Summary + fmt.Println("📊 Summary Statistics") + totalNamespaces := len(namespaces) + totalProxies := len(proxies) + totalLaunchers := len(launchers) + healthyProxies := 0 + unhealthyProxies := 0 + healthyLaunchers := 0 + unhealthyLaunchers := 0 + totalLauncherSlots := int32(0) + availableLauncherSlots := int32(0) + + for _, p := range proxies { + if p.Status == "healthy" { + healthyProxies++ + } else { + unhealthyProxies++ + } + } + + for _, l := range launchers { + if l.Status == "healthy" { + healthyLaunchers++ + } else { + unhealthyLaunchers++ + } + totalLauncherSlots += l.MaxProcesses + availableLauncherSlots += l.AvailableSlots + } + + fmt.Printf(" Namespaces: %d\n", totalNamespaces) + fmt.Printf(" Proxies: %d total (%d healthy, %d unhealthy)\n", totalProxies, healthyProxies, unhealthyProxies) + fmt.Printf(" Launchers: %d total (%d healthy, %d unhealthy)\n", totalLaunchers, healthyLaunchers, unhealthyLaunchers) + if totalLaunchers > 0 { + utilizationPct := float64(totalLauncherSlots-availableLauncherSlots) / float64(totalLauncherSlots) * 100 + fmt.Printf(" Capacity: %d/%d slots used (%.1f%% 
utilization)\n", + totalLauncherSlots-availableLauncherSlots, totalLauncherSlots, utilizationPct) + } + fmt.Println() + + fmt.Println("✅ Status check complete") + return nil +} + +// connectToAdminStorage connects directly to the admin SQLite database +func connectToAdminStorage() (*Storage, error) { + // Default admin database path + cfg := &DatabaseConfig{ + Type: "sqlite", + Path: defaultDatabasePath(), + } + + return NewStorage(context.Background(), cfg) +} diff --git a/cmd/prismctl/cmd/local.go b/cmd/prismctl/cmd/local.go index b19730bec..ae0a53dd4 100644 --- a/cmd/prismctl/cmd/local.go +++ b/cmd/prismctl/cmd/local.go @@ -7,9 +7,13 @@ import ( "os" "os/exec" "path/filepath" + "strings" "time" + pb "github.com/jrepp/prism-data-layer/pkg/plugin/gen/prism" "github.com/spf13/cobra" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" ) var ( @@ -23,6 +27,21 @@ func init() { localCmd.AddCommand(localStopCmd) localCmd.AddCommand(localStatusCmd) localCmd.AddCommand(localLogsCmd) + localCmd.AddCommand(localNamespaceCmd) +} + +// localNamespaceCmd provisions a namespace via control plane +var localNamespaceCmd = &cobra.Command{ + Use: "namespace [name]", + Short: "Provision a namespace via the control plane", + Long: `Provision a namespace by sending a CreateNamespace request through the control plane. 
+ +Example: + prismctl local namespace $admin-logs`, + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + return provisionNamespace(args[0]) + }, } // localCmd represents the local command @@ -119,6 +138,18 @@ func startLocalStack() error { return fmt.Errorf("failed to create logs directory: %w", err) } + // Convert binDir to absolute path + absBinDir, err := filepath.Abs(binDir) + if err != nil { + return fmt.Errorf("failed to get absolute path for binaries directory: %w", err) + } + + // Find patterns directory (should be at project root) + patternsDir := filepath.Join(absBinDir, "..", "..", "patterns") + if _, err := os.Stat(patternsDir); os.IsNotExist(err) { + return fmt.Errorf("patterns directory not found at %s", patternsDir) + } + // Start components in order components := []struct { name string @@ -129,35 +160,21 @@ func startLocalStack() error { }{ { name: "prism-admin", - binary: filepath.Join(binDir, "prism-admin"), - args: []string{"serve", "--port=8080"}, + binary: filepath.Join(absBinDir, "prism-admin"), + args: []string{"serve", "--port=8981"}, logFile: filepath.Join(logsDir, "admin.log"), delay: 2 * time.Second, }, - { - name: "prism-proxy-1", - binary: filepath.Join(binDir, "prism-proxy"), - args: []string{"--admin-addr=localhost:8080", "--control-port=9090", "--data-port=19090"}, - logFile: filepath.Join(logsDir, "proxy1.log"), - delay: 2 * time.Second, - }, - { - name: "prism-proxy-2", - binary: filepath.Join(binDir, "prism-proxy"), - args: []string{"--admin-addr=localhost:8080", "--control-port=9091", "--data-port=19091"}, - logFile: filepath.Join(logsDir, "proxy2.log"), - delay: 2 * time.Second, - }, { name: "pattern-launcher", - binary: filepath.Join(binDir, "pattern-launcher"), - args: []string{"--admin-addr=localhost:8080", "--listen=:7070"}, + binary: filepath.Join(absBinDir, "pattern-launcher"), + args: []string{"--admin-endpoint=localhost:8981", "--launcher-id=launcher-01", "--grpc-port=7070", 
"--patterns-dir=" + patternsDir}, logFile: filepath.Join(logsDir, "launcher.log"), delay: 2 * time.Second, }, { name: "keyvalue-runner", - binary: filepath.Join(binDir, "keyvalue-runner"), + binary: filepath.Join(absBinDir, "keyvalue-runner"), args: []string{"--proxy-addr=localhost:9090"}, logFile: filepath.Join(logsDir, "keyvalue.log"), delay: 1 * time.Second, @@ -207,10 +224,8 @@ func startLocalStack() error { fmt.Printf("\n✅ Local Prism stack started successfully!\n\n") fmt.Println("📊 Stack Overview:") - fmt.Println(" • Admin UI: http://localhost:8080") - fmt.Println(" • Proxy 1 (CP): localhost:9090") - fmt.Println(" • Proxy 2 (CP): localhost:9091") - fmt.Println(" • Pattern Launcher: localhost:7070") + fmt.Println(" • Admin Control Plane: localhost:8981") + fmt.Println(" • Pattern Launcher: localhost:7070") fmt.Println(" • KeyValue: Ready (MemStore backend)") fmt.Println() fmt.Println("📝 View logs: prismctl local logs [component]") @@ -230,7 +245,7 @@ func stopLocalStack() error { fmt.Println("🛑 Stopping local Prism stack...") - components := []string{"keyvalue-runner", "pattern-launcher", "prism-proxy-2", "prism-proxy-1", "prism-admin"} + components := []string{"keyvalue-runner", "pattern-launcher", "prism-admin"} for _, comp := range components { pidFile := filepath.Join(logsDir, fmt.Sprintf("%s.pid", comp)) @@ -281,7 +296,7 @@ func showLocalStackStatus() error { fmt.Println("📊 Local Prism Stack Status") - components := []string{"prism-admin", "prism-proxy-1", "prism-proxy-2", "pattern-launcher", "keyvalue-runner"} + components := []string{"prism-admin", "pattern-launcher", "keyvalue-runner"} for _, comp := range components { pidFile := filepath.Join(logsDir, fmt.Sprintf("%s.pid", comp)) @@ -399,3 +414,86 @@ func isInBinariesDir(dir string) bool { } return true } + +// provisionNamespace creates a namespace via the control plane +func provisionNamespace(namespace string) error { + fmt.Printf("📦 Provisioning namespace: %s\n", namespace) + + // Connect to admin 
control plane + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + conn, err := grpc.NewClient( + "localhost:8981", + grpc.WithTransportCredentials(insecure.NewCredentials()), + ) + if err != nil { + return fmt.Errorf("failed to connect to admin: %w", err) + } + defer conn.Close() + + client := pb.NewControlPlaneClient(conn) + + // Send CreateNamespace request + req := &pb.CreateNamespaceRequest{ + Namespace: namespace, + RequestingProxy: "prismctl-local", + Principal: "local-user", + Config: &pb.NamespaceConfig{ + Backends: map[string]*pb.BackendConfig{ + "memstore": { + BackendType: "memstore", + ConnectionString: "memory://local", + Credentials: map[string]string{}, + Options: map[string]string{}, + }, + }, + Patterns: map[string]*pb.PatternConfig{ + "keyvalue": { + PatternName: "keyvalue", + Settings: map[string]string{}, + RequiredInterfaces: []string{"KeyValue"}, + }, + }, + Auth: &pb.AuthConfig{Enabled: false}, + Metadata: map[string]string{"source": "prismctl-local"}, + }, + } + + resp, err := client.CreateNamespace(ctx, req) + if err != nil { + // Improve error messages for common issues + if strings.Contains(err.Error(), "no proxy assigned to partition") { + fmt.Printf("\n❌ Namespace creation failed\n") + fmt.Printf(" Error: No proxy is available to handle this namespace\n") + fmt.Printf(" Namespace: %s\n", namespace) + fmt.Printf("\n") + fmt.Printf(" This typically means:\n") + fmt.Printf(" • No prism-proxy instances are running\n") + fmt.Printf(" • No proxy has registered with the admin control plane\n") + fmt.Printf("\n") + fmt.Printf(" To fix:\n") + fmt.Printf(" 1. Start a prism-proxy instance\n") + fmt.Printf(" 2. Ensure it connects to admin at localhost:8981\n") + fmt.Printf(" 3. 
Retry namespace creation\n") + fmt.Printf("\n") + return fmt.Errorf("no proxy available") + } + return fmt.Errorf("failed to create namespace: %w", err) + } + + if !resp.Success { + return fmt.Errorf("namespace creation rejected: %s", resp.Message) + } + + fmt.Printf("\n━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\n") + fmt.Printf("✅ Namespace Created Successfully\n") + fmt.Printf("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\n") + fmt.Printf(" Namespace: %s\n", namespace) + fmt.Printf(" Partition: %d\n", resp.AssignedPartition) + fmt.Printf(" Proxy: %s\n", resp.AssignedProxy) + fmt.Printf(" Message: %s\n", resp.Message) + fmt.Printf("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\n\n") + + return nil +} diff --git a/cmd/prismctl/cmd/mailbox.go b/cmd/prismctl/cmd/mailbox.go new file mode 100644 index 000000000..d92239521 --- /dev/null +++ b/cmd/prismctl/cmd/mailbox.go @@ -0,0 +1,319 @@ +package cmd + +import ( + "context" + "encoding/base64" + "encoding/json" + "fmt" + "time" + + "github.com/jrepp/prism-data-layer/prismctl/internal/client" + "github.com/spf13/cobra" +) + +var mailboxCmd = &cobra.Command{ + Use: "mailbox", + Short: "Query messages from mailbox namespaces", + Long: `Query and retrieve messages stored in mailbox pattern namespaces. + +The mailbox pattern stores messages with indexed headers and blob bodies, +enabling efficient searching by metadata while keeping encrypted payloads opaque.`, +} + +var mailboxFlags struct { + limit int + offset int + startTime string + endTime string + topics []string + principals []string + correlationID string + showPayload bool + format string +} + +var mailboxQueryCmd = &cobra.Command{ + Use: "query NAMESPACE", + Short: "Query messages from a mailbox", + Long: `Query messages from a mailbox namespace using filters. 
+ +Examples: + # Query recent messages + prismctl mailbox query my-mailbox --limit 10 + + # Query by time range + prismctl mailbox query my-mailbox \ + --start-time "2025-10-15T00:00:00Z" \ + --end-time "2025-10-15T23:59:59Z" + + # Query by topic + prismctl mailbox query my-mailbox --topic "admin.users.*" + + # Query by principal + prismctl mailbox query my-mailbox --principal "user@example.com" + + # Query with correlation ID (trace) + prismctl mailbox query my-mailbox --correlation-id "trace-abc-123" + + # Show full payload + prismctl mailbox query my-mailbox --limit 5 --show-payload`, + Args: cobra.ExactArgs(1), + RunE: runMailboxQuery, +} + +var mailboxGetCmd = &cobra.Command{ + Use: "get NAMESPACE MESSAGE_ID", + Short: "Get a single message by ID", + Long: `Retrieve a single message from the mailbox by its message ID.`, + Args: cobra.ExactArgs(2), + RunE: runMailboxGet, +} + +func init() { + rootCmd.AddCommand(mailboxCmd) + mailboxCmd.AddCommand(mailboxQueryCmd) + mailboxCmd.AddCommand(mailboxGetCmd) + + // Query flags + mailboxQueryCmd.Flags().IntVarP(&mailboxFlags.limit, "limit", "l", 10, "Maximum number of messages to return") + mailboxQueryCmd.Flags().IntVar(&mailboxFlags.offset, "offset", 0, "Offset for pagination") + mailboxQueryCmd.Flags().StringVar(&mailboxFlags.startTime, "start-time", "", "Start time filter (RFC3339 format)") + mailboxQueryCmd.Flags().StringVar(&mailboxFlags.endTime, "end-time", "", "End time filter (RFC3339 format)") + mailboxQueryCmd.Flags().StringSliceVarP(&mailboxFlags.topics, "topic", "t", []string{}, "Topic filters") + mailboxQueryCmd.Flags().StringSliceVarP(&mailboxFlags.principals, "principal", "p", []string{}, "Principal filters") + mailboxQueryCmd.Flags().StringVar(&mailboxFlags.correlationID, "correlation-id", "", "Correlation ID filter") + mailboxQueryCmd.Flags().BoolVar(&mailboxFlags.showPayload, "show-payload", false, "Show message payload") + mailboxQueryCmd.Flags().StringVar(&mailboxFlags.format, "format", "table", 
"Output format (table, json)") + + // Get flags + mailboxGetCmd.Flags().BoolVar(&mailboxFlags.showPayload, "show-payload", true, "Show message payload") + mailboxGetCmd.Flags().StringVar(&mailboxFlags.format, "format", "table", "Output format (table, json)") +} + +func runMailboxQuery(cmd *cobra.Command, args []string) error { + token, err := loadAndValidateToken() + if err != nil { + return err + } + + namespace := args[0] + + // Build filter + filter := make(map[string]interface{}) + + if mailboxFlags.limit > 0 { + filter["limit"] = mailboxFlags.limit + } + if mailboxFlags.offset > 0 { + filter["offset"] = mailboxFlags.offset + } + + // Parse time filters + if mailboxFlags.startTime != "" { + t, err := time.Parse(time.RFC3339, mailboxFlags.startTime) + if err != nil { + uiInstance.Error(fmt.Sprintf("Invalid start-time format: %v", err)) + return err + } + filter["start_time"] = t.UnixMilli() + } + if mailboxFlags.endTime != "" { + t, err := time.Parse(time.RFC3339, mailboxFlags.endTime) + if err != nil { + uiInstance.Error(fmt.Sprintf("Invalid end-time format: %v", err)) + return err + } + filter["end_time"] = t.UnixMilli() + } + + if len(mailboxFlags.topics) > 0 { + filter["topics"] = mailboxFlags.topics + } + if len(mailboxFlags.principals) > 0 { + filter["principals"] = mailboxFlags.principals + } + if mailboxFlags.correlationID != "" { + filter["correlation_id"] = mailboxFlags.correlationID + } + + // Create client + c := client.NewClient(&cfg.Proxy, token) + + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + uiInstance.Info(fmt.Sprintf("Querying mailbox '%s'", namespace)) + + events, err := c.QueryMailbox(ctx, namespace, filter) + if err != nil { + uiInstance.Error(fmt.Sprintf("Failed to query mailbox: %v", err)) + return err + } + + if len(events) == 0 { + uiInstance.Info("No messages found") + return nil + } + + // Output based on format + if mailboxFlags.format == "json" { + output, err := 
json.MarshalIndent(events, "", " ") + if err != nil { + return err + } + fmt.Println(string(output)) + return nil + } + + // Table format + uiInstance.Success(fmt.Sprintf("Found %d message(s)", len(events))) + uiInstance.Println("") + + for i, event := range events { + displayEvent(event, i+1, mailboxFlags.showPayload) + if i < len(events)-1 { + uiInstance.Println("---") + } + } + + return nil +} + +func runMailboxGet(cmd *cobra.Command, args []string) error { + token, err := loadAndValidateToken() + if err != nil { + return err + } + + namespace := args[0] + messageID := args[1] + + // Create client + c := client.NewClient(&cfg.Proxy, token) + + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + uiInstance.Info(fmt.Sprintf("Retrieving message '%s' from mailbox '%s'", messageID, namespace)) + + event, err := c.GetMailboxEvent(ctx, namespace, messageID) + if err != nil { + uiInstance.Error(fmt.Sprintf("Failed to get message: %v", err)) + return err + } + + // Output based on format + if mailboxFlags.format == "json" { + output, err := json.MarshalIndent(event, "", " ") + if err != nil { + return err + } + fmt.Println(string(output)) + return nil + } + + // Table format + uiInstance.Success("Message found") + uiInstance.Println("") + displayEvent(event, 0, mailboxFlags.showPayload) + + return nil +} + +func displayEvent(event map[string]interface{}, index int, showPayload bool) { + if index > 0 { + uiInstance.Header(fmt.Sprintf("Message #%d", index)) + } + + // Display indexed headers + if messageID, ok := event["message_id"].(string); ok { + uiInstance.KeyValue("Message ID", messageID) + } + + if timestamp, ok := event["timestamp"].(float64); ok { + t := time.UnixMilli(int64(timestamp)) + uiInstance.KeyValue("Timestamp", t.Format(time.RFC3339)) + } + + if topic, ok := event["topic"].(string); ok { + uiInstance.KeyValue("Topic", topic) + } + + if contentType, ok := event["content_type"].(string); ok && contentType != "" { + 
uiInstance.KeyValue("Content Type", contentType) + } + + if schemaID, ok := event["schema_id"].(string); ok && schemaID != "" { + uiInstance.KeyValue("Schema ID", schemaID) + } + + if encryption, ok := event["encryption"].(string); ok && encryption != "" { + uiInstance.KeyValue("Encryption", encryption) + } + + if correlationID, ok := event["correlation_id"].(string); ok && correlationID != "" { + uiInstance.KeyValue("Correlation ID", correlationID) + } + + if principal, ok := event["principal"].(string); ok && principal != "" { + uiInstance.KeyValue("Principal", principal) + } + + if namespace, ok := event["namespace"].(string); ok && namespace != "" { + uiInstance.KeyValue("Namespace", namespace) + } + + // Display custom headers + if customHeaders, ok := event["custom_headers"].(map[string]interface{}); ok && len(customHeaders) > 0 { + uiInstance.Println("") + uiInstance.Subtle("Custom Headers:") + for k, v := range customHeaders { + uiInstance.Subtle(fmt.Sprintf(" %s: %v", k, v)) + } + } + + // Display payload if requested + if showPayload { + if body, ok := event["body"].(string); ok && body != "" { + uiInstance.Println("") + + // Check if body is base64 encoded + if decoded, err := base64.StdEncoding.DecodeString(body); err == nil { + body = string(decoded) + } + + // Try to format as JSON + if isJSON([]byte(body)) { + var prettyJSON interface{} + if err := json.Unmarshal([]byte(body), &prettyJSON); err == nil { + formatted, err := json.MarshalIndent(prettyJSON, "", " ") + if err == nil { + uiInstance.Subtle("Payload:") + uiInstance.Println(string(formatted)) + } else { + uiInstance.Subtle("Payload:") + displayTruncatedPayload(body) + } + } + } else { + uiInstance.Subtle("Payload:") + displayTruncatedPayload(body) + } + } + } else { + if body, ok := event["body"].(string); ok && body != "" { + size := len(body) + uiInstance.Subtle(fmt.Sprintf("Payload: %d bytes (use --show-payload to display)", size)) + } + } +} + +func displayTruncatedPayload(payload string) 
{ + maxLen := 500 + if len(payload) > maxLen { + uiInstance.Println(payload[:maxLen] + "...") + uiInstance.Subtle(fmt.Sprintf("(truncated, %d bytes total)", len(payload))) + } else { + uiInstance.Println(payload) + } +} diff --git a/cmd/prismctl/cmd/migrations/000001_initial_schema.down.sql b/cmd/prismctl/cmd/migrations/000001_initial_schema.down.sql new file mode 100644 index 000000000..1f6bc1813 --- /dev/null +++ b/cmd/prismctl/cmd/migrations/000001_initial_schema.down.sql @@ -0,0 +1,22 @@ +-- Rollback initial schema + +DROP INDEX IF EXISTS idx_audit_logs_user; +DROP INDEX IF EXISTS idx_audit_logs_action; +DROP INDEX IF EXISTS idx_audit_logs_resource; +DROP INDEX IF EXISTS idx_audit_logs_namespace; +DROP INDEX IF EXISTS idx_audit_logs_timestamp; +DROP TABLE IF EXISTS audit_logs; + +DROP INDEX IF EXISTS idx_patterns_pattern_id; +DROP INDEX IF EXISTS idx_patterns_proxy; +DROP INDEX IF EXISTS idx_patterns_namespace; +DROP TABLE IF EXISTS patterns; + +DROP INDEX IF EXISTS idx_proxies_status; +DROP INDEX IF EXISTS idx_proxies_proxy_id; +DROP TABLE IF EXISTS proxies; + +DROP INDEX IF EXISTS idx_namespaces_name; +DROP TABLE IF EXISTS namespaces; + +DROP TABLE IF EXISTS schema_version; diff --git a/cmd/prismctl/cmd/migrations/000001_initial_schema.up.sql b/cmd/prismctl/cmd/migrations/000001_initial_schema.up.sql new file mode 100644 index 000000000..b4a752048 --- /dev/null +++ b/cmd/prismctl/cmd/migrations/000001_initial_schema.up.sql @@ -0,0 +1,83 @@ +-- Initial schema for prism-admin storage + +-- Namespaces table +CREATE TABLE IF NOT EXISTS namespaces ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + name TEXT NOT NULL UNIQUE, + description TEXT, + created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP, + updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP, + metadata TEXT -- JSON stored as TEXT for SQLite compatibility +); + +CREATE INDEX IF NOT EXISTS idx_namespaces_name ON namespaces(name); + +-- Proxies table (last known state) +CREATE TABLE IF NOT EXISTS proxies ( + id INTEGER 
PRIMARY KEY AUTOINCREMENT, + proxy_id TEXT NOT NULL UNIQUE, + address TEXT NOT NULL, + version TEXT, + status TEXT CHECK(status IN ('healthy', 'unhealthy', 'unknown')) NOT NULL DEFAULT 'unknown', + last_seen TIMESTAMP, + created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP, + updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP, + metadata TEXT -- JSON stored as TEXT +); + +CREATE INDEX IF NOT EXISTS idx_proxies_proxy_id ON proxies(proxy_id); +CREATE INDEX IF NOT EXISTS idx_proxies_status ON proxies(status, last_seen); + +-- Patterns table (active connections) +CREATE TABLE IF NOT EXISTS patterns ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + pattern_id TEXT NOT NULL, + pattern_type TEXT NOT NULL, + proxy_id TEXT NOT NULL, + namespace TEXT NOT NULL, + status TEXT CHECK(status IN ('active', 'stopped', 'error')) NOT NULL DEFAULT 'active', + config TEXT, -- JSON stored as TEXT + created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP, + updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP, + FOREIGN KEY (proxy_id) REFERENCES proxies(proxy_id) ON DELETE CASCADE, + FOREIGN KEY (namespace) REFERENCES namespaces(name) ON DELETE CASCADE +); + +CREATE INDEX IF NOT EXISTS idx_patterns_namespace ON patterns(namespace); +CREATE INDEX IF NOT EXISTS idx_patterns_proxy ON patterns(proxy_id); +CREATE INDEX IF NOT EXISTS idx_patterns_pattern_id ON patterns(pattern_id); + +-- Audit log table +CREATE TABLE IF NOT EXISTS audit_logs ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + timestamp TIMESTAMP DEFAULT CURRENT_TIMESTAMP, + user TEXT, + action TEXT NOT NULL, + resource_type TEXT NOT NULL, + resource_id TEXT, + namespace TEXT, + method TEXT, + path TEXT, + status_code INTEGER, + request_body TEXT, -- JSON stored as TEXT + response_body TEXT, -- JSON stored as TEXT + error TEXT, + duration_ms INTEGER, + client_ip TEXT, + user_agent TEXT +); + +CREATE INDEX IF NOT EXISTS idx_audit_logs_timestamp ON audit_logs(timestamp); +CREATE INDEX IF NOT EXISTS idx_audit_logs_namespace ON audit_logs(namespace); +CREATE INDEX IF 
NOT EXISTS idx_audit_logs_resource ON audit_logs(resource_type, resource_id); +CREATE INDEX IF NOT EXISTS idx_audit_logs_action ON audit_logs(action); +CREATE INDEX IF NOT EXISTS idx_audit_logs_user ON audit_logs(user); + +-- Schema version tracking +CREATE TABLE IF NOT EXISTS schema_version ( + version INTEGER PRIMARY KEY, + applied_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP, + description TEXT +); + +INSERT INTO schema_version (version, description) VALUES (1, 'Initial schema with namespaces, proxies, patterns, and audit logs'); diff --git a/cmd/prismctl/cmd/migrations/000002_add_launchers.down.sql b/cmd/prismctl/cmd/migrations/000002_add_launchers.down.sql new file mode 100644 index 000000000..27460166b --- /dev/null +++ b/cmd/prismctl/cmd/migrations/000002_add_launchers.down.sql @@ -0,0 +1,8 @@ +-- Rollback launchers table + +DROP INDEX IF EXISTS idx_launchers_region; +DROP INDEX IF EXISTS idx_launchers_status; +DROP INDEX IF EXISTS idx_launchers_launcher_id; +DROP TABLE IF EXISTS launchers; + +DELETE FROM schema_version WHERE version = 2; diff --git a/cmd/prismctl/cmd/migrations/000002_add_launchers.up.sql b/cmd/prismctl/cmd/migrations/000002_add_launchers.up.sql new file mode 100644 index 000000000..cd19cdb15 --- /dev/null +++ b/cmd/prismctl/cmd/migrations/000002_add_launchers.up.sql @@ -0,0 +1,24 @@ +-- Add launchers table for tracking pattern launcher instances + +CREATE TABLE IF NOT EXISTS launchers ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + launcher_id TEXT NOT NULL UNIQUE, + address TEXT NOT NULL, + region TEXT, + version TEXT, + status TEXT CHECK(status IN ('healthy', 'unhealthy', 'unknown')) NOT NULL DEFAULT 'unknown', + max_processes INTEGER DEFAULT 0, + available_slots INTEGER DEFAULT 0, + capabilities TEXT, -- JSON array stored as TEXT + last_seen TIMESTAMP, + created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP, + updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP, + metadata TEXT -- JSON stored as TEXT +); + +CREATE INDEX IF NOT EXISTS 
idx_launchers_launcher_id ON launchers(launcher_id); +CREATE INDEX IF NOT EXISTS idx_launchers_status ON launchers(status, last_seen); +CREATE INDEX IF NOT EXISTS idx_launchers_region ON launchers(region); + +-- Update schema version +INSERT INTO schema_version (version, description) VALUES (2, 'Add launchers table for pattern launcher tracking'); diff --git a/cmd/prismctl/cmd/publish.go b/cmd/prismctl/cmd/publish.go new file mode 100644 index 000000000..a632a6fdb --- /dev/null +++ b/cmd/prismctl/cmd/publish.go @@ -0,0 +1,225 @@ +package cmd + +import ( + "context" + "encoding/json" + "fmt" + "os" + "time" + + "github.com/jrepp/prism-data-layer/prismctl/internal/client" + "github.com/spf13/cobra" +) + +var publishCmd = &cobra.Command{ + Use: "publish", + Short: "Publish messages to a namespace", + Long: `Publish messages to a namespace for testing purposes. + +Examples: + # Publish a simple text message + prismctl publish my-namespace my-topic "Hello, World!" + + # Publish JSON data + prismctl publish my-namespace my-topic '{"user": "alice", "action": "login"}' + + # Publish from a file + prismctl publish my-namespace my-topic --file message.json + + # Publish with custom headers + prismctl publish my-namespace my-topic "test data" \ + --header "x-user-id=123" \ + --header "x-trace-id=abc-def" + + # Publish with standard Prism headers + prismctl publish my-namespace my-topic "test data" \ + --content-type "application/json" \ + --correlation-id "trace-123" \ + --principal "user@example.com"`, +} + +var publishFlags struct { + file string + contentType string + correlationID string + principal string + schemaID string + encryption string + headers []string + count int +} + +var publishMessageCmd = &cobra.Command{ + Use: "message NAMESPACE TOPIC DATA", + Short: "Publish a message to a topic", + Long: `Publish a message to a topic in the specified namespace.`, + Args: cobra.RangeArgs(2, 3), + RunE: runPublishMessage, +} + +func init() { + 
rootCmd.AddCommand(publishCmd) + publishCmd.AddCommand(publishMessageCmd) + + // Message publishing flags + publishMessageCmd.Flags().StringVarP(&publishFlags.file, "file", "f", "", "Read message payload from file") + publishMessageCmd.Flags().StringVar(&publishFlags.contentType, "content-type", "text/plain", "Content type of the message") + publishMessageCmd.Flags().StringVar(&publishFlags.correlationID, "correlation-id", "", "Correlation ID for tracing") + publishMessageCmd.Flags().StringVar(&publishFlags.principal, "principal", "", "Principal (user/service) sending the message") + publishMessageCmd.Flags().StringVar(&publishFlags.schemaID, "schema-id", "", "Schema registry ID") + publishMessageCmd.Flags().StringVar(&publishFlags.encryption, "encryption", "", "Encryption algorithm used") + publishMessageCmd.Flags().StringSliceVarP(&publishFlags.headers, "header", "H", []string{}, "Custom headers (format: key=value)") + publishMessageCmd.Flags().IntVarP(&publishFlags.count, "count", "n", 1, "Number of messages to publish") +} + +func runPublishMessage(cmd *cobra.Command, args []string) error { + token, err := loadAndValidateToken() + if err != nil { + return err + } + + namespace := args[0] + topic := args[1] + + // Read message payload + var payload []byte + if publishFlags.file != "" { + // Read from file + payload, err = os.ReadFile(publishFlags.file) + if err != nil { + uiInstance.Error(fmt.Sprintf("Failed to read file: %v", err)) + return err + } + uiInstance.Info(fmt.Sprintf("Read %d bytes from %s", len(payload), publishFlags.file)) + } else { + // Use inline data + if len(args) < 3 { + uiInstance.Error("DATA argument is required when --file is not specified") + return fmt.Errorf("missing DATA argument") + } + payload = []byte(args[2]) + } + + // Build metadata + metadata := make(map[string]string) + + // Standard Prism headers + metadata["prism-content-type"] = publishFlags.contentType + if publishFlags.correlationID != "" { + 
metadata["prism-correlation-id"] = publishFlags.correlationID + } + if publishFlags.principal != "" { + metadata["prism-principal"] = publishFlags.principal + } + if publishFlags.schemaID != "" { + metadata["prism-schema-id"] = publishFlags.schemaID + } + if publishFlags.encryption != "" { + metadata["prism-encryption"] = publishFlags.encryption + } + metadata["prism-namespace"] = namespace + + // Custom headers + for _, header := range publishFlags.headers { + key, value, found := parseHeader(header) + if !found { + uiInstance.Warning(fmt.Sprintf("Skipping invalid header format: %s", header)) + continue + } + metadata[key] = value + } + + // Create client + c := client.NewClient(&cfg.Proxy, token) + + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + uiInstance.Info(fmt.Sprintf("Publishing %d message(s) to namespace '%s', topic '%s'", publishFlags.count, namespace, topic)) + + // Publish messages + successCount := 0 + failCount := 0 + + for i := 0; i < publishFlags.count; i++ { + // Add sequence number for multiple messages + currentMetadata := make(map[string]string) + for k, v := range metadata { + currentMetadata[k] = v + } + if publishFlags.count > 1 { + currentMetadata["x-sequence"] = fmt.Sprintf("%d", i+1) + } + + messageID, err := c.PublishMessage(ctx, namespace, topic, payload, currentMetadata) + if err != nil { + uiInstance.Error(fmt.Sprintf("Failed to publish message %d: %v", i+1, err)) + failCount++ + continue + } + + successCount++ + + if publishFlags.count == 1 { + uiInstance.Success(fmt.Sprintf("Published message")) + uiInstance.KeyValue("Message ID", messageID) + uiInstance.KeyValue("Namespace", namespace) + uiInstance.KeyValue("Topic", topic) + uiInstance.KeyValue("Payload Size", fmt.Sprintf("%d bytes", len(payload))) + + // Show metadata if verbose + if len(currentMetadata) > 0 { + uiInstance.Println("") + uiInstance.Subtle("Metadata:") + for k, v := range currentMetadata { + 
uiInstance.Subtle(fmt.Sprintf(" %s: %s", k, v)) + } + } + + // Show payload preview for small messages + if len(payload) <= 200 { + uiInstance.Println("") + uiInstance.Subtle("Payload:") + // Try to format as JSON for readability + if isJSON(payload) { + var prettyJSON interface{} + if err := json.Unmarshal(payload, &prettyJSON); err == nil { + formatted, err := json.MarshalIndent(prettyJSON, " ", " ") + if err == nil { + uiInstance.Subtle(fmt.Sprintf(" %s", string(formatted))) + } + } + } else { + uiInstance.Subtle(fmt.Sprintf(" %s", string(payload))) + } + } + } else if i == 0 || (i+1)%10 == 0 || i == publishFlags.count-1 { + uiInstance.Info(fmt.Sprintf("Published message %d/%d (ID: %s)", i+1, publishFlags.count, messageID)) + } + } + + uiInstance.Println("") + if failCount == 0 { + uiInstance.Success(fmt.Sprintf("Successfully published %d message(s)", successCount)) + } else { + uiInstance.Warning(fmt.Sprintf("Published %d/%d messages (%d failed)", successCount, publishFlags.count, failCount)) + } + + return nil +} + +// parseHeader parses a header in "key=value" format +func parseHeader(header string) (key, value string, ok bool) { + for i := 0; i < len(header); i++ { + if header[i] == '=' { + return header[:i], header[i+1:], true + } + } + return "", "", false +} + +// isJSON checks if the payload is valid JSON +func isJSON(data []byte) bool { + var js interface{} + return json.Unmarshal(data, &js) == nil +} diff --git a/cmd/prismctl/cmd/storage.go b/cmd/prismctl/cmd/storage.go new file mode 100644 index 000000000..babaf5bef --- /dev/null +++ b/cmd/prismctl/cmd/storage.go @@ -0,0 +1,604 @@ +package cmd + +import ( + "context" + "database/sql" + "embed" + "encoding/json" + "fmt" + "os" + "path/filepath" + "strings" + "time" + + "github.com/golang-migrate/migrate/v4" + "github.com/golang-migrate/migrate/v4/database/sqlite3" + "github.com/golang-migrate/migrate/v4/source/iofs" + _ "modernc.org/sqlite" // Pure Go SQLite driver +) + +//go:embed migrations/*.sql 
+var migrationsFS embed.FS + +// DatabaseConfig holds database connection configuration +type DatabaseConfig struct { + Type string // "sqlite" or "postgresql" + Path string // For SQLite + URN string // For PostgreSQL +} + +// Storage provides database operations for prism-admin +type Storage struct { + db *sql.DB + cfg *DatabaseConfig +} + +// Models + +type Namespace struct { + ID int64 + Name string + Description string + CreatedAt time.Time + UpdatedAt time.Time + Metadata json.RawMessage +} + +type Proxy struct { + ID int64 + ProxyID string + Address string + Version string + Status string // "healthy", "unhealthy", "unknown" + LastSeen *time.Time + CreatedAt time.Time + UpdatedAt time.Time + Metadata json.RawMessage +} + +type Pattern struct { + ID int64 + PatternID string + PatternType string + ProxyID string + Namespace string + Status string // "active", "stopped", "error" + Config json.RawMessage + CreatedAt time.Time + UpdatedAt time.Time +} + +type Launcher struct { + ID int64 + LauncherID string + Address string + Region string + Version string + Status string // "healthy", "unhealthy", "unknown" + MaxProcesses int32 + AvailableSlots int32 + Capabilities json.RawMessage // JSON array + LastSeen *time.Time + CreatedAt time.Time + UpdatedAt time.Time + Metadata json.RawMessage +} + +type AuditLog struct { + ID int64 + Timestamp time.Time + User string + Action string + ResourceType string + ResourceID string + Namespace string + Method string + Path string + StatusCode int + RequestBody json.RawMessage + ResponseBody json.RawMessage + Error string + DurationMs int64 + ClientIP string + UserAgent string +} + +// ParseDatabaseURN parses a database URN string +func ParseDatabaseURN(urn string) (*DatabaseConfig, error) { + if urn == "" { + return &DatabaseConfig{ + Type: "sqlite", + Path: defaultDatabasePath(), + }, nil + } + + // Parse sqlite:///path/to/db or sqlite://path/to/db + if strings.HasPrefix(urn, "sqlite://") { + path := strings.TrimPrefix(urn, 
"sqlite://") + // Handle sqlite:///absolute/path (three slashes) + if strings.HasPrefix(path, "/") { + return &DatabaseConfig{Type: "sqlite", Path: path}, nil + } + // Handle sqlite://relative/path (two slashes) + return &DatabaseConfig{Type: "sqlite", Path: path}, nil + } + + // Parse postgresql://... or postgres://... + if strings.HasPrefix(urn, "postgres") { + return &DatabaseConfig{Type: "postgresql", URN: urn}, nil + } + + return nil, fmt.Errorf("unsupported database URN: %s (supported: sqlite://, postgresql://)", urn) +} + +// defaultDatabasePath returns the default SQLite database path +func defaultDatabasePath() string { + homeDir, err := os.UserHomeDir() + if err != nil { + return "./prism-admin.db" + } + prismDir := filepath.Join(homeDir, ".prism") + if err := os.MkdirAll(prismDir, 0700); err != nil { + return "./prism-admin.db" + } + return filepath.Join(prismDir, "admin.db") +} + +// NewStorage creates a new Storage instance +func NewStorage(ctx context.Context, cfg *DatabaseConfig) (*Storage, error) { + var db *sql.DB + var err error + + switch cfg.Type { + case "sqlite": + // Ensure directory exists + dir := filepath.Dir(cfg.Path) + if err := os.MkdirAll(dir, 0700); err != nil { + return nil, fmt.Errorf("failed to create directory %s: %w", dir, err) + } + + db, err = sql.Open("sqlite", cfg.Path) + if err != nil { + return nil, fmt.Errorf("failed to open sqlite database: %w", err) + } + + // Configure SQLite for better performance + _, err = db.Exec(` + PRAGMA journal_mode=WAL; + PRAGMA synchronous=NORMAL; + PRAGMA foreign_keys=ON; + PRAGMA busy_timeout=5000; + `) + if err != nil { + db.Close() + return nil, fmt.Errorf("failed to configure sqlite: %w", err) + } + + case "postgresql": + return nil, fmt.Errorf("postgresql support not yet implemented") + + default: + return nil, fmt.Errorf("unsupported database type: %s", cfg.Type) + } + + s := &Storage{ + db: db, + cfg: cfg, + } + + // Run migrations + if err := s.runMigrations(); err != nil { + 
db.Close() + return nil, fmt.Errorf("failed to run migrations: %w", err) + } + + return s, nil +} + +// runMigrations applies database migrations +func (s *Storage) runMigrations() error { + sourceDriver, err := iofs.New(migrationsFS, "migrations") + if err != nil { + return fmt.Errorf("failed to create migration source: %w", err) + } + + dbDriver, err := sqlite3.WithInstance(s.db, &sqlite3.Config{}) + if err != nil { + return fmt.Errorf("failed to create database driver: %w", err) + } + + m, err := migrate.NewWithInstance("iofs", sourceDriver, "sqlite3", dbDriver) + if err != nil { + return fmt.Errorf("failed to create migrator: %w", err) + } + + if err := m.Up(); err != nil && err != migrate.ErrNoChange { + return fmt.Errorf("migration failed: %w", err) + } + + return nil +} + +// Close closes the database connection +func (s *Storage) Close() error { + if s.db != nil { + return s.db.Close() + } + return nil +} + +// Namespace operations + +func (s *Storage) CreateNamespace(ctx context.Context, ns *Namespace) error { + metadataJSON, _ := json.Marshal(ns.Metadata) + + result, err := s.db.ExecContext(ctx, ` + INSERT INTO namespaces (name, description, metadata) + VALUES (?, ?, ?) + `, ns.Name, ns.Description, string(metadataJSON)) + + if err != nil { + return fmt.Errorf("failed to create namespace: %w", err) + } + + id, _ := result.LastInsertId() + ns.ID = id + return nil +} + +func (s *Storage) GetNamespace(ctx context.Context, name string) (*Namespace, error) { + var ns Namespace + var metadataStr string + + err := s.db.QueryRowContext(ctx, ` + SELECT id, name, description, created_at, updated_at, metadata + FROM namespaces WHERE name = ? 
+ `, name).Scan(&ns.ID, &ns.Name, &ns.Description, &ns.CreatedAt, &ns.UpdatedAt, &metadataStr) + + if err == sql.ErrNoRows { + return nil, fmt.Errorf("namespace not found: %s", name) + } + if err != nil { + return nil, fmt.Errorf("failed to get namespace: %w", err) + } + + if metadataStr != "" { + ns.Metadata = json.RawMessage(metadataStr) + } + + return &ns, nil +} + +func (s *Storage) ListNamespaces(ctx context.Context) ([]*Namespace, error) { + rows, err := s.db.QueryContext(ctx, ` + SELECT id, name, description, created_at, updated_at, metadata + FROM namespaces + ORDER BY name + `) + if err != nil { + return nil, fmt.Errorf("failed to list namespaces: %w", err) + } + defer rows.Close() + + var namespaces []*Namespace + for rows.Next() { + var ns Namespace + var metadataStr string + + if err := rows.Scan(&ns.ID, &ns.Name, &ns.Description, &ns.CreatedAt, &ns.UpdatedAt, &metadataStr); err != nil { + return nil, fmt.Errorf("failed to scan namespace: %w", err) + } + + if metadataStr != "" { + ns.Metadata = json.RawMessage(metadataStr) + } + + namespaces = append(namespaces, &ns) + } + + return namespaces, rows.Err() +} + +// Proxy operations + +func (s *Storage) UpsertProxy(ctx context.Context, p *Proxy) error { + metadataJSON, _ := json.Marshal(p.Metadata) + + _, err := s.db.ExecContext(ctx, ` + INSERT INTO proxies (proxy_id, address, version, status, last_seen, metadata, updated_at) + VALUES (?, ?, ?, ?, ?, ?, CURRENT_TIMESTAMP) + ON CONFLICT(proxy_id) DO UPDATE SET + address = excluded.address, + version = excluded.version, + status = excluded.status, + last_seen = excluded.last_seen, + metadata = excluded.metadata, + updated_at = CURRENT_TIMESTAMP + `, p.ProxyID, p.Address, p.Version, p.Status, p.LastSeen, string(metadataJSON)) + + return err +} + +func (s *Storage) GetProxy(ctx context.Context, proxyID string) (*Proxy, error) { + var p Proxy + var metadataStr string + + err := s.db.QueryRowContext(ctx, ` + SELECT id, proxy_id, address, version, status, 
last_seen, created_at, updated_at, metadata + FROM proxies WHERE proxy_id = ? + `, proxyID).Scan(&p.ID, &p.ProxyID, &p.Address, &p.Version, &p.Status, &p.LastSeen, + &p.CreatedAt, &p.UpdatedAt, &metadataStr) + + if err == sql.ErrNoRows { + return nil, fmt.Errorf("proxy not found: %s", proxyID) + } + if err != nil { + return nil, fmt.Errorf("failed to get proxy: %w", err) + } + + if metadataStr != "" { + p.Metadata = json.RawMessage(metadataStr) + } + + return &p, nil +} + +func (s *Storage) ListProxies(ctx context.Context) ([]*Proxy, error) { + rows, err := s.db.QueryContext(ctx, ` + SELECT id, proxy_id, address, version, status, last_seen, created_at, updated_at, metadata + FROM proxies + ORDER BY last_seen DESC + `) + if err != nil { + return nil, fmt.Errorf("failed to list proxies: %w", err) + } + defer rows.Close() + + var proxies []*Proxy + for rows.Next() { + var p Proxy + var metadataStr string + + if err := rows.Scan(&p.ID, &p.ProxyID, &p.Address, &p.Version, &p.Status, &p.LastSeen, + &p.CreatedAt, &p.UpdatedAt, &metadataStr); err != nil { + return nil, fmt.Errorf("failed to scan proxy: %w", err) + } + + if metadataStr != "" { + p.Metadata = json.RawMessage(metadataStr) + } + + proxies = append(proxies, &p) + } + + return proxies, rows.Err() +} + +// Launcher operations + +func (s *Storage) UpsertLauncher(ctx context.Context, l *Launcher) error { + metadataJSON, _ := json.Marshal(l.Metadata) + capabilitiesJSON, _ := json.Marshal(l.Capabilities) + + _, err := s.db.ExecContext(ctx, ` + INSERT INTO launchers (launcher_id, address, region, version, status, max_processes, available_slots, capabilities, last_seen, metadata, updated_at) + VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, CURRENT_TIMESTAMP) + ON CONFLICT(launcher_id) DO UPDATE SET + address = excluded.address, + region = excluded.region, + version = excluded.version, + status = excluded.status, + max_processes = excluded.max_processes, + available_slots = excluded.available_slots, + capabilities = 
excluded.capabilities, + last_seen = excluded.last_seen, + metadata = excluded.metadata, + updated_at = CURRENT_TIMESTAMP + `, l.LauncherID, l.Address, l.Region, l.Version, l.Status, l.MaxProcesses, l.AvailableSlots, string(capabilitiesJSON), l.LastSeen, string(metadataJSON)) + + return err +} + +func (s *Storage) GetLauncher(ctx context.Context, launcherID string) (*Launcher, error) { + var l Launcher + var metadataStr, capabilitiesStr string + + err := s.db.QueryRowContext(ctx, ` + SELECT id, launcher_id, address, region, version, status, max_processes, available_slots, capabilities, last_seen, created_at, updated_at, metadata + FROM launchers WHERE launcher_id = ? + `, launcherID).Scan(&l.ID, &l.LauncherID, &l.Address, &l.Region, &l.Version, &l.Status, &l.MaxProcesses, &l.AvailableSlots, + &capabilitiesStr, &l.LastSeen, &l.CreatedAt, &l.UpdatedAt, &metadataStr) + + if err == sql.ErrNoRows { + return nil, fmt.Errorf("launcher not found: %s", launcherID) + } + if err != nil { + return nil, fmt.Errorf("failed to get launcher: %w", err) + } + + if metadataStr != "" { + l.Metadata = json.RawMessage(metadataStr) + } + if capabilitiesStr != "" { + l.Capabilities = json.RawMessage(capabilitiesStr) + } + + return &l, nil +} + +func (s *Storage) ListLaunchers(ctx context.Context) ([]*Launcher, error) { + rows, err := s.db.QueryContext(ctx, ` + SELECT id, launcher_id, address, region, version, status, max_processes, available_slots, capabilities, last_seen, created_at, updated_at, metadata + FROM launchers + ORDER BY last_seen DESC + `) + if err != nil { + return nil, fmt.Errorf("failed to list launchers: %w", err) + } + defer rows.Close() + + var launchers []*Launcher + for rows.Next() { + var l Launcher + var metadataStr, capabilitiesStr string + + if err := rows.Scan(&l.ID, &l.LauncherID, &l.Address, &l.Region, &l.Version, &l.Status, &l.MaxProcesses, &l.AvailableSlots, + &capabilitiesStr, &l.LastSeen, &l.CreatedAt, &l.UpdatedAt, &metadataStr); err != nil { + return nil, 
fmt.Errorf("failed to scan launcher: %w", err) + } + + if metadataStr != "" { + l.Metadata = json.RawMessage(metadataStr) + } + if capabilitiesStr != "" { + l.Capabilities = json.RawMessage(capabilitiesStr) + } + + launchers = append(launchers, &l) + } + + return launchers, rows.Err() +} + +// Pattern operations + +func (s *Storage) CreatePattern(ctx context.Context, p *Pattern) error { + configJSON, _ := json.Marshal(p.Config) + + result, err := s.db.ExecContext(ctx, ` + INSERT INTO patterns (pattern_id, pattern_type, proxy_id, namespace, status, config) + VALUES (?, ?, ?, ?, ?, ?) + `, p.PatternID, p.PatternType, p.ProxyID, p.Namespace, p.Status, string(configJSON)) + + if err != nil { + return fmt.Errorf("failed to create pattern: %w", err) + } + + id, _ := result.LastInsertId() + p.ID = id + return nil +} + +func (s *Storage) ListPatternsByNamespace(ctx context.Context, namespace string) ([]*Pattern, error) { + rows, err := s.db.QueryContext(ctx, ` + SELECT id, pattern_id, pattern_type, proxy_id, namespace, status, config, created_at, updated_at + FROM patterns + WHERE namespace = ? 
+ ORDER BY created_at DESC + `, namespace) + if err != nil { + return nil, fmt.Errorf("failed to list patterns: %w", err) + } + defer rows.Close() + + var patterns []*Pattern + for rows.Next() { + var p Pattern + var configStr string + + if err := rows.Scan(&p.ID, &p.PatternID, &p.PatternType, &p.ProxyID, &p.Namespace, + &p.Status, &configStr, &p.CreatedAt, &p.UpdatedAt); err != nil { + return nil, fmt.Errorf("failed to scan pattern: %w", err) + } + + if configStr != "" { + p.Config = json.RawMessage(configStr) + } + + patterns = append(patterns, &p) + } + + return patterns, rows.Err() +} + +// Audit log operations + +func (s *Storage) LogAudit(ctx context.Context, log *AuditLog) error { + requestJSON, _ := json.Marshal(log.RequestBody) + responseJSON, _ := json.Marshal(log.ResponseBody) + + _, err := s.db.ExecContext(ctx, ` + INSERT INTO audit_logs ( + timestamp, user, action, resource_type, resource_id, namespace, + method, path, status_code, request_body, response_body, error, + duration_ms, client_ip, user_agent + ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?) + `, log.Timestamp, log.User, log.Action, log.ResourceType, log.ResourceID, log.Namespace, + log.Method, log.Path, log.StatusCode, string(requestJSON), string(responseJSON), log.Error, + log.DurationMs, log.ClientIP, log.UserAgent) + + return err +} + +func (s *Storage) QueryAuditLogs(ctx context.Context, opts AuditQueryOptions) ([]*AuditLog, error) { + query := ` + SELECT id, timestamp, user, action, resource_type, resource_id, namespace, + method, path, status_code, request_body, response_body, error, + duration_ms, client_ip, user_agent + FROM audit_logs + WHERE 1=1 + ` + args := []interface{}{} + + if opts.Namespace != "" { + query += " AND namespace = ?" + args = append(args, opts.Namespace) + } + if opts.User != "" { + query += " AND user = ?" + args = append(args, opts.User) + } + if !opts.StartTime.IsZero() { + query += " AND timestamp >= ?" 
+ args = append(args, opts.StartTime) + } + if !opts.EndTime.IsZero() { + query += " AND timestamp <= ?" + args = append(args, opts.EndTime) + } + + query += " ORDER BY timestamp DESC" + + if opts.Limit > 0 { + query += " LIMIT ?" + args = append(args, opts.Limit) + } + + rows, err := s.db.QueryContext(ctx, query, args...) + if err != nil { + return nil, fmt.Errorf("failed to query audit logs: %w", err) + } + defer rows.Close() + + var logs []*AuditLog + for rows.Next() { + var log AuditLog + var requestStr, responseStr string + + if err := rows.Scan(&log.ID, &log.Timestamp, &log.User, &log.Action, &log.ResourceType, + &log.ResourceID, &log.Namespace, &log.Method, &log.Path, &log.StatusCode, + &requestStr, &responseStr, &log.Error, &log.DurationMs, &log.ClientIP, &log.UserAgent); err != nil { + return nil, fmt.Errorf("failed to scan audit log: %w", err) + } + + if requestStr != "" { + log.RequestBody = json.RawMessage(requestStr) + } + if responseStr != "" { + log.ResponseBody = json.RawMessage(responseStr) + } + + logs = append(logs, &log) + } + + return logs, rows.Err() +} + +// AuditQueryOptions specifies filters for querying audit logs +type AuditQueryOptions struct { + Namespace string + User string + StartTime time.Time + EndTime time.Time + Limit int +} diff --git a/cmd/prismctl/go.mod b/cmd/prismctl/go.mod index 52ae3bb1f..dd466e948 100644 --- a/cmd/prismctl/go.mod +++ b/cmd/prismctl/go.mod @@ -4,9 +4,28 @@ go 1.24.0 require ( github.com/charmbracelet/lipgloss v1.0.0 + github.com/golang-migrate/migrate/v4 v4.19.0 github.com/spf13/cobra v1.8.1 github.com/spf13/viper v1.19.0 - golang.org/x/oauth2 v0.26.0 + golang.org/x/oauth2 v0.30.0 + google.golang.org/grpc v1.76.0 + modernc.org/sqlite v1.39.1 +) + +require ( + github.com/dustin/go-humanize v1.0.1 // indirect + github.com/google/uuid v1.6.0 // indirect + github.com/hashicorp/errwrap v1.1.0 // indirect + github.com/hashicorp/go-multierror v1.1.1 // indirect + github.com/mattn/go-sqlite3 v1.14.22 // indirect + 
github.com/ncruces/go-strftime v0.1.9 // indirect + github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect + golang.org/x/net v0.42.0 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20250804133106-a7a43d27e69b // indirect + google.golang.org/protobuf v1.36.10 // indirect + modernc.org/libc v1.66.10 // indirect + modernc.org/mathutil v1.7.1 // indirect + modernc.org/memory v1.11.0 // indirect ) require ( @@ -15,6 +34,7 @@ require ( github.com/fsnotify/fsnotify v1.7.0 // indirect github.com/hashicorp/hcl v1.0.0 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect + github.com/jrepp/prism-data-layer/pkg/plugin v0.0.0 github.com/lucasb-eyer/go-colorful v1.2.0 // indirect github.com/magiconair/properties v1.8.7 // indirect github.com/mattn/go-isatty v0.0.20 // indirect @@ -29,15 +49,16 @@ require ( github.com/spf13/afero v1.11.0 // indirect github.com/spf13/cast v1.6.0 // indirect github.com/spf13/pflag v1.0.5 // indirect - github.com/stretchr/testify v1.11.1 // indirect github.com/subosito/gotenv v1.6.0 // indirect go.uber.org/atomic v1.9.0 // indirect go.uber.org/multierr v1.9.0 // indirect - golang.org/x/exp v0.0.0-20230905200255-921286631fa9 // indirect - golang.org/x/sys v0.34.0 // indirect + golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b // indirect + golang.org/x/sys v0.36.0 // indirect golang.org/x/text v0.27.0 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) replace github.com/jrepp/prism-data-layer/patterns/core => ../patterns/core + +replace github.com/jrepp/prism-data-layer/pkg/plugin => ../../pkg/plugin diff --git a/cmd/prismctl/go.sum b/cmd/prismctl/go.sum index b93bd69b9..a5cc4f5fe 100644 --- a/cmd/prismctl/go.sum +++ b/cmd/prismctl/go.sum @@ -9,12 +9,31 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew 
v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= +github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= -github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= -github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= +github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/golang-migrate/migrate/v4 v4.19.0 h1:RcjOnCGz3Or6HQYEJ/EEVLfWnmw9KnoigPSjzhCuaSE= +github.com/golang-migrate/migrate/v4 v4.19.0/go.mod h1:9dyEcu+hO+G9hPSw8AIg50yg622pXJsoHItQnDGZkI0= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= +github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= +github.com/google/pprof v0.0.0-20250317173921-a4b03ec1a45e h1:ijClszYn+mADRFY17kjQEVQ1XRhq2/JR1M3sGqeJoxs= +github.com/google/pprof v0.0.0-20250317173921-a4b03ec1a45e/go.mod h1:boTsfXsheKC2y+lKOCMpSfarhxDeIzfZG1jqGcPl3cA= 
+github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= +github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= +github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= @@ -23,6 +42,8 @@ github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= +github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/lucasb-eyer/go-colorful v1.2.0 h1:1nnpGOrhyZZuNyfu1QjKiUICQ74+3FNCN69Aj6K7nkY= github.com/lucasb-eyer/go-colorful v1.2.0/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0= github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= @@ -31,15 +52,21 @@ github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWE github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U= github.com/mattn/go-runewidth v0.0.15/go.mod 
h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= +github.com/mattn/go-sqlite3 v1.14.22 h1:2gZY6PC6kBnID23Tichd1K+Z0oS6nE/XwU+Vz/5o4kU= +github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/muesli/termenv v0.15.2 h1:GohcuySI0QmI3wN8Ok9PtKGkgkFIk7y6Vpb5PvrY+Wo= github.com/muesli/termenv v0.15.2/go.mod h1:Epx+iuz8sNs7mNKhxzH4fWXGNpZwUaJKRS1noLXviQ8= +github.com/ncruces/go-strftime v0.1.9 h1:bY0MQC28UADQmHmaF5dgpLmImcShSi2kHU9XLdhx/f4= +github.com/ncruces/go-strftime v0.1.9/go.mod h1:Fwc5htZGVVkseilnfgOVb9mKy6w1naJmn9CehxcKcls= github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM= github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE= +github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ= github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= @@ -75,19 +102,47 @@ github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= github.com/subosito/gotenv v1.6.0 
h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= +go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= +go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= +go.opentelemetry.io/otel v1.37.0 h1:9zhNfelUvx0KBfu/gb+ZgeAfAgtWrfHJZcAqFC228wQ= +go.opentelemetry.io/otel v1.37.0/go.mod h1:ehE/umFRLnuLa/vSccNq9oS1ErUlkkK71gMcN34UG8I= +go.opentelemetry.io/otel/metric v1.37.0 h1:mvwbQS5m0tbmqML4NqK+e3aDiO02vsf/WgbsdpcPoZE= +go.opentelemetry.io/otel/metric v1.37.0/go.mod h1:04wGrZurHYKOc+RKeye86GwKiTb9FKm1WHtO+4EVr2E= +go.opentelemetry.io/otel/sdk v1.37.0 h1:ItB0QUqnjesGRvNcmAcU0LyvkVyGJ2xftD29bWdDvKI= +go.opentelemetry.io/otel/sdk v1.37.0/go.mod h1:VredYzxUvuo2q3WRcDnKDjbdvmO0sCzOvVAiY+yUkAg= +go.opentelemetry.io/otel/sdk/metric v1.37.0 h1:90lI228XrB9jCMuSdA0673aubgRobVZFhbjxHHspCPc= +go.opentelemetry.io/otel/sdk/metric v1.37.0/go.mod h1:cNen4ZWfiD37l5NhS+Keb5RXVWZWpRE+9WyVCpbo5ps= +go.opentelemetry.io/otel/trace v1.37.0 h1:HLdcFNbRQBE2imdSEgm/kwqmQj1Or1l/7bW6mxVK7z4= +go.opentelemetry.io/otel/trace v1.37.0/go.mod h1:TlgrlQ+PtQO5XFerSPUYG0JSgGyryXewPGyayAWSBS0= go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/multierr v1.9.0 h1:7fIwc/ZtS0q++VgcfqFDxSBZVv/Xo49/SYnDFupUwlI= go.uber.org/multierr v1.9.0/go.mod h1:X2jQV1h+kxSjClGpnseKVIxpmcjrj7MNnI0bnlfKTVQ= -golang.org/x/exp v0.0.0-20230905200255-921286631fa9 h1:GoHiUyI/Tp2nVkLI2mCxVkOjsbSXD66ic0XW0js0R9g= -golang.org/x/exp v0.0.0-20230905200255-921286631fa9/go.mod h1:S2oDrQGGwySpoQPVqRShND87VCbxmc6bL1Yd2oYrm6k= -golang.org/x/oauth2 v0.26.0 h1:afQXWNNaeC4nvZ0Ed9XvCCzXM6UHJG7iCg0W4fPqSBE= -golang.org/x/oauth2 v0.26.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b 
h1:M2rDM6z3Fhozi9O7NWsxAkg/yqS/lQJ6PmkyIV3YP+o= +golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b/go.mod h1:3//PLf8L/X+8b4vuAfHzxeRUl04Adcb341+IGKfnqS8= +golang.org/x/mod v0.27.0 h1:kb+q2PyFnEADO2IEF935ehFUXlWiNjJWtRNgBLSfbxQ= +golang.org/x/mod v0.27.0/go.mod h1:rWI627Fq0DEoudcK+MBkNkCe0EetEaDSwJJkCcjpazc= +golang.org/x/net v0.42.0 h1:jzkYrhi3YQWD6MLBJcsklgQsoAcw89EcZbJw8Z614hs= +golang.org/x/net v0.42.0/go.mod h1:FF1RA5d3u7nAYA4z2TkclSCKh68eSXtiFwcWQpPXdt8= +golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI= +golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU= +golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw= +golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.34.0 h1:H5Y5sJ2L2JRdyv7ROF1he/lPdvFsd0mJHFw2ThKHxLA= -golang.org/x/sys v0.34.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/sys v0.36.0 h1:KVRy2GtZBrk1cBYA7MKu5bEZFxQk4NIDV6RLVcC8o0k= +golang.org/x/sys v0.36.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= golang.org/x/text v0.27.0 h1:4fGWRpyh641NLlecmyl4LOe6yDdfaYNrGb2zdfo4JV4= golang.org/x/text v0.27.0/go.mod h1:1D28KMCvyooCX9hBiosv5Tz/+YLxj0j7XhWjpSUF7CU= +golang.org/x/tools v0.36.0 h1:kWS0uv/zsvHEle1LbV5LE8QujrxB3wfQyxHfhOk0Qkg= +golang.org/x/tools v0.36.0/go.mod h1:WBDiHKJK8YgLHlcQPYQzNCkUxUypCaa5ZegCVutKm+s= +gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk= +gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250804133106-a7a43d27e69b h1:zPKJod4w6F1+nRGDI9ubnXYhU9NSWoFAijkHkUXeTK8= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250804133106-a7a43d27e69b/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= +google.golang.org/grpc v1.76.0 h1:UnVkv1+uMLYXoIz6o7chp59WfQUYA2ex/BXQ9rHZu7A= 
+google.golang.org/grpc v1.76.0/go.mod h1:Ju12QI8M6iQJtbcsV+awF5a4hfJMLi4X0JLo94ULZ6c= +google.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aOOE= +google.golang.org/protobuf v1.36.10/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -96,3 +151,29 @@ gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +modernc.org/cc/v4 v4.26.5 h1:xM3bX7Mve6G8K8b+T11ReenJOT+BmVqQj0FY5T4+5Y4= +modernc.org/cc/v4 v4.26.5/go.mod h1:uVtb5OGqUKpoLWhqwNQo/8LwvoiEBLvZXIQ/SmO6mL0= +modernc.org/ccgo/v4 v4.28.1 h1:wPKYn5EC/mYTqBO373jKjvX2n+3+aK7+sICCv4Fjy1A= +modernc.org/ccgo/v4 v4.28.1/go.mod h1:uD+4RnfrVgE6ec9NGguUNdhqzNIeeomeXf6CL0GTE5Q= +modernc.org/fileutil v1.3.40 h1:ZGMswMNc9JOCrcrakF1HrvmergNLAmxOPjizirpfqBA= +modernc.org/fileutil v1.3.40/go.mod h1:HxmghZSZVAz/LXcMNwZPA/DRrQZEVP9VX0V4LQGQFOc= +modernc.org/gc/v2 v2.6.5 h1:nyqdV8q46KvTpZlsw66kWqwXRHdjIlJOhG6kxiV/9xI= +modernc.org/gc/v2 v2.6.5/go.mod h1:YgIahr1ypgfe7chRuJi2gD7DBQiKSLMPgBQe9oIiito= +modernc.org/goabi0 v0.2.0 h1:HvEowk7LxcPd0eq6mVOAEMai46V+i7Jrj13t4AzuNks= +modernc.org/goabi0 v0.2.0/go.mod h1:CEFRnnJhKvWT1c1JTI3Avm+tgOWbkOu5oPA8eH8LnMI= +modernc.org/libc v1.66.10 h1:yZkb3YeLx4oynyR+iUsXsybsX4Ubx7MQlSYEw4yj59A= +modernc.org/libc v1.66.10/go.mod h1:8vGSEwvoUoltr4dlywvHqjtAqHBaw0j1jI7iFBTAr2I= +modernc.org/mathutil v1.7.1 h1:GCZVGXdaN8gTqB1Mf/usp1Y/hSqgI2vAGGP4jZMCxOU= +modernc.org/mathutil 
v1.7.1/go.mod h1:4p5IwJITfppl0G4sUEDtCr4DthTaT47/N3aT6MhfgJg= +modernc.org/memory v1.11.0 h1:o4QC8aMQzmcwCK3t3Ux/ZHmwFPzE6hf2Y5LbkRs+hbI= +modernc.org/memory v1.11.0/go.mod h1:/JP4VbVC+K5sU2wZi9bHoq2MAkCnrt2r98UGeSK7Mjw= +modernc.org/opt v0.1.4 h1:2kNGMRiUjrp4LcaPuLY2PzUfqM/w9N23quVwhKt5Qm8= +modernc.org/opt v0.1.4/go.mod h1:03fq9lsNfvkYSfxrfUhZCWPk1lm4cq4N+Bh//bEtgns= +modernc.org/sortutil v1.2.1 h1:+xyoGf15mM3NMlPDnFqrteY07klSFxLElE2PVuWIJ7w= +modernc.org/sortutil v1.2.1/go.mod h1:7ZI3a3REbai7gzCLcotuw9AC4VZVpYMjDzETGsSMqJE= +modernc.org/sqlite v1.39.1 h1:H+/wGFzuSCIEVCvXYVHX5RQglwhMOvtHSv+VtidL2r4= +modernc.org/sqlite v1.39.1/go.mod h1:9fjQZ0mB1LLP0GYrp39oOJXx/I2sxEnZtzCmEQIKvGE= +modernc.org/strutil v1.2.1 h1:UneZBkQA+DX2Rp35KcM69cSsNES9ly8mQWD71HKlOA0= +modernc.org/strutil v1.2.1/go.mod h1:EHkiggD70koQxjVdSBM3JKM7k6L0FbGE5eymy9i3B9A= +modernc.org/token v1.1.0 h1:Xl7Ap9dKaEs5kLoOQeQmPWevfnk/DM5qcLcYlA8ys6Y= +modernc.org/token v1.1.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= diff --git a/cmd/prismctl/internal/client/client.go b/cmd/prismctl/internal/client/client.go index 8c079d730..6a6615849 100644 --- a/cmd/prismctl/internal/client/client.go +++ b/cmd/prismctl/internal/client/client.go @@ -120,6 +120,53 @@ func (c *Client) ListSessions(ctx context.Context, namespace string) ([]map[stri return nil, fmt.Errorf("unexpected response format") } +// PublishMessage publishes a message to a topic in a namespace +func (c *Client) PublishMessage(ctx context.Context, namespace, topic string, payload []byte, metadata map[string]string) (string, error) { + body := map[string]interface{}{ + "topic": topic, + "payload": string(payload), + "metadata": metadata, + } + + result, err := c.doRequest(ctx, "POST", fmt.Sprintf("/api/v1/namespaces/%s/publish", namespace), body) + if err != nil { + return "", err + } + + // Extract message ID from response + if messageID, ok := result["message_id"].(string); ok { + return messageID, nil + } + + return "", 
fmt.Errorf("message_id not found in response") +} + +// QueryMailbox queries messages from a mailbox namespace +func (c *Client) QueryMailbox(ctx context.Context, namespace string, filter map[string]interface{}) ([]map[string]interface{}, error) { + result, err := c.doRequest(ctx, "POST", fmt.Sprintf("/api/v1/namespaces/%s/mailbox/query", namespace), filter) + if err != nil { + return nil, err + } + + // Result should be an array + if arr, ok := result["events"].([]interface{}); ok { + events := make([]map[string]interface{}, len(arr)) + for i, item := range arr { + if event, ok := item.(map[string]interface{}); ok { + events[i] = event + } + } + return events, nil + } + + return nil, fmt.Errorf("unexpected response format") +} + +// GetMailboxEvent retrieves a single event by message ID from a mailbox +func (c *Client) GetMailboxEvent(ctx context.Context, namespace, messageID string) (map[string]interface{}, error) { + return c.doRequest(ctx, "GET", fmt.Sprintf("/api/v1/namespaces/%s/mailbox/events/%s", namespace, messageID), nil) +} + // doRequest performs an HTTP request and decodes JSON response func (c *Client) doRequest(ctx context.Context, method, path string, body interface{}) (map[string]interface{}, error) { req, err := c.newRequest(ctx, method, path, body) diff --git a/docs-cms/adr/adr-054-prism-admin-sqlite-storage.md b/docs-cms/adr/adr-054-prism-admin-sqlite-storage.md new file mode 100644 index 000000000..34df98bf2 --- /dev/null +++ b/docs-cms/adr/adr-054-prism-admin-sqlite-storage.md @@ -0,0 +1,318 @@ +--- +date: 2025-10-15 +deciders: Engineering Team +doc_uuid: 8f3c4d2a-9b5e-4f1c-a2d7-3e8f9c1d5b4a +id: adr-054 +project_id: prism-data-layer +status: Accepted +tags: +- admin +- database +- sqlite +- storage +- cli +- audit +title: 'ADR-054: SQLite Storage for prism-admin Local State' +--- + +## Context + +The `prism-admin` CLI tool needs to persist operational state including: +- **Namespaces**: Configured namespaces and their settings +- **Proxy 
registry**: Last known proxies, their health status, and connection information +- **Pattern registry**: Active patterns connected to proxies +- **Audit log**: Complete record of all API interactions with the admin API + +Currently, prism-admin is stateless and relies on querying live proxy instances. This creates issues: +- No historical data when proxies are down +- No audit trail of administrative actions +- Cannot track namespace configuration over time +- Difficult to debug past issues + +We need a lightweight, embedded storage solution that requires zero external dependencies for local development and testing while supporting optional external database URNs for production deployments. + +## Decision + +Use SQLite as the default embedded storage backend for prism-admin with support for alternative database URNs via the `-db` flag: + +```bash +# Default: Creates ~/.prism/admin.db +prism-admin server + +# Custom SQLite location +prism-admin server -db sqlite:///path/to/admin.db + +# PostgreSQL for production +prism-admin server -db postgresql://user:pass@host:5432/prism_admin +``` + +**Schema Design:** + +```sql +-- Namespaces table +CREATE TABLE namespaces ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + name TEXT NOT NULL UNIQUE, + description TEXT, + created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP, + updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP, + metadata JSON +); + +-- Proxies table (last known state) +CREATE TABLE proxies ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + proxy_id TEXT NOT NULL UNIQUE, + address TEXT NOT NULL, + version TEXT, + status TEXT CHECK(status IN ('healthy', 'unhealthy', 'unknown')), + last_seen TIMESTAMP, + created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP, + updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP, + metadata JSON +); + +-- Patterns table (active connections) +CREATE TABLE patterns ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + pattern_id TEXT NOT NULL, + pattern_type TEXT NOT NULL, + proxy_id TEXT NOT NULL, + namespace TEXT NOT NULL, + 
status TEXT CHECK(status IN ('active', 'stopped', 'error')), + config JSON, + created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP, + updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP, + FOREIGN KEY (proxy_id) REFERENCES proxies(proxy_id), + FOREIGN KEY (namespace) REFERENCES namespaces(name) +); + +-- Audit log table +CREATE TABLE audit_logs ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + timestamp TIMESTAMP DEFAULT CURRENT_TIMESTAMP, + user TEXT, + action TEXT NOT NULL, + resource_type TEXT NOT NULL, + resource_id TEXT, + namespace TEXT, + method TEXT, + path TEXT, + status_code INTEGER, + request_body JSON, + response_body JSON, + error TEXT, + duration_ms INTEGER, + client_ip TEXT, + user_agent TEXT +); + +-- Indexes for common queries +CREATE INDEX idx_audit_logs_timestamp ON audit_logs(timestamp); +CREATE INDEX idx_audit_logs_namespace ON audit_logs(namespace); +CREATE INDEX idx_audit_logs_resource ON audit_logs(resource_type, resource_id); +CREATE INDEX idx_proxies_status ON proxies(status, last_seen); +CREATE INDEX idx_patterns_namespace ON patterns(namespace); +CREATE INDEX idx_patterns_proxy ON patterns(proxy_id); +``` + +## Rationale + +**Why SQLite as default:** +- Zero configuration: Works out-of-the-box with no setup +- Zero external dependencies: Embedded in the Go binary +- Cross-platform: Works on macOS, Linux, Windows +- Excellent for local development and testing +- Sufficient performance for admin workloads (writes are infrequent) +- Battle-tested reliability +- Built-in JSON support for flexible metadata storage + +**Why support external database URNs:** +- Production deployments may require PostgreSQL for high availability +- Allows multiple prism-admin instances to share state +- Enables centralized audit logging +- Supports compliance requirements for audit log retention + +### Alternatives Considered + +1. 
**PostgreSQL only** + - Pros: Production-ready, handles high concurrency + - Cons: Requires external setup, overkill for local dev, increases friction + - Rejected because: Developer experience suffers, local testing becomes complex + +2. **JSON files** + - Pros: Simple, human-readable + - Cons: No transactional integrity, poor query performance, no concurrent access + - Rejected because: Audit logs grow quickly, queries would be slow + +3. **Embedded key-value store (BoltDB/BadgerDB)** + - Pros: Fast, embedded, good for key-value access + - Cons: Poor support for complex queries, no SQL, harder to inspect data + - Rejected because: Audit log queries require filtering, joins, aggregations + +4. **Redis** + - Pros: Fast, supports various data structures + - Cons: Requires external service, not embedded, persistence not primary use case + - Rejected because: Not suitable for audit logs, requires external dependency + +## Consequences + +### Positive + +- **Zero-config local development**: Developers can use prism-admin immediately +- **Audit compliance**: Complete trail of all administrative actions +- **Historical visibility**: View past proxy and pattern states even when offline +- **Debugging capability**: Troubleshoot issues using historical data +- **Flexibility**: Supports both embedded (SQLite) and external (PostgreSQL) databases +- **Standard tooling**: Can inspect/backup database with standard SQL tools +- **JSON columns**: Flexible schema for metadata without migrations + +### Negative + +- **SQLite limitations in production**: + - Single-writer limitation (but admin writes are infrequent) + - No network access (but can use external DB URN for multi-instance) +- **Schema migrations**: Need to manage database schema versions +- **Disk usage**: Audit logs grow over time, need rotation policy +- **Backup complexity**: Need to document backup procedures for both SQLite and PostgreSQL + +### Neutral + +- Database URN parsing adds configuration complexity +- Need 
to support two database drivers (sqlite3 and pgx) +- Must test both SQLite and PostgreSQL code paths + +## Implementation Notes + +### Database Driver Selection + +**SQLite**: Use `modernc.org/sqlite` (pure Go, no CGO required) +- Avoids CGO cross-compilation issues +- Fully compatible with SQLite file format +- Excellent performance for admin workloads + +**PostgreSQL**: Use `github.com/jackc/pgx/v5` (pure Go) +- Best-in-class PostgreSQL driver +- Native Go implementation + +### Default Database Location + +```go +func defaultDatabasePath() string { + homeDir, _ := os.UserHomeDir() + prismDir := filepath.Join(homeDir, ".prism") + os.MkdirAll(prismDir, 0700) + return filepath.Join(prismDir, "admin.db") +} +``` + +### Migration Strategy + +Use `golang-migrate/migrate` with embedded migrations: + +```go +//go:embed migrations/*.sql +var migrations embed.FS + +func runMigrations(db *sql.DB, dbType string) error { + driver, _ := sqlite.WithInstance(db, &sqlite.Config{}) + m, _ := migrate.NewWithDatabaseInstance( + "embed://migrations", + dbType, + driver, + ) + return m.Up() +} +``` + +### Audit Logging Middleware + +Wrap all gRPC/HTTP handlers with audit logging: + +```go +func AuditMiddleware(store *Storage) func(next http.Handler) http.Handler { + return func(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + start := time.Now() + + // Capture request body + var bodyBytes []byte + if r.Body != nil { + bodyBytes, _ = io.ReadAll(r.Body) + r.Body = io.NopCloser(bytes.NewBuffer(bodyBytes)) + } + + // Wrap response writer to capture status code + rec := &responseRecorder{ResponseWriter: w, statusCode: 200} + + // Execute handler + next.ServeHTTP(rec, r) + + // Log audit entry + store.LogAudit(context.Background(), &AuditEntry{ + Timestamp: start, + Action: r.Method + " " + r.URL.Path, + Method: r.Method, + Path: r.URL.Path, + StatusCode: rec.statusCode, + DurationMs: time.Since(start).Milliseconds(), + ClientIP: 
r.RemoteAddr, + UserAgent: r.UserAgent(), + RequestBody: json.RawMessage(bodyBytes), + }) + }) + } +} +``` + +### Database URN Parsing + +```go +func ParseDatabaseURN(urn string) (*DatabaseConfig, error) { + if urn == "" { + return &DatabaseConfig{ + Type: "sqlite", + Path: defaultDatabasePath(), + }, nil + } + + // Parse sqlite:///path/to/db + if strings.HasPrefix(urn, "sqlite://") { + path := strings.TrimPrefix(urn, "sqlite://") + return &DatabaseConfig{Type: "sqlite", Path: path}, nil + } + + // Parse postgresql://... or postgres://... + if strings.HasPrefix(urn, "postgres") { + return &DatabaseConfig{Type: "postgresql", URN: urn}, nil + } + + return nil, fmt.Errorf("unsupported database URN: %s", urn) +} +``` + +### Audit Log Retention + +Implement configurable retention policy: + +```sql +-- Delete audit logs older than 90 days (default) +DELETE FROM audit_logs WHERE timestamp < datetime('now', '-90 days'); +``` + +Run as cron job or on prism-admin startup. + +## References + +- [ADR-036: SQLite Config Storage](/adr/adr-036) - Proxy config storage pattern +- [ADR-040: Go Binary Admin CLI](/adr/adr-040) - Admin CLI architecture +- [ADR-027: Admin API gRPC](/adr/adr-027) - Admin API design +- [SQLite JSON Functions](https://www.sqlite.org/json1.html) +- [golang-migrate](https://github.com/golang-migrate/migrate) +- [modernc.org/sqlite](https://gitlab.com/cznic/sqlite) +- [pgx PostgreSQL driver](https://github.com/jackc/pgx) + +## Revision History + +- 2025-10-15: Initial draft +- 2025-10-15: Accepted - zero-config local storage for prism-admin diff --git a/docs-cms/rfcs/RFC-037-mailbox-pattern-searchable-event-store.md b/docs-cms/rfcs/RFC-037-mailbox-pattern-searchable-event-store.md new file mode 100644 index 000000000..58bd43155 --- /dev/null +++ b/docs-cms/rfcs/RFC-037-mailbox-pattern-searchable-event-store.md @@ -0,0 +1,504 @@ +--- +id: rfc-037 +title: "RFC-037: Mailbox Pattern - Searchable Event Store" +sidebar_label: "RFC-037: Mailbox Pattern" +rfc_number: 
37 +status: Proposed +created: 2025-10-15 +updated: 2025-10-15 +author: Claude Code +project_id: prism +doc_uuid: 8f7c2a1d-5e6b-4c9a-8d2f-3a1c4b5d6e7f +tags: + - pattern + - consumer + - storage + - sqlite + - indexing +related_adrs: + - ADR-005 +related_rfcs: + - RFC-014 + - RFC-017 + - RFC-033 +--- + +# RFC-037: Mailbox Pattern - Searchable Event Store + +## Summary + +The **Mailbox Pattern** provides a searchable, persistent event store by consuming messages from a queue and storing them in a structured database with indexed headers and blob bodies. Headers are extracted from event metadata and stored as indexed table columns for efficient querying, while message bodies (which may be encrypted) are stored as opaque blobs. + +## Motivation + +### Use Cases + +1. **Audit Logging**: Store all system events with searchable metadata (user, action, resource) but encrypted PII +2. **Email/Message Archives**: Store communications with searchable headers (from, to, subject, date) and encrypted bodies +3. **Event Sourcing**: Capture all domain events with indexed event types, aggregates, and timestamps +4. **System Observability**: Archive traces, logs, and metrics with searchable dimensions +5. 
**Compliance**: Retain records with searchable metadata while protecting sensitive payload data + +### Problem Statement + +Existing patterns lack a unified solution for: +- **Indexed Search**: Query events by metadata without scanning all messages +- **Encrypted Bodies**: Store sensitive payloads securely while maintaining header searchability +- **Schema Evolution**: Handle varying header schemas across different event types +- **Pluggable Storage**: Decouple pattern logic from storage backend (SQLite, PostgreSQL, ClickHouse) + +## Design + +### Architecture + +```text +┌────────────────────────────────────────────────────────────────┐ +│ Mailbox Pattern (Composite) │ +├────────────────────────────────────────────────────────────────┤ +│ │ +│ ┌─────────────┐ ┌──────────────┐ ┌─────────────┐│ +│ │ Message │ │ Table │ │ Table ││ +│ │ Consumer │──────▶│ Writer │ │ Reader ││ +│ │ Slot │ │ Slot │ │ Slot ││ +│ └─────────────┘ └──────────────┘ └─────────────┘│ +│ │ │ ▲ │ +│ │ │ │ │ +│ │ ▼ │ │ +│ │ ┌────────────────┐ │ │ +│ │ │ SQLite DB │─────────────┘ │ +│ │ │ (Headers + │ │ +│ │ │ Blob) │ │ +│ │ └────────────────┘ │ +│ │ │ +│ ▼ │ +│ Extract Headers → Index Columns │ +│ Store Body → Blob Column │ +│ Query Interface → Returns MailboxEvent[] │ +│ │ +└────────────────────────────────────────────────────────────────┘ +``` + +### Slot Architecture + +The Mailbox Pattern has **three backend slots**: + +#### Slot 1: Message Source (Queue Consumer) +- **Interface**: `QueueInterface` or `PubSubInterface` +- **Purpose**: Consume events from messaging backend +- **Implementations**: NATS, Kafka, Redis Streams, RabbitMQ +- **Configuration**: topic, consumer group, batch size + +#### Slot 2: Storage Backend (Table Writer) +- **Interface**: `TableWriterInterface` (new) +- **Purpose**: Persist events with indexed headers +- **Implementations**: SQLite, PostgreSQL, ClickHouse +- **Configuration**: table name, indexed columns, retention policy + +#### Slot 3: Query Interface (Table 
Reader) +- **Interface**: `TableReaderInterface` (new) +- **Purpose**: Retrieve stored messages as array of MailboxEvent (header + payload) +- **Implementations**: SQLite, PostgreSQL, ClickHouse (same backends as writer) +- **Configuration**: shared database connection with writer slot + +### Message Structure + +Messages consumed from the queue follow the standard PubSubMessage format: + +```go +type PubSubMessage struct { + Topic string // Event topic/stream + Payload []byte // Message body (may be encrypted) + Metadata map[string]string // Headers to extract and index + MessageID string // Unique message identifier + Timestamp int64 // Event timestamp (Unix epoch millis) +} +``` + +### Header Extraction and Indexing + +The pattern extracts well-known headers from `Metadata` map: + +**Standard Indexed Headers**: +- `prism-message-id`: Unique message identifier +- `prism-timestamp`: Event timestamp +- `prism-topic`: Topic/stream name +- `prism-content-type`: Payload content type +- `prism-schema-id`: Schema registry ID (RFC-030) +- `prism-encryption`: Encryption algorithm (if encrypted) +- `prism-correlation-id`: Request correlation ID +- `prism-principal`: User/service identity +- `prism-namespace`: Prism namespace + +**Custom Headers**: +Application-specific headers with `x-` prefix are also indexed (configurable). 
+
+### Table Schema
+
+Default SQLite table schema:
+
+```sql
+CREATE TABLE IF NOT EXISTS mailbox (
+    id INTEGER PRIMARY KEY AUTOINCREMENT,
+
+    -- Indexed headers (extracted from metadata)
+    message_id TEXT NOT NULL UNIQUE,
+    timestamp INTEGER NOT NULL,
+    topic TEXT NOT NULL,
+    content_type TEXT,
+    schema_id TEXT,
+    encryption TEXT,
+    correlation_id TEXT,
+    principal TEXT,
+    namespace TEXT,
+
+    -- Custom headers (JSON for flexibility)
+    custom_headers TEXT, -- JSON map of x-* headers
+
+    -- Body (opaque blob, may be encrypted)
+    body BLOB NOT NULL,
+
+    -- Metadata
+    created_at INTEGER NOT NULL
+);
+
+-- Indexes for common queries. SQLite does not support inline INDEX
+-- clauses inside CREATE TABLE; indexes must be created separately.
+CREATE INDEX IF NOT EXISTS idx_timestamp ON mailbox(timestamp);
+CREATE INDEX IF NOT EXISTS idx_topic ON mailbox(topic);
+CREATE INDEX IF NOT EXISTS idx_principal ON mailbox(principal);
+CREATE INDEX IF NOT EXISTS idx_correlation_id ON mailbox(correlation_id);
+```
+
+### Backend Interfaces
+
+Two new backend interfaces for structured storage:
+
+#### TableWriterInterface
+
+```go
+// TableWriterInterface defines operations for writing structured events
+type TableWriterInterface interface {
+    // WriteEvent stores an event with indexed headers and body
+    WriteEvent(ctx context.Context, event *MailboxEvent) error
+
+    // DeleteOldEvents removes events older than retention period
+    DeleteOldEvents(ctx context.Context, olderThan int64) (int64, error)
+
+    // GetTableStats returns storage statistics
+    GetTableStats(ctx context.Context) (*TableStats, error)
+}
+```
+
+#### TableReaderInterface
+
+```go
+// TableReaderInterface defines operations for reading structured events
+type TableReaderInterface interface {
+    // QueryEvents retrieves events matching filter criteria
+    // Returns messages as array of MailboxEvent (header + payload)
+    QueryEvents(ctx context.Context, filter *EventFilter) ([]*MailboxEvent, error)
+
+    // GetEvent retrieves a single event by message ID
+    GetEvent(ctx context.Context, messageID string) (*MailboxEvent, error)
+
+    // GetTableStats returns storage statistics
+    GetTableStats(ctx context.Context) (*TableStats, error)
+}
+```
+
+#### 
Shared Types + +```go +// MailboxEvent represents a structured event for storage +type MailboxEvent struct { + MessageID string + Timestamp int64 + Topic string + ContentType string + SchemaID string + Encryption string + CorrelationID string + Principal string + Namespace string + CustomHeaders map[string]string // x-* headers + Body []byte // Opaque blob +} + +// EventFilter defines query criteria +type EventFilter struct { + StartTime *time.Time + EndTime *time.Time + Topics []string + Principals []string + CorrelationID *string + Limit int + Offset int +} + +// TableStats provides storage metrics +type TableStats struct { + TotalEvents int64 + TotalSizeBytes int64 + OldestEvent time.Time + NewestEvent time.Time +} +``` + +### Pattern Configuration + +YAML configuration for mailbox pattern: + +```yaml +namespaces: + - name: $admin + pattern: mailbox + pattern_version: 0.1.0 + description: Store admin events with searchable headers + + slots: + message_source: + backend: nats + interfaces: + - QueueInterface + config: + url: nats://localhost:4222 + subject: admin.events.> + consumer_group: mailbox-admin + durable: true + + storage: + backend: sqlite + interfaces: + - TableWriterInterface + config: + database_path: /Users/jrepp/.prism/mailbox-admin.db + table_name: mailbox + indexed_headers: + - prism-message-id + - prism-timestamp + - prism-topic + - prism-principal + - prism-correlation-id + custom_header_pattern: "x-*" + retention_days: 90 + + query: + backend: sqlite + interfaces: + - TableReaderInterface + config: + database_path: /Users/jrepp/.prism/mailbox-admin.db + table_name: mailbox + + behavior: + batch_size: 100 + auto_commit: true + max_retries: 3 + retention_policy: + max_age_days: 90 + max_size_gb: 10 +``` + +### Pattern Behavior + +**Message Processing Flow**: + +1. **Consume**: Read message from queue (message_source slot) +2. **Extract**: Parse headers from `Metadata` map +3. **Transform**: Convert to `MailboxEvent` structure +4. 
**Store**: Write to table with indexed headers and blob body +5. **Commit**: Acknowledge message (if auto_commit enabled) + +**Error Handling**: +- Parse errors → Skip message, log warning +- Storage errors → Retry with exponential backoff +- Max retries exceeded → Log to dead letter queue (if configured) + +**Retention Policy**: +- Background job deletes events older than `max_age_days` +- Vacuum/compact database when exceeding `max_size_gb` + +### Comparison to Alternatives + +| Feature | Mailbox Pattern | Consumer Pattern | Raw SQL | +|------------------------|----------------|------------------|------------------| +| Indexed Headers | ✅ Automatic | ❌ Manual | ✅ Manual | +| Encrypted Bodies | ✅ Supported | ❌ Not handled | ✅ Manual | +| Pluggable Storage | ✅ Slot-based | ❌ None | ❌ Fixed | +| Schema Evolution | ✅ JSON custom | ❌ Not handled | ⚠️ Migrations | +| Query API | ✅ Built-in | ❌ None | ✅ SQL | +| Retention Management | ✅ Automatic | ❌ Manual | ❌ Manual | + +## Implementation Plan + +### Phase 1: Core Interfaces (Week 1) +- [ ] Define `TableWriterInterface` in `pkg/plugin/interfaces.go` +- [ ] Define `MailboxEvent`, `EventFilter`, `TableStats` types +- [ ] Add proto definitions for new interfaces + +### Phase 2: SQLite Backend (Week 2) +- [ ] Implement SQLite table writer in `pkg/drivers/sqlite/` +- [ ] Create table schema with indexed columns +- [ ] Implement `WriteEvent`, `QueryEvents`, `DeleteOldEvents` +- [ ] Add connection pooling and WAL mode +- [ ] Write unit tests with testcontainers + +### Phase 3: Mailbox Pattern (Week 2-3) +- [ ] Create `patterns/mailbox/` directory structure +- [ ] Implement mailbox pattern core logic +- [ ] Implement header extraction and mapping +- [ ] Add retention policy background job +- [ ] Implement `mailbox-runner` command +- [ ] Create `manifest.yaml` + +### Phase 4: Integration & Testing (Week 3) +- [ ] Integration tests with NATS + SQLite +- [ ] Test encrypted body handling +- [ ] Test custom header indexing +- [ ] 
Load test with 100k events/sec +- [ ] Documentation and examples + +### Phase 5: $admin Namespace Setup (Week 4) +- [ ] Configure mailbox pattern for `$admin` namespace +- [ ] Set up NATS subscription for `admin.*` topics +- [ ] Deploy with pattern-launcher +- [ ] Verify event capture and search + +## Testing Strategy + +### Unit Tests +- Header extraction from various metadata formats +- SQLite table writer operations +- Retention policy logic +- Error handling (storage failures, parse errors) + +### Integration Tests +- End-to-end: NATS → Mailbox → SQLite → Query +- Encrypted body storage and retrieval +- Custom header indexing +- Concurrent writes (10 goroutines) + +### Load Tests +- Throughput: 100k events/sec for 10 minutes +- Query performance: 1000 QPS on indexed headers +- Storage growth: 1M events = ~500MB database +- Retention policy: Delete 100k old events <1 second + +## Security Considerations + +### Encrypted Bodies +- Pattern stores encrypted bodies as-is (opaque blobs) +- No decryption required for indexing headers +- Encryption indicated by `prism-encryption` header + +### Access Control +- Namespace-level authorization via Prism auth layer +- SQLite file permissions: 0600 (owner read/write only) +- No direct database access from applications + +### PII Handling +- Headers should NOT contain PII (by convention) +- PII must be in encrypted body +- Audit headers: user ID, action, resource (not names/emails) + +## Open Questions + +1. **PostgreSQL Support**: Should we implement PostgreSQL table writer in Phase 2 or defer? + - **Decision**: Defer to Phase 6, focus on SQLite first + +2. **Query Language**: Expose SQL directly or create filter DSL? + - **Decision**: Start with `EventFilter` struct, add SQL query API later if needed + +3. **Compression**: Should we compress bodies before storage? + - **Decision**: No automatic compression. Applications can pre-compress and set `content-encoding` header + +4. 
**Partitioning**: How to handle very large mailboxes (>10M events)? + - **Decision**: Use SQLite ATTACH for time-based partitions (one DB per month) + +5. **Custom Index Columns**: Allow dynamic index creation at runtime? + - **Decision**: No. Indexes defined at configuration time only + +## Success Criteria + +- ✅ Consume 10k events/sec from NATS with SQLite backend +- ✅ Query indexed headers with <10ms latency (1M events) +- ✅ Support encrypted bodies without header degradation +- ✅ Automatic retention policy deletes old events +- ✅ Zero data loss during pattern restart (durable consumer) +- ✅ Integration with pattern-launcher and prism-admin + +## References + +- RFC-014: Layered Data Access Patterns (slot architecture) +- RFC-017: Multicast Registry Pattern (slot binding examples) +- RFC-030: Schema Evolution and Validation (schema-id header) +- RFC-033: Claim Check Pattern (large payload handling) +- ADR-005: Backend Plugin Architecture + +## Appendix A: Example Queries + +**Query by Time Range**: +```sql +SELECT message_id, timestamp, topic, principal +FROM mailbox +WHERE timestamp BETWEEN 1697000000000 AND 1697086400000 +ORDER BY timestamp DESC +LIMIT 100; +``` + +**Query by Principal and Topic**: +```sql +SELECT message_id, timestamp, correlation_id, body +FROM mailbox +WHERE principal = 'user-123' + AND topic LIKE 'admin.users.%' +ORDER BY timestamp DESC; +``` + +**Query by Correlation ID (Distributed Trace)**: +```sql +SELECT message_id, timestamp, topic, principal, body +FROM mailbox +WHERE correlation_id = 'trace-abc123' +ORDER BY timestamp ASC; +``` + +## Appendix B: SQLite Backend Details + +**Connection Settings**: +```go +// Optimized SQLite settings for write-heavy workload +PRAGMA journal_mode=WAL; +PRAGMA synchronous=NORMAL; +PRAGMA cache_size=10000; +PRAGMA temp_store=MEMORY; +PRAGMA mmap_size=30000000000; +``` + +**Write Performance**: +- Batch inserts: 100 events per transaction +- WAL mode: 10x faster writes vs rollback journal +- Expected: 
50k events/sec on SSD (single process) + +**Query Performance**: +- Indexed queries: <10ms for 1M events +- Full-text search: Add FTS5 virtual table for body search (if decrypted) +- Explain query plans with `EXPLAIN QUERY PLAN` + +## Appendix C: Future Enhancements + +### Phase 6: Additional Backends +- PostgreSQL table writer (horizontal scaling) +- ClickHouse table writer (OLAP analytics) +- DynamoDB table writer (serverless) + +### Phase 7: Advanced Features +- Full-text search on decrypted bodies (opt-in) +- Time-series aggregations (events per hour/day) +- Materialized views for common queries +- Export to Parquet for data lake integration + +### Phase 8: Admin UI +- Web UI for searching mailbox events +- Query builder for non-SQL users +- Event detail view with header/body inspection +- Export results to CSV/JSON diff --git a/docs/404.html b/docs/404.html deleted file mode 100644 index ba183fafe..000000000 --- a/docs/404.html +++ /dev/null @@ -1,20 +0,0 @@ - - - - - -Page Not Found | Prism - - - - - - - - - - - -
Skip to main content

Page Not Found

We could not find what you were looking for.

Please contact the owner of the site that linked you to the original URL and let them know their link is broken.

- - \ No newline at end of file diff --git a/docs/index.html b/docs/index.html deleted file mode 100644 index f6ac92804..000000000 --- a/docs/index.html +++ /dev/null @@ -1,20 +0,0 @@ - - - - - -Prism - High-Performance Data Access Gateway | Prism - - - - - - - - - - - -
Skip to main content

Prism

High-Performance Data Access Gateway

Unify your data access. One API, any backend. Blazing fast.

Easy to Use

Unified API

Single gRPC/HTTP interface across all backends: Kafka, NATS, Postgres, SQLite, Neptune. Applications declare requirements; Prism auto-provisions and routes.

Focus on What Matters

Rust Performance

10-100x faster than JVM alternatives with sub-millisecond P50 latency. Built with Rust for predictable performance and memory safety.

Powered by React

Protobuf-Driven

Single source of truth for all data models. PII tagging drives encryption and masking. Consistent types across Rust, Python, and TypeScript.

- - \ No newline at end of file diff --git a/docs/sitemap.xml b/docs/sitemap.xml deleted file mode 100644 index a97ca4464..000000000 --- a/docs/sitemap.xml +++ /dev/null @@ -1 +0,0 @@ -https://jrepp.github.io/prism-data-layer/searchweekly0.5https://jrepp.github.io/prism-data-layer/adr/tagsweekly0.5https://jrepp.github.io/prism-data-layer/adr/tags/abacweekly0.5https://jrepp.github.io/prism-data-layer/adr/tags/abstractionweekly0.5https://jrepp.github.io/prism-data-layer/adr/tags/acceptance-testingweekly0.5https://jrepp.github.io/prism-data-layer/adr/tags/adminweekly0.5https://jrepp.github.io/prism-data-layer/adr/tags/api-designweekly0.5https://jrepp.github.io/prism-data-layer/adr/tags/architectureweekly0.5https://jrepp.github.io/prism-data-layer/adr/tags/asyncweekly0.5https://jrepp.github.io/prism-data-layer/adr/tags/authenticationweekly0.5https://jrepp.github.io/prism-data-layer/adr/tags/authorizationweekly0.5https://jrepp.github.io/prism-data-layer/adr/tags/automationweekly0.5https://jrepp.github.io/prism-data-layer/adr/tags/awsweekly0.5https://jrepp.github.io/prism-data-layer/adr/tags/backendweekly0.5https://jrepp.github.io/prism-data-layer/adr/tags/backendsweekly0.5https://jrepp.github.io/prism-data-layer/adr/tags/blobsweekly0.5https://jrepp.github.io/prism-data-layer/adr/tags/cacheweekly0.5https://jrepp.github.io/prism-data-layer/adr/tags/ci-cdweekly0.5https://jrepp.github.io/prism-data-layer/adr/tags/claim-checkweekly0.5https://jrepp.github.io/prism-data-layer/adr/tags/cleanupweekly0.5https://jrepp.github.io/prism-data-layer/adr/tags/cliweekly0.5https://jrepp.github.io/prism-data-layer/adr/tags/client-serverweekly0.5https://jrepp.github.io/prism-data-layer/adr/tags/codegenweekly0.5https://jrepp.github.io/prism-data-layer/adr/tags/concurrencyweekly0.5https://jrepp.github.io/prism-data-layer/adr/tags/configurationweekly0.5https://jrepp.github.io/prism-data-layer/adr/tags/containersweekly0.5https://jrepp.github.io/prism-data-layer/adr/tags/control-planeweekly0.5htt
ps://jrepp.github.io/prism-data-layer/adr/tags/costweekly0.5https://jrepp.github.io/prism-data-layer/adr/tags/data-lifecycleweekly0.5https://jrepp.github.io/prism-data-layer/adr/tags/databaseweekly0.5https://jrepp.github.io/prism-data-layer/adr/tags/debuggingweekly0.5https://jrepp.github.io/prism-data-layer/adr/tags/deploymentweekly0.5https://jrepp.github.io/prism-data-layer/adr/tags/developer-experienceweekly0.5https://jrepp.github.io/prism-data-layer/adr/tags/dexweekly0.5https://jrepp.github.io/prism-data-layer/adr/tags/dockerweekly0.5https://jrepp.github.io/prism-data-layer/adr/tags/dryweekly0.5https://jrepp.github.io/prism-data-layer/adr/tags/dxweekly0.5https://jrepp.github.io/prism-data-layer/adr/tags/error-handlingweekly0.5https://jrepp.github.io/prism-data-layer/adr/tags/evolutionweekly0.5https://jrepp.github.io/prism-data-layer/adr/tags/fastapiweekly0.5https://jrepp.github.io/prism-data-layer/adr/tags/frontendweekly0.5https://jrepp.github.io/prism-data-layer/adr/tags/garbage-collectionweekly0.5https://jrepp.github.io/prism-data-layer/adr/tags/goweekly0.5https://jrepp.github.io/prism-data-layer/adr/tags/graphweekly0.5https://jrepp.github.io/prism-data-layer/adr/tags/gremlinweekly0.5https://jrepp.github.io/prism-data-layer/adr/tags/grpcweekly0.5https://jrepp.github.io/prism-data-layer/adr/tags/grpc-webweekly0.5https://jrepp.github.io/prism-data-layer/adr/tags/hashicorpweekly0.5https://jrepp.github.io/prism-data-layer/adr/tags/infrastructureweekly0.5https://jrepp.github.io/prism-data-layer/adr/tags/interfacesweekly0.5https://jrepp.github.io/prism-data-layer/adr/tags/isolationweekly0.5https://jrepp.github.io/prism-data-layer/adr/tags/kubernetesweekly0.5https://jrepp.github.io/prism-data-layer/adr/tags/languageweekly0.5https://jrepp.github.io/prism-data-layer/adr/tags/languagesweekly0.5https://jrepp.github.io/prism-data-layer/adr/tags/launcherweekly0.5https://jrepp.github.io/prism-data-layer/adr/tags/lifecycleweekly0.5https://jrepp.github.io/prism-data-layer/adr/
tags/local-developmentweekly0.5https://jrepp.github.io/prism-data-layer/adr/tags/local-testingweekly0.5https://jrepp.github.io/prism-data-layer/adr/tags/loggingweekly0.5https://jrepp.github.io/prism-data-layer/adr/tags/macosweekly0.5https://jrepp.github.io/prism-data-layer/adr/tags/messagingweekly0.5https://jrepp.github.io/prism-data-layer/adr/tags/minioweekly0.5https://jrepp.github.io/prism-data-layer/adr/tags/namespaceweekly0.5https://jrepp.github.io/prism-data-layer/adr/tags/object-storageweekly0.5https://jrepp.github.io/prism-data-layer/adr/tags/observabilityweekly0.5https://jrepp.github.io/prism-data-layer/adr/tags/oidcweekly0.5https://jrepp.github.io/prism-data-layer/adr/tags/openpolicyagentweekly0.5https://jrepp.github.io/prism-data-layer/adr/tags/opentelemetryweekly0.5https://jrepp.github.io/prism-data-layer/adr/tags/operationsweekly0.5https://jrepp.github.io/prism-data-layer/adr/tags/partitioningweekly0.5https://jrepp.github.io/prism-data-layer/adr/tags/patternsweekly0.5https://jrepp.github.io/prism-data-layer/adr/tags/performanceweekly0.5https://jrepp.github.io/prism-data-layer/adr/tags/planningweekly0.5https://jrepp.github.io/prism-data-layer/adr/tags/pluginweekly0.5https://jrepp.github.io/prism-data-layer/adr/tags/pluginsweekly0.5https://jrepp.github.io/prism-data-layer/adr/tags/podmanweekly0.5https://jrepp.github.io/prism-data-layer/adr/tags/policyweekly0.5https://jrepp.github.io/prism-data-layer/adr/tags/procmgrweekly0.5https://jrepp.github.io/prism-data-layer/adr/tags/protobufweekly0.5https://jrepp.github.io/prism-data-layer/adr/tags/protocolsweekly0.5https://jrepp.github.io/prism-data-layer/adr/tags/proxyweekly0.5https://jrepp.github.io/prism-data-layer/adr/tags/qualityweekly0.5https://jrepp.github.io/prism-data-layer/adr/tags/queueweekly0.5https://jrepp.github.io/prism-data-layer/adr/tags/rbacweekly0.5https://jrepp.github.io/prism-data-layer/adr/tags/refactoringweekly0.5https://jrepp.github.io/prism-data-layer/adr/tags/registryweekly0.5https://jrepp
.github.io/prism-data-layer/adr/tags/reliabilityweekly0.5https://jrepp.github.io/prism-data-layer/adr/tags/rustweekly0.5https://jrepp.github.io/prism-data-layer/adr/tags/s-3weekly0.5https://jrepp.github.io/prism-data-layer/adr/tags/scalabilityweekly0.5https://jrepp.github.io/prism-data-layer/adr/tags/schemaweekly0.5https://jrepp.github.io/prism-data-layer/adr/tags/securityweekly0.5https://jrepp.github.io/prism-data-layer/adr/tags/signozweekly0.5https://jrepp.github.io/prism-data-layer/adr/tags/sqsweekly0.5https://jrepp.github.io/prism-data-layer/adr/tags/testcontainersweekly0.5https://jrepp.github.io/prism-data-layer/adr/tags/testingweekly0.5https://jrepp.github.io/prism-data-layer/adr/tags/tinkerpopweekly0.5https://jrepp.github.io/prism-data-layer/adr/tags/tokioweekly0.5https://jrepp.github.io/prism-data-layer/adr/tags/toolingweekly0.5https://jrepp.github.io/prism-data-layer/adr/tags/topazweekly0.5https://jrepp.github.io/prism-data-layer/adr/tags/tracingweekly0.5https://jrepp.github.io/prism-data-layer/adr/tags/ttlweekly0.5https://jrepp.github.io/prism-data-layer/adr/tags/uiweekly0.5https://jrepp.github.io/prism-data-layer/adr/tags/use-casesweekly0.5https://jrepp.github.io/prism-data-layer/adr/tags/versioningweekly0.5https://jrepp.github.io/prism-data-layer/adr/weekly0.5https://jrepp.github.io/prism-data-layer/adr/ADR-000-templateweekly0.5https://jrepp.github.io/prism-data-layer/adr/adr-001weekly0.5https://jrepp.github.io/prism-data-layer/adr/adr-002weekly0.5https://jrepp.github.io/prism-data-layer/adr/adr-003weekly0.5https://jrepp.github.io/prism-data-layer/adr/adr-004weekly0.5https://jrepp.github.io/prism-data-layer/adr/adr-005weekly0.5https://jrepp.github.io/prism-data-layer/adr/adr-006weekly0.5https://jrepp.github.io/prism-data-layer/adr/adr-007weekly0.5https://jrepp.github.io/prism-data-layer/adr/adr-008weekly0.5https://jrepp.github.io/prism-data-layer/adr/adr-009weekly0.5https://jrepp.github.io/prism-data-layer/adr/adr-010weekly0.5https://jrepp.github.io/pris
m-data-layer/adr/adr-011weekly0.5https://jrepp.github.io/prism-data-layer/adr/adr-012weekly0.5https://jrepp.github.io/prism-data-layer/adr/adr-013weekly0.5https://jrepp.github.io/prism-data-layer/adr/adr-014weekly0.5https://jrepp.github.io/prism-data-layer/adr/adr-015weekly0.5https://jrepp.github.io/prism-data-layer/adr/adr-016weekly0.5https://jrepp.github.io/prism-data-layer/adr/adr-017weekly0.5https://jrepp.github.io/prism-data-layer/adr/adr-018weekly0.5https://jrepp.github.io/prism-data-layer/adr/adr-019weekly0.5https://jrepp.github.io/prism-data-layer/adr/adr-020weekly0.5https://jrepp.github.io/prism-data-layer/adr/adr-021weekly0.5https://jrepp.github.io/prism-data-layer/adr/adr-022weekly0.5https://jrepp.github.io/prism-data-layer/adr/adr-023weekly0.5https://jrepp.github.io/prism-data-layer/adr/adr-024weekly0.5https://jrepp.github.io/prism-data-layer/adr/adr-025weekly0.5https://jrepp.github.io/prism-data-layer/adr/adr-026weekly0.5https://jrepp.github.io/prism-data-layer/adr/adr-027weekly0.5https://jrepp.github.io/prism-data-layer/adr/adr-028weekly0.5https://jrepp.github.io/prism-data-layer/adr/adr-029weekly0.5https://jrepp.github.io/prism-data-layer/adr/adr-030weekly0.5https://jrepp.github.io/prism-data-layer/adr/adr-031weekly0.5https://jrepp.github.io/prism-data-layer/adr/adr-032weekly0.5https://jrepp.github.io/prism-data-layer/adr/adr-033weekly0.5https://jrepp.github.io/prism-data-layer/adr/adr-034weekly0.5https://jrepp.github.io/prism-data-layer/adr/adr-035weekly0.5https://jrepp.github.io/prism-data-layer/adr/adr-036weekly0.5https://jrepp.github.io/prism-data-layer/adr/adr-037weekly0.5https://jrepp.github.io/prism-data-layer/adr/adr-038weekly0.5https://jrepp.github.io/prism-data-layer/adr/adr-039weekly0.5https://jrepp.github.io/prism-data-layer/adr/adr-040weekly0.5https://jrepp.github.io/prism-data-layer/adr/adr-041weekly0.5https://jrepp.github.io/prism-data-layer/adr/adr-042weekly0.5https://jrepp.github.io/prism-data-layer/adr/adr-043weekly0.5https://jrepp.g
ithub.io/prism-data-layer/adr/adr-044weekly0.5https://jrepp.github.io/prism-data-layer/adr/adr-045weekly0.5https://jrepp.github.io/prism-data-layer/adr/adr-046weekly0.5https://jrepp.github.io/prism-data-layer/adr/adr-047weekly0.5https://jrepp.github.io/prism-data-layer/adr/adr-048weekly0.5https://jrepp.github.io/prism-data-layer/adr/adr-049weekly0.5https://jrepp.github.io/prism-data-layer/adr/adr-050weekly0.5https://jrepp.github.io/prism-data-layer/adr/adr-051weekly0.5https://jrepp.github.io/prism-data-layer/adr/adr-052weekly0.5https://jrepp.github.io/prism-data-layer/adr/adr-053weekly0.5https://jrepp.github.io/prism-data-layer/adr/adr-055weekly0.5https://jrepp.github.io/prism-data-layer/adr/adr-056weekly0.5https://jrepp.github.io/prism-data-layer/adr/adr-057weekly0.5https://jrepp.github.io/prism-data-layer/docs/tagsweekly0.5https://jrepp.github.io/prism-data-layer/docs/tags/architectureweekly0.5https://jrepp.github.io/prism-data-layer/docs/tags/backend-interfacesweekly0.5https://jrepp.github.io/prism-data-layer/docs/tags/patternsweekly0.5https://jrepp.github.io/prism-data-layer/docs/tags/system-designweekly0.5https://jrepp.github.io/prism-data-layer/docs/tags/technical-overviewweekly0.5https://jrepp.github.io/prism-data-layer/docs/architectureweekly0.5https://jrepp.github.io/prism-data-layer/docs/changelogweekly0.5https://jrepp.github.io/prism-data-layer/docs/introweekly0.5https://jrepp.github.io/prism-data-layer/docs/key-documentsweekly0.5https://jrepp.github.io/prism-data-layer/memos/tagsweekly0.5https://jrepp.github.io/prism-data-layer/memos/tags/acceptance-testsweekly0.5https://jrepp.github.io/prism-data-layer/memos/tags/adminweekly0.5https://jrepp.github.io/prism-data-layer/memos/tags/api-designweekly0.5https://jrepp.github.io/prism-data-layer/memos/tags/architectureweekly0.5https://jrepp.github.io/prism-data-layer/memos/tags/authenticationweekly0.5https://jrepp.github.io/prism-data-layer/memos/tags/authorizationweekly0.5https://jrepp.github.io/prism-data-laye
r/memos/tags/awsweekly0.5https://jrepp.github.io/prism-data-layer/memos/tags/backendweekly0.5https://jrepp.github.io/prism-data-layer/memos/tags/backendsweekly0.5https://jrepp.github.io/prism-data-layer/memos/tags/best-practicesweekly0.5https://jrepp.github.io/prism-data-layer/memos/tags/build-systemweekly0.5https://jrepp.github.io/prism-data-layer/memos/tags/bulkheadweekly0.5https://jrepp.github.io/prism-data-layer/memos/tags/capabilitiesweekly0.5https://jrepp.github.io/prism-data-layer/memos/tags/ci-cdweekly0.5https://jrepp.github.io/prism-data-layer/memos/tags/cliweekly0.5https://jrepp.github.io/prism-data-layer/memos/tags/code-reuseweekly0.5https://jrepp.github.io/prism-data-layer/memos/tags/containersweekly0.5https://jrepp.github.io/prism-data-layer/memos/tags/coverageweekly0.5https://jrepp.github.io/prism-data-layer/memos/tags/credentialsweekly0.5https://jrepp.github.io/prism-data-layer/memos/tags/demoweekly0.5https://jrepp.github.io/prism-data-layer/memos/tags/deploymentweekly0.5https://jrepp.github.io/prism-data-layer/memos/tags/developer-experienceweekly0.5https://jrepp.github.io/prism-data-layer/memos/tags/developer-guideweekly0.5https://jrepp.github.io/prism-data-layer/memos/tags/developmentweekly0.5https://jrepp.github.io/prism-data-layer/memos/tags/distributed-systemsweekly0.5https://jrepp.github.io/prism-data-layer/memos/tags/documentationweekly0.5https://jrepp.github.io/prism-data-layer/memos/tags/driversweekly0.5https://jrepp.github.io/prism-data-layer/memos/tags/dxweekly0.5https://jrepp.github.io/prism-data-layer/memos/tags/edge-casesweekly0.5https://jrepp.github.io/prism-data-layer/memos/tags/envelopeweekly0.5https://jrepp.github.io/prism-data-layer/memos/tags/errorsweekly0.5https://jrepp.github.io/prism-data-layer/memos/tags/evolutionweekly0.5https://jrepp.github.io/prism-data-layer/memos/tags/fault-isolationweekly0.5https://jrepp.github.io/prism-data-layer/memos/tags/frameworkweekly0.5https://jrepp.github.io/prism-data-layer/memos/tags/goweekly0.
5https://jrepp.github.io/prism-data-layer/memos/tags/golangweekly0.5https://jrepp.github.io/prism-data-layer/memos/tags/grpcweekly0.5https://jrepp.github.io/prism-data-layer/memos/tags/implementationweekly0.5https://jrepp.github.io/prism-data-layer/memos/tags/improvementsweekly0.5https://jrepp.github.io/prism-data-layer/memos/tags/infrastructureweekly0.5https://jrepp.github.io/prism-data-layer/memos/tags/integrationweekly0.5https://jrepp.github.io/prism-data-layer/memos/tags/interfacesweekly0.5https://jrepp.github.io/prism-data-layer/memos/tags/isolationweekly0.5https://jrepp.github.io/prism-data-layer/memos/tags/kubernetesweekly0.5https://jrepp.github.io/prism-data-layer/memos/tags/launcherweekly0.5https://jrepp.github.io/prism-data-layer/memos/tags/lessons-learnedweekly0.5https://jrepp.github.io/prism-data-layer/memos/tags/lifecycleweekly0.5https://jrepp.github.io/prism-data-layer/memos/tags/lintingweekly0.5https://jrepp.github.io/prism-data-layer/memos/tags/load-testingweekly0.5https://jrepp.github.io/prism-data-layer/memos/tags/local-infrastructureweekly0.5https://jrepp.github.io/prism-data-layer/memos/tags/memstoreweekly0.5https://jrepp.github.io/prism-data-layer/memos/tags/messagingweekly0.5https://jrepp.github.io/prism-data-layer/memos/tags/multi-tenancyweekly0.5https://jrepp.github.io/prism-data-layer/memos/tags/multicast-registryweekly0.5https://jrepp.github.io/prism-data-layer/memos/tags/observabilityweekly0.5https://jrepp.github.io/prism-data-layer/memos/tags/oidcweekly0.5https://jrepp.github.io/prism-data-layer/memos/tags/opentelemetryweekly0.5https://jrepp.github.io/prism-data-layer/memos/tags/optimizationweekly0.5https://jrepp.github.io/prism-data-layer/memos/tags/pattern-sdkweekly0.5https://jrepp.github.io/prism-data-layer/memos/tags/patternsweekly0.5https://jrepp.github.io/prism-data-layer/memos/tags/performanceweekly0.5https://jrepp.github.io/prism-data-layer/memos/tags/pluginsweekly0.5https://jrepp.github.io/prism-data-layer/memos/tags/pocweekly0.5
https://jrepp.github.io/prism-data-layer/memos/tags/poc-1weekly0.5https://jrepp.github.io/prism-data-layer/memos/tags/poc-4weekly0.5https://jrepp.github.io/prism-data-layer/memos/tags/podmanweekly0.5https://jrepp.github.io/prism-data-layer/memos/tags/postgresweekly0.5https://jrepp.github.io/prism-data-layer/memos/tags/processweekly0.5https://jrepp.github.io/prism-data-layer/memos/tags/procmgrweekly0.5https://jrepp.github.io/prism-data-layer/memos/tags/prometheusweekly0.5https://jrepp.github.io/prism-data-layer/memos/tags/protobufweekly0.5https://jrepp.github.io/prism-data-layer/memos/tags/protocolweekly0.5https://jrepp.github.io/prism-data-layer/memos/tags/pubsubweekly0.5https://jrepp.github.io/prism-data-layer/memos/tags/pythonweekly0.5https://jrepp.github.io/prism-data-layer/memos/tags/quickstartweekly0.5https://jrepp.github.io/prism-data-layer/memos/tags/redisweekly0.5https://jrepp.github.io/prism-data-layer/memos/tags/refactoringweekly0.5https://jrepp.github.io/prism-data-layer/memos/tags/registryweekly0.5https://jrepp.github.io/prism-data-layer/memos/tags/reliabilityweekly0.5https://jrepp.github.io/prism-data-layer/memos/tags/reviewweekly0.5https://jrepp.github.io/prism-data-layer/memos/tags/runtimeweekly0.5https://jrepp.github.io/prism-data-layer/memos/tags/rustweekly0.5https://jrepp.github.io/prism-data-layer/memos/tags/schemaweekly0.5https://jrepp.github.io/prism-data-layer/memos/tags/schemasweekly0.5https://jrepp.github.io/prism-data-layer/memos/tags/scratchweekly0.5https://jrepp.github.io/prism-data-layer/memos/tags/securityweekly0.5https://jrepp.github.io/prism-data-layer/memos/tags/service-identityweekly0.5https://jrepp.github.io/prism-data-layer/memos/tags/session-managementweekly0.5https://jrepp.github.io/prism-data-layer/memos/tags/summaryweekly0.5https://jrepp.github.io/prism-data-layer/memos/tags/table-drivenweekly0.5https://jrepp.github.io/prism-data-layer/memos/tags/tenancyweekly0.5https://jrepp.github.io/prism-data-layer/memos/tags/testingweekly0
.5https://jrepp.github.io/prism-data-layer/memos/tags/toolingweekly0.5https://jrepp.github.io/prism-data-layer/memos/tags/topazweekly0.5https://jrepp.github.io/prism-data-layer/memos/tags/validationweekly0.5https://jrepp.github.io/prism-data-layer/memos/tags/vaultweekly0.5https://jrepp.github.io/prism-data-layer/memos/tags/walweekly0.5https://jrepp.github.io/prism-data-layer/memos/tags/workflowweekly0.5https://jrepp.github.io/prism-data-layer/memos/tags/workflowsweekly0.5https://jrepp.github.io/prism-data-layer/memos/weekly0.5https://jrepp.github.io/prism-data-layer/memos/memo-001weekly0.5https://jrepp.github.io/prism-data-layer/memos/memo-002weekly0.5https://jrepp.github.io/prism-data-layer/memos/memo-003weekly0.5https://jrepp.github.io/prism-data-layer/memos/memo-004weekly0.5https://jrepp.github.io/prism-data-layer/memos/memo-005weekly0.5https://jrepp.github.io/prism-data-layer/memos/memo-006weekly0.5https://jrepp.github.io/prism-data-layer/memos/memo-007weekly0.5https://jrepp.github.io/prism-data-layer/memos/memo-008weekly0.5https://jrepp.github.io/prism-data-layer/memos/memo-009weekly0.5https://jrepp.github.io/prism-data-layer/memos/memo-010weekly0.5https://jrepp.github.io/prism-data-layer/memos/memo-011weekly0.5https://jrepp.github.io/prism-data-layer/memos/memo-012weekly0.5https://jrepp.github.io/prism-data-layer/memos/memo-013weekly0.5https://jrepp.github.io/prism-data-layer/memos/memo-014weekly0.5https://jrepp.github.io/prism-data-layer/memos/memo-015weekly0.5https://jrepp.github.io/prism-data-layer/memos/memo-016weekly0.5https://jrepp.github.io/prism-data-layer/memos/memo-017weekly0.5https://jrepp.github.io/prism-data-layer/memos/memo-018weekly0.5https://jrepp.github.io/prism-data-layer/memos/memo-019weekly0.5https://jrepp.github.io/prism-data-layer/memos/memo-020weekly0.5https://jrepp.github.io/prism-data-layer/memos/memo-021weekly0.5https://jrepp.github.io/prism-data-layer/memos/memo-022weekly0.5https://jrepp.github.io/prism-data-layer/memos/memo-023weekl
y0.5https://jrepp.github.io/prism-data-layer/memos/memo-030weekly0.5https://jrepp.github.io/prism-data-layer/memos/memo-031weekly0.5https://jrepp.github.io/prism-data-layer/memos/memo-032weekly0.5https://jrepp.github.io/prism-data-layer/memos/memo-033weekly0.5https://jrepp.github.io/prism-data-layer/memos/memo-034weekly0.5https://jrepp.github.io/prism-data-layer/netflix/tagsweekly0.5https://jrepp.github.io/prism-data-layer/netflix/tags/abstractionsweekly0.5https://jrepp.github.io/prism-data-layer/netflix/tags/applicationsweekly0.5https://jrepp.github.io/prism-data-layer/netflix/tags/architectureweekly0.5https://jrepp.github.io/prism-data-layer/netflix/tags/cassandraweekly0.5https://jrepp.github.io/prism-data-layer/netflix/tags/counterweekly0.5https://jrepp.github.io/prism-data-layer/netflix/tags/data-gatewayweekly0.5https://jrepp.github.io/prism-data-layer/netflix/tags/dual-writeweekly0.5https://jrepp.github.io/prism-data-layer/netflix/tags/durabilityweekly0.5https://jrepp.github.io/prism-data-layer/netflix/tags/evolutionweekly0.5https://jrepp.github.io/prism-data-layer/netflix/tags/flinkweekly0.5https://jrepp.github.io/prism-data-layer/netflix/tags/graphweekly0.5https://jrepp.github.io/prism-data-layer/netflix/tags/kafkaweekly0.5https://jrepp.github.io/prism-data-layer/netflix/tags/key-valueweekly0.5https://jrepp.github.io/prism-data-layer/netflix/tags/lessons-learnedweekly0.5https://jrepp.github.io/prism-data-layer/netflix/tags/metricsweekly0.5https://jrepp.github.io/prism-data-layer/netflix/tags/migrationweekly0.5https://jrepp.github.io/prism-data-layer/netflix/tags/netflixweekly0.5https://jrepp.github.io/prism-data-layer/netflix/tags/performanceweekly0.5https://jrepp.github.io/prism-data-layer/netflix/tags/real-timeweekly0.5https://jrepp.github.io/prism-data-layer/netflix/tags/referenceweekly0.5https://jrepp.github.io/prism-data-layer/netflix/tags/resilienceweekly0.5https://jrepp.github.io/prism-data-layer/netflix/tags/scaleweekly0.5https://jrepp.github.io/prism
-data-layer/netflix/tags/schemaweekly0.5https://jrepp.github.io/prism-data-layer/netflix/tags/timeseriesweekly0.5https://jrepp.github.io/prism-data-layer/netflix/tags/transcriptweekly0.5https://jrepp.github.io/prism-data-layer/netflix/tags/use-casesweekly0.5https://jrepp.github.io/prism-data-layer/netflix/tags/videoweekly0.5https://jrepp.github.io/prism-data-layer/netflix/tags/walweekly0.5https://jrepp.github.io/prism-data-layer/netflix/weekly0.5https://jrepp.github.io/prism-data-layer/netflix/netflix-abstractionsweekly0.5https://jrepp.github.io/prism-data-layer/netflix/netflix-data-evolve-migrationweekly0.5https://jrepp.github.io/prism-data-layer/netflix/netflix-dual-write-migrationweekly0.5https://jrepp.github.io/prism-data-layer/netflix/netflix-key-use-casesweekly0.5https://jrepp.github.io/prism-data-layer/netflix/netflix-scaleweekly0.5https://jrepp.github.io/prism-data-layer/netflix/netflix-summaryweekly0.5https://jrepp.github.io/prism-data-layer/netflix/netflix-video1weekly0.5https://jrepp.github.io/prism-data-layer/netflix/netflix-video2weekly0.5https://jrepp.github.io/prism-data-layer/netflix/netflix-write-ahead-logweekly0.5https://jrepp.github.io/prism-data-layer/prds/tagsweekly0.5https://jrepp.github.io/prism-data-layer/prds/tags/netflixweekly0.5https://jrepp.github.io/prism-data-layer/prds/tags/prdweekly0.5https://jrepp.github.io/prism-data-layer/prds/tags/productweekly0.5https://jrepp.github.io/prism-data-layer/prds/tags/requirementsweekly0.5https://jrepp.github.io/prism-data-layer/prds/tags/visionweekly0.5https://jrepp.github.io/prism-data-layer/prds/weekly0.5https://jrepp.github.io/prism-data-layer/prds/prd-001weekly0.5https://jrepp.github.io/prism-data-layer/rfc/tagsweekly0.5https://jrepp.github.io/prism-data-layer/rfc/tags/a-2-aweekly0.5https://jrepp.github.io/prism-data-layer/rfc/tags/acceptanceweekly0.5https://jrepp.github.io/prism-data-layer/rfc/tags/acceptance-testingweekly0.5https://jrepp.github.io/prism-data-layer/rfc/tags/adapterweekly0.5https:
//jrepp.github.io/prism-data-layer/rfc/tags/adminweekly0.5https://jrepp.github.io/prism-data-layer/rfc/tags/architectureweekly0.5https://jrepp.github.io/prism-data-layer/rfc/tags/authenticationweekly0.5https://jrepp.github.io/prism-data-layer/rfc/tags/authorizationweekly0.5https://jrepp.github.io/prism-data-layer/rfc/tags/awsweekly0.5https://jrepp.github.io/prism-data-layer/rfc/tags/backendweekly0.5https://jrepp.github.io/prism-data-layer/rfc/tags/backendsweekly0.5https://jrepp.github.io/prism-data-layer/rfc/tags/bridgeweekly0.5https://jrepp.github.io/prism-data-layer/rfc/tags/bufferingweekly0.5https://jrepp.github.io/prism-data-layer/rfc/tags/build-systemweekly0.5https://jrepp.github.io/prism-data-layer/rfc/tags/bulkheadweekly0.5https://jrepp.github.io/prism-data-layer/rfc/tags/claim-checkweekly0.5https://jrepp.github.io/prism-data-layer/rfc/tags/cliweekly0.5https://jrepp.github.io/prism-data-layer/rfc/tags/clientweekly0.5https://jrepp.github.io/prism-data-layer/rfc/tags/client-apiweekly0.5https://jrepp.github.io/prism-data-layer/rfc/tags/code-coverageweekly0.5https://jrepp.github.io/prism-data-layer/rfc/tags/code-layoutweekly0.5https://jrepp.github.io/prism-data-layer/rfc/tags/compositionweekly0.5https://jrepp.github.io/prism-data-layer/rfc/tags/concurrencyweekly0.5https://jrepp.github.io/prism-data-layer/rfc/tags/configurationweekly0.5https://jrepp.github.io/prism-data-layer/rfc/tags/consumerweekly0.5https://jrepp.github.io/prism-data-layer/rfc/tags/control-planeweekly0.5https://jrepp.github.io/prism-data-layer/rfc/tags/credentialsweekly0.5https://jrepp.github.io/prism-data-layer/rfc/tags/cross-regionweekly0.5https://jrepp.github.io/prism-data-layer/rfc/tags/data-accessweekly0.5https://jrepp.github.io/prism-data-layer/rfc/tags/debuggingweekly0.5https://jrepp.github.io/prism-data-layer/rfc/tags/developer-experienceweekly0.5https://jrepp.github.io/prism-data-layer/rfc/tags/dexweekly0.5https://jrepp.github.io/prism-data-layer/rfc/tags/distributedweekly0.5https://jre
pp.github.io/prism-data-layer/rfc/tags/driversweekly0.5https://jrepp.github.io/prism-data-layer/rfc/tags/evaluationweekly0.5https://jrepp.github.io/prism-data-layer/rfc/tags/evolutionweekly0.5https://jrepp.github.io/prism-data-layer/rfc/tags/future-proofweekly0.5https://jrepp.github.io/prism-data-layer/rfc/tags/goweekly0.5https://jrepp.github.io/prism-data-layer/rfc/tags/governanceweekly0.5https://jrepp.github.io/prism-data-layer/rfc/tags/graphweekly0.5https://jrepp.github.io/prism-data-layer/rfc/tags/grpcweekly0.5https://jrepp.github.io/prism-data-layer/rfc/tags/high-availabilityweekly0.5https://jrepp.github.io/prism-data-layer/rfc/tags/htmxweekly0.5https://jrepp.github.io/prism-data-layer/rfc/tags/httpweekly0.5https://jrepp.github.io/prism-data-layer/rfc/tags/implementationweekly0.5https://jrepp.github.io/prism-data-layer/rfc/tags/interfacesweekly0.5https://jrepp.github.io/prism-data-layer/rfc/tags/internet-scaleweekly0.5https://jrepp.github.io/prism-data-layer/rfc/tags/interoperabilityweekly0.5https://jrepp.github.io/prism-data-layer/rfc/tags/isolationweekly0.5https://jrepp.github.io/prism-data-layer/rfc/tags/keyvalueweekly0.5https://jrepp.github.io/prism-data-layer/rfc/tags/kubeletweekly0.5https://jrepp.github.io/prism-data-layer/rfc/tags/layeringweekly0.5https://jrepp.github.io/prism-data-layer/rfc/tags/libraryweekly0.5https://jrepp.github.io/prism-data-layer/rfc/tags/lifecycleweekly0.5https://jrepp.github.io/prism-data-layer/rfc/tags/load-testingweekly0.5https://jrepp.github.io/prism-data-layer/rfc/tags/local-developmentweekly0.5https://jrepp.github.io/prism-data-layer/rfc/tags/mcpweekly0.5https://jrepp.github.io/prism-data-layer/rfc/tags/memstoreweekly0.5https://jrepp.github.io/prism-data-layer/rfc/tags/minioweekly0.5https://jrepp.github.io/prism-data-layer/rfc/tags/mtlsweekly0.5https://jrepp.github.io/prism-data-layer/rfc/tags/multi-regionweekly0.5https://jrepp.github.io/prism-data-layer/rfc/tags/namespaceweekly0.5https://jrepp.github.io/prism-data-layer/rfc
/tags/neptuneweekly0.5https://jrepp.github.io/prism-data-layer/rfc/tags/networkingweekly0.5https://jrepp.github.io/prism-data-layer/rfc/tags/object-storageweekly0.5https://jrepp.github.io/prism-data-layer/rfc/tags/observabilityweekly0.5https://jrepp.github.io/prism-data-layer/rfc/tags/oidcweekly0.5https://jrepp.github.io/prism-data-layer/rfc/tags/orchestrationweekly0.5https://jrepp.github.io/prism-data-layer/rfc/tags/paginationweekly0.5https://jrepp.github.io/prism-data-layer/rfc/tags/patternweekly0.5https://jrepp.github.io/prism-data-layer/rfc/tags/patternsweekly0.5https://jrepp.github.io/prism-data-layer/rfc/tags/performanceweekly0.5https://jrepp.github.io/prism-data-layer/rfc/tags/pluginweekly0.5https://jrepp.github.io/prism-data-layer/rfc/tags/pluginsweekly0.5https://jrepp.github.io/prism-data-layer/rfc/tags/pocweekly0.5https://jrepp.github.io/prism-data-layer/rfc/tags/policyweekly0.5https://jrepp.github.io/prism-data-layer/rfc/tags/prioritiesweekly0.5https://jrepp.github.io/prism-data-layer/rfc/tags/process-managementweekly0.5https://jrepp.github.io/prism-data-layer/rfc/tags/producerweekly0.5https://jrepp.github.io/prism-data-layer/rfc/tags/protocolweekly0.5https://jrepp.github.io/prism-data-layer/rfc/tags/proxyweekly0.5https://jrepp.github.io/prism-data-layer/rfc/tags/pubsubweekly0.5https://jrepp.github.io/prism-data-layer/rfc/tags/quality-assuranceweekly0.5https://jrepp.github.io/prism-data-layer/rfc/tags/registryweekly0.5https://jrepp.github.io/prism-data-layer/rfc/tags/reliabilityweekly0.5https://jrepp.github.io/prism-data-layer/rfc/tags/replicationweekly0.5https://jrepp.github.io/prism-data-layer/rfc/tags/roadmapweekly0.5https://jrepp.github.io/prism-data-layer/rfc/tags/s-3weekly0.5https://jrepp.github.io/prism-data-layer/rfc/tags/schemaweekly0.5https://jrepp.github.io/prism-data-layer/rfc/tags/schema-registryweekly0.5https://jrepp.github.io/prism-data-layer/rfc/tags/sdkweekly0.5https://jrepp.github.io/prism-data-layer/rfc/tags/securityweekly0.5https://jre
pp.github.io/prism-data-layer/rfc/tags/self-serviceweekly0.5https://jrepp.github.io/prism-data-layer/rfc/tags/service-discoveryweekly0.5https://jrepp.github.io/prism-data-layer/rfc/tags/sessionweekly0.5https://jrepp.github.io/prism-data-layer/rfc/tags/signozweekly0.5https://jrepp.github.io/prism-data-layer/rfc/tags/snapshotterweekly0.5https://jrepp.github.io/prism-data-layer/rfc/tags/sseweekly0.5https://jrepp.github.io/prism-data-layer/rfc/tags/strategyweekly0.5https://jrepp.github.io/prism-data-layer/rfc/tags/streamingweekly0.5https://jrepp.github.io/prism-data-layer/rfc/tags/supersededweekly0.5https://jrepp.github.io/prism-data-layer/rfc/tags/tddweekly0.5https://jrepp.github.io/prism-data-layer/rfc/tags/templweekly0.5https://jrepp.github.io/prism-data-layer/rfc/tags/testingweekly0.5https://jrepp.github.io/prism-data-layer/rfc/tags/tokensweekly0.5https://jrepp.github.io/prism-data-layer/rfc/tags/toolingweekly0.5https://jrepp.github.io/prism-data-layer/rfc/tags/uiweekly0.5https://jrepp.github.io/prism-data-layer/rfc/tags/validationweekly0.5https://jrepp.github.io/prism-data-layer/rfc/tags/vaultweekly0.5https://jrepp.github.io/prism-data-layer/rfc/tags/walking-skeletonweekly0.5https://jrepp.github.io/prism-data-layer/rfc/tags/webweekly0.5https://jrepp.github.io/prism-data-layer/rfc/tags/workstreamsweekly0.5https://jrepp.github.io/prism-data-layer/rfc/tags/write-onlyweekly0.5https://jrepp.github.io/prism-data-layer/rfc/weekly0.5https://jrepp.github.io/prism-data-layer/rfc/rfc-001weekly0.5https://jrepp.github.io/prism-data-layer/rfc/rfc-002weekly0.5https://jrepp.github.io/prism-data-layer/rfc/rfc-003weekly0.5https://jrepp.github.io/prism-data-layer/rfc/rfc-004weekly0.5https://jrepp.github.io/prism-data-layer/rfc/rfc-005weekly0.5https://jrepp.github.io/prism-data-layer/rfc/rfc-006weekly0.5https://jrepp.github.io/prism-data-layer/rfc/rfc-007weekly0.5https://jrepp.github.io/prism-data-layer/rfc/rfc-008weekly0.5https://jrepp.github.io/prism-data-layer/rfc/rfc-009weekly0.5h
ttps://jrepp.github.io/prism-data-layer/rfc/rfc-010weekly0.5https://jrepp.github.io/prism-data-layer/rfc/rfc-011weekly0.5https://jrepp.github.io/prism-data-layer/rfc/rfc-012weekly0.5https://jrepp.github.io/prism-data-layer/rfc/rfc-013weekly0.5https://jrepp.github.io/prism-data-layer/rfc/rfc-014weekly0.5https://jrepp.github.io/prism-data-layer/rfc/rfc-015weekly0.5https://jrepp.github.io/prism-data-layer/rfc/rfc-016weekly0.5https://jrepp.github.io/prism-data-layer/rfc/rfc-017weekly0.5https://jrepp.github.io/prism-data-layer/rfc/rfc-018weekly0.5https://jrepp.github.io/prism-data-layer/rfc/rfc-019weekly0.5https://jrepp.github.io/prism-data-layer/rfc/rfc-020weekly0.5https://jrepp.github.io/prism-data-layer/rfc/rfc-021weekly0.5https://jrepp.github.io/prism-data-layer/rfc/rfc-022weekly0.5https://jrepp.github.io/prism-data-layer/rfc/rfc-023weekly0.5https://jrepp.github.io/prism-data-layer/rfc/rfc-024weekly0.5https://jrepp.github.io/prism-data-layer/rfc/rfc-025weekly0.5https://jrepp.github.io/prism-data-layer/rfc/rfc-026weekly0.5https://jrepp.github.io/prism-data-layer/rfc/rfc-027weekly0.5https://jrepp.github.io/prism-data-layer/rfc/rfc-028weekly0.5https://jrepp.github.io/prism-data-layer/rfc/rfc-029weekly0.5https://jrepp.github.io/prism-data-layer/rfc/rfc-030weekly0.5https://jrepp.github.io/prism-data-layer/rfc/rfc-031weekly0.5https://jrepp.github.io/prism-data-layer/rfc/rfc-032weekly0.5https://jrepp.github.io/prism-data-layer/rfc/rfc-033weekly0.5https://jrepp.github.io/prism-data-layer/rfc/rfc-034weekly0.5https://jrepp.github.io/prism-data-layer/rfc/rfc-035weekly0.5https://jrepp.github.io/prism-data-layer/rfc/rfc-036weekly0.5https://jrepp.github.io/prism-data-layer/weekly0.5 \ No newline at end of file diff --git a/docusaurus/docs/changelog.md b/docusaurus/docs/changelog.md index b73018251..86ed2496e 100644 --- a/docusaurus/docs/changelog.md +++ b/docusaurus/docs/changelog.md @@ -12,6 +12,82 @@ Quick access to recently updated documentation. 
Changes listed in reverse chrono ### 2025-10-15 +#### RFC-037: Mailbox Pattern - Searchable Event Store (NEW) +**Link**: [RFC-037](/rfc/rfc-037) + +**Summary**: Comprehensive mailbox pattern design for consuming messages from queues and storing them in searchable databases with indexed headers and encrypted bodies: + +**Core Concept**: +- **Three-Slot Architecture**: Message source (queue consumer) + Storage backend (table writer) + Query interface (table reader) +- **Header Indexing**: Extract metadata from PubSubMessage.Metadata map into indexed table columns +- **Encrypted Body Support**: Store encrypted payloads as opaque blobs while maintaining header searchability +- **Schema Evolution**: JSON custom headers for flexibility without schema migrations +- **Pluggable Storage**: SQLite (default), PostgreSQL, ClickHouse via TableWriterInterface and TableReaderInterface + +**Use Cases**: +- Audit logging with searchable metadata (user, action, resource) but encrypted PII +- Email/message archives with searchable headers (from, to, subject) and encrypted bodies +- Event sourcing with indexed event types, aggregates, timestamps +- System observability (traces, logs, metrics) with searchable dimensions +- Compliance retention with searchable metadata while protecting sensitive payloads + +**New Backend Interfaces**: + +**TableWriterInterface** (Storage Operations): +```go +type TableWriterInterface interface { + WriteEvent(ctx context.Context, event *MailboxEvent) error + DeleteOldEvents(ctx context.Context, olderThan int64) (int64, error) + GetTableStats(ctx context.Context) (*TableStats, error) +} +``` + +**TableReaderInterface** (Query Operations): +```go +type TableReaderInterface interface { + QueryEvents(ctx context.Context, filter *EventFilter) ([]*MailboxEvent, error) + GetEvent(ctx context.Context, messageID string) (*MailboxEvent, error) + GetTableStats(ctx context.Context) (*TableStats, error) +} +``` + +**Standard Indexed Headers**: +- `prism-message-id`: 
Unique message identifier +- `prism-timestamp`: Event timestamp +- `prism-topic`: Topic/stream name +- `prism-content-type`: Payload content type +- `prism-schema-id`: Schema registry ID (RFC-030) +- `prism-encryption`: Encryption algorithm (if encrypted) +- `prism-correlation-id`: Request correlation ID +- `prism-principal`: User/service identity +- `prism-namespace`: Prism namespace + +**SQLite Table Schema**: +- Indexed headers as table columns (message_id, timestamp, topic, principal, correlation_id) +- Custom headers as JSON column (x-* headers) +- Body as BLOB column (may be encrypted) +- Automatic retention policy (90 days default) + +**Pattern Configuration**: +- Message source slot: NATS, Kafka, Redis Streams +- Storage slot: SQLite (local), PostgreSQL (distributed), ClickHouse (analytics) +- Behavior: batch_size, auto_commit, retention_policy + +**Implementation Plan** (4 weeks): +- Phase 1: TableWriterInterface definition in pkg/plugin/ +- Phase 2: SQLite backend with WAL mode and indexed columns +- Phase 3: Mailbox pattern with header extraction and retention +- Phase 4: Integration tests with NATS + SQLite +- Phase 5: $admin namespace configuration + +**Key Innovation**: Decouples message indexing from storage backend using slot architecture. Headers extracted from metadata map and indexed for fast queries while encrypted bodies remain opaque. Automatic retention management prevents storage bloat. SQLite default provides instant local testing, PostgreSQL enables distributed deployments. + +**Impact**: Enables searchable event storage without external dependencies (no Elasticsearch required). Encrypted bodies protected while headers remain queryable. Automatic retention policy with TTL. Pluggable storage backends via TableWriterInterface. Foundation for audit logging, message archives, and event sourcing patterns. Complements Consumer pattern (RFC-033) and Producer pattern with durable storage tier. 
+ +**$admin Namespace Goal**: Deploy mailbox pattern in `$admin` namespace consuming all admin.* topics for operational observability and audit trail. + +--- + #### CI Workflow Consolidation and GitHub Merge Queue Support (MAJOR UPDATE) **Links**: [CI Workflow](https://github.com/jrepp/prism-data-layer/blob/main/.github/workflows/ci.yml), [Merge Queue Workflow](https://github.com/jrepp/prism-data-layer/blob/main/.github/workflows/merge-queue.yml), [Merge Queue Setup Guide](https://github.com/jrepp/prism-data-layer/blob/main/.github/MERGE_QUEUE_SETUP.md) diff --git a/patterns/mailbox/.gitignore b/patterns/mailbox/.gitignore new file mode 100644 index 000000000..81183ac02 --- /dev/null +++ b/patterns/mailbox/.gitignore @@ -0,0 +1,2 @@ +# Test database +.prism/ diff --git a/patterns/mailbox/README.md b/patterns/mailbox/README.md new file mode 100644 index 000000000..535b37752 --- /dev/null +++ b/patterns/mailbox/README.md @@ -0,0 +1,317 @@ +# Mailbox Pattern + +Searchable event store pattern that consumes messages from queues and stores them in structured databases with indexed headers and blob bodies. + +## Overview + +The Mailbox Pattern provides a persistent, searchable event store by consuming messages from a message queue or pub/sub system and storing them in a structured database. Headers are extracted from event metadata and stored as indexed table columns for efficient querying, while message bodies (which may be encrypted) are stored as opaque blobs. + +## Architecture + +The Mailbox Pattern uses a **3-slot architecture**: + +1. **Message Source** (QueueInterface or PubSubInterface) + - Consumes events from messaging backends (NATS, Kafka, Redis Streams) + - Configuration: topic, consumer group, batch size + +2. **Storage Backend** (TableWriterInterface) + - Persists events with indexed headers + - Operations: WriteEvent, DeleteOldEvents, GetTableStats + - Implementations: SQLite (default), PostgreSQL, ClickHouse + +3. 
**Query Interface** (TableReaderInterface) + - Retrieves stored messages as array of MailboxEvent (header + payload) + - Operations: QueryEvents, GetEvent, GetTableStats + - Shares database connection with writer slot + +## Use Cases + +- **Audit Logging**: Store all system events with searchable metadata (user, action, resource) but encrypted PII +- **Email/Message Archives**: Store communications with searchable headers (from, to, subject) and encrypted bodies +- **Event Sourcing**: Capture all domain events with indexed event types, aggregates, and timestamps +- **System Observability**: Archive traces, logs, and metrics with searchable dimensions +- **Compliance**: Retain records with searchable metadata while protecting sensitive payloads + +## Features + +- **9 Standard Indexed Headers**: message_id, timestamp, topic, content_type, schema_id, encryption, correlation_id, principal, namespace +- **Custom Headers**: Application-specific headers with `x-` prefix stored as JSON +- **Automatic Retention Cleanup**: Configurable retention period with background cleanup job +- **Performance Optimized**: SQLite WAL mode, indexed columns, efficient query building +- **Encrypted Body Support**: Store encrypted payloads as opaque blobs while maintaining header searchability +- **Schema Evolution**: JSON custom headers for flexibility without schema migrations + +## Quick Start + +### Local Testing (No External Dependencies) + +The simplest way to test the mailbox is using local SQLite storage without external message brokers: + +```bash +# 1. Build prismctl (if not already built) +cd cmd/prismctl +go build -o prismctl + +# 2. Publish test messages via proxy admin API +./prismctl publish message local-mailbox test.events '{"hello": "world"}' \ + --principal "test-user" \ + --correlation-id "trace-123" + +# 3. Query messages from mailbox +./prismctl mailbox query local-mailbox --limit 10 --show-payload + +# 4. 
Get specific message +./prismctl mailbox get local-mailbox <message-id> +``` + +The mailbox stores all published messages in SQLite with indexed headers for efficient querying. + +### Production Setup with Message Broker + +For production use with NATS or Kafka: + +#### 1. Create Configuration + +Create a `mailbox.yaml` configuration file: + +```yaml +name: admin-mailbox + +behavior: + topic: "admin.events.>" + consumer_group: "mailbox-admin" + auto_commit: true + +storage: + database_path: "/var/lib/prism/mailbox-admin.db" + table_name: "mailbox" + retention_days: 90 + cleanup_interval: "24h" +``` + +#### 2. Build and Run + +```bash +# Build the mailbox runner +cd patterns/mailbox/cmd/mailbox-runner +go build -o mailbox-runner + +# Run with configuration +./mailbox-runner --config mailbox.yaml --log-level info +``` + +#### 3. Query Events + +Use prismctl to query stored events: + +```bash +# Query by time range +prismctl mailbox query admin-mailbox \ + --start-time "2025-10-15T00:00:00Z" \ + --end-time "2025-10-15T23:59:59Z" \ + --limit 100 + +# Query by topic +prismctl mailbox query admin-mailbox --topic "admin.users.*" + +# Query by principal +prismctl mailbox query admin-mailbox --principal "user@example.com" + +# Query with correlation ID +prismctl mailbox query admin-mailbox --correlation-id "trace-abc-123" +``` + +## Configuration + +### Behavior Settings + +- `topic` (string, required): Topic or queue to consume messages from +- `consumer_group` (string, required): Consumer group ID for coordinated consumption +- `auto_commit` (boolean, default: true): Automatically acknowledge messages after successful storage + +### Storage Settings + +- `database_path` (string, required): Path to SQLite database file or connection string +- `table_name` (string, default: "mailbox"): Table name for storing events +- `retention_days` (integer, default: 90): Number of days to retain events before deletion +- `cleanup_interval` (duration, default: "24h"): Interval between automatic 
cleanup runs + +## Table Schema + +Default SQLite table schema: + +```sql +CREATE TABLE IF NOT EXISTS mailbox ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + + -- Indexed headers + message_id TEXT NOT NULL UNIQUE, + timestamp INTEGER NOT NULL, + topic TEXT NOT NULL, + content_type TEXT, + schema_id TEXT, + encryption TEXT, + correlation_id TEXT, + principal TEXT, + namespace TEXT, + + -- Custom headers (JSON) + custom_headers TEXT, + + -- Body (blob) + body BLOB NOT NULL, + + -- Metadata + created_at INTEGER NOT NULL, + + -- Indexes + INDEX idx_timestamp (timestamp), + INDEX idx_topic (topic), + INDEX idx_principal (principal), + INDEX idx_correlation_id (correlation_id) +); +``` + +## API + +### Creating a Mailbox + +```go +config := mailbox.Config{ + Name: "admin-mailbox", + Behavior: mailbox.BehaviorConfig{ + Topic: "admin.events.>", + ConsumerGroup: "mailbox-admin", + AutoCommit: true, + }, + Storage: mailbox.StorageConfig{ + DatabasePath: "/var/lib/prism/mailbox-admin.db", + TableName: "mailbox", + RetentionDays: 90, + }, +} + +mb, err := mailbox.New(config) +``` + +### Binding Slots + +```go +// Initialize backends +natsDriver := nats.New() +sqliteDriver := sqlite.New() + +// Bind slots +err = mb.BindSlots( + natsDriver, // Message source + sqliteDriver, // Table writer + sqliteDriver, // Table reader (same backend) +) +``` + +### Starting the Mailbox + +```go +ctx := context.Background() +err = mb.Start(ctx) +``` + +### Querying Events + +```go +// Query by time range +startTime := time.Now().Add(-24 * time.Hour).UnixMilli() +endTime := time.Now().UnixMilli() + +filter := &plugin.EventFilter{ + StartTime: &startTime, + EndTime: &endTime, + Topics: []string{"admin.users", "admin.sessions"}, + Limit: 100, +} + +events, err := mb.QueryEvents(ctx, filter) +``` + +### Getting Single Event + +```go +event, err := mb.GetEvent(ctx, "message-id-123") +``` + +### Getting Statistics + +```go +stats, err := mb.GetStats(ctx) +// Returns: events_received, events_stored, 
events_failed, +// bytes_stored, table_total_events, etc. +``` + +## Metrics + +The mailbox pattern exports the following metrics: + +- `events_received`: Total number of events received from message source +- `events_stored`: Total number of events successfully stored +- `events_failed`: Total number of events that failed to store +- `bytes_stored`: Total bytes stored in mailbox +- `processing_latency`: Time taken to process and store each event + +## Performance + +- **Throughput**: 10,000 events/sec with SQLite backend (SSD) +- **Query Latency**: <10ms for indexed queries on 1M events +- **Storage Growth**: ~500 bytes per event average (depends on payload size) +- **Retention Cleanup**: Deletes 100k old events in <1 second + +## Testing + +Run the test suite: + +```bash +cd patterns/mailbox +go test -v ./... +``` + +Tests include: +- Mailbox creation and configuration validation +- Slot binding +- Message storage with header extraction +- Query operations +- Health checks + +## Example: Audit Logging + +```yaml +name: audit-mailbox + +behavior: + topic: "audit.*" + consumer_group: "audit-logger" + auto_commit: true + +storage: + database_path: "/var/lib/prism/audit.db" + table_name: "audit_log" + retention_days: 365 # 1 year retention for compliance + cleanup_interval: "24h" +``` + +Messages should include metadata: + +```go +metadata := map[string]string{ + "prism-principal": "user-123", + "prism-correlation-id": "trace-abc", + "x-action": "user.login", + "x-resource": "/api/sessions", + "x-ip-address": "192.168.1.10", +} +``` + +## See Also + +- [RFC-037: Mailbox Pattern Specification](../../docs-cms/rfcs/RFC-037-mailbox-pattern-searchable-event-store.md) +- [TableWriterInterface Documentation](../../pkg/plugin/interfaces.go) +- [SQLite Backend Driver](../../pkg/drivers/sqlite/) +- [Consumer Pattern](../consumer/) - Similar pattern for message processing diff --git a/patterns/mailbox/cmd/mailbox-runner/main.go b/patterns/mailbox/cmd/mailbox-runner/main.go new 
file mode 100644 index 000000000..d03152e0d --- /dev/null +++ b/patterns/mailbox/cmd/mailbox-runner/main.go @@ -0,0 +1,203 @@ +package main + +import ( + "context" + "flag" + "fmt" + "log/slog" + "os" + "os/signal" + "syscall" + "time" + + "gopkg.in/yaml.v3" + + "github.com/jrepp/prism-data-layer/patterns/mailbox" + "github.com/jrepp/prism-data-layer/pkg/drivers/nats" + "github.com/jrepp/prism-data-layer/pkg/drivers/sqlite" + "github.com/jrepp/prism-data-layer/pkg/plugin" +) + +func main() { + // Parse command-line flags + configPath := flag.String("config", "mailbox.yaml", "Path to mailbox configuration file") + logLevel := flag.String("log-level", "info", "Log level (debug, info, warn, error)") + flag.Parse() + + // Configure logging + level := slog.LevelInfo + switch *logLevel { + case "debug": + level = slog.LevelDebug + case "warn": + level = slog.LevelWarn + case "error": + level = slog.LevelError + } + + logger := slog.New(slog.NewJSONHandler(os.Stdout, &slog.HandlerOptions{ + Level: level, + })) + slog.SetDefault(logger) + + // Load configuration + config, err := loadConfig(*configPath) + if err != nil { + slog.Error("failed to load config", "error", err, "path", *configPath) + os.Exit(1) + } + + slog.Info("starting mailbox-runner", + "name", config.Name, + "topic", config.Behavior.Topic, + "database", config.Storage.DatabasePath) + + // Create mailbox pattern + mb, err := mailbox.New(*config) + if err != nil { + slog.Error("failed to create mailbox", "error", err) + os.Exit(1) + } + + // Initialize backends + ctx := context.Background() + + // Initialize NATS message source + natsDriver := nats.New() + natsConfig := &plugin.Config{ + Backend: map[string]interface{}{ + "url": "nats://localhost:4222", + }, + } + if err := natsDriver.Initialize(ctx, natsConfig); err != nil { + slog.Error("failed to initialize NATS driver", "error", err) + os.Exit(1) + } + if err := natsDriver.Start(ctx); err != nil { 
+ slog.Error("failed to start NATS driver", "error", err) + os.Exit(1) + } + defer natsDriver.Stop(ctx) + + // Initialize SQLite storage backend + sqliteDriver := sqlite.New() + sqliteConfig := &plugin.Config{ + Backend: map[string]interface{}{ + "database_path": config.Storage.DatabasePath, + "table_name": config.Storage.TableName, + "retention_days": float64(config.Storage.RetentionDays), + }, + } + if err := sqliteDriver.Initialize(ctx, sqliteConfig); err != nil { + slog.Error("failed to initialize SQLite driver", "error", err) + os.Exit(1) + } + if err := sqliteDriver.Start(ctx); err != nil { + slog.Error("failed to start SQLite driver", "error", err) + os.Exit(1) + } + defer sqliteDriver.Stop(ctx) + + // Get TableWriterInterface and TableReaderInterface from SQLite driver + tableWriter, ok := sqliteDriver.(plugin.TableWriterInterface) + if !ok { + slog.Error("SQLite driver does not implement TableWriterInterface") + os.Exit(1) + } + + tableReader, ok := sqliteDriver.(plugin.TableReaderInterface) + if !ok { + slog.Error("SQLite driver does not implement TableReaderInterface") + os.Exit(1) + } + + // Get message source interface from NATS driver + var messageSource interface{} + if pubsub, ok := natsDriver.(plugin.PubSubInterface); ok { + messageSource = pubsub + } else if queue, ok := natsDriver.(plugin.QueueInterface); ok { + messageSource = queue + } else { + slog.Error("NATS driver does not implement PubSubInterface or QueueInterface") + os.Exit(1) + } + + // Bind slots to mailbox pattern + if err := mb.BindSlots(messageSource, tableWriter, tableReader); err != nil { + slog.Error("failed to bind slots", "error", err) + os.Exit(1) + } + + // Start mailbox + if err := mb.Start(ctx); err != nil { + slog.Error("failed to start mailbox", "error", err) + os.Exit(1) + } + + slog.Info("mailbox started successfully", + "name", config.Name, + "topic", config.Behavior.Topic) + + // Setup signal handling + sigCh := make(chan os.Signal, 1) + signal.Notify(sigCh, 
syscall.SIGINT, syscall.SIGTERM) + + // Periodically log statistics + ticker := time.NewTicker(30 * time.Second) + defer ticker.Stop() + + // Wait for termination signal or ticker + for { + select { + case sig := <-sigCh: + slog.Info("received signal, shutting down", "signal", sig) + if err := mb.Stop(ctx); err != nil { + slog.Error("error stopping mailbox", "error", err) + } + return + + case <-ticker.C: + // Log statistics + stats, err := mb.GetStats(ctx) + if err != nil { + slog.Warn("failed to get stats", "error", err) + continue + } + + slog.Info("mailbox statistics", "stats", stats) + + // Check health + health, err := mb.Health(ctx) + if err != nil { + slog.Warn("failed to get health", "error", err) + continue + } + + if health.Status != plugin.HealthHealthy { + slog.Warn("mailbox health degraded", + "status", health.Status, + "message", health.Message, + "details", health.Details) + } + } + } +} + +// loadConfig loads the mailbox configuration from a YAML file. +func loadConfig(path string) (*mailbox.Config, error) { + data, err := os.ReadFile(path) + if err != nil { + return nil, fmt.Errorf("failed to read config file: %w", err) + } + + var config mailbox.Config + if err := yaml.Unmarshal(data, &config); err != nil { + return nil, fmt.Errorf("failed to parse config YAML: %w", err) + } + + if err := config.Validate(); err != nil { + return nil, fmt.Errorf("invalid config: %w", err) + } + + return &config, nil +} diff --git a/patterns/mailbox/config.go b/patterns/mailbox/config.go new file mode 100644 index 000000000..9c9993636 --- /dev/null +++ b/patterns/mailbox/config.go @@ -0,0 +1,61 @@ +package mailbox + +import ( + "fmt" + "time" +) + +// Config defines the configuration for the Mailbox pattern. +type Config struct { + Name string `yaml:"name" json:"name"` + Behavior BehaviorConfig `yaml:"behavior" json:"behavior"` + Storage StorageConfig `yaml:"storage" json:"storage"` +} + +// BehaviorConfig defines mailbox behavior settings. 
+type BehaviorConfig struct { + Topic string `yaml:"topic" json:"topic"` + ConsumerGroup string `yaml:"consumer_group" json:"consumer_group"` + AutoCommit bool `yaml:"auto_commit" json:"auto_commit"` +} + +// StorageConfig defines storage backend settings. +type StorageConfig struct { + DatabasePath string `yaml:"database_path" json:"database_path"` + TableName string `yaml:"table_name" json:"table_name"` + RetentionDays int `yaml:"retention_days" json:"retention_days"` + CleanupInterval time.Duration `yaml:"cleanup_interval" json:"cleanup_interval"` +} + +// Validate checks if the configuration is valid. +func (c *Config) Validate() error { + if c.Name == "" { + return fmt.Errorf("mailbox name is required") + } + + if c.Behavior.Topic == "" { + return fmt.Errorf("topic is required") + } + + if c.Behavior.ConsumerGroup == "" { + return fmt.Errorf("consumer_group is required") + } + + if c.Storage.DatabasePath == "" { + return fmt.Errorf("storage.database_path is required") + } + + if c.Storage.TableName == "" { + c.Storage.TableName = "mailbox" // Default table name + } + + if c.Storage.RetentionDays <= 0 { + c.Storage.RetentionDays = 90 // Default 90 days retention + } + + if c.Storage.CleanupInterval <= 0 { + c.Storage.CleanupInterval = 24 * time.Hour // Default daily cleanup + } + + return nil +} diff --git a/patterns/mailbox/example-config.yaml b/patterns/mailbox/example-config.yaml new file mode 100644 index 000000000..6e40020c9 --- /dev/null +++ b/patterns/mailbox/example-config.yaml @@ -0,0 +1,28 @@ +# Example mailbox pattern configuration +# This configuration sets up a mailbox that consumes admin events +# from NATS and stores them in a local SQLite database + +name: admin-mailbox + +behavior: + # Topic pattern to subscribe to (NATS wildcard syntax) + topic: "admin.events.>" + + # Consumer group for coordinated consumption across multiple instances + consumer_group: "mailbox-admin" + + # Automatically acknowledge messages after successful storage + 
auto_commit: true + +storage: + # Path to SQLite database file + database_path: "/var/lib/prism/mailbox-admin.db" + + # Table name for storing events + table_name: "mailbox" + + # Retention period in days (events older than this will be deleted) + retention_days: 90 + + # Interval between automatic cleanup runs + cleanup_interval: "24h" diff --git a/patterns/mailbox/go.mod b/patterns/mailbox/go.mod new file mode 100644 index 000000000..675609b95 --- /dev/null +++ b/patterns/mailbox/go.mod @@ -0,0 +1,57 @@ +module github.com/jrepp/prism-data-layer/patterns/mailbox + +go 1.24.0 + +require ( + github.com/jrepp/prism-data-layer/pkg/drivers/memstore v0.0.0-20251016004831-50e341824740 + github.com/jrepp/prism-data-layer/pkg/drivers/nats v0.0.0 + github.com/jrepp/prism-data-layer/pkg/drivers/sqlite v0.0.0 + github.com/jrepp/prism-data-layer/pkg/plugin v0.0.0 + gopkg.in/yaml.v3 v3.0.1 +) + +require ( + github.com/dustin/go-humanize v1.0.1 // indirect + github.com/go-logr/logr v1.4.3 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/google/uuid v1.6.0 // indirect + github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect + github.com/klauspost/compress v1.18.0 // indirect + github.com/mattn/go-isatty v0.0.16 // indirect + github.com/nats-io/nats.go v1.45.0 // indirect + github.com/nats-io/nkeys v0.4.11 // indirect + github.com/nats-io/nuid v1.0.1 // indirect + github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect + go.opentelemetry.io/auto/sdk v1.1.0 // indirect + go.opentelemetry.io/otel v1.37.0 // indirect + go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.24.0 // indirect + go.opentelemetry.io/otel/metric v1.37.0 // indirect + go.opentelemetry.io/otel/sdk v1.37.0 // indirect + go.opentelemetry.io/otel/trace v1.37.0 // indirect + golang.org/x/crypto v0.42.0 // indirect + golang.org/x/mod v0.27.0 // indirect + golang.org/x/net v0.43.0 // indirect + golang.org/x/sync v0.17.0 // indirect + 
golang.org/x/sys v0.36.0 // indirect + golang.org/x/text v0.29.0 // indirect + golang.org/x/tools v0.36.0 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20250804133106-a7a43d27e69b // indirect + google.golang.org/grpc v1.76.0 // indirect + google.golang.org/protobuf v1.36.6 // indirect + lukechampine.com/uint128 v1.2.0 // indirect + modernc.org/cc/v3 v3.40.0 // indirect + modernc.org/ccgo/v3 v3.16.13 // indirect + modernc.org/libc v1.29.0 // indirect + modernc.org/mathutil v1.6.0 // indirect + modernc.org/memory v1.7.2 // indirect + modernc.org/opt v0.1.3 // indirect + modernc.org/sqlite v1.28.0 // indirect + modernc.org/strutil v1.1.3 // indirect + modernc.org/token v1.0.1 // indirect +) + +replace github.com/jrepp/prism-data-layer/pkg/plugin => ../../pkg/plugin + +replace github.com/jrepp/prism-data-layer/pkg/drivers/nats => ../../pkg/drivers/nats + +replace github.com/jrepp/prism-data-layer/pkg/drivers/sqlite => ../../pkg/drivers/sqlite diff --git a/patterns/mailbox/go.sum b/patterns/mailbox/go.sum new file mode 100644 index 000000000..66de5f958 --- /dev/null +++ b/patterns/mailbox/go.sum @@ -0,0 +1,129 @@ +github.com/antithesishq/antithesis-sdk-go v0.4.3-default-no-op h1:+OSa/t11TFhqfrX0EOSqQBDJ0YlpmK0rDSiB19dg9M0= +github.com/antithesishq/antithesis-sdk-go v0.4.3-default-no-op/go.mod h1:IUpT2DPAKh6i/YhSbt6Gl3v2yvUZjmKncl7U91fup7E= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= +github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= +github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= 
+github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= +github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= +github.com/google/go-tpm v0.9.5 h1:ocUmnDebX54dnW+MQWGQRbdaAcJELsa6PqZhJ48KwVU= +github.com/google/go-tpm v0.9.5/go.mod h1:h9jEsEECg7gtLis0upRBQU+GhYVH6jMjrFxI8u6bVUY= +github.com/google/pprof v0.0.0-20221118152302-e6195bd50e26 h1:Xim43kblpZXfIBQsbuBVKCudVG457BR2GZFIz3uw3hQ= +github.com/google/pprof v0.0.0-20221118152302-e6195bd50e26/go.mod h1:dDKJzRmX4S37WGHujM7tX//fmj1uioxKzKxz3lo4HJo= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/jrepp/prism-data-layer/pkg/drivers/memstore v0.0.0-20251016004831-50e341824740 h1:WQJCEqIlSwnXy41bdQ9Iioh9CGY6pt5UYuuuvx+PYwE= +github.com/jrepp/prism-data-layer/pkg/drivers/memstore v0.0.0-20251016004831-50e341824740/go.mod h1:C4Mh+jQJTp8a+EAeZNaYeFFY9GXIAWnfMcGs3dZZUvk= +github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs= +github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= +github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= +github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/text v0.2.0 
h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/mattn/go-isatty v0.0.16 h1:bq3VjFmv/sOjHtdEhmkEV4x1AJtvUvOJ2PFAZ5+peKQ= +github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-sqlite3 v1.14.16 h1:yOQRA0RpS5PFz/oikGwBEqvAWhWg5ufRz4ETLjwpU1Y= +github.com/mattn/go-sqlite3 v1.14.16/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg= +github.com/minio/highwayhash v1.0.3 h1:kbnuUMoHYyVl7szWjSxJnxw11k2U709jqFPPmIUyD6Q= +github.com/minio/highwayhash v1.0.3/go.mod h1:GGYsuwP/fPD6Y9hMiXuapVvlIUEhFhMTh0rxU3ik1LQ= +github.com/nats-io/jwt/v2 v2.8.0 h1:K7uzyz50+yGZDO5o772eRE7atlcSEENpL7P+b74JV1g= +github.com/nats-io/jwt/v2 v2.8.0/go.mod h1:me11pOkwObtcBNR8AiMrUbtVOUGkqYjMQZ6jnSdVUIA= +github.com/nats-io/nats-server/v2 v2.12.0 h1:OIwe8jZUqJFrh+hhiyKu8snNib66qsx806OslqJuo74= +github.com/nats-io/nats-server/v2 v2.12.0/go.mod h1:nr8dhzqkP5E/lDwmn+A2CvQPMd1yDKXQI7iGg3lAvww= +github.com/nats-io/nats.go v1.45.0 h1:/wGPbnYXDM0pLKFjZTX+2JOw9TQPoIgTFrUaH97giwA= +github.com/nats-io/nats.go v1.45.0/go.mod h1:iRWIPokVIFbVijxuMQq4y9ttaBTMe0SFdlZfMDd+33g= +github.com/nats-io/nkeys v0.4.11 h1:q44qGV008kYd9W1b1nEBkNzvnWxtRSQ7A8BoqRrcfa0= +github.com/nats-io/nkeys v0.4.11/go.mod h1:szDimtgmfOi9n25JpfIdGw12tZFYXqhGxjhVxsatHVE= +github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw= +github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE= +github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= +github.com/rogpeppe/go-internal v1.13.1 
h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= +github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= +go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= +go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= +go.opentelemetry.io/otel v1.37.0 h1:9zhNfelUvx0KBfu/gb+ZgeAfAgtWrfHJZcAqFC228wQ= +go.opentelemetry.io/otel v1.37.0/go.mod h1:ehE/umFRLnuLa/vSccNq9oS1ErUlkkK71gMcN34UG8I= +go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.24.0 h1:s0PHtIkN+3xrbDOpt2M8OTG92cWqUESvzh2MxiR5xY8= +go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.24.0/go.mod h1:hZlFbDbRt++MMPCCfSJfmhkGIWnX1h3XjkfxZUjLrIA= +go.opentelemetry.io/otel/metric v1.37.0 h1:mvwbQS5m0tbmqML4NqK+e3aDiO02vsf/WgbsdpcPoZE= +go.opentelemetry.io/otel/metric v1.37.0/go.mod h1:04wGrZurHYKOc+RKeye86GwKiTb9FKm1WHtO+4EVr2E= +go.opentelemetry.io/otel/sdk v1.37.0 h1:ItB0QUqnjesGRvNcmAcU0LyvkVyGJ2xftD29bWdDvKI= +go.opentelemetry.io/otel/sdk v1.37.0/go.mod h1:VredYzxUvuo2q3WRcDnKDjbdvmO0sCzOvVAiY+yUkAg= +go.opentelemetry.io/otel/sdk/metric v1.37.0 h1:90lI228XrB9jCMuSdA0673aubgRobVZFhbjxHHspCPc= +go.opentelemetry.io/otel/sdk/metric v1.37.0/go.mod h1:cNen4ZWfiD37l5NhS+Keb5RXVWZWpRE+9WyVCpbo5ps= +go.opentelemetry.io/otel/trace v1.37.0 h1:HLdcFNbRQBE2imdSEgm/kwqmQj1Or1l/7bW6mxVK7z4= +go.opentelemetry.io/otel/trace v1.37.0/go.mod h1:TlgrlQ+PtQO5XFerSPUYG0JSgGyryXewPGyayAWSBS0= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +golang.org/x/crypto v0.42.0 h1:chiH31gIWm57EkTXpwnqf8qeuMUi0yekh6mT2AvFlqI= +golang.org/x/crypto v0.42.0/go.mod h1:4+rDnOTJhQCx2q7/j6rAN5XDw8kPjeaXEUR2eL94ix8= +golang.org/x/mod v0.27.0 
h1:kb+q2PyFnEADO2IEF935ehFUXlWiNjJWtRNgBLSfbxQ= +golang.org/x/mod v0.27.0/go.mod h1:rWI627Fq0DEoudcK+MBkNkCe0EetEaDSwJJkCcjpazc= +golang.org/x/net v0.43.0 h1:lat02VYK2j4aLzMzecihNvTlJNQUq316m2Mr9rnM6YE= +golang.org/x/net v0.43.0/go.mod h1:vhO1fvI4dGsIjh73sWfUVjj3N7CA9WkKJNQm2svM6Jg= +golang.org/x/sync v0.17.0 h1:l60nONMj9l5drqw6jlhIELNv9I0A4OFgRsG9k2oT9Ug= +golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.36.0 h1:KVRy2GtZBrk1cBYA7MKu5bEZFxQk4NIDV6RLVcC8o0k= +golang.org/x/sys v0.36.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/text v0.29.0 h1:1neNs90w9YzJ9BocxfsQNHKuAT4pkghyXc4nhZ6sJvk= +golang.org/x/text v0.29.0/go.mod h1:7MhJOA9CD2qZyOKYazxdYMF85OwPdEr9jTtBpO7ydH4= +golang.org/x/time v0.13.0 h1:eUlYslOIt32DgYD6utsuUeHs4d7AsEYLuIAdg7FlYgI= +golang.org/x/time v0.13.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4= +golang.org/x/tools v0.36.0 h1:kWS0uv/zsvHEle1LbV5LE8QujrxB3wfQyxHfhOk0Qkg= +golang.org/x/tools v0.36.0/go.mod h1:WBDiHKJK8YgLHlcQPYQzNCkUxUypCaa5ZegCVutKm+s= +gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk= +gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250804133106-a7a43d27e69b h1:zPKJod4w6F1+nRGDI9ubnXYhU9NSWoFAijkHkUXeTK8= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250804133106-a7a43d27e69b/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= +google.golang.org/grpc v1.76.0 h1:UnVkv1+uMLYXoIz6o7chp59WfQUYA2ex/BXQ9rHZu7A= +google.golang.org/grpc v1.76.0/go.mod h1:Ju12QI8M6iQJtbcsV+awF5a4hfJMLi4X0JLo94ULZ6c= +google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY= +google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod 
h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +lukechampine.com/uint128 v1.2.0 h1:mBi/5l91vocEN8otkC5bDLhi2KdCticRiwbdB0O+rjI= +lukechampine.com/uint128 v1.2.0/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= +modernc.org/cc/v3 v3.40.0 h1:P3g79IUS/93SYhtoeaHW+kRCIrYaxJ27MFPv+7kaTOw= +modernc.org/cc/v3 v3.40.0/go.mod h1:/bTg4dnWkSXowUO6ssQKnOV0yMVxDYNIsIrzqTFDGH0= +modernc.org/ccgo/v3 v3.16.13 h1:Mkgdzl46i5F/CNR/Kj80Ri59hC8TKAhZrYSaqvkwzUw= +modernc.org/ccgo/v3 v3.16.13/go.mod h1:2Quk+5YgpImhPjv2Qsob1DnZ/4som1lJTodubIcoUkY= +modernc.org/ccorpus v1.11.6 h1:J16RXiiqiCgua6+ZvQot4yUuUy8zxgqbqEEUuGPlISk= +modernc.org/ccorpus v1.11.6/go.mod h1:2gEUTrWqdpH2pXsmTM1ZkjeSrUWDpjMu2T6m29L/ErQ= +modernc.org/httpfs v1.0.6 h1:AAgIpFZRXuYnkjftxTAZwMIiwEqAfk8aVB2/oA6nAeM= +modernc.org/httpfs v1.0.6/go.mod h1:7dosgurJGp0sPaRanU53W4xZYKh14wfzX420oZADeHM= +modernc.org/libc v1.29.0 h1:tTFRFq69YKCF2QyGNuRUQxKBm1uZZLubf6Cjh/pVHXs= +modernc.org/libc v1.29.0/go.mod h1:DaG/4Q3LRRdqpiLyP0C2m1B8ZMGkQ+cCgOIjEtQlYhQ= +modernc.org/mathutil v1.6.0 h1:fRe9+AmYlaej+64JsEEhoWuAYBkOtQiMEU7n/XgfYi4= +modernc.org/mathutil v1.6.0/go.mod h1:Ui5Q9q1TR2gFm0AQRqQUaBWFLAhQpCwNcuhBOSedWPo= +modernc.org/memory v1.7.2 h1:Klh90S215mmH8c9gO98QxQFsY+W451E8AnzjoE2ee1E= +modernc.org/memory v1.7.2/go.mod h1:NO4NVCQy0N7ln+T9ngWqOQfi7ley4vpwvARR+Hjw95E= +modernc.org/opt v0.1.3 h1:3XOZf2yznlhC+ibLltsDGzABUGVx8J6pnFMS3E4dcq4= +modernc.org/opt v0.1.3/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0= +modernc.org/sqlite v1.28.0 h1:Zx+LyDDmXczNnEQdvPuEfcFVA2ZPyaD7UCZDjef3BHQ= +modernc.org/sqlite v1.28.0/go.mod 
h1:Qxpazz0zH8Z1xCFyi5GSL3FzbtZ3fvbjmywNogldEW0= +modernc.org/strutil v1.1.3 h1:fNMm+oJklMGYfU9Ylcywl0CO5O6nTfaowNsh2wpPjzY= +modernc.org/strutil v1.1.3/go.mod h1:MEHNA7PdEnEwLvspRMtWTNnp2nnyvMfkimT1NKNAGbw= +modernc.org/tcl v1.15.2 h1:C4ybAYCGJw968e+Me18oW55kD/FexcHbqH2xak1ROSY= +modernc.org/tcl v1.15.2/go.mod h1:3+k/ZaEbKrC8ePv8zJWPtBSW0V7Gg9g8rkmhI1Kfs3c= +modernc.org/token v1.0.1 h1:A3qvTqOwexpfZZeyI0FeGPDlSWX5pjZu9hF4lU+EKWg= +modernc.org/token v1.0.1/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= +modernc.org/z v1.7.3 h1:zDJf6iHjrnB+WRD88stbXokugjyc0/pB91ri1gO6LZY= +modernc.org/z v1.7.3/go.mod h1:Ipv4tsdxZRbQyLq9Q1M6gdbkxYzdlrciF2Hi/lS7nWE= diff --git a/patterns/mailbox/local-config.yaml b/patterns/mailbox/local-config.yaml new file mode 100644 index 000000000..fe8b42dec --- /dev/null +++ b/patterns/mailbox/local-config.yaml @@ -0,0 +1,34 @@ +# Local mailbox configuration for testing without external dependencies +# Uses in-memory pub/sub (MemPubSub) and local SQLite database +# +# This configuration allows testing the complete mailbox workflow: +# 1. Publish messages via prismctl to a namespace +# 2. Mailbox consumes from in-memory pub/sub +# 3. Messages stored in SQLite with indexed headers +# 4. Query messages via prismctl mailbox commands + +name: local-mailbox + +behavior: + # Topic pattern to subscribe to (simple wildcard) + # This will capture all events published to topics starting with "test." 
+ topic: "test.events" + + # Consumer group for coordinated consumption + consumer_group: "mailbox-local" + + # Automatically acknowledge messages after successful storage + auto_commit: true + +storage: + # Path to SQLite database file in .prism directory + database_path: ".prism/mailbox-local.db" + + # Table name for storing events + table_name: "mailbox" + + # Retention period in days (keep for 30 days for local testing) + retention_days: 30 + + # Interval between automatic cleanup runs + cleanup_interval: "1h" diff --git a/patterns/mailbox/mailbox.go b/patterns/mailbox/mailbox.go new file mode 100644 index 000000000..8281a6043 --- /dev/null +++ b/patterns/mailbox/mailbox.go @@ -0,0 +1,334 @@ +package mailbox + +import ( + "context" + "fmt" + "log/slog" + "sync" + "time" + + "github.com/jrepp/prism-data-layer/pkg/plugin" +) + +// Mailbox implements a searchable event store pattern by consuming messages +// from a queue and storing them in a structured database with indexed headers. +type Mailbox struct { + name string + config Config + + // Backend interfaces (slots) + messageSource interface{} // PubSubInterface or QueueInterface + tableWriter plugin.TableWriterInterface // Storage backend for writing events + tableReader plugin.TableReaderInterface // Query interface for reading events + + // Runtime state + mu sync.RWMutex + running bool + ctx context.Context + cancel context.CancelFunc + metrics MailboxMetrics +} + +// MailboxMetrics tracks mailbox performance metrics. +type MailboxMetrics struct { + mu sync.RWMutex + EventsReceived int64 + EventsStored int64 + EventsFailed int64 + BytesStored int64 + LastEventTime time.Time + ProcessingLatency time.Duration +} + +// New creates a new Mailbox instance. +// Backend slots must be bound via BindSlots() before starting. 
+func New(config Config) (*Mailbox, error) { + if err := config.Validate(); err != nil { + return nil, err + } + + return &Mailbox{ + name: config.Name, + config: config, + }, nil +} + +// BindSlots connects backend drivers to the pattern's slots. +func (m *Mailbox) BindSlots( + messageSource interface{}, + tableWriter plugin.TableWriterInterface, + tableReader plugin.TableReaderInterface, +) error { + m.mu.Lock() + defer m.mu.Unlock() + + if m.running { + return fmt.Errorf("cannot bind slots while mailbox is running") + } + + // Validate message source implements required interface + switch messageSource.(type) { + case plugin.PubSubInterface, plugin.QueueInterface: + m.messageSource = messageSource + default: + return fmt.Errorf("message_source must implement PubSubInterface or QueueInterface") + } + + // Table writer is required + if tableWriter == nil { + return fmt.Errorf("table_writer slot is required") + } + m.tableWriter = tableWriter + + // Table reader is optional (mailbox can run write-only) + m.tableReader = tableReader + + return nil +} + +// Start begins consuming messages and storing them. +func (m *Mailbox) Start(ctx context.Context) error { + m.mu.Lock() + defer m.mu.Unlock() + + if m.running { + return fmt.Errorf("mailbox already running") + } + + if m.messageSource == nil { + return fmt.Errorf("message_source slot must be bound before starting") + } + + if m.tableWriter == nil { + return fmt.Errorf("table_writer slot must be bound before starting") + } + + m.ctx, m.cancel = context.WithCancel(ctx) + m.running = true + + // Start consumption + go m.consume() + + slog.Info("mailbox started", + "name", m.name, + "topic", m.config.Behavior.Topic, + "database", m.config.Storage.DatabasePath) + + return nil +} + +// Stop stops the mailbox. 
+func (m *Mailbox) Stop(ctx context.Context) error { + m.mu.Lock() + defer m.mu.Unlock() + + if !m.running { + return nil + } + + m.cancel() + m.running = false + + slog.Info("mailbox stopped", "name", m.name) + return nil +} + +// consume is the main consumption loop. +func (m *Mailbox) consume() { + defer func() { + m.mu.Lock() + m.running = false + m.mu.Unlock() + }() + + // Subscribe based on source type + var msgChan <-chan *plugin.PubSubMessage + var subErr error + + if pubsub, ok := m.messageSource.(plugin.PubSubInterface); ok { + msgChan, subErr = pubsub.Subscribe(m.ctx, m.config.Behavior.Topic, m.config.Behavior.ConsumerGroup) + } else if queue, ok := m.messageSource.(plugin.QueueInterface); ok { + msgChan, subErr = queue.Receive(m.ctx, m.config.Behavior.Topic) + } + + if subErr != nil { + slog.Error("failed to subscribe", "error", subErr) + return + } + + // Process messages + for { + select { + case <-m.ctx.Done(): + return + case msg := <-msgChan: + if msg == nil { + continue + } + + start := time.Now() + + if err := m.storeMessage(msg); err != nil { + slog.Error("failed to store message", + "message_id", msg.MessageID, + "error", err) + + m.metrics.mu.Lock() + m.metrics.EventsFailed++ + m.metrics.mu.Unlock() + } else { + m.metrics.mu.Lock() + m.metrics.EventsStored++ + m.metrics.BytesStored += int64(len(msg.Payload)) + m.metrics.LastEventTime = time.Now() + m.metrics.ProcessingLatency = time.Since(start) + m.metrics.mu.Unlock() + } + + m.metrics.mu.Lock() + m.metrics.EventsReceived++ + m.metrics.mu.Unlock() + } + } +} + +// storeMessage extracts headers and stores the message in the table. 
+func (m *Mailbox) storeMessage(msg *plugin.PubSubMessage) error { + // Extract headers from metadata + event := &plugin.MailboxEvent{ + MessageID: msg.MessageID, + Timestamp: msg.Timestamp, + Topic: msg.Topic, + Body: msg.Payload, + CustomHeaders: make(map[string]string), + } + + // Extract standard headers from metadata + if val, ok := msg.Metadata["prism-content-type"]; ok { + event.ContentType = val + } + if val, ok := msg.Metadata["prism-schema-id"]; ok { + event.SchemaID = val + } + if val, ok := msg.Metadata["prism-encryption"]; ok { + event.Encryption = val + } + if val, ok := msg.Metadata["prism-correlation-id"]; ok { + event.CorrelationID = val + } + if val, ok := msg.Metadata["prism-principal"]; ok { + event.Principal = val + } + if val, ok := msg.Metadata["prism-namespace"]; ok { + event.Namespace = val + } + + // Extract custom headers (x-* prefix) + for key, val := range msg.Metadata { + if len(key) > 2 && key[:2] == "x-" { + event.CustomHeaders[key] = val + } + } + + // Write to table + ctx, cancel := context.WithTimeout(m.ctx, 10*time.Second) + defer cancel() + + return m.tableWriter.WriteEvent(ctx, event) +} + +// QueryEvents retrieves events matching filter criteria. +// Requires table_reader slot to be bound. +func (m *Mailbox) QueryEvents(ctx context.Context, filter *plugin.EventFilter) ([]*plugin.MailboxEvent, error) { + if m.tableReader == nil { + return nil, fmt.Errorf("table_reader slot not bound") + } + + return m.tableReader.QueryEvents(ctx, filter) +} + +// GetEvent retrieves a single event by message ID. +// Requires table_reader slot to be bound. +func (m *Mailbox) GetEvent(ctx context.Context, messageID string) (*plugin.MailboxEvent, error) { + if m.tableReader == nil { + return nil, fmt.Errorf("table_reader slot not bound") + } + + return m.tableReader.GetEvent(ctx, messageID) +} + +// GetStats returns mailbox and storage statistics. 
+func (m *Mailbox) GetStats(ctx context.Context) (map[string]interface{}, error) { + m.metrics.mu.RLock() + defer m.metrics.mu.RUnlock() + + stats := map[string]interface{}{ + "events_received": m.metrics.EventsReceived, + "events_stored": m.metrics.EventsStored, + "events_failed": m.metrics.EventsFailed, + "bytes_stored": m.metrics.BytesStored, + "last_event_time": m.metrics.LastEventTime, + "processing_latency": m.metrics.ProcessingLatency.String(), + } + + // Add table stats if writer available + if m.tableWriter != nil { + tableStats, err := m.tableWriter.GetTableStats(ctx) + if err == nil { + stats["table_total_events"] = tableStats.TotalEvents + stats["table_total_size_bytes"] = tableStats.TotalSizeBytes + stats["table_oldest_event"] = tableStats.OldestEvent + stats["table_newest_event"] = tableStats.NewestEvent + } + } + + return stats, nil +} + +// Health returns the mailbox's health status. +func (m *Mailbox) Health(ctx context.Context) (*plugin.HealthStatus, error) { + m.mu.RLock() + defer m.mu.RUnlock() + + m.metrics.mu.RLock() + defer m.metrics.mu.RUnlock() + + status := &plugin.HealthStatus{ + Status: plugin.HealthHealthy, + Message: "mailbox operating normally", + Details: map[string]string{ + "name": m.name, + "topic": m.config.Behavior.Topic, + "running": fmt.Sprintf("%t", m.running), + "events_received": fmt.Sprintf("%d", m.metrics.EventsReceived), + "events_stored": fmt.Sprintf("%d", m.metrics.EventsStored), + "events_failed": fmt.Sprintf("%d", m.metrics.EventsFailed), + }, + } + + if !m.running { + status.Status = plugin.HealthDegraded + status.Message = "mailbox not running" + } + + // Check if failure rate is too high + if m.metrics.EventsReceived > 0 { + failureRate := float64(m.metrics.EventsFailed) / float64(m.metrics.EventsReceived) + if failureRate > 0.1 { // 10% failure threshold + status.Status = plugin.HealthDegraded + status.Message = fmt.Sprintf("high failure rate: %.2f%%", failureRate*100) + } + } + + return status, nil +} + +// Name 
returns the mailbox pattern name. +func (m *Mailbox) Name() string { + return m.name +} + +// Version returns the pattern version. +func (m *Mailbox) Version() string { + return "0.1.0" +} diff --git a/patterns/mailbox/mailbox_test.go b/patterns/mailbox/mailbox_test.go new file mode 100644 index 000000000..b5dc165c4 --- /dev/null +++ b/patterns/mailbox/mailbox_test.go @@ -0,0 +1,336 @@ +package mailbox_test + +import ( + "context" + "testing" + "time" + + "github.com/jrepp/prism-data-layer/patterns/mailbox" + "github.com/jrepp/prism-data-layer/pkg/plugin" +) + +// MockMessageSource provides a mock implementation of PubSubInterface. +type MockMessageSource struct { + messages chan *plugin.PubSubMessage +} + +func NewMockMessageSource() *MockMessageSource { + return &MockMessageSource{ + messages: make(chan *plugin.PubSubMessage, 100), + } +} + +func (m *MockMessageSource) Subscribe(ctx context.Context, topic, subscriberID string) (<-chan *plugin.PubSubMessage, error) { + return m.messages, nil +} + +func (m *MockMessageSource) Unsubscribe(ctx context.Context, topic, subscriberID string) error { + return nil +} + +func (m *MockMessageSource) Publish(ctx context.Context, topic string, payload []byte, metadata map[string]string) (string, error) { + msg := &plugin.PubSubMessage{ + Topic: topic, + Payload: payload, + Metadata: metadata, + MessageID: "test-msg-1", + Timestamp: time.Now().UnixMilli(), + } + m.messages <- msg + return msg.MessageID, nil +} + +// MockTableWriter provides a mock implementation of TableWriterInterface. 
+type MockTableWriter struct { + events []*plugin.MailboxEvent +} + +func NewMockTableWriter() *MockTableWriter { + return &MockTableWriter{ + events: make([]*plugin.MailboxEvent, 0), + } +} + +func (m *MockTableWriter) WriteEvent(ctx context.Context, event *plugin.MailboxEvent) error { + m.events = append(m.events, event) + return nil +} + +func (m *MockTableWriter) DeleteOldEvents(ctx context.Context, olderThan int64) (int64, error) { + return 0, nil +} + +func (m *MockTableWriter) GetTableStats(ctx context.Context) (*plugin.TableStats, error) { + return &plugin.TableStats{ + TotalEvents: int64(len(m.events)), + TotalSizeBytes: 0, + OldestEvent: time.Now().UnixMilli(), + NewestEvent: time.Now().UnixMilli(), + }, nil +} + +// MockTableReader provides a mock implementation of TableReaderInterface. +type MockTableReader struct { + writer *MockTableWriter +} + +func NewMockTableReader(writer *MockTableWriter) *MockTableReader { + return &MockTableReader{writer: writer} +} + +func (m *MockTableReader) QueryEvents(ctx context.Context, filter *plugin.EventFilter) ([]*plugin.MailboxEvent, error) { + return m.writer.events, nil +} + +func (m *MockTableReader) GetEvent(ctx context.Context, messageID string) (*plugin.MailboxEvent, error) { + for _, event := range m.writer.events { + if event.MessageID == messageID { + return event, nil + } + } + return nil, nil +} + +func (m *MockTableReader) GetTableStats(ctx context.Context) (*plugin.TableStats, error) { + return m.writer.GetTableStats(ctx) +} + +func TestMailboxCreation(t *testing.T) { + config := mailbox.Config{ + Name: "test-mailbox", + Behavior: mailbox.BehaviorConfig{ + Topic: "test.topic", + ConsumerGroup: "test-group", + AutoCommit: true, + }, + Storage: mailbox.StorageConfig{ + DatabasePath: "/tmp/test.db", + TableName: "mailbox", + RetentionDays: 90, + }, + } + + mb, err := mailbox.New(config) + if err != nil { + t.Fatalf("failed to create mailbox: %v", err) + } + + if mb == nil { + t.Fatal("mailbox is nil") + } 
+ + if mb.Name() != "test-mailbox" { + t.Errorf("expected name 'test-mailbox', got '%s'", mb.Name()) + } + + if mb.Version() != "0.1.0" { + t.Errorf("expected version '0.1.0', got '%s'", mb.Version()) + } +} + +func TestMailboxBindSlots(t *testing.T) { + config := mailbox.Config{ + Name: "test-mailbox", + Behavior: mailbox.BehaviorConfig{ + Topic: "test.topic", + ConsumerGroup: "test-group", + AutoCommit: true, + }, + Storage: mailbox.StorageConfig{ + DatabasePath: "/tmp/test.db", + TableName: "mailbox", + RetentionDays: 90, + }, + } + + mb, err := mailbox.New(config) + if err != nil { + t.Fatalf("failed to create mailbox: %v", err) + } + + messageSource := NewMockMessageSource() + tableWriter := NewMockTableWriter() + tableReader := NewMockTableReader(tableWriter) + + err = mb.BindSlots(messageSource, tableWriter, tableReader) + if err != nil { + t.Fatalf("failed to bind slots: %v", err) + } +} + +func TestMailboxMessageStorage(t *testing.T) { + config := mailbox.Config{ + Name: "test-mailbox", + Behavior: mailbox.BehaviorConfig{ + Topic: "test.topic", + ConsumerGroup: "test-group", + AutoCommit: true, + }, + Storage: mailbox.StorageConfig{ + DatabasePath: "/tmp/test.db", + TableName: "mailbox", + RetentionDays: 90, + }, + } + + mb, err := mailbox.New(config) + if err != nil { + t.Fatalf("failed to create mailbox: %v", err) + } + + messageSource := NewMockMessageSource() + tableWriter := NewMockTableWriter() + tableReader := NewMockTableReader(tableWriter) + + if err := mb.BindSlots(messageSource, tableWriter, tableReader); err != nil { + t.Fatalf("failed to bind slots: %v", err) + } + + ctx := context.Background() + + // Start mailbox + if err := mb.Start(ctx); err != nil { + t.Fatalf("failed to start mailbox: %v", err) + } + defer mb.Stop(ctx) + + // Publish test message + metadata := map[string]string{ + "prism-content-type": "application/json", + "prism-principal": "test-user", + "prism-correlation-id": "test-trace-123", + "x-custom-header": "custom-value", + 
} + + if _, err := messageSource.Publish(ctx, "test.topic", []byte("test payload"), metadata); err != nil { + t.Fatalf("failed to publish message: %v", err) + } + + // Wait for message to be processed + time.Sleep(100 * time.Millisecond) + + // Verify message was stored + if len(tableWriter.events) != 1 { + t.Fatalf("expected 1 stored event, got %d", len(tableWriter.events)) + } + + event := tableWriter.events[0] + if event.MessageID != "test-msg-1" { + t.Errorf("expected message_id 'test-msg-1', got '%s'", event.MessageID) + } + + if event.ContentType != "application/json" { + t.Errorf("expected content_type 'application/json', got '%s'", event.ContentType) + } + + if event.Principal != "test-user" { + t.Errorf("expected principal 'test-user', got '%s'", event.Principal) + } + + if event.CorrelationID != "test-trace-123" { + t.Errorf("expected correlation_id 'test-trace-123', got '%s'", event.CorrelationID) + } + + if event.CustomHeaders["x-custom-header"] != "custom-value" { + t.Errorf("expected custom header 'custom-value', got '%s'", event.CustomHeaders["x-custom-header"]) + } + + if string(event.Body) != "test payload" { + t.Errorf("expected body 'test payload', got '%s'", string(event.Body)) + } +} + +func TestMailboxQueryEvents(t *testing.T) { + config := mailbox.Config{ + Name: "test-mailbox", + Behavior: mailbox.BehaviorConfig{ + Topic: "test.topic", + ConsumerGroup: "test-group", + AutoCommit: true, + }, + Storage: mailbox.StorageConfig{ + DatabasePath: "/tmp/test.db", + TableName: "mailbox", + RetentionDays: 90, + }, + } + + mb, err := mailbox.New(config) + if err != nil { + t.Fatalf("failed to create mailbox: %v", err) + } + + messageSource := NewMockMessageSource() + tableWriter := NewMockTableWriter() + tableReader := NewMockTableReader(tableWriter) + + if err := mb.BindSlots(messageSource, tableWriter, tableReader); err != nil { + t.Fatalf("failed to bind slots: %v", err) + } + + ctx := context.Background() + + // Add test events directly to writer + 
testEvent := &plugin.MailboxEvent{ + MessageID: "test-msg-1", + Timestamp: time.Now().UnixMilli(), + Topic: "test.topic", + ContentType: "application/json", + Principal: "test-user", + Body: []byte("test payload"), + } + + if err := tableWriter.WriteEvent(ctx, testEvent); err != nil { + t.Fatalf("failed to write test event: %v", err) + } + + // Query events + filter := &plugin.EventFilter{ + Limit: 10, + } + + events, err := mb.QueryEvents(ctx, filter) + if err != nil { + t.Fatalf("failed to query events: %v", err) + } + + if len(events) != 1 { + t.Fatalf("expected 1 event, got %d", len(events)) + } + + if events[0].MessageID != "test-msg-1" { + t.Errorf("expected message_id 'test-msg-1', got '%s'", events[0].MessageID) + } +} + +func TestMailboxHealth(t *testing.T) { + config := mailbox.Config{ + Name: "test-mailbox", + Behavior: mailbox.BehaviorConfig{ + Topic: "test.topic", + ConsumerGroup: "test-group", + AutoCommit: true, + }, + Storage: mailbox.StorageConfig{ + DatabasePath: "/tmp/test.db", + TableName: "mailbox", + RetentionDays: 90, + }, + } + + mb, err := mailbox.New(config) + if err != nil { + t.Fatalf("failed to create mailbox: %v", err) + } + + ctx := context.Background() + + health, err := mb.Health(ctx) + if err != nil { + t.Fatalf("failed to get health: %v", err) + } + + if health.Status != plugin.HealthDegraded { + t.Errorf("expected health status 'degraded' (not running), got '%s'", health.Status) + } +} diff --git a/patterns/mailbox/manifest.yaml b/patterns/mailbox/manifest.yaml new file mode 100644 index 000000000..3c44a63b7 --- /dev/null +++ b/patterns/mailbox/manifest.yaml @@ -0,0 +1,145 @@ +name: mailbox +version: 0.1.0 +description: Searchable event store pattern that consumes messages from queues and stores them in structured databases with indexed headers and blob bodies +author: Prism Team +pattern_type: consumer + +# Pattern slots define the backend interfaces required +slots: + message_source: + description: Message queue or pub/sub 
backend for consuming events + required: true + interfaces: + - QueueInterface + - PubSubInterface + backends: + - nats + - kafka + - redis + - memstore + + storage: + description: Table storage backend for persisting events with indexed headers + required: true + interfaces: + - TableWriterInterface + backends: + - sqlite + - postgresql + - clickhouse + + query: + description: Query interface for retrieving stored messages + required: false + interfaces: + - TableReaderInterface + backends: + - sqlite + - postgresql + - clickhouse + +# Configuration schema +config_schema: + name: + type: string + required: true + description: Unique name for this mailbox instance + + behavior: + topic: + type: string + required: true + description: Topic or queue to consume messages from + + consumer_group: + type: string + required: true + description: Consumer group ID for coordinated consumption + + auto_commit: + type: boolean + default: true + description: Automatically acknowledge messages after successful storage + + storage: + database_path: + type: string + required: true + description: Path to database file (SQLite) or connection string + + table_name: + type: string + default: mailbox + description: Table name for storing events + + retention_days: + type: integer + default: 90 + description: Number of days to retain events before deletion + + cleanup_interval: + type: duration + default: 24h + description: Interval between automatic cleanup runs + +# Metrics exported by this pattern +metrics: + - name: mailbox_events_received_total + type: counter + description: Total number of events received from message source + + - name: mailbox_events_stored_total + type: counter + description: Total number of events successfully stored + + - name: mailbox_events_failed_total + type: counter + description: Total number of events that failed to store + + - name: mailbox_bytes_stored_total + type: counter + description: Total bytes stored in mailbox + + - name: 
mailbox_processing_latency_seconds + type: histogram + description: Time taken to process and store each event + +# Health checks +health_checks: + - name: mailbox_running + description: Checks if mailbox consumer is running + critical: true + + - name: failure_rate + description: Checks if failure rate is below 10% + critical: false + +# Use cases +use_cases: + - name: audit_logging + description: Store all system events with searchable metadata and encrypted PII + + - name: message_archives + description: Store communications with searchable headers and encrypted bodies + + - name: event_sourcing + description: Capture all domain events with indexed event types and aggregates + + - name: system_observability + description: Archive traces, logs, and metrics with searchable dimensions + + - name: compliance_retention + description: Retain records with searchable metadata while protecting sensitive payloads + +# Example configuration +example_config: + name: admin-mailbox + behavior: + topic: admin.events.> + consumer_group: mailbox-admin + auto_commit: true + storage: + database_path: /var/lib/prism/mailbox-admin.db + table_name: mailbox + retention_days: 90 + cleanup_interval: 24h diff --git a/patterns/mailbox/test-local.sh b/patterns/mailbox/test-local.sh new file mode 100755 index 000000000..802271d89 --- /dev/null +++ b/patterns/mailbox/test-local.sh @@ -0,0 +1,263 @@ +#!/usr/bin/env bash +# +# Local mailbox testing script +# Tests the publish → store → query workflow without external dependencies +# +# This script demonstrates: +# 1. Direct writes to SQLite mailbox (simulating proxy publish API) +# 2. 
Querying messages via the mailbox query interface +# + +set -e + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +MAILBOX_DB=".prism/mailbox-local.db" + +echo "🧪 Mailbox Local Testing" +echo "========================" +echo "" + +# Create .prism directory if it doesn't exist +mkdir -p .prism + +# Clean up old database +if [ -f "$MAILBOX_DB" ]; then + echo "🗑️ Removing old database: $MAILBOX_DB" + rm -f "$MAILBOX_DB" +fi + +# Test 1: Write test events directly to SQLite +echo "📝 Test 1: Writing test events to mailbox..." +echo "" + +# Create test program that uses SQLite driver to write events +cat > /tmp/mailbox-write-test.go <<'EOF' +package main + +import ( + "context" + "fmt" + "time" + + "github.com/jrepp/prism-data-layer/pkg/drivers/sqlite" + "github.com/jrepp/prism-data-layer/pkg/plugin" +) + +func main() { + ctx := context.Background() + + // Initialize SQLite driver + driver := sqlite.New() + config := &plugin.Config{ + Backend: map[string]interface{}{ + "database_path": ".prism/mailbox-local.db", + "table_name": "mailbox", + "retention_days": float64(30), + }, + } + + if err := driver.Initialize(ctx, config); err != nil { + panic(fmt.Sprintf("failed to initialize driver: %v", err)) + } + + if err := driver.Start(ctx); err != nil { + panic(fmt.Sprintf("failed to start driver: %v", err)) + } + defer driver.Stop(ctx) + + // Get table writer interface + writer, ok := driver.(plugin.TableWriterInterface) + if !ok { + panic("driver does not implement TableWriterInterface") + } + + // Write test events + events := []*plugin.MailboxEvent{ + { + MessageID: "msg-001", + Timestamp: time.Now().UnixMilli(), + Topic: "test.events", + ContentType: "application/json", + CorrelationID: "trace-abc-123", + Principal: "test-user", + Namespace: "local-mailbox", + CustomHeaders: map[string]string{ + "x-test-id": "001", + "x-source": "local-test", + }, + Body: []byte(`{"message": "Hello from test 1", "value": 42}`), + }, + { + MessageID: "msg-002", + Timestamp: 
time.Now().UnixMilli(), + Topic: "test.events", + ContentType: "application/json", + CorrelationID: "trace-abc-123", + Principal: "test-user", + Namespace: "local-mailbox", + CustomHeaders: map[string]string{ + "x-test-id": "002", + "x-source": "local-test", + }, + Body: []byte(`{"message": "Hello from test 2", "value": 100}`), + }, + { + MessageID: "msg-003", + Timestamp: time.Now().UnixMilli(), + Topic: "test.events", + ContentType: "text/plain", + CorrelationID: "trace-xyz-456", + Principal: "admin-user", + Namespace: "local-mailbox", + CustomHeaders: map[string]string{ + "x-test-id": "003", + "x-source": "local-test", + }, + Body: []byte("Plain text message for testing"), + }, + } + + for _, event := range events { + if err := writer.WriteEvent(ctx, event); err != nil { + fmt.Printf("❌ Failed to write event %s: %v\n", event.MessageID, err) + } else { + fmt.Printf("✅ Wrote event: %s (topic: %s, principal: %s)\n", + event.MessageID, event.Topic, event.Principal) + } + } + + // Get table stats + reader, ok := driver.(plugin.TableReaderInterface) + if !ok { + panic("driver does not implement TableReaderInterface") + } + + stats, err := reader.GetTableStats(ctx) + if err != nil { + panic(fmt.Sprintf("failed to get stats: %v", err)) + } + + fmt.Printf("\n📊 Mailbox Stats:\n") + fmt.Printf(" Total Events: %d\n", stats.TotalEvents) + fmt.Printf(" Total Size: %d bytes\n", stats.TotalSizeBytes) + fmt.Printf(" Oldest Event: %s\n", time.UnixMilli(stats.OldestEvent).Format(time.RFC3339)) + fmt.Printf(" Newest Event: %s\n", time.UnixMilli(stats.NewestEvent).Format(time.RFC3339)) +} +EOF + +# Build and run the test program +echo "Building test program..." +cd "$SCRIPT_DIR" +go run /tmp/mailbox-write-test.go +echo "" + +# Test 2: Query events using the table reader +echo "📖 Test 2: Querying events from mailbox..." 
+echo "" + +cat > /tmp/mailbox-query-test.go <<'EOF' +package main + +import ( + "context" + "encoding/json" + "fmt" + + "github.com/jrepp/prism-data-layer/pkg/drivers/sqlite" + "github.com/jrepp/prism-data-layer/pkg/plugin" +) + +func main() { + ctx := context.Background() + + // Initialize SQLite driver + driver := sqlite.New() + config := &plugin.Config{ + Backend: map[string]interface{}{ + "database_path": ".prism/mailbox-local.db", + "table_name": "mailbox", + "retention_days": float64(30), + }, + } + + if err := driver.Initialize(ctx, config); err != nil { + panic(fmt.Sprintf("failed to initialize driver: %v", err)) + } + + if err := driver.Start(ctx); err != nil { + panic(fmt.Sprintf("failed to start driver: %v", err)) + } + defer driver.Stop(ctx) + + // Get table reader interface + reader, ok := driver.(plugin.TableReaderInterface) + if !ok { + panic("driver does not implement TableReaderInterface") + } + + // Query all events + filter := &plugin.EventFilter{ + Limit: 10, + } + + events, err := reader.QueryEvents(ctx, filter) + if err != nil { + panic(fmt.Sprintf("failed to query events: %v", err)) + } + + fmt.Printf("Found %d event(s):\n\n", len(events)) + + for i, event := range events { + fmt.Printf("Event #%d:\n", i+1) + fmt.Printf(" Message ID: %s\n", event.MessageID) + fmt.Printf(" Topic: %s\n", event.Topic) + fmt.Printf(" Principal: %s\n", event.Principal) + fmt.Printf(" Correlation ID: %s\n", event.CorrelationID) + fmt.Printf(" Content Type: %s\n", event.ContentType) + + if len(event.CustomHeaders) > 0 { + fmt.Printf(" Custom Headers:\n") + for k, v := range event.CustomHeaders { + fmt.Printf(" %s: %s\n", k, v) + } + } + + // Try to pretty-print JSON payloads + if event.ContentType == "application/json" { + var prettyJSON map[string]interface{} + if err := json.Unmarshal(event.Body, &prettyJSON); err == nil { + formatted, _ := json.MarshalIndent(prettyJSON, " ", " ") + fmt.Printf(" Payload:\n %s\n", string(formatted)) + } else { + fmt.Printf(" 
Payload: %s\n", string(event.Body)) + } + } else { + fmt.Printf(" Payload: %s\n", string(event.Body)) + } + + fmt.Println() + } + + // Query specific event by ID + fmt.Println("🔍 Getting specific event by ID: msg-001") + event, err := reader.GetEvent(ctx, "msg-001") + if err != nil { + fmt.Printf("❌ Failed to get event: %v\n", err) + } else { + fmt.Printf("✅ Retrieved event: %s\n", event.MessageID) + fmt.Printf(" Payload: %s\n", string(event.Body)) + } +} +EOF + +go run /tmp/mailbox-query-test.go + +echo "" +echo "✅ Mailbox local testing complete!" +echo "" +echo "Database location: $MAILBOX_DB" +echo "" +echo "Next steps:" +echo " 1. Start prism-proxy with mailbox namespace configured" +echo " 2. Use 'prismctl publish message' to publish events" +echo " 3. Use 'prismctl mailbox query' to retrieve events" diff --git a/pkg/drivers/memstore/pubsub.go b/pkg/drivers/memstore/pubsub.go new file mode 100644 index 000000000..10859a050 --- /dev/null +++ b/pkg/drivers/memstore/pubsub.go @@ -0,0 +1,206 @@ +package memstore + +import ( + "context" + "fmt" + "sync" + "time" + + "github.com/google/uuid" + "github.com/jrepp/prism-data-layer/pkg/plugin" + pb "github.com/jrepp/prism-data-layer/pkg/plugin/gen/prism/interfaces" +) + +// MemPubSub implements an in-memory pub/sub plugin +// This is useful for local testing without external dependencies +type MemPubSub struct { + name string + version string + subscribers sync.Map // topic -> map[subscriberID]chan *plugin.PubSubMessage + subscriberMu sync.RWMutex + config *PubSubConfig + configLock sync.RWMutex +} + +// PubSubConfig holds pub/sub-specific configuration +type PubSubConfig struct { + BufferSize int `yaml:"buffer_size"` // Channel buffer size +} + +// NewPubSub creates a new in-memory pub/sub plugin +func NewPubSub() *MemPubSub { + return &MemPubSub{ + name: "mempubsub", + version: "0.1.0", + } +} + +// Name returns the plugin name +func (m *MemPubSub) Name() string { + return m.name +} + +// Version returns the plugin 
version +func (m *MemPubSub) Version() string { + return m.version +} + +// Initialize prepares the plugin with configuration +func (m *MemPubSub) Initialize(ctx context.Context, config *plugin.Config) error { + // Extract backend-specific config + var backendConfig PubSubConfig + if err := config.GetBackendConfig(&backendConfig); err != nil { + return fmt.Errorf("failed to parse backend config: %w", err) + } + + // Apply defaults + if backendConfig.BufferSize == 0 { + backendConfig.BufferSize = 100 // Default: 100 message buffer + } + + m.configLock.Lock() + m.config = &backendConfig + m.configLock.Unlock() + + return nil +} + +// Start begins serving requests +func (m *MemPubSub) Start(ctx context.Context) error { + // Nothing to start for in-memory pub/sub + return nil +} + +// Stop gracefully shuts down the plugin +func (m *MemPubSub) Stop(ctx context.Context) error { + // Close all subscriber channels + m.subscribers.Range(func(topic, subs interface{}) bool { + if subMap, ok := subs.(*sync.Map); ok { + subMap.Range(func(subID, ch interface{}) bool { + if channel, ok := ch.(chan *plugin.PubSubMessage); ok { + close(channel) + } + return true + }) + } + return true + }) + + return nil +} + +// Health returns the plugin health status +func (m *MemPubSub) Health(ctx context.Context) (*plugin.HealthStatus, error) { + topicCount := 0 + subscriberCount := 0 + + m.subscribers.Range(func(topic, subs interface{}) bool { + topicCount++ + if subMap, ok := subs.(*sync.Map); ok { + subMap.Range(func(subID, ch interface{}) bool { + subscriberCount++ + return true + }) + } + return true + }) + + return &plugin.HealthStatus{ + Status: plugin.HealthHealthy, + Message: fmt.Sprintf("healthy, %d topics, %d subscribers", topicCount, subscriberCount), + Details: map[string]string{ + "topics": fmt.Sprintf("%d", topicCount), + "subscribers": fmt.Sprintf("%d", subscriberCount), + }, + }, nil +} + +// Publish sends a message to all subscribers of a topic +func (m *MemPubSub) Publish(ctx 
context.Context, topic string, payload []byte, metadata map[string]string) (string, error) { + messageID := uuid.New().String() + + msg := &plugin.PubSubMessage{ + MessageID: messageID, + Topic: topic, + Payload: payload, + Metadata: metadata, + Timestamp: time.Now().UnixMilli(), + } + + // Get subscribers for this topic + if subs, ok := m.subscribers.Load(topic); ok { + if subMap, ok := subs.(*sync.Map); ok { + // Send to all subscribers + subMap.Range(func(subID, ch interface{}) bool { + if channel, ok := ch.(chan *plugin.PubSubMessage); ok { + // Non-blocking send to avoid deadlocks + select { + case channel <- msg: + default: + // Channel full, skip this subscriber + // In production, this would be logged + } + } + return true + }) + } + } + + return messageID, nil +} + +// Subscribe creates a subscription to a topic +func (m *MemPubSub) Subscribe(ctx context.Context, topic string, subscriberID string) (<-chan *plugin.PubSubMessage, error) { + m.configLock.RLock() + bufferSize := 100 // Default + if m.config != nil { + bufferSize = m.config.BufferSize + } + m.configLock.RUnlock() + + // Create channel for this subscriber + ch := make(chan *plugin.PubSubMessage, bufferSize) + + // Get or create subscriber map for this topic + subMap, _ := m.subscribers.LoadOrStore(topic, &sync.Map{}) + + // Add subscriber + if sm, ok := subMap.(*sync.Map); ok { + sm.Store(subscriberID, ch) + } + + return ch, nil +} + +// Unsubscribe removes a subscription +func (m *MemPubSub) Unsubscribe(ctx context.Context, topic string, subscriberID string) error { + if subs, ok := m.subscribers.Load(topic); ok { + if subMap, ok := subs.(*sync.Map); ok { + if ch, ok := subMap.Load(subscriberID); ok { + if channel, ok := ch.(chan *plugin.PubSubMessage); ok { + close(channel) + } + subMap.Delete(subscriberID) + } + } + } + + return nil +} + +// Compile-time interface compliance checks +var ( + _ plugin.Plugin = (*MemPubSub)(nil) // Core plugin interface + _ plugin.PubSubInterface = 
(*MemPubSub)(nil) // PubSub operations +) + +// GetInterfaceDeclarations returns the interfaces this driver implements +func (m *MemPubSub) GetInterfaceDeclarations() []*pb.InterfaceDeclaration { + return []*pb.InterfaceDeclaration{ + { + Name: "PubSubInterface", + ProtoFile: "prism/interfaces/pubsub/pubsub.proto", + Version: "v1", + }, + } +} diff --git a/pkg/drivers/sqlite/go.mod b/pkg/drivers/sqlite/go.mod new file mode 100644 index 000000000..6ae014809 --- /dev/null +++ b/pkg/drivers/sqlite/go.mod @@ -0,0 +1,10 @@ +module github.com/jrepp/prism-data-layer/pkg/drivers/sqlite + +go 1.23 + +require ( + github.com/jrepp/prism-data-layer/pkg/plugin v0.0.0 + modernc.org/sqlite v1.28.0 +) + +replace github.com/jrepp/prism-data-layer/pkg/plugin => ../../plugin diff --git a/pkg/drivers/sqlite/sqlite.go b/pkg/drivers/sqlite/sqlite.go new file mode 100644 index 000000000..fa3eae43f --- /dev/null +++ b/pkg/drivers/sqlite/sqlite.go @@ -0,0 +1,491 @@ +package sqlite + +import ( + "context" + "database/sql" + "encoding/json" + "fmt" + "sync" + "time" + + "github.com/jrepp/prism-data-layer/pkg/plugin" + _ "modernc.org/sqlite" // Pure Go SQLite driver +) + +// Driver implements TableWriterInterface and TableReaderInterface for SQLite storage +type Driver struct { + mu sync.RWMutex + db *sql.DB + dbPath string + table string + config *plugin.Config + started bool + stopChan chan struct{} +} + +// New creates a new SQLite driver instance +func New() plugin.Plugin { + return &Driver{ + stopChan: make(chan struct{}), + } +} + +// Initialize implements plugin.Plugin +func (d *Driver) Initialize(ctx context.Context, config *plugin.Config) error { + d.mu.Lock() + defer d.mu.Unlock() + + d.config = config + + // Extract configuration + dbPath, ok := config.Backend["database_path"].(string) + if !ok { + dbPath = "./mailbox.db" // Default path + } + d.dbPath = dbPath + + tableName, ok := config.Backend["table_name"].(string) + if !ok { + tableName = "mailbox" // Default table name + } + 
d.table = tableName + + // Open database + db, err := sql.Open("sqlite", d.dbPath) + if err != nil { + return fmt.Errorf("failed to open database: %w", err) + } + d.db = db + + // Configure SQLite for optimal performance + pragmas := []string{ + "PRAGMA journal_mode=WAL", // Write-Ahead Logging + "PRAGMA synchronous=NORMAL", // Balance safety and performance + "PRAGMA cache_size=10000", // 10k pages cache + "PRAGMA temp_store=MEMORY", // In-memory temp tables + "PRAGMA mmap_size=30000000000", // 30GB memory-mapped I/O + "PRAGMA foreign_keys=ON", // Enable foreign keys + "PRAGMA busy_timeout=5000", // 5s timeout for locks + } + + for _, pragma := range pragmas { + if _, err := d.db.Exec(pragma); err != nil { + return fmt.Errorf("failed to set pragma %s: %w", pragma, err) + } + } + + // Create table schema + createTableSQL := fmt.Sprintf(` + CREATE TABLE IF NOT EXISTS %s ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + + -- Indexed headers (extracted from metadata) + message_id TEXT NOT NULL UNIQUE, + timestamp INTEGER NOT NULL, + topic TEXT NOT NULL, + content_type TEXT, + schema_id TEXT, + encryption TEXT, + correlation_id TEXT, + principal TEXT, + namespace TEXT, + + -- Custom headers (JSON for flexibility) + custom_headers TEXT, + + -- Body (opaque blob, may be encrypted) + body BLOB NOT NULL, + + -- Metadata + created_at INTEGER NOT NULL + ) + `, d.table) + + if _, err := d.db.Exec(createTableSQL); err != nil { + return fmt.Errorf("failed to create table: %w", err) + } + + // Create indexes for common queries + indexes := []string{ + fmt.Sprintf("CREATE INDEX IF NOT EXISTS idx_%s_timestamp ON %s(timestamp)", d.table, d.table), + fmt.Sprintf("CREATE INDEX IF NOT EXISTS idx_%s_topic ON %s(topic)", d.table, d.table), + fmt.Sprintf("CREATE INDEX IF NOT EXISTS idx_%s_principal ON %s(principal)", d.table, d.table), + fmt.Sprintf("CREATE INDEX IF NOT EXISTS idx_%s_correlation_id ON %s(correlation_id)", d.table, d.table), + fmt.Sprintf("CREATE INDEX IF NOT EXISTS 
idx_%s_namespace ON %s(namespace)", d.table, d.table), + } + + for _, indexSQL := range indexes { + if _, err := d.db.Exec(indexSQL); err != nil { + return fmt.Errorf("failed to create index: %w", err) + } + } + + return nil +} + +// Start implements plugin.Plugin +func (d *Driver) Start(ctx context.Context) error { + d.mu.Lock() + defer d.mu.Unlock() + + if d.started { + return fmt.Errorf("driver already started") + } + + d.started = true + + // Start retention cleanup goroutine if configured + retentionDays, ok := d.config.Backend["retention_days"].(float64) + if ok && retentionDays > 0 { + go d.retentionCleanupLoop(int(retentionDays)) + } + + return nil +} + +// Stop implements plugin.Plugin +func (d *Driver) Stop(ctx context.Context) error { + d.mu.Lock() + defer d.mu.Unlock() + + if !d.started { + return nil + } + + close(d.stopChan) + d.started = false + + if d.db != nil { + return d.db.Close() + } + + return nil +} + +// Health implements plugin.Plugin +func (d *Driver) Health(ctx context.Context) (*plugin.HealthStatus, error) { + d.mu.RLock() + defer d.mu.RUnlock() + + status := &plugin.HealthStatus{ + Status: plugin.HealthHealthy, + Message: "sqlite driver healthy", + Details: map[string]string{ + "database": d.dbPath, + "table": d.table, + }, + } + + // Verify database connectivity + if d.db != nil { + if err := d.db.PingContext(ctx); err != nil { + status.Status = plugin.HealthDegraded + status.Message = fmt.Sprintf("database ping failed: %v", err) + return status, nil + } + } else { + status.Status = plugin.HealthDegraded + status.Message = "database not initialized" + } + + return status, nil +} + +// Name implements plugin.Plugin +func (d *Driver) Name() string { + return "sqlite" +} + +// Version implements plugin.Plugin +func (d *Driver) Version() string { + return "0.1.0" +} + +// GetInterfaceDeclarations implements plugin.Plugin +func (d *Driver) GetInterfaceDeclarations() []*plugin.InterfaceDeclaration { + return []*plugin.InterfaceDeclaration{ + 
{ + Name: "TableWriter", + Version: "1.0", + }, + { + Name: "TableReader", + Version: "1.0", + }, + } +} + +// WriteEvent implements TableWriterInterface +func (d *Driver) WriteEvent(ctx context.Context, event *plugin.MailboxEvent) error { + d.mu.RLock() + defer d.mu.RUnlock() + + if !d.started { + return fmt.Errorf("driver not started") + } + + // Serialize custom headers to JSON + customHeadersJSON, err := json.Marshal(event.CustomHeaders) + if err != nil { + return fmt.Errorf("failed to marshal custom headers: %w", err) + } + + insertSQL := fmt.Sprintf(` + INSERT INTO %s ( + message_id, timestamp, topic, content_type, schema_id, + encryption, correlation_id, principal, namespace, + custom_headers, body, created_at + ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?) + `, d.table) + + _, err = d.db.ExecContext(ctx, insertSQL, + event.MessageID, + event.Timestamp, + event.Topic, + event.ContentType, + event.SchemaID, + event.Encryption, + event.CorrelationID, + event.Principal, + event.Namespace, + string(customHeadersJSON), + event.Body, + time.Now().UnixMilli(), + ) + + if err != nil { + return fmt.Errorf("failed to insert event: %w", err) + } + + return nil +} + +// QueryEvents implements TableWriterInterface +func (d *Driver) QueryEvents(ctx context.Context, filter *plugin.EventFilter) ([]*plugin.MailboxEvent, error) { + d.mu.RLock() + defer d.mu.RUnlock() + + if !d.started { + return nil, fmt.Errorf("driver not started") + } + + // Build query with filters + query := fmt.Sprintf("SELECT message_id, timestamp, topic, content_type, schema_id, encryption, correlation_id, principal, namespace, custom_headers, body FROM %s WHERE 1=1", d.table) + args := []interface{}{} + + if filter.StartTime != nil { + query += " AND timestamp >= ?" + args = append(args, *filter.StartTime) + } + + if filter.EndTime != nil { + query += " AND timestamp <= ?" 
+ args = append(args, *filter.EndTime) + } + + if len(filter.Topics) > 0 { + query += " AND topic IN (" + for i := range filter.Topics { + if i > 0 { + query += ", " + } + query += "?" + args = append(args, filter.Topics[i]) + } + query += ")" + } + + if len(filter.Principals) > 0 { + query += " AND principal IN (" + for i := range filter.Principals { + if i > 0 { + query += ", " + } + query += "?" + args = append(args, filter.Principals[i]) + } + query += ")" + } + + if filter.CorrelationID != nil { + query += " AND correlation_id = ?" + args = append(args, *filter.CorrelationID) + } + + query += " ORDER BY timestamp DESC" + + if filter.Limit > 0 { + query += " LIMIT ?" + args = append(args, filter.Limit) + } + + if filter.Offset > 0 { + query += " OFFSET ?" + args = append(args, filter.Offset) + } + + rows, err := d.db.QueryContext(ctx, query, args...) + if err != nil { + return nil, fmt.Errorf("failed to query events: %w", err) + } + defer rows.Close() + + var events []*plugin.MailboxEvent + for rows.Next() { + var event plugin.MailboxEvent + var customHeadersJSON string + + err := rows.Scan( + &event.MessageID, + &event.Timestamp, + &event.Topic, + &event.ContentType, + &event.SchemaID, + &event.Encryption, + &event.CorrelationID, + &event.Principal, + &event.Namespace, + &customHeadersJSON, + &event.Body, + ) + if err != nil { + return nil, fmt.Errorf("failed to scan row: %w", err) + } + + // Deserialize custom headers + if customHeadersJSON != "" { + if err := json.Unmarshal([]byte(customHeadersJSON), &event.CustomHeaders); err != nil { + return nil, fmt.Errorf("failed to unmarshal custom headers: %w", err) + } + } + + events = append(events, &event) + } + + if err := rows.Err(); err != nil { + return nil, fmt.Errorf("row iteration error: %w", err) + } + + return events, nil +} + +// GetEvent implements TableReaderInterface +func (d *Driver) GetEvent(ctx context.Context, messageID string) (*plugin.MailboxEvent, error) { + d.mu.RLock() + defer d.mu.RUnlock() + 
+ if !d.started { + return nil, fmt.Errorf("driver not started") + } + + query := fmt.Sprintf("SELECT message_id, timestamp, topic, content_type, schema_id, encryption, correlation_id, principal, namespace, custom_headers, body FROM %s WHERE message_id = ?", d.table) + + var event plugin.MailboxEvent + var customHeadersJSON string + + err := d.db.QueryRowContext(ctx, query, messageID).Scan( + &event.MessageID, + &event.Timestamp, + &event.Topic, + &event.ContentType, + &event.SchemaID, + &event.Encryption, + &event.CorrelationID, + &event.Principal, + &event.Namespace, + &customHeadersJSON, + &event.Body, + ) + + if err == sql.ErrNoRows { + return nil, fmt.Errorf("event not found: %s", messageID) + } + if err != nil { + return nil, fmt.Errorf("failed to query event: %w", err) + } + + // Deserialize custom headers + if customHeadersJSON != "" { + if err := json.Unmarshal([]byte(customHeadersJSON), &event.CustomHeaders); err != nil { + return nil, fmt.Errorf("failed to unmarshal custom headers: %w", err) + } + } + + return &event, nil +} + +// DeleteOldEvents implements TableWriterInterface +func (d *Driver) DeleteOldEvents(ctx context.Context, olderThan int64) (int64, error) { + d.mu.RLock() + defer d.mu.RUnlock() + + if !d.started { + return 0, fmt.Errorf("driver not started") + } + + deleteSQL := fmt.Sprintf("DELETE FROM %s WHERE timestamp < ?", d.table) + result, err := d.db.ExecContext(ctx, deleteSQL, olderThan) + if err != nil { + return 0, fmt.Errorf("failed to delete old events: %w", err) + } + + rowsAffected, err := result.RowsAffected() + if err != nil { + return 0, fmt.Errorf("failed to get rows affected: %w", err) + } + + return rowsAffected, nil +} + +// GetTableStats implements TableWriterInterface +func (d *Driver) GetTableStats(ctx context.Context) (*plugin.TableStats, error) { + d.mu.RLock() + defer d.mu.RUnlock() + + if !d.started { + return nil, fmt.Errorf("driver not started") + } + + stats := &plugin.TableStats{} + + // Get total events + 
countSQL := fmt.Sprintf("SELECT COUNT(*), MIN(timestamp), MAX(timestamp) FROM %s", d.table) + err := d.db.QueryRowContext(ctx, countSQL).Scan(&stats.TotalEvents, &stats.OldestEvent, &stats.NewestEvent) + if err != nil && err != sql.ErrNoRows { + return nil, fmt.Errorf("failed to get event count: %w", err) + } + + // Get total size (approximate) + sizeSQL := fmt.Sprintf("SELECT SUM(LENGTH(body)) FROM %s", d.table) + err = d.db.QueryRowContext(ctx, sizeSQL).Scan(&stats.TotalSizeBytes) + if err != nil && err != sql.ErrNoRows { + return nil, fmt.Errorf("failed to get total size: %w", err) + } + + return stats, nil +} + +// retentionCleanupLoop runs periodic cleanup of old events +func (d *Driver) retentionCleanupLoop(retentionDays int) { + ticker := time.NewTicker(24 * time.Hour) // Run daily + defer ticker.Stop() + + for { + select { + case <-d.stopChan: + return + case <-ticker.C: + cutoff := time.Now().Add(-time.Duration(retentionDays) * 24 * time.Hour).UnixMilli() + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute) + deleted, err := d.DeleteOldEvents(ctx, cutoff) + cancel() + + if err != nil { + // Log error (in production, use structured logging) + fmt.Printf("retention cleanup error: %v\n", err) + } else if deleted > 0 { + fmt.Printf("retention cleanup: deleted %d events older than %d days\n", deleted, retentionDays) + } + } + } +} diff --git a/pkg/launcher/admin_client.go b/pkg/launcher/admin_client.go new file mode 100644 index 000000000..e10dc93dc --- /dev/null +++ b/pkg/launcher/admin_client.go @@ -0,0 +1,142 @@ +package launcher + +import ( + "context" + "fmt" + "log" + "time" + + pb "github.com/jrepp/prism-data-layer/pkg/plugin/gen/prism" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" +) + +// AdminClient handles communication with prism-admin control plane +type AdminClient struct { + client pb.ControlPlaneClient + conn *grpc.ClientConn + launcherID string + address string + region string + maxProcs int32 
+} + +// AdminClientConfig configures the admin client +type AdminClientConfig struct { + AdminEndpoint string + LauncherID string + Address string + Region string + MaxProcesses int32 +} + +// NewAdminClient creates a new admin client +func NewAdminClient(cfg *AdminClientConfig) (*AdminClient, error) { + log.Printf("[AdminClient] Connecting to admin at %s...", cfg.AdminEndpoint) + + conn, err := grpc.NewClient( + cfg.AdminEndpoint, + grpc.WithTransportCredentials(insecure.NewCredentials()), + ) + if err != nil { + return nil, fmt.Errorf("failed to dial admin: %w", err) + } + + client := pb.NewControlPlaneClient(conn) + + return &AdminClient{ + client: client, + conn: conn, + launcherID: cfg.LauncherID, + address: cfg.Address, + region: cfg.Region, + maxProcs: cfg.MaxProcesses, + }, nil +} + +// Register registers the launcher with admin on startup +func (c *AdminClient) Register(ctx context.Context) (*pb.LauncherRegistrationAck, error) { + log.Printf("[AdminClient] Registering launcher %s with admin...", c.launcherID) + + req := &pb.LauncherRegistration{ + LauncherId: c.launcherID, + Address: c.address, + Region: c.region, + Version: "0.1.0", + Capabilities: []string{"pattern", "proxy", "backend", "utility"}, + MaxProcesses: c.maxProcs, + ProcessTypes: []string{"pattern"}, + Metadata: map[string]string{}, + } + + resp, err := c.client.RegisterLauncher(ctx, req) + if err != nil { + return nil, fmt.Errorf("registration failed: %w", err) + } + + if !resp.Success { + return nil, fmt.Errorf("registration rejected: %s", resp.Message) + } + + log.Printf("[AdminClient] Registration successful: %s", resp.Message) + log.Printf("[AdminClient] Assigned capacity: %d, initial processes: %d", + resp.AssignedCapacity, len(resp.InitialProcesses)) + + return resp, nil +} + +// StartHeartbeatLoop starts sending periodic heartbeats to admin +func (c *AdminClient) StartHeartbeatLoop(ctx context.Context, interval time.Duration) { + ticker := time.NewTicker(interval) + defer 
ticker.Stop() + + log.Printf("[AdminClient] Starting heartbeat loop (interval: %v)", interval) + + for { + select { + case <-ctx.Done(): + log.Printf("[AdminClient] Heartbeat loop stopping") + return + case <-ticker.C: + if err := c.sendHeartbeat(ctx); err != nil { + log.Printf("[AdminClient] Heartbeat failed: %v", err) + } + } + } +} + +// sendHeartbeat sends a heartbeat to admin +func (c *AdminClient) sendHeartbeat(ctx context.Context) error { + req := &pb.LauncherHeartbeatRequest{ + LauncherId: c.launcherID, + ProcessHealth: map[string]*pb.ProcessHealth{}, + Resources: &pb.LauncherResourceUsage{ + ProcessCount: 0, + MaxProcesses: c.maxProcs, + TotalMemoryMb: 0, + CpuPercent: 0.0, + AvailableSlots: c.maxProcs, + }, + Timestamp: time.Now().Unix(), + } + + resp, err := c.client.LauncherHeartbeat(ctx, req) + if err != nil { + return fmt.Errorf("heartbeat RPC failed: %w", err) + } + + if !resp.Success { + return fmt.Errorf("heartbeat rejected: %s", resp.Message) + } + + log.Printf("[AdminClient] Heartbeat acknowledged (server_time=%d)", resp.ServerTimestamp) + return nil +} + +// Close closes the admin client connection +func (c *AdminClient) Close() error { + if c.conn != nil { + return c.conn.Close() + } + return nil +} diff --git a/pkg/plugin/declarations.go b/pkg/plugin/declarations.go new file mode 100644 index 000000000..00003a412 --- /dev/null +++ b/pkg/plugin/declarations.go @@ -0,0 +1,6 @@ +package plugin + +import pb "github.com/jrepp/prism-data-layer/pkg/plugin/gen/prism/interfaces" + +// InterfaceDeclaration is a convenient alias for the protobuf type +type InterfaceDeclaration = pb.InterfaceDeclaration diff --git a/pkg/plugin/interfaces.go b/pkg/plugin/interfaces.go index 0d5ac5447..90cc8de30 100644 --- a/pkg/plugin/interfaces.go +++ b/pkg/plugin/interfaces.go @@ -98,6 +98,67 @@ type ObjectMetadata struct { ETag string } +// TableWriterInterface defines operations for writing structured events to table storage +// Used by mailbox pattern storage slot to 
// TableWriterInterface is the write-side contract for table-backed event
// storage. The mailbox pattern's storage slot uses it to persist events
// whose headers are indexed for later querying.
type TableWriterInterface interface {
	// WriteEvent stores a single event (indexed headers plus opaque body).
	WriteEvent(ctx context.Context, event *MailboxEvent) error

	// DeleteOldEvents removes every event older than the retention cutoff
	// and reports how many were removed.
	DeleteOldEvents(ctx context.Context, olderThan int64) (int64, error)

	// GetTableStats reports storage-level metrics for the table.
	GetTableStats(ctx context.Context) (*TableStats, error)
}

// TableReaderInterface is the read-side contract for table-backed event
// storage. The mailbox pattern's query slot uses it to retrieve stored
// messages as an array of header+payload records.
type TableReaderInterface interface {
	// QueryEvents returns the events matching the given filter criteria.
	QueryEvents(ctx context.Context, filter *EventFilter) ([]*MailboxEvent, error)

	// GetEvent looks up one event by its unique message ID.
	GetEvent(ctx context.Context, messageID string) (*MailboxEvent, error)

	// GetTableStats reports storage-level metrics for the table.
	GetTableStats(ctx context.Context) (*TableStats, error)
}

// MailboxEvent is one structured event as persisted to table storage:
// a set of well-known, individually indexed header fields, a free-form
// header map, and an opaque body.
type MailboxEvent struct {
	MessageID     string
	Timestamp     int64
	Topic         string
	ContentType   string
	SchemaID      string
	Encryption    string
	CorrelationID string
	Principal     string
	Namespace     string
	CustomHeaders map[string]string // free-form x-* headers
	Body          []byte            // opaque blob (may be encrypted)
}

// EventFilter describes the optional criteria for QueryEvents. Nil pointer
// fields and empty slices mean "no constraint on this dimension".
type EventFilter struct {
	StartTime     *int64
	EndTime       *int64
	Topics        []string
	Principals    []string
	CorrelationID *string
	Limit         int
	Offset        int
}

// TableStats summarizes what a table currently holds.
type TableStats struct {
	TotalEvents    int64
	TotalSizeBytes int64
	OldestEvent    int64 // Unix timestamp of the earliest stored event
	NewestEvent    int64 // Unix timestamp of the latest stored event
}

// NOTE: InterfaceSupport interface removed - interfaces are now declared
// at registration time via InterfaceDeclaration in the lifecycle protocol.
// See proto/prism/interfaces/lifecycle.proto for the new declaration format.
syntax = "proto3";

package prism;

option go_package = "github.com/jrepp/prism-data-layer/proto/gen/prism";

import "prism/options.proto";

// ControlPlane service provides the bidirectional gRPC protocol between
// prism-admin and managed components (prism-proxy, prism-launcher).
//
// NOTE(review): every map field in this file arrived with its type
// parameters stripped (bare `map`), which is invalid proto3; the
// `map<key, value>` types below are reconstructed. process_health /
// namespace_health value types are confirmed by the Go client usage
// (map[string]*pb.ProcessHealth); the rest are inferred from field names
// and sibling messages — confirm against the original file.
//
// References:
// - ADR-055: Proxy-Admin Control Plane Protocol
// - ADR-056: Launcher-Admin Control Plane Protocol
// - ADR-057: Prism-Launcher Refactoring
service ControlPlane {
  option (require_auth) = true;
  option (version) = "1.0.0";

  // ====================================================================
  // Proxy RPCs (ADR-055)
  // ====================================================================

  // RegisterProxy registers a proxy instance with admin on startup.
  rpc RegisterProxy(ProxyRegistration) returns (ProxyRegistrationAck) {
    option (idempotent) = true;
    option (timeout_ms) = 10000;
  }

  // AssignNamespace pushes namespace configuration from admin to proxy.
  rpc AssignNamespace(NamespaceAssignment) returns (NamespaceAssignmentAck) {
    option (idempotent) = true;
    option (timeout_ms) = 5000;
  }

  // CreateNamespace handles client-initiated namespace creation requests
  // that flow through proxy to admin.
  rpc CreateNamespace(CreateNamespaceRequest) returns (CreateNamespaceResponse) {
    option (idempotent) = false;
    option (timeout_ms) = 10000;
    option (required_permissions) = "admin";
  }

  // Heartbeat receives periodic health updates from proxies (every 30s).
  rpc Heartbeat(ProxyHeartbeat) returns (HeartbeatAck) {
    option (idempotent) = true;
    option (timeout_ms) = 5000;
  }

  // RevokeNamespace removes namespace assignment from proxy.
  rpc RevokeNamespace(NamespaceRevocation) returns (NamespaceRevocationAck) {
    option (idempotent) = true;
    option (timeout_ms) = 5000;
  }

  // ====================================================================
  // Launcher RPCs (ADR-056, ADR-057)
  // ====================================================================

  // RegisterLauncher registers a launcher instance with admin on startup.
  rpc RegisterLauncher(LauncherRegistration) returns (LauncherRegistrationAck) {
    option (idempotent) = true;
    option (timeout_ms) = 10000;
  }

  // AssignProcess pushes process assignment from admin to launcher.
  // Supports all process types: pattern, proxy, backend, utility.
  rpc AssignProcess(ProcessAssignment) returns (ProcessAssignmentAck) {
    option (idempotent) = true;
    option (timeout_ms) = 5000;
  }

  // LauncherHeartbeat receives periodic health updates from launchers (every 30s).
  rpc LauncherHeartbeat(LauncherHeartbeatRequest) returns (HeartbeatAck) {
    option (idempotent) = true;
    option (timeout_ms) = 5000;
  }

  // RevokeProcess removes process assignment from launcher with graceful timeout.
  rpc RevokeProcess(ProcessRevocation) returns (ProcessRevocationAck) {
    option (idempotent) = true;
    option (timeout_ms) = 35000; // 30s graceful + 5s overhead
  }
}

// ====================================================================
// Proxy Registration Messages (ADR-055)
// ====================================================================

message ProxyRegistration {
  string proxy_id = 1;                 // Unique proxy identifier (proxy-01)
  string address = 2;                  // Proxy gRPC address (proxy-01.prism.local:8980)
  string region = 3;                   // Deployment region (us-west-2)
  string version = 4;                  // Proxy version (0.1.0)
  repeated string capabilities = 5;    // Supported patterns (keyvalue, pubsub)
  map<string, string> metadata = 6;    // Custom labels
}

message ProxyRegistrationAck {
  bool success = 1;
  string message = 2;
  repeated NamespaceAssignment initial_namespaces = 3; // Pre-assigned namespaces
  repeated PartitionRange partition_ranges = 4;        // Assigned partition ranges
}

message NamespaceAssignment {
  string namespace = 1;
  int32 partition_id = 2;       // Partition ID (0-255)
  NamespaceConfig config = 3;   // Full namespace configuration
  int64 version = 4;            // Config version for idempotency
}

message NamespaceConfig {
  map<string, BackendConfig> backends = 1;
  map<string, PatternConfig> patterns = 2;
  AuthConfig auth = 3;
  map<string, string> metadata = 4;
}

message BackendConfig {
  string backend_type = 1;              // redis, kafka, nats, postgres, memstore
  string connection_string = 2;
  map<string, string> credentials = 3;
  map<string, string> options = 4;
}

message PatternConfig {
  string pattern_name = 1;                  // keyvalue, pubsub, multicast_registry
  map<string, string> settings = 2;
  repeated string required_interfaces = 3;  // Interfaces this pattern requires
}

message AuthConfig {
  bool enabled = 1;
  string provider = 2;              // oidc, jwt, mtls
  map<string, string> options = 3;
}

message CreateNamespaceRequest {
  string namespace = 1;
  string requesting_proxy = 2;  // Proxy ID handling client request
  NamespaceConfig config = 3;
  string principal = 4;         // Authenticated user creating namespace
}

message CreateNamespaceResponse {
  bool success = 1;
  string message = 2;
  int32 assigned_partition = 3;
  string assigned_proxy = 4;    // Proxy that will handle this namespace
}

message ProxyHeartbeat {
  string proxy_id = 1;
  map<string, NamespaceHealth> namespace_health = 2;
  ResourceUsage resources = 3;
  int64 timestamp = 4;
}

message NamespaceHealth {
  int32 active_sessions = 1;
  int64 requests_per_second = 2;
  string status = 3;            // healthy, degraded, unhealthy
}

message ResourceUsage {
  float cpu_percent = 1;
  int64 memory_mb = 2;
  int32 goroutine_count = 3;
  int64 uptime_seconds = 4;
}

message NamespaceAssignmentAck {
  bool success = 1;
  string message = 2;
}

message NamespaceRevocation {
  string proxy_id = 1;
  string namespace = 2;
  int32 graceful_timeout_seconds = 3; // Timeout before force removal (default 30s)
}

message NamespaceRevocationAck {
  bool success = 1;
  string message = 2;
  int64 revoked_at = 3;               // Unix timestamp when namespace removed
}

message PartitionRange {
  int32 start = 1;  // Inclusive
  int32 end = 2;    // Inclusive
}

// ====================================================================
// Launcher Registration Messages (ADR-056, ADR-057)
// ====================================================================

message LauncherRegistration {
  string launcher_id = 1;              // Unique launcher identifier (launcher-01)
  string address = 2;                  // Launcher gRPC address (launcher-01.prism.local:7070)
  string region = 3;                   // Deployment region (us-west-2)
  string version = 4;                  // Launcher version (0.1.0)
  repeated string capabilities = 5;    // Supported process types (pattern, proxy, backend, utility)
  int32 max_processes = 6;             // Maximum concurrent processes
  repeated string process_types = 7;   // Process types this launcher supports
  map<string, string> metadata = 8;    // Custom labels
}

message LauncherRegistrationAck {
  bool success = 1;
  string message = 2;
  repeated ProcessAssignment initial_processes = 3; // Pre-assigned processes
  int32 assigned_capacity = 4;                      // Number of process slots assigned
}

message ProcessAssignment {
  string process_id = 1;    // Unique process identifier
  string process_type = 2;  // pattern, proxy, backend, utility
  string namespace = 3;     // Target namespace (if applicable)
  ProcessConfig config = 4; // Process-specific configuration
  int64 version = 5;        // Config version for idempotency
}

message ProcessAssignmentAck {
  bool success = 1;
  string message = 2;
}

message ProcessConfig {
  // Common fields (all process types)
  string binary = 1;
  repeated string args = 2;
  map<string, string> env = 3;
  int32 port = 4;
  int32 health_port = 5;
  string log_level = 6;

  // Type-specific configs (only one should be set based on process_type)
  PatternProcessConfig pattern = 10;
  ProxyProcessConfig proxy = 11;
  BackendProcessConfig backend = 12;
  UtilityProcessConfig utility = 13;
}

message PatternProcessConfig {
  string pattern_type = 1;                // keyvalue, pubsub, multicast_registry
  string isolation_level = 2;             // none, namespace, session
  map<string, BackendConfig> slots = 3;   // Backend configurations for pattern slots
  map<string, string> settings = 4;       // Pattern-specific settings
}

message ProxyProcessConfig {
  string admin_endpoint = 1;
  int32 control_port = 2;
  int32 data_port = 3;
  string proxy_id = 4;
  repeated int32 partition_ranges = 5;    // Partition IDs this proxy handles
}

message BackendProcessConfig {
  string backend_type = 1;                  // redis, kafka, nats, postgres
  string connection_string = 2;
  map<string, string> credentials = 3;
  map<string, string> driver_options = 4;
}

message UtilityProcessConfig {
  string utility_type = 1;                  // log-collector, metrics-exporter, health-monitor
  map<string, string> settings = 2;
  repeated string target_processes = 3;     // Process IDs this utility monitors/manages
}

message LauncherHeartbeatRequest {
  string launcher_id = 1;
  map<string, ProcessHealth> process_health = 2;
  LauncherResourceUsage resources = 3;
  int64 timestamp = 4;
}

message ProcessHealth {
  string status = 1;          // running, starting, stopping, failed, stopped
  int32 pid = 2;              // Process ID
  int32 restart_count = 3;    // Number of restarts
  int32 error_count = 4;      // Cumulative error count
  int64 memory_mb = 5;        // Memory usage in MB
  int64 uptime_seconds = 6;   // Seconds since process started
  string last_error = 7;      // Last error message (if any)
  float cpu_percent = 8;      // CPU utilization percentage
}

message LauncherResourceUsage {
  int32 process_count = 1;    // Current process count
  int32 max_processes = 2;    // Maximum capacity
  int64 total_memory_mb = 3;  // Total memory used by all processes
  float cpu_percent = 4;      // CPU utilization percentage
  int32 available_slots = 5;  // Remaining process slots
}

message ProcessRevocation {
  string launcher_id = 1;
  string process_id = 2;
  int32 graceful_timeout_seconds = 3; // Timeout before force kill (default 30s)
}

message ProcessRevocationAck {
  bool success = 1;
  string message = 2;
  int64 stopped_at = 3;  // Unix timestamp when process stopped
  int32 exit_code = 4;   // Process exit code
}

// ====================================================================
// Common Messages
// ====================================================================

message HeartbeatAck {
  bool success = 1;
  string message = 2;
  int64 server_timestamp = 3; // Server's current timestamp for clock sync
}
unit tests", category="unit", timeout=60, ), TestSuite( name="memstore-unit", - command="cd patterns/memstore && go test -v -cover ./...", + command="cd pkg/drivers/memstore && go test -v -cover ./...", description="MemStore unit tests", category="unit", timeout=60, ), TestSuite( name="redis-unit", - command="cd patterns/redis && go test -v -cover ./...", + command="cd pkg/drivers/redis && go test -v -cover ./...", description="Redis unit tests", category="unit", timeout=60, ), TestSuite( name="nats-unit", - command="cd patterns/nats && go test -v -cover ./...", + command="cd pkg/drivers/nats && go test -v -cover ./...", description="NATS unit tests", category="unit", timeout=60, @@ -119,35 +119,35 @@ class TestSuite: # Lint Tests (fast, parallel) TestSuite( name="lint-rust", - command="cd proxy && cargo clippy -- -D warnings", + command="cd prism-proxy && cargo clippy -- -D warnings", description="Rust linting", category="lint", timeout=120, ), TestSuite( name="lint-go-memstore", - command="cd patterns/memstore && go vet ./...", + command="cd pkg/drivers/memstore && go vet ./...", description="Go linting (memstore)", category="lint", timeout=30, ), TestSuite( name="lint-go-redis", - command="cd patterns/redis && go vet ./...", + command="cd pkg/drivers/redis && go vet ./...", description="Go linting (redis)", category="lint", timeout=30, ), TestSuite( name="lint-go-nats", - command="cd patterns/nats && go vet ./...", + command="cd pkg/drivers/nats && go vet ./...", description="Go linting (nats)", category="lint", timeout=30, ), TestSuite( name="lint-go-core", - command="cd patterns/core && go vet ./...", + command="cd pkg/plugin && go vet ./...", description="Go linting (core)", category="lint", timeout=30,