diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 0000000..b82cbf8 --- /dev/null +++ b/.dockerignore @@ -0,0 +1,8 @@ +dist +tmp +.git +.github +.vscode +db/data +docs +scripts/rootfs_cache diff --git a/.env.example b/.env.example new file mode 100644 index 0000000..c2c5054 --- /dev/null +++ b/.env.example @@ -0,0 +1,14 @@ +# Database Configuration +DATABASE_URL=postgres://castletown:castletown@localhost:5432/castletown?sslmode=disable + +# MinIO Configuration +MINIO_ENDPOINT=localhost:9000 +MINIO_ACCESS_KEY=minioadmin +MINIO_SECRET_KEY=minioadmin +MINIO_USE_SSL=false +MINIO_BUCKET=castletown + +# Testcase Storage Configuration +# Maximum size (in bytes) for storing test case input/output directly in database +# Larger test cases will be stored in MinIO +TESTCASE_MAX_DB_SIZE=10240 # 10KB default diff --git a/.github/workflows/test-e2e.yml b/.github/workflows/test-container.yml similarity index 75% rename from .github/workflows/test-e2e.yml rename to .github/workflows/test-container.yml index d084c3c..54625ae 100644 --- a/.github/workflows/test-e2e.yml +++ b/.github/workflows/test-container.yml @@ -1,4 +1,4 @@ -name: Test E2E +name: Test Container on: push: @@ -26,10 +26,10 @@ jobs: chmod +x umoci sudo mv umoci /usr/local/bin/umoci - wget https://go.dev/dl/go1.25.1.linux-amd64.tar.gz - sudo tar -C /usr/local -xzf go1.25.1.linux-amd64.tar.gz + wget https://go.dev/dl/go1.25.5.linux-amd64.tar.gz + sudo tar -C /usr/local -xzf go1.25.5.linux-amd64.tar.gz export PATH=$PATH:/usr/local/go/bin go version - - name: Run e2e tests - run: make test-e2e + - name: Run container tests + run: make test-container diff --git a/.github/workflows/test-sandbox.yml b/.github/workflows/test-sandbox.yml deleted file mode 100644 index f15453b..0000000 --- a/.github/workflows/test-sandbox.yml +++ /dev/null @@ -1,35 +0,0 @@ -name: Test Sandbox - -on: - push: - branches: [ main ] - pull_request: - branches: [ main ] - -jobs: - test-sandbox: - runs-on: ubuntu-latest - - steps: 
- - name: Checkout repository - uses: actions/checkout@v4 - - - name: Set up dependencies - run: | - sudo apt-get update - sudo apt-get install -y make - - sudo apt install -y skopeo - - sudo apt install -y curl tar - curl -L https://github.com/opencontainers/umoci/releases/download/v0.5.1/umoci.linux.amd64 -o umoci - chmod +x umoci - sudo mv umoci /usr/local/bin/umoci - - wget https://go.dev/dl/go1.25.1.linux-amd64.tar.gz - sudo tar -C /usr/local -xzf go1.25.1.linux-amd64.tar.gz - export PATH=$PATH:/usr/local/go/bin - go version - - - name: Run sandbox tests - run: make test-sandbox diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 0000000..4cbe031 --- /dev/null +++ b/Dockerfile @@ -0,0 +1,60 @@ +FROM golang:1.25 AS builder +WORKDIR /src + +COPY go.mod go.sum ./ +RUN --mount=type=cache,target=/go/pkg/mod --mount=type=cache,target=/root/.cache/go-build \ + go mod download + +COPY . . +RUN --mount=type=cache,target=/go/pkg/mod --mount=type=cache,target=/root/.cache/go-build \ + CGO_ENABLED=1 GOOS=linux GOARCH=amd64 \ + go build -ldflags="-s -w" -o /usr/local/bin/castletown ./main.go + +FROM debian:13-slim AS rootfs +ENV DEBIAN_FRONTEND=noninteractive +RUN apt-get update && apt-get install -y --no-install-recommends \ + ca-certificates \ + skopeo \ + umoci \ + uidmap \ + && rm -rf /var/lib/apt/lists/* + +COPY scripts/rootfs.sh /tmp/rootfs.sh +RUN chmod +x /tmp/rootfs.sh \ + && JUDGE_IMAGES_DIR=/var/castletown/images /tmp/rootfs.sh + +FROM debian:13-slim AS runtime +ENV DEBIAN_FRONTEND=noninteractive + +RUN apt-get update && apt-get install -y --no-install-recommends \ + bash \ + ca-certificates \ + curl \ + fuse-overlayfs \ + iptables \ + tini \ + uidmap \ + && rm -rf /var/lib/apt/lists/* + +WORKDIR /opt/castletown + +COPY --from=builder /usr/local/bin/castletown /usr/local/bin/castletown +COPY --from=rootfs /var/castletown/images /var/castletown/images +COPY scripts/rootfs.sh /opt/castletown/scripts/rootfs.sh +COPY docker/entrypoint.sh 
/usr/local/bin/castletown-entrypoint + +RUN chmod +x /usr/local/bin/castletown-entrypoint /opt/castletown/scripts/rootfs.sh + +ENV CASTLETOWN_SKIP_ROOTFS=1 \ + BLOB_ROOT=/var/castletown/blobs \ + WORK_ROOT=/tmp/castletown/work \ + JUDGE_IMAGES_DIR=/var/castletown/images \ + JUDGE_OVERLAYFS_DIR=/tmp/castletown/overlayfs \ + STORAGE_DIR=/tmp/castletown/storage \ + JUDGE_LIBCONTAINER_DIR=/tmp/castletown/libcontainer \ + JUDGE_ROOTFS_DIR=/tmp/castletown/rootfs \ + JUDGE_DISK_CACHE_DIR=/var/castletown/testcases \ + PROBLEM_CACHE_DIR=/var/castletown/problems + +ENTRYPOINT ["/usr/bin/tini","--","castletown-entrypoint"] +CMD ["castletown","start"] diff --git a/Makefile b/Makefile index 7588113..f685b6f 100644 --- a/Makefile +++ b/Makefile @@ -4,42 +4,62 @@ all: .PHONY: prepare-dirs prepare-dirs: @sudo mkdir -p /tmp/castletown/storage - @sudo mkdir -p /tmp/castletown/images @sudo mkdir -p /tmp/castletown/libcontainer @sudo mkdir -p /tmp/castletown/overlayfs @sudo mkdir -p /tmp/castletown/rootfs + @sudo mkdir -p /tmp/castletown/work + @sudo mkdir -p /var/castletown/images + @sudo mkdir -p /var/castletown/testcases .PHONY: make-rootfs make-rootfs: prepare-dirs sudo bash scripts/rootfs.sh -.PHONY: test-sandbox -test-sandbox: make-rootfs - @echo "Running sandbox tests..." - sudo env "PATH=$$PATH:/usr/local/go/bin" go test github.com/joshjms/castletown/sandbox -v - -.PHONY: test-job -test-job: make-rootfs - @echo "Running job tests..." - sudo env "PATH=$$PATH:/usr/local/go/bin" go test github.com/joshjms/castletown/job -v - -.PHONY: test-e2e -test-e2e: prepare-dirs make-rootfs - @echo "Running end-to-end tests..." - @echo "Building castletown..." - @sudo env "PATH=$$PATH:/usr/local/go/bin" go build -o tmp/castletown main.go - @echo "Starting castletown server..." 
- @sudo tmp/castletown server & - @sleep 2 - sudo env "PATH=$$PATH:/usr/local/go/bin" go test -v ./tests/e2e -timeout 2m - @sudo pkill castletown - @sudo rm -rf tmp - .PHONY: build build: bash scripts/build.sh .PHONY: dev dev: - sudo env "PATH=$$PATH:/usr/local/go/bin" go run main.go server + sudo env "PATH=$$PATH:/usr/local/go/bin" go run main.go start + +.PHONY: docker-up +docker-up: + docker compose up --build -d + +.PHONY: docker-down +docker-down: + docker compose down + +.PHONY: docker-logs +docker-logs: + docker compose logs -f + +.PHONY: migrate-up +migrate-up: + migrate -path db/migrations -database "postgres://castletown:castletown@localhost:5432/castletown?sslmode=disable" up + +.PHONY: migrate-down +migrate-down: + migrate -path db/migrations -database "postgres://castletown:castletown@localhost:5432/castletown?sslmode=disable" down + +.PHONY: migrate-create +migrate-create: + @read -p "Enter migration name: " name; \ + migrate create -ext sql -dir db/migrations -seq $$name + + +.PHONY: test-grader +test-grader: + @echo "Running grader tests..." + sudo env "PATH=$$PATH:/usr/local/go/bin" go test github.com/joshjms/castletown/internal/grader -v + +.PHONY: test-worker +test-worker: make-rootfs + @echo "Running worker tests..." + sudo env "PATH=$$PATH:/usr/local/go/bin" go test github.com/joshjms/castletown/internal/worker -v +.PHONY: test-container +test-container: make-rootfs + @echo "Running container tests..." + sudo env "PATH=$$PATH:/usr/local/go/bin" go test github.com/joshjms/castletown/internal/container -v diff --git a/README.md b/README.md index 75b894b..f52d3a0 100644 --- a/README.md +++ b/README.md @@ -1,15 +1,42 @@ # castletown -## Usage +## Docker quickstart -``` -castletown server [flags] -``` +1. Install Docker (24.x or newer) with Compose v2 and ensure you can run privileged containers. +2. 
From the repository root run: + ```bash + docker compose up --build + ``` + The first boot downloads the `gcc:15-bookworm` root filesystem via `skopeo`/`umoci`, so expect the initial `castletown` container start to take a few minutes. +3. Watch the worker logs: + ```bash + docker compose logs -f castletown + ``` +4. When you are done: + ```bash + docker compose down + ``` -## Getting Started +The Compose stack launches: -You can find the guide [here](docs/getting-started.md). +- `castletown`: the sandbox worker. Runs privileged so it can create nested containers, exposes metrics on `:9090`, and consumes submissions from RabbitMQ. +- `postgres`: stores problems, submissions, and metadata. +- `rabbitmq`: queue that feeds submissions to the worker (management UI on ). +- `minio`: placeholder object storage for large blobs and artifacts (console on ). + +Named Docker volumes keep the worker stateful directories (`/tmp/castletown/*`, `/var/castletown/*`) so that cached images, overlays, and problem artifacts survive container restarts. + +### Useful commands + +- Rebuild just the worker image: `docker compose build castletown` +- Tail only dependency logs: `docker compose logs -f postgres rabbitmq minio` +- Open a shell inside the worker: `docker compose exec castletown bash` +- Skip the automatic `gcc-15-bookworm` bootstrap if you already populated `castletown-images`: `CASTLETOWN_SKIP_ROOTFS=1 docker compose up` + +## Manual setup + +If you need to run Castletown directly on a host (without Docker), follow the more detailed [Getting Started guide](docs/getting-started.md) to prepare cgroup delegation, rootfs images, and prerequisites. ## Contributing -TODO +Contributions are welcome! Please open an issue or pull request with improvements. When changing the worker runtime, make sure the Docker image stays reproducible and update this README accordingly. 
diff --git a/client/README.md b/client/README.md deleted file mode 100644 index 1520c49..0000000 --- a/client/README.md +++ /dev/null @@ -1,414 +0,0 @@ -# Castletown Client Library - -A comprehensive Go client library for interacting with the castletown sandboxed code execution service. This library provides both HTTP REST and gRPC clients with a unified interface. - -## Features - -- **Dual Protocol Support**: Connect via HTTP REST or gRPC -- **Unified Interface**: Same API regardless of protocol -- **Fluent Builder API**: Easy-to-use builder patterns for constructing requests -- **Type Safety**: Strongly-typed request/response structures -- **Context Support**: Full support for context-based cancellation and timeouts -- **Helper Functions**: Pre-built patterns for common use cases (compile-and-run, etc.) - -## Installation - -```bash -go get github.com/joshjms/castletown/client -``` - -## Quick Start - -### HTTP Client - -```go -package main - -import ( - "context" - "log" - "github.com/joshjms/castletown/client" -) - -func main() { - // Create HTTP client - c, err := client.NewHTTPClient("http://localhost:8000", nil) - if err != nil { - log.Fatal(err) - } - defer c.Close() - - // Build and execute request - req := client.NewRequest(). - AddFile("hello.txt", "Hello, World!"). - AddStep(func(p *client.ProcessBuilder) { - p.WithImage("gcc:15-bookworm"). - WithCommand("/bin/cat", "hello.txt"). - WithFiles("hello.txt") - }). 
- Build() - - resp, err := c.Execute(context.Background(), req) - if err != nil { - log.Fatal(err) - } - - log.Printf("Output: %s", resp.Reports[0].Stdout) -} -``` - -### gRPC Client - -```go -// Create gRPC client -opts := &client.ClientOptions{ - GRPCOptions: &client.GRPCOptions{ - Insecure: true, - }, -} -c, err := client.NewGRPCClient("localhost:8001", opts) -if err != nil { - log.Fatal(err) -} -defer c.Close() - -// Use the same API as HTTP client -``` - -## Core Concepts - -### Client Interface - -The `Client` interface provides three main methods: - -```go -type Client interface { - // Execute submits a job and returns results - Execute(ctx context.Context, req *ExecRequest) (*ExecResponse, error) - - // Done marks a job as complete (cleanup) - Done(ctx context.Context, jobID string) error - - // Close releases client resources - Close() error -} -``` - -### Execution Request - -An execution request consists of: -- **Files**: Files to create in the sandbox -- **Steps**: Sequential processes to execute -- **ID**: Optional job identifier (auto-generated if not provided) - -```go -type ExecRequest struct { - ID string - Files []File - Steps []Process -} -``` - -### Process Configuration - -Each process step supports: -- **Image**: Container image name -- **Command**: Command and arguments -- **Stdin**: Standard input -- **Resource Limits**: Memory, time, and process limits -- **Files**: Which files to make available -- **Persist**: Which files to keep for next step - -```go -type Process struct { - Image string - Cmd []string - Stdin string - MemoryLimitMB int64 - TimeLimitMs uint64 - ProcLimit int64 - Files []string - Persist []string -} -``` - -## Builder API - -### RequestBuilder - -The fluent builder API makes it easy to construct requests: - -```go -req := client.NewRequest(). - WithID("my-job-id"). - AddFile("source.cpp", cppCode). - AddStep(func(p *client.ProcessBuilder) { - p.WithImage("gcc:15-bookworm"). 
- WithCommand("g++", "source.cpp", "-o", "program"). - WithFiles("source.cpp"). - WithPersist("program"). - WithMemoryLimit(512). - WithTimeLimit(10000) - }). - AddStep(func(p *client.ProcessBuilder) { - p.WithImage("gcc:15-bookworm"). - WithCommand("./program"). - WithFiles("program"). - WithStdin("test input"). - WithMemoryLimit(256). - WithTimeLimit(5000) - }). - Build() -``` - -### ProcessBuilder - -Configure individual process steps: - -```go -p.WithImage("gcc:15-bookworm"). // Container image - WithCommand("g++", "main.cpp"). // Command and args - WithStdin("input data"). // Standard input - WithMemoryLimit(512). // Memory limit (MB) - WithTimeLimit(10000). // Time limit (ms) - WithProcLimit(10). // Process limit - WithFiles("main.cpp", "header.h"). // Available files - WithPersist("main", "output.txt") // Files to persist -``` - -## Helper Functions - -### SimpleExecRequest - -For simple single-step executions: - -```go -req := client.SimpleExecRequest( - "gcc:15-bookworm", // Image - []string{"/bin/cat", "file.txt"}, // Command - map[string]string{ // Files - "file.txt": "content", - }, -) -``` - -### CompileAndRunRequest - -For compile-then-run workflows: - -```go -req := client.CompileAndRunRequest( - "gcc:15-bookworm", // Compile image - []string{"g++", "main.cpp", "-o", "main"}, // Compile command - "gcc:15-bookworm", // Run image - []string{"./main"}, // Run command - map[string]string{"main.cpp": sourceCode}, // Source files - []string{"main"}, // Compiled outputs - "test input", // Stdin for run -) -``` - -## Response Handling - -### Execution Response - -```go -type ExecResponse struct { - ID string // Job ID - Reports []Report // One per step -} -``` - -### Report Structure - -Each report contains: - -```go -type Report struct { - Status Status // Execution status - ExitCode int32 // Process exit code - Signal int32 // Termination signal (-1 if normal) - Stdout string // Standard output - Stderr string // Standard error - CPUTime uint64 // CPU time 
(nanoseconds) - Memory uint64 // Peak memory (bytes) - WallTime int64 // Wall time (milliseconds) - StartAt int64 // Start timestamp (ns) - FinishAt int64 // Finish timestamp (ns) -} -``` - -### Status Codes - -```go -const ( - StatusOK // Successful execution - StatusRuntimeError // Program error - StatusTimeLimitExceeded // Time limit hit - StatusMemoryLimitExceeded // Memory limit hit - StatusOutputLimitExceeded // Output too large - StatusTerminated // Process terminated - StatusUnknown // Unknown error - StatusSkipped // Step skipped -) -``` - -## Configuration Options - -### HTTP Client Options - -```go -opts := &client.ClientOptions{ - Address: "http://localhost:8000", - Timeout: 30 * time.Second, -} -c, err := client.NewHTTPClient("", opts) -``` - -### gRPC Client Options - -```go -opts := &client.ClientOptions{ - Address: "localhost:8001", - Timeout: 30 * time.Second, - GRPCOptions: &client.GRPCOptions{ - Insecure: true, // Disable TLS - MaxMessageSize: 4 * 1024 * 1024, // 4MB max message - }, -} -c, err := client.NewGRPCClient("", opts) -``` - -## Examples - -See the `examples/` directory for complete working examples: - -- **basic_http/**: Simple HTTP client example -- **basic_grpc/**: Simple gRPC client example -- **compile_and_run/**: Compile and run C++ code -- **advanced/**: Multi-step execution with resource limits - -### Running Examples - -```bash -# Start the castletown server first -cd /path/to/castletown -go run main.go server - -# Run examples -cd client/examples/basic_http -go run main.go - -cd ../compile_and_run -go run main.go - -cd ../advanced -go run main.go -``` - -## Error Handling - -Always check for errors and handle them appropriately: - -```go -resp, err := c.Execute(ctx, req) -if err != nil { - log.Fatalf("Execution failed: %v", err) -} - -for i, report := range resp.Reports { - if report.Status != client.StatusOK { - log.Printf("Step %d failed: %s", i, report.Status) - log.Printf("Stderr: %s", report.Stderr) - } -} -``` - -## 
Context and Timeouts - -Use contexts for cancellation and timeouts: - -```go -// With timeout -ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) -defer cancel() - -resp, err := c.Execute(ctx, req) - -// With cancellation -ctx, cancel := context.WithCancel(context.Background()) -go func() { - time.Sleep(10 * time.Second) - cancel() // Cancel after 10 seconds -}() - -resp, err := c.Execute(ctx, req) -``` - -## Resource Cleanup - -Always clean up resources: - -```go -// Close client when done -defer c.Close() - -// Mark jobs as done for cleanup -resp, err := c.Execute(ctx, req) -if err == nil { - defer c.Done(ctx, resp.ID) -} -``` - -## Best Practices - -1. **Use context timeouts**: Always set appropriate timeouts -2. **Clean up jobs**: Call `Done()` after execution to free server resources -3. **Close clients**: Always `defer c.Close()` after creating clients -4. **Check status codes**: Verify `report.Status` for each step -5. **Handle errors gracefully**: Check stderr for error details -6. **Set resource limits**: Use `WithMemoryLimit()` and `WithTimeLimit()` to prevent runaway processes -7. **Persist selectively**: Only persist files needed for subsequent steps -8. **Use builders**: Leverage the fluent builder API for cleaner code - -## Troubleshooting - -### Connection Issues - -- Verify server is running: `curl http://localhost:8000/exec` -- Check firewall settings -- For gRPC, ensure port 8001 is accessible - -### Execution Failures - -- Check `report.Stderr` for error messages -- Verify container image exists on server -- Ensure files are properly included with `WithFiles()` -- Check resource limits are sufficient - -### Image Not Found - -Images must be available on the server. 
See castletown docs for image preparation: - -```bash -# On the server -skopeo copy docker://gcc:15-bookworm oci:images/gcc:15-bookworm -umoci unpack --image images/gcc:15-bookworm rootfs/gcc:15-bookworm -``` - -## API Reference - -Full API documentation is available via godoc: - -```bash -godoc -http=:6060 -# Visit http://localhost:6060/pkg/github.com/joshjms/castletown/client/ -``` - -## License - -Same as castletown main project. - -## Contributing - -Contributions are welcome! Please submit issues and pull requests to the main castletown repository. diff --git a/client/builder.go b/client/builder.go deleted file mode 100644 index 9a66e61..0000000 --- a/client/builder.go +++ /dev/null @@ -1,187 +0,0 @@ -package client - -// RequestBuilder provides a fluent API for building execution requests. -type RequestBuilder struct { - req *ExecRequest -} - -// NewRequest creates a new RequestBuilder. -func NewRequest() *RequestBuilder { - return &RequestBuilder{ - req: &ExecRequest{ - Files: []File{}, - Steps: []Process{}, - }, - } -} - -// WithID sets the job ID for the request. -func (b *RequestBuilder) WithID(id string) *RequestBuilder { - b.req.ID = id - return b -} - -// AddFile adds a file to the request. -func (b *RequestBuilder) AddFile(name, content string) *RequestBuilder { - b.req.Files = append(b.req.Files, File{ - Name: name, - Content: content, - }) - return b -} - -// AddStep adds a process/step to the request using a ProcessBuilder. -func (b *RequestBuilder) AddStep(fn func(*ProcessBuilder)) *RequestBuilder { - pb := NewProcess() - fn(pb) - b.req.Steps = append(b.req.Steps, pb.Build()) - return b -} - -// Build returns the constructed ExecRequest. -func (b *RequestBuilder) Build() *ExecRequest { - return b.req -} - -// ProcessBuilder provides a fluent API for building process specifications. -type ProcessBuilder struct { - proc Process -} - -// NewProcess creates a new ProcessBuilder. 
-func NewProcess() *ProcessBuilder { - return &ProcessBuilder{ - proc: Process{ - Cmd: []string{}, - Files: []string{}, - Persist: []string{}, - }, - } -} - -// WithImage sets the container image for the process. -func (p *ProcessBuilder) WithImage(image string) *ProcessBuilder { - p.proc.Image = image - return p -} - -// WithCommand sets the command and arguments for the process. -func (p *ProcessBuilder) WithCommand(cmd ...string) *ProcessBuilder { - p.proc.Cmd = cmd - return p -} - -// WithStdin sets the standard input for the process. -func (p *ProcessBuilder) WithStdin(stdin string) *ProcessBuilder { - p.proc.Stdin = stdin - return p -} - -// WithMemoryLimit sets the memory limit in megabytes. -func (p *ProcessBuilder) WithMemoryLimit(mb int64) *ProcessBuilder { - p.proc.MemoryLimitMB = mb - return p -} - -// WithTimeLimit sets the time limit in milliseconds. -func (p *ProcessBuilder) WithTimeLimit(ms uint64) *ProcessBuilder { - p.proc.TimeLimitMs = ms - return p -} - -// WithProcLimit sets the maximum number of processes. -func (p *ProcessBuilder) WithProcLimit(limit int64) *ProcessBuilder { - p.proc.ProcLimit = limit - return p -} - -// WithFiles specifies which files to make available in this step. -func (p *ProcessBuilder) WithFiles(files ...string) *ProcessBuilder { - p.proc.Files = append(p.proc.Files, files...) - return p -} - -// WithPersist specifies which files to persist to the next step. -func (p *ProcessBuilder) WithPersist(files ...string) *ProcessBuilder { - p.proc.Persist = append(p.proc.Persist, files...) - return p -} - -// Build returns the constructed Process. -func (p *ProcessBuilder) Build() Process { - return p.proc -} - -// Common helper functions for building requests - -// SimpleExecRequest creates a simple single-step execution request. -// This is a convenience function for common use cases. 
-func SimpleExecRequest(image string, cmd []string, files map[string]string) *ExecRequest { - req := NewRequest() - - // Add all files - for name, content := range files { - req.AddFile(name, content) - } - - // Add single step with all files - fileNames := make([]string, 0, len(files)) - for name := range files { - fileNames = append(fileNames, name) - } - - req.AddStep(func(p *ProcessBuilder) { - p.WithImage(image). - WithCommand(cmd...). - WithFiles(fileNames...) - }) - - return req.Build() -} - -// CompileAndRunRequest creates a two-step request for compile-then-run scenarios. -// This is useful for compiled languages like C++, Java, etc. -func CompileAndRunRequest( - compileImage string, - compileCmd []string, - runImage string, - runCmd []string, - sourceFiles map[string]string, - compiledOutputs []string, - stdin string, -) *ExecRequest { - req := NewRequest() - - // Add source files - for name, content := range sourceFiles { - req.AddFile(name, content) - } - - // Get source file names - sourceFileNames := make([]string, 0, len(sourceFiles)) - for name := range sourceFiles { - sourceFileNames = append(sourceFileNames, name) - } - - // Step 1: Compile - req.AddStep(func(p *ProcessBuilder) { - p.WithImage(compileImage). - WithCommand(compileCmd...). - WithFiles(sourceFileNames...). - WithPersist(compiledOutputs...). - WithMemoryLimit(512). - WithTimeLimit(10000) - }) - - // Step 2: Run - req.AddStep(func(p *ProcessBuilder) { - p.WithImage(runImage). - WithCommand(runCmd...). - WithFiles(compiledOutputs...). - WithStdin(stdin). - WithMemoryLimit(256). - WithTimeLimit(5000) - }) - - return req.Build() -} diff --git a/client/client.go b/client/client.go deleted file mode 100644 index 761b72c..0000000 --- a/client/client.go +++ /dev/null @@ -1,307 +0,0 @@ -// Package client provides a high-level client library for interacting with castletown -// sandboxed code execution service. It supports both HTTP REST and gRPC protocols. 
-package client - -import ( - "context" - "time" - - pb "github.com/joshjms/castletown/proto" -) - -// Client is the unified interface for interacting with castletown service. -// It supports both HTTP REST and gRPC protocols through different implementations. -type Client interface { - // Execute submits a job for execution and returns the results. - // If req.ID is empty, a unique ID will be generated by the server. - Execute(ctx context.Context, req *ExecRequest) (*ExecResponse, error) - - // Done notifies the server that the job is complete and can be cleaned up. - // This is optional but recommended to free up resources on the server. - Done(ctx context.Context, jobID string) error - - // Close closes the client and releases any resources. - Close() error -} - -// ExecRequest represents a code execution request. -// It contains files to be created in the sandbox and steps/processes to execute. -type ExecRequest struct { - // ID is a unique identifier for this job. If empty, the server will generate one. - ID string - - // Files are the files to create in the sandbox environment. - Files []File - - // Steps are the processes to execute sequentially. - // Each step can access files from previous steps if persisted. - Steps []Process -} - -// File represents a file to be created in the sandbox. -type File struct { - // Name is the filename (can include relative paths). - Name string - - // Content is the file content. - Content string -} - -// Process represents a single execution step in the sandbox. -type Process struct { - // Image is the container image name (must be available on server). - // Example: "gcc:15-bookworm" - Image string - - // Cmd is the command and arguments to execute. - // Example: []string{"g++", "main.cpp", "-o", "main"} - Cmd []string - - // Stdin is the standard input to provide to the process (optional). - Stdin string - - // MemoryLimitMB is the memory limit in megabytes (0 = unlimited). 
- MemoryLimitMB int64 - - // TimeLimitMs is the time limit in milliseconds (0 = unlimited). - TimeLimitMs uint64 - - // ProcLimit is the maximum number of processes (0 = unlimited). - ProcLimit int64 - - // Files specifies which files to make available in this step. - // References files by name from the Files array. - Files []string - - // Persist specifies which output files to persist to the next step. - // Only files listed here will be available to subsequent steps. - Persist []string -} - -// ExecResponse contains the execution results. -type ExecResponse struct { - // ID is the unique job identifier. - ID string - - // Reports contains one report per executed process/step. - Reports []Report -} - -// Report contains the execution results for a single process. -type Report struct { - // Status indicates the execution status. - Status Status - - // ExitCode is the process exit code. - ExitCode int32 - - // Signal is the signal that terminated the process (-1 if normal exit). - Signal int32 - - // Stdout is the standard output captured from the process. - Stdout string - - // Stderr is the standard error captured from the process. - Stderr string - - // CPUTime is the CPU time used in nanoseconds. - CPUTime uint64 - - // Memory is the peak memory usage in bytes. - Memory uint64 - - // WallTime is the wall clock time in milliseconds. - WallTime int64 - - // StartAt is the start timestamp in Unix nanoseconds. - StartAt int64 - - // FinishAt is the finish timestamp in Unix nanoseconds. - FinishAt int64 -} - -// Status represents the execution status of a process. -type Status int32 - -const ( - StatusUnspecified Status = 0 - StatusOK Status = 1 - StatusRuntimeError Status = 2 - StatusTimeLimitExceeded Status = 3 - StatusMemoryLimitExceeded Status = 4 - StatusOutputLimitExceeded Status = 5 - StatusTerminated Status = 6 - StatusUnknown Status = 7 - StatusSkipped Status = 8 -) - -// String returns the string representation of the status. 
-func (s Status) String() string { - switch s { - case StatusUnspecified: - return "UNSPECIFIED" - case StatusOK: - return "OK" - case StatusRuntimeError: - return "RUNTIME_ERROR" - case StatusTimeLimitExceeded: - return "TIME_LIMIT_EXCEEDED" - case StatusMemoryLimitExceeded: - return "MEMORY_LIMIT_EXCEEDED" - case StatusOutputLimitExceeded: - return "OUTPUT_LIMIT_EXCEEDED" - case StatusTerminated: - return "TERMINATED" - case StatusUnknown: - return "UNKNOWN" - case StatusSkipped: - return "SKIPPED" - default: - return "UNKNOWN" - } -} - -// ClientOptions contains configuration options for creating a client. -type ClientOptions struct { - // Address is the server address. For HTTP, include the scheme (http://localhost:8000). - // For gRPC, use host:port format (localhost:8001). - Address string - - // Timeout is the default timeout for requests (default: 30 seconds). - Timeout time.Duration - - // GRPCOptions contains additional gRPC-specific options (only used for gRPC client). - GRPCOptions *GRPCOptions -} - -// GRPCOptions contains gRPC-specific configuration options. -type GRPCOptions struct { - // Insecure, when true, disables transport security (TLS). - // Use this for testing or when connecting to insecure servers. - Insecure bool - - // MaxMessageSize sets the maximum message size in bytes for gRPC (default: 4MB). - MaxMessageSize int -} - -// NewHTTPClient creates a new HTTP REST client for castletown. 
-// -// Example: -// -// client, err := client.NewHTTPClient("http://localhost:8000", nil) -// if err != nil { -// log.Fatal(err) -// } -// defer client.Close() -func NewHTTPClient(address string, opts *ClientOptions) (Client, error) { - if opts == nil { - opts = &ClientOptions{ - Address: address, - Timeout: 30 * time.Second, - } - } - if opts.Address == "" { - opts.Address = address - } - if opts.Timeout == 0 { - opts.Timeout = 30 * time.Second - } - - return &httpClient{ - address: opts.Address, - timeout: opts.Timeout, - }, nil -} - -// NewGRPCClient creates a new gRPC client for castletown. -// -// Example: -// -// opts := &client.ClientOptions{ -// GRPCOptions: &client.GRPCOptions{ -// Insecure: true, -// }, -// } -// client, err := client.NewGRPCClient("localhost:8001", opts) -// if err != nil { -// log.Fatal(err) -// } -// defer client.Close() -func NewGRPCClient(address string, opts *ClientOptions) (Client, error) { - if opts == nil { - opts = &ClientOptions{ - Address: address, - Timeout: 30 * time.Second, - GRPCOptions: &GRPCOptions{ - Insecure: true, - MaxMessageSize: 4 * 1024 * 1024, // 4MB - }, - } - } - if opts.Address == "" { - opts.Address = address - } - if opts.Timeout == 0 { - opts.Timeout = 30 * time.Second - } - if opts.GRPCOptions == nil { - opts.GRPCOptions = &GRPCOptions{ - Insecure: true, - MaxMessageSize: 4 * 1024 * 1024, - } - } - if opts.GRPCOptions.MaxMessageSize == 0 { - opts.GRPCOptions.MaxMessageSize = 4 * 1024 * 1024 - } - - return newGRPCClient(address, opts) -} - -// Helper functions to convert between client types and protobuf types - -func toProtoFiles(files []File) []*pb.File { - result := make([]*pb.File, len(files)) - for i, f := range files { - result[i] = &pb.File{ - Name: f.Name, - Content: f.Content, - } - } - return result -} - -func toProtoProcesses(processes []Process) []*pb.Process { - result := make([]*pb.Process, len(processes)) - for i, p := range processes { - result[i] = &pb.Process{ - Image: p.Image, - Cmd: 
p.Cmd, - Stdin: p.Stdin, - MemoryLimitMb: p.MemoryLimitMB, - TimeLimitMs: p.TimeLimitMs, - ProcLimit: p.ProcLimit, - Files: p.Files, - Persist: p.Persist, - } - } - return result -} - -func fromProtoReports(reports []*pb.Report) []Report { - result := make([]Report, len(reports)) - for i, r := range reports { - result[i] = Report{ - Status: Status(r.Status), - ExitCode: r.ExitCode, - Signal: r.Signal, - Stdout: r.Stdout, - Stderr: r.Stderr, - CPUTime: r.CpuTime, - Memory: r.Memory, - WallTime: r.WallTime, - StartAt: r.StartAt, - FinishAt: r.FinishAt, - } - } - return result -} diff --git a/client/examples/advanced/main.go b/client/examples/advanced/main.go deleted file mode 100644 index a4e4b35..0000000 --- a/client/examples/advanced/main.go +++ /dev/null @@ -1,121 +0,0 @@ -package main - -import ( - "context" - "fmt" - "log" - "time" - - "github.com/joshjms/castletown/client" -) - -func main() { - // Create HTTP client with custom timeout - opts := &client.ClientOptions{ - Timeout: 60 * time.Second, - } - c, err := client.NewHTTPClient("http://localhost:8000", opts) - if err != nil { - log.Fatalf("Failed to create client: %v", err) - } - defer c.Close() - - // Python source code - pythonSource := `import sys - -def factorial(n): - if n <= 1: - return 1 - return n * factorial(n - 1) - -n = int(input()) -result = factorial(n) -print(f"Factorial of {n} is {result}") -` - - // Create a request with resource limits - req := client.NewRequest(). - WithID("advanced-example-job"). // Custom job ID - AddFile("factorial.py", pythonSource). - AddFile("input.txt", "10"). - AddStep(func(p *client.ProcessBuilder) { - p.WithImage("gcc:15-bookworm"). - WithCommand("/bin/cat", "input.txt"). - WithFiles("input.txt"). - WithPersist("input.txt"). // Persist to next step - WithMemoryLimit(128). // 128 MB limit - WithTimeLimit(2000). // 2 second limit - WithProcLimit(10) // Max 10 processes - }). - AddStep(func(p *client.ProcessBuilder) { - p.WithImage("gcc:15-bookworm"). 
- WithCommand("python3", "factorial.py"). - WithFiles("factorial.py", "input.txt"). - WithStdin("10"). // Alternative: use stdin directly - WithMemoryLimit(256). - WithTimeLimit(5000). - WithProcLimit(20) - }). - Build() - - // Execute with context - ctx := context.Background() - resp, err := c.Execute(ctx, req) - if err != nil { - log.Fatalf("Failed to execute: %v", err) - } - - // Print detailed results - fmt.Printf("Job ID: %s\n", resp.ID) - fmt.Printf("Total steps: %d\n", len(resp.Reports)) - - for i, report := range resp.Reports { - fmt.Printf("\n=== Step %d ===\n", i+1) - fmt.Printf("Status: %s\n", report.Status) - fmt.Printf("Exit Code: %d\n", report.ExitCode) - - if report.Signal != -1 { - fmt.Printf("Signal: %d\n", report.Signal) - } - - // Check for errors - switch report.Status { - case client.StatusOK: - fmt.Println("✓ Execution successful") - case client.StatusRuntimeError: - fmt.Println("✗ Runtime error") - case client.StatusTimeLimitExceeded: - fmt.Println("✗ Time limit exceeded") - case client.StatusMemoryLimitExceeded: - fmt.Println("✗ Memory limit exceeded") - default: - fmt.Printf("✗ Status: %s\n", report.Status) - } - - // Output - if report.Stdout != "" { - fmt.Printf("\nStdout:\n%s\n", report.Stdout) - } - if report.Stderr != "" { - fmt.Printf("\nStderr:\n%s\n", report.Stderr) - } - - // Resource usage - fmt.Printf("\nResource Usage:\n") - fmt.Printf(" CPU Time: %.2f ms\n", float64(report.CPUTime)/1_000_000) - fmt.Printf(" Memory: %.2f MB\n", float64(report.Memory)/1_048_576) - fmt.Printf(" Wall Time: %d ms\n", report.WallTime) - - // Timing - duration := time.Duration(report.FinishAt - report.StartAt) - fmt.Printf(" Total Duration: %v\n", duration) - } - - // Mark job as done - fmt.Println("\nCleaning up...") - if err := c.Done(ctx, resp.ID); err != nil { - log.Printf("Warning: Failed to mark job as done: %v", err) - } else { - fmt.Println("Job marked as done successfully") - } -} diff --git a/client/examples/basic_grpc/main.go 
b/client/examples/basic_grpc/main.go deleted file mode 100644 index d7acaf3..0000000 --- a/client/examples/basic_grpc/main.go +++ /dev/null @@ -1,57 +0,0 @@ -package main - -import ( - "context" - "fmt" - "log" - - "github.com/joshjms/castletown/client" -) - -func main() { - // Create gRPC client - opts := &client.ClientOptions{ - GRPCOptions: &client.GRPCOptions{ - Insecure: true, // Use insecure connection (for testing) - }, - } - c, err := client.NewGRPCClient("localhost:8001", opts) - if err != nil { - log.Fatalf("Failed to create client: %v", err) - } - defer c.Close() - - // Create a simple request using the builder - req := client.NewRequest(). - AddFile("hello.txt", "Hello from gRPC!"). - AddStep(func(p *client.ProcessBuilder) { - p.WithImage("gcc:15-bookworm"). - WithCommand("/bin/cat", "hello.txt"). - WithFiles("hello.txt") - }). - Build() - - // Execute the request - ctx := context.Background() - resp, err := c.Execute(ctx, req) - if err != nil { - log.Fatalf("Failed to execute: %v", err) - } - - // Print results - fmt.Printf("Job ID: %s\n", resp.ID) - for i, report := range resp.Reports { - fmt.Printf("\nStep %d:\n", i+1) - fmt.Printf(" Status: %s\n", report.Status) - fmt.Printf(" Exit Code: %d\n", report.ExitCode) - fmt.Printf(" Stdout: %s\n", report.Stdout) - fmt.Printf(" Stderr: %s\n", report.Stderr) - fmt.Printf(" CPU Time: %d ns\n", report.CPUTime) - fmt.Printf(" Memory: %d bytes\n", report.Memory) - } - - // Mark job as done (cleanup) - if err := c.Done(ctx, resp.ID); err != nil { - log.Printf("Warning: Failed to mark job as done: %v", err) - } -} diff --git a/client/examples/basic_http/main.go b/client/examples/basic_http/main.go deleted file mode 100644 index f50ed5a..0000000 --- a/client/examples/basic_http/main.go +++ /dev/null @@ -1,52 +0,0 @@ -package main - -import ( - "context" - "fmt" - "log" - - "github.com/joshjms/castletown/client" -) - -func main() { - // Create HTTP client - c, err := client.NewHTTPClient("http://localhost:8000", 
nil) - if err != nil { - log.Fatalf("Failed to create client: %v", err) - } - defer c.Close() - - // Create a simple request using the builder - req := client.NewRequest(). - AddFile("hello.txt", "Hello, World!"). - AddStep(func(p *client.ProcessBuilder) { - p.WithImage("gcc:15-bookworm"). - WithCommand("/bin/cat", "hello.txt"). - WithFiles("hello.txt") - }). - Build() - - // Execute the request - ctx := context.Background() - resp, err := c.Execute(ctx, req) - if err != nil { - log.Fatalf("Failed to execute: %v", err) - } - - // Print results - fmt.Printf("Job ID: %s\n", resp.ID) - for i, report := range resp.Reports { - fmt.Printf("\nStep %d:\n", i+1) - fmt.Printf(" Status: %s\n", report.Status) - fmt.Printf(" Exit Code: %d\n", report.ExitCode) - fmt.Printf(" Stdout: %s\n", report.Stdout) - fmt.Printf(" Stderr: %s\n", report.Stderr) - fmt.Printf(" CPU Time: %d ns\n", report.CPUTime) - fmt.Printf(" Memory: %d bytes\n", report.Memory) - } - - // Mark job as done (cleanup) - if err := c.Done(ctx, resp.ID); err != nil { - log.Printf("Warning: Failed to mark job as done: %v", err) - } -} diff --git a/client/examples/compile_and_run/main.go b/client/examples/compile_and_run/main.go deleted file mode 100644 index f1fdcd1..0000000 --- a/client/examples/compile_and_run/main.go +++ /dev/null @@ -1,80 +0,0 @@ -package main - -import ( - "context" - "fmt" - "log" - - "github.com/joshjms/castletown/client" -) - -func main() { - // Create HTTP client - c, err := client.NewHTTPClient("http://localhost:8000", nil) - if err != nil { - log.Fatalf("Failed to create client: %v", err) - } - defer c.Close() - - // C++ source code - cppSource := `#include -#include - -int main() { - std::string name; - std::getline(std::cin, name); - std::cout << "Hello, " << name << "!" 
<< std::endl; - return 0; -}` - - // Use the compile-and-run helper - req := client.CompileAndRunRequest( - "gcc:15-bookworm", // Compile image - []string{"g++", "main.cpp", "-o", "main"}, // Compile command - "gcc:15-bookworm", // Run image - []string{"./main"}, // Run command - map[string]string{ // Source files - "main.cpp": cppSource, - }, - []string{"main"}, // Compiled outputs to persist - "World", // Stdin for the program - ) - - // Execute the request - ctx := context.Background() - resp, err := c.Execute(ctx, req) - if err != nil { - log.Fatalf("Failed to execute: %v", err) - } - - // Print results - fmt.Printf("Job ID: %s\n", resp.ID) - fmt.Println("\n--- Compilation Step ---") - compileReport := resp.Reports[0] - fmt.Printf("Status: %s\n", compileReport.Status) - fmt.Printf("Exit Code: %d\n", compileReport.ExitCode) - if compileReport.Stdout != "" { - fmt.Printf("Stdout: %s\n", compileReport.Stdout) - } - if compileReport.Stderr != "" { - fmt.Printf("Stderr: %s\n", compileReport.Stderr) - } - fmt.Printf("CPU Time: %d ns\n", compileReport.CPUTime) - fmt.Printf("Memory: %d bytes\n", compileReport.Memory) - - fmt.Println("\n--- Execution Step ---") - runReport := resp.Reports[1] - fmt.Printf("Status: %s\n", runReport.Status) - fmt.Printf("Exit Code: %d\n", runReport.ExitCode) - fmt.Printf("Stdout: %s\n", runReport.Stdout) - if runReport.Stderr != "" { - fmt.Printf("Stderr: %s\n", runReport.Stderr) - } - fmt.Printf("CPU Time: %d ns\n", runReport.CPUTime) - fmt.Printf("Memory: %d bytes\n", runReport.Memory) - - // Mark job as done (cleanup) - if err := c.Done(ctx, resp.ID); err != nil { - log.Printf("Warning: Failed to mark job as done: %v", err) - } -} diff --git a/client/grpc.go b/client/grpc.go deleted file mode 100644 index dc7887f..0000000 --- a/client/grpc.go +++ /dev/null @@ -1,111 +0,0 @@ -package client - -import ( - "context" - "fmt" - "time" - - pb "github.com/joshjms/castletown/proto" - "google.golang.org/grpc" - 
"google.golang.org/grpc/credentials/insecure" -) - -// grpcClient implements the Client interface using gRPC. -type grpcClient struct { - conn *grpc.ClientConn - execClient pb.ExecServiceClient - doneClient pb.DoneServiceClient - timeout time.Duration -} - -// newGRPCClient creates a new gRPC client. -func newGRPCClient(address string, opts *ClientOptions) (*grpcClient, error) { - // Build gRPC dial options - dialOpts := []grpc.DialOption{} - - if opts.GRPCOptions.Insecure { - dialOpts = append(dialOpts, grpc.WithTransportCredentials(insecure.NewCredentials())) - } - - if opts.GRPCOptions.MaxMessageSize > 0 { - dialOpts = append(dialOpts, - grpc.WithDefaultCallOptions( - grpc.MaxCallRecvMsgSize(opts.GRPCOptions.MaxMessageSize), - grpc.MaxCallSendMsgSize(opts.GRPCOptions.MaxMessageSize), - ), - ) - } - - // Connect to server - conn, err := grpc.NewClient(address, dialOpts...) - if err != nil { - return nil, fmt.Errorf("failed to connect to gRPC server: %w", err) - } - - return &grpcClient{ - conn: conn, - execClient: pb.NewExecServiceClient(conn), - doneClient: pb.NewDoneServiceClient(conn), - timeout: opts.Timeout, - }, nil -} - -// Execute submits a job for execution via gRPC. -func (c *grpcClient) Execute(ctx context.Context, req *ExecRequest) (*ExecResponse, error) { - // Set timeout if not already set in context - if _, hasDeadline := ctx.Deadline(); !hasDeadline { - var cancel context.CancelFunc - ctx, cancel = context.WithTimeout(ctx, c.timeout) - defer cancel() - } - - // Convert to protobuf format - pbReq := &pb.ExecRequest{ - Id: req.ID, - Files: toProtoFiles(req.Files), - Procs: toProtoProcesses(req.Steps), - } - - // Call gRPC method - pbResp, err := c.execClient.Execute(ctx, pbReq) - if err != nil { - return nil, fmt.Errorf("gRPC Execute failed: %w", err) - } - - // Convert response - return &ExecResponse{ - ID: pbResp.Id, - Reports: fromProtoReports(pbResp.Reports), - }, nil -} - -// Done notifies the server that a job is complete via gRPC. 
-func (c *grpcClient) Done(ctx context.Context, jobID string) error { - // Set timeout if not already set in context - if _, hasDeadline := ctx.Deadline(); !hasDeadline { - var cancel context.CancelFunc - ctx, cancel = context.WithTimeout(ctx, c.timeout) - defer cancel() - } - - // Create request - pbReq := &pb.DoneRequest{ - Id: jobID, - } - - // Call gRPC method - _, err := c.doneClient.Done(ctx, pbReq) - if err != nil { - return fmt.Errorf("gRPC Done failed: %w", err) - } - - return nil -} - -// Close closes the gRPC connection. -func (c *grpcClient) Close() error { - if c.conn != nil { - return c.conn.Close() - } - return nil -} diff --git a/client/http.go b/client/http.go deleted file mode 100644 index 7a67333..0000000 --- a/client/http.go +++ /dev/null @@ -1,217 +0,0 @@ -package client - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "io" - "net/http" - "time" -) - -// httpClient implements the Client interface using HTTP REST API. -type httpClient struct { - address string - timeout time.Duration - client *http.Client -} - -// httpExecRequest is the HTTP JSON request format for /exec endpoint. -type httpExecRequest struct { - ID string `json:"id,omitempty"` - Files []httpFile `json:"files"` - Steps []httpProcess `json:"steps"` -} - -// httpFile is the HTTP JSON format for a file. -type httpFile struct { - Name string `json:"name"` - Content string `json:"content"` -} - -// httpProcess is the HTTP JSON format for a process. -type httpProcess struct { - Image string `json:"image"` - Cmd []string `json:"cmd"` - Stdin string `json:"stdin,omitempty"` - MemoryLimitMB int64 `json:"memoryLimitMB,omitempty"` - TimeLimitMs uint64 `json:"timeLimitMs,omitempty"` - ProcLimit int64 `json:"procLimit,omitempty"` - Files []string `json:"files,omitempty"` - Persist []string `json:"persist,omitempty"` -} - -// httpExecResponse is the HTTP JSON response format for /exec endpoint. 
-type httpExecResponse struct { - ID string `json:"id"` - Reports []httpReport `json:"reports"` -} - -// httpReport is the HTTP JSON format for a report. -type httpReport struct { - Status string `json:"Status"` - ExitCode int32 `json:"ExitCode"` - Signal int32 `json:"Signal"` - Stdout string `json:"Stdout"` - Stderr string `json:"Stderr"` - CPUTime uint64 `json:"CPUTime"` - Memory uint64 `json:"Memory"` - WallTime int64 `json:"WallTime"` - StartAt int64 `json:"StartAt"` - FinishAt int64 `json:"FinishAt"` -} - -// httpDoneRequest is the HTTP JSON request format for /done endpoint. -type httpDoneRequest struct { - ID string `json:"id"` -} - -// Execute submits a job for execution via HTTP REST API. -func (c *httpClient) Execute(ctx context.Context, req *ExecRequest) (*ExecResponse, error) { - // Convert to HTTP format - httpReq := httpExecRequest{ - ID: req.ID, - Files: make([]httpFile, len(req.Files)), - Steps: make([]httpProcess, len(req.Steps)), - } - - for i, f := range req.Files { - httpReq.Files[i] = httpFile(f) - } - - for i, p := range req.Steps { - httpReq.Steps[i] = httpProcess(p) - } - - // Marshal to JSON - body, err := json.Marshal(httpReq) - if err != nil { - return nil, fmt.Errorf("failed to marshal request: %w", err) - } - - // Create HTTP request - httpRequest, err := http.NewRequestWithContext(ctx, "POST", c.address+"/exec", bytes.NewReader(body)) - if err != nil { - return nil, fmt.Errorf("failed to create HTTP request: %w", err) - } - httpRequest.Header.Set("Content-Type", "application/json") - - // Send request - if c.client == nil { - c.client = &http.Client{ - Timeout: c.timeout, - } - } - - resp, err := c.client.Do(httpRequest) - if err != nil { - return nil, fmt.Errorf("failed to send HTTP request: %w", err) - } - defer resp.Body.Close() - - // Check status code - if resp.StatusCode != http.StatusOK { - bodyBytes, _ := io.ReadAll(resp.Body) - return nil, fmt.Errorf("HTTP request failed with status %d: %s", resp.StatusCode, string(bodyBytes)) 
- } - - // Parse response - var httpResp httpExecResponse - if err := json.NewDecoder(resp.Body).Decode(&httpResp); err != nil { - return nil, fmt.Errorf("failed to decode response: %w", err) - } - - // Convert to client format - response := &ExecResponse{ - ID: httpResp.ID, - Reports: make([]Report, len(httpResp.Reports)), - } - - for i, r := range httpResp.Reports { - response.Reports[i] = Report{ - Status: parseStatus(r.Status), - ExitCode: r.ExitCode, - Signal: r.Signal, - Stdout: r.Stdout, - Stderr: r.Stderr, - CPUTime: r.CPUTime, - Memory: r.Memory, - WallTime: r.WallTime, - StartAt: r.StartAt, - FinishAt: r.FinishAt, - } - } - - return response, nil -} - -// Done notifies the server that a job is complete via HTTP REST API. -func (c *httpClient) Done(ctx context.Context, jobID string) error { - // Create request - httpReq := httpDoneRequest{ - ID: jobID, - } - - body, err := json.Marshal(httpReq) - if err != nil { - return fmt.Errorf("failed to marshal request: %w", err) - } - - // Create HTTP request - httpRequest, err := http.NewRequestWithContext(ctx, "POST", c.address+"/done", bytes.NewReader(body)) - if err != nil { - return fmt.Errorf("failed to create HTTP request: %w", err) - } - httpRequest.Header.Set("Content-Type", "application/json") - - // Send request - if c.client == nil { - c.client = &http.Client{ - Timeout: c.timeout, - } - } - - resp, err := c.client.Do(httpRequest) - if err != nil { - return fmt.Errorf("failed to send HTTP request: %w", err) - } - defer resp.Body.Close() - - // Check status code - if resp.StatusCode != http.StatusOK { - bodyBytes, _ := io.ReadAll(resp.Body) - return fmt.Errorf("HTTP request failed with status %d: %s", resp.StatusCode, string(bodyBytes)) - } - - return nil -} - -// Close closes the HTTP client (no-op for HTTP). -func (c *httpClient) Close() error { - return nil -} - -// parseStatus converts a string status to Status enum. 
-func parseStatus(s string) Status { - switch s { - case "OK": - return StatusOK - case "RUNTIME_ERROR": - return StatusRuntimeError - case "TIME_LIMIT_EXCEEDED": - return StatusTimeLimitExceeded - case "MEMORY_LIMIT_EXCEEDED": - return StatusMemoryLimitExceeded - case "OUTPUT_LIMIT_EXCEEDED": - return StatusOutputLimitExceeded - case "TERMINATED": - return StatusTerminated - case "SKIPPED": - return StatusSkipped - case "UNKNOWN": - return StatusUnknown - default: - return StatusUnspecified - } -} diff --git a/cmd/server.go b/cmd/server.go deleted file mode 100644 index 764b7a8..0000000 --- a/cmd/server.go +++ /dev/null @@ -1,132 +0,0 @@ -/* -Copyright © 2025 NAME HERE -*/ -package cmd - -import ( - "fmt" - "os" - - "github.com/joshjms/castletown/config" - "github.com/joshjms/castletown/job" - "github.com/joshjms/castletown/sandbox" - "github.com/joshjms/castletown/server" - "github.com/spf13/cobra" -) - -// serverCmd represents the server command -var serverCmd = &cobra.Command{ - Use: "server", - Short: "Starts the Castletown server", - Long: `When the light is running low and the shadows start to grow -And the places that you know seem like fantasy -There's a light inside your soul -That's still shining in the cold with the truth -The promise in our hearts -Don't forget, I'm with you in the dark`, - Run: func(cmd *cobra.Command, args []string) { - config.Port, _ = cmd.Flags().GetInt("port") - config.OverlayFSDir, _ = cmd.Flags().GetString("overlayfs-dir") - config.StorageDir, _ = cmd.Flags().GetString("storage-dir") - config.ImagesDir, _ = cmd.Flags().GetString("images-dir") - config.LibcontainerDir, _ = cmd.Flags().GetString("libcontainer-dir") - config.MaxConcurrency, _ = cmd.Flags().GetInt("max-concurrency") - config.RootfsDir, _ = cmd.Flags().GetString("rootfs-dir") - - RunServer() - }, -} - -func RunServer() { - f, err := os.Stat(config.OverlayFSDir) - if os.IsNotExist(err) { - if err := os.Mkdir(config.OverlayFSDir, 0755); err != nil { - 
fmt.Fprintf(os.Stderr, "Failed to create OverlayFS directory: %v\n", err) - os.Exit(1) - } - } else if !f.IsDir() { - fmt.Fprintf(os.Stderr, "OverlayFS path exists but is not a directory: %s\n", config.OverlayFSDir) - os.Exit(1) - } - - f, err = os.Stat(config.StorageDir) - if os.IsNotExist(err) { - if err := os.Mkdir(config.StorageDir, 0755); err != nil { - fmt.Fprintf(os.Stderr, "Failed to create Storage directory: %v\n", err) - os.Exit(1) - } - } else if !f.IsDir() { - fmt.Fprintf(os.Stderr, "Storage path exists but is not a directory: %s\n", config.StorageDir) - os.Exit(1) - } - - f, err = os.Stat(config.ImagesDir) - if os.IsNotExist(err) { - if err := os.Mkdir(config.ImagesDir, 0755); err != nil { - fmt.Fprintf(os.Stderr, "Failed to create Images directory: %v\n", err) - os.Exit(1) - } - } else if !f.IsDir() { - fmt.Fprintf(os.Stderr, "Images path exists but is not a directory: %s\n", config.ImagesDir) - os.Exit(1) - } - - f, err = os.Stat(config.LibcontainerDir) - if os.IsNotExist(err) { - if err := os.Mkdir(config.LibcontainerDir, 0755); err != nil { - fmt.Fprintf(os.Stderr, "Failed to create Libcontainer directory: %v\n", err) - os.Exit(1) - } - } else if !f.IsDir() { - fmt.Fprintf(os.Stderr, "Libcontainer path exists but is not a directory: %s\n", config.LibcontainerDir) - os.Exit(1) - } - - f, err = os.Stat(config.RootfsDir) - if os.IsNotExist(err) { - if err := os.Mkdir(config.RootfsDir, 0755); err != nil { - fmt.Fprintf(os.Stderr, "Failed to create Rootfs directory: %v\n", err) - os.Exit(1) - } - } else if !f.IsDir() { - fmt.Fprintf(os.Stderr, "Rootfs path exists but is not a directory: %s\n", config.RootfsDir) - os.Exit(1) - } - - job.NewJobPool() - - if err := sandbox.NewManager(config.MaxConcurrency); err != nil { - fmt.Fprintf(os.Stderr, "Error creating sandbox manager: %v\n", err) - os.Exit(1) - } - - s, err := server.NewServer() - if err != nil { - fmt.Fprintf(os.Stderr, "Error creating server: %v\n", err) - os.Exit(1) - } - s.Start() -} - -func 
init() { - rootCmd.AddCommand(serverCmd) - - // Here you will define your flags and configuration settings. - - // Cobra supports Persistent Flags which will work for this command - // and all subcommands, e.g.: - // serverCmd.PersistentFlags().String("foo", "", "A help for foo") - - // Cobra supports local flags which will only run when this command - // is called directly, e.g.: - // serverCmd.Flags().BoolP("toggle", "t", false, "Help message for toggle") - - serverCmd.Flags().String("overlayfs-dir", "/tmp/castletown/overlayfs", "Directory for overlayfs directories (i.e. lower, upper, and work directories)") - serverCmd.Flags().String("storage-dir", "/tmp/castletown/storage", "Directory for persistent file storage") - serverCmd.Flags().String("images-dir", "/tmp/castletown/images", "Directory for container rootfs images") - serverCmd.Flags().String("libcontainer-dir", "/tmp/castletown/libcontainer", "Directory for libcontainer containers") - serverCmd.Flags().String("rootfs-dir", "/tmp/castletown/rootfs", "Directory for temporary root filesystems") - - serverCmd.Flags().IntP("port", "p", 8000, "Port to run the server on") - serverCmd.Flags().Int("max-concurrency", 10, "Maximum number of concurrent sandboxes") -} diff --git a/cmd/start.go b/cmd/start.go new file mode 100644 index 0000000..163b4f3 --- /dev/null +++ b/cmd/start.go @@ -0,0 +1,49 @@ +package cmd + +import ( + "context" + "errors" + "os" + "os/signal" + "syscall" + + "github.com/joshjms/castletown/internal/config" + "github.com/joshjms/castletown/internal/worker" + "github.com/spf13/cobra" +) + +var startCmd = &cobra.Command{ + Use: "start", + Short: "Start the Castletown worker", + RunE: func(cmd *cobra.Command, args []string) error { + cfg := config.Load() + w, err := worker.NewWorker(cfg) + if err != nil { + return err + } + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + sigCh := make(chan os.Signal, 1) + signal.Notify(sigCh, os.Interrupt, syscall.SIGTERM) + defer 
signal.Stop(sigCh) + + go func() { + select { + case <-sigCh: + cancel() + case <-ctx.Done(): + } + }() + + if err := w.Run(ctx); err != nil && !errors.Is(err, context.Canceled) { + return err + } + return nil + }, +} + +func init() { + rootCmd.AddCommand(startCmd) +} diff --git a/config/config.go b/config/config.go deleted file mode 100644 index 6497a2c..0000000 --- a/config/config.go +++ /dev/null @@ -1,23 +0,0 @@ -package config - -var ( - OverlayFSDir string - StorageDir string - ImagesDir string - LibcontainerDir string - RootfsDir string - - MaxConcurrency int - Port int -) - -func UseDefaults() { - OverlayFSDir = "/tmp/castletown/overlayfs" - StorageDir = "/tmp/castletown/storage" - ImagesDir = "/tmp/castletown/images" - LibcontainerDir = "/tmp/castletown/libcontainer" - RootfsDir = "/tmp/castletown/rootfs" - - MaxConcurrency = 10 - Port = 8080 -} diff --git a/docker/entrypoint.sh b/docker/entrypoint.sh new file mode 100644 index 0000000..61b7ed8 --- /dev/null +++ b/docker/entrypoint.sh @@ -0,0 +1,57 @@ +#!/usr/bin/env bash +set -euo pipefail + +log() { + echo "[castletown-entrypoint] $*" +} + +: "${BLOB_ROOT:=/var/castletown/blobs}" +: "${WORK_ROOT:=/tmp/castletown/work}" +# Prefer config env names (JUDGE_*) but fall back to legacy variables if present. 
+: "${JUDGE_IMAGES_DIR:=${IMAGES_DIR:-/var/castletown/images}}" +: "${JUDGE_OVERLAYFS_DIR:=${OVERLAYFS_DIR:-/tmp/castletown/overlayfs}}" +: "${STORAGE_DIR:=/tmp/castletown/storage}" +: "${JUDGE_LIBCONTAINER_DIR:=${LIBCONTAINER_DIR:-/tmp/castletown/libcontainer}}" +: "${JUDGE_ROOTFS_DIR:=${ROOTFS_DIR:-/tmp/castletown/rootfs}}" +: "${JUDGE_DISK_CACHE_DIR:=${DISK_CACHE_DIR:-/var/castletown/testcases}}" +: "${PROBLEM_CACHE_DIR:=/var/castletown/problems}" +: "${CASTLETOWN_SKIP_ROOTFS:=1}" + +mkdir -p \ + "${BLOB_ROOT}" \ + "${WORK_ROOT}" \ + "${JUDGE_IMAGES_DIR}" \ + "${JUDGE_OVERLAYFS_DIR}" \ + "${STORAGE_DIR}" \ + "${JUDGE_LIBCONTAINER_DIR}" \ + "${JUDGE_ROOTFS_DIR}" \ + "${JUDGE_DISK_CACHE_DIR}" \ + "${PROBLEM_CACHE_DIR}" + +if [[ "${CASTLETOWN_SKIP_ROOTFS}" != "1" ]]; then + if ! command -v skopeo >/dev/null 2>&1 || ! command -v umoci >/dev/null 2>&1; then + log "rootfs bootstrap requested but skopeo/umoci are not installed in this image" + exit 1 + fi + if [[ ! -d "${JUDGE_IMAGES_DIR}/gcc-15-bookworm" ]]; then + log "bootstraping gcc-15-bookworm rootfs into ${JUDGE_IMAGES_DIR}" + JUDGE_IMAGES_DIR="${JUDGE_IMAGES_DIR}" /opt/castletown/scripts/rootfs.sh + else + log "rootfs already present in ${JUDGE_IMAGES_DIR}, skipping bootstrap" + fi +else + log "CASTLETOWN_SKIP_ROOTFS=1, skipping rootfs bootstrap" +fi + +# Ensure a writable /work mountpoint exists inside each rootfs image so OCI runtimes +# don't attempt to create it on a read-only lowerdir. 
+if [[ -d "${JUDGE_IMAGES_DIR}" ]]; then + while IFS= read -r -d '' img; do + if [[ -d "${img}" ]]; then + mkdir -p "${img}/work" || log "warning: could not create ${img}/work" + fi + done < <(find "${JUDGE_IMAGES_DIR}" -mindepth 1 -maxdepth 1 -type d -print0) +fi + +log "starting castletown: $*" +exec "$@" diff --git a/docs/getting-started.md b/docs/getting-started.md index 6362384..cd13329 100644 --- a/docs/getting-started.md +++ b/docs/getting-started.md @@ -2,6 +2,8 @@ This guide will get you started on how to set up the castletown server. The following procedure is run in an Ubuntu 25.04 LTS amd64 brand new GCP Compute Engine instance. +> ⚡️ **Want the fastest path?** Run `docker compose up --build` from the repo root. The container image now bundles everything (including the rootfs bootstrap) and talks to Postgres, RabbitMQ, and MinIO that are part of the Compose stack. Continue with the manual steps below only if you need to run Castletown directly on a host. + ## Downloading `castletown` ```shell diff --git a/go.mod b/go.mod index b7231c5..5922352 100644 --- a/go.mod +++ b/go.mod @@ -5,14 +5,16 @@ go 1.24.3 require ( github.com/containerd/cgroups/v3 v3.0.5 github.com/google/uuid v1.6.0 + github.com/lib/pq v1.10.9 + github.com/minio/minio-go/v7 v7.0.97 github.com/opencontainers/cgroups v0.0.3 github.com/opencontainers/runc v1.3.0 github.com/opencontainers/runtime-spec v1.2.1 + github.com/rabbitmq/amqp091-go v1.10.0 + github.com/rs/zerolog v1.34.0 github.com/spf13/cobra v1.10.1 github.com/stretchr/testify v1.10.0 golang.org/x/sys v0.34.0 - google.golang.org/grpc v1.76.0 - google.golang.org/protobuf v1.36.6 ) require ( @@ -23,23 +25,37 @@ require ( github.com/coreos/go-systemd/v22 v22.5.0 // indirect github.com/cyphar/filepath-securejoin v0.4.1 // indirect github.com/davecgh/go-spew v1.1.1 // indirect + github.com/dustin/go-humanize v1.0.1 // indirect + github.com/go-ini/ini v1.67.0 // indirect github.com/godbus/dbus/v5 v5.1.0 // indirect + 
github.com/google/go-cmp v0.7.0 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect + github.com/klauspost/compress v1.18.0 // indirect + github.com/klauspost/cpuid/v2 v2.2.11 // indirect + github.com/klauspost/crc32 v1.3.0 // indirect + github.com/mattn/go-colorable v0.1.13 // indirect + github.com/mattn/go-isatty v0.0.19 // indirect + github.com/minio/crc64nvme v1.1.0 // indirect + github.com/minio/md5-simd v1.1.2 // indirect github.com/moby/sys/capability v0.4.0 // indirect github.com/moby/sys/mountinfo v0.7.2 // indirect github.com/moby/sys/user v0.3.0 // indirect github.com/moby/sys/userns v0.1.0 // indirect github.com/mrunalp/fileutils v0.5.1 // indirect github.com/opencontainers/selinux v1.11.1 // indirect + github.com/philhofer/fwd v1.2.0 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/rs/xid v1.6.0 // indirect github.com/seccomp/libseccomp-golang v0.10.0 // indirect github.com/sirupsen/logrus v1.9.3 // indirect github.com/spf13/pflag v1.0.9 // indirect + github.com/tinylib/msgp v1.3.0 // indirect github.com/vishvananda/netlink v1.3.0 // indirect github.com/vishvananda/netns v0.0.4 // indirect + golang.org/x/crypto v0.40.0 // indirect golang.org/x/net v0.42.0 // indirect golang.org/x/text v0.27.0 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20250804133106-a7a43d27e69b // indirect + google.golang.org/protobuf v1.36.6 // indirect gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/go.sum b/go.sum index a520d4c..600180e 100644 --- a/go.sum +++ b/go.sum @@ -17,18 +17,16 @@ github.com/cyphar/filepath-securejoin v0.4.1/go.mod h1:Sdj7gXlvMcPZsbhwhQ33GguGL github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/go-logr/logr v1.4.3 
h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= -github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= -github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= -github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= +github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= +github.com/go-ini/ini v1.67.0 h1:z6ZrTEZqSWOTyH2FlglNbNgARyHG8oLW9gMELqKr06A= +github.com/go-ini/ini v1.67.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= github.com/go-quicktest/qt v1.101.0 h1:O1K29Txy5P2OK0dGo59b7b0LR6wKfIhttaAhHUyn7eI= github.com/go-quicktest/qt v1.101.0/go.mod h1:14Bz/f7NwaXPtdYEgzsx46kqSxVwTbzVZsDC26tQJow= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk= github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= -github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= @@ -41,14 +39,34 @@ github.com/josharian/native v1.1.0 h1:uuaP0hAbW7Y4l0ZRQ6C9zfb7Mg1mbFKry/xzDAfmtL github.com/josharian/native v1.1.0/go.mod h1:7X/raswPFr05uY3HiLlYeyQntB6OO7E/d2Cu7qoaN2w= github.com/jsimonetti/rtnetlink/v2 v2.0.1 h1:xda7qaHDSVOsADNouv7ukSuicKZO7GgVUCXxpaIEIlM= github.com/jsimonetti/rtnetlink/v2 v2.0.1/go.mod h1:7MoNYNbb3UaDHtF8udiJo/RH6VsTKP1pqKLUTVCvToE= 
+github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= +github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= +github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= +github.com/klauspost/cpuid/v2 v2.2.11 h1:0OwqZRYI2rFrjS4kvkDnqJkKHdHaRnCm68/DY4OxRzU= +github.com/klauspost/cpuid/v2 v2.2.11/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0= +github.com/klauspost/crc32 v1.3.0 h1:sSmTt3gUt81RP655XGZPElI0PelVTZ6YwCRnPSupoFM= +github.com/klauspost/crc32 v1.3.0/go.mod h1:D7kQaZhnkX/Y0tstFGf8VUzv2UofNGqCjnC3zdHB0Hw= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= +github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= +github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= +github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA= +github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mdlayher/netlink v1.7.2 h1:/UtM3ofJap7Vl4QWCPDGXY8d3GIY2UGSDbK+QWmY8/g= github.com/mdlayher/netlink v1.7.2/go.mod h1:xraEF7uJbxLhc5fpHL4cPe221LI2bdttWlU+ZGLfQSw= github.com/mdlayher/socket v0.4.1 h1:eM9y2/jlbs1M615oshPQOHZzj6R6wMT7bX5NPiQvn2U= github.com/mdlayher/socket v0.4.1/go.mod h1:cAqeGjoufqdxWkD7DkpyS+wcefOtmu5OQ8KuoJGIReA= +github.com/minio/crc64nvme v1.1.0 h1:e/tAguZ+4cw32D+IO/8GSf5UVr9y+3eJcxZI2WOO/7Q= 
+github.com/minio/crc64nvme v1.1.0/go.mod h1:eVfm2fAzLlxMdUGc0EEBGSMmPwmXD5XiNRpnu9J3bvg= +github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34= +github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM= +github.com/minio/minio-go/v7 v7.0.97 h1:lqhREPyfgHTB/ciX8k2r8k0D93WaFqxbJX36UZq5occ= +github.com/minio/minio-go/v7 v7.0.97/go.mod h1:re5VXuo0pwEtoNLsNuSr0RrLfT/MBtohwdaSmPPSRSk= github.com/moby/sys/capability v0.4.0 h1:4D4mI6KlNtWMCM1Z/K0i7RV1FkX+DBDHKVJpCndZoHk= github.com/moby/sys/capability v0.4.0/go.mod h1:4g9IK291rVkms3LKCDOoYlnV8xKwoDTpIrNEE35Wq0I= github.com/moby/sys/mountinfo v0.7.2 h1:1shs6aH5s4o5H2zQLn796ADW1wMrIwHsyJ2v9KouLrg= @@ -67,10 +85,19 @@ github.com/opencontainers/runtime-spec v1.2.1 h1:S4k4ryNgEpxW1dzyqffOmhI1BHYcjzU github.com/opencontainers/runtime-spec v1.2.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/selinux v1.11.1 h1:nHFvthhM0qY8/m+vfhJylliSshm8G1jJ2jDMcgULaH8= github.com/opencontainers/selinux v1.11.1/go.mod h1:E5dMC3VPuVvVHDYmi78qvhJp8+M586T4DlDRYpFkyec= +github.com/philhofer/fwd v1.2.0 h1:e6DnBTl7vGY+Gz322/ASL4Gyp1FspeMvx1RNDoToZuM= +github.com/philhofer/fwd v1.2.0/go.mod h1:RqIHx9QI14HlwKwm98g9Re5prTQ6LdeRQn+gXJFxsJM= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/rabbitmq/amqp091-go v1.10.0 h1:STpn5XsHlHGcecLmMFCtg7mqq0RnD+zFr4uzukfVhBw= +github.com/rabbitmq/amqp091-go v1.10.0/go.mod h1:Hy4jKW5kQART1u+JkDTF9YYOQUHXqMuhrgxOEeS7G4o= github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA= +github.com/rs/xid v1.6.0 h1:fV591PaemRlL6JfRxGDEPl69wICngIQ3shQtzfy2gxU= +github.com/rs/xid v1.6.0/go.mod 
h1:7XoLgs4eV+QndskICGsho+ADou8ySMSjJKDIan90Nz0= +github.com/rs/zerolog v1.34.0 h1:k43nTLIwcTVQAncfCw4KZ2VY6ukYoZaBPNOE8txlOeY= +github.com/rs/zerolog v1.34.0/go.mod h1:bJsvje4Z08ROH4Nhs5iH600c3IkWhwp44iRc54W6wYQ= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/seccomp/libseccomp-golang v0.10.0 h1:aA4bp+/Zzi0BnWZ2F1wgNBs5gTpm+na2rWM6M9YjLpY= github.com/seccomp/libseccomp-golang v0.10.0/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg= @@ -86,44 +113,33 @@ github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/tinylib/msgp v1.3.0 h1:ULuf7GPooDaIlbyvgAxBV/FI7ynli6LZ1/nVUNu+0ww= +github.com/tinylib/msgp v1.3.0/go.mod h1:ykjzy2wzgrlvpDCRc4LA8UXy6D8bzMSuAF3WD57Gok0= github.com/vishvananda/netlink v1.3.0 h1:X7l42GfcV4S6E4vHTsw48qbrV+9PVojNfIhZcwQdrZk= github.com/vishvananda/netlink v1.3.0/go.mod h1:i6NetklAujEcC6fK0JPjT8qSwWyO0HLn4UKG+hGqeJs= github.com/vishvananda/netns v0.0.4 h1:Oeaw1EM2JMxD51g9uhtC0D7erkIjgmj8+JZc26m1YX8= github.com/vishvananda/netns v0.0.4/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM= -go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= -go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= -go.opentelemetry.io/otel v1.37.0 h1:9zhNfelUvx0KBfu/gb+ZgeAfAgtWrfHJZcAqFC228wQ= -go.opentelemetry.io/otel v1.37.0/go.mod h1:ehE/umFRLnuLa/vSccNq9oS1ErUlkkK71gMcN34UG8I= -go.opentelemetry.io/otel/metric v1.37.0 h1:mvwbQS5m0tbmqML4NqK+e3aDiO02vsf/WgbsdpcPoZE= -go.opentelemetry.io/otel/metric v1.37.0/go.mod h1:04wGrZurHYKOc+RKeye86GwKiTb9FKm1WHtO+4EVr2E= -go.opentelemetry.io/otel/sdk v1.37.0 
h1:ItB0QUqnjesGRvNcmAcU0LyvkVyGJ2xftD29bWdDvKI= -go.opentelemetry.io/otel/sdk v1.37.0/go.mod h1:VredYzxUvuo2q3WRcDnKDjbdvmO0sCzOvVAiY+yUkAg= -go.opentelemetry.io/otel/sdk/metric v1.37.0 h1:90lI228XrB9jCMuSdA0673aubgRobVZFhbjxHHspCPc= -go.opentelemetry.io/otel/sdk/metric v1.37.0/go.mod h1:cNen4ZWfiD37l5NhS+Keb5RXVWZWpRE+9WyVCpbo5ps= -go.opentelemetry.io/otel/trace v1.37.0 h1:HLdcFNbRQBE2imdSEgm/kwqmQj1Or1l/7bW6mxVK7z4= -go.opentelemetry.io/otel/trace v1.37.0/go.mod h1:TlgrlQ+PtQO5XFerSPUYG0JSgGyryXewPGyayAWSBS0= -go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA= -go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +golang.org/x/crypto v0.40.0 h1:r4x+VvoG5Fm+eJcxMaY8CQM7Lb0l1lsmjGBQ6s8BfKM= +golang.org/x/crypto v0.40.0/go.mod h1:Qr1vMER5WyS2dfPHAlsOj01wgLbsyWtFn/aY+5+ZdxY= golang.org/x/net v0.42.0 h1:jzkYrhi3YQWD6MLBJcsklgQsoAcw89EcZbJw8Z614hs= golang.org/x/net v0.42.0/go.mod h1:FF1RA5d3u7nAYA4z2TkclSCKh68eSXtiFwcWQpPXdt8= golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw= golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.12.0/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.34.0 h1:H5Y5sJ2L2JRdyv7ROF1he/lPdvFsd0mJHFw2ThKHxLA= golang.org/x/sys v0.34.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= golang.org/x/text v0.27.0 h1:4fGWRpyh641NLlecmyl4LOe6yDdfaYNrGb2zdfo4JV4= golang.org/x/text v0.27.0/go.mod h1:1D28KMCvyooCX9hBiosv5Tz/+YLxj0j7XhWjpSUF7CU= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk= -gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250804133106-a7a43d27e69b h1:zPKJod4w6F1+nRGDI9ubnXYhU9NSWoFAijkHkUXeTK8= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250804133106-a7a43d27e69b/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= -google.golang.org/grpc v1.76.0 h1:UnVkv1+uMLYXoIz6o7chp59WfQUYA2ex/BXQ9rHZu7A= -google.golang.org/grpc v1.76.0/go.mod h1:Ju12QI8M6iQJtbcsV+awF5a4hfJMLi4X0JLo94ULZ6c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY= diff --git a/internal/cache/cache.go b/internal/cache/cache.go new file mode 100644 index 0000000..50d029c --- /dev/null +++ b/internal/cache/cache.go @@ -0,0 +1,210 @@ +package cache + +import ( + "container/list" + "context" + "errors" + "fmt" + "os" + "path/filepath" + "sync" + + "github.com/joshjms/castletown/internal/models" + "github.com/joshjms/castletown/internal/repository" + "github.com/joshjms/castletown/internal/store" +) + +type ProblemCache struct { + problemsRepository *repository.ProblemsRepository + tcStore *store.TestcaseStore + + mu sync.Mutex + capacity int + ll *list.List + entries map[int]*list.Element + refCount map[int]int + cacheDir string +} + 
+type problemEntry struct { + id int + problem *models.Problem +} + +func NewProblemCache(pr *repository.ProblemsRepository, tcs *store.TestcaseStore, capacity int, cacheDir string) *ProblemCache { + if capacity <= 0 { + capacity = 128 + } + if cacheDir == "" { + cacheDir = "/var/castletown/testcases" + } + return &ProblemCache{ + problemsRepository: pr, + tcStore: tcs, + capacity: capacity, + ll: list.New(), + entries: make(map[int]*list.Element), + refCount: make(map[int]int), + cacheDir: cacheDir, + } +} + +func (pc *ProblemCache) GetProblemWithLease(ctx context.Context, id int) (*models.Problem, func(), error) { + pc.mu.Lock() + pc.refCount[id]++ + pc.mu.Unlock() + + p, err := pc.GetProblem(ctx, id) + if err != nil { + pc.ReleaseProblem(id) + return nil, nil, err + } + + var once sync.Once + release := func() { + once.Do(func() { + pc.ReleaseProblem(id) + }) + } + return p, release, nil +} + +func (pc *ProblemCache) ReleaseProblem(id int) { + pc.mu.Lock() + defer pc.mu.Unlock() + + if count, ok := pc.refCount[id]; ok { + if count <= 1 { + delete(pc.refCount, id) + } else { + pc.refCount[id] = count - 1 + } + } +} + +func (pc *ProblemCache) GetProblem(ctx context.Context, id int) (*models.Problem, error) { + if problem, ok := pc.getFromCache(id); ok { + if err := pc.ensureTestcasesOnDisk(ctx, id); err != nil { + return nil, err + } + return cloneProblem(problem), nil + } + + problem, err := pc.problemsRepository.GetProblemDetails(ctx, id) + if err != nil { + return nil, err + } + + if err := pc.ensureTestcasesOnDisk(ctx, id); err != nil { + return nil, err + } + + pc.addToCache(id, problem) + + return cloneProblem(problem), nil +} + +func (pc *ProblemCache) GetCacheDir() string { + return pc.cacheDir +} + +func (pc *ProblemCache) getFromCache(id int) (*models.Problem, bool) { + pc.mu.Lock() + defer pc.mu.Unlock() + + elem, ok := pc.entries[id] + if !ok { + return nil, false + } + + pc.ll.MoveToFront(elem) + entry := elem.Value.(*problemEntry) + return 
entry.problem, true +} + +func (pc *ProblemCache) addToCache(id int, problem *models.Problem) { + pc.mu.Lock() + defer pc.mu.Unlock() + + if elem, ok := pc.entries[id]; ok { + pc.ll.MoveToFront(elem) + entry := elem.Value.(*problemEntry) + entry.problem = cloneProblem(problem) + return + } + + if pc.ll.Len() >= pc.capacity { + if !pc.evictOldestLocked() { + // Everything is in use; skip caching rather than blocking. + return + } + } + + entry := &problemEntry{ + id: id, + problem: cloneProblem(problem), + } + elem := pc.ll.PushFront(entry) + pc.entries[id] = elem +} + +func (pc *ProblemCache) evictOldestLocked() bool { + for elem := pc.ll.Back(); elem != nil; elem = elem.Prev() { + entry := elem.Value.(*problemEntry) + if pc.refCount[entry.id] > 0 { + continue + } + pc.ll.Remove(elem) + delete(pc.entries, entry.id) + _ = os.RemoveAll(filepath.Join(pc.cacheDir, fmt.Sprintf("%d", entry.id))) + return true + } + return false +} + +func (pc *ProblemCache) ensureTestcasesOnDisk(ctx context.Context, id int) error { + finalDir := filepath.Join(pc.cacheDir, fmt.Sprintf("%d", id)) + + info, err := os.Stat(finalDir) + if err == nil { + if info.IsDir() { + return nil + } + if remErr := os.RemoveAll(finalDir); remErr != nil { + return remErr + } + } + + if pc.tcStore != nil { + if err := pc.retrieveTestcasesFromObjectStore(id); err == nil { + return nil + } else if !errors.Is(err, os.ErrNotExist) { + return err + } + } + + return fmt.Errorf("testcases for problem %d not found", id) +} + +func (pc *ProblemCache) retrieveTestcasesFromObjectStore(id int) error { + r, err := pc.tcStore.Get(context.Background(), "testcases", fmt.Sprintf("%d.tar.gz", id)) + if err != nil { + return err + } + defer r.Close() + + if err := untarGzReader(r, pc.cacheDir, id); err != nil { + _ = os.RemoveAll(filepath.Join(pc.cacheDir, fmt.Sprintf("%d", id))) + return err + } + + return nil +} + +func cloneProblem(p *models.Problem) *models.Problem { + if p == nil { + return nil + } + cp := *p + return 
&cp +} diff --git a/internal/cache/cache_test.go b/internal/cache/cache_test.go new file mode 100644 index 0000000..66fc8c2 --- /dev/null +++ b/internal/cache/cache_test.go @@ -0,0 +1,30 @@ +package cache + +import ( + "context" + "testing" + + "github.com/joshjms/castletown/internal/config" + "github.com/joshjms/castletown/internal/repository" + "github.com/joshjms/castletown/internal/store" + "github.com/stretchr/testify/require" +) + +// TestProblemCache tests the basic functionality of the ProblemCache. It is not used in CI and should only be run locally. +func TestProblemCache(t *testing.T) { + cfg := config.Load() + pr, err := repository.NewProblemsRepository(cfg.Database) + require.NoError(t, err, "cannot create postgres repository: %v", err) + + tcs, err := store.NewTestcaseStore(cfg.Minio) + require.NoError(t, err, "cannot create minio store: %v", err) + + var problemID = 1 + + pc := NewProblemCache(pr, tcs, 5, cfg.Judge.DiskCacheDir) + require.NotNil(t, pc, "problem cache should not be nil") + + _, release, err := pc.GetProblemWithLease(context.Background(), problemID) + require.NoError(t, err, "error getting problem with lease: %v", err) + release() +} diff --git a/internal/cache/utils.go b/internal/cache/utils.go new file mode 100644 index 0000000..1d8b146 --- /dev/null +++ b/internal/cache/utils.go @@ -0,0 +1,111 @@ +package cache + +import ( + "archive/tar" + "compress/gzip" + "fmt" + "io" + "os" + "path" + "path/filepath" + "strings" +) + +func untarGzReader(r io.Reader, dest string, problemID int) error { + const ( + maxFiles = 200_000 + maxFileSize = int64(1 << 30) // 1 GiB + maxTotalBytes = int64(5 << 30) // 5 GiB + ) + + gzr, err := gzip.NewReader(r) + if err != nil { + return err + } + defer gzr.Close() + + tr := tar.NewReader(gzr) + + var files int + var total int64 + + cleanDest := filepath.Clean(dest) + string(os.PathSeparator) + problemDir := fmt.Sprintf("%d", problemID) + + // Rewrite any top-level directory name in the archive to the 
problem ID. + rewriteTarget := func(hdr *tar.Header) string { + normalized := strings.ReplaceAll(hdr.Name, "\\", "/") + cleanName := path.Clean(normalized) + cleanName = strings.TrimPrefix(cleanName, "/") + if cleanName == "." { + return problemDir + } + parts := strings.SplitN(cleanName, "/", 2) + if len(parts) == 1 { + if hdr.FileInfo() != nil && hdr.FileInfo().IsDir() { + return problemDir + } + return filepath.Join(problemDir, parts[0]) + } + return filepath.Join(problemDir, parts[1]) + } + + for { + hdr, err := tr.Next() + if err == io.EOF { + break + } + if err != nil { + return err + } + + files++ + if files > maxFiles { + return fmt.Errorf("too many files in archive") + } + + if hdr.Size < 0 || hdr.Size > maxFileSize { + return fmt.Errorf("file too large: %s", hdr.Name) + } + total += hdr.Size + if total > maxTotalBytes { + return fmt.Errorf("archive too large (uncompressed)") + } + + target := filepath.Join(dest, rewriteTarget(hdr)) + cleanTarget := filepath.Clean(target) + + if !strings.HasPrefix(cleanTarget, cleanDest) { + return fmt.Errorf("illegal path: %s", hdr.Name) + } + + switch hdr.Typeflag { + case tar.TypeDir: + if err := os.MkdirAll(cleanTarget, 0755); err != nil { + return err + } + + case tar.TypeReg: + if err := os.MkdirAll(filepath.Dir(cleanTarget), 0755); err != nil { + return err + } + f, err := os.OpenFile(cleanTarget, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644) + if err != nil { + return err + } + _, copyErr := io.CopyN(f, tr, hdr.Size) + closeErr := f.Close() + if copyErr != nil { + return copyErr + } + if closeErr != nil { + return closeErr + } + + default: + // Skip symlinks/devices/etc. 
+ } + } + + return nil +} diff --git a/internal/config/config.go b/internal/config/config.go new file mode 100644 index 0000000..3e885d1 --- /dev/null +++ b/internal/config/config.go @@ -0,0 +1,133 @@ +package config + +import ( + "fmt" + "os" + "runtime" + "time" +) + +type Config struct { + Env string + ResultsURL string + WorkRoot string + RootfsDir string + ServerPort int + MetricsAddr string + QueuePollInterval time.Duration + Judge JudgeConfig + Database DatabaseConfig + RabbitMQ RabbitMQConfig + ProblemCacheDir string + DatabaseURL string + Minio MinioConfig +} + +type ServerConfig struct { +} + +type JudgeConfig struct { + DiskCacheDir string + SubmissionsDir string + LibcontainerDir string + ImagesDir string + OverlayFSDir string + RootfsDir string + WorkRoot string + MaxConcurrency int +} + +type DatabaseConfig struct { + Host string + Port int + User string + Password string + Name string + SSLMode string +} + +func (d DatabaseConfig) DSN() string { + return fmt.Sprintf( + "user=%s password=%s dbname=%s host=%s port=%d sslmode=%s", + d.User, + d.Password, + d.Name, + d.Host, + d.Port, + d.SSLMode, + ) +} + +type RabbitMQConfig struct { + URL string + Queue string +} + +type MinioConfig struct { + Endpoint string + AccessKey string + SecretKey string + Bucket string + UseSSL bool +} + +func Load() *Config { + return &Config{ + Env: get("ENV", "dev"), + ResultsURL: get("RESULTS_URL", "http://backend/results"), + ServerPort: getInt("SERVER_PORT", 8000), + MetricsAddr: get("METRICS_ADDR", ":9090"), + QueuePollInterval: time.Duration(getInt64("QUEUE_POLL_MS", 200)) * time.Millisecond, + RabbitMQ: RabbitMQConfig{ + URL: get("RABBITMQ_URL", "amqp://castletown:castletown@localhost:5672/"), + Queue: get("RABBITMQ_QUEUE", "submissions"), + }, + Database: DatabaseConfig{ + Host: get("DATABASE_HOST", "localhost"), + Port: getInt("DATABASE_PORT", 5432), + User: get("DATABASE_USER", "castletown"), + Password: get("DATABASE_PASSWORD", "castletown"), + Name: 
get("DATABASE_NAME", "castletown"), + SSLMode: get("DATABASE_SSLMODE", "disable"), + }, + Minio: MinioConfig{ + Endpoint: get("MINIO_ENDPOINT", "localhost:9000"), + AccessKey: get("MINIO_ACCESS_KEY", "minioadmin"), + SecretKey: get("MINIO_SECRET_KEY", "minioadmin"), + Bucket: get("MINIO_BUCKET", "castletown"), + UseSSL: get("MINIO_USE_SSL", "false") == "true", + }, + Judge: JudgeConfig{ + DiskCacheDir: get("JUDGE_DISK_CACHE_DIR", "/var/castletown/testcases"), + MaxConcurrency: getInt("JUDGE_MAX_CONCURRENCY", runtime.NumCPU()), + SubmissionsDir: get("JUDGE_SUBMISSIONS_DIR", "/var/castletown/submissions"), + LibcontainerDir: get("JUDGE_LIBCONTAINER_DIR", "/var/castletown/libcontainer"), + ImagesDir: get("JUDGE_IMAGES_DIR", "/var/castletown/images"), + RootfsDir: get("JUDGE_ROOTFS_DIR", "/tmp/castletown/rootfs"), + WorkRoot: get("JUDGE_WORK_ROOT", "/tmp/castletown/work"), + OverlayFSDir: get("JUDGE_OVERLAYFS_DIR", "/tmp/castletown/overlayfs"), + }, + } +} + +func get(k, def string) string { + if v := os.Getenv(k); v != "" { + return v + } + return def +} + +func getInt(k string, def int) int { + return int(getInt64(k, int64(def))) +} + +func getInt64(k string, def int64) int64 { + if v := os.Getenv(k); v != "" { + if x, err := parseInt64(v); err == nil { + return x + } + } + return def +} + +func parseInt64(s string) (int64, error) { var x int64; _, err := fmt.Sscan(s, &x); return x, err } diff --git a/internal/container/allocator.go b/internal/container/allocator.go new file mode 100644 index 0000000..e13cf1b --- /dev/null +++ b/internal/container/allocator.go @@ -0,0 +1,87 @@ +package container + +import ( + "context" + "sync/atomic" +) + +const StartUIDGID = 1000000 +const DefaultSize = 65536 + +type Slot struct { + UIDStart uint32 + UIDSize uint32 + GIDStart uint32 + GIDSize uint32 + CPU int +} + +type SlotPool struct { + ch chan Slot +} + +type SlotPoolOption func(*SlotPool) + +func NewSlotPool(opts ...SlotPoolOption) *SlotPool { + sp := &SlotPool{} + for _, opt 
:= range opts { + opt(sp) + } + + if sp.ch == nil { + sp.ch = make(chan Slot, 1) + sp.ch <- Slot{ + UIDStart: StartUIDGID, + UIDSize: DefaultSize, + GIDStart: StartUIDGID, + GIDSize: DefaultSize, + CPU: 0, + } + } + + return sp +} + +func WithMaxConcurrency(n int) SlotPoolOption { + return func(sp *SlotPool) { + if n < 1 { + n = 1 + } + + sp.ch = make(chan Slot, n) + for i := 0; i < n; i++ { + sp.ch <- Slot{ + UIDStart: StartUIDGID + uint32(i)*DefaultSize, + UIDSize: DefaultSize, + GIDStart: StartUIDGID + uint32(i)*DefaultSize, + GIDSize: DefaultSize, + CPU: i, + } + } + } +} + +type Allocation struct { + pool *SlotPool + slot Slot + released atomic.Bool +} + +func (sp *SlotPool) Allocate(ctx context.Context) (*Allocation, error) { + select { + case r := <-sp.ch: + return &Allocation{pool: sp, slot: r}, nil + case <-ctx.Done(): + return nil, ctx.Err() + } +} + +func (a *Allocation) Release() { + if a == nil || a.pool == nil { + return + } + if !a.released.CompareAndSwap(false, true) { + return + } + a.pool.ch <- a.slot +} diff --git a/sandbox/cgroup.go b/internal/container/cgroup.go similarity index 68% rename from sandbox/cgroup.go rename to internal/container/cgroup.go index 1631b36..52e918b 100644 --- a/sandbox/cgroup.go +++ b/internal/container/cgroup.go @@ -1,4 +1,4 @@ -package sandbox +package container import ( "fmt" @@ -23,3 +23,18 @@ func loadCgroup(id string) (*cgroup2.Manager, error) { func getSlicePath() (string, error) { return "/castletown.slice", nil } + +func removeCgroup(id string) error { + mgr, err := loadCgroup(id) + if err != nil { + return err + } + + if err := mgr.DeleteSystemd(); err != nil { + if err := mgr.Delete(); err != nil { + return err + } + } + + return nil +} diff --git a/internal/container/config.go b/internal/container/config.go new file mode 100644 index 0000000..14ce628 --- /dev/null +++ b/internal/container/config.go @@ -0,0 +1,91 @@ +package container + +import ( + "fmt" + "io" +) + +// Config holds the configuration for the 
sandboxed process and environment. +type Config struct { + // Root filesystem image directory. Will be used as the base for the overlay filesystem. + RootfsImageDir string + + // Command-line arguments to pass to the sandboxed process. + Args []string + // Standard input content for the sandboxed process. + Stdin io.Reader + // Working directory inside the sandbox. + Cwd string + // Environment variables for the sandboxed process. + Env []string + + // User namespace configuration. + UserNamespace *UserNamespaceConfig + + // CPU time limit in microseconds. + TimeLimitUs int64 + // Memory limit in bytes. + MemoryLimitBytes int64 + // Maximum number of PIDs allowed. + PidLimit int64 + // Number of threads to use. + UseThreads int64 + // CPU core(s) to which the sandboxed process is pinned to. + CpusetCPUs string + // CPU memory node(s) to which the sandboxed process is pinned to. + CpusetMems string + + // Resource limits configuration. + Rlimit *RlimitConfig + + // Directory on the host to be bind-mounted into the sandbox at /work. + BindMount string + + // Overlay filesystem configuration. + Overlay *Overlay + + allocation *Allocation +} + +// UseAllocation configures the sandbox config with the given allocation. 
+func (c *Config) UseAllocation(a *Allocation) { + c.allocation = a + + c.UserNamespace = &UserNamespaceConfig{ + UID: IDMapping{ + ContainerID: 0, + HostID: a.slot.UIDStart, + Size: a.slot.UIDSize, + }, + GID: IDMapping{ + ContainerID: 0, + HostID: a.slot.GIDStart, + Size: a.slot.GIDSize, + }, + } + + c.CpusetCPUs = fmt.Sprintf("%d", a.slot.CPU) + c.CpusetMems = "0" +} + +type UserNamespaceConfig struct { + UID IDMapping + GID IDMapping +} + +type IDMapping struct { + ContainerID uint32 + HostID uint32 + Size uint32 +} + +type RlimitConfig struct { + Core *Rlimit + Fsize *Rlimit + NoFile *Rlimit +} + +type Rlimit struct { + Hard uint64 + Soft uint64 +} diff --git a/sandbox/consts.go b/internal/container/consts.go similarity index 83% rename from sandbox/consts.go rename to internal/container/consts.go index 60f5e20..851e199 100644 --- a/sandbox/consts.go +++ b/internal/container/consts.go @@ -1,4 +1,4 @@ -package sandbox +package container const ( CONTAINERS_ROOT = "/tmp/castletown" diff --git a/internal/container/container.go b/internal/container/container.go new file mode 100644 index 0000000..64de780 --- /dev/null +++ b/internal/container/container.go @@ -0,0 +1,168 @@ +package container + +import ( + "bytes" + "context" + "fmt" + "time" + + "github.com/google/uuid" + "github.com/joshjms/castletown/internal/config" + "github.com/opencontainers/runc/libcontainer" + "github.com/opencontainers/runc/libcontainer/configs" + "github.com/opencontainers/runc/libcontainer/specconv" + "golang.org/x/sys/unix" +) + +type Container struct { + id string + config *Config + runtimeCfg *config.Config + containerImpl *libcontainer.Container +} + +func NewContainer(id string, cfg *config.Config, opts ...ContainerOptions) *Container { + if id == "" { + id = uuid.NewString() + } + c := &Container{ + id: id, + runtimeCfg: cfg, + } + for _, opt := range opts { + opt(c) + } + return c +} + +type ContainerOptions func(*Container) + +func WithContainerConfig(cfg *Config) 
ContainerOptions { + return func(c *Container) { + c.config = cfg + } +} + +func (c *Container) Init(ctx context.Context) error { + spec, err := createSpec(c.id, c.config) + if err != nil { + return fmt.Errorf("error creating spec: %w", err) + } + + libcontainerConfig, err := specconv.CreateLibcontainerConfig(&specconv.CreateOpts{ + UseSystemdCgroup: false, + Spec: spec, + }) + if err != nil { + return fmt.Errorf("error creating libcontainer config: %w", err) + } + + container, err := libcontainer.Create(c.runtimeCfg.Judge.LibcontainerDir, c.id, libcontainerConfig) + if err != nil { + return fmt.Errorf("error creating container: %w", err) + } + + c.containerImpl = container + + return nil +} + +// Run runs a command inside the container and returns a Report +func (c *Container) Run(ctx context.Context) (*Report, error) { + noNewPrivileges := true + + var stdoutBuf, stderrBuf bytes.Buffer + + rlimits := getRlimits(c.config) + + process := &libcontainer.Process{ + Args: c.config.Args, + Env: c.config.Env, + UID: 0, + GID: 0, + Cwd: c.config.Cwd, + NoNewPrivileges: &noNewPrivileges, + Stdin: c.config.Stdin, + Stdout: &stdoutBuf, + Stderr: &stderrBuf, + Rlimits: rlimits, + Init: true, + } + + startAt := time.Now() + + if err := c.containerImpl.Run(process); err != nil { + return nil, fmt.Errorf("error running container: %w", err) + } + + processFinished := make(chan interface{}, 1) + timeLimitExceeded := false + + go func() { + select { + case <-processFinished: + case <-time.After(time.Duration(c.config.TimeLimitUs) * time.Microsecond * 3): + timeLimitExceeded = true + c.containerImpl.Signal(unix.SIGKILL) + } + }() + + state, _ := process.Wait() + processFinished <- struct{}{} + + finishAt := time.Now() + + return c.makeReport(&stdoutBuf, &stderrBuf, state, timeLimitExceeded, startAt, finishAt) +} + +func (c *Container) Destroy() error { + if c.containerImpl != nil { + c.containerImpl.Destroy() + } + + if c.config.Overlay != nil { + if err := 
Cleanup(c.config.Overlay); err != nil { + return fmt.Errorf("error cleaning up overlay: %w", err) + } + } + + if c.config.allocation != nil { + c.config.allocation.Release() + } + + return nil +} + +func getRlimits(cfg *Config) []configs.Rlimit { + if cfg.Rlimit == nil { + return nil + } + + var rlimits []configs.Rlimit + + if cfg.Rlimit.Core != nil { + rlimits = append(rlimits, configs.Rlimit{ + Type: unix.RLIMIT_CORE, + Hard: cfg.Rlimit.Core.Hard, + Soft: cfg.Rlimit.Core.Soft, + }) + } + + if cfg.Rlimit.Fsize != nil { + rlimits = append(rlimits, configs.Rlimit{ + Type: unix.RLIMIT_FSIZE, + Hard: cfg.Rlimit.Fsize.Hard, + Soft: cfg.Rlimit.Fsize.Soft, + }) + } + + if cfg.Rlimit.NoFile != nil { + rlimits = append(rlimits, configs.Rlimit{ + Type: unix.RLIMIT_NOFILE, + Hard: cfg.Rlimit.NoFile.Hard, + Soft: cfg.Rlimit.NoFile.Soft, + }) + } + + return rlimits +} diff --git a/internal/container/container_test.go b/internal/container/container_test.go new file mode 100644 index 0000000..4cadf91 --- /dev/null +++ b/internal/container/container_test.go @@ -0,0 +1,177 @@ +package container_test + +import ( + "os" + "path/filepath" + "sort" + "testing" + + "github.com/joshjms/castletown/internal/config" + "github.com/joshjms/castletown/internal/container" + "github.com/stretchr/testify/require" +) + +var sp *container.SlotPool +var cfg *config.Config + +func TestMain(m *testing.M) { + container.Init() + cfg = config.Load() + cfg.Judge.MaxConcurrency = 2 + + if os.Geteuid() == 0 { + files, err := os.ReadDir("test_files") + require.NoError(nil, err, "failed to read test files directory: %v", err) + + for _, f := range files { + fullPath := filepath.Join("test_files", f.Name()) + if err := os.Chown(fullPath, 0, 0); err != nil { + panic(err) + } + } + } + + sp = container.NewSlotPool(container.WithMaxConcurrency(cfg.Judge.MaxConcurrency)) + + exitCode := m.Run() + + if os.Geteuid() == 0 { + files, err := os.ReadDir("test_files") + require.NoError(nil, err, "failed to read test 
files directory: %v", err) + + for _, f := range files { + fullPath := filepath.Join("test_files", f.Name()) + if err := os.Chown(fullPath, 1000, 1000); err != nil { + panic(err) + } + } + } + + os.Exit(exitCode) +} + +func TestContainerAdd(t *testing.T) { + expectedStatus := container.STATUS_OK + expectedOutput := "15\n" + + tc := container.Testcase{ + File: "test_files/add.cpp", + Stdin: "6 9\n", + ExpectedStatus: &expectedStatus, + ExpectedOutput: &expectedOutput, + TimeLimitUs: 1000000, + } + + tc.Run(t, cfg, sp) +} + +func TestContainerTimeLimitExceededA(t *testing.T) { + expectedStatus := container.STATUS_TIME_LIMIT_EXCEEDED + + tc := container.Testcase{ + File: "test_files/tl1.cpp", + ExpectedStatus: &expectedStatus, + TimeLimitUs: 1000000, + } + + tc.Run(t, cfg, sp) +} + +func TestContainerTimeLimitExceededB(t *testing.T) { + expectedStatus := container.STATUS_TIME_LIMIT_EXCEEDED + + tc := container.Testcase{ + File: "test_files/printloop.cpp", + ExpectedStatus: &expectedStatus, + TimeLimitUs: 1000000, + } + + tc.Run(t, cfg, sp) +} + +func TestContainerMemoryLimitExceeded(t *testing.T) { + expectedStatus := container.STATUS_MEMORY_LIMIT_EXCEEDED + + tc := container.Testcase{ + File: "test_files/mem1.cpp", + ExpectedStatus: &expectedStatus, + TimeLimitUs: 10000000, + } + + tc.Run(t, cfg, sp) +} + +func TestContainerFork(t *testing.T) { + expectedStatus := container.STATUS_OK + + tc := container.Testcase{ + File: "test_files/fork.cpp", + ExpectedStatus: &expectedStatus, + TimeLimitUs: 1000000, + } + + tc.Run(t, cfg, sp) +} + +func TestContainerRusageConsistency(t *testing.T) { + expectedStatus := container.STATUS_OK + + tc := container.Testcase{ + File: "test_files/random.cpp", + ExpectedStatus: &expectedStatus, + TimeLimitUs: 1000000, + } + + var minCpuUsage, maxCpuUsage uint64 + + for i := 0; i < 10; i++ { + reports := tc.Run(t, cfg, sp) + report := reports[0] + + if i == 0 { + minCpuUsage = report.CPUTime + maxCpuUsage = report.CPUTime + + continue + } + + 
minCpuUsage = min(minCpuUsage, report.CPUTime) + maxCpuUsage = max(maxCpuUsage, report.CPUTime) + } + + require.Less(t, maxCpuUsage-minCpuUsage, uint64(10000), "cpu usage inconsistent") +} + +func TestContainerConcurrency(t *testing.T) { + expectedStatus := container.STATUS_OK + + tc := container.Testcase{ + File: "test_files/sleep.cpp", + ExpectedStatus: &expectedStatus, + TimeLimitUs: 3000000, + Concurrency: 5, + } + + reports := tc.Run(t, cfg, sp) + + startTimes := make([]int64, len(reports)) + finishTimes := make([]int64, len(reports)) + + for i, report := range reports { + startTimes[i] = report.StartAt.UnixMilli() + finishTimes[i] = report.FinishAt.UnixMilli() + } + + sort.Slice(startTimes, func(i, j int) bool { + return startTimes[i] < startTimes[j] + }) + sort.Slice(finishTimes, func(i, j int) bool { + return finishTimes[i] < finishTimes[j] + }) + + for i := 2; i < len(startTimes); i++ { + require.Less(t, finishTimes[i-2], startTimes[i], "semaphore didn't work correctly") + } + + tc.Run(t, cfg, sp) +} diff --git a/internal/container/default.go b/internal/container/default.go new file mode 100644 index 0000000..ebef993 --- /dev/null +++ b/internal/container/default.go @@ -0,0 +1,28 @@ +package container + +func UseDefaultConfig() *Config { + return &Config{ + Env: []string{"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"}, + Cwd: "/work", + TimeLimitUs: 1000000, + MemoryLimitBytes: 256 * 1024 * 1024, + PidLimit: 64, + UseThreads: 1, + CpusetCPUs: "0", + CpusetMems: "0", + Rlimit: &RlimitConfig{ + Core: &Rlimit{ + Hard: 0, + Soft: 0, + }, + Fsize: &Rlimit{ + Hard: 1 * 1024 * 1024, + Soft: 1 * 1024 * 1024, + }, + NoFile: &Rlimit{ + Hard: 64, + Soft: 64, + }, + }, + } +} diff --git a/sandbox/init.go b/internal/container/init.go similarity index 92% rename from sandbox/init.go rename to internal/container/init.go index 370ec15..f201d37 100644 --- a/sandbox/init.go +++ b/internal/container/init.go @@ -1,4 +1,4 @@ -package sandbox +package 
container import ( "os" diff --git a/internal/container/overlay.go b/internal/container/overlay.go new file mode 100644 index 0000000..64bf021 --- /dev/null +++ b/internal/container/overlay.go @@ -0,0 +1,66 @@ +package container + +import ( + "fmt" + "os" + "path/filepath" + + "golang.org/x/sys/unix" +) + +type Overlay struct { + Base string + LowerDir string + UpperDir string + WorkDir string +} + +func NewOverlay(base, lower string) (*Overlay, error) { + if err := os.MkdirAll(base, 0755); err != nil { + return nil, err + } + + mntFlags := unix.MS_NOSUID | unix.MS_NODEV + if err := unix.Mount("tmpfs", base, "tmpfs", uintptr(mntFlags), "size=1G"); err != nil { + return nil, err + } + + ov := &Overlay{ + Base: base, + LowerDir: lower, + UpperDir: filepath.Join(base, "upper"), + WorkDir: filepath.Join(base, "work"), + } + + for _, dir := range []string{ov.UpperDir, ov.WorkDir} { + if err := os.MkdirAll(dir, 0755); err != nil { + return nil, err + } + } + + return ov, nil +} + +func Cleanup(o *Overlay) error { + var errs []error + + if err := unix.Unmount(o.Base, 0); err != nil { + errs = append(errs, err) + } + + for _, dir := range []string{o.UpperDir, o.WorkDir} { + if err := os.RemoveAll(dir); err != nil { + errs = append(errs, err) + } + } + + if err := os.RemoveAll(o.Base); err != nil { + errs = append(errs, err) + } + + if len(errs) > 0 { + return fmt.Errorf("cleanup errors: %v", errs) + } + + return nil +} diff --git a/sandbox/report.go b/internal/container/report.go similarity index 70% rename from sandbox/report.go rename to internal/container/report.go index 0a712f8..91d7cd4 100644 --- a/sandbox/report.go +++ b/internal/container/report.go @@ -1,4 +1,4 @@ -package sandbox +package container import ( "fmt" @@ -49,33 +49,36 @@ func (r Report) String() string { return fmt.Sprintf("status: %s\nexit code: %d\nsignal: %d\nstdout: %s\nstderr:%s\ncpu:%d usec\nmemory:%d bytes\n", r.Status, r.ExitCode, r.Signal, stdoutTrim, stderrTrim, r.CPUTime, r.Memory) } -func (s 
*Sandbox) makeReport(stdoutBuf, stderrBuf io.Reader, state *os.ProcessState, timeLimitExceeded bool, startAt, finishAt time.Time) (Report, error) { +func (c *Container) makeReport(stdoutBuf, stderrBuf io.Reader, state *os.ProcessState, timeLimitExceeded bool, startAt, finishAt time.Time) (*Report, error) { stdout, err := io.ReadAll(stdoutBuf) if err != nil { - return Report{}, fmt.Errorf("error reading stdout: %w", err) + return nil, fmt.Errorf("error reading stdout: %w", err) } stderr, err := io.ReadAll(stderrBuf) if err != nil { - return Report{}, fmt.Errorf("error reading stderr: %w", err) + return nil, fmt.Errorf("error reading stderr: %w", err) } - cgManager, err := loadCgroup(s.id) + cgManager, err := loadCgroup(c.id) if err != nil { - return Report{}, fmt.Errorf("error loading cgroup: %w", err) + return nil, fmt.Errorf("error loading cgroup: %w", err) } stats, err := cgManager.Stat() if err != nil { - return Report{}, fmt.Errorf("error getting cgroup stats: %w", err) + return nil, fmt.Errorf("error getting cgroup stats: %w", err) } var status Status + cpuUsageUs := stats.GetCPU().GetUsageUsec() + memoryMaxUsage := stats.GetMemory().GetMaxUsage() + switch { - case timeLimitExceeded || stats.GetCPU().GetUsageUsec() > uint64(s.config.TimeLimitMs)*1000: + case timeLimitExceeded || cpuUsageUs > uint64(c.config.TimeLimitUs): status = STATUS_TIME_LIMIT_EXCEEDED - case stats.GetMemory().GetMaxUsage() > uint64(s.config.Cgroup.Memory): + case memoryMaxUsage > uint64(c.config.MemoryLimitBytes): status = STATUS_MEMORY_LIMIT_EXCEEDED case state.ExitCode() != 0: status = STATUS_RUNTIME_ERROR @@ -83,14 +86,14 @@ func (s *Sandbox) makeReport(stdoutBuf, stderrBuf io.Reader, state *os.ProcessSt status = STATUS_OK } - return Report{ + return &Report{ Status: status, ExitCode: state.ExitCode(), Signal: state.Sys().(syscall.WaitStatus).Signal(), Stdout: string(stdout), Stderr: string(stderr), - CPUTime: stats.GetCPU().GetUsageUsec(), - Memory: stats.GetMemory().GetMaxUsage(), + 
CPUTime: cpuUsageUs, + Memory: memoryMaxUsage, StartAt: startAt, FinishAt: finishAt, }, nil diff --git a/sandbox/spec.go b/internal/container/spec.go similarity index 54% rename from sandbox/spec.go rename to internal/container/spec.go index 946a739..b1f1a7b 100644 --- a/sandbox/spec.go +++ b/internal/container/spec.go @@ -1,73 +1,59 @@ -package sandbox +package container import ( "fmt" "path/filepath" - "github.com/joshjms/castletown/config" "github.com/opencontainers/runtime-spec/specs-go" _ "github.com/opencontainers/cgroups/devices" ) -func (s *Sandbox) createSpec() (*specs.Spec, error) { +func createSpec(containerID string, cfg *Config) (*specs.Spec, error) { slicePath, err := getSlicePath() if err != nil { return nil, fmt.Errorf("failed to get slice path: %w", err) } - mounts := s.getMounts() + mounts := getMounts(cfg) spec := &specs.Spec{ Version: specs.Version, Process: &specs.Process{ NoNewPrivileges: true, }, + // Use a dummy rootfs; actual rootfs will be mounted via overlayfs Root: &specs.Root{ - Path: config.RootfsDir, + Path: cfg.RootfsImageDir, Readonly: false, }, - Hostname: fmt.Sprintf("castletown-%s", s.id), + Hostname: "castletown", Mounts: mounts, Linux: &specs.Linux{ - CgroupsPath: filepath.Join(slicePath, fmt.Sprintf("castletown-%s.scope", s.id), s.id), - Resources: cgroupResources(s.config.Cgroup), + CgroupsPath: filepath.Join(slicePath, fmt.Sprintf("castletown-%s.scope", containerID), containerID), + Resources: cgroupResources(cfg), UIDMappings: []specs.LinuxIDMapping{ { - HostID: s.config.UserNamespace.HostUID, - ContainerID: s.config.UserNamespace.ContainerUID, - Size: s.config.UserNamespace.UIDMapCount, + ContainerID: cfg.UserNamespace.UID.ContainerID, + HostID: cfg.UserNamespace.UID.HostID, + Size: cfg.UserNamespace.UID.Size, }, }, GIDMappings: []specs.LinuxIDMapping{ { - HostID: s.config.UserNamespace.HostGID, - ContainerID: s.config.UserNamespace.ContainerGID, - Size: s.config.UserNamespace.GIDMapCount, + HostID: 
cfg.UserNamespace.GID.HostID, + ContainerID: cfg.UserNamespace.GID.ContainerID, + Size: cfg.UserNamespace.GID.Size, }, }, Namespaces: []specs.LinuxNamespace{ - { - Type: specs.CgroupNamespace, - }, - { - Type: specs.PIDNamespace, - }, - { - Type: specs.IPCNamespace, - }, - { - Type: specs.UTSNamespace, - }, - { - Type: specs.MountNamespace, - }, - { - Type: specs.UserNamespace, - }, - { - Type: specs.NetworkNamespace, - }, + {Type: specs.CgroupNamespace}, + {Type: specs.PIDNamespace}, + {Type: specs.IPCNamespace}, + {Type: specs.UTSNamespace}, + {Type: specs.MountNamespace}, + {Type: specs.UserNamespace}, + {Type: specs.NetworkNamespace}, }, // https://github.com/moby/moby/blob/master/oci/defaults.go MaskedPaths: []string{ @@ -97,7 +83,7 @@ func (s *Sandbox) createSpec() (*specs.Spec, error) { return spec, nil } -func (s *Sandbox) getMounts() []specs.Mount { +func getMounts(cfg *Config) []specs.Mount { mounts := make([]specs.Mount, 0) rootfsMount := specs.Mount{ @@ -109,18 +95,18 @@ func (s *Sandbox) getMounts() []specs.Mount { "userxattr", "xino=off", "index=off", - fmt.Sprintf("upperdir=%s", s.getUpperDir()), - fmt.Sprintf("lowerdir=%s", s.getLowerDir()), - fmt.Sprintf("workdir=%s", s.getWorkDir()), + fmt.Sprintf("upperdir=%s", cfg.Overlay.UpperDir), + fmt.Sprintf("lowerdir=%s", cfg.Overlay.LowerDir), + fmt.Sprintf("workdir=%s", cfg.Overlay.WorkDir), }, } mounts = append(mounts, rootfsMount) bindMount := specs.Mount{ - Destination: "/box", + Destination: "/work", Type: "bind", - Source: s.config.BoxDir, + Source: cfg.BindMount, Options: []string{ "rbind", "rw", @@ -132,14 +118,14 @@ func (s *Sandbox) getMounts() []specs.Mount { UIDMappings: []specs.LinuxIDMapping{ { ContainerID: 0, - HostID: s.config.UserNamespace.HostUID, + HostID: cfg.UserNamespace.UID.HostID, Size: 1, }, }, GIDMappings: []specs.LinuxIDMapping{ { ContainerID: 0, - HostID: s.config.UserNamespace.HostGID, + HostID: cfg.UserNamespace.GID.HostID, Size: 1, }, }, @@ -170,18 +156,6 @@ func 
defaultMounts() []specs.Mount { "size=65536k", }, }, - { - Destination: "/dev/pts", - Type: "devpts", - Source: "devpts", - Options: []string{ - "nosuid", - "noexec", - "newinstance", - "ptmxmode=0666", - "mode=0620", - }, - }, { Destination: "/dev/shm", Type: "tmpfs", @@ -194,16 +168,6 @@ func defaultMounts() []specs.Mount { "size=65536k", }, }, - { - Destination: "/dev/mqueue", - Type: "mqueue", - Source: "mqueue", - Options: []string{ - "nosuid", - "noexec", - "nodev", - }, - }, { Destination: "/tmp", Type: "tmpfs", @@ -216,48 +180,45 @@ func defaultMounts() []specs.Mount { "nr_inodes=4k", }, }, - { - Destination: "/sys", - Type: "sysfs", - Source: "sysfs", - Options: []string{"nosuid", "noexec", "nodev", "ro"}, - }, + // { + // Destination: "/sys", + // Type: "sysfs", + // Source: "sysfs", + // Options: []string{"nosuid", "noexec", "nodev", "ro"}, + // }, } } -func cgroupResources(cfg *CgroupConfig) *specs.LinuxResources { - cgMemory := &specs.LinuxMemory{} - cgCPU := &specs.LinuxCPU{} - cgPids := &specs.LinuxPids{} - - if cfg.CpuQuota != 0 { - cgCPU.Quota = &cfg.CpuQuota +func cgroupResources(cfg *Config) *specs.LinuxResources { + resources := &specs.LinuxResources{ + CPU: &specs.LinuxCPU{}, + Memory: &specs.LinuxMemory{}, + Pids: &specs.LinuxPids{}, } - if cfg.CpuShares != 0 { - cgCPU.Shares = &cfg.CpuShares - } + var cpuPeriod uint64 = 100000 + var cpuQuota int64 = 100000 * cfg.UseThreads - if cfg.CpusetCpus != "" { - cgCPU.Cpus = cfg.CpusetCpus + resources.CPU.Quota = &cpuQuota + resources.CPU.Period = &cpuPeriod + + if cfg.CpusetCPUs != "" { + resources.CPU.Cpus = cfg.CpusetCPUs } if cfg.CpusetMems != "" { - cgCPU.Mems = cfg.CpusetMems + resources.CPU.Mems = cfg.CpusetMems } - if cfg.Memory != 0 { - cgMemory.Limit = &cfg.Memory - cgMemory.Swap = &cfg.Memory + if cfg.MemoryLimitBytes != 0 { + limit := cfg.MemoryLimitBytes + resources.Memory.Limit = &limit } - if cfg.PidsLimit != 0 { - cgPids.Limit = cfg.PidsLimit + if cfg.PidLimit != 0 { + limit := 
cfg.PidLimit + resources.Pids.Limit = limit } - return &specs.LinuxResources{ - CPU: cgCPU, - Memory: cgMemory, - Pids: cgPids, - } + return resources } diff --git a/sandbox/test_files/add.cpp b/internal/container/test_files/add.cpp similarity index 100% rename from sandbox/test_files/add.cpp rename to internal/container/test_files/add.cpp diff --git a/sandbox/test_files/fork.cpp b/internal/container/test_files/fork.cpp similarity index 100% rename from sandbox/test_files/fork.cpp rename to internal/container/test_files/fork.cpp diff --git a/sandbox/test_files/mem1.cpp b/internal/container/test_files/mem1.cpp similarity index 100% rename from sandbox/test_files/mem1.cpp rename to internal/container/test_files/mem1.cpp diff --git a/sandbox/test_files/printloop.cpp b/internal/container/test_files/printloop.cpp similarity index 100% rename from sandbox/test_files/printloop.cpp rename to internal/container/test_files/printloop.cpp diff --git a/sandbox/test_files/random.cpp b/internal/container/test_files/random.cpp similarity index 100% rename from sandbox/test_files/random.cpp rename to internal/container/test_files/random.cpp diff --git a/sandbox/test_files/sleep.cpp b/internal/container/test_files/sleep.cpp similarity index 100% rename from sandbox/test_files/sleep.cpp rename to internal/container/test_files/sleep.cpp diff --git a/sandbox/test_files/tl1.cpp b/internal/container/test_files/tl1.cpp similarity index 100% rename from sandbox/test_files/tl1.cpp rename to internal/container/test_files/tl1.cpp diff --git a/internal/container/test_utils.go b/internal/container/test_utils.go new file mode 100644 index 0000000..05a7b6d --- /dev/null +++ b/internal/container/test_utils.go @@ -0,0 +1,110 @@ +package container + +import ( + "bytes" + "fmt" + "os" + "path/filepath" + "sync" + "testing" + + "github.com/google/uuid" + "github.com/joshjms/castletown/internal/config" + "github.com/joshjms/castletown/internal/utils" + "github.com/stretchr/testify/require" +) + +const 
defaultMemoryLimitBytes = 256 * 1024 * 1024 + +type Testcase struct { + File string + Stdin string + + ExpectedStatus *Status + ExpectedOutput *string + + TimeLimitUs int64 + + Concurrency int +} + +func (tc *Testcase) Run(t *testing.T, cfg *config.Config, sp *SlotPool) []*Report { + t.Helper() + + require.NotNil(t, cfg, "cfg cannot be nil") + require.NotNil(t, sp, "slot pool cannot be nil") + + prepareRuntimeDirs(t, cfg) + + if tc.Concurrency < 1 { + tc.Concurrency = 1 + } + + if tc.TimeLimitUs == 0 { + tc.TimeLimitUs = 1000000 + } + + runDir := filepath.Join(t.TempDir(), uuid.NewString()) + require.NoError(t, os.MkdirAll(runDir, 0755)) + require.NoError(t, utils.FileCopy(tc.File, filepath.Join(runDir, "main.cpp"))) + + rootfsImageDir := resolveRootfsImageDir(t, cfg) + + compileReport, err := runCompile(t, cfg, sp, runDir, rootfsImageDir) + require.NoError(t, err, "compilation failed to start") + require.Equal(t, STATUS_OK, compileReport.Status, "compile status not ok") + + reports := make([]*Report, tc.Concurrency) + errCh := make(chan error, tc.Concurrency) + + wg := sync.WaitGroup{} + for i := 0; i < tc.Concurrency; i++ { + wg.Add(1) + go func(idx int) { + defer wg.Done() + + report, err := runExecution(t, tc, cfg, sp, runDir, rootfsImageDir) + if err != nil { + errCh <- err + return + } + reports[idx] = report + + if tc.ExpectedStatus != nil && report.Status != *tc.ExpectedStatus { + errCh <- fmt.Errorf("status != expectedStatus: got %s want %s", report.Status, *tc.ExpectedStatus) + } + + if tc.ExpectedOutput != nil && report.Stdout != *tc.ExpectedOutput { + errCh <- fmt.Errorf("output != expectedOutput: got %q want %q", report.Stdout, *tc.ExpectedOutput) + } + }(i) + } + + wg.Wait() + close(errCh) + + for err := range errCh { + require.NoError(t, err) + } + + return reports +} + +func runCompile(t *testing.T, runtimeCfg *config.Config, sp *SlotPool, workDir, rootfsImageDir string) (*Report, error) { + t.Helper() + + defaultCompileTimeoutUs := int64(10 * 1000 
* 1000) + + return RunInContainer(t.Context(), runtimeCfg, sp, workDir, rootfsImageDir, []string{"g++", "-o", "main", "main.cpp"}, &bytes.Buffer{}, defaultCompileTimeoutUs, defaultMemoryLimitBytes, 64) +} + +func runExecution(t *testing.T, tc *Testcase, runtimeCfg *config.Config, sp *SlotPool, workDir, rootfsImageDir string) (*Report, error) { + t.Helper() + + stdin := &bytes.Buffer{} + if tc.Stdin != "" { + stdin.WriteString(tc.Stdin) + } + + return RunInContainer(t.Context(), runtimeCfg, sp, workDir, rootfsImageDir, []string{"./main"}, stdin, tc.TimeLimitUs, defaultMemoryLimitBytes, 1) +} diff --git a/internal/container/utils.go b/internal/container/utils.go new file mode 100644 index 0000000..d7b7cbe --- /dev/null +++ b/internal/container/utils.go @@ -0,0 +1,104 @@ +package container + +import ( + "context" + "fmt" + "io" + "os" + "path/filepath" + "testing" + + "github.com/google/uuid" + "github.com/joshjms/castletown/internal/config" + "github.com/stretchr/testify/require" +) + +func RunInContainer(ctx context.Context, runtimeCfg *config.Config, sp *SlotPool, workDir, rootfsImageDir string, args []string, stdin io.Reader, timeLimitUs int64, memoryLimitBytes int64, maxProcs int64) (*Report, error) { + allocation, err := sp.Allocate(context.Background()) + if err != nil { + return nil, fmt.Errorf("failed to allocate slot: %w", err) + } + + cfg := UseDefaultConfig() + cfg.Args = args + cfg.Stdin = stdin + cfg.BindMount = workDir + cfg.RootfsImageDir = rootfsImageDir + cfg.TimeLimitUs = timeLimitUs + cfg.MemoryLimitBytes = memoryLimitBytes + cfg.PidLimit = maxProcs + + overlay, err := newOverlay(runtimeCfg, rootfsImageDir) + if err != nil { + allocation.Release() + return nil, err + } + cfg.Overlay = overlay + cfg.UseAllocation(allocation) + + return runContainer(ctx, runtimeCfg, cfg) +} + +func runContainer(ctx context.Context, runtimeCfg *config.Config, cfg *Config) (*Report, error) { + container := NewContainer("", runtimeCfg, WithContainerConfig(cfg)) + + if 
err := container.Init(ctx); err != nil { + destroyErr := container.Destroy() + if destroyErr != nil { + return nil, fmt.Errorf("init error (%w) and destroy error (%v)", err, destroyErr) + } + + return nil, fmt.Errorf("failed to init container: %w", err) + } + + report, runErr := container.Run(ctx) + destroyErr := container.Destroy() + + if runErr != nil { + return report, fmt.Errorf("failed to run container: %w", runErr) + } + + if destroyErr != nil { + return report, fmt.Errorf("failed to destroy container: %w", destroyErr) + } + + return report, nil +} + +func newOverlay(runtimeCfg *config.Config, rootfsImageDir string) (*Overlay, error) { + base := filepath.Join(runtimeCfg.Judge.OverlayFSDir, uuid.NewString()) + overlay, err := NewOverlay(base, rootfsImageDir) + if err != nil { + return nil, fmt.Errorf("failed to create overlay: %w", err) + } + + return overlay, nil +} + +func resolveRootfsImageDir(t *testing.T, cfg *config.Config) string { + t.Helper() + + defaultImagePath := filepath.Join(cfg.Judge.ImagesDir, "gcc-15-bookworm") + if _, err := os.Stat(defaultImagePath); err == nil { + return defaultImagePath + } + + entries, err := os.ReadDir(cfg.Judge.ImagesDir) + require.NoError(t, err, "failed to read images dir %s", cfg.Judge.ImagesDir) + + for _, entry := range entries { + if entry.IsDir() { + return filepath.Join(cfg.Judge.ImagesDir, entry.Name()) + } + } + + t.Fatalf("no rootfs image directory found in %s; run scripts/rootfs.sh to populate one", cfg.Judge.ImagesDir) + return "" +} + +func prepareRuntimeDirs(t *testing.T, cfg *config.Config) { + t.Helper() + + require.NoError(t, os.MkdirAll(cfg.Judge.LibcontainerDir, 0o755)) + require.NoError(t, os.MkdirAll(cfg.Judge.OverlayFSDir, 0o755)) +} diff --git a/internal/grader/checker.go b/internal/grader/checker.go new file mode 100644 index 0000000..c44970f --- /dev/null +++ b/internal/grader/checker.go @@ -0,0 +1,62 @@ +package grader + +import ( + "fmt" + "strings" +) + +type ComparisonMode int + +const ( + 
ComparisonModeExact ComparisonMode = iota + ComparisonModeToken +) + +type Checker struct { + mode ComparisonMode +} + +func (c *Checker) Check(out, expOut string) (bool, error) { + switch c.mode { + case ComparisonModeExact: + return out == expOut, nil + case ComparisonModeToken: + outTokens := strings.Fields(out) + expOutTokens := strings.Fields(expOut) + if len(outTokens) != len(expOutTokens) { + return false, nil + } + for i := range outTokens { + if outTokens[i] != expOutTokens[i] { + return false, nil + } + } + return true, nil + default: + return false, fmt.Errorf("unknown comparison mode") + } +} + +type Option func(*Checker) + +func WithExactComparison() Option { + return func(c *Checker) { + c.mode = ComparisonModeExact + } +} + +func WithTokenComparison() Option { + return func(c *Checker) { + c.mode = ComparisonModeToken + } +} + +func NewChecker(opts ...Option) *Checker { + c := &Checker{ + mode: ComparisonModeExact, + } + for _, opt := range opts { + opt(c) + } + return c +} diff --git a/internal/grader/grader.go b/internal/grader/grader.go new file mode 100644 index 0000000..223f212 --- /dev/null +++ b/internal/grader/grader.go @@ -0,0 +1,86 @@ +package grader + +import ( + "context" + "errors" + "fmt" + "os" + "path/filepath" + + "github.com/joshjms/castletown/internal/cache" + "github.com/joshjms/castletown/internal/config" + "github.com/joshjms/castletown/internal/container" + "github.com/joshjms/castletown/internal/models" + "github.com/joshjms/castletown/internal/repository" + "github.com/rs/zerolog" +) + +var handlerMap = map[string]func(*Grader, context.Context, *models.Submission, *models.Problem, string) error{ + "cpp": (*Grader).handleCppSubmission, +} + +type Grader struct { + log zerolog.Logger + runtimeCfg *config.Config + submissionsRepository *repository.SubmissionsRepository + problemCache *cache.ProblemCache + slotPool *container.SlotPool +} + +func NewGrader(log zerolog.Logger, runtimeCfg *config.Config, submissionsRepository 
*repository.SubmissionsRepository, store *cache.ProblemCache) *Grader { + return &Grader{ + log: log, + runtimeCfg: runtimeCfg, + submissionsRepository: submissionsRepository, + problemCache: store, + slotPool: container.NewSlotPool(container.WithMaxConcurrency(runtimeCfg.Judge.MaxConcurrency)), + } +} + +func (g *Grader) Handle(ctx context.Context, sub *models.Submission) error { + if sub == nil { + return errors.New("submission is nil") + } + + var ( + problem *models.Problem + err error + ) + + if err := g.markSubmissionAsGrading(ctx, sub); err != nil { + g.log.Error().Err(err).Int64("submission_id", sub.ID).Msg("Failed to mark submission as grading") + } + + var release func() + problem, release, err = g.problemCache.GetProblemWithLease(ctx, int(sub.ProblemID)) + if release != nil { + defer release() + } + if err != nil { + _ = g.markSubmissionWithVerdict(ctx, sub, models.VerdictInternalError, 0, "failed to prepare submission for judging") + return fmt.Errorf("failed to get problem with lease: %w", err) + } + + submissionDir := filepath.Join(g.runtimeCfg.Judge.WorkRoot, fmt.Sprintf("submission_%d", sub.ID)) + if err := os.Mkdir(submissionDir, 0700); err != nil { + return err + } + defer os.RemoveAll(submissionDir) + + handler, ok := handlerMap[sub.Language] + if !ok { + return errors.New("no handler for language: " + sub.Language) + } + + if err := handler(g, ctx, sub, problem, submissionDir); err != nil { + _ = g.markSubmissionWithVerdict(ctx, sub, models.VerdictSystemError, 0, "failed to judge submission") + return err + } + + if err := g.persistSubmission(ctx, sub); err != nil { + g.log.Error().Err(err).Int64("submission_id", sub.ID).Msg("Failed to persist submission result") + return err + } + + return nil +} diff --git a/internal/grader/grader_test.go b/internal/grader/grader_test.go new file mode 100644 index 0000000..ac2282a --- /dev/null +++ b/internal/grader/grader_test.go @@ -0,0 +1,98 @@ +package grader_test + +import ( + "testing" + + 
"github.com/joshjms/castletown/internal/cache" + "github.com/joshjms/castletown/internal/config" + "github.com/joshjms/castletown/internal/container" + "github.com/joshjms/castletown/internal/grader" + "github.com/joshjms/castletown/internal/models" + "github.com/joshjms/castletown/internal/repository" + "github.com/joshjms/castletown/internal/store" + "github.com/rs/zerolog" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestGrader_HandleSubmissionAccepted(t *testing.T) { + container.Init() + + cfg := config.Load() + + pr, err := repository.NewProblemsRepository(cfg.Database) + require.NoError(t, err) + + tcs, err := store.NewTestcaseStore(cfg.Minio) + require.NoError(t, err) + + store := cache.NewProblemCache(pr, tcs, 20, cfg.Judge.DiskCacheDir) + + log := zerolog.New(nil).With().Timestamp().Logger() + + sr, err := repository.NewSubmissionsRepository(cfg.Database) + require.NoError(t, err) + + g := grader.NewGrader(log, cfg, sr, store) + + sub := &models.Submission{ + ID: 1, + ProblemID: 1, + Language: "cpp", + Code: ` +#include +using namespace std; + +int main() { + long long a, b; + cin >> a >> b; + cout << a + b << endl; + return 0; +} + `, + } + + assert.NoError(t, g.Handle(t.Context(), sub)) + assert.Equal(t, models.VerdictAccepted, sub.Verdict) +} + +func TestGrader_HandleSubmissionWrongAnswer(t *testing.T) { + container.Init() + + cfg := config.Load() + + pr, err := repository.NewProblemsRepository(cfg.Database) + require.NoError(t, err) + + tcs, err := store.NewTestcaseStore(cfg.Minio) + require.NoError(t, err) + + store := cache.NewProblemCache(pr, tcs, 20, cfg.Judge.DiskCacheDir) + + log := zerolog.New(nil).With().Timestamp().Logger() + + sr, err := repository.NewSubmissionsRepository(cfg.Database) + require.NoError(t, err) + + g := grader.NewGrader(log, cfg, sr, store) + + sub := &models.Submission{ + ID: 1, + ProblemID: 1, + Language: "cpp", + Code: ` +#include +using namespace std; + +int main() { + int a, 
b; + cin >> a >> b; + cout << a + b << endl; + return 0; +} + `, + } + + assert.NoError(t, g.Handle(t.Context(), sub)) + assert.Equal(t, models.VerdictWrongAnswer, sub.Verdict) +} diff --git a/internal/grader/handler.go b/internal/grader/handler.go new file mode 100644 index 0000000..2bdf6b5 --- /dev/null +++ b/internal/grader/handler.go @@ -0,0 +1,234 @@ +package grader + +import ( + "context" + "fmt" + "os" + "path/filepath" + "sort" + + "github.com/joshjms/castletown/internal/config" + "github.com/joshjms/castletown/internal/container" + "github.com/joshjms/castletown/internal/models" + "github.com/joshjms/castletown/internal/utils" +) + +func (g *Grader) handleCppSubmission(ctx context.Context, sub *models.Submission, problem *models.Problem, submissionDir string) error { + compileDir := filepath.Join(submissionDir, "compile") + if err := os.Mkdir(compileDir, 0700); err != nil { + return fmt.Errorf("create compile dir: %w", err) + } + + r, err := g.compileCpp(ctx, sub, compileDir) + if err != nil { + sub.Verdict = models.VerdictCompilationError + return nil + } + if r.Status != container.STATUS_OK { + sub.Verdict = models.VerdictCompilationError + return nil + } + + testcasesDir := filepath.Join(g.runtimeCfg.Judge.DiskCacheDir, fmt.Sprintf("%d", problem.ID)) + + groups := append([]models.TestcaseGroup(nil), problem.TestcaseGroups...) + sort.Slice(groups, func(i, j int) bool { return groups[i].OrderID < groups[j].OrderID }) + + testcaseResults := make([]models.TestcaseResult, 0) + var totalScore int = 0 + + for _, grp := range groups { + tcs := append([]models.Testcase(nil), grp.Testcases...) 
+ sort.Slice(tcs, func(i, j int) bool { return tcs[i].OrderID < tcs[j].OrderID }) + + passGroup := true + + for _, tc := range tcs { + var tcr models.TestcaseResult + tcr.TestcaseID = tc.ID + tcr.SubmissionID = sub.ID + + if !passGroup { + tcr.Verdict = models.VerdictSkipped + testcaseResults = append(testcaseResults, tcr) + continue + } + + base := fmt.Sprintf("%d_%d", grp.OrderID, tc.OrderID) + inPath := filepath.Join(testcasesDir, base+".in") + outPath := filepath.Join(testcasesDir, base+".out") + + if _, err := os.ReadFile(inPath); err != nil { + return fmt.Errorf("read input %s: %w", inPath, err) + } + if _, err := os.ReadFile(outPath); err != nil { + return fmt.Errorf("read output %s: %w", outPath, err) + } + + execDir := filepath.Join(submissionDir, base) + if err := os.Mkdir(execDir, 0700); err != nil { + return fmt.Errorf("create exec dir: %w", err) + } + utils.FileCopy(filepath.Join(compileDir, "main"), filepath.Join(execDir, "main")) + + if _, err := os.Stat(filepath.Join(execDir, "main")); err != nil { + return fmt.Errorf("stat executable: %w", err) + } + + report, err := g.executeCpp( + ctx, + problem.TimeLimit*1000, + problem.MemoryLimit*1024*1024, + inPath, + execDir, + ) + if err != nil { + return err + } + + tcr.CPUTime = int64(report.CPUTime) + tcr.Memory = int64(report.Memory) + + switch report.Status { + case container.STATUS_TIME_LIMIT_EXCEEDED: + tcr.Verdict = models.VerdictTimeLimitExceeded + case container.STATUS_MEMORY_LIMIT_EXCEEDED: + tcr.Verdict = models.VerdictMemoryLimitExceeded + case container.STATUS_RUNTIME_ERROR: + tcr.Verdict = models.VerdictRuntimeError + case container.STATUS_OK: + // continue to checking + default: + return fmt.Errorf("unknown container status: %s", report.Status) + } + + if tcr.Verdict != "" { + passGroup = false + testcaseResults = append(testcaseResults, tcr) + continue + } + + expected, err := os.ReadFile(outPath) + if err != nil { + return fmt.Errorf("read expected: %w", err) + } + + match, err := 
NewChecker(WithTokenComparison()).Check(report.Stdout, string(expected)) + if err != nil { + return err + } + if !match { + tcr.Verdict = models.VerdictWrongAnswer + passGroup = false + testcaseResults = append(testcaseResults, tcr) + continue + } + + tcr.Verdict = models.VerdictAccepted + testcaseResults = append(testcaseResults, tcr) + } + + if passGroup { + totalScore += grp.Points + } + } + + for _, tcr := range testcaseResults { + if tcr.Verdict != models.VerdictAccepted && tcr.Verdict != models.VerdictSkipped { + sub.Verdict = tcr.Verdict + break + } + } + + for _, tcr := range testcaseResults { + sub.CPUTime = max(sub.CPUTime, tcr.CPUTime) + sub.Memory = max(sub.Memory, tcr.Memory) + } + + sub.TestcaseResults = testcaseResults + sub.Score = totalScore + + if sub.Verdict == "" || sub.Verdict == models.VerdictJudging { + sub.Verdict = models.VerdictAccepted // YAY! + } + + return nil +} + +func (g *Grader) compileCpp(ctx context.Context, sub *models.Submission, workDir string) (*container.Report, error) { + sourcePath := filepath.Join(workDir, "main.cpp") + if err := os.WriteFile(sourcePath, []byte(sub.Code), 0644); err != nil { + return nil, fmt.Errorf("write source: %w", err) + } + + imageDir, err := resolveRootfsImageDir(g.runtimeCfg, "gcc-15-bookworm") + if err != nil { + return nil, err + } + + compileArgs := []string{"g++", "-O2", "-std=c++20", "-o", "main", "main.cpp"} + return container.RunInContainer( + ctx, + g.runtimeCfg, + g.slotPool, + workDir, + imageDir, + compileArgs, + nil, + 10_000_000, + 512*1024*1024, + 64, + ) +} + +func (g *Grader) executeCpp(ctx context.Context, timeLimitUs, memoryLimitBytes int64, inputPath, submissionDir string) (*container.Report, error) { + inFile, err := os.Open(inputPath) + if err != nil { + return nil, fmt.Errorf("open input: %w", err) + } + defer inFile.Close() + + imageDir, err := resolveRootfsImageDir(g.runtimeCfg, "gcc-15-bookworm") + if err != nil { + return nil, err + } + + execArgs := []string{"./main"} + 
report, err := container.RunInContainer( + ctx, + g.runtimeCfg, + g.slotPool, + submissionDir, + imageDir, + execArgs, + inFile, + timeLimitUs, + memoryLimitBytes, + 1, + ) + if err != nil { + return nil, err + } + + return report, nil +} + +func resolveRootfsImageDir(cfg *config.Config, image string) (string, error) { + defaultImagePath := filepath.Join(cfg.Judge.ImagesDir, image) + if _, err := os.Stat(defaultImagePath); err == nil { + return defaultImagePath, nil + } + + entries, err := os.ReadDir(cfg.Judge.ImagesDir) + if err != nil { + return "", fmt.Errorf("failed to read images dir %s: %w", cfg.Judge.ImagesDir, err) + } + + for _, entry := range entries { + if entry.IsDir() { + return filepath.Join(cfg.Judge.ImagesDir, entry.Name()), nil + } + } + + return "", fmt.Errorf("image not found") +} diff --git a/internal/grader/images.go b/internal/grader/images.go new file mode 100644 index 0000000..cfff63c --- /dev/null +++ b/internal/grader/images.go @@ -0,0 +1,7 @@ +package grader + +var imagesMap = map[string]map[string]string{ + "cpp": { + "20": "gcc-15-bookworm", + }, +} diff --git a/internal/grader/submission.go b/internal/grader/submission.go new file mode 100644 index 0000000..1a74ed1 --- /dev/null +++ b/internal/grader/submission.go @@ -0,0 +1,49 @@ +package grader + +import ( + "context" + "fmt" + + "github.com/joshjms/castletown/internal/models" +) + +func (g *Grader) markSubmissionAsGrading(ctx context.Context, sub *models.Submission) error { + sub.Verdict = models.VerdictJudging + sub.Score = 0 + sub.Message = "Grading in progress" + + return g.submissionsRepository.UpdateSubmissionResult(ctx, int(sub.ID), sub) +} + +func (g *Grader) markSubmissionWithVerdict(ctx context.Context, sub *models.Submission, verdict models.Verdict, score int, message string) error { + sub.Verdict = verdict + sub.Score = score + sub.Message = message + + return g.submissionsRepository.UpdateSubmissionResult(ctx, int(sub.ID), sub) +} + +func (g *Grader) 
persistSubmission(ctx context.Context, sub *models.Submission) error { + if sub.Message == "Grading in progress" { + sub.Message = "" + } + + sub.TestsTotal = len(sub.TestcaseResults) + sub.TestsPassed = 0 + + for _, result := range sub.TestcaseResults { + if result.Verdict == models.VerdictAccepted { + sub.TestsPassed++ + } + } + + if err := g.submissionsRepository.UpdateSubmissionResult(ctx, int(sub.ID), sub); err != nil { + return fmt.Errorf("update submission result: %w", err) + } + + if err := g.submissionsRepository.InsertTestcaseResults(ctx, sub.ID, sub.TestcaseResults); err != nil { + return fmt.Errorf("insert testcase results: %w", err) + } + + return nil +} diff --git a/internal/grader/utils.go b/internal/grader/utils.go new file mode 100644 index 0000000..7e7a422 --- /dev/null +++ b/internal/grader/utils.go @@ -0,0 +1 @@ +package grader diff --git a/internal/models/problem.go b/internal/models/problem.go new file mode 100644 index 0000000..83cb3b8 --- /dev/null +++ b/internal/models/problem.go @@ -0,0 +1,34 @@ +package models + +import "time" + +type Problem struct { + ID int `json:"id" db:"id"` + Title string `json:"title" db:"title"` + Description string `json:"description" db:"description"` + Difficulty int `json:"difficulty" db:"difficulty"` + TimeLimit int64 `json:"time_limit" db:"time_limit"` + MemoryLimit int64 `json:"memory_limit" db:"memory_limit"` + TestcaseGroups []TestcaseGroup `json:"testcase_groups" db:"testcase_groups"` + Tags []string `json:"tags" db:"tags"` + CreatedAt time.Time `json:"created_at" db:"created_at"` + UpdatedAt time.Time `json:"updated_at" db:"updated_at"` +} + +type TestcaseGroup struct { + ID int `json:"id" db:"id"` + OrderID int `json:"order_id" db:"order_id"` + ProblemID int `json:"problem_id" db:"problem_id"` + Name string `json:"name" db:"name"` + Testcases []Testcase `json:"testcases" db:"testcases"` + Points int `json:"points" db:"points"` +} + +type Testcase struct { + ID int `json:"id" db:"id"` + OrderID int 
// Verdict is the outcome of judging a submission or a single test case.
type Verdict string

// Verdict values. PENDING and JUDGING are transient states; the rest are
// terminal outcomes using conventional online-judge abbreviations.
const (
	VerdictPending             Verdict = "PENDING"
	VerdictJudging             Verdict = "JUDGING"
	VerdictAccepted            Verdict = "AC"
	VerdictWrongAnswer         Verdict = "WA"
	VerdictTimeLimitExceeded   Verdict = "TLE"
	VerdictMemoryLimitExceeded Verdict = "MLE"
	VerdictRuntimeError        Verdict = "RE"
	VerdictCompilationError    Verdict = "CE"
	VerdictSystemError         Verdict = "SE"
	VerdictInternalError       Verdict = "IE"
	VerdictSkipped             Verdict = "SKIPPED"
)

// Submission represents a user's submission to a problem.
//
// CPUTime/Memory units are not specified here — the grader passes limits as
// microseconds and bytes, so presumably the same units apply; confirm against
// the grader before relying on this.
type Submission struct {
	ID          int64     `json:"id" db:"id"`
	ProblemID   int64     `json:"problem_id" db:"problem_id"`
	UserID      int64     `json:"user_id" db:"user_id"`
	Code        string    `json:"code" db:"code"`
	Language    string    `json:"language" db:"language"`
	Verdict     Verdict   `json:"verdict" db:"verdict"`
	Score       int       `json:"score" db:"score"`
	CPUTime     int64     `json:"cpu_time" db:"cpu_time"`
	Memory      int64     `json:"memory" db:"memory"`
	Message     string    `json:"message" db:"message"`
	TestsPassed int       `json:"tests_passed" db:"tests_passed"`
	TestsTotal  int       `json:"tests_total" db:"tests_total"`
	CreatedAt   time.Time `json:"created_at" db:"created_at"`
	UpdatedAt   time.Time `json:"updated_at" db:"updated_at"`

	// TestcaseResults is populated during grading and persisted separately
	// (see SubmissionsRepository.InsertTestcaseResults).
	TestcaseResults []TestcaseResult `json:"testcase_results" db:"testcase_results"`
}
// Language represents a supported programming language.
//
// CompileCommand/ExecuteCommand look like command templates consumed by the
// grader, and TimeMultiplier/MemoryMultiplier presumably scale a problem's
// limits for this language — semantics are not visible here; confirm against
// the grader implementation.
type Language struct {
	ID               string  `json:"id"`
	Name             string  `json:"name"`
	Extension        string  `json:"extension"`
	CompileCommand   string  `json:"compile_command"`
	ExecuteCommand   string  `json:"execute_command"`
	Version          string  `json:"version"`
	TimeMultiplier   float64 `json:"time_multiplier"`
	MemoryMultiplier float64 `json:"memory_multiplier"`
}
+type Handler func(context.Context, []byte) error + +type Consumer interface { + Run(ctx context.Context, handler Handler) +} + +type amqpConsumer struct { + cfg config.RabbitMQConfig + log zerolog.Logger + prefetch int +} + +// NewConsumer returns a RabbitMQ-backed consumer. If the configuration is incomplete, nil is returned. +func NewConsumer(cfg config.RabbitMQConfig, log zerolog.Logger, prefetch int) Consumer { + if cfg.URL == "" || cfg.Queue == "" { + return nil + } + + if prefetch <= 0 { + prefetch = 1 + } + return &amqpConsumer{ + cfg: cfg, + log: log, + prefetch: prefetch, + } +} + +func (c *amqpConsumer) Run(ctx context.Context, handler Handler) { + backoff := time.Second + for { + if err := c.consumeOnce(ctx, handler); err != nil { + c.log.Error().Err(err).Msg("rabbitmq consumer exited") + } else { + c.log.Info().Msg("rabbitmq consumer stopped") + } + + select { + case <-ctx.Done(): + return + case <-time.After(backoff): + } + + if backoff < 30*time.Second { + backoff *= 2 + if backoff > 30*time.Second { + backoff = 30 * time.Second + } + } + } +} + +func (c *amqpConsumer) consumeOnce(ctx context.Context, handler Handler) error { + conn, err := amqp.Dial(c.cfg.URL) + if err != nil { + return fmt.Errorf("connect to rabbitmq: %w", err) + } + defer conn.Close() + + ch, err := conn.Channel() + if err != nil { + return fmt.Errorf("open channel: %w", err) + } + defer ch.Close() + + if err := ch.Qos(c.prefetch, 0, false); err != nil { + c.log.Warn().Err(err).Msg("failed to set rabbitmq qos") + } + + deliveries, err := ch.Consume(c.cfg.Queue, "", false, false, false, false, nil) + if err != nil { + return fmt.Errorf("consume queue: %w", err) + } + + for { + select { + case <-ctx.Done(): + return nil + case msg, ok := <-deliveries: + if !ok { + return fmt.Errorf("rabbitmq deliveries channel closed") + } + + itemCtx, cancel := context.WithCancel(ctx) + err := handler(itemCtx, msg.Body) + cancel() + + if err != nil { + c.log.Error().Err(err).Msg("handler failed, 
// Decode is a helper to unmarshal JSON payloads into a destination struct.
// dst must be a non-nil pointer; the error from encoding/json is returned
// unchanged. (`any` replaces `interface{}` per modern Go idiom — identical
// type, no caller impact.)
func Decode(body []byte, dst any) error {
	return json.Unmarshal(body, dst)
}
// GetProblemDetails loads one problem together with its testcase groups and
// their testcases in a single round trip, using a double LEFT JOIN so a
// problem with no groups (or a group with no testcases) still produces a row.
// The flat result set is stitched back into the nested Problem model; group
// insertion order from the query's ORDER BY is preserved via groupOrder.
// Returns sql.ErrNoRows when the problem does not exist.
//
// Note: Title/Description/Difficulty are intentionally not selected here —
// only the fields the grader needs.
func (r *ProblemsRepository) GetProblemDetails(ctx context.Context, problemID int) (*models.Problem, error) {
	const query = `
	SELECT
		p.id,
		p.time_limit,
		p.memory_limit,
		p.tags,
		p.created_at,
		p.updated_at,
		g.id,
		g.order_id,
		g.name,
		g.points,
		t.id,
		t.order_id,
		t.testcase_group_id,
		t.input,
		t.output,
		t.is_hidden
	FROM problems p
	LEFT JOIN testcase_groups g ON p.id = g.problem_id
	LEFT JOIN testcases t ON g.id = t.testcase_group_id
	WHERE p.id = $1
	ORDER BY g.order_id NULLS LAST, t.order_id NULLS LAST;
	`

	rows, err := r.db.QueryContext(ctx, query, problemID)
	if err != nil {
		return nil, err
	}
	defer rows.Close()

	var (
		p *models.Problem
		// groupByID deduplicates groups across the joined rows; groupOrder
		// remembers first-seen order (maps iterate randomly in Go).
		groupByID  = map[int]*models.TestcaseGroup{}
		groupOrder []int
	)

	for rows.Next() {
		// Problem fields
		var (
			pID         int
			timeLimit   int64
			memoryLimit int64
			tags        []string
			createdAt   time.Time
			updatedAt   time.Time
		)

		// Group fields (nullable because LEFT JOIN)
		var (
			gID      sql.NullInt64
			gName    sql.NullString
			gPoints  sql.NullInt64
			gOrderID sql.NullInt64
		)

		// Testcase fields (nullable because LEFT JOIN)
		var (
			tID      sql.NullInt64
			tGroupID sql.NullInt64
			tOrderID sql.NullInt64
			tInput   sql.NullString
			tOutput  sql.NullString
			tHidden  sql.NullBool
		)

		// Scan order must match the SELECT column order exactly.
		if err := rows.Scan(
			&pID,
			&timeLimit,
			&memoryLimit,
			pq.Array(&tags),
			&createdAt,
			&updatedAt,
			&gID,
			&gOrderID,
			&gName,
			&gPoints,
			&tID,
			&tOrderID,
			&tGroupID,
			&tInput,
			&tOutput,
			&tHidden,
		); err != nil {
			return nil, err
		}

		// Create Problem once — every row repeats the same problem columns.
		if p == nil {
			p = &models.Problem{
				ID:          pID,
				TimeLimit:   timeLimit,
				MemoryLimit: memoryLimit,
				Tags:        tags,
				CreatedAt:   createdAt,
				UpdatedAt:   updatedAt,
			}
		}

		// If there is a group row, ensure group exists in map
		var grp *models.TestcaseGroup
		if gID.Valid {
			id := int(gID.Int64)
			grp = groupByID[id]
			if grp == nil {
				grp = &models.TestcaseGroup{
					ID:        id,
					OrderID:   int(gOrderID.Int64),
					ProblemID: pID,
					Name:      gName.String,
					Points:    int(gPoints.Int64),
				}
				groupByID[id] = grp
				groupOrder = append(groupOrder, id)
			}
		}

		// If there is a testcase row, attach it to its (already-ensured) group.
		if tID.Valid {
			tc := models.Testcase{
				ID:              int(tID.Int64),
				OrderID:         int(tOrderID.Int64),
				TestcaseGroupID: int(tGroupID.Int64),
				Input:           tInput.String,
				Output:          tOutput.String,
				IsHidden:        tHidden.Valid && tHidden.Bool,
			}

			if grp != nil {
				grp.Testcases = append(grp.Testcases, tc)
			}
		}
	}

	if err := rows.Err(); err != nil {
		return nil, err
	}
	if p == nil {
		return nil, sql.ErrNoRows
	}

	// Move groups from map into slice, preserving order
	p.TestcaseGroups = make([]models.TestcaseGroup, 0, len(groupOrder))
	for _, id := range groupOrder {
		p.TestcaseGroups = append(p.TestcaseGroups, *groupByID[id])
	}

	return p, nil
}
context.Context, problemID int) ([]models.Testcase, error) { + const query = ` + SELECT + t.id, + t.order_id, + t.testcase_group_id, + t.input, + t.output, + t.is_hidden + FROM testcases t + JOIN testcase_groups g ON t.testcase_group_id = g.id + WHERE g.problem_id = $1 + ORDER BY g.order_id, t.order_id; + ` + + rows, err := r.db.QueryContext(ctx, query, problemID) + if err != nil { + return nil, err + } + defer rows.Close() + + var testcases []models.Testcase + for rows.Next() { + var tc models.Testcase + if err := rows.Scan( + &tc.ID, + &tc.OrderID, + &tc.TestcaseGroupID, + &tc.Input, + &tc.Output, + &tc.IsHidden, + ); err != nil { + return nil, err + } + testcases = append(testcases, tc) + } + + if err := rows.Err(); err != nil { + return nil, err + } + return testcases, nil +} + +func (r *ProblemsRepository) UpdateSubmissionResult(ctx context.Context, subID int, sub *models.Submission) error { + _, err := r.db.ExecContext(ctx, "UPDATE submissions SET verdict = $1, score = $2, message = $3 WHERE id = $4", sub.Verdict, sub.Score, sub.Message, subID) + return err +} + +func (r *ProblemsRepository) Close() error { + return r.db.Close() +} diff --git a/internal/repository/submissions.go b/internal/repository/submissions.go new file mode 100644 index 0000000..ccd5746 --- /dev/null +++ b/internal/repository/submissions.go @@ -0,0 +1,101 @@ +package repository + +import ( + "context" + "database/sql" + + "github.com/joshjms/castletown/internal/config" + "github.com/joshjms/castletown/internal/models" +) + +type SubmissionsRepository struct { + db *sql.DB +} + +type TestcaseResultsWriter interface { + InsertTestcaseResults(ctx context.Context, subID int64, results []models.TestcaseResult) error +} + +func NewSubmissionsRepository(cfg config.DatabaseConfig) (*SubmissionsRepository, error) { + db, err := sql.Open("postgres", cfg.DSN()) + if err != nil { + return nil, err + } + if err := db.Ping(); err != nil { + db.Close() + return nil, err + } + return 
// truncateString returns s cut to at most limit runes (not bytes, so
// multi-byte UTF-8 characters are never split). A non-positive limit yields
// the empty string; strings already within the limit are returned unchanged.
func truncateString(s string, limit int) string {
	if limit <= 0 {
		return ""
	}

	asRunes := []rune(s)
	if len(asRunes) > limit {
		return string(asRunes[:limit])
	}
	return s
}
// Get opens the object at bucket/key and returns it as a stream; the caller
// must Close it.
//
// Stat is called immediately so that a missing or inaccessible object is
// reported here rather than on first read (minio's GetObject appears to
// defer the request — confirm against minio-go docs). On that error path the
// object is closed to avoid leaking the underlying connection.
func (tcs *TestcaseStore) Get(ctx context.Context, bucket, key string) (io.ReadCloser, error) {
	obj, err := tcs.client.GetObject(ctx, bucket, key, minio.GetObjectOptions{})
	if err != nil {
		return nil, err
	}

	if _, err := obj.Stat(); err != nil {
		obj.Close()
		return nil, err
	}

	return obj, nil
}
new file mode 100644 index 0000000..9965a05 --- /dev/null +++ b/internal/utils/files.go @@ -0,0 +1,28 @@ +package utils + +import ( + "io" + "os" +) + +func FileCopy(src, dst string) (err error) { + srcFile, err := os.Open(src) + if err != nil { + return err + } + defer srcFile.Close() + + dstFile, err := os.Create(dst) + if err != nil { + return err + } + + if _, err := io.Copy(dstFile, srcFile); err != nil { + return err + } + if err := os.Chmod(dst, 0700); err != nil { + return err + } + + return dstFile.Close() +} diff --git a/internal/worker/worker.go b/internal/worker/worker.go new file mode 100644 index 0000000..37e3669 --- /dev/null +++ b/internal/worker/worker.go @@ -0,0 +1,98 @@ +package worker + +import ( + "context" + "encoding/json" + "fmt" + "os" + + "github.com/joshjms/castletown/internal/cache" + "github.com/joshjms/castletown/internal/config" + "github.com/joshjms/castletown/internal/grader" + "github.com/joshjms/castletown/internal/models" + "github.com/joshjms/castletown/internal/mq" + "github.com/joshjms/castletown/internal/repository" + "github.com/joshjms/castletown/internal/store" + "github.com/joshjms/castletown/internal/telemetry" + "github.com/rs/zerolog" +) + +type SubmissionGrader interface { + Handle(ctx context.Context, sub *models.Submission) error +} + +type Worker struct { + log zerolog.Logger + metrics *telemetry.Metrics + + g SubmissionGrader + + queueConsumer mq.Consumer +} + +func NewWorker(cfg *config.Config) (*Worker, error) { + problemsRepo, err := repository.NewProblemsRepository(cfg.Database) + if err != nil { + return nil, fmt.Errorf("create problems repository: %w", err) + } + + submissionsRepo, err := repository.NewSubmissionsRepository(cfg.Database) + if err != nil { + return nil, fmt.Errorf("create submissions repository: %w", err) + } + + testcaseStore, err := store.NewTestcaseStore(cfg.Minio) + if err != nil { + return nil, fmt.Errorf("create testcase store: %w", err) + } + + if err := 
// handleQueueMessage is the mq.Handler for the submissions queue: it decodes
// the JSON payload into a models.Submission and hands it to the grader.
//
// A non-nil return causes the consumer to Nack with requeue=true (see
// amqpConsumer.consumeOnce), so a permanently malformed payload is requeued
// and will fail forever — NOTE(review): consider acking/dropping undecodable
// messages instead; confirm the desired poison-message policy.
func (w *Worker) handleQueueMessage(ctx context.Context, body []byte) error {
	var sub models.Submission
	if err := json.Unmarshal(body, &sub); err != nil {
		return fmt.Errorf("invalid submission payload: %w", err)
	}
	w.log.Info().Int64("submission_id", sub.ID).Msg("Processing submission")
	if err := w.handle(ctx, &sub); err != nil {
		w.log.Error().Err(err).Int64("submission_id", sub.ID).Msg("Failed to process submission")
		return fmt.Errorf("handle submission: %w", err)
	}
	w.log.Info().Int64("submission_id", sub.ID).Msg("Finished processing submission")
	// NOTE(review): this logs the whole submission — including the user's
	// source code — at Info level; verify that is acceptable for log volume
	// and data-retention policy in production.
	w.log.Info().Any("submission", sub).Msg("Submission result")
	return nil
}
Process struct { - Image string `json:"image"` - Cmd []string `json:"cmd"` - Stdin string `json:"stdin"` - MemoryLimitMB int64 `json:"memoryLimitMB"` - TimeLimitMs uint64 `json:"timeLimitMs"` - ProcLimit int64 `json:"procLimit"` - Files []string `json:"files"` - Persist []string `json:"persist"` -} - -func (j *Job) Prepare() error { - j.mu.Lock() - defer j.mu.Unlock() - - if len(j.Procs) == 0 { - return fmt.Errorf("no processes specified") - } - - if err := verifyImages(j.Procs); err != nil { - return fmt.Errorf("invalid images: %w", err) - } - - if err := prepareFileDirs(j.ID, j.Procs); err != nil { - return fmt.Errorf("error preparing file directories: %w", err) - } - - return nil -} - -func (j *Job) ExecuteAll(ctx context.Context) ([]sandbox.Report, error) { - j.mu.Lock() - defer j.mu.Unlock() - - var reports []sandbox.Report - - for j.step < len(j.Procs) { - report, err := j.execute(ctx) - if err != nil { - return nil, err - } - reports = append(reports, report) - } - - return reports, nil -} - -func (j *Job) execute(ctx context.Context) (sandbox.Report, error) { - proc := j.Procs[j.step] - fileDeps, err := getFileDependencies(j.ID, j.Procs, j.Files, j.step) - if err != nil { - return sandbox.Report{}, fmt.Errorf("error getting file dependencies: %w", err) - } - - cfg := sandbox.GetDefaultConfig() - cfg.Args = proc.Cmd - cfg.RootfsImageDir = getImageDir(proc.Image) - cfg.BoxDir = getProcFileDir(j.ID, j.step) - cfg.Files = fileDeps - - if proc.TimeLimitMs > 0 { - cfg.TimeLimitMs = int64(proc.TimeLimitMs) - } - if proc.MemoryLimitMB > 0 { - cfg.Cgroup.Memory = int64(proc.MemoryLimitMB) * 1024 * 1024 - } - if proc.ProcLimit > 0 { - cfg.Cgroup.PidsLimit = proc.ProcLimit - } - cfg.Stdin = proc.Stdin - - containerId := fmt.Sprintf("%s-%d", j.ID, j.step) - if err := sandbox.GetManager().NewSandbox(containerId, cfg); err != nil { - return sandbox.Report{}, fmt.Errorf("cannot create sandbox for process %d: %v", j.step, err) - } - defer 
sandbox.GetManager().DestroySandbox(containerId) - - report, err := sandbox.GetManager().RunSandbox(ctx, containerId) - if err != nil { - return sandbox.Report{}, fmt.Errorf("error running process %d: %v", j.step, err) - } - - j.next() - - return report, nil -} - -func (j *Job) next() bool { - if j.step < len(j.Procs) { - j.step++ - return true - } - return false -} diff --git a/job/job_test.go b/job/job_test.go deleted file mode 100644 index 96cf4fb..0000000 --- a/job/job_test.go +++ /dev/null @@ -1,152 +0,0 @@ -package job_test - -import ( - "context" - "os" - "testing" - - "github.com/google/uuid" - "github.com/joshjms/castletown/config" - "github.com/joshjms/castletown/job" - "github.com/joshjms/castletown/sandbox" - "github.com/stretchr/testify/require" -) - -func TestMain(m *testing.M) { - sandbox.Init() - config.UseDefaults() - - job.NewJobPool() - sandbox.NewManager(config.MaxConcurrency) - - exitCode := m.Run() - - os.Exit(exitCode) -} - -func TestJob(t *testing.T) { - var err error - - jobId := uuid.NewString() - j := &job.Job{ - ID: jobId, - Procs: []job.Process{ - { - Image: "gcc:15-bookworm", - Cmd: []string{"g++", "-o", "main", "main.cpp"}, - Files: []string{"main.cpp"}, - Persist: []string{"main"}, - }, - { - Image: "gcc:15-bookworm", - Cmd: []string{"./main"}, - Stdin: "5\n", - Files: []string{"main"}, - }, - }, - Files: []job.File{ - { - Name: "main.cpp", - Content: ` -#include -int main() { - int n; - std::cin >> n; - std::cout << n * n << std::endl; - return 0; -} -`, - }, - }, - } - - err = j.Prepare() - require.NoError(t, err, "error preparing job: %v", err) - - reports, err := j.ExecuteAll(context.Background()) - require.NoError(t, err, "error executing job: %v", err) - require.Len(t, reports, 2, "expected 2 reports, got %d", len(reports)) - require.Equal(t, sandbox.STATUS_OK, reports[0].Status, "expected first report status to be OK, got %v", reports[0].Status) - require.Equal(t, sandbox.STATUS_OK, reports[1].Status, "expected second report 
status to be OK, got %v", reports[1].Status) - require.Equal(t, "25\n", reports[1].Stdout, "expected second report output to be '25', got '%s'", reports[1].Stdout) -} - -func TestJobAppend(t *testing.T) { - var err error - - pool := job.GetJobPool() - - jobId := uuid.NewString() - firstJob := &job.Job{ - ID: jobId, - Procs: []job.Process{ - { - Image: "gcc:15-bookworm", - Cmd: []string{"g++", "-o", "main", "main.cpp"}, - Files: []string{"main.cpp"}, - Persist: []string{"main"}, - }, - { - Image: "gcc:15-bookworm", - Cmd: []string{"./main"}, - Stdin: "5\n", - Files: []string{"main"}, - }, - }, - Files: []job.File{ - { - Name: "main.cpp", - Content: ` -#include -int main() { - int n; - std::cin >> n; - std::cout << n * n << std::endl; - return 0; -} -`, - }, - }, - } - - pool.AddOrAppendJob(firstJob) - err = firstJob.Prepare() - require.NoError(t, err, "error preparing job: %v", err) - - reports, err := firstJob.ExecuteAll(context.Background()) - - require.NoError(t, err, "error executing job: %v", err) - require.Len(t, reports, 2, "expected 2 reports, got %d", len(reports)) - require.Equal(t, sandbox.STATUS_OK, reports[0].Status, "expected first report status to be OK, got %v", reports[0].Status) - require.Equal(t, sandbox.STATUS_OK, reports[1].Status, "expected second report status to be OK, got %v", reports[1].Status) - require.Equal(t, "25\n", reports[1].Stdout, "expected second report output to be '25', got '%s'", reports[1].Stdout) - - anotherJob := &job.Job{ - ID: jobId, - Procs: []job.Process{ - { - Image: "gcc:15-bookworm", - Cmd: []string{"./main"}, - Stdin: "10\n", - Files: []string{"main"}, - }, - }, - } - - pool.AddOrAppendJob(anotherJob) - - pooledJob, exists := pool.Jobs[jobId] - require.True(t, exists, "expected job to exist in pool") - require.Len(t, pooledJob.Procs, 3, "expected 3 processes in pooled job, got %d", len(pooledJob.Procs)) - - reports, err = pooledJob.ExecuteAll(context.Background()) - - require.NoError(t, err, "error executing pooled 
job: %v", err) - require.Len(t, reports, 1, "expected 1 report, got %d", len(reports)) - require.Equal(t, sandbox.STATUS_OK, reports[0].Status, "expected first report status to be OK, got %v", reports[0].Status) - require.Equal(t, "100\n", reports[0].Stdout, "expected first report output to be '100', got '%s'", reports[0].Stdout) - - pool.RemoveJob(jobId) - _, exists = pool.Jobs[jobId] - require.False(t, exists, "expected job to be removed from pool") -} diff --git a/job/pool.go b/job/pool.go deleted file mode 100644 index e0016f7..0000000 --- a/job/pool.go +++ /dev/null @@ -1,57 +0,0 @@ -package job - -import "sync" - -var jp *JobPool - -type JobPool struct { - Jobs map[string]*Job - - mu sync.Mutex -} - -func NewJobPool() { - jp = &JobPool{ - Jobs: make(map[string]*Job), - } -} - -func GetJobPool() *JobPool { - return jp -} - -func (jp *JobPool) GetJob(id string) (*Job, bool) { - jp.mu.Lock() - defer jp.mu.Unlock() - - job, exists := jp.Jobs[id] - return job, exists -} - -func (jp *JobPool) AddOrAppendJob(job *Job) *Job { - jp.mu.Lock() - defer jp.mu.Unlock() - - if existingJob, exists := jp.Jobs[job.ID]; exists { - existingJob.append(job) - } else { - jp.Jobs[job.ID] = job - } - - return jp.Jobs[job.ID] -} - -func (jp *JobPool) RemoveJob(id string) { - jp.mu.Lock() - defer jp.mu.Unlock() - - delete(jp.Jobs, id) -} - -func (j *Job) append(other *Job) { - j.mu.Lock() - defer j.mu.Unlock() - - j.Files = append(j.Files, other.Files...) - j.Procs = append(j.Procs, other.Procs...) 
-} diff --git a/job/utils.go b/job/utils.go deleted file mode 100644 index cdd63c3..0000000 --- a/job/utils.go +++ /dev/null @@ -1,98 +0,0 @@ -package job - -import ( - "fmt" - "os" - "path/filepath" - "strings" - - "github.com/joshjms/castletown/config" - "github.com/joshjms/castletown/sandbox" -) - -func getImageDir(image string) string { - image = strings.Replace(image, ":", "-", 1) - return filepath.Join(config.ImagesDir, image) -} - -func verifyImages(procs []Process) error { - for _, process := range procs { - image := process.Image - rootfsDir := getImageDir(image) - - f, err := os.Stat(rootfsDir) - if os.IsNotExist(err) { - return fmt.Errorf("rootfs directory does not exist: %s", rootfsDir) - } - if !f.IsDir() { - return fmt.Errorf("rootfs path exists but is not a directory: %s", rootfsDir) - } - } - - return nil -} - -func prepareFileDirs(reqId string, procs []Process) error { - rootFileDir := filepath.Join(config.StorageDir, reqId) - if err := os.MkdirAll(rootFileDir, 0755); err != nil { - return fmt.Errorf("cannot create root files directory: %v", err) - } - - for i := range procs { - procDir := filepath.Join(rootFileDir, fmt.Sprintf("proc-%d", i)) - if err := os.MkdirAll(procDir, 0755); err != nil { - return fmt.Errorf("cannot create process directory: %v", err) - } - } - - return nil -} - -func getRootFileDir(reqId string) string { - return filepath.Join(config.StorageDir, reqId) -} - -func getProcFileDir(reqId string, procIndex int) string { - return filepath.Join(getRootFileDir(reqId), fmt.Sprintf("proc-%d", procIndex)) -} - -func getFileDependencies(reqId string, procs []Process, files []File, step int) ([]sandbox.File, error) { - fileMap := make(map[string]File) - for _, file := range files { - fileMap[file.Name] = file - } - - fileDeps := make([]sandbox.File, 0) - lastOcc := make(map[string]int) - - procDir := getProcFileDir(reqId, step) - - for i, proc := range procs[:step] { - for _, fileName := range proc.Persist { - lastOcc[fileName] = i - } - 
} - - proc := procs[step] - - for _, fileName := range proc.Files { - if _, exists := lastOcc[fileName]; !exists { - file, exists := fileMap[fileName] - if !exists { - return nil, fmt.Errorf("file %s not found", fileName) - } - - fileDeps = append(fileDeps, sandbox.File{ - Content: file.Content, - Dst: filepath.Join(procDir, fileName), - }) - } else { - fileDeps = append(fileDeps, sandbox.File{ - Src: filepath.Join(getProcFileDir(reqId, lastOcc[fileName]), fileName), - Dst: filepath.Join(procDir, fileName), - }) - } - } - - return fileDeps, nil -} diff --git a/main.go b/main.go index 521c8a7..b569520 100644 --- a/main.go +++ b/main.go @@ -17,11 +17,11 @@ package main import ( "github.com/joshjms/castletown/cmd" - "github.com/joshjms/castletown/sandbox" + "github.com/joshjms/castletown/internal/container" ) func init() { - sandbox.Init() + container.Init() } func main() { diff --git a/proto/common.pb.go b/proto/common.pb.go deleted file mode 100644 index c7bac16..0000000 --- a/proto/common.pb.go +++ /dev/null @@ -1,457 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.36.10 -// protoc v6.32.0 -// source: common.proto - -package proto - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" - unsafe "unsafe" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. 
- _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// Status represents the execution status -type Status int32 - -const ( - Status_STATUS_UNSPECIFIED Status = 0 - Status_STATUS_OK Status = 1 - Status_STATUS_RUNTIME_ERROR Status = 2 - Status_STATUS_TIME_LIMIT_EXCEEDED Status = 3 - Status_STATUS_MEMORY_LIMIT_EXCEEDED Status = 4 - Status_STATUS_OUTPUT_LIMIT_EXCEEDED Status = 5 - Status_STATUS_TERMINATED Status = 6 - Status_STATUS_UNKNOWN Status = 7 - Status_STATUS_SKIPPED Status = 8 -) - -// Enum value maps for Status. -var ( - Status_name = map[int32]string{ - 0: "STATUS_UNSPECIFIED", - 1: "STATUS_OK", - 2: "STATUS_RUNTIME_ERROR", - 3: "STATUS_TIME_LIMIT_EXCEEDED", - 4: "STATUS_MEMORY_LIMIT_EXCEEDED", - 5: "STATUS_OUTPUT_LIMIT_EXCEEDED", - 6: "STATUS_TERMINATED", - 7: "STATUS_UNKNOWN", - 8: "STATUS_SKIPPED", - } - Status_value = map[string]int32{ - "STATUS_UNSPECIFIED": 0, - "STATUS_OK": 1, - "STATUS_RUNTIME_ERROR": 2, - "STATUS_TIME_LIMIT_EXCEEDED": 3, - "STATUS_MEMORY_LIMIT_EXCEEDED": 4, - "STATUS_OUTPUT_LIMIT_EXCEEDED": 5, - "STATUS_TERMINATED": 6, - "STATUS_UNKNOWN": 7, - "STATUS_SKIPPED": 8, - } -) - -func (x Status) Enum() *Status { - p := new(Status) - *p = x - return p -} - -func (x Status) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (Status) Descriptor() protoreflect.EnumDescriptor { - return file_common_proto_enumTypes[0].Descriptor() -} - -func (Status) Type() protoreflect.EnumType { - return &file_common_proto_enumTypes[0] -} - -func (x Status) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use Status.Descriptor instead. 
-func (Status) EnumDescriptor() ([]byte, []int) { - return file_common_proto_rawDescGZIP(), []int{0} -} - -// File represents a file to be created in the sandbox -type File struct { - state protoimpl.MessageState `protogen:"open.v1"` - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Content string `protobuf:"bytes,2,opt,name=content,proto3" json:"content,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *File) Reset() { - *x = File{} - mi := &file_common_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *File) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*File) ProtoMessage() {} - -func (x *File) ProtoReflect() protoreflect.Message { - mi := &file_common_proto_msgTypes[0] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use File.ProtoReflect.Descriptor instead. 
-func (*File) Descriptor() ([]byte, []int) { - return file_common_proto_rawDescGZIP(), []int{0} -} - -func (x *File) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *File) GetContent() string { - if x != nil { - return x.Content - } - return "" -} - -// Process represents a single execution step -type Process struct { - state protoimpl.MessageState `protogen:"open.v1"` - Image string `protobuf:"bytes,1,opt,name=image,proto3" json:"image,omitempty"` - Cmd []string `protobuf:"bytes,2,rep,name=cmd,proto3" json:"cmd,omitempty"` - Stdin string `protobuf:"bytes,3,opt,name=stdin,proto3" json:"stdin,omitempty"` - MemoryLimitMb int64 `protobuf:"varint,4,opt,name=memory_limit_mb,json=memoryLimitMb,proto3" json:"memory_limit_mb,omitempty"` - TimeLimitMs uint64 `protobuf:"varint,5,opt,name=time_limit_ms,json=timeLimitMs,proto3" json:"time_limit_ms,omitempty"` - ProcLimit int64 `protobuf:"varint,6,opt,name=proc_limit,json=procLimit,proto3" json:"proc_limit,omitempty"` - Files []string `protobuf:"bytes,7,rep,name=files,proto3" json:"files,omitempty"` - Persist []string `protobuf:"bytes,8,rep,name=persist,proto3" json:"persist,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *Process) Reset() { - *x = Process{} - mi := &file_common_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *Process) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Process) ProtoMessage() {} - -func (x *Process) ProtoReflect() protoreflect.Message { - mi := &file_common_proto_msgTypes[1] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Process.ProtoReflect.Descriptor instead. 
-func (*Process) Descriptor() ([]byte, []int) { - return file_common_proto_rawDescGZIP(), []int{1} -} - -func (x *Process) GetImage() string { - if x != nil { - return x.Image - } - return "" -} - -func (x *Process) GetCmd() []string { - if x != nil { - return x.Cmd - } - return nil -} - -func (x *Process) GetStdin() string { - if x != nil { - return x.Stdin - } - return "" -} - -func (x *Process) GetMemoryLimitMb() int64 { - if x != nil { - return x.MemoryLimitMb - } - return 0 -} - -func (x *Process) GetTimeLimitMs() uint64 { - if x != nil { - return x.TimeLimitMs - } - return 0 -} - -func (x *Process) GetProcLimit() int64 { - if x != nil { - return x.ProcLimit - } - return 0 -} - -func (x *Process) GetFiles() []string { - if x != nil { - return x.Files - } - return nil -} - -func (x *Process) GetPersist() []string { - if x != nil { - return x.Persist - } - return nil -} - -// Report contains the execution results -type Report struct { - state protoimpl.MessageState `protogen:"open.v1"` - Status Status `protobuf:"varint,1,opt,name=status,proto3,enum=castletown.Status" json:"status,omitempty"` - ExitCode int32 `protobuf:"varint,2,opt,name=exit_code,json=exitCode,proto3" json:"exit_code,omitempty"` - Signal int32 `protobuf:"varint,3,opt,name=signal,proto3" json:"signal,omitempty"` - Stdout string `protobuf:"bytes,4,opt,name=stdout,proto3" json:"stdout,omitempty"` - Stderr string `protobuf:"bytes,5,opt,name=stderr,proto3" json:"stderr,omitempty"` - CpuTime uint64 `protobuf:"varint,6,opt,name=cpu_time,json=cpuTime,proto3" json:"cpu_time,omitempty"` - Memory uint64 `protobuf:"varint,7,opt,name=memory,proto3" json:"memory,omitempty"` - WallTime int64 `protobuf:"varint,8,opt,name=wall_time,json=wallTime,proto3" json:"wall_time,omitempty"` - StartAt int64 `protobuf:"varint,9,opt,name=start_at,json=startAt,proto3" json:"start_at,omitempty"` // Unix timestamp in nanoseconds - FinishAt int64 `protobuf:"varint,10,opt,name=finish_at,json=finishAt,proto3" 
json:"finish_at,omitempty"` // Unix timestamp in nanoseconds - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *Report) Reset() { - *x = Report{} - mi := &file_common_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *Report) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Report) ProtoMessage() {} - -func (x *Report) ProtoReflect() protoreflect.Message { - mi := &file_common_proto_msgTypes[2] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Report.ProtoReflect.Descriptor instead. -func (*Report) Descriptor() ([]byte, []int) { - return file_common_proto_rawDescGZIP(), []int{2} -} - -func (x *Report) GetStatus() Status { - if x != nil { - return x.Status - } - return Status_STATUS_UNSPECIFIED -} - -func (x *Report) GetExitCode() int32 { - if x != nil { - return x.ExitCode - } - return 0 -} - -func (x *Report) GetSignal() int32 { - if x != nil { - return x.Signal - } - return 0 -} - -func (x *Report) GetStdout() string { - if x != nil { - return x.Stdout - } - return "" -} - -func (x *Report) GetStderr() string { - if x != nil { - return x.Stderr - } - return "" -} - -func (x *Report) GetCpuTime() uint64 { - if x != nil { - return x.CpuTime - } - return 0 -} - -func (x *Report) GetMemory() uint64 { - if x != nil { - return x.Memory - } - return 0 -} - -func (x *Report) GetWallTime() int64 { - if x != nil { - return x.WallTime - } - return 0 -} - -func (x *Report) GetStartAt() int64 { - if x != nil { - return x.StartAt - } - return 0 -} - -func (x *Report) GetFinishAt() int64 { - if x != nil { - return x.FinishAt - } - return 0 -} - -var File_common_proto protoreflect.FileDescriptor - -const file_common_proto_rawDesc = "" + - "\n" + - "\fcommon.proto\x12\n" + - "castletown\"4\n" + - 
"\x04File\x12\x12\n" + - "\x04name\x18\x01 \x01(\tR\x04name\x12\x18\n" + - "\acontent\x18\x02 \x01(\tR\acontent\"\xe2\x01\n" + - "\aProcess\x12\x14\n" + - "\x05image\x18\x01 \x01(\tR\x05image\x12\x10\n" + - "\x03cmd\x18\x02 \x03(\tR\x03cmd\x12\x14\n" + - "\x05stdin\x18\x03 \x01(\tR\x05stdin\x12&\n" + - "\x0fmemory_limit_mb\x18\x04 \x01(\x03R\rmemoryLimitMb\x12\"\n" + - "\rtime_limit_ms\x18\x05 \x01(\x04R\vtimeLimitMs\x12\x1d\n" + - "\n" + - "proc_limit\x18\x06 \x01(\x03R\tprocLimit\x12\x14\n" + - "\x05files\x18\a \x03(\tR\x05files\x12\x18\n" + - "\apersist\x18\b \x03(\tR\apersist\"\xa1\x02\n" + - "\x06Report\x12*\n" + - "\x06status\x18\x01 \x01(\x0e2\x12.castletown.StatusR\x06status\x12\x1b\n" + - "\texit_code\x18\x02 \x01(\x05R\bexitCode\x12\x16\n" + - "\x06signal\x18\x03 \x01(\x05R\x06signal\x12\x16\n" + - "\x06stdout\x18\x04 \x01(\tR\x06stdout\x12\x16\n" + - "\x06stderr\x18\x05 \x01(\tR\x06stderr\x12\x19\n" + - "\bcpu_time\x18\x06 \x01(\x04R\acpuTime\x12\x16\n" + - "\x06memory\x18\a \x01(\x04R\x06memory\x12\x1b\n" + - "\twall_time\x18\b \x01(\x03R\bwallTime\x12\x19\n" + - "\bstart_at\x18\t \x01(\x03R\astartAt\x12\x1b\n" + - "\tfinish_at\x18\n" + - " \x01(\x03R\bfinishAt*\xec\x01\n" + - "\x06Status\x12\x16\n" + - "\x12STATUS_UNSPECIFIED\x10\x00\x12\r\n" + - "\tSTATUS_OK\x10\x01\x12\x18\n" + - "\x14STATUS_RUNTIME_ERROR\x10\x02\x12\x1e\n" + - "\x1aSTATUS_TIME_LIMIT_EXCEEDED\x10\x03\x12 \n" + - "\x1cSTATUS_MEMORY_LIMIT_EXCEEDED\x10\x04\x12 \n" + - "\x1cSTATUS_OUTPUT_LIMIT_EXCEEDED\x10\x05\x12\x15\n" + - "\x11STATUS_TERMINATED\x10\x06\x12\x12\n" + - "\x0eSTATUS_UNKNOWN\x10\a\x12\x12\n" + - "\x0eSTATUS_SKIPPED\x10\bB%Z#github.com/joshjms/castletown/protob\x06proto3" - -var ( - file_common_proto_rawDescOnce sync.Once - file_common_proto_rawDescData []byte -) - -func file_common_proto_rawDescGZIP() []byte { - file_common_proto_rawDescOnce.Do(func() { - file_common_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_common_proto_rawDesc), 
len(file_common_proto_rawDesc))) - }) - return file_common_proto_rawDescData -} - -var file_common_proto_enumTypes = make([]protoimpl.EnumInfo, 1) -var file_common_proto_msgTypes = make([]protoimpl.MessageInfo, 3) -var file_common_proto_goTypes = []any{ - (Status)(0), // 0: castletown.Status - (*File)(nil), // 1: castletown.File - (*Process)(nil), // 2: castletown.Process - (*Report)(nil), // 3: castletown.Report -} -var file_common_proto_depIdxs = []int32{ - 0, // 0: castletown.Report.status:type_name -> castletown.Status - 1, // [1:1] is the sub-list for method output_type - 1, // [1:1] is the sub-list for method input_type - 1, // [1:1] is the sub-list for extension type_name - 1, // [1:1] is the sub-list for extension extendee - 0, // [0:1] is the sub-list for field type_name -} - -func init() { file_common_proto_init() } -func file_common_proto_init() { - if File_common_proto != nil { - return - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: unsafe.Slice(unsafe.StringData(file_common_proto_rawDesc), len(file_common_proto_rawDesc)), - NumEnums: 1, - NumMessages: 3, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_common_proto_goTypes, - DependencyIndexes: file_common_proto_depIdxs, - EnumInfos: file_common_proto_enumTypes, - MessageInfos: file_common_proto_msgTypes, - }.Build() - File_common_proto = out.File - file_common_proto_goTypes = nil - file_common_proto_depIdxs = nil -} diff --git a/proto/common.proto b/proto/common.proto deleted file mode 100644 index 662a2b6..0000000 --- a/proto/common.proto +++ /dev/null @@ -1,50 +0,0 @@ -syntax = "proto3"; - -package castletown; - -option go_package = "github.com/joshjms/castletown/proto"; - -// File represents a file to be created in the sandbox -message File { - string name = 1; - string content = 2; -} - -// Process represents a single execution step -message Process { - string image = 1; - repeated 
string cmd = 2; - string stdin = 3; - int64 memory_limit_mb = 4; - uint64 time_limit_ms = 5; - int64 proc_limit = 6; - repeated string files = 7; - repeated string persist = 8; -} - -// Report contains the execution results -message Report { - Status status = 1; - int32 exit_code = 2; - int32 signal = 3; - string stdout = 4; - string stderr = 5; - uint64 cpu_time = 6; - uint64 memory = 7; - int64 wall_time = 8; - int64 start_at = 9; // Unix timestamp in nanoseconds - int64 finish_at = 10; // Unix timestamp in nanoseconds -} - -// Status represents the execution status -enum Status { - STATUS_UNSPECIFIED = 0; - STATUS_OK = 1; - STATUS_RUNTIME_ERROR = 2; - STATUS_TIME_LIMIT_EXCEEDED = 3; - STATUS_MEMORY_LIMIT_EXCEEDED = 4; - STATUS_OUTPUT_LIMIT_EXCEEDED = 5; - STATUS_TERMINATED = 6; - STATUS_UNKNOWN = 7; - STATUS_SKIPPED = 8; -} diff --git a/proto/done.pb.go b/proto/done.pb.go deleted file mode 100644 index a62f29d..0000000 --- a/proto/done.pb.go +++ /dev/null @@ -1,168 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.36.10 -// protoc v6.32.0 -// source: done.proto - -package proto - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" - unsafe "unsafe" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. 
- _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// DoneRequest contains the job ID to mark as done -type DoneRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *DoneRequest) Reset() { - *x = DoneRequest{} - mi := &file_done_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *DoneRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*DoneRequest) ProtoMessage() {} - -func (x *DoneRequest) ProtoReflect() protoreflect.Message { - mi := &file_done_proto_msgTypes[0] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use DoneRequest.ProtoReflect.Descriptor instead. -func (*DoneRequest) Descriptor() ([]byte, []int) { - return file_done_proto_rawDescGZIP(), []int{0} -} - -func (x *DoneRequest) GetId() string { - if x != nil { - return x.Id - } - return "" -} - -// DoneResponse is an empty response -type DoneResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *DoneResponse) Reset() { - *x = DoneResponse{} - mi := &file_done_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *DoneResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*DoneResponse) ProtoMessage() {} - -func (x *DoneResponse) ProtoReflect() protoreflect.Message { - mi := &file_done_proto_msgTypes[1] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: 
Use DoneResponse.ProtoReflect.Descriptor instead. -func (*DoneResponse) Descriptor() ([]byte, []int) { - return file_done_proto_rawDescGZIP(), []int{1} -} - -var File_done_proto protoreflect.FileDescriptor - -const file_done_proto_rawDesc = "" + - "\n" + - "\n" + - "done.proto\x12\n" + - "castletown\"\x1d\n" + - "\vDoneRequest\x12\x0e\n" + - "\x02id\x18\x01 \x01(\tR\x02id\"\x0e\n" + - "\fDoneResponse2H\n" + - "\vDoneService\x129\n" + - "\x04Done\x12\x17.castletown.DoneRequest\x1a\x18.castletown.DoneResponseB%Z#github.com/joshjms/castletown/protob\x06proto3" - -var ( - file_done_proto_rawDescOnce sync.Once - file_done_proto_rawDescData []byte -) - -func file_done_proto_rawDescGZIP() []byte { - file_done_proto_rawDescOnce.Do(func() { - file_done_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_done_proto_rawDesc), len(file_done_proto_rawDesc))) - }) - return file_done_proto_rawDescData -} - -var file_done_proto_msgTypes = make([]protoimpl.MessageInfo, 2) -var file_done_proto_goTypes = []any{ - (*DoneRequest)(nil), // 0: castletown.DoneRequest - (*DoneResponse)(nil), // 1: castletown.DoneResponse -} -var file_done_proto_depIdxs = []int32{ - 0, // 0: castletown.DoneService.Done:input_type -> castletown.DoneRequest - 1, // 1: castletown.DoneService.Done:output_type -> castletown.DoneResponse - 1, // [1:2] is the sub-list for method output_type - 0, // [0:1] is the sub-list for method input_type - 0, // [0:0] is the sub-list for extension type_name - 0, // [0:0] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name -} - -func init() { file_done_proto_init() } -func file_done_proto_init() { - if File_done_proto != nil { - return - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: unsafe.Slice(unsafe.StringData(file_done_proto_rawDesc), len(file_done_proto_rawDesc)), - NumEnums: 0, - NumMessages: 2, - 
NumExtensions: 0, - NumServices: 1, - }, - GoTypes: file_done_proto_goTypes, - DependencyIndexes: file_done_proto_depIdxs, - MessageInfos: file_done_proto_msgTypes, - }.Build() - File_done_proto = out.File - file_done_proto_goTypes = nil - file_done_proto_depIdxs = nil -} diff --git a/proto/done.proto b/proto/done.proto deleted file mode 100644 index c8aea28..0000000 --- a/proto/done.proto +++ /dev/null @@ -1,19 +0,0 @@ -syntax = "proto3"; - -package castletown; - -option go_package = "github.com/joshjms/castletown/proto"; - -// DoneService handles job completion notifications -service DoneService { - rpc Done(DoneRequest) returns (DoneResponse); -} - -// DoneRequest contains the job ID to mark as done -message DoneRequest { - string id = 1; -} - -// DoneResponse is an empty response -message DoneResponse { -} diff --git a/proto/done_grpc.pb.go b/proto/done_grpc.pb.go deleted file mode 100644 index a3a90dd..0000000 --- a/proto/done_grpc.pb.go +++ /dev/null @@ -1,125 +0,0 @@ -// Code generated by protoc-gen-go-grpc. DO NOT EDIT. -// versions: -// - protoc-gen-go-grpc v1.5.1 -// - protoc v6.32.0 -// source: done.proto - -package proto - -import ( - context "context" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" -) - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.64.0 or later. -const _ = grpc.SupportPackageIsVersion9 - -const ( - DoneService_Done_FullMethodName = "/castletown.DoneService/Done" -) - -// DoneServiceClient is the client API for DoneService service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. 
-// -// DoneService handles job completion notifications -type DoneServiceClient interface { - Done(ctx context.Context, in *DoneRequest, opts ...grpc.CallOption) (*DoneResponse, error) -} - -type doneServiceClient struct { - cc grpc.ClientConnInterface -} - -func NewDoneServiceClient(cc grpc.ClientConnInterface) DoneServiceClient { - return &doneServiceClient{cc} -} - -func (c *doneServiceClient) Done(ctx context.Context, in *DoneRequest, opts ...grpc.CallOption) (*DoneResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - out := new(DoneResponse) - err := c.cc.Invoke(ctx, DoneService_Done_FullMethodName, in, out, cOpts...) - if err != nil { - return nil, err - } - return out, nil -} - -// DoneServiceServer is the server API for DoneService service. -// All implementations must embed UnimplementedDoneServiceServer -// for forward compatibility. -// -// DoneService handles job completion notifications -type DoneServiceServer interface { - Done(context.Context, *DoneRequest) (*DoneResponse, error) - mustEmbedUnimplementedDoneServiceServer() -} - -// UnimplementedDoneServiceServer must be embedded to have -// forward compatible implementations. -// -// NOTE: this should be embedded by value instead of pointer to avoid a nil -// pointer dereference when methods are called. -type UnimplementedDoneServiceServer struct{} - -func (UnimplementedDoneServiceServer) Done(context.Context, *DoneRequest) (*DoneResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Done not implemented") -} -func (UnimplementedDoneServiceServer) mustEmbedUnimplementedDoneServiceServer() {} -func (UnimplementedDoneServiceServer) testEmbeddedByValue() {} - -// UnsafeDoneServiceServer may be embedded to opt out of forward compatibility for this service. -// Use of this interface is not recommended, as added methods to DoneServiceServer will -// result in compilation errors. 
-type UnsafeDoneServiceServer interface { - mustEmbedUnimplementedDoneServiceServer() -} - -func RegisterDoneServiceServer(s grpc.ServiceRegistrar, srv DoneServiceServer) { - // If the following call pancis, it indicates UnimplementedDoneServiceServer was - // embedded by pointer and is nil. This will cause panics if an - // unimplemented method is ever invoked, so we test this at initialization - // time to prevent it from happening at runtime later due to I/O. - if t, ok := srv.(interface{ testEmbeddedByValue() }); ok { - t.testEmbeddedByValue() - } - s.RegisterService(&DoneService_ServiceDesc, srv) -} - -func _DoneService_Done_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(DoneRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(DoneServiceServer).Done(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: DoneService_Done_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(DoneServiceServer).Done(ctx, req.(*DoneRequest)) - } - return interceptor(ctx, in, info, handler) -} - -// DoneService_ServiceDesc is the grpc.ServiceDesc for DoneService service. -// It's only intended for direct use with grpc.RegisterService, -// and not to be introspected or modified (even as a copy) -var DoneService_ServiceDesc = grpc.ServiceDesc{ - ServiceName: "castletown.DoneService", - HandlerType: (*DoneServiceServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "Done", - Handler: _DoneService_Done_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "done.proto", -} diff --git a/proto/exec.pb.go b/proto/exec.pb.go deleted file mode 100644 index 3d05dbb..0000000 --- a/proto/exec.pb.go +++ /dev/null @@ -1,211 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. 
-// versions: -// protoc-gen-go v1.36.10 -// protoc v6.32.0 -// source: exec.proto - -package proto - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" - unsafe "unsafe" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// ExecRequest contains the job execution parameters -type ExecRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - Files []*File `protobuf:"bytes,2,rep,name=files,proto3" json:"files,omitempty"` - Procs []*Process `protobuf:"bytes,3,rep,name=procs,proto3" json:"procs,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *ExecRequest) Reset() { - *x = ExecRequest{} - mi := &file_exec_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *ExecRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ExecRequest) ProtoMessage() {} - -func (x *ExecRequest) ProtoReflect() protoreflect.Message { - mi := &file_exec_proto_msgTypes[0] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ExecRequest.ProtoReflect.Descriptor instead. 
-func (*ExecRequest) Descriptor() ([]byte, []int) { - return file_exec_proto_rawDescGZIP(), []int{0} -} - -func (x *ExecRequest) GetId() string { - if x != nil { - return x.Id - } - return "" -} - -func (x *ExecRequest) GetFiles() []*File { - if x != nil { - return x.Files - } - return nil -} - -func (x *ExecRequest) GetProcs() []*Process { - if x != nil { - return x.Procs - } - return nil -} - -// ExecResponse contains the execution results -type ExecResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - Reports []*Report `protobuf:"bytes,2,rep,name=reports,proto3" json:"reports,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *ExecResponse) Reset() { - *x = ExecResponse{} - mi := &file_exec_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *ExecResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ExecResponse) ProtoMessage() {} - -func (x *ExecResponse) ProtoReflect() protoreflect.Message { - mi := &file_exec_proto_msgTypes[1] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ExecResponse.ProtoReflect.Descriptor instead. 
-func (*ExecResponse) Descriptor() ([]byte, []int) { - return file_exec_proto_rawDescGZIP(), []int{1} -} - -func (x *ExecResponse) GetId() string { - if x != nil { - return x.Id - } - return "" -} - -func (x *ExecResponse) GetReports() []*Report { - if x != nil { - return x.Reports - } - return nil -} - -var File_exec_proto protoreflect.FileDescriptor - -const file_exec_proto_rawDesc = "" + - "\n" + - "\n" + - "exec.proto\x12\n" + - "castletown\x1a\fcommon.proto\"p\n" + - "\vExecRequest\x12\x0e\n" + - "\x02id\x18\x01 \x01(\tR\x02id\x12&\n" + - "\x05files\x18\x02 \x03(\v2\x10.castletown.FileR\x05files\x12)\n" + - "\x05procs\x18\x03 \x03(\v2\x13.castletown.ProcessR\x05procs\"L\n" + - "\fExecResponse\x12\x0e\n" + - "\x02id\x18\x01 \x01(\tR\x02id\x12,\n" + - "\areports\x18\x02 \x03(\v2\x12.castletown.ReportR\areports2K\n" + - "\vExecService\x12<\n" + - "\aExecute\x12\x17.castletown.ExecRequest\x1a\x18.castletown.ExecResponseB%Z#github.com/joshjms/castletown/protob\x06proto3" - -var ( - file_exec_proto_rawDescOnce sync.Once - file_exec_proto_rawDescData []byte -) - -func file_exec_proto_rawDescGZIP() []byte { - file_exec_proto_rawDescOnce.Do(func() { - file_exec_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_exec_proto_rawDesc), len(file_exec_proto_rawDesc))) - }) - return file_exec_proto_rawDescData -} - -var file_exec_proto_msgTypes = make([]protoimpl.MessageInfo, 2) -var file_exec_proto_goTypes = []any{ - (*ExecRequest)(nil), // 0: castletown.ExecRequest - (*ExecResponse)(nil), // 1: castletown.ExecResponse - (*File)(nil), // 2: castletown.File - (*Process)(nil), // 3: castletown.Process - (*Report)(nil), // 4: castletown.Report -} -var file_exec_proto_depIdxs = []int32{ - 2, // 0: castletown.ExecRequest.files:type_name -> castletown.File - 3, // 1: castletown.ExecRequest.procs:type_name -> castletown.Process - 4, // 2: castletown.ExecResponse.reports:type_name -> castletown.Report - 0, // 3: castletown.ExecService.Execute:input_type 
-> castletown.ExecRequest - 1, // 4: castletown.ExecService.Execute:output_type -> castletown.ExecResponse - 4, // [4:5] is the sub-list for method output_type - 3, // [3:4] is the sub-list for method input_type - 3, // [3:3] is the sub-list for extension type_name - 3, // [3:3] is the sub-list for extension extendee - 0, // [0:3] is the sub-list for field type_name -} - -func init() { file_exec_proto_init() } -func file_exec_proto_init() { - if File_exec_proto != nil { - return - } - file_common_proto_init() - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: unsafe.Slice(unsafe.StringData(file_exec_proto_rawDesc), len(file_exec_proto_rawDesc)), - NumEnums: 0, - NumMessages: 2, - NumExtensions: 0, - NumServices: 1, - }, - GoTypes: file_exec_proto_goTypes, - DependencyIndexes: file_exec_proto_depIdxs, - MessageInfos: file_exec_proto_msgTypes, - }.Build() - File_exec_proto = out.File - file_exec_proto_goTypes = nil - file_exec_proto_depIdxs = nil -} diff --git a/proto/exec.proto b/proto/exec.proto deleted file mode 100644 index 989b247..0000000 --- a/proto/exec.proto +++ /dev/null @@ -1,25 +0,0 @@ -syntax = "proto3"; - -package castletown; - -import "common.proto"; - -option go_package = "github.com/joshjms/castletown/proto"; - -// ExecService handles job execution requests -service ExecService { - rpc Execute(ExecRequest) returns (ExecResponse); -} - -// ExecRequest contains the job execution parameters -message ExecRequest { - string id = 1; - repeated File files = 2; - repeated Process procs = 3; -} - -// ExecResponse contains the execution results -message ExecResponse { - string id = 1; - repeated Report reports = 2; -} diff --git a/proto/exec_grpc.pb.go b/proto/exec_grpc.pb.go deleted file mode 100644 index 21273e3..0000000 --- a/proto/exec_grpc.pb.go +++ /dev/null @@ -1,125 +0,0 @@ -// Code generated by protoc-gen-go-grpc. DO NOT EDIT. 
-// versions: -// - protoc-gen-go-grpc v1.5.1 -// - protoc v6.32.0 -// source: exec.proto - -package proto - -import ( - context "context" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" -) - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.64.0 or later. -const _ = grpc.SupportPackageIsVersion9 - -const ( - ExecService_Execute_FullMethodName = "/castletown.ExecService/Execute" -) - -// ExecServiceClient is the client API for ExecService service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. -// -// ExecService handles job execution requests -type ExecServiceClient interface { - Execute(ctx context.Context, in *ExecRequest, opts ...grpc.CallOption) (*ExecResponse, error) -} - -type execServiceClient struct { - cc grpc.ClientConnInterface -} - -func NewExecServiceClient(cc grpc.ClientConnInterface) ExecServiceClient { - return &execServiceClient{cc} -} - -func (c *execServiceClient) Execute(ctx context.Context, in *ExecRequest, opts ...grpc.CallOption) (*ExecResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - out := new(ExecResponse) - err := c.cc.Invoke(ctx, ExecService_Execute_FullMethodName, in, out, cOpts...) - if err != nil { - return nil, err - } - return out, nil -} - -// ExecServiceServer is the server API for ExecService service. -// All implementations must embed UnimplementedExecServiceServer -// for forward compatibility. -// -// ExecService handles job execution requests -type ExecServiceServer interface { - Execute(context.Context, *ExecRequest) (*ExecResponse, error) - mustEmbedUnimplementedExecServiceServer() -} - -// UnimplementedExecServiceServer must be embedded to have -// forward compatible implementations. 
-// -// NOTE: this should be embedded by value instead of pointer to avoid a nil -// pointer dereference when methods are called. -type UnimplementedExecServiceServer struct{} - -func (UnimplementedExecServiceServer) Execute(context.Context, *ExecRequest) (*ExecResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Execute not implemented") -} -func (UnimplementedExecServiceServer) mustEmbedUnimplementedExecServiceServer() {} -func (UnimplementedExecServiceServer) testEmbeddedByValue() {} - -// UnsafeExecServiceServer may be embedded to opt out of forward compatibility for this service. -// Use of this interface is not recommended, as added methods to ExecServiceServer will -// result in compilation errors. -type UnsafeExecServiceServer interface { - mustEmbedUnimplementedExecServiceServer() -} - -func RegisterExecServiceServer(s grpc.ServiceRegistrar, srv ExecServiceServer) { - // If the following call pancis, it indicates UnimplementedExecServiceServer was - // embedded by pointer and is nil. This will cause panics if an - // unimplemented method is ever invoked, so we test this at initialization - // time to prevent it from happening at runtime later due to I/O. 
- if t, ok := srv.(interface{ testEmbeddedByValue() }); ok { - t.testEmbeddedByValue() - } - s.RegisterService(&ExecService_ServiceDesc, srv) -} - -func _ExecService_Execute_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ExecRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ExecServiceServer).Execute(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: ExecService_Execute_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ExecServiceServer).Execute(ctx, req.(*ExecRequest)) - } - return interceptor(ctx, in, info, handler) -} - -// ExecService_ServiceDesc is the grpc.ServiceDesc for ExecService service. -// It's only intended for direct use with grpc.RegisterService, -// and not to be introspected or modified (even as a copy) -var ExecService_ServiceDesc = grpc.ServiceDesc{ - ServiceName: "castletown.ExecService", - HandlerType: (*ExecServiceServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "Execute", - Handler: _ExecService_Execute_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "exec.proto", -} diff --git a/sandbox/allocator/allocator.go b/sandbox/allocator/allocator.go deleted file mode 100644 index 7c7f51e..0000000 --- a/sandbox/allocator/allocator.go +++ /dev/null @@ -1,65 +0,0 @@ -package allocator - -import ( - "sync" -) - -const DEFAULT_SIZE uint32 = 65536 -const START_UID_GID uint32 = 1000000 - -type Range struct { - UidStart uint32 - UidSize uint32 - GidStart uint32 - GidSize uint32 -} - -type Allocator struct { - used map[int]bool - mex int - - mu sync.Mutex -} - -func NewAllocator() *Allocator { - return &Allocator{ - used: make(map[int]bool), - mex: 0, - } -} - -func (a *Allocator) Allocate() (int, Range) { - a.mu.Lock() - defer a.mu.Unlock() - - a.used[a.mex] = true - r := Range{ - 
UidStart: START_UID_GID + uint32(a.mex)*DEFAULT_SIZE, - UidSize: DEFAULT_SIZE, - GidStart: START_UID_GID + uint32(a.mex)*DEFAULT_SIZE, - GidSize: DEFAULT_SIZE, - } - use := a.mex - - for a.used[a.mex] { - a.mex++ - } - - return use, r -} - -func (a *Allocator) Free(i int) int { - a.mu.Lock() - defer a.mu.Unlock() - - if i < 0 || !a.used[i] { - return -1 - } - - delete(a.used, i) - if i < a.mex { - a.mex = i - } - - return 0 -} diff --git a/sandbox/allocator/allocator_test.go b/sandbox/allocator/allocator_test.go deleted file mode 100644 index 04b5e13..0000000 --- a/sandbox/allocator/allocator_test.go +++ /dev/null @@ -1,26 +0,0 @@ -package allocator_test - -import ( - "testing" - - "github.com/joshjms/castletown/config" - "github.com/joshjms/castletown/sandbox/allocator" - "github.com/stretchr/testify/require" -) - -func TestAllocator(t *testing.T) { - config.UseDefaults() - - a := allocator.NewAllocator() - - i1, _ := a.Allocate() - i2, _ := a.Allocate() - i3, _ := a.Allocate() - a.Free(i1) - i4, _ := a.Allocate() - - require.Equal(t, 0, i1, "incorrect index for first allocation, expected 0") - require.Equal(t, 1, i2, "incorrect index for second allocation, expected 1") - require.Equal(t, 2, i3, "incorrect index for third allocation, expected 2") - require.Equal(t, 0, i4, "incorrect index for fourth allocation, expected 0") -} diff --git a/sandbox/config.go b/sandbox/config.go deleted file mode 100644 index 34428ae..0000000 --- a/sandbox/config.go +++ /dev/null @@ -1,54 +0,0 @@ -package sandbox - -type Config struct { - RootfsImageDir string - - Args []string - Stdin string - Cwd string - Env []string - - UserNamespace *UserNamespaceConfig - - TimeLimitMs int64 - Cgroup *CgroupConfig - Rlimit *RlimitConfig - - BoxDir string - Files []File -} - -type UserNamespaceConfig struct { - HostUID uint32 - HostGID uint32 - ContainerUID uint32 - ContainerGID uint32 - UIDMapCount uint32 - GIDMapCount uint32 -} - -type CgroupConfig struct { - CpuShares uint64 - CpuQuota int64 
- CpusetCpus string - CpusetMems string - Memory int64 - PidsLimit int64 -} - -type RlimitConfig struct { - Core *Rlimit - Fsize *Rlimit - NoFile *Rlimit -} - -type Rlimit struct { - Hard uint64 - Soft uint64 -} - -type File struct { - Src string - Content string - Dst string -} diff --git a/sandbox/default.go b/sandbox/default.go deleted file mode 100644 index e4cc5e7..0000000 --- a/sandbox/default.go +++ /dev/null @@ -1,33 +0,0 @@ -package sandbox - -func GetDefaultConfig() *Config { - return &Config{ - Env: []string{ - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", - }, - Cwd: "/box", - TimeLimitMs: 1000, - Cgroup: &CgroupConfig{ - CpuShares: 100000, - CpuQuota: 100000, - Memory: 256 * 1024 * 1024, - PidsLimit: 100, - CpusetCpus: "0", - CpusetMems: "0", - }, - Rlimit: &RlimitConfig{ - Core: &Rlimit{ - Hard: 0, - Soft: 0, - }, - Fsize: &Rlimit{ - Hard: 1 * 1024 * 1024, - Soft: 1 * 1024 * 1024, - }, - NoFile: &Rlimit{ - Hard: 64, - Soft: 64, - }, - }, - } -} diff --git a/sandbox/files.go b/sandbox/files.go deleted file mode 100644 index cdf2b39..0000000 --- a/sandbox/files.go +++ /dev/null @@ -1,40 +0,0 @@ -package sandbox - -import ( - "os" - "path/filepath" -) - -func (s *Sandbox) prepareFiles() error { - for _, file := range s.config.Files { - if err := os.MkdirAll(filepath.Dir(file.Dst), 0755); err != nil { - return err - } - - if file.Src != "" { - err := copyFile(file.Src, file.Dst) - if err != nil { - return err - } - } else { - err := writeFile(file.Content, file.Dst) - if err != nil { - return err - } - } - } - - return nil -} - -func copyFile(src, dst string) error { - input, err := os.ReadFile(src) - if err != nil { - return err - } - return os.WriteFile(dst, input, 0744) -} - -func writeFile(content, dst string) error { - return os.WriteFile(dst, []byte(content), 0744) -} diff --git a/sandbox/manager.go b/sandbox/manager.go deleted file mode 100644 index 2deda48..0000000 --- a/sandbox/manager.go +++ /dev/null @@ -1,119 +0,0 @@ 
-package sandbox - -import ( - "context" - "fmt" - "sync" - - "github.com/joshjms/castletown/sandbox/allocator" -) - -var m *Manager - -type Manager struct { - sandboxes map[string]*Sandbox - allocatedRanges map[string]int - - allocator *allocator.Allocator - maxConcurrency int - - mu sync.Mutex - sem chan struct{} -} - -func NewManager(maxConcurrency int) error { - alloc := allocator.NewAllocator() - - m = &Manager{ - sandboxes: make(map[string]*Sandbox), - allocatedRanges: make(map[string]int), - allocator: alloc, - maxConcurrency: maxConcurrency, - sem: make(chan struct{}, maxConcurrency), - } - return nil -} - -func GetManager() *Manager { - return m -} - -func (m *Manager) NewSandbox(id string, cfg *Config) error { - m.mu.Lock() - defer m.mu.Unlock() - - if _, exists := m.sandboxes[id]; exists { - return fmt.Errorf("sandbox with id %q already exists", id) - } - - idx, rng := m.allocator.Allocate() - if idx == -1 { - return fmt.Errorf("no available uid/gid ranges") - } - - cfg.UserNamespace = &UserNamespaceConfig{ - HostUID: uint32(rng.UidStart), - ContainerUID: 0, - UIDMapCount: uint32(rng.UidSize), - HostGID: uint32(rng.GidStart), - ContainerGID: 0, - GIDMapCount: uint32(rng.GidSize), - } - - sandbox := &Sandbox{ - id: id, - config: cfg, - } - - m.sandboxes[id] = sandbox - m.allocatedRanges[id] = idx - - return nil -} - -func (m *Manager) RunSandbox(ctx context.Context, id string) (Report, error) { - m.sem <- struct{}{} - defer func() { <-m.sem }() - - m.mu.Lock() - sandbox, exists := m.sandboxes[id] - m.mu.Unlock() - - if !exists { - return Report{}, fmt.Errorf("sandbox with id %q does not exist", id) - } - - report, err := sandbox.Run(ctx) - if err != nil { - return Report{}, fmt.Errorf("error running sandbox %q: %w", id, err) - } - - return report, nil -} - -func (m *Manager) DestroySandbox(id string) error { - m.mu.Lock() - defer m.mu.Unlock() - - sandbox, exists := m.sandboxes[id] - if !exists { - return fmt.Errorf("sandbox with id %q does not exist", 
id) - } - - if err := sandbox.Destroy(); err != nil { - return fmt.Errorf("error destroying sandbox: %w", err) - } - - idx, ok := m.allocatedRanges[id] - if !ok { - return fmt.Errorf("no allocated range found for sandbox id %q", id) - } - - if freedIdx := m.allocator.Free(idx); freedIdx == -1 { - return fmt.Errorf("failed to free allocated range for sandbox id %q", id) - } - - delete(m.sandboxes, id) - delete(m.allocatedRanges, id) - return nil -} diff --git a/sandbox/rootfs.go b/sandbox/rootfs.go deleted file mode 100644 index d1a5ef3..0000000 --- a/sandbox/rootfs.go +++ /dev/null @@ -1,35 +0,0 @@ -package sandbox - -import ( - "fmt" - "os" - "path/filepath" - - "github.com/joshjms/castletown/config" -) - -func (s *Sandbox) prepareOverlayfs() error { - upperDir := filepath.Join(config.OverlayFSDir, fmt.Sprintf("sandbox-%s", s.id), "upper") - workDir := filepath.Join(config.OverlayFSDir, fmt.Sprintf("sandbox-%s", s.id), "work") - - if err := os.MkdirAll(upperDir, 0755); err != nil { - return err - } - if err := os.MkdirAll(workDir, 0755); err != nil { - return err - } - - return nil -} - -func (s *Sandbox) getLowerDir() string { - return s.config.RootfsImageDir -} - -func (s *Sandbox) getUpperDir() string { - return filepath.Join(config.OverlayFSDir, fmt.Sprintf("sandbox-%s", s.id), "upper") -} - -func (s *Sandbox) getWorkDir() string { - return filepath.Join(config.OverlayFSDir, fmt.Sprintf("sandbox-%s", s.id), "work") -} diff --git a/sandbox/sandbox.go b/sandbox/sandbox.go deleted file mode 100644 index 71a6416..0000000 --- a/sandbox/sandbox.go +++ /dev/null @@ -1,153 +0,0 @@ -package sandbox - -import ( - "bytes" - "context" - "fmt" - "os" - "path/filepath" - "time" - - "github.com/joshjms/castletown/config" - "github.com/opencontainers/runc/libcontainer" - "github.com/opencontainers/runc/libcontainer/configs" - "github.com/opencontainers/runc/libcontainer/specconv" - "golang.org/x/sys/unix" -) - -type Sandbox struct { - id string - config *Config - - container 
*libcontainer.Container -} - -func (s *Sandbox) GetId() string { - return s.id -} - -// Run runs a command inside the sandbox and returns a Report -func (s *Sandbox) Run(ctx context.Context) (Report, error) { - err := s.prepareOverlayfs() - if err != nil { - return Report{}, fmt.Errorf("error preparing rootfs: %w", err) - } - - if err := s.prepareFiles(); err != nil { - return Report{}, fmt.Errorf("error preparing files: %w", err) - } - - spec, err := s.createSpec() - if err != nil { - return Report{}, fmt.Errorf("error creating oci spec: %w", err) - } - - libcontainerConfig, err := specconv.CreateLibcontainerConfig(&specconv.CreateOpts{ - UseSystemdCgroup: false, - Spec: spec, - // RootlessEUID: true, - // RootlessCgroups: true, - }) - if err != nil { - return Report{}, fmt.Errorf("error creating libcontainer config: %w", err) - } - - container, err := libcontainer.Create(config.LibcontainerDir, s.id, libcontainerConfig) - if err != nil { - return Report{}, fmt.Errorf("error creating container: %w", err) - } - defer container.Destroy() - - noNewPrivileges := true - - var stdinBuf, stdoutBuf, stderrBuf bytes.Buffer - - if s.config.Stdin != "" { - stdinBuf.WriteString(s.config.Stdin) - } - - process := &libcontainer.Process{ - Args: s.config.Args, - Env: s.config.Env, - UID: 0, - GID: 0, - Cwd: s.config.Cwd, - NoNewPrivileges: &noNewPrivileges, - Stdin: &stdinBuf, - Stdout: &stdoutBuf, - Stderr: &stderrBuf, - Rlimits: getRlimits(s.config.Rlimit), - Init: true, - } - - startAt := time.Now() - - if err := container.Run(process); err != nil { - return Report{}, fmt.Errorf("error running container: %w", err) - } - - processFinished := make(chan interface{}, 1) - timeLimitExceeded := false - - go func() { - select { - case <-processFinished: - case <-time.After(time.Duration(s.config.TimeLimitMs) * time.Millisecond * 3): - timeLimitExceeded = true - container.Signal(unix.SIGKILL) - } - }() - - state, _ := process.Wait() - processFinished <- struct{}{} - - finishAt := 
time.Now() - - return s.makeReport(&stdoutBuf, &stderrBuf, state, timeLimitExceeded, startAt, finishAt) -} - -func getRlimits(cfg *RlimitConfig) []configs.Rlimit { - if cfg == nil { - return nil - } - - var rlimits []configs.Rlimit - - if cfg.Core != nil { - rlimits = append(rlimits, configs.Rlimit{ - Type: unix.RLIMIT_CORE, - Hard: cfg.Core.Hard, - Soft: cfg.Core.Soft, - }) - } - - if cfg.Fsize != nil { - rlimits = append(rlimits, configs.Rlimit{ - Type: unix.RLIMIT_FSIZE, - Hard: cfg.Fsize.Hard, - Soft: cfg.Fsize.Soft, - }) - } - - if cfg.NoFile != nil { - rlimits = append(rlimits, configs.Rlimit{ - Type: unix.RLIMIT_NOFILE, - Hard: cfg.NoFile.Hard, - Soft: cfg.NoFile.Soft, - }) - } - - return rlimits -} - -func (s *Sandbox) Destroy() error { - if s.container != nil { - s.container.Destroy() - } - - if err := os.RemoveAll(filepath.Join(config.OverlayFSDir, fmt.Sprintf("sandbox-%s", s.id))); err != nil { - return fmt.Errorf("error removing overlayfs dirs: %w", err) - } - - return nil -} diff --git a/sandbox/sandbox_test.go b/sandbox/sandbox_test.go deleted file mode 100644 index 9e8cb54..0000000 --- a/sandbox/sandbox_test.go +++ /dev/null @@ -1,166 +0,0 @@ -package sandbox_test - -import ( - "os" - "path/filepath" - "sort" - "testing" - - "github.com/joshjms/castletown/config" - "github.com/joshjms/castletown/sandbox" - "github.com/stretchr/testify/require" -) - -func TestMain(m *testing.M) { - sandbox.Init() - config.UseDefaults() - - sandbox.NewManager(2) - - files, err := os.ReadDir("test_files") - require.NoError(nil, err, "failed to read test files directory: %v", err) - - for _, f := range files { - fullPath := filepath.Join("test_files", f.Name()) - if err := os.Chown(fullPath, 0, 0); err != nil { - panic(err) - } - } - - exitCode := m.Run() - - for _, f := range files { - fullPath := filepath.Join("test_files", f.Name()) - if err := os.Chown(fullPath, 1000, 1000); err != nil { - panic(err) - } - } - - os.Exit(exitCode) -} - -func TestSandboxAdd(t 
*testing.T) { - expectedStatus := sandbox.STATUS_OK - expectedOutput := "15\n" - - tc := sandbox.Testcase{ - File: "test_files/add.cpp", - Stdin: "6 9\n", - ExpectedStatus: &expectedStatus, - ExpectedOutput: &expectedOutput, - TimeLimitMs: 1000, - } - - tc.Run(t) -} - -func TestSandboxTimeLimitExceededA(t *testing.T) { - expectedStatus := sandbox.STATUS_TIME_LIMIT_EXCEEDED - - tc := sandbox.Testcase{ - File: "test_files/tl1.cpp", - ExpectedStatus: &expectedStatus, - TimeLimitMs: 1000, - } - - tc.Run(t) -} - -func TestSandboxTimeLimitExceededB(t *testing.T) { - expectedStatus := sandbox.STATUS_TIME_LIMIT_EXCEEDED - - tc := sandbox.Testcase{ - File: "test_files/printloop.cpp", - ExpectedStatus: &expectedStatus, - TimeLimitMs: 1000, - } - - tc.Run(t) -} - -func TestSandboxMemoryLimitExceeded(t *testing.T) { - expectedStatus := sandbox.STATUS_MEMORY_LIMIT_EXCEEDED - - tc := sandbox.Testcase{ - File: "test_files/mem1.cpp", - ExpectedStatus: &expectedStatus, - TimeLimitMs: 10000, - } - - tc.Run(t) -} - -func TestSandboxFork(t *testing.T) { - expectedStatus := sandbox.STATUS_OK - - tc := sandbox.Testcase{ - File: "test_files/fork.cpp", - ExpectedStatus: &expectedStatus, - TimeLimitMs: 1000, - } - - tc.Run(t) -} - -func TestSandboxRusageConsistency(t *testing.T) { - expectedStatus := sandbox.STATUS_OK - - tc := sandbox.Testcase{ - File: "test_files/random.cpp", - ExpectedStatus: &expectedStatus, - TimeLimitMs: 1000, - } - - var minCpuUsage, maxCpuUsage uint64 - - for i := 0; i < 10; i++ { - reports := tc.Run(t) - report := reports[0] - - if i == 0 { - minCpuUsage = report.CPUTime - maxCpuUsage = report.CPUTime - - continue - } - - minCpuUsage = min(minCpuUsage, report.CPUTime) - maxCpuUsage = max(maxCpuUsage, report.CPUTime) - } - - require.Less(t, maxCpuUsage-minCpuUsage, uint64(10000), "cpu usage inconsistent") -} - -func TestSandboxConcurrency(t *testing.T) { - expectedStatus := sandbox.STATUS_OK - - tc := sandbox.Testcase{ - File: "test_files/sleep.cpp", - 
ExpectedStatus: &expectedStatus, - TimeLimitMs: 3000, - Concurrency: 5, - } - - reports := tc.Run(t) - - startTimes := make([]int64, len(reports)) - finishTimes := make([]int64, len(reports)) - - for i, report := range reports { - startTimes[i] = report.StartAt.UnixMilli() - finishTimes[i] = report.FinishAt.UnixMilli() - } - - sort.Slice(startTimes, func(i, j int) bool { - return startTimes[i] < startTimes[j] - }) - sort.Slice(finishTimes, func(i, j int) bool { - return finishTimes[i] < finishTimes[j] - }) - - for i := 2; i < len(startTimes); i++ { - require.Less(t, finishTimes[i-2], startTimes[i], "semaphore didn't work correctly") - } - - tc.Run(t) -} diff --git a/sandbox/test_utils.go b/sandbox/test_utils.go deleted file mode 100644 index 3b6da49..0000000 --- a/sandbox/test_utils.go +++ /dev/null @@ -1,164 +0,0 @@ -package sandbox - -import ( - "context" - "fmt" - "os" - "path/filepath" - "sync" - "testing" - "time" - - "github.com/google/uuid" - "github.com/joshjms/castletown/config" - "github.com/stretchr/testify/require" -) - -type Testcase struct { - File string - Stdin string - - ExpectedStatus *Status - ExpectedOutput *string - - TimeLimitMs int64 - - Concurrency int -} - -func (tc *Testcase) Run(t *testing.T) []Report { - m := GetManager() - require.NotNil(t, m, "failed to get manager") - - id := uuid.NewString() - rootFileDir := filepath.Join(config.StorageDir, id) - defer os.RemoveAll(rootFileDir) - - compileFileDir := filepath.Join(rootFileDir, "proc-0") - os.MkdirAll(compileFileDir, 0755) - - rootfsDir := "/tmp/castletown/images/gcc-15-bookworm" - - compileConfig := &Config{ - RootfsImageDir: rootfsDir, - BoxDir: compileFileDir, - Args: []string{"g++", "-o", "main", "main.cpp"}, - Cwd: "/box", - Env: []string{ - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", - }, - TimeLimitMs: 10000, - Cgroup: &CgroupConfig{ - CpuQuota: 100000, - Memory: 512 * 1024 * 1024, - }, - Rlimit: &RlimitConfig{ - Core: &Rlimit{ - Hard: 0, - Soft: 0, - }, 
- Fsize: &Rlimit{ - Hard: 1024 * 1024 * 1024, - Soft: 1024 * 1024 * 1024, - }, - NoFile: &Rlimit{ - Hard: 64, - Soft: 64, - }, - }, - Files: []File{ - { - Src: tc.File, - Dst: filepath.Join(compileFileDir, "main.cpp"), - }, - }, - } - - compileId := fmt.Sprintf("%s-%d", id, 0) - err := m.NewSandbox(compileId, compileConfig) - defer require.NoError(t, err, "failed to create compile sandbox: %v", err) - defer m.DestroySandbox(compileId) - - ctx := context.Background() - compileStartTime := time.Now() - compileReport, err := m.RunSandbox(ctx, compileId) - require.NoError(t, err, "failed to compile code") - compileElapsed := time.Since(compileStartTime) - t.Logf("Compile took %v", compileElapsed) - - require.Equal(t, STATUS_OK, compileReport.Status, "compile status not ok") - - if tc.Concurrency < 1 { - tc.Concurrency = 1 - } - - wg := sync.WaitGroup{} - - finishTimes := make([]time.Time, tc.Concurrency) - reports := make([]Report, tc.Concurrency) - - for i := 1; i <= tc.Concurrency; i++ { - wg.Add(1) - go func(i int) { - defer wg.Done() - execId := fmt.Sprintf("%s-%d", id, i) - - execFileDir := filepath.Join(rootFileDir, fmt.Sprintf("proc-%d", i)) - os.MkdirAll(execFileDir, 0755) - - execConfig := &Config{ - RootfsImageDir: rootfsDir, - BoxDir: execFileDir, - Args: []string{"./main"}, - Stdin: tc.Stdin, - Cwd: "/box", - Env: []string{ - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", - }, - TimeLimitMs: tc.TimeLimitMs, - Cgroup: &CgroupConfig{ - CpuQuota: 100000, - Memory: 256 * 1024 * 1024, - PidsLimit: 1, - CpusetCpus: "0", - CpusetMems: "0", - }, - Files: []File{ - { - Src: filepath.Join(compileFileDir, "main"), - Dst: filepath.Join(execFileDir, "main"), - }, - }, - } - - err = m.NewSandbox(execId, execConfig) - defer m.DestroySandbox(execId) - require.NoError(t, err, "failed to create exec sandbox: %v", err) - - ctx = context.Background() - execStartTime := time.Now() - t.Logf("Starting execution %d at %v", i, execStartTime) - execReport, err 
:= m.RunSandbox(ctx, execId) - execFinishTime := time.Now() - execElapsed := time.Since(execStartTime) - t.Logf("Finished execution %d at %v", i, execFinishTime) - finishTimes[i-1] = execFinishTime - t.Logf("Execution %d took %v", i, execElapsed) - require.NoError(t, err, "failed to execute code") - - if tc.ExpectedStatus != nil { - require.Equal(t, *tc.ExpectedStatus, execReport.Status, "status != expectedStatus") - } - - if tc.ExpectedOutput != nil { - require.Equal(t, *tc.ExpectedOutput, execReport.Stdout, "output != expectedOutput") - } - - reports[i-1] = execReport - }(i) - } - - wg.Wait() - - return reports -} diff --git a/scripts/rootfs.sh b/scripts/rootfs.sh index 3d5374c..1848822 100644 --- a/scripts/rootfs.sh +++ b/scripts/rootfs.sh @@ -2,18 +2,56 @@ set -euo pipefail -if [ -d "/tmp/castletown/images/gcc-15-bookworm" ]; then +IMAGE_REF=${CASTLETOWN_IMAGE_REF:-"gcc:15-bookworm"} +IMAGE_NAME=${CASTLETOWN_IMAGE_NAME:-"gcc-15-bookworm"} +# Prefer the env var name used by internal/config (JUDGE_IMAGES_DIR), but +# fall back to the legacy IMAGES_DIR if set. +IMAGES_DIR=${JUDGE_IMAGES_DIR:-${IMAGES_DIR:-"/var/castletown/images"}} +TMP_DIR=${CASTLETOWN_TMP_DIR:-"/tmp"} + +mkdir -p "${IMAGES_DIR}" +mkdir -p "${TMP_DIR}" + +if [[ -d "${IMAGES_DIR}/${IMAGE_NAME}" ]]; then + echo "[rootfs] ${IMAGES_DIR}/${IMAGE_NAME} already exists, nothing to do" exit 0 fi -skopeo copy docker://gcc:15-bookworm oci:/tmp/_tmp_gcc:15-bookworm +if ! command -v skopeo >/dev/null 2>&1; then + echo "[rootfs] skopeo is required but not installed" >&2 + exit 1 +fi + +if ! 
command -v umoci >/dev/null 2>&1; then + echo "[rootfs] umoci is required but not installed" >&2 + exit 1 +fi + +oci_dir=$(mktemp -d -p "${TMP_DIR}" castletown-oci-XXXXXX) +rootfs_dir=$(mktemp -d -p "${TMP_DIR}" castletown-rootfs-XXXXXX) +trap 'rm -rf "${oci_dir}" "${rootfs_dir}"' EXIT +echo "[rootfs] downloading ${IMAGE_REF}" +skopeo copy "docker://${IMAGE_REF}" "oci:${oci_dir}:${IMAGE_NAME}" + +echo "[rootfs] unpacking image" umoci raw unpack --rootless \ - --image /tmp/_tmp_gcc:15-bookworm \ - /tmp/_tmp_gcc_15-bookworm + --image "${oci_dir}:${IMAGE_NAME}" \ + "${rootfs_dir}" + +source_dir="${rootfs_dir}/rootfs" +if [[ ! -d "${source_dir}" ]]; then + # umoci versions prior to 0.5 place the rootfs directly at the destination + source_dir="${rootfs_dir}" +fi + +if [[ ! -d "${source_dir}" ]]; then + echo "[rootfs] unpack failed: ${source_dir} not found" >&2 + exit 1 +fi -mkdir -p /tmp/castletown/images/gcc-15-bookworm -cp -r /tmp/_tmp_gcc_15-bookworm/* /tmp/castletown/images/gcc-15-bookworm -rm -rf /tmp/_tmp_gcc_15-bookworm +mkdir -p "${IMAGES_DIR}/${IMAGE_NAME}" +cp -r "${source_dir}/." 
"${IMAGES_DIR}/${IMAGE_NAME}/" +mkdir -p "${IMAGES_DIR}/${IMAGE_NAME}/box" -mkdir -p /tmp/castletown/images/gcc-15-bookworm/box +echo "[rootfs] prepared ${IMAGES_DIR}/${IMAGE_NAME}" diff --git a/server/handler/done/api.go b/server/handler/done/api.go deleted file mode 100644 index 9d134cc..0000000 --- a/server/handler/done/api.go +++ /dev/null @@ -1,7 +0,0 @@ -package done - -type Request struct { - ID string `json:"id"` -} - -type Response struct{} diff --git a/server/handler/done/done.go b/server/handler/done/done.go deleted file mode 100644 index 2cad4fe..0000000 --- a/server/handler/done/done.go +++ /dev/null @@ -1,30 +0,0 @@ -package done - -import ( - "encoding/json" - "fmt" - "net/http" - - "github.com/joshjms/castletown/job" -) - -func Handler(w http.ResponseWriter, r *http.Request) { - if r.Method != http.MethodPost { - http.Error(w, "method not allowed", http.StatusMethodNotAllowed) - return - } - - var req Request - - if err := json.NewDecoder(r.Body).Decode(&req); err != nil { - http.Error(w, fmt.Sprintf("invalid json: %v", err), http.StatusBadRequest) - return - } - - jp := job.GetJobPool() - jp.RemoveJob(req.ID) - - w.Header().Set("Content-Type", "application/json") - w.WriteHeader(http.StatusOK) - w.Write([]byte(`{"status":"ok"}`)) -} diff --git a/server/handler/done/grpc.go b/server/handler/done/grpc.go deleted file mode 100644 index 4e12ad8..0000000 --- a/server/handler/done/grpc.go +++ /dev/null @@ -1,23 +0,0 @@ -package done - -import ( - "context" - - "github.com/joshjms/castletown/job" - pb "github.com/joshjms/castletown/proto" -) - -type DoneServer struct { - pb.UnimplementedDoneServiceServer -} - -func NewDoneServer() *DoneServer { - return &DoneServer{} -} - -func (s *DoneServer) Done(ctx context.Context, req *pb.DoneRequest) (*pb.DoneResponse, error) { - jp := job.GetJobPool() - jp.RemoveJob(req.Id) - - return &pb.DoneResponse{}, nil -} diff --git a/server/handler/exec/api.go b/server/handler/exec/api.go deleted file mode 100644 index 
ea017c9..0000000 --- a/server/handler/exec/api.go +++ /dev/null @@ -1,17 +0,0 @@ -package exec - -import ( - "github.com/joshjms/castletown/job" - "github.com/joshjms/castletown/sandbox" -) - -type Request struct { - ID string `json:"id"` - Files []job.File `json:"files"` - Procs []job.Process `json:"steps"` -} - -type Response struct { - ID string `json:"id"` - Reports []sandbox.Report `json:"reports"` -} diff --git a/server/handler/exec/exec.go b/server/handler/exec/exec.go deleted file mode 100644 index 1b0d87f..0000000 --- a/server/handler/exec/exec.go +++ /dev/null @@ -1,72 +0,0 @@ -package exec - -import ( - "context" - "encoding/json" - "fmt" - "net/http" - - "github.com/google/uuid" - "github.com/joshjms/castletown/job" - "github.com/joshjms/castletown/sandbox" -) - -func Handler(w http.ResponseWriter, r *http.Request) { - if r.Method != http.MethodPost { - http.Error(w, "method not allowed", http.StatusMethodNotAllowed) - return - } - - var req Request - - if err := json.NewDecoder(r.Body).Decode(&req); err != nil { - http.Error(w, fmt.Sprintf("invalid json: %v", err), http.StatusBadRequest) - return - } - - if req.ID == "" { - req.ID = uuid.NewString() - } - - reports, err := handleRequest(r.Context(), req) - if err != nil { - http.Error(w, fmt.Sprintf("error running processes: %v", err), http.StatusInternalServerError) - return - } - - response := Response{ - ID: req.ID, - Reports: reports, - } - - responseJson, err := json.MarshalIndent(response, "", " ") - if err != nil { - http.Error(w, fmt.Sprintf("cannot marshal reports: %v", err), http.StatusInternalServerError) - } - - w.Header().Set("Content-Type", "application/json") - w.WriteHeader(http.StatusOK) - w.Write(responseJson) -} - -func handleRequest(ctx context.Context, req Request) ([]sandbox.Report, error) { - j := job.Job{ - ID: req.ID, - Files: req.Files, - Procs: req.Procs, - } - - jp := job.GetJobPool() - _job := jp.AddOrAppendJob(&j) - - if err := _job.Prepare(); err != nil { - return nil, 
fmt.Errorf("error preparing job: %w", err) - } - - reports, err := _job.ExecuteAll(ctx) - if err != nil { - return nil, fmt.Errorf("error executing job: %w", err) - } - - return reports, nil -} diff --git a/server/handler/exec/grpc.go b/server/handler/exec/grpc.go deleted file mode 100644 index c1793f4..0000000 --- a/server/handler/exec/grpc.go +++ /dev/null @@ -1,104 +0,0 @@ -package exec - -import ( - "context" - - "github.com/google/uuid" - "github.com/joshjms/castletown/job" - pb "github.com/joshjms/castletown/proto" - "github.com/joshjms/castletown/sandbox" -) - -type ExecServer struct { - pb.UnimplementedExecServiceServer -} - -func NewExecServer() *ExecServer { - return &ExecServer{} -} - -func (s *ExecServer) Execute(ctx context.Context, req *pb.ExecRequest) (*pb.ExecResponse, error) { - id := req.Id - if id == "" { - id = uuid.NewString() - } - - files := make([]job.File, len(req.Files)) - for i, f := range req.Files { - files[i] = job.File{ - Name: f.Name, - Content: f.Content, - } - } - - procs := make([]job.Process, len(req.Procs)) - for i, p := range req.Procs { - procs[i] = job.Process{ - Image: p.Image, - Cmd: p.Cmd, - Stdin: p.Stdin, - MemoryLimitMB: p.MemoryLimitMb, - TimeLimitMs: p.TimeLimitMs, - ProcLimit: p.ProcLimit, - Files: p.Files, - Persist: p.Persist, - } - } - - apiReq := Request{ - ID: id, - Files: files, - Procs: procs, - } - - reports, err := handleRequest(ctx, apiReq) - if err != nil { - return nil, err - } - - protoReports := make([]*pb.Report, len(reports)) - for i, r := range reports { - protoReports[i] = convertToProtoReport(r) - } - - return &pb.ExecResponse{ - Id: id, - Reports: protoReports, - }, nil -} - -func convertToProtoReport(r sandbox.Report) *pb.Report { - return &pb.Report{ - Status: convertToProtoStatus(r.Status), - ExitCode: int32(r.ExitCode), - Signal: int32(r.Signal), - Stdout: r.Stdout, - Stderr: r.Stderr, - CpuTime: r.CPUTime, - Memory: r.Memory, - WallTime: r.WallTime, - StartAt: r.StartAt.UnixNano(), - 
FinishAt: r.FinishAt.UnixNano(), - } -} - -func convertToProtoStatus(status sandbox.Status) pb.Status { - switch status { - case sandbox.STATUS_OK: - return pb.Status_STATUS_OK - case sandbox.STATUS_RUNTIME_ERROR: - return pb.Status_STATUS_RUNTIME_ERROR - case sandbox.STATUS_TIME_LIMIT_EXCEEDED: - return pb.Status_STATUS_TIME_LIMIT_EXCEEDED - case sandbox.STATUS_MEMORY_LIMIT_EXCEEDED: - return pb.Status_STATUS_MEMORY_LIMIT_EXCEEDED - case sandbox.STATUS_OUTPUT_LIMIT_EXCEEDED: - return pb.Status_STATUS_OUTPUT_LIMIT_EXCEEDED - case sandbox.STATUS_TERMINATED: - return pb.Status_STATUS_TERMINATED - case sandbox.STATUS_SKIPPED: - return pb.Status_STATUS_SKIPPED - default: - return pb.Status_STATUS_UNKNOWN - } -} diff --git a/server/server.go b/server/server.go deleted file mode 100644 index ba07a25..0000000 --- a/server/server.go +++ /dev/null @@ -1,80 +0,0 @@ -package server - -import ( - "context" - "fmt" - "net" - "net/http" - "os" - "os/signal" - "time" - - "github.com/joshjms/castletown/config" - pb "github.com/joshjms/castletown/proto" - "github.com/joshjms/castletown/server/handler/done" - "github.com/joshjms/castletown/server/handler/exec" - "google.golang.org/grpc" -) - -type Server struct { - httpSrv *http.Server - grpcSrv *grpc.Server -} - -func NewServer() (*Server, error) { - grpcSrv := grpc.NewServer() - - pb.RegisterExecServiceServer(grpcSrv, exec.NewExecServer()) - pb.RegisterDoneServiceServer(grpcSrv, done.NewDoneServer()) - - return &Server{ - httpSrv: &http.Server{ - Addr: fmt.Sprintf(":%d", config.Port), - Handler: http.DefaultServeMux, - }, - grpcSrv: grpcSrv, - }, nil -} - -func (s *Server) Start() { - http.HandleFunc("/exec", exec.Handler) - http.HandleFunc("/done", done.Handler) - - stop := make(chan os.Signal, 1) - signal.Notify(stop, os.Interrupt) - - go func() { - fmt.Printf("Starting HTTP server at port %s\n", s.httpSrv.Addr) - if err := s.httpSrv.ListenAndServe(); err != nil && err != http.ErrServerClosed { - fmt.Printf("Error starting HTTP 
server: %v\n", err) - } - }() - - grpcPort := config.Port + 1 - go func() { - lis, err := net.Listen("tcp", fmt.Sprintf(":%d", grpcPort)) - if err != nil { - fmt.Printf("Failed to listen for gRPC: %v\n", err) - return - } - fmt.Printf("Starting gRPC server at port %d\n", grpcPort) - if err := s.grpcSrv.Serve(lis); err != nil { - fmt.Printf("Error starting gRPC server: %v\n", err) - } - }() - - <-stop - - fmt.Println("Shutting down servers...") - - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - defer cancel() - - if err := s.httpSrv.Shutdown(ctx); err != nil { - fmt.Printf("Error shutting down HTTP server: %v\n", err) - } - - s.grpcSrv.GracefulStop() - - fmt.Println("Servers gracefully stopped") -} diff --git a/tests/e2e/e2e_test.go b/tests/e2e/e2e_test.go deleted file mode 100644 index 6fa9da5..0000000 --- a/tests/e2e/e2e_test.go +++ /dev/null @@ -1,278 +0,0 @@ -package e2e - -import ( - "bytes" - "context" - "encoding/json" - "io" - "net/http" - "testing" - "time" - - pb "github.com/joshjms/castletown/proto" - "google.golang.org/grpc" - "google.golang.org/grpc/credentials/insecure" -) - -const ( - httpURL = "http://localhost:8000" - grpcAddr = "localhost:8001" - defaultImage = "gcc:15-bookworm" -) - -type HTTPExecRequest struct { - ID string `json:"id"` - Files []HTTPFile `json:"files"` - Steps []HTTPProcess `json:"steps"` -} - -type HTTPFile struct { - Name string `json:"name"` - Content string `json:"content"` -} - -type HTTPProcess struct { - Image string `json:"image"` - Cmd []string `json:"cmd"` - Stdin string `json:"stdin"` - MemoryLimitMB int64 `json:"memoryLimitMB"` - TimeLimitMs uint64 `json:"timeLimitMs"` - ProcLimit int64 `json:"procLimit"` - Files []string `json:"files"` - Persist []string `json:"persist"` -} - -type HTTPExecResponse struct { - ID string `json:"id"` - Reports []HTTPReport `json:"reports"` -} - -type HTTPReport struct { - Status string `json:"Status"` - ExitCode int `json:"ExitCode"` - Signal int 
`json:"Signal"` - Stdout string `json:"Stdout"` - Stderr string `json:"Stderr"` - CPUTime uint64 `json:"CPUTime"` - Memory uint64 `json:"Memory"` - WallTime int64 `json:"WallTime"` -} - -type HTTPDoneRequest struct { - ID string `json:"id"` -} - -func TestHTTPExec(t *testing.T) { - // Create a simple exec request - req := HTTPExecRequest{ - ID: "test-http-exec", - Files: []HTTPFile{ - { - Name: "test.txt", - Content: "Hello from HTTP test", - }, - }, - Steps: []HTTPProcess{ - { - Image: defaultImage, - Cmd: []string{"/bin/cat", "test.txt"}, - Files: []string{"test.txt"}, - }, - }, - } - - reqBody, err := json.Marshal(req) - if err != nil { - t.Fatalf("Failed to marshal request: %v", err) - } - - resp, err := http.Post(httpURL+"/exec", "application/json", bytes.NewReader(reqBody)) - if err != nil { - t.Fatalf("Failed to send HTTP request: %v", err) - } - defer resp.Body.Close() - - if resp.StatusCode != http.StatusOK { - body, _ := io.ReadAll(resp.Body) - t.Fatalf("Expected status 200, got %d: %s", resp.StatusCode, string(body)) - } - - var execResp HTTPExecResponse - if err := json.NewDecoder(resp.Body).Decode(&execResp); err != nil { - t.Fatalf("Failed to decode response: %v", err) - } - - if execResp.ID != "test-http-exec" { - t.Errorf("Expected ID 'test-http-exec', got '%s'", execResp.ID) - } - - if len(execResp.Reports) == 0 { - t.Fatal("Expected at least one report") - } - - report := execResp.Reports[0] - if report.Status != "OK" { - t.Errorf("Expected status OK, got %s", report.Status) - } - - if report.ExitCode != 0 { - t.Errorf("Expected exit code 0, got %d", report.ExitCode) - } - - t.Logf("HTTP Exec test passed. 
Stdout: %s", report.Stdout) -} - -func TestHTTPDone(t *testing.T) { - req := HTTPDoneRequest{ - ID: "test-http-exec", - } - - reqBody, err := json.Marshal(req) - if err != nil { - t.Fatalf("Failed to marshal request: %v", err) - } - - resp, err := http.Post(httpURL+"/done", "application/json", bytes.NewReader(reqBody)) - if err != nil { - t.Fatalf("Failed to send HTTP request: %v", err) - } - defer resp.Body.Close() - - if resp.StatusCode != http.StatusOK { - body, _ := io.ReadAll(resp.Body) - t.Fatalf("Expected status 200, got %d: %s", resp.StatusCode, string(body)) - } - - t.Log("HTTP Done test passed") -} - -func TestGRPCExec(t *testing.T) { - conn, err := grpc.NewClient(grpcAddr, grpc.WithTransportCredentials(insecure.NewCredentials())) - if err != nil { - t.Fatalf("Failed to connect to gRPC server: %v", err) - } - defer conn.Close() - - client := pb.NewExecServiceClient(conn) - - req := &pb.ExecRequest{ - Id: "test-grpc-exec", - Files: []*pb.File{ - { - Name: "test.txt", - Content: "Hello from gRPC test", - }, - }, - Procs: []*pb.Process{ - { - Image: defaultImage, - Cmd: []string{"/bin/cat", "test.txt"}, - Files: []string{"test.txt"}, - }, - }, - } - - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - - resp, err := client.Execute(ctx, req) - if err != nil { - t.Fatalf("Failed to execute gRPC request: %v", err) - } - - if resp.Id != "test-grpc-exec" { - t.Errorf("Expected ID 'test-grpc-exec', got '%s'", resp.Id) - } - - if len(resp.Reports) == 0 { - t.Fatal("Expected at least one report") - } - - report := resp.Reports[0] - if report.Status != pb.Status_STATUS_OK { - t.Errorf("Expected status OK, got %v", report.Status) - } - - if report.ExitCode != 0 { - t.Errorf("Expected exit code 0, got %d", report.ExitCode) - } - - t.Logf("gRPC Exec test passed. 
Stdout: %s", report.Stdout) -} - -func TestGRPCDone(t *testing.T) { - conn, err := grpc.NewClient(grpcAddr, grpc.WithTransportCredentials(insecure.NewCredentials())) - if err != nil { - t.Fatalf("Failed to connect to gRPC server: %v", err) - } - defer conn.Close() - - client := pb.NewDoneServiceClient(conn) - - req := &pb.DoneRequest{ - Id: "test-grpc-exec", - } - - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - defer cancel() - - resp, err := client.Done(ctx, req) - if err != nil { - t.Fatalf("Failed to execute gRPC done request: %v", err) - } - - if resp == nil { - t.Error("Expected non-nil response") - } - - t.Log("gRPC Done test passed") -} - -func TestHTTPAndGRPCIntegration(t *testing.T) { - httpReq := HTTPExecRequest{ - ID: "test-integration", - Files: []HTTPFile{ - { - Name: "input.txt", - Content: "Hello, world!", - }, - }, - Steps: []HTTPProcess{ - { - Image: defaultImage, - Cmd: []string{"/bin/echo", "Hello, world!"}, - Files: []string{}, - }, - }, - } - - reqBody, _ := json.Marshal(httpReq) - httpResp, err := http.Post(httpURL+"/exec", "application/json", bytes.NewReader(reqBody)) - if err != nil { - t.Fatalf("Failed to send HTTP request: %v", err) - } - defer httpResp.Body.Close() - - if httpResp.StatusCode != http.StatusOK { - body, _ := io.ReadAll(httpResp.Body) - t.Fatalf("HTTP request failed with status %d: %s", httpResp.StatusCode, string(body)) - } - - conn, err := grpc.NewClient(grpcAddr, grpc.WithTransportCredentials(insecure.NewCredentials())) - if err != nil { - t.Fatalf("Failed to connect to gRPC server: %v", err) - } - defer conn.Close() - - doneClient := pb.NewDoneServiceClient(conn) - - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - defer cancel() - - _, err = doneClient.Done(ctx, &pb.DoneRequest{Id: "test-integration"}) - if err != nil { - t.Fatalf("Failed to mark job done via gRPC: %v", err) - } - - t.Log("Integration test passed: HTTP exec + gRPC done") -}