diff --git a/.dockerignore b/.dockerignore index ea75a23d..92fcf6f7 100644 --- a/.dockerignore +++ b/.dockerignore @@ -1,2 +1,7 @@ .docker cmd/tps/data +# Python virtual environments +**/.venv +**/__pycache__ +**/*.pyc +**/node_modules diff --git a/.github/workflows/release-plugin-csharp.yml b/.github/workflows/release-plugin-csharp.yml new file mode 100644 index 00000000..6d4ea5fd --- /dev/null +++ b/.github/workflows/release-plugin-csharp.yml @@ -0,0 +1,81 @@ +name: Release C# Plugin + +on: + workflow_dispatch: + inputs: + tag: + description: "Tag for the plugin release (e.g. plugin-csharp-v1.0.0)" + required: true + +permissions: + contents: write + +jobs: + release: + name: Build and Release C# Plugin + runs-on: ubuntu-latest + + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Set up .NET + uses: actions/setup-dotnet@v4 + with: + dotnet-version: "8.0.x" + + - name: Restore dependencies + working-directory: plugin/csharp + run: dotnet restore + + - name: Build and publish + working-directory: plugin/csharp + run: | + mkdir -p release + # Linux glibc x64 (Ubuntu, Debian, etc.) + dotnet publish CanopyPlugin.csproj -c Release -r linux-x64 --self-contained true -o release/linux-glibc-x64 + # Linux glibc ARM64 + dotnet publish CanopyPlugin.csproj -c Release -r linux-arm64 --self-contained true -o release/linux-glibc-arm64 + # Linux musl x64 (Alpine Docker) + dotnet publish CanopyPlugin.csproj -c Release -r linux-musl-x64 --self-contained true -o release/linux-musl-x64 + # Linux musl ARM64 (Alpine Docker) + dotnet publish CanopyPlugin.csproj -c Release -r linux-musl-arm64 --self-contained true -o release/linux-musl-arm64 + + - name: Create tarballs + working-directory: plugin/csharp + run: | + cd release + # glibc versions (standard Linux) + tar -czf csharp-plugin-linux-x64.tar.gz -C linux-glibc-x64 . + tar -czf csharp-plugin-linux-arm64.tar.gz -C linux-glibc-arm64 . + # musl versions (Alpine Docker) + tar -czf csharp-plugin-linux-musl-x64.tar.gz -C linux-musl-x64 . + tar -czf csharp-plugin-linux-musl-arm64.tar.gz -C linux-musl-arm64 . + + - name: Create GitHub release + uses: softprops/action-gh-release@v2 + with: + tag_name: ${{ github.event.inputs.tag }} + name: "C# Plugin ${{ github.event.inputs.tag }}" + body: | + C# Plugin Release ${{ github.event.inputs.tag }} + + ## Installation + 1. Extract the appropriate tarball to `plugin/csharp/bin/` + 2. The plugin is self-contained (no .NET runtime required) + + ## Assets + ### Standard Linux (glibc) - Ubuntu, Debian, RHEL, etc. + - `csharp-plugin-linux-x64.tar.gz` - Linux x86_64 + - `csharp-plugin-linux-arm64.tar.gz` - Linux ARM64 + + ### Alpine Linux (musl) - Docker containers + - `csharp-plugin-linux-musl-x64.tar.gz` - Alpine x86_64 + - `csharp-plugin-linux-musl-arm64.tar.gz` - Alpine ARM64 + files: | + plugin/csharp/release/csharp-plugin-linux-x64.tar.gz + plugin/csharp/release/csharp-plugin-linux-arm64.tar.gz + plugin/csharp/release/csharp-plugin-linux-musl-x64.tar.gz + plugin/csharp/release/csharp-plugin-linux-musl-arm64.tar.gz + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/release-plugin-go.yml b/.github/workflows/release-plugin-go.yml new file mode 100644 index 00000000..eb046eb4 --- /dev/null +++ b/.github/workflows/release-plugin-go.yml @@ -0,0 +1,55 @@ +name: Release Go Plugin + +on: + workflow_dispatch: + inputs: + tag: + description: "Tag for the plugin release (e.g. 
plugin-go-v1.0.0)" + required: true + +permissions: + contents: write + +jobs: + release: + name: Build and Release Go Plugin + runs-on: ubuntu-latest + + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Set up Go + uses: actions/setup-go@v5 + with: + go-version: "1.24" + + - name: Build binaries + working-directory: plugin/go + run: | + mkdir -p dist + # Linux AMD64 + GOARCH=amd64 CGO_ENABLED=0 GOOS=linux go build -a -o dist/go-plugin . + tar -czf dist/go-plugin-linux-amd64.tar.gz -C dist go-plugin + rm dist/go-plugin + # Linux ARM64 + GOARCH=arm64 CGO_ENABLED=0 GOOS=linux go build -a -o dist/go-plugin . + tar -czf dist/go-plugin-linux-arm64.tar.gz -C dist go-plugin + rm dist/go-plugin + + - name: Create GitHub release + uses: softprops/action-gh-release@v2 + with: + tag_name: ${{ github.event.inputs.tag }} + name: "Go Plugin ${{ github.event.inputs.tag }}" + body: | + Go Plugin Release ${{ github.event.inputs.tag }} + + ## Assets + - `go-plugin-linux-amd64.tar.gz` - Linux x86_64 + - `go-plugin-linux-arm64.tar.gz` - Linux ARM64 + files: | + plugin/go/dist/go-plugin-linux-amd64.tar.gz + plugin/go/dist/go-plugin-linux-arm64.tar.gz + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/release-plugin-kotlin.yml b/.github/workflows/release-plugin-kotlin.yml new file mode 100644 index 00000000..111ce444 --- /dev/null +++ b/.github/workflows/release-plugin-kotlin.yml @@ -0,0 +1,62 @@ +name: Release Kotlin Plugin + +on: + workflow_dispatch: + inputs: + tag: + description: "Tag for the plugin release (e.g. plugin-kotlin-v1.0.0)" + required: true + +permissions: + contents: write + +jobs: + release: + name: Build and Release Kotlin Plugin + runs-on: ubuntu-latest + + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Set up JDK 21 + uses: actions/setup-java@v4 + with: + java-version: "21" + distribution: "temurin" + + - name: Setup Gradle + uses: gradle/actions/setup-gradle@v3 + + - name: Build fat JAR + working-directory: plugin/kotlin + run: ./gradlew fatJar --no-daemon + + - name: Prepare release artifacts + working-directory: plugin/kotlin + run: | + mkdir -p release + cp build/libs/canopy-plugin-kotlin-*-all.jar release/kotlin-plugin.jar + cd release && tar -czf kotlin-plugin.tar.gz kotlin-plugin.jar + + - name: Create GitHub release + uses: softprops/action-gh-release@v2 + with: + tag_name: ${{ github.event.inputs.tag }} + name: "Kotlin Plugin ${{ github.event.inputs.tag }}" + body: | + Kotlin Plugin Release ${{ github.event.inputs.tag }} + + ## Requirements + - Java Runtime Environment (JRE) 21 or later + + ## Installation + 1. Download `kotlin-plugin.tar.gz` to `plugin/kotlin/` + 2. The plugin will auto-extract on first start + + ## Assets + - `kotlin-plugin.tar.gz` - Fat JAR with all dependencies included + files: | + plugin/kotlin/release/kotlin-plugin.tar.gz + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/release-plugin-python.yml b/.github/workflows/release-plugin-python.yml new file mode 100644 index 00000000..01a7b79a --- /dev/null +++ b/.github/workflows/release-plugin-python.yml @@ -0,0 +1,62 @@ +name: Release Python Plugin + +on: + workflow_dispatch: + inputs: + tag: + description: "Tag for the plugin release (e.g. 
plugin-python-v1.0.0)" + required: true + +permissions: + contents: write + +jobs: + release: + name: Build and Release Python Plugin + runs-on: ubuntu-latest + + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: "3.12" + + - name: Create distribution tarball + working-directory: plugin/python + run: | + mkdir -p release + # Create tarball with source code (venv is created on first run by pluginctl.sh) + # Note: pluginctl.sh is NOT included - it's copied separately by Dockerfile + # and we don't want old releases to overwrite newer pluginctl.sh versions + tar --exclude='*.pyc' \ + --exclude='__pycache__' \ + -czf release/python-plugin.tar.gz \ + main.py \ + contract/ \ + pyproject.toml \ + Makefile + + - name: Create GitHub release + uses: softprops/action-gh-release@v2 + with: + tag_name: ${{ github.event.inputs.tag }} + name: "Python Plugin ${{ github.event.inputs.tag }}" + body: | + Python Plugin Release ${{ github.event.inputs.tag }} + + ## Requirements + - Python 3.9 or later + + ## Installation + 1. Extract `python-plugin.tar.gz` to `plugin/python/` + 2. The plugin will create a virtual environment on first run + + ## Assets + - `python-plugin.tar.gz` - Source code with pyproject.toml + files: | + plugin/python/release/python-plugin.tar.gz + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/release-plugin-typescript.yml b/.github/workflows/release-plugin-typescript.yml new file mode 100644 index 00000000..6fcdb230 --- /dev/null +++ b/.github/workflows/release-plugin-typescript.yml @@ -0,0 +1,67 @@ +name: Release TypeScript Plugin + +on: + workflow_dispatch: + inputs: + tag: + description: "Tag for the plugin release (e.g. plugin-typescript-v1.0.0)" + required: true + +permissions: + contents: write + +jobs: + release: + name: Build and Release TypeScript Plugin + runs-on: ubuntu-latest + + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Set up Node.js + uses: actions/setup-node@v4 + with: + node-version: "20" + + - name: Install dependencies + working-directory: plugin/typescript + run: npm ci + + - name: Build plugin + working-directory: plugin/typescript + run: npm run build:all + + - name: Install production dependencies only + working-directory: plugin/typescript + run: | + rm -rf node_modules + npm ci --production + + - name: Create distribution tarball + working-directory: plugin/typescript + run: | + mkdir -p release + # Create tarball with dist/, node_modules, package.json (for "type": "module") + # Note: pluginctl.sh is NOT included - it's copied separately by Dockerfile + # and we don't want old releases to overwrite newer pluginctl.sh versions + tar -czf release/typescript-plugin.tar.gz dist/ node_modules/ package.json + + - name: Create GitHub release + uses: softprops/action-gh-release@v2 + with: + tag_name: ${{ github.event.inputs.tag }} + name: "TypeScript Plugin ${{ github.event.inputs.tag }}" + body: | + TypeScript Plugin Release ${{ github.event.inputs.tag }} + + ## Installation + 1. Extract `typescript-plugin.tar.gz` to `plugin/typescript/` + 2. 
The plugin is ready to run (node_modules included) + + ## Assets + - `typescript-plugin.tar.gz` - Compiled plugin with dependencies + files: | + plugin/typescript/release/typescript-plugin.tar.gz + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/Makefile b/Makefile index 2af7dfbf..7e04113e 100644 --- a/Makefile +++ b/Makefile @@ -1,6 +1,7 @@ # Variables GO_BIN_DIR := ~/go/bin CLI_DIR := ./cmd/main/... +AUTO_UPDATE_DIR := ./cmd/auto-update/... WALLET_DIR := ./cmd/rpc/web/wallet EXPLORER_DIR := ./cmd/rpc/web/explorer DOCKER_DIR := ./.docker/compose.yaml @@ -16,7 +17,7 @@ help: @sed -n 's/^##//p' ${MAKEFILE_LIST} | column -t -s ':' | sed -e 's/^/ /' # Targets, this is a list of all available commands which can be executed using the make command. -.PHONY: build/canopy build/canopy-full build/wallet build/explorer test/all dev/deps docker/up \ +.PHONY: build/canopy build/canopy-full build/wallet build/explorer build/auto-update build/auto-update-local run/auto-update run/auto-update-build run/auto-update-test test/all dev/deps docker/up \ docker/down docker/build docker/up-fast docker/down docker/logs \ build/plugin build/kotlin-plugin build/go-plugin build/all-plugins docker/plugin \ docker/run docker/run-kotlin docker/run-go docker/run-typescript docker/run-python docker/run-csharp @@ -40,6 +41,23 @@ build/wallet: build/explorer: npm install --prefix $(EXPLORER_DIR) && npm run build --prefix $(EXPLORER_DIR) +## build/auto-update: build the canopy auto-update binary into the GO_BIN_DIR +build/auto-update: + go build -o $(GO_BIN_DIR)/canopy-auto-update $(AUTO_UPDATE_DIR) + +## build/auto-update-local: build canopy CLI to ./cli and auto-update binary for local development +build/auto-update-local: + go build -o ./cli $(CLI_DIR) + go build -o $(GO_BIN_DIR)/canopy-auto-update $(AUTO_UPDATE_DIR) + +## run/auto-update: run the canopy auto-update binary with 'start' command (requires ./cli to exist) +run/auto-update: + BIN_PATH=./cli go run $(AUTO_UPDATE_DIR) start + +## run/auto-update-build: build canopy CLI to ./cli and then run auto-update +run/auto-update-build: build/auto-update-local + BIN_PATH=./cli go run $(AUTO_UPDATE_DIR) start + # ==================================================================================== # # TESTING # ==================================================================================== # @@ -105,13 +123,13 @@ build/plugin: ifeq ($(PLUGIN),kotlin) cd plugin/kotlin && ./gradlew fatJar --no-daemon else ifeq ($(PLUGIN),go) - $(MAKE) -C plugin/go build + cd plugin/go && go build -o go-plugin . 
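+# each branch of this conditional mirrors the plugin's own build tooling
+# (invoked as: make build/plugin PLUGIN=go|kotlin|typescript|python|csharp|all)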
else ifeq ($(PLUGIN),typescript) - cd plugin/typescript && npm ci && npm run build + cd plugin/typescript && npm ci && npm run build:all else ifeq ($(PLUGIN),python) - cd plugin/python && pip install -e ".[dev]" 2>/dev/null || true + cd plugin/python && make dev else ifeq ($(PLUGIN),csharp) - cd plugin/csharp && dotnet publish -c Release -o out + cd plugin/csharp && rm -rf bin && dotnet publish -c Release -r linux-x64 --self-contained true -o bin else ifeq ($(PLUGIN),all) $(MAKE) build/plugin PLUGIN=go $(MAKE) build/plugin PLUGIN=kotlin diff --git a/cmd/auto-update/coordinator.go b/cmd/auto-update/coordinator.go index 0fef5fd5..a2a35908 100644 --- a/cmd/auto-update/coordinator.go +++ b/cmd/auto-update/coordinator.go @@ -19,21 +19,23 @@ import ( // Supervisor manages the CLI process lifecycle, from start to stop, // and notifies listeners when the process exits type Supervisor struct { - cmd *exec.Cmd // canopy sub-process - mu sync.RWMutex // mutex for concurrent access - running atomic.Bool // flag indicating if process is running - stopping atomic.Bool // flag indicating if process is stopping - exit chan error // channel to notify listeners when process exits - unexpectedExit chan error // channel to notify listeners when process exits unexpectedly - log lib.LoggerI // logger instance + cmd *exec.Cmd // canopy sub-process + mu sync.RWMutex // mutex for concurrent access + running atomic.Bool // flag indicating if process is running + stopping atomic.Bool // flag indicating if process is stopping + exit chan error // channel to notify listeners when process exits + unexpectedExit chan error // channel to notify listeners when process exits unexpectedly + pluginConfig *PluginReleaseConfig // optional plugin configuration + log lib.LoggerI // logger instance } // NewSupervisor creates a new ProcessSupervisor instance -func NewSupervisor(logger lib.LoggerI) *Supervisor { +func NewSupervisor(logger lib.LoggerI, pluginConfig *PluginReleaseConfig) *Supervisor { return &Supervisor{ log: logger, exit: make(chan error, 1), unexpectedExit: make(chan error, 1), + pluginConfig: pluginConfig, } } @@ -127,6 +129,26 @@ func (s *Supervisor) UnexpectedExit() <-chan error { return s.unexpectedExit } +// KillPlugin kills the configured plugin process and cleans up its PID file +func (s *Supervisor) KillPlugin() { + if s.pluginConfig == nil { + return + } + // kill process matching the pattern + if s.pluginConfig.ProcessPattern != "" { + cmd := exec.Command("pkill", "-9", "-f", s.pluginConfig.ProcessPattern) + if err := cmd.Run(); err == nil { + s.log.Infof("killed process matching: %s", s.pluginConfig.ProcessPattern) + } + } + // clean up PID file + if s.pluginConfig.PIDFile != "" { + if err := os.Remove(s.pluginConfig.PIDFile); err == nil { + s.log.Infof("removed PID file: %s", s.pluginConfig.PIDFile) + } + } +} + // Coordinator code below // CoordinatorConfig holds the configuration for the Coordinator @@ -142,7 +164,8 @@ type CoordinatorConfig struct { // handles the coordination between checking updates, stopping processes, and // restarting type Coordinator struct { - updater *UpdateManager // updater instance reference + updater *ReleaseManager // CLI updater instance reference + pluginUpdater *ReleaseManager // plugin updater instance reference supervisor *Supervisor // supervisor instance reference snapshot *SnapshotManager // snapshot instance reference config *CoordinatorConfig // coordinator configuration @@ -151,10 +174,11 @@ type Coordinator struct { } // NewCoordinator creates a new 
Coordinator instance -func NewCoordinator(config *CoordinatorConfig, updater *UpdateManager, +func NewCoordinator(config *CoordinatorConfig, updater, pluginUpdater *ReleaseManager, supervisor *Supervisor, snapshot *SnapshotManager, logger lib.LoggerI) *Coordinator { return &Coordinator{ updater: updater, + pluginUpdater: pluginUpdater, supervisor: supervisor, snapshot: snapshot, config: config, @@ -163,10 +187,59 @@ func NewCoordinator(config *CoordinatorConfig, updater *UpdateManager, } } +// EnsurePluginReady checks if the plugin binary or tarball exists, and downloads if needed. +// This ensures the plugin is available before starting the CLI for the first time. +func (c *Coordinator) EnsurePluginReady() error { + // skip if no plugin updater configured + if c.pluginUpdater == nil { + return nil + } + cfg := c.pluginUpdater.config + if cfg == nil || cfg.PluginConfig == nil { + return nil + } + // check if binary already exists + binaryPath := filepath.Join(cfg.PluginDir, cfg.PluginConfig.OldBinaryPath) + if _, err := os.Stat(binaryPath); err == nil { + c.log.Debug("plugin binary exists, skipping initial download") + return nil + } + // check if tarball already exists + tarballPath := filepath.Join(cfg.PluginDir, c.pluginUpdater.getAssetName()) + if _, err := os.Stat(tarballPath); err == nil { + c.log.Debug("plugin tarball exists, skipping initial download") + return nil + } + // neither exists, download the plugin + c.log.Info("plugin not found, downloading from release...") + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute) + defer cancel() + // check for latest release + release, err := c.pluginUpdater.Check() + if err != nil { + return fmt.Errorf("failed to check for plugin release: %w", err) + } + if release == nil { + return fmt.Errorf("no plugin release found") + } + // download the plugin + if err := c.pluginUpdater.Download(ctx, release); err != nil { + return fmt.Errorf("failed to download plugin: %w", err) + } + c.pluginUpdater.Version = release.Version + c.log.Infof("plugin %s downloaded successfully", release.Version) + return nil +} + // UpdateLoop starts the update loop for the coordinator. This loop continuously checks // for updates and applies them if necessary while also providing graceful shutdown for any // termination signal received. 
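+//
+// A minimal wiring sketch (hypothetical variable names; see main.go in this
+// package for the real setup):
+//
+//	sigChan := make(chan os.Signal, 1)
+//	signal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT)
+//	if err := coordinator.UpdateLoop(sigChan); err != nil {
+//		logger.Fatalf("update loop exited: %v", err)
+//	}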
func (c *Coordinator) UpdateLoop(cancelSignal chan os.Signal) error { + // ensure plugin is ready before starting CLI (downloads if needed) + if err := c.EnsurePluginReady(); err != nil { + c.log.Warnf("failed to ensure plugin ready: %v", err) + // continue anyway - CLI might work without plugin or plugin might exist + } // start the process if err := c.supervisor.Start(c.config.BinPath); err != nil { return err @@ -174,6 +247,7 @@ func (c *Coordinator) UpdateLoop(cancelSignal chan os.Signal) error { // create a cancellable context ctx, cancel := context.WithCancel(context.Background()) defer cancel() + // kick off an immediate check timer := time.NewTimer(0) defer timer.Stop() @@ -185,6 +259,8 @@ func (c *Coordinator) UpdateLoop(cancelSignal chan os.Signal) error { c.log.Warn("unexpected process exit, stopping program") // cancel the context to clean up resources cancel() + // kill any lingering plugin processes + c.supervisor.KillPlugin() // wait for context to clean up gracePeriodTimer := time.NewTimer(c.config.GracePeriod) defer gracePeriodTimer.Stop() @@ -222,12 +298,17 @@ func (c *Coordinator) GracefulShutdown() error { c.updateInProgress.Store(false) // check if the supervisor process is running if !c.supervisor.IsRunning() { + // still kill any lingering plugin processes + c.supervisor.KillPlugin() return nil } // stop the supervised process shutdownCtx, cancel := context.WithTimeout(context.Background(), c.config.GracePeriod) defer cancel() - return c.supervisor.Stop(shutdownCtx) + err := c.supervisor.Stop(shutdownCtx) + // kill plugin process after stopping CLI + c.supervisor.KillPlugin() + return err } // CheckAndApplyUpdate performs a single update check and applies if needed @@ -237,29 +318,58 @@ func (c *Coordinator) CheckAndApplyUpdate(ctx context.Context) error { c.log.Debug("update already in progress, skipping check") return nil } - // check for new version - release, err := c.updater.Check() + + var canopyUpdate, pluginUpdate bool + var release, pluginRelease *Release + + // check for new Canopy version + var err error + release, err = c.updater.Check() if err != nil { - return fmt.Errorf("failed to check for update: %w", err) + c.log.Warnf("failed to check for Canopy update: %v", err) + } else if release.ShouldUpdate { + canopyUpdate = true + c.log.Infof("new Canopy version found: %s snapshot needed: %t", release.Version, release.ApplySnapshot) } - // check if an update is required - if !release.ShouldUpdate { - c.log.Debug("no update available") + + // check for new plugin version if plugin updater is configured + if c.pluginUpdater != nil { + pluginRelease, err = c.pluginUpdater.Check() + if err != nil { + c.log.Warnf("failed to check for plugin update: %v", err) + } else if pluginRelease.ShouldUpdate { + pluginUpdate = true + c.log.Infof("new plugin version found: %s", pluginRelease.Version) + } + } + + // if no updates needed, return early + if !canopyUpdate && !pluginUpdate { + c.log.Debug("no updates available") return nil } - c.log.Infof("new version found: %s snapshot needed: %t", release.Version, - release.ApplySnapshot) - // download the new version - if err := c.updater.Download(ctx, release); err != nil { - return fmt.Errorf("failed to download release: %w", err) + + // download Canopy update if needed + if canopyUpdate { + if err := c.updater.Download(ctx, release); err != nil { + return fmt.Errorf("failed to download Canopy release: %w", err) + } + } + + // download plugin update if needed + if pluginUpdate { + if err := c.pluginUpdater.Download(ctx, 
pluginRelease); err != nil { + return fmt.Errorf("failed to download plugin release: %w", err) + } } - // apply the update - return c.ApplyUpdate(ctx, release) + + // apply the updates (this will restart the process) + return c.ApplyUpdate(ctx, release, pluginRelease, canopyUpdate, pluginUpdate) } // ApplyUpdate coordinates the update process, stopping the old process and starting the new one // while applying a snapshot if required -func (c *Coordinator) ApplyUpdate(ctx context.Context, release *Release) error { +func (c *Coordinator) ApplyUpdate(ctx context.Context, release, pluginRelease *Release, canopyUpdate, pluginUpdate bool) error { canopy := c.config.Canopy // check if an update is already in progress if !c.updateInProgress.CompareAndSwap(false, true) { @@ -267,9 +377,10 @@ func (c *Coordinator) ApplyUpdate(ctx context.Context, release *Release) error { } defer c.updateInProgress.Store(false) c.log.Info("starting update process") - // download snapshot if required + + // download snapshot if required (only for Canopy updates) var snapshotPath string - if release.ApplySnapshot { + if canopyUpdate && release != nil && release.ApplySnapshot { snapshotPath = filepath.Join(canopy.DataDirPath, "snapshot") c.log.Info("downloading and extracting required snapshot") err := c.snapshot.DownloadAndExtract(ctx, snapshotPath, c.config.Canopy.ChainId) @@ -278,6 +389,7 @@ func (c *Coordinator) ApplyUpdate(ctx context.Context, release *Release) error { } c.log.Info("snapshot downloaded and extracted") } + // add random delay for staggered updates if c.supervisor.IsRunning() { delay := time.Duration(rand.IntN(c.config.MaxDelayTime)+1) * time.Minute @@ -291,17 +403,27 @@ func (c *Coordinator) ApplyUpdate(ctx context.Context, release *Release) error { case <-timer.C: } } + // stop current process if running if c.supervisor.IsRunning() { c.log.Info("stopping current CLI process for update") - stopCtx, cancel := context.WithTimeout(ctx, 5*time.Second) + stopCtx, cancel := context.WithTimeout(ctx, 8*time.Second) defer cancel() if err := c.supervisor.Stop(stopCtx); err != nil { // program may have exited with a non zero exit code due to forced close // this is to be expected so the update can still proceed c.log.Warnf("failed to stop process for update: %w", err) } + // kill any remaining plugin processes (only if plugin is configured) + if c.supervisor.pluginConfig != nil { + c.log.Info("cleaning up plugin processes") + c.supervisor.KillPlugin() + // wait for processes to fully terminate + c.log.Info("waiting for processes to terminate") + time.Sleep(2 * time.Second) + } } + // replace current db with the snapshot if needed if snapshotPath != "" { c.log.Info("replacing current db with snapshot") @@ -311,13 +433,30 @@ func (c *Coordinator) ApplyUpdate(ctx context.Context, release *Release) error { // continue with update even if snapshot fails } } - // restart with new version - c.log.Infof("starting updated CLI process with version %s", release.Version) + + // log what was updated + if canopyUpdate && pluginUpdate { + c.log.Infof("starting updated CLI process with Canopy %s and plugin %s", release.Version, pluginRelease.Version) + } else if canopyUpdate { + c.log.Infof("starting updated CLI process with Canopy %s", release.Version) + } else if pluginUpdate { + c.log.Infof("starting CLI process with updated plugin %s", pluginRelease.Version) + } + + // restart the process (pluginctl.sh will extract the new tarball on start) if err := c.supervisor.Start(c.config.BinPath); err != nil { return 
fmt.Errorf("failed to start updated process: %w", err) } - c.log.Infof("update to version %s completed successfully", release.Version) - // update UpdateManager to have the new version - c.updater.Version = release.Version + + // update version trackers + if canopyUpdate && release != nil { + c.updater.Version = release.Version + c.log.Infof("Canopy update to version %s completed successfully", release.Version) + } + if pluginUpdate && pluginRelease != nil && c.pluginUpdater != nil { + c.pluginUpdater.Version = pluginRelease.Version + c.log.Infof("Plugin update to version %s completed successfully", pluginRelease.Version) + } + return nil } diff --git a/cmd/auto-update/main.go b/cmd/auto-update/main.go index ee3fbffa..6a13f61c 100644 --- a/cmd/auto-update/main.go +++ b/cmd/auto-update/main.go @@ -26,7 +26,7 @@ const ( defaultBinPath = "./cli" defaultCheckPeriod = time.Minute * 30 // default check period for updates defaultGracePeriod = time.Second * 2 // default grace period for graceful shutdown - defaultMaxDelayTime = 30 // default max delay time for staggered updates + defaultMaxDelayTime = 30 // default max delay time for staggered updates (minutes) ) var ( @@ -35,6 +35,46 @@ var ( 1: envOrDefault("SNAPSHOT_1_URL", "http://canopy-mainnet-latest-chain-id1.us.nodefleet.net"), 2: envOrDefault("SNAPSHOT_2_URL", "http://canopy-mainnet-latest-chain-id2.us.nodefleet.net"), } + // pluginReleaseConfigs maps plugin type to its full release configuration + pluginReleaseConfigs = map[string]*PluginReleaseConfig{ + "go": { + AssetName: "go-plugin-%s-%s.tar.gz", + ArchSpecific: true, + OldBinaryPath: "go-plugin", + ProcessPattern: "go-plugin", + PIDFile: "/tmp/plugin/go-plugin.pid", + }, + "kotlin": { + AssetName: "kotlin-plugin.tar.gz", + ArchSpecific: false, + OldBinaryPath: "build/libs/canopy-plugin-kotlin-1.0.0-all.jar", + ProcessPattern: "canopy-plugin-kotlin", + PIDFile: "/tmp/plugin/kotlin-plugin.pid", + }, + "typescript": { + AssetName: "typescript-plugin.tar.gz", + ArchSpecific: false, + OldBinaryPath: "dist/main.js", + ProcessPattern: "plugin/typescript/dist/main.js", + PIDFile: "/tmp/plugin/typescript-plugin.pid", + }, + "python": { + AssetName: "python-plugin.tar.gz", + ArchSpecific: false, + OldBinaryPath: "main.py", + ProcessPattern: "plugin/python/main.py", + PIDFile: "/tmp/plugin/python-plugin.pid", + }, + "csharp": { + AssetName: "csharp-plugin-%s-%s.tar.gz", + MuslAssetName: "csharp-plugin-%s-musl-%s.tar.gz", + ArchSpecific: true, + UseX64Arch: true, + OldBinaryPath: "bin/CanopyPlugin", + ProcessPattern: "plugin/csharp/bin/CanopyPlugin", + PIDFile: "/tmp/plugin/csharp-plugin.pid", + }, + } ) func main() { @@ -62,10 +102,22 @@ func main() { sigChan := make(chan os.Signal, 1) signal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT) // setup the dependencies - updater := NewUpdateManager(configs.Updater, rpc.SoftwareVersion) + updater := NewReleaseManager(configs.Updater, rpc.SoftwareVersion) snapshot := NewSnapshotManager(configs.Snapshot) - supervisor := NewSupervisor(logger) - coordinator := NewCoordinator(configs.Coordinator, updater, supervisor, snapshot, logger) + + // setup plugin updater and config if configured + var pluginUpdater *ReleaseManager + var pluginConfig *PluginReleaseConfig + if configs.PluginUpdater != nil { + pluginUpdater = NewReleaseManager(configs.PluginUpdater, "v0.0.0") + pluginConfig = configs.PluginUpdater.PluginConfig + logger.Infof("plugin auto-update enabled from %s/%s", + configs.PluginUpdater.RepoOwner, + 
configs.PluginUpdater.RepoName) + } + supervisor := NewSupervisor(logger, pluginConfig) + + coordinator := NewCoordinator(configs.Coordinator, updater, pluginUpdater, supervisor, snapshot, logger) // start the update loop err := coordinator.UpdateLoop(sigChan) if err != nil { @@ -84,10 +136,11 @@ func main() { // Configs holds the configuration for the updater, snapshotter, and process supervisor. type Configs struct { - Updater *UpdaterConfig - Snapshot *SnapshotConfig - Coordinator *CoordinatorConfig - LoggerI lib.LoggerI + Updater *ReleaseManagerConfig + PluginUpdater *ReleaseManagerConfig + Snapshot *SnapshotConfig + Coordinator *CoordinatorConfig + LoggerI lib.LoggerI } // getConfigs returns the configuration for the updater, snapshotter, and process supervisor. @@ -100,11 +153,13 @@ func getConfigs() (*Configs, lib.LoggerI) { }) binPath := envOrDefault("BIN_PATH", defaultBinPath) + githubToken := envOrDefault("CANOPY_GITHUB_API_TOKEN", "") - updater := &UpdaterConfig{ + updater := &ReleaseManagerConfig{ + Type: ReleaseTypeCLI, RepoName: envOrDefault("REPO_NAME", defaultRepoName), RepoOwner: envOrDefault("REPO_OWNER", defaultRepoOwner), - GithubApiToken: envOrDefault("CANOPY_GITHUB_API_TOKEN", ""), + GithubApiToken: githubToken, BinPath: binPath, SnapshotKey: snapshotMetadataKey, } @@ -115,16 +170,46 @@ func getConfigs() (*Configs, lib.LoggerI) { coordinator := &CoordinatorConfig{ Canopy: canopyConfig, BinPath: binPath, - MaxDelayTime: defaultMaxDelayTime, - CheckPeriod: defaultCheckPeriod, + MaxDelayTime: envOrDefaultInt("AUTO_UPDATE_MAX_DELAY_MINUTES", defaultMaxDelayTime), + CheckPeriod: envOrDefaultDuration("AUTO_UPDATE_CHECK_PERIOD", defaultCheckPeriod), GracePeriod: defaultGracePeriod, } + // setup plugin updater config if plugin auto-update is enabled + var pluginUpdater *ReleaseManagerConfig + pluginAutoUpdate := canopyConfig.PluginAutoUpdate + if pluginAutoUpdate.Enabled && canopyConfig.Plugin != "" { + // lookup plugin release config from map + pluginReleaseCfg, ok := pluginReleaseConfigs[canopyConfig.Plugin] + if !ok { + l.Warnf("unknown plugin type %q, plugin auto-update disabled", canopyConfig.Plugin) + } else { + // use configured repo or default to canopy-network/canopy + repoOwner := pluginAutoUpdate.RepoOwner + if repoOwner == "" { + repoOwner = defaultRepoOwner + } + repoName := pluginAutoUpdate.RepoName + if repoName == "" { + repoName = defaultRepoName + } + pluginUpdater = &ReleaseManagerConfig{ + Type: ReleaseTypePlugin, + RepoOwner: repoOwner, + RepoName: repoName, + PluginDir: fmt.Sprintf("plugin/%s", canopyConfig.Plugin), + PluginConfig: pluginReleaseCfg, + GithubApiToken: githubToken, + } + } + } + return &Configs{ - Updater: updater, - Snapshot: snapshot, - Coordinator: coordinator, - LoggerI: l, + Updater: updater, + PluginUpdater: pluginUpdater, + Snapshot: snapshot, + Coordinator: coordinator, + LoggerI: l, }, l } @@ -137,3 +222,32 @@ func envOrDefault(key, defaultValue string) string { } return value } + +// envOrDefaultInt returns the value of the environment variable as an int, +// or the default value if the variable is not set or invalid. +func envOrDefaultInt(key string, defaultValue int) int { + value := os.Getenv(key) + if value == "" { + return defaultValue + } + var result int + if _, err := fmt.Sscanf(value, "%d", &result); err != nil { + return defaultValue + } + return result +} + +// envOrDefaultDuration returns the value of the environment variable as a duration, +// or the default value if the variable is not set or invalid. 
+// Accepts formats like "30m", "1h", "30s", etc. +func envOrDefaultDuration(key string, defaultValue time.Duration) time.Duration { + value := os.Getenv(key) + if value == "" { + return defaultValue + } + result, err := time.ParseDuration(value) + if err != nil { + return defaultValue + } + return result +} diff --git a/cmd/auto-update/updater.go b/cmd/auto-update/releaser.go similarity index 58% rename from cmd/auto-update/updater.go rename to cmd/auto-update/releaser.go index 51ab235a..92555568 100644 --- a/cmd/auto-update/updater.go +++ b/cmd/auto-update/releaser.go @@ -26,62 +26,85 @@ type GithubRelease struct { } `json:"assets"` } -// Release represents a release of the current binary with metadata on what to update +// Release represents a release with metadata on what to update type Release struct { Version string // version of the release DownloadURL string // url to download the release ShouldUpdate bool // whether the release should be updated - ApplySnapshot bool // whether the release should apply a snapshot + ApplySnapshot bool // whether the release should apply a snapshot (CLI only) } -// UpdaterConfig contains configuration for the updater -type UpdaterConfig struct { - RepoName string // name of the repository - RepoOwner string // owner of the repository - BinPath string // path to the binary to be updated - SnapshotKey string // version metadata key to know if a snapshot should be applied - GithubApiToken string // github api token for authenticated requests +// ReleaseType indicates whether this is a CLI or plugin release +type ReleaseType int + +const ( + ReleaseTypeCLI ReleaseType = iota + ReleaseTypePlugin +) + +// PluginReleaseConfig contains all plugin-specific configuration for releases +type PluginReleaseConfig struct { + // Asset configuration + AssetName string // asset filename (e.g., "go-plugin-%s-%s.tar.gz" or "typescript-plugin.tar.gz") + ArchSpecific bool // whether to format AssetName with OS/arch (uses fmt.Sprintf with GOOS, GOARCH) + UseX64Arch bool // use "x64" instead of "amd64" for architecture (e.g., C#) + MuslAssetName string // alternative asset name for musl/Alpine systems (optional) + // Extraction trigger + OldBinaryPath string // relative path to binary to remove to trigger extraction (e.g., "go-plugin") + // Process management + ProcessPattern string // process pattern for pkill (e.g., "go-plugin") + PIDFile string // path to PID file (e.g., "/tmp/plugin/go-plugin.pid") +} + +// ReleaseManagerConfig contains configuration for the release manager +type ReleaseManagerConfig struct { + Type ReleaseType // type of release (CLI or plugin) + RepoName string // name of the repository + RepoOwner string // owner of the repository + GithubApiToken string // github api token for authenticated requests + // CLI-specific fields + BinPath string // path to the binary to be updated (CLI only) + SnapshotKey string // version metadata key for snapshot (CLI only) + // Plugin-specific fields + PluginDir string // path to the plugin directory + PluginConfig *PluginReleaseConfig // plugin-specific release configuration } -// UpdateManager manages the update process for the current binary -type UpdateManager struct { - // updater config - config *UpdaterConfig - // http client to download the release +// ReleaseManager manages the update process for CLI or plugins +type ReleaseManager struct { + config *ReleaseManagerConfig httpClient *http.Client - // current version of the binary - Version string + Version string // current version } -// NewUpdateManager 
creates a new UpdateManager instance
-func NewUpdateManager(config *UpdaterConfig, version string) *UpdateManager {
-	return &UpdateManager{
+// NewReleaseManager creates a new ReleaseManager instance
+func NewReleaseManager(config *ReleaseManagerConfig, version string) *ReleaseManager {
+	return &ReleaseManager{
 		config:     config,
 		httpClient: &http.Client{Timeout: httpReleaseClientTimeout},
 		Version:    version,
 	}
 }
 
-// Check checks for updates of the current binary
-func (um *UpdateManager) Check() (*Release, error) {
-	// Get the latest release
-	release, err := um.GetLatestRelease()
+// Check checks for updates and returns a release if one is available
+func (rm *ReleaseManager) Check() (*Release, error) {
+	release, err := rm.GetLatestRelease()
 	if err != nil {
 		return nil, err
 	}
 	// Check if the release is valid to update
-	if err := um.ShouldUpdate(release); err != nil {
+	if err := rm.ShouldUpdate(release); err != nil {
 		return nil, err
 	}
 	// exit
 	return release, nil
 }
 
-// GetLatestRelease returns the latest valid release for the system from the GitHub API
-func (um *UpdateManager) GetLatestRelease() (release *Release, err error) {
+// GetLatestRelease returns the latest release from the GitHub API
+func (rm *ReleaseManager) GetLatestRelease() (*Release, error) {
 	// build the URL: https://api.github.com/repos/<owner>/<repo>/releases/latest
 	apiURL, err := url.JoinPath("https://api.github.com", "repos",
-		um.config.RepoOwner, um.config.RepoName, "releases", "latest")
+		rm.config.RepoOwner, rm.config.RepoName, "releases", "latest")
 	if err != nil {
 		return nil, err
 	}
@@ -91,11 +114,11 @@ func (um *UpdateManager) GetLatestRelease() (release *Release, err error) {
 	}
 	// github recommends to add an user agent to any API request
 	req.Header.Set("User-Agent", "canopy-updater/1.0")
-	if token := um.config.GithubApiToken; token != "" {
+	if token := rm.config.GithubApiToken; token != "" {
 		req.Header.Set("Authorization", "Bearer "+token)
 	}
 	// make the request
-	resp, err := um.httpClient.Do(req)
+	resp, err := rm.httpClient.Do(req)
 	if err != nil {
 		return nil, err
 	}
@@ -109,65 +132,111 @@ func (um *UpdateManager) GetLatestRelease() (release *Release, err error) {
 	if err := json.NewDecoder(resp.Body).Decode(&rel); err != nil {
 		return nil, err
 	}
-	// find asset matching OS and ARCH
-	targetName := fmt.Sprintf("cli-%s-%s", runtime.GOOS, runtime.GOARCH)
+	// find matching asset
+	targetName := rm.getAssetName()
 	for _, asset := range rel.Assets {
 		if asset.Name == targetName {
-			// match found, stop
-			release = &Release{
+			return &Release{
 				Version:     rel.TagName,
 				DownloadURL: asset.BrowserDownloadURL,
-			}
-			break
+			}, nil
 		}
 	}
 	// return based on tagName
-	if release == nil {
+	if rm.config.Type == ReleaseTypeCLI {
 		return nil, fmt.Errorf("unsupported architecture: %s-%s", runtime.GOOS, runtime.GOARCH)
 	}
-	return release, nil
+	return nil, fmt.Errorf("no matching asset found for plugin (looking for %s)", targetName)
+}
+
+// isMuslLibc detects if the system uses musl libc (Alpine Linux)
+func isMuslLibc() bool {
+	// Check for musl dynamic linker
+	matches, _ := filepath.Glob("/lib/ld-musl-*.so.1")
+	return len(matches) > 0
+}
+
+// getAssetName returns the expected asset name based on release type
+func (rm *ReleaseManager) getAssetName() string {
+	if rm.config.Type == ReleaseTypeCLI {
+		return fmt.Sprintf("cli-%s-%s", runtime.GOOS, runtime.GOARCH)
+	}
+	// Plugin asset name from config
+	cfg := rm.config.PluginConfig
+	if cfg == nil {
+		return ""
+	}
+	// Use musl-specific asset name if available and running on musl
+	assetName := cfg.AssetName
+	if cfg.MuslAssetName != "" && isMuslLibc() {
+		assetName = cfg.MuslAssetName
+	}
+	if !cfg.ArchSpecific {
+		return assetName
+	}
+	// Format with OS and arch
+	arch := runtime.GOARCH
+	if cfg.UseX64Arch && arch == "amd64" {
+		arch = "x64"
+	}
+	return fmt.Sprintf(assetName, runtime.GOOS, arch)
 }
 
 // ShouldUpdate determines whether the given release should be applied
-func (um *UpdateManager) ShouldUpdate(release *Release) error {
+func (rm *ReleaseManager) ShouldUpdate(release *Release) error {
 	if release == nil {
 		return fmt.Errorf("release is nil")
 	}
-	// convert the versions to their canonical form
-	candidate := semver.Canonical(release.Version)
-	current := semver.Canonical(um.Version)
+	candidateTag := release.Version
+	// for plugins, extract version from prefixed tags like "plugin-go-v1.0.0"
+	if rm.config.Type == ReleaseTypePlugin && strings.Contains(candidateTag, "-v") {
+		parts := strings.Split(candidateTag, "-v")
+		if len(parts) >= 2 {
+			candidateTag = "v" + parts[len(parts)-1]
+		}
+	}
 	// check if the versions are valid
+	candidate := semver.Canonical(candidateTag)
+	current := semver.Canonical(rm.Version)
+	// for plugins, if current version is invalid (first run), always update
+	if rm.config.Type == ReleaseTypePlugin && (current == "" || !semver.IsValid(current)) {
+		release.ShouldUpdate = true
+		release.Version = candidate
+		return nil
+	}
+	// validate versions
 	if candidate == "" || !semver.IsValid(candidate) {
 		return fmt.Errorf("invalid release version: %s", release.Version)
 	}
 	if current == "" || !semver.IsValid(current) {
-		return fmt.Errorf("invalid local version: %s", um.Version)
+		return fmt.Errorf("invalid local version: %s", rm.Version)
 	}
 	// should update if the candidate version is greater than the current version
 	release.ShouldUpdate = semver.Compare(candidate, current) > 0
 	if !release.ShouldUpdate {
 		return nil
 	}
-	// should apply snapshot if the candidate's build metadata contains the snapshot key
-	release.ApplySnapshot = strings.Contains(semver.Build(release.Version),
-		um.config.SnapshotKey)
+	// for CLI, check if snapshot should be applied; read the build metadata from
+	// the raw tag before it is overwritten below (semver.Canonical discards it)
+	if rm.config.Type == ReleaseTypeCLI {
+		release.ApplySnapshot = strings.Contains(semver.Build(release.Version), rm.config.SnapshotKey)
+	}
 	release.Version = candidate
 	return nil
 }
 
-// Download downloads the release assets into the config bin directory
-func (um *UpdateManager) Download(ctx context.Context, release *Release) error {
-	// download the release binary
+// Download downloads the release asset
+func (rm *ReleaseManager) Download(ctx context.Context, release *Release) error {
 	req, err := http.NewRequestWithContext(ctx, http.MethodGet, release.DownloadURL, nil)
 	if err != nil {
 		return err
 	}
 	// github recommends to add an user agent to any API request
 	req.Header.Set("User-Agent", "canopy-updater/1.0")
-	if token := um.config.GithubApiToken; token != "" {
+	if token := rm.config.GithubApiToken; token != "" {
 		req.Header.Set("Authorization", "Bearer "+token)
 	}
-	resp, err := um.httpClient.Do(req)
+	resp, err := rm.httpClient.Do(req)
 	if err != nil {
 		return err
 	}
@@ -175,14 +244,50 @@ func (um *UpdateManager) Download(ctx context.Context, release *Release) error {
 	if resp.StatusCode != http.StatusOK {
 		return fmt.Errorf("unexpected status code: %d", resp.StatusCode)
 	}
-	// save the response as an executable
-	bin, err := SaveToFile(um.config.BinPath, resp.Body, 0755)
+	// download the release binary
+	if rm.config.Type
== ReleaseTypeCLI { + return rm.downloadCLI(resp.Body) + } + return rm.downloadPlugin(resp.Body) +} + +// downloadCLI saves the CLI binary +func (rm *ReleaseManager) downloadCLI(body io.Reader) error { + bin, err := SaveToFile(rm.config.BinPath, body, 0755) if err != nil { return err } return bin.Close() } +// downloadPlugin saves the plugin tarball and removes old binary +func (rm *ReleaseManager) downloadPlugin(body io.Reader) error { + tarballPath := filepath.Join(rm.config.PluginDir, rm.getAssetName()) + file, err := SaveToFile(tarballPath, body, 0644) + if err != nil { + return err + } + if err := file.Close(); err != nil { + return err + } + // remove old binary so pluginctl.sh will extract the new tarball + if oldPath := rm.getOldBinaryPath(); oldPath != "" { + _ = os.Remove(oldPath) + } + return nil +} + +// getOldBinaryPath returns the path to the old plugin binary to trigger extraction +func (rm *ReleaseManager) getOldBinaryPath() string { + if rm.config.Type == ReleaseTypeCLI || rm.config.PluginConfig == nil { + return "" + } + if rm.config.PluginConfig.OldBinaryPath == "" { + return "" + } + return filepath.Join(rm.config.PluginDir, rm.config.PluginConfig.OldBinaryPath) +} + // SnapshotManager code below // SnapshotConfig is the config for the snapshot manager diff --git a/lib/config.go b/lib/config.go index 87401208..ce0a588e 100644 --- a/lib/config.go +++ b/lib/config.go @@ -60,15 +60,23 @@ func DefaultConfig() Config { // MAIN CONFIG BELOW type MainConfig struct { - LogLevel string `json:"logLevel"` // any level includes the levels above it: debug < info < warning < error - ChainId uint64 `json:"chainId"` // the identifier of this particular chain within a single 'network id' - SleepUntil uint64 `json:"sleepUntil"` // allows coordinated 'wake-ups' for genesis or chain halt events - RootChain []RootChain `json:"rootChain"` // a list of the root chain(s) a node could connect to as dictated by the governance parameter 'RootChainId' - RunVDF bool `json:"runVDF"` // whether the node should run a Verifiable Delay Function to help secure the network against Long-Range-Attacks - Headless bool `json:"headless"` // turn off the web wallet and block explorer 'web' front ends - AutoUpdate bool `json:"autoUpdate"` // check for new versions of software each X time - Plugin string `json:"plugin"` // the configured plugin to use - PluginTimeoutMS int `json:"pluginTimeoutMS"` // plugin request timeout in milliseconds + LogLevel string `json:"logLevel"` // any level includes the levels above it: debug < info < warning < error + ChainId uint64 `json:"chainId"` // the identifier of this particular chain within a single 'network id' + SleepUntil uint64 `json:"sleepUntil"` // allows coordinated 'wake-ups' for genesis or chain halt events + RootChain []RootChain `json:"rootChain"` // a list of the root chain(s) a node could connect to as dictated by the governance parameter 'RootChainId' + RunVDF bool `json:"runVDF"` // whether the node should run a Verifiable Delay Function to help secure the network against Long-Range-Attacks + Headless bool `json:"headless"` // turn off the web wallet and block explorer 'web' front ends + AutoUpdate bool `json:"autoUpdate"` // check for new versions of software each X time + Plugin string `json:"plugin"` // the configured plugin to use + PluginTimeoutMS int `json:"pluginTimeoutMS"` // plugin request timeout in milliseconds + PluginAutoUpdate PluginAutoUpdateConfig `json:"pluginAutoUpdate"` // plugin auto-update configuration +} + +// PluginAutoUpdateConfig 
holds configuration for plugin auto-updates
+type PluginAutoUpdateConfig struct {
+	Enabled   bool   `json:"enabled"`   // whether plugin auto-update is enabled
+	RepoOwner string `json:"repoOwner"` // GitHub repository owner (e.g., "canopy-network")
+	RepoName  string `json:"repoName"`  // GitHub repository name (e.g., "canopy")
 }
 
 // DefaultMainConfig() sets log level to 'info'
diff --git a/plugin/csharp/AGENTS.md b/plugin/csharp/AGENTS.md
index 9d220cb1..b3fd96b9 100644
--- a/plugin/csharp/AGENTS.md
+++ b/plugin/csharp/AGENTS.md
@@ -67,9 +67,13 @@ Defines transaction message types:
 # Restore dependencies
 make restore
 
-# Build the plugin
+# Build the plugin (framework-dependent, requires .NET runtime)
 make build
 
+# Build self-contained executable for local Linux development
+# This produces a native executable that doesn't require .NET runtime installed
+make build-local
+
 # Run the plugin (starts socket server)
 make run
 
@@ -86,6 +90,73 @@ make format
 make lint
 ```
 
+### Build Variants
+
+| Target | Output | Use Case |
+|--------|--------|----------|
+| `make build` | Framework-dependent DLL | Development with .NET SDK |
+| `make build-local` | Self-contained executable (`bin/CanopyPlugin`) | Local Linux testing without .NET runtime |
+| Release workflow | Self-contained for glibc + musl | Docker/production (handled by CI) |
+
+## Running with Docker
+
+The C# plugin can be run in a Docker container that includes both Canopy and the plugin.
+
+### Build the Docker Image
+
+From the repository root:
+
+```bash
+make docker/plugin PLUGIN=csharp
+```
+
+This builds a Docker image named `canopy-csharp` that contains:
+- The Canopy binary
+- The C# plugin as a self-contained executable (no .NET runtime required)
+- Pre-configured `config.json` with `"plugin": "csharp"`
+
+Note: The release workflow builds both glibc (standard Linux) and musl (Alpine) variants. The auto-update system automatically downloads the correct variant based on the runtime environment.
+
+### Run the Container
+
+```bash
+make docker/run-csharp
+```
+
+Or manually with volume mount for persistent data:
+
+```bash
+docker run -v ~/.canopy:/root/.canopy canopy-csharp
+```
+
+### Expose Ports for Testing
+
+To run tests against the containerized Canopy, expose the RPC ports:
+
+```bash
+docker run -p 50002:50002 -p 50003:50003 -v ~/.canopy:/root/.canopy canopy-csharp
+```
+
+| Port | Service |
+|------|---------|
+| 50002 | RPC API (transactions, queries) |
+| 50003 | Admin RPC (keystore operations) |
+
+Now you can run tests from your host machine that connect to `localhost:50002`.
+
+### View Logs
+
+```bash
+# Get the container ID
+docker ps
+
+# View Canopy logs
+docker exec -it <container_id> tail -f /root/.canopy/logs/log
+
+# View plugin logs
+docker exec -it <container_id> tail -f /tmp/plugin/csharp-plugin.log
+```
+
 ## Development Guidelines
 
 ### Adding New Transaction Types
diff --git a/plugin/csharp/Dockerfile b/plugin/csharp/Dockerfile
index 0e26ed28..240ecd36 100644
--- a/plugin/csharp/Dockerfile
+++ b/plugin/csharp/Dockerfile
@@ -9,23 +9,32 @@ RUN go mod download
 COPY . .
 RUN go build -trimpath -ldflags="-s -w" -o /canopy ./cmd/main/...
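+# NOTE: the runtime stage below writes a minimal config.json; a sketch of what an
+# operator-supplied config enabling plugin auto-update could look like (keys taken
+# from lib/config.go; repoOwner/repoName fall back to canopy-network/canopy when
+# omitted):
+#   {"plugin": "csharp", "pluginAutoUpdate": {"enabled": true}}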
-# Stage 2: Build C# Plugin (using Debian-based image for glibc compatibility with grpc.tools) -FROM mcr.microsoft.com/dotnet/sdk:9.0 AS csharp-builder +# Stage 2: Build C# Plugin (using Debian SDK for grpc.tools compatibility, target musl for Alpine runtime) +FROM mcr.microsoft.com/dotnet/sdk:8.0 AS csharp-builder WORKDIR /app COPY plugin/csharp/ ./ -RUN dotnet publish CanopyPlugin.csproj -c Release -o /out +# Build self-contained for Alpine (musl) - must use Debian SDK because grpc.tools protoc is glibc-only +RUN dotnet publish CanopyPlugin.csproj -c Release -r linux-musl-x64 --self-contained true -o /out -# Stage 3: Runtime -FROM mcr.microsoft.com/dotnet/runtime:9.0 +# Stage 3: Runtime (Alpine-based, self-contained binary includes .NET runtime) +FROM alpine:3.19 WORKDIR /app -RUN mkdir -p /tmp/plugin /root/.canopy + +# Install runtime dependencies +RUN apk add --no-cache bash procps libstdc++ libgcc icu-libs + +RUN mkdir -p /tmp/plugin /root/.canopy plugin/csharp/bin COPY --from=canopy-builder /canopy . -COPY --from=csharp-builder /out ./plugin/ +COPY --from=csharp-builder /out ./plugin/csharp/bin/ + +# Copy pluginctl.sh from source +COPY plugin/csharp/pluginctl.sh ./plugin/csharp/pluginctl.sh +RUN chmod +x plugin/csharp/pluginctl.sh # Set plugin type for Canopy to start RUN printf '{"plugin":"csharp"}\n' > /root/.canopy/config.json -# Default: run canopy (start plugin separately with: dotnet plugin/CanopyPlugin.dll) +# Default: run canopy # Mount config at runtime: -v ~/.canopy:/root/.canopy CMD ["./canopy", "start"] diff --git a/plugin/csharp/Makefile b/plugin/csharp/Makefile index 2b29084f..bde70407 100644 --- a/plugin/csharp/Makefile +++ b/plugin/csharp/Makefile @@ -11,6 +11,11 @@ build: build-release: dotnet build --configuration Release +# Build self-contained for local development (same as root Makefile) +# This builds a native executable that doesn't require .NET runtime +build-local: + rm -rf bin && dotnet publish -c Release -r linux-x64 --self-contained true -o bin + # Testing test: dotnet test diff --git a/plugin/csharp/TUTORIAL.md b/plugin/csharp/TUTORIAL.md index 52f0bb76..122a8148 100644 --- a/plugin/csharp/TUTORIAL.md +++ b/plugin/csharp/TUTORIAL.md @@ -387,9 +387,11 @@ make build ## Step 7: Running Canopy with the Plugin -To run Canopy with the C# plugin enabled, you need to configure the `plugin` field in your Canopy configuration file. +There are two ways to run Canopy with the C# plugin: locally or with Docker. -### 1. Locate your config.json +### Option A: Running Locally + +#### 1. Locate your config.json The configuration file is typically located at `~/.canopy/config.json`. If it doesn't exist, start Canopy once to generate the default configuration: @@ -398,7 +400,7 @@ The configuration file is typically located at `~/.canopy/config.json`. If it do # Stop it after it generates the config (Ctrl+C) ``` -### 2. Enable the C# plugin +#### 2. Enable the C# plugin Edit `~/.canopy/config.json` and add or modify the `plugin` field to `"csharp"`: @@ -411,7 +413,7 @@ Edit `~/.canopy/config.json` and add or modify the `plugin` field to `"csharp"`: **Note**: The `plugin` field should be at the top level of the JSON configuration. -### 3. Start Canopy +#### 3. Start Canopy ```bash ~/go/bin/canopy start @@ -419,7 +421,7 @@ Edit `~/.canopy/config.json` and add or modify the `plugin` field to `"csharp"`: Canopy will automatically start the C# plugin and connect to it via Unix socket. -### 4. Verify the plugin is running +#### 4. 
Verify the plugin is running
 
 Check the plugin logs:
 
@@ -429,6 +431,68 @@ tail -f /tmp/plugin/csharp-plugin.log
 You should see messages indicating the plugin has connected and performed the handshake with Canopy.
 
+### Option B: Running with Docker
+
+Instead of running Canopy and the plugin locally, you can use Docker to run everything in a container.
+
+#### 1. Build the Docker image
+
+From the repository root:
+
+```bash
+make docker/plugin PLUGIN=csharp
+```
+
+This creates a `canopy-csharp` image containing both Canopy and the C# plugin pre-configured.
+
+#### 2. Run the container
+
+```bash
+make docker/run-csharp
+```
+
+Or with a custom volume mount for persistent data:
+
+```bash
+docker run -v ~/.canopy:/root/.canopy canopy-csharp
+```
+
+#### 3. Expose RPC ports (for running tests)
+
+To run tests against the containerized Canopy, expose the RPC ports:
+
+```bash
+docker run -p 50002:50002 -p 50003:50003 -v ~/.canopy:/root/.canopy canopy-csharp
+```
+
+| Port | Service |
+|------|---------|
+| 50002 | RPC API (transactions, queries) |
+| 50003 | Admin RPC (keystore operations) |
+
+Now you can run tests from your host machine that connect to `localhost:50002` and `localhost:50003`.
+
+#### 4. View logs inside the container
+
+```bash
+# Get the container ID
+docker ps
+
+# View Canopy logs
+docker exec -it <container_id> tail -f /root/.canopy/logs/log
+
+# View plugin logs
+docker exec -it <container_id> tail -f /tmp/plugin/csharp-plugin.log
+```
+
+#### 5. Interactive shell (for debugging)
+
+To inspect the container or debug issues:
+
+```bash
+docker run -it --entrypoint /bin/sh canopy-csharp
+```
+
 ## Step 8: Testing
 
 Run the RPC tests from the `tutorial` directory:
diff --git a/plugin/csharp/pluginctl.sh b/plugin/csharp/pluginctl.sh
index 2cb5bdab..f374807d 100755
--- a/plugin/csharp/pluginctl.sh
+++ b/plugin/csharp/pluginctl.sh
@@ -1,14 +1,77 @@
 #!/bin/bash
 # pluginctl.sh - Control script for managing the csharp-plugin binary
-# Usage: ./pluginctl.sh {start|stop|status|restart|build}
+# Usage: ./pluginctl.sh {start|stop|status|restart}
 # Configuration variables for paths and files
 SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
-BINARY_PATH="$SCRIPT_DIR/bin/Debug/net9.0/CanopyPlugin"
+# Self-contained executable (not DLL)
+BINARY_PATH="$SCRIPT_DIR/bin/CanopyPlugin"
 PID_FILE="/tmp/plugin/csharp-plugin.pid"
 LOG_FILE="/tmp/plugin/csharp-plugin.log"
 PLUGIN_DIR="/tmp/plugin"
 # Timeout in seconds for graceful shutdown
 STOP_TIMEOUT=10
+
+# Detect system architecture
+get_arch() {
+    local arch=$(uname -m)
+    case "$arch" in
+        x86_64|amd64)
+            echo "x64"
+            ;;
+        aarch64|arm64)
+            echo "arm64"
+            ;;
+        *)
+            echo "x64" # Default to x64
+            ;;
+    esac
+}
+
+# Detect if running on musl libc (Alpine)
+is_musl() {
+    [ -f /lib/ld-musl-*.so.1 ] 2>/dev/null && return 0
+    return 1
+}
+
+# Extract tarball if binary doesn't exist
+extract_if_needed() {
+    # If binary already exists, nothing to do
+    if [ -f "$BINARY_PATH" ]; then
+        return 0
+    fi
+
+    # Check for architecture-specific tarball
+    local arch=$(get_arch)
+    local tarball=""
+
+    # Try musl tarball first on Alpine, then glibc
+    if is_musl; then
+        tarball="$SCRIPT_DIR/csharp-plugin-linux-musl-${arch}.tar.gz"
+    fi
+
+    # Fall back to glibc tarball if musl not found or not on Alpine
+    if [ -z "$tarball" ] || [ ! -f "$tarball" ]; then
+        tarball="$SCRIPT_DIR/csharp-plugin-linux-${arch}.tar.gz"
+    fi
+
+    if [ -f "$tarball" ]; then
+        echo "Extracting $tarball..."
+ # Clear old bin directory to avoid leftover files from previous builds + rm -rf "$SCRIPT_DIR/bin" + mkdir -p "$SCRIPT_DIR/bin" + tar -xzf "$tarball" -C "$SCRIPT_DIR/bin" + if [ $? -eq 0 ] && [ -f "$BINARY_PATH" ]; then + echo "Extraction complete" + return 0 + else + echo "Error: Failed to extract from $tarball" + return 1 + fi + fi + + return 1 +} + # Check if the process is running based on PID file is_running() { # Return 1 if PID file doesn't exist @@ -38,18 +101,6 @@ cleanup_pid() { rm -f "$PID_FILE" fi } -# Build the csharp-plugin binary -build() { - echo "Building csharp-plugin..." - cd "$SCRIPT_DIR" && dotnet build - if [ $? -eq 0 ]; then - echo "Build completed successfully" - return 0 - else - echo "Error: Build failed" - return 1 - fi -} # Start the csharp-plugin binary start() { # Check if already running @@ -59,14 +110,17 @@ start() { fi # Clean up any stale PID file cleanup_pid - # Check if binary exists and is executable + # Try to extract from tarball if binary doesn't exist + extract_if_needed + # Check if binary exists if [ ! -f "$BINARY_PATH" ]; then - echo "Binary not found at $BINARY_PATH, building..." - build || return 1 + echo "Error: Binary not found at $BINARY_PATH" + echo "Run 'make build' or download csharp-plugin-linux-$(get_arch).tar.gz" + return 1 fi # Ensure plugin directory exists mkdir -p "$PLUGIN_DIR" - # Start the binary in background with nohup + # Start the self-contained binary in background with nohup echo "Starting csharp-plugin..." nohup "$BINARY_PATH" > "$LOG_FILE" 2>&1 & local pid=$! @@ -160,11 +214,8 @@ case "${1:-}" in restart) restart ;; - build) - build - ;; *) - echo "Usage: $0 {start|stop|status|restart|build}" + echo "Usage: $0 {start|stop|status|restart}" exit 1 ;; esac diff --git a/plugin/go/.gitignore b/plugin/go/.gitignore index 8d87b1d2..c4793e9f 100644 --- a/plugin/go/.gitignore +++ b/plugin/go/.gitignore @@ -1 +1,3 @@ node_modules/* +go-plugin +tutorial/tutorial diff --git a/plugin/go/AGENTS.md b/plugin/go/AGENTS.md index f732f80d..e89a413e 100644 --- a/plugin/go/AGENTS.md +++ b/plugin/go/AGENTS.md @@ -71,7 +71,64 @@ c.plugin.StateWrite(c, &PluginStateWriteRequest{Sets: [...], Deletes: [...]}) ```bash cd plugin/go -make build # Builds to ~/go/bin/go-plugin +make build # Builds to plugin/go/go-plugin +``` + +## Running with Docker + +The Go plugin can be run in a Docker container that includes both Canopy and the plugin. + +### Build the Docker Image + +From the repository root: + +```bash +make docker/plugin PLUGIN=go +``` + +This builds a Docker image named `canopy-go` that contains: +- The Canopy binary +- The Go plugin binary and control script +- Pre-configured `config.json` with `"plugin": "go"` + +### Run the Container + +```bash +make docker/run-go +``` + +Or manually with volume mount for persistent data: + +```bash +docker run -v ~/.canopy:/root/.canopy canopy-go +``` + +### Expose Ports for Testing + +To run tests against the containerized Canopy, expose the RPC ports: + +```bash +docker run -p 50002:50002 -p 50003:50003 -v ~/.canopy:/root/.canopy canopy-go +``` + +| Port | Service | +|------|---------| +| 50002 | RPC API (transactions, queries) | +| 50003 | Admin RPC (keystore operations) | + +Now you can run tests from your host machine that connect to `localhost:50002`. 
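+
+As a quick connectivity check from the host, something like the following works
+(a sketch: the query path `/v1/query/height` is an assumption here; point it at
+whichever RPC route your tests actually use):
+
+```bash
+# POST an empty JSON body to the mapped RPC port and print the response
+curl -s -X POST -d '{}' http://localhost:50002/v1/query/height
+```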
+
+### View Logs
+
+```bash
+# Get the container ID
+docker ps
+
+# View Canopy logs
+docker exec -it <container-id> tail -f /root/.canopy/logs/log
+
+# View plugin logs
+docker exec -it <container-id> tail -f /tmp/plugin/go-plugin.log
+```

## Running with Canopy

diff --git a/plugin/go/Dockerfile b/plugin/go/Dockerfile
index 62ae486c..074ac402 100644
--- a/plugin/go/Dockerfile
+++ b/plugin/go/Dockerfile
@@ -2,7 +2,7 @@
# Build from repository root: docker build -f plugin/go/Dockerfile -t canopy-go .

# Stage 1: Build Canopy
-FROM golang:1.24-alpine AS canopy-builder
+FROM golang:1.25-alpine AS canopy-builder
WORKDIR /app
COPY go.mod go.sum ./
RUN go mod download
@@ -10,7 +10,7 @@ COPY . .
RUN go build -trimpath -ldflags="-s -w" -o /canopy ./cmd/main/...

# Stage 2: Build Go Plugin
-FROM golang:1.24-alpine AS plugin-builder
+FROM golang:1.25-alpine AS plugin-builder
WORKDIR /app
COPY plugin/go/ ./
RUN go build -o /plugin .
@@ -18,10 +18,12 @@ RUN go build -o /plugin .

# Stage 3: Runtime
FROM alpine:latest
WORKDIR /app
-RUN mkdir -p /tmp/plugin /root/.canopy
+RUN apk add --no-cache bash && mkdir -p /tmp/plugin /root/.canopy plugin/go
COPY --from=canopy-builder /canopy .
-COPY --from=plugin-builder /plugin .
+COPY --from=plugin-builder /plugin plugin/go/go-plugin
+COPY plugin/go/pluginctl.sh plugin/go/pluginctl.sh
+RUN chmod +x plugin/go/pluginctl.sh plugin/go/go-plugin

# Set plugin type for Canopy to start
RUN printf '{"plugin":"go"}\n' > /root/.canopy/config.json
diff --git a/plugin/go/Makefile b/plugin/go/Makefile
index ea96e662..8fba553f 100644
--- a/plugin/go/Makefile
+++ b/plugin/go/Makefile
@@ -1,2 +1,2 @@
build:
-	go build -o ~/go/bin/go-plugin .
+	go build -o go-plugin .
diff --git a/plugin/go/TUTORIAL.md b/plugin/go/TUTORIAL.md
index 7373c3cb..717e63b1 100644
--- a/plugin/go/TUTORIAL.md
+++ b/plugin/go/TUTORIAL.md
@@ -392,6 +392,68 @@
tail -f /tmp/plugin/go-plugin.log
```

You should see messages indicating the plugin has connected and performed the handshake with Canopy.

+## Step 7b: Running with Docker (Alternative)
+
+Instead of running Canopy and the plugin locally, you can use Docker to run everything in a container.
+
+### 1. Build the Docker image
+
+From the repository root:
+
+```bash
+make docker/plugin PLUGIN=go
+```
+
+This creates a `canopy-go` image containing both Canopy and the Go plugin pre-configured.
+
+### 2. Run the container
+
+```bash
+make docker/run-go
+```
+
+Or with a custom volume mount for persistent data:
+
+```bash
+docker run -v ~/.canopy:/root/.canopy canopy-go
+```
+
+### 3. Expose RPC ports (for running tests)
+
+To run tests against the containerized Canopy, expose the RPC ports:
+
+```bash
+docker run -p 50002:50002 -p 50003:50003 -v ~/.canopy:/root/.canopy canopy-go
+```
+
+| Port | Service |
+|------|---------|
+| 50002 | RPC API (transactions, queries) |
+| 50003 | Admin RPC (keystore operations) |
+
+Now you can run tests from your host machine that connect to `localhost:50002` and `localhost:50003`.
+
+### 4. View logs inside the container
+
+```bash
+# Get the container ID
+docker ps
+
+# View Canopy logs
+docker exec -it <container-id> tail -f /root/.canopy/logs/log
+
+# View plugin logs
+docker exec -it <container-id> tail -f /tmp/plugin/go-plugin.log
+```
+
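+You can also list the processes running inside the container without attaching a shell:
+
+```bash
+# show Canopy and the plugin process from the host
+docker top <container-id>
+```
+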
+### 5. Interactive shell (for debugging)
+
+To inspect the container or debug issues:
+
+```bash
+docker run -it --entrypoint /bin/sh canopy-go
+```
+
## Step 8: Testing

Run the RPC tests from the `tutorial` directory:

diff --git a/plugin/go/contract/plugin.go b/plugin/go/contract/plugin.go
index 84934076..72e91a3b 100644
--- a/plugin/go/contract/plugin.go
+++ b/plugin/go/contract/plugin.go
@@ -151,8 +151,6 @@ func (p *Plugin) ListenForInbound() {
			log.Println("Received deliver request from FSM")
			response = &PluginToFSM_Deliver{c.DeliverTx(msg.GetDeliver())}
		case *FSMToPlugin_End:
-			log.Println("PABLITO WAS HERE")
-			log.Println("EL AYUWOKI WAS HERE")
			log.Println("Received end request from FSM")
			response = &PluginToFSM_End{c.EndBlock(msg.GetEnd())}
		default:
diff --git a/plugin/go/pluginctl.sh b/plugin/go/pluginctl.sh
index bf6f7737..00ed4e17 100755
--- a/plugin/go/pluginctl.sh
+++ b/plugin/go/pluginctl.sh
@@ -9,6 +9,50 @@
LOG_FILE="/tmp/plugin/go-plugin.log"
PLUGIN_DIR="/tmp/plugin"
# Timeout in seconds for graceful shutdown
STOP_TIMEOUT=10
+
+# Detect system architecture
+get_arch() {
+    local arch=$(uname -m)
+    case "$arch" in
+        x86_64|amd64)
+            echo "amd64"
+            ;;
+        aarch64|arm64)
+            echo "arm64"
+            ;;
+        *)
+            echo "amd64" # Default to amd64
+            ;;
+    esac
+}
+
+# Extract tarball if binary doesn't exist
+extract_if_needed() {
+    # If binary already exists, nothing to do
+    if [ -f "$BINARY_PATH" ]; then
+        return 0
+    fi
+
+    # Check for architecture-specific tarball
+    local arch=$(get_arch)
+    local tarball="$SCRIPT_DIR/go-plugin-linux-${arch}.tar.gz"
+
+    if [ -f "$tarball" ]; then
+        echo "Extracting $tarball..."
+        tar -xzf "$tarball" -C "$SCRIPT_DIR"
+        if [ $? -eq 0 ] && [ -f "$BINARY_PATH" ]; then
+            chmod +x "$BINARY_PATH"
+            echo "Extraction complete"
+            return 0
+        else
+            echo "Error: Failed to extract binary from $tarball"
+            return 1
+        fi
+    fi
+
+    return 1
+}
+
# Check if the process is running based on PID file
is_running() {
    # Return 1 if PID file doesn't exist
@@ -47,9 +91,12 @@ start() {
    fi
    # Clean up any stale PID file
    cleanup_pid
+    # Try to extract from tarball if binary doesn't exist
+    extract_if_needed
    # Check if binary exists and is executable
    if [ ! -x "$BINARY_PATH" ]; then
        echo "Error: Binary not found or not executable at $BINARY_PATH"
+        echo "Run 'make build' to build the plugin or download go-plugin-linux-$(get_arch).tar.gz"
        return 1
    fi
    # Ensure plugin directory exists
diff --git a/plugin/go/tutorial/tutorial b/plugin/go/tutorial/tutorial
deleted file mode 100755
index d0f5de14..00000000
Binary files a/plugin/go/tutorial/tutorial and /dev/null differ
diff --git a/plugin/kotlin/AGENTS.md b/plugin/kotlin/AGENTS.md
index 8e91eb57..1d6533d6 100644
--- a/plugin/kotlin/AGENTS.md
+++ b/plugin/kotlin/AGENTS.md
@@ -193,6 +193,15 @@ make build
./gradlew build -x test
```

+### Build Fat JAR (for deployment/Docker)
+```bash
+make fatjar
+# or
+./gradlew fatJar --no-daemon
+```
+
+This creates a single JAR with all dependencies bundled, used by Docker and the auto-update system.
+
### Run Plugin
```bash
make run
@@ -212,6 +221,64 @@ make proto
make test
```

+## Running with Docker
+
+The Kotlin plugin can be run in a Docker container that includes both Canopy and the plugin.
+
+### Build the Docker Image
+
+From the repository root:
+
+```bash
+make docker/plugin PLUGIN=kotlin
+```
+
+This builds a Docker image named `canopy-kotlin` that contains:
+- The Canopy binary
+- The Kotlin plugin fat JAR
+- JRE 21 runtime
+- Pre-configured `config.json` with `"plugin": "kotlin"`
+
+### Run the Container
+
+```bash
+make docker/run-kotlin
+```
+
+Or manually with a volume mount for persistent data:
+
+```bash
+docker run -v ~/.canopy:/root/.canopy canopy-kotlin
+```
+
+### Expose Ports for Testing
+
+To run tests against the containerized Canopy, expose the RPC ports:
+
+```bash
+docker run -p 50002:50002 -p 50003:50003 -v ~/.canopy:/root/.canopy canopy-kotlin
+```
+
+| Port | Service |
+|------|---------|
+| 50002 | RPC API (transactions, queries) |
+| 50003 | Admin RPC (keystore operations) |
+
+Now you can run tests from your host machine that connect to `localhost:50002`.
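+
+A quick way to confirm the mapped port accepts connections, using bash's built-in `/dev/tcp` redirection (no extra tools required on the host):
+
+```bash
+# open and immediately close a TCP connection to the RPC port
+(exec 3<>/dev/tcp/localhost/50002) && echo "RPC port reachable"
+```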
+
+### View Logs
+
+```bash
+# Get the container ID
+docker ps
+
+# View Canopy logs
+docker exec -it <container-id> tail -f /root/.canopy/logs/log
+
+# View plugin logs
+docker exec -it <container-id> tail -f /tmp/plugin/kotlin-plugin.log
+```
+
## Important Conventions

### Address Format
diff --git a/plugin/kotlin/Dockerfile b/plugin/kotlin/Dockerfile
index 19758b96..9ddf13c9 100644
--- a/plugin/kotlin/Dockerfile
+++ b/plugin/kotlin/Dockerfile
@@ -18,10 +18,12 @@ RUN ./gradlew fatJar --no-daemon

# Stage 3: Runtime
FROM eclipse-temurin:21-jre-alpine
WORKDIR /app
-RUN mkdir -p /tmp/plugin /root/.canopy
+RUN apk add --no-cache bash && mkdir -p /tmp/plugin /root/.canopy plugin/kotlin/build/libs
COPY --from=canopy-builder /canopy .
-COPY --from=kotlin-builder /app/build/libs/*-all.jar plugin.jar
+COPY --from=kotlin-builder /app/build/libs/*-all.jar plugin/kotlin/build/libs/canopy-plugin-kotlin-1.0.0-all.jar
+COPY plugin/kotlin/pluginctl.sh plugin/kotlin/pluginctl.sh
+RUN chmod +x plugin/kotlin/pluginctl.sh

# Set plugin type for Canopy to start
RUN printf '{"plugin":"kotlin"}\n' > /root/.canopy/config.json
diff --git a/plugin/kotlin/Makefile b/plugin/kotlin/Makefile
index 3e9f3863..9ab396b7 100644
--- a/plugin/kotlin/Makefile
+++ b/plugin/kotlin/Makefile
@@ -58,10 +58,10 @@ jar:
	@echo "Building JAR..."
	./gradlew jar

-# Build fat JAR with all dependencies
+# Build fat JAR with all dependencies (same as root Makefile)
fatjar:
	@echo "Building fat JAR..."
-	./gradlew shadowJar
+	./gradlew fatJar --no-daemon

# Run with debugging enabled
debug:
diff --git a/plugin/kotlin/TUTORIAL.md b/plugin/kotlin/TUTORIAL.md
index 02a9e25a..d694f504 100644
--- a/plugin/kotlin/TUTORIAL.md
+++ b/plugin/kotlin/TUTORIAL.md
@@ -362,9 +362,11 @@ make build

## Step 8: Running Canopy with the Plugin

-To run Canopy with the Kotlin plugin enabled, you need to configure the `plugin` field in your Canopy configuration file.
+There are two ways to run Canopy with the Kotlin plugin: locally or with Docker.

-### 1. Locate your config.json
+### Option A: Running Locally
+
+#### 1. Locate your config.json

The configuration file is typically located at `~/.canopy/config.json`. If it doesn't exist, start Canopy once to generate the default configuration:

@@ -373,7 +375,7 @@
# Stop it after it generates the config (Ctrl+C)
```

-### 2. Enable the Kotlin plugin
+#### 2. Enable the Kotlin plugin

Edit `~/.canopy/config.json` and add or modify the `plugin` field to `"kotlin"`:

@@ -384,7 +386,7 @@ Edit `~/.canopy/config.json` and add or modify the `plugin` field to `"kotlin"`:
}
```

-### 3. Start Canopy
+#### 3. Start Canopy

```bash
~/go/bin/canopy start
@@ -392,6 +394,68 @@

Canopy will automatically start the Kotlin plugin and connect to it via Unix socket.

+### Step 8b: Running with Docker (Alternative)
+
+Instead of running Canopy and the plugin locally, you can use Docker to run everything in a container.
+
+#### 1. Build the Docker image
+
+From the repository root:
+
+```bash
+make docker/plugin PLUGIN=kotlin
+```
+
+This creates a `canopy-kotlin` image containing both Canopy and the Kotlin plugin pre-configured.
+
+#### 2. Run the container
+
+```bash
+make docker/run-kotlin
+```
+
+Or with a custom volume mount for persistent data:
+
+```bash
+docker run -v ~/.canopy:/root/.canopy canopy-kotlin
+```
+
+#### 3. Expose RPC ports (for running tests)
+
+To run tests against the containerized Canopy, expose the RPC ports:
+
+```bash
+docker run -p 50002:50002 -p 50003:50003 -v ~/.canopy:/root/.canopy canopy-kotlin
+```
+
+| Port | Service |
+|------|---------|
+| 50002 | RPC API (transactions, queries) |
+| 50003 | Admin RPC (keystore operations) |
+
+Now you can run tests from your host machine that connect to `localhost:50002` and `localhost:50003`.
+
+#### 4. View logs inside the container
+
+```bash
+# Get the container ID
+docker ps
+
+# View Canopy logs
+docker exec -it <container-id> tail -f /root/.canopy/logs/log
+
+# View plugin logs
+docker exec -it <container-id> tail -f /tmp/plugin/kotlin-plugin.log
+```
+
+#### 5. Interactive shell (for debugging)
+
+To inspect the container or debug issues:
+
+```bash
+docker run -it --entrypoint /bin/bash canopy-kotlin
+```
+
## Step 9: Testing

Run the RPC tests from the `tutorial` directory:

@@ -478,6 +542,8 @@ plugin/kotlin/

After implementing the new transaction types and starting Canopy with the plugin:

+### Option A: With Local Canopy
+
```bash
# Terminal 1: Start Canopy with the plugin
cd ~/canopy
@@ -488,6 +554,17 @@
cd ~/canopy/plugin/kotlin/tutorial
make test-rpc
```

+### Option B: With Docker
+
+```bash
+# Terminal 1: Start Canopy in Docker with ports exposed
+docker run -p 50002:50002 -p 50003:50003 -v ~/.canopy:/root/.canopy canopy-kotlin
+
+# Terminal 2: Run the tests (they connect to localhost:50002/50003)
+cd ~/canopy/plugin/kotlin/tutorial
+make test-rpc
+```
+
The test will:
1. Create two new accounts in the keystore
2. Use faucet to mint 1000 tokens to account 1
diff --git a/plugin/kotlin/pluginctl.sh b/plugin/kotlin/pluginctl.sh
index 46220ec4..2528f046 100755
--- a/plugin/kotlin/pluginctl.sh
+++ b/plugin/kotlin/pluginctl.sh
@@ -1,14 +1,44 @@
#!/bin/bash
# pluginctl.sh - Control script for managing the kotlin-plugin
-# Usage: ./pluginctl.sh {start|stop|status|restart|build}
+# Usage: ./pluginctl.sh {start|stop|status|restart}
# Configuration variables for paths and files
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
JAR_PATH="$SCRIPT_DIR/build/libs/canopy-plugin-kotlin-1.0.0-all.jar"
+TARBALL="$SCRIPT_DIR/kotlin-plugin.tar.gz"
PID_FILE="/tmp/plugin/kotlin-plugin.pid"
LOG_FILE="/tmp/plugin/kotlin-plugin.log"
PLUGIN_DIR="/tmp/plugin"
# Timeout in seconds for graceful shutdown
STOP_TIMEOUT=10
+
+# Extract tarball if JAR doesn't exist
+extract_if_needed() {
+    # If JAR already exists, nothing to do
+    if [ -f "$JAR_PATH" ]; then
+        return 0
+    fi
+
+    # Check for tarball
+    if [ -f "$TARBALL" ]; then
+        echo "Extracting $TARBALL..."
+        mkdir -p "$SCRIPT_DIR/build/libs"
+        tar -xzf "$TARBALL" -C "$SCRIPT_DIR/build/libs"
+        local tar_status=$?
+        # Rename if needed (tarball contains kotlin-plugin.jar)
+        if [ -f "$SCRIPT_DIR/build/libs/kotlin-plugin.jar" ] && [ ! -f "$JAR_PATH" ]; then
+            mv "$SCRIPT_DIR/build/libs/kotlin-plugin.jar" "$JAR_PATH"
+        fi
+        if [ $tar_status -eq 0 ] && [ -f "$JAR_PATH" ]; then
+            echo "Extraction complete"
+            return 0
+        else
+            echo "Error: Failed to extract JAR from $TARBALL"
+            return 1
+        fi
+    fi
+
+    return 1
+}
+
# Check if the process is running based on PID file
is_running() {
    # Return 1 if PID file doesn't exist
@@ -38,19 +68,6 @@ cleanup_pid() {
        rm -f "$PID_FILE"
    fi
}
-# Build the kotlin-plugin fat JAR
-build() {
-    echo "Building kotlin-plugin..."
-    cd "$SCRIPT_DIR"
-    ./gradlew fatJar --no-daemon
-    if [ $? -eq 0 ]; then
-        echo "Build successful"
-        return 0
-    else
-        echo "Build failed"
-        return 1
-    fi
-}
# Start the kotlin-plugin
start() {
    # Check if already running
@@ -60,14 +77,13 @@ start() {
    fi
    # Clean up any stale PID file
    cleanup_pid
+    # Try to extract from tarball if JAR doesn't exist
+    extract_if_needed
    # Check if JAR exists
    if [ ! -f "$JAR_PATH" ]; then
-        echo "JAR not found at $JAR_PATH, building..."
-        build
-        if [ $? -ne 0 ]; then
-            echo "Error: Build failed"
-            return 1
-        fi
+        echo "Error: JAR not found at $JAR_PATH"
+        echo "Run 'make build' or download kotlin-plugin.tar.gz"
+        return 1
    fi
    # Ensure plugin directory exists
    mkdir -p "$PLUGIN_DIR"
@@ -166,11 +182,8 @@ case "${1:-}" in
    restart)
        restart
        ;;
-    build)
-        build
-        ;;
    *)
-        echo "Usage: $0 {start|stop|status|restart|build}"
+        echo "Usage: $0 {start|stop|status|restart}"
        exit 1
        ;;
esac
diff --git a/plugin/python/AGENTS.md b/plugin/python/AGENTS.md
index 33f735fb..9456426b 100644
--- a/plugin/python/AGENTS.md
+++ b/plugin/python/AGENTS.md
@@ -136,6 +136,64 @@ make test-cov
cd tutorial && make test
```

+### Running with Docker
+
+The Python plugin can be run in a Docker container that includes both Canopy and the plugin.
+
+#### Build the Docker Image
+
+From the repository root:
+
+```bash
+make docker/plugin PLUGIN=python
+```
+
+This builds a Docker image named `canopy-python` that contains:
+- The Canopy binary
+- The Python plugin with virtual environment
+- Python 3.12 runtime
+- Pre-configured `config.json` with `"plugin": "python"`
+
+#### Run the Container
+
+```bash
+make docker/run-python
+```
+
+Or manually with a volume mount for persistent data:
+
+```bash
+docker run -v ~/.canopy:/root/.canopy canopy-python
+```
+
+#### Expose Ports for Testing
+
+To run tests against the containerized Canopy, expose the RPC ports:
+
+```bash
+docker run -p 50002:50002 -p 50003:50003 -v ~/.canopy:/root/.canopy canopy-python
+```
+
+| Port | Service |
+|------|---------|
+| 50002 | RPC API (transactions, queries) |
+| 50003 | Admin RPC (keystore operations) |
+
+Now you can run tests from your host machine that connect to `localhost:50002`.
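+
+For example, a minimal connectivity check from the host using only the Python standard library:
+
+```bash
+# raises an exception if the port is not reachable within 2 seconds
+python3 -c "import socket; socket.create_connection(('localhost', 50002), timeout=2); print('RPC port reachable')"
+```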
+
+#### View Logs
+
+```bash
+# Get the container ID
+docker ps
+
+# View Canopy logs
+docker exec -it <container-id> tail -f /root/.canopy/logs/log
+
+# View plugin logs
+docker exec -it <container-id> tail -f /tmp/plugin/python-plugin.log
+```
+
### Regenerating Protobuf Code

```bash
diff --git a/plugin/python/Dockerfile b/plugin/python/Dockerfile
index 7633a897..c22cdbf8 100644
--- a/plugin/python/Dockerfile
+++ b/plugin/python/Dockerfile
@@ -12,13 +12,23 @@ RUN go build -trimpath -ldflags="-s -w" -o /canopy ./cmd/main/...

# Stage 2: Runtime with Python
FROM python:3.12-alpine
WORKDIR /app
+
+# Install bash (required for pluginctl.sh)
+RUN apk add --no-cache bash
+
RUN mkdir -p /tmp/plugin /root/.canopy
+# Copy Canopy binary
COPY --from=canopy-builder /canopy .
-COPY plugin/python/ ./plugin/
-# Install dependencies if requirements.txt exists
-RUN if [ -f plugin/requirements.txt ]; then pip install --no-cache-dir -r plugin/requirements.txt; fi
+# Copy Python plugin to correct path (Canopy expects plugin/python/pluginctl.sh)
+COPY plugin/python/ ./plugin/python/
+
+# Remove any existing venv from copy, create fresh venv, and install dependencies
+RUN rm -rf /app/plugin/python/.venv && \
+    python3 -m venv /app/plugin/python/.venv && \
+    /app/plugin/python/.venv/bin/pip install --no-cache-dir --upgrade pip && \
+    /app/plugin/python/.venv/bin/pip install --no-cache-dir -e /app/plugin/python/

# Set plugin type for Canopy to start
RUN printf '{"plugin":"python"}\n' > /root/.canopy/config.json
diff --git a/plugin/python/Makefile b/plugin/python/Makefile
index 3f4d59c8..81b39a19 100644
--- a/plugin/python/Makefile
+++ b/plugin/python/Makefile
@@ -1,42 +1,54 @@
-.PHONY: install dev test lint format type-check clean proto build docs
+.PHONY: venv install dev test lint format type-check clean proto build docs
+
+# Virtual environment
+VENV_DIR := .venv
+PYTHON := $(VENV_DIR)/bin/python3
+PIP := $(VENV_DIR)/bin/pip
+
+# Create virtual environment
+venv:
+	@if [ ! -d "$(VENV_DIR)" ]; then \
+		echo "Creating virtual environment..."; \
+		python3 -m venv $(VENV_DIR); \
+	fi

# Development setup
-install:
-	pip install -e .
+install: venv
+	$(PIP) install -e .

-dev:
-	pip install -e ".[dev]"
+dev: venv
+	$(PIP) install -e ".[dev]"

# Testing
-test:
-	pytest
+test: venv
+	$(VENV_DIR)/bin/pytest

-test-cov:
-	pytest --cov=plugin --cov-report=html --cov-report=term
+test-cov: venv
+	$(VENV_DIR)/bin/pytest --cov=plugin --cov-report=html --cov-report=term

-test-verbose:
-	pytest -v
+test-verbose: venv
+	$(VENV_DIR)/bin/pytest -v

# Code quality
-lint:
-	flake8 plugin/ tests/
+lint: venv
+	$(VENV_DIR)/bin/flake8 plugin/ tests/

-format:
-	black plugin/ tests/
-	isort plugin/ tests/
+format: venv
+	$(VENV_DIR)/bin/black plugin/ tests/
+	$(VENV_DIR)/bin/isort plugin/ tests/

-type-check:
-	mypy plugin/
+type-check: venv
+	$(VENV_DIR)/bin/mypy plugin/

# Protobuf generation
-proto:
-	cd contract/proto && python3 -m grpc_tools.protoc --python_out=. --proto_path=. *.proto
+proto: venv
+	cd contract/proto && $(PYTHON) -m grpc_tools.protoc --python_out=. --proto_path=. *.proto
	# Fix relative imports in generated files
	sed -i 's/^import \([^.]\)/from . import \1/' contract/proto/*_pb2.py

# Build and distribution
-build:
-	python3 -m build
+build: venv
+	$(PYTHON) -m build

clean:
	rm -rf build/
@@ -48,18 +60,18 @@ clean:
	find . -type f -name "*.pyc" -delete
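+# Typical venv-backed workflow (illustrative; the `venv` prerequisite creates
+# .venv on demand, so no manual activation is needed):
+#   make dev    # create .venv and install the package with dev extras
+#   make test   # run pytest from the venv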

# Development servers
-serve:
-	uvicorn plugin.server:app --host 0.0.0.0 --port 8000
+serve: venv
+	$(VENV_DIR)/bin/uvicorn plugin.server:app --host 0.0.0.0 --port 8000

-serve-dev:
-	uvicorn plugin.server:app --reload --host 0.0.0.0 --port 8000
+serve-dev: venv
+	$(VENV_DIR)/bin/uvicorn plugin.server:app --reload --host 0.0.0.0 --port 8000

# Plugin execution
-run-plugin:
-	python3 main.py
+run-plugin: venv
+	$(PYTHON) main.py

# Full validation
-validate: lint type-check test
+validate: venv lint type-check test

# Setup pre-commit hooks
hooks:
diff --git a/plugin/python/TUTORIAL.md b/plugin/python/TUTORIAL.md
index 6154a121..a10fdd80 100644
--- a/plugin/python/TUTORIAL.md
+++ b/plugin/python/TUTORIAL.md
@@ -393,6 +393,68 @@ Check the plugin logs:
tail -f /tmp/plugin/python-plugin.log
```

+### Step 7b: Running with Docker (Alternative)
+
+Instead of running Canopy and the plugin locally, you can use Docker to run everything in a container.
+
+#### 1. Build the Docker image
+
+From the repository root:
+
+```bash
+make docker/plugin PLUGIN=python
+```
+
+This creates a `canopy-python` image containing both Canopy and the Python plugin pre-configured.
+
+#### 2. Run the container
+
+```bash
+make docker/run-python
+```
+
+Or with a custom volume mount for persistent data:
+
+```bash
+docker run -v ~/.canopy:/root/.canopy canopy-python
+```
+
+#### 3. Expose RPC ports (for running tests)
+
+To run tests against the containerized Canopy, expose the RPC ports:
+
+```bash
+docker run -p 50002:50002 -p 50003:50003 -v ~/.canopy:/root/.canopy canopy-python
+```
+
+| Port | Service |
+|------|---------|
+| 50002 | RPC API (transactions, queries) |
+| 50003 | Admin RPC (keystore operations) |
+
+Now you can run tests from your host machine that connect to `localhost:50002` and `localhost:50003`.
+
+#### 4. View logs inside the container
+
+```bash
+# Get the container ID
+docker ps
+
+# View Canopy logs
+docker exec -it <container-id> tail -f /root/.canopy/logs/log
+
+# View plugin logs
+docker exec -it <container-id> tail -f /tmp/plugin/python-plugin.log
+```
+
+#### 5. Interactive shell (for debugging)
+
+To inspect the container or debug issues:
+
+```bash
+docker run -it --entrypoint /bin/sh canopy-python
+```
+
## Step 8: Testing

Run the RPC tests from the `tutorial` directory:

diff --git a/plugin/python/pluginctl.sh b/plugin/python/pluginctl.sh
index c3b1fd49..71b88b0f 100755
--- a/plugin/python/pluginctl.sh
+++ b/plugin/python/pluginctl.sh
@@ -4,12 +4,120 @@
# Configuration variables for paths and files
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PYTHON_SCRIPT="$SCRIPT_DIR/main.py"
-PYTHON_CMD="python"
+VENV_DIR="$SCRIPT_DIR/.venv"
+PYTHON_CMD="$VENV_DIR/bin/python3"
PID_FILE="/tmp/plugin/python-plugin.pid"
LOG_FILE="/tmp/plugin/python-plugin.log"
PLUGIN_DIR="/tmp/plugin"
+TARBALL="$SCRIPT_DIR/python-plugin.tar.gz"
# Timeout in seconds for graceful shutdown
STOP_TIMEOUT=10
+
+# Extract tarball if main.py doesn't exist
+# Returns: 0 = no extraction needed, 1 = error, 2 = extraction successful (deps need reinstall)
+extract_if_needed() {
+    # If main.py already exists, nothing to do
+    if [ -f "$PYTHON_SCRIPT" ]; then
+        return 0
+    fi
+
+    # Check for tarball
+    if [ -f "$TARBALL" ]; then
+        echo "Extracting $TARBALL..."
+        tar -xzf "$TARBALL" -C "$SCRIPT_DIR"
+        if [ $? -eq 0 ] && [ -f "$PYTHON_SCRIPT" ]; then
+            echo "Extraction complete"
+            # Return 2 to indicate extraction happened and deps need reinstall
+            return 2
+        else
+            echo "Error: Failed to extract from $TARBALL"
+            return 1
+        fi
+    fi
+
+    return 1
+}
+
+# Install dependencies into venv
+install_dependencies() {
+    # Ensure pip is installed in venv (Alpine doesn't include it by default)
+    if [ ! -f "$VENV_DIR/bin/pip" ]; then
+        echo "Installing pip in virtual environment..."
+        "$VENV_DIR/bin/python3" -m ensurepip --upgrade
+        if [ $? -ne 0 ]; then
+            echo "Error: Failed to install pip"
+            return 1
+        fi
+    fi
+
+    # Upgrade pip and install dependencies
+    echo "Installing dependencies..."
+    "$VENV_DIR/bin/pip" install --upgrade pip
+    if [ $? -ne 0 ]; then
+        echo "Error: Failed to upgrade pip"
+        return 1
+    fi
+
+    # Install the package in editable mode
+    "$VENV_DIR/bin/pip" install -e "$SCRIPT_DIR"
+    if [ $? -ne 0 ]; then
+        echo "Warning: Editable install failed, trying direct dependency install..."
+        # Fallback: install dependencies directly from pyproject.toml
+        "$VENV_DIR/bin/pip" install protobuf fastapi uvicorn pydantic structlog
+        if [ $? -ne 0 ]; then
+            echo "Error: Failed to install dependencies"
+            return 1
+        fi
+    fi
+
+    echo "Dependencies installed successfully"
+    return 0
+}
+
+# Create virtual environment if it doesn't exist or Python binary is missing/broken
+# If force_reinstall is set to 1, always reinstall dependencies
+setup_venv_if_needed() {
+    local force_reinstall=${1:-0}
+
+    # Check if venv exists, Python binary works, AND pip exists
+    if [ -d "$VENV_DIR" ] && [ -x "$PYTHON_CMD" ] && [ -f "$VENV_DIR/bin/pip" ]; then
+        # Test if the Python binary actually works
+        if "$PYTHON_CMD" --version > /dev/null 2>&1; then
+            # If force reinstall, always reinstall dependencies
+            if [ "$force_reinstall" -eq 1 ]; then
+                echo "New version extracted, reinstalling dependencies..."
+                install_dependencies
+                return $?
+            fi
+            # Check if protobuf is installed (basic dependency check)
+            if "$PYTHON_CMD" -c "import google.protobuf" > /dev/null 2>&1; then
+                return 0
+            fi
+            echo "Dependencies missing, reinstalling..."
+            install_dependencies
+            return $?
+        else
+            echo "Existing venv is broken, recreating..."
+            rm -rf "$VENV_DIR"
+        fi
+    else
+        # venv missing or incomplete, remove and recreate
+        rm -rf "$VENV_DIR"
+    fi
+
+    # Create virtual environment
+    echo "Creating virtual environment..."
+    python3 -m venv "$VENV_DIR"
+    if [ $? -ne 0 ]; then
+        echo "Error: Failed to create virtual environment"
+        return 1
+    fi
+
+    # Install dependencies
+    install_dependencies
+    return $?
+}
+
# Check if the process is running based on PID file
is_running() {
    # Return 1 if PID file doesn't exist
@@ -48,9 +156,32 @@ start() {
    fi
    # Clean up any stale PID file
    cleanup_pid
+    # Try to extract from tarball if source doesn't exist
+    extract_if_needed
+    local extract_result=$?
+
+    # Check extraction result: 1 = error, 2 = extracted (need deps reinstall)
+    if [ $extract_result -eq 1 ]; then
+        echo "Error: Failed to extract plugin"
+        return 1
+    fi
+
    # Check if Python script exists
    if [ ! -f "$PYTHON_SCRIPT" ]; then
        echo "Error: Python script not found at $PYTHON_SCRIPT"
+        echo "Download python-plugin.tar.gz or clone the source"
        return 1
    fi
+
+    # Setup virtual environment if needed
+    # If extraction just happened (result=2), force reinstall dependencies
+    local force_reinstall=0
+    if [ $extract_result -eq 2 ]; then
+        force_reinstall=1
+    fi
+
+    setup_venv_if_needed $force_reinstall
+    if [ $? -ne 0 ]; then
+        return 1
+    fi
    # Ensure plugin directory exists
diff --git a/plugin/typescript/AGENTS.md b/plugin/typescript/AGENTS.md
index 1a34d638..86689988 100644
--- a/plugin/typescript/AGENTS.md
+++ b/plugin/typescript/AGENTS.md
@@ -111,6 +111,15 @@ See `TUTORIAL.md` for the complete guide. Summary:

### Building the Plugin

+Using Makefile (recommended):
+```bash
+make build-all # Full rebuild (install + proto + descriptors + TypeScript)
+make build # TypeScript compilation only
+make build-proto # Regenerate protobuf code only
+make build-descriptors # Regenerate descriptor file only
+```
+
+Using npm directly:
```bash
npm run build:all # Full rebuild (proto + descriptors + TypeScript)
npm run build:proto # Regenerate protobuf code only
@@ -124,10 +133,71 @@ The plugin is started by Canopy when configured with `"plugin": "typescript"` in

For development:
```bash
+make dev # Run with nodemon for hot reload
+make run # Run compiled output
+# or
npm run dev # Run with nodemon for hot reload
npm start # Run compiled output
```

+### Running with Docker
+
+The TypeScript plugin can be run in a Docker container that includes both Canopy and the plugin.
+
+#### Build the Docker Image
+
+From the repository root:
+
+```bash
+make docker/plugin PLUGIN=typescript
+```
+
+This builds a Docker image named `canopy-typescript` that contains:
+- The Canopy binary
+- The TypeScript plugin (compiled with all proto descriptors)
+- Node.js 20 runtime
+- Pre-configured `config.json` with `"plugin": "typescript"`
+
+#### Run the Container
+
+```bash
+make docker/run-typescript
+```
+
+Or manually with a volume mount for persistent data:
+
+```bash
+docker run -v ~/.canopy:/root/.canopy canopy-typescript
+```
+
+#### Expose Ports for Testing
+
+To run tests against the containerized Canopy, expose the RPC ports:
+
+```bash
+docker run -p 50002:50002 -p 50003:50003 -v ~/.canopy:/root/.canopy canopy-typescript
+```
+
+| Port | Service |
+|------|---------|
+| 50002 | RPC API (transactions, queries) |
+| 50003 | Admin RPC (keystore operations) |
+
+Now you can run tests from your host machine that connect to `localhost:50002`.
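+
+To see which host ports Docker actually mapped for a running container:
+
+```bash
+# lists the container's port mappings
+docker port <container-id>
+```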
+
+#### View Logs
+
+```bash
+# Get the container ID
+docker ps
+
+# View Canopy logs
+docker exec -it <container-id> tail -f /root/.canopy/logs/log
+
+# View plugin logs
+docker exec -it <container-id> tail -f /tmp/plugin/typescript-plugin.log
+```
+
### Running Tests

Tests are in the `tutorial/` subdirectory (separate project):

diff --git a/plugin/typescript/Dockerfile b/plugin/typescript/Dockerfile
index aed0d970..5f4997cd 100644
--- a/plugin/typescript/Dockerfile
+++ b/plugin/typescript/Dockerfile
@@ -2,7 +2,7 @@
# Build from repository root: docker build -f plugin/typescript/Dockerfile -t canopy-typescript .

# Stage 1: Build Canopy (Go)
-FROM golang:1.24-alpine AS canopy-builder
+FROM golang:1.25-alpine AS canopy-builder
WORKDIR /app
COPY go.mod go.sum ./
RUN go mod download
@@ -13,17 +13,19 @@ RUN go build -trimpath -ldflags="-s -w" -o /canopy ./cmd/main/...

FROM node:20-alpine AS typescript-builder
WORKDIR /app
COPY plugin/typescript/ ./
-RUN npm ci && npm run build
+RUN npm ci && npm run build:all

# Stage 3: Runtime
FROM node:20-alpine
WORKDIR /app
-RUN mkdir -p /tmp/plugin /root/.canopy
+RUN apk add --no-cache bash && mkdir -p /tmp/plugin /root/.canopy plugin/typescript
COPY --from=canopy-builder /canopy .
-COPY --from=typescript-builder /app/dist ./plugin/
-COPY --from=typescript-builder /app/node_modules ./plugin/node_modules/
-COPY --from=typescript-builder /app/package.json ./plugin/
+COPY --from=typescript-builder /app/dist plugin/typescript/dist/
+COPY --from=typescript-builder /app/node_modules plugin/typescript/node_modules/
+COPY --from=typescript-builder /app/package.json plugin/typescript/
+COPY plugin/typescript/pluginctl.sh plugin/typescript/pluginctl.sh
+RUN chmod +x plugin/typescript/pluginctl.sh

# Set plugin type for Canopy to start
RUN printf '{"plugin":"typescript"}\n' > /root/.canopy/config.json
diff --git a/plugin/typescript/Makefile b/plugin/typescript/Makefile
new file mode 100644
index 00000000..f9e3bceb
--- /dev/null
+++ b/plugin/typescript/Makefile
@@ -0,0 +1,79 @@
+# Makefile for Canopy TypeScript Plugin
+
+.PHONY: all install build build-proto build-descriptors build-all clean test run dev lint format validate help
+
+# Default target
+all: build-all
+
+# Install dependencies
+install:
+	@echo "Installing dependencies..."
+	npm ci
+
+# Build TypeScript only
+build:
+	@echo "Building TypeScript..."
+	npm run build
+
+# Build protobuf code
+build-proto:
+	@echo "Building protobuf..."
+	npm run build:proto
+
+# Build descriptors
+build-descriptors:
+	@echo "Building descriptors..."
+	npm run build:descriptors
+
+# Full build (same as root Makefile: npm ci && npm run build:all)
+build-all: install
+	@echo "Full build (proto + descriptors + TypeScript)..."
+	npm run build:all
+
+# Clean build artifacts
+clean:
+	@echo "Cleaning build artifacts..."
+	rm -rf dist/
+	rm -rf node_modules/
+
+# Run tests (in tutorial directory)
+test:
+	@echo "Running tests..."
+	cd tutorial && npm install && npm test
+
+# Run the application
+run:
+	@echo "Running plugin..."
+	npm start
+
+# Run in development mode
+dev:
+	@echo "Running in development mode..."
+	npm run dev
+
+# Lint placeholder
+lint:
+	@echo "Linting not configured - add ESLint target if needed"
+
+# Format placeholder
+format:
+	@echo "Formatting not configured - add Prettier target if needed"
+
+# Full validation
+validate: lint test
+	@echo "All validation checks passed!"
+
+# Show help
+help:
+	@echo "Available targets:"
+	@echo "  make install           - Install dependencies (npm ci)"
+	@echo "  make build             - Build TypeScript only"
+	@echo "  make build-proto       - Build protobuf code"
+	@echo "  make build-descriptors - Build descriptors"
+	@echo "  make build-all         - Full build (install + proto + descriptors + TypeScript)"
+	@echo "  make clean             - Clean build artifacts"
+	@echo "  make test              - Run tests (tutorial)"
+	@echo "  make run               - Run the application"
+	@echo "  make dev               - Run in development mode"
+	@echo "  make validate          - Run all validation checks"
+	@echo "  make help              - Show this help message"
diff --git a/plugin/typescript/TUTORIAL.md b/plugin/typescript/TUTORIAL.md
index b0e33659..742c6991 100644
--- a/plugin/typescript/TUTORIAL.md
+++ b/plugin/typescript/TUTORIAL.md
@@ -452,6 +452,68 @@
tail -f /tmp/plugin/typescript-plugin.log
```

You should see messages indicating the plugin has connected and performed the handshake with Canopy.

+### Step 8b: Running with Docker (Alternative)
+
+Instead of running Canopy and the plugin locally, you can use Docker to run everything in a container.
+
+#### 1. Build the Docker image
+
+From the repository root:
+
+```bash
+make docker/plugin PLUGIN=typescript
+```
+
+This creates a `canopy-typescript` image containing both Canopy and the TypeScript plugin pre-configured.
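+
+You can verify the image exists before running it:
+
+```bash
+docker images canopy-typescript
+```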
+
+#### 2. Run the container
+
+```bash
+make docker/run-typescript
+```
+
+Or with a custom volume mount for persistent data:
+
+```bash
+docker run -v ~/.canopy:/root/.canopy canopy-typescript
+```
+
+#### 3. Expose RPC ports (for running tests)
+
+To run tests against the containerized Canopy, expose the RPC ports:
+
+```bash
+docker run -p 50002:50002 -p 50003:50003 -v ~/.canopy:/root/.canopy canopy-typescript
+```
+
+| Port | Service |
+|------|---------|
+| 50002 | RPC API (transactions, queries) |
+| 50003 | Admin RPC (keystore operations) |
+
+Now you can run tests from your host machine that connect to `localhost:50002` and `localhost:50003`.
+
+#### 4. View logs inside the container
+
+```bash
+# Get the container ID
+docker ps
+
+# View Canopy logs
+docker exec -it <container-id> tail -f /root/.canopy/logs/log
+
+# View plugin logs
+docker exec -it <container-id> tail -f /tmp/plugin/typescript-plugin.log
+```
+
+#### 5. Interactive shell (for debugging)
+
+To inspect the container or debug issues:
+
+```bash
+docker run -it --entrypoint /bin/sh canopy-typescript
+```
+
## Step 9: Testing

Run the RPC tests from the `tutorial` directory:

diff --git a/plugin/typescript/pluginctl.sh b/plugin/typescript/pluginctl.sh
index fa6fba91..ece9ae2e 100755
--- a/plugin/typescript/pluginctl.sh
+++ b/plugin/typescript/pluginctl.sh
@@ -8,8 +8,33 @@ NODE_CMD="node"
PID_FILE="/tmp/plugin/typescript-plugin.pid"
LOG_FILE="/tmp/plugin/typescript-plugin.log"
PLUGIN_DIR="/tmp/plugin"
+TARBALL="$SCRIPT_DIR/typescript-plugin.tar.gz"
# Timeout in seconds for graceful shutdown
STOP_TIMEOUT=10
+
+# Extract tarball if dist doesn't exist
+extract_if_needed() {
+    # If dist/main.js already exists, nothing to do
+    if [ -f "$NODE_SCRIPT" ]; then
+        return 0
+    fi
+
+    # Check for tarball
+    if [ -f "$TARBALL" ]; then
+        echo "Extracting $TARBALL..."
+        tar -xzf "$TARBALL" -C "$SCRIPT_DIR"
+        if [ $? -eq 0 ] && [ -f "$NODE_SCRIPT" ]; then
+            echo "Extraction complete"
+            return 0
+        else
+            echo "Error: Failed to extract from $TARBALL"
+            return 1
+        fi
+    fi
+
+    return 1
+}
+
# Check if the process is running based on PID file
is_running() {
    # Return 1 if PID file doesn't exist
@@ -48,10 +73,12 @@ start() {
    fi
    # Clean up any stale PID file
    cleanup_pid
+    # Try to extract from tarball if dist doesn't exist
+    extract_if_needed
    # Check if Node.js script exists
    if [ ! -f "$NODE_SCRIPT" ]; then
        echo "Error: Node.js script not found at $NODE_SCRIPT"
-        echo "Run 'npm run build' to compile TypeScript before starting"
+        echo "Run 'npm run build' to compile TypeScript or download typescript-plugin.tar.gz"
        return 1
    fi
    # Ensure plugin directory exists