diff --git a/Makefile b/Makefile
index 6e7438f9..5d5bf72c 100644
--- a/Makefile
+++ b/Makefile
@@ -23,6 +23,9 @@ run:
gen:
go generate -v ./...
+gendocs:
+ go run ./cmd/gendocs
+
build:
go build -tags="nomsgpack,remote,exclude_graphdriver_btrfs,containers_image_openpgp" -v ./cmd/sablier
diff --git a/README.md b/README.md
index 48f006f6..2065ec98 100644
--- a/README.md
+++ b/README.md
@@ -22,6 +22,7 @@ Whether you don't want to overload your Raspberry Pi, or your QA environment is
- [Environment Variables](#environment-variables)
- [Arguments](#arguments)
- [Providers](#providers)
+ - [Digital Ocean](#digital-ocean)
- [Docker](#docker)
- [Docker Swarm](#docker-swarm)
- [Podman](#podman)
@@ -226,6 +227,21 @@ TODO: Add link to full auto-generated reference
## Providers
+### Digital Ocean
+
+
+
+Sablier integrates with Digital Ocean's App Platform to scale apps on demand.
+
+**Features:**
+- Scale App Platform services and workers automatically
+- Stop apps when not in use to save costs
+- API-based integration with Digital Ocean
+
+📚 **[Full Documentation](https://sablierapp.dev/#/providers/digitalocean)**
+
+---
+
### Docker
diff --git a/docs/_sidebar.md b/docs/_sidebar.md
index 09aa85a6..788b56ba 100644
--- a/docs/_sidebar.md
+++ b/docs/_sidebar.md
@@ -7,9 +7,11 @@
- [Versioning](/versioning)
- **Providers**
- [Overview](/providers/overview)
+  - [Digital Ocean](/providers/digitalocean)
  - [Docker](/providers/docker)
  - [Docker Swarm](/providers/docker_swarm)
  - [Kubernetes](/providers/kubernetes)
+  - [Nomad](/providers/nomad)
  - [Podman](/providers/podman)
- **Reverse Proxy Plugins**
- [Overview](/plugins/overview)
diff --git a/docs/assets/img/digitalocean.svg b/docs/assets/img/digitalocean.svg
new file mode 100644
index 00000000..dade76aa
--- /dev/null
+++ b/docs/assets/img/digitalocean.svg
@@ -0,0 +1,4 @@
+
diff --git a/docs/providers/digitalocean.md b/docs/providers/digitalocean.md
new file mode 100644
index 00000000..104123ed
--- /dev/null
+++ b/docs/providers/digitalocean.md
@@ -0,0 +1,193 @@
+# Digital Ocean
+
+The Digital Ocean provider integrates with Digital Ocean's App Platform to scale apps on demand.
+
+## Use the Digital Ocean provider
+
+To use the Digital Ocean provider, configure the [provider.name](../configuration) property and provide your Digital Ocean API token.
+
+
+
+#### **File (YAML)**
+
+```yaml
+provider:
+ name: digitalocean
+ digitalocean:
+ token: your-digitalocean-api-token
+ region: nyc1 # Optional, defaults to nyc1
+```
+
+#### **CLI**
+
+```bash
+sablier start --provider.name=digitalocean --provider.digitalocean.token=your-digitalocean-api-token
+```
+
+#### **Environment Variable**
+
+```bash
+PROVIDER_NAME=digitalocean
+PROVIDER_DIGITALOCEAN_TOKEN=your-digitalocean-api-token
+PROVIDER_DIGITALOCEAN_REGION=nyc1
+```
+
+
+
+!> **Keep your Digital Ocean API token secure! Never commit it to version control.**
+
+## Register Apps
+
+For Sablier to work, it needs to know which Digital Ocean apps to scale.
+
+You register your apps by adding environment variables to your app specification.
+
+```yaml
+spec:
+ name: my-app
+ services:
+ - name: web
+ instance_count: 1
+ envs:
+ - key: SABLIER_ENABLE
+ value: "true"
+ - key: SABLIER_GROUP
+ value: "mygroup"
+```
+
+## How does Sablier know when an app is ready?
+
+Sablier monitors the deployment phase of your Digital Ocean app. An app is considered ready when:
+
+- The active deployment is in the `ACTIVE` phase
+- Instance count is greater than 0
+
+Apps are considered not ready during:
+- `PENDING_BUILD`
+- `BUILDING`
+- `PENDING_DEPLOY`
+- `DEPLOYING`
+
+Apps are in an unrecoverable state during:
+- `ERROR`
+- `CANCELED`
+
+## Configuration Options
+
+### Digital Ocean API Token
+
+```yaml
+provider:
+ digitalocean:
+ token: your-digitalocean-api-token
+```
+
+**Required.** Your Digital Ocean API token for authentication. You can generate a token in the Digital Ocean control panel under API > Tokens.
+
+### Region
+
+```yaml
+provider:
+ digitalocean:
+ region: nyc1
+```
+
+**Optional.** The Digital Ocean region to use. Defaults to `nyc1`. While the API is global, this setting may be used for future region-specific features.
+
+### Auto-stop on Startup
+
+```yaml
+provider:
+ auto-stop-on-startup: true
+```
+
+When enabled, Sablier will scale down all apps with the `SABLIER_ENABLE=true` environment variable that are running when Sablier starts.
+
+## App Environment Variables
+
+| Variable | Required | Description | Example |
+|----------|----------|-------------|---------|
+| `SABLIER_ENABLE` | Yes | Enable Sablier management for this app | `true` |
+| `SABLIER_GROUP` | No | Logical group name for the app | `myapp` |
+
+## How Scaling Works
+
+### Starting an App
+
+When Sablier receives a request to start an app:
+
+1. It retrieves the current app specification
+2. Sets the `instance_count` to 1 for every service and worker currently at 0 (components already running keep their existing count)
+3. Triggers a new deployment with the updated specification
+
+### Stopping an App
+
+When Sablier needs to stop an app:
+
+1. It retrieves the current app specification
+2. Sets the `instance_count` for all services and workers to 0
+3. Triggers a new deployment with the updated specification
+
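+For illustration, the only field Sablier changes in the app spec is each component's `instance_count` (a hypothetical spec fragment):
+
+```yaml
+# While a session is active
+services:
+  - name: web
+    instance_count: 1
+---
+# After the session expires, the same spec is resubmitted with all counts at zero
+services:
+  - name: web
+    instance_count: 0
+```
+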
+## Event Monitoring
+
+Unlike Docker, Digital Ocean doesn't provide a real-time event stream. Sablier polls the App Platform API every 30 seconds to detect when apps are stopped (scaled to 0 instances).
+
+## Limitations
+
+- Polling-based event detection (30-second interval)
+- Requires a valid Digital Ocean API token
+- Only works with Digital Ocean App Platform (not Droplets, Kubernetes, or other services)
+- Scaling operations trigger full deployments, which may take several minutes
+- App identification uses App ID, not human-readable names
+
+## Cost Considerations
+
+⚠️ **Important:** Be aware of Digital Ocean App Platform pricing:
+
+- Apps are billed per hour when running
+- Scaling to 0 instances stops billing for compute resources
+- Deployments may incur brief charges even when scaling down
+- Database and storage resources may have separate billing
+
+Sablier helps reduce costs by automatically scaling apps to 0 when not in use.
+
+## Troubleshooting
+
+### App not starting
+
+1. Check Sablier logs for API errors
+2. Verify your Digital Ocean API token is valid and has the correct permissions
+3. Ensure the app exists and is accessible via the API
+4. Check the app's deployment status in the Digital Ocean console
+
+### Authentication errors
+
+1. Verify your token in the Digital Ocean console
+2. Ensure the token has read/write permissions for App Platform
+3. Check that the token hasn't expired
+
+### Slow scaling
+
+Digital Ocean App Platform deployments can take several minutes:
+- Building the app (if code changes were made)
+- Deploying new instances
+- Health checks
+
+This is expected behavior. Consider adjusting your Sablier timeout settings accordingly.
+
+## Security Best Practices
+
+1. **Store tokens securely**: Use environment variables or secrets management
+2. **Use scoped tokens**: Create tokens with minimal required permissions
+3. **Rotate tokens regularly**: Update API tokens periodically
+4. **Monitor API usage**: Check Digital Ocean console for unexpected API calls
+
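+As a sketch, when Sablier itself runs as a container, the token can be injected through the documented `PROVIDER_DIGITALOCEAN_TOKEN` environment variable instead of being written into a configuration file (compose service shown for illustration):
+
+```yaml
+services:
+  sablier:
+    image: sablierapp/sablier:1.10.1
+    command:
+      - start
+      - --provider.name=digitalocean
+    environment:
+      # Injected from the shell or an .env file kept out of version control
+      - PROVIDER_DIGITALOCEAN_TOKEN=${DIGITALOCEAN_TOKEN}
+```
+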
+## Full Example
+
+A complete example would include:
+
+1. A Digital Ocean app with `SABLIER_ENABLE=true` environment variable
+2. Sablier running with Digital Ocean provider configuration
+3. A reverse proxy (Traefik, Nginx, etc.) configured to use Sablier's API
+
+See the Digital Ocean provider example (if available) for a complete setup.
diff --git a/docs/providers/docker.md b/docs/providers/docker.md
index 24603808..af8864f3 100644
--- a/docs/providers/docker.md
+++ b/docs/providers/docker.md
@@ -59,8 +59,61 @@ services:
- sablier.group=mygroup
```
-## How does Sablier knows when a container is ready?
+## How does Sablier know when a container is ready?
-If the container defines a Healthcheck, then it will check for healthiness before stating the `ready` status.
+If the container defines a Healthcheck, then Sablier will check for healthiness before marking the container as `ready`.
-If the containers do not define a Healthcheck, then as soon as the container has the status `started`
\ No newline at end of file
+If the container does not define a Healthcheck, then as soon as the container has the status `started`, it is considered ready.
+
+## Configuration Options
+
+### Auto-stop on Startup
+
+```yaml
+provider:
+ auto-stop-on-startup: true
+```
+
+When enabled, Sablier will stop all containers with the `sablier.enable=true` label that are running but not registered in an active session when Sablier starts.
+
+## Container Labels
+
+| Label | Required | Description | Example |
+|-------|----------|-------------|---------|
+| `sablier.enable` | Yes | Enable Sablier management for this container | `true` |
+| `sablier.group` | Yes | Logical group name for the container | `myapp` |
+
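+For reference, a minimal compose service carrying both labels (the service name and image are illustrative):
+
+```yaml
+services:
+  whoami:
+    image: acouvreur/whoami:v1.10.2
+    labels:
+      - sablier.enable=true
+      - sablier.group=mygroup
+```
+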
+## Full Example
+
+See the [Docker provider example](../../examples/docker/) for a complete, working setup.
+
+## Limitations
+
+- Requires access to the Docker socket
+- Cannot manage containers in remote Docker hosts (use Docker Swarm for multi-host scenarios)
+- Healthchecks must be defined in the container image or compose file
+
+## Troubleshooting
+
+### Container not starting
+
+1. Check Sablier logs for errors
+2. Verify the container has the correct labels
+3. Ensure Sablier has access to the Docker socket
+
+### Permission denied
+
+Sablier needs read/write access to `/var/run/docker.sock`. Ensure the Sablier container has the socket mounted:
+
+```yaml
+volumes:
+ - '/var/run/docker.sock:/var/run/docker.sock'
+```
+
+### Container starts but Sablier doesn't detect it
+
+If your container has a healthcheck, ensure it's passing. Check with:
+
+```bash
+docker inspect <container-name> | grep Health -A 10
+```
\ No newline at end of file
diff --git a/docs/providers/docker_swarm.md b/docs/providers/docker_swarm.md
index b2140bb1..1bd131e2 100644
--- a/docs/providers/docker_swarm.md
+++ b/docs/providers/docker_swarm.md
@@ -61,8 +61,75 @@ services:
- sablier.group=mygroup
```
-## How does Sablier knows when a service is ready?
+## How does Sablier know when a service is ready?
-Sablier checks for the service replicas. As soon as the current replicas matches the wanted replicas, then the service is considered `ready`.
+Sablier checks for the service replicas. As soon as the current replicas match the wanted replicas, then the service is considered `ready`.
-?> Docker Swarm uses the container's healthcheck to check if the container is up and running. So the provider has a native healthcheck support.
\ No newline at end of file
+?> Docker Swarm uses the container's healthcheck to check if the container is up and running. So the provider has native healthcheck support.
+
+## Configuration Options
+
+### Auto-stop on Startup
+
+```yaml
+provider:
+ auto-stop-on-startup: true
+```
+
+When enabled, Sablier will scale down all services with the `sablier.enable=true` label that have non-zero replicas but are not registered in an active session when Sablier starts.
+
+## Service Labels
+
+| Label | Required | Description | Example |
+|-------|----------|-------------|---------|
+| `sablier.enable` | Yes | Enable Sablier management for this service | `true` |
+| `sablier.group` | Yes | Logical group name for the service | `myapp` |
+
+**Important:** For Swarm services, labels must be placed under the `deploy` section, not at the top level of the service, as shown below.
+
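+For reference, a minimal sketch of a service with the labels placed under `deploy` (service name and image are illustrative):
+
+```yaml
+services:
+  whoami:
+    image: acouvreur/whoami:v1.10.2
+    deploy:
+      replicas: 0
+      labels:
+        # Swarm reads these from the deploy section, not from the service-level labels key
+        - sablier.enable=true
+        - sablier.group=mygroup
+```
+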
+## Full Example
+
+See the [Docker Swarm provider example](../../examples/docker-swarm/) for a complete, working setup.
+
+## Scaling Behavior
+
+- Services start with 0 replicas
+- On first request, Sablier scales to the last known replica count (default: 1)
+- When session expires, Sablier scales back to 0
+- Swarm automatically distributes replicas across nodes
+
+## Limitations
+
+- Requires Docker Swarm mode to be initialized
+- Requires access to the Docker socket on a manager node
+- Cannot scale global services (only replicated services)
+- Services must use `replicated` mode, not `global`
+
+## Troubleshooting
+
+### Service not scaling
+
+1. Check Sablier logs for errors
+2. Verify the service has labels in the `deploy` section
+3. Ensure Sablier is running on a manager node
+4. Check service status: `docker service ps <service-name>`
+
+### Sablier not starting
+
+Ensure Sablier is deployed with a constraint to run on manager nodes:
+
+```yaml
+deploy:
+ placement:
+ constraints:
+ - node.role == manager
+```
+
+### Services stuck in "preparing" state
+
+Check if nodes have capacity and if images are available:
+
+```bash
+docker service ps <service-name>
+docker node ls
+```
\ No newline at end of file
diff --git a/docs/providers/kubernetes.md b/docs/providers/kubernetes.md
index 86a366f5..02ab6249 100644
--- a/docs/providers/kubernetes.md
+++ b/docs/providers/kubernetes.md
@@ -91,8 +91,225 @@ spec:
image: acouvreur/whoami:v1.10.2
```
-## How does Sablier knows when a deployment is ready?
+## How does Sablier know when a deployment is ready?
-Sablier checks for the deployment replicas. As soon as the current replicas matches the wanted replicas, then the deployment is considered `ready`.
+Sablier checks for the deployment replicas. As soon as the current replicas match the wanted replicas, then the deployment is considered `ready`.
-?> Kubernetes uses the Pod healthcheck to check if the Pod is up and running. So the provider has a native healthcheck support.
\ No newline at end of file
+?> Kubernetes uses the Pod healthcheck to check if the Pod is up and running. So the provider has native healthcheck support.
+
+## Configuration Options
+
+### Kubernetes-specific Settings
+
+```yaml
+provider:
+ name: kubernetes
+ kubernetes:
+ qps: 5 # K8S API QPS limit (default: 5)
+ burst: 10 # K8S API burst limit (default: 10)
+ delimiter: "_" # Namespace/resource delimiter (default: "_")
+```
+
+#### QPS and Burst
+
+These settings control client-side throttling for Kubernetes API requests:
+
+- **QPS (Queries Per Second)**: Maximum sustained request rate
+- **Burst**: Maximum burst of requests allowed
+
+For large clusters with many deployments, you may need to increase these values:
+
+```yaml
+provider:
+ kubernetes:
+ qps: 50
+ burst: 100
+```
+
+#### Delimiter
+
+The delimiter separates parts of the resource identifier:
+
+```yaml
+# With delimiter="_" (default)
+sablier.group: namespace_deployment_name
+
+# With delimiter="/"
+sablier.group: namespace/deployment/name
+
+# With delimiter="."
+sablier.group: namespace.deployment.name
+```
+
+### Auto-stop on Startup
+
+```yaml
+provider:
+ auto-stop-on-startup: true
+```
+
+When enabled, Sablier will scale to 0 all deployments and statefulsets with the `sablier.enable=true` label that have non-zero replicas but are not registered in an active session when Sablier starts.
+
+## Resource Labels
+
+| Label | Required | Description | Example |
+|-------|----------|-------------|---------|
+| `sablier.enable` | Yes | Enable Sablier management | `"true"` |
+| `sablier.group` | Yes | Logical group name | `myapp` |
+
+**Important:** In Kubernetes, label values must be strings (use quotes for boolean/numeric values).
+
+## Supported Resources
+
+Sablier supports the following Kubernetes resources:
+
+### Deployments
+
+```yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: myapp
+ labels:
+ sablier.enable: "true"
+ sablier.group: mygroup
+spec:
+ replicas: 0
+ # ...
+```
+
+### StatefulSets
+
+```yaml
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ name: myapp
+ labels:
+ sablier.enable: "true"
+ sablier.group: mygroup
+spec:
+ replicas: 0
+ # ...
+```
+
+## RBAC Requirements
+
+Sablier requires specific permissions to function:
+
+```yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: sablier
+rules:
+ - apiGroups: ["apps", ""]
+ resources: ["deployments", "statefulsets"]
+ verbs: ["get", "list", "watch"]
+ - apiGroups: ["apps", ""]
+ resources: ["deployments/scale", "statefulsets/scale"]
+ verbs: ["get", "list", "watch", "patch", "update"]
+```
+
+### Minimal Permissions
+
+If you want to restrict Sablier to specific namespaces, use a `Role` instead of `ClusterRole`:
+
+```yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+ name: sablier
+ namespace: my-namespace
+# ... same rules as above
+```
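+
+The Role (or ClusterRole) must also be bound to the ServiceAccount Sablier runs as. A sketch, assuming a ServiceAccount named `sablier` in the same namespace:
+
+```yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+  name: sablier
+  namespace: my-namespace
+subjects:
+  - kind: ServiceAccount
+    name: sablier
+    namespace: my-namespace
+roleRef:
+  kind: Role
+  name: sablier
+  apiGroup: rbac.authorization.k8s.io
+```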
+
+## Full Example
+
+See the [Kubernetes provider example](../../examples/kubernetes/) for a complete, working setup with all manifests.
+
+## Scaling Behavior
+
+- Deployments/StatefulSets start with 0 replicas
+- On first request, Sablier scales to the last known replica count (default: 1)
+- When session expires, Sablier scales back to 0
+- Kubernetes scheduler handles pod placement
+- Pod readiness probes determine when the deployment is ready
+
+## Deployment Strategies
+
+Sablier respects Kubernetes deployment strategies:
+
+### Rolling Update
+
+```yaml
+spec:
+ strategy:
+ type: RollingUpdate
+ rollingUpdate:
+ maxUnavailable: 0
+ maxSurge: 1
+```
+
+### Recreate
+
+```yaml
+spec:
+ strategy:
+ type: Recreate
+```
+
+## Limitations
+
+- Sablier must run inside the Kubernetes cluster (uses InClusterConfig)
+- Requires appropriate RBAC permissions
+- Does not support DaemonSets (scaling to 0 not applicable)
+- Does not support plain ReplicaSets (use Deployments instead)
+
+## Troubleshooting
+
+### Permission Denied Errors
+
+Check RBAC configuration:
+
+```bash
+kubectl describe clusterrole sablier
+kubectl describe clusterrolebinding sablier
+kubectl auth can-i get deployments --as=system:serviceaccount:sablier-system:sablier
+```
+
+### Deployment Not Scaling
+
+1. Check Sablier logs:
+ ```bash
+ kubectl logs -l app=sablier -n sablier-system
+ ```
+
+2. Verify labels:
+ ```bash
+ kubectl get deployment <deployment-name> -o yaml | grep -A 5 labels
+ ```
+
+3. Check Sablier can access the deployment:
+ ```bash
+ kubectl get deployments --all-namespaces -l sablier.enable=true
+ ```
+
+### Pods Not Starting
+
+Check pod events:
+```bash
+kubectl describe deployment <deployment-name>
+kubectl get events --sort-by='.lastTimestamp'
+```
+
+### Rate Limiting
+
+If you see rate limiting errors in logs, increase QPS and burst:
+
+```yaml
+provider:
+ kubernetes:
+ qps: 50
+ burst: 100
+```
\ No newline at end of file
diff --git a/docs/providers/overview.md b/docs/providers/overview.md
index c5d7069c..2dd9e17b 100644
--- a/docs/providers/overview.md
+++ b/docs/providers/overview.md
@@ -18,6 +18,7 @@ A Provider typically has the following capabilities:
| [Docker Swarm](docker_swarm) | `docker_swarm` or `swarm` | Scale down to zero and up **services** on demand |
| [Kubernetes](kubernetes) | `kubernetes` | Scale down and up **deployments** and **statefulsets** on demand |
| [Podman](podman) | `podman` | Stop and start **containers** on demand |
+| [Digital Ocean](digitalocean) | `digitalocean` | Scale **App Platform apps** on demand |
*Your Provider is not on the list? [Open an issue to request the missing provider here!](https://github.com/sablierapp/sablier/issues/new?assignees=&labels=enhancement%2C+provider&projects=&template=instance-provider-request.md&title=Add+%60%5BPROVIDER%5D%60+provider)*
diff --git a/docs/providers/podman.md b/docs/providers/podman.md
index efb5534b..94abe755 100644
--- a/docs/providers/podman.md
+++ b/docs/providers/podman.md
@@ -59,8 +59,240 @@ services:
- sablier.group=mygroup
```
-## How does Sablier knows when a container is ready?
+## How does Sablier know when a container is ready?
-If the container defines a Healthcheck, then it will check for healthiness before stating the `ready` status.
+If the container defines a Healthcheck, then Sablier will check for healthiness before marking the container as `ready`.
-If the containers do not define a Healthcheck, then as soon as the container has the status `started`
\ No newline at end of file
+If the container does not define a Healthcheck, then as soon as the container has the status `started`, it is considered ready.
+
+## Configuration Options
+
+### Podman Socket URI
+
+```yaml
+provider:
+ name: podman
+ podman:
+ uri: unix:///run/podman/podman.sock
+```
+
+The socket URI depends on your Podman setup:
+
+| Mode | Default Socket Path | URI |
+|------|---------------------|-----|
+| Rootful | `/run/podman/podman.sock` | `unix:///run/podman/podman.sock` |
+| Rootless | `$XDG_RUNTIME_DIR/podman/podman.sock` | `unix:///run/user/1000/podman/podman.sock` |
+
+### Enabling the Podman Socket
+
+#### Rootless Mode (Recommended)
+
+```bash
+# Enable the socket for the current user
+systemctl --user enable podman.socket
+systemctl --user start podman.socket
+
+# Verify it's running
+systemctl --user status podman.socket
+
+# Check socket location
+echo $XDG_RUNTIME_DIR
+ls -l $XDG_RUNTIME_DIR/podman/podman.sock
+```
+
+Configuration for rootless:
+```yaml
+provider:
+ podman:
+ uri: unix:///run/user/1000/podman/podman.sock
+```
+
+#### Rootful Mode
+
+```bash
+# Enable the socket as root
+sudo systemctl enable podman.socket
+sudo systemctl start podman.socket
+
+# Verify it's running
+sudo systemctl status podman.socket
+
+# Check socket location
+sudo ls -l /run/podman/podman.sock
+```
+
+Configuration for rootful:
+```yaml
+provider:
+ podman:
+ uri: unix:///run/podman/podman.sock
+```
+
+### Auto-stop on Startup
+
+```yaml
+provider:
+ auto-stop-on-startup: true
+```
+
+When enabled, Sablier will stop all containers with the `sablier.enable=true` label that are running but not registered in an active session when Sablier starts.
+
+## Container Labels
+
+| Label | Required | Description | Example |
+|-------|----------|-------------|---------|
+| `sablier.enable` | Yes | Enable Sablier management for this container | `true` |
+| `sablier.group` | Yes | Logical group name for the container | `myapp` |
+
+## Using with Podman Compose
+
+Podman supports Docker Compose files via `podman-compose`:
+
+```yaml
+version: '3.8'
+
+services:
+ sablier:
+ image: docker.io/sablierapp/sablier:1.10.1
+ command:
+ - start
+ - --provider.name=podman
+ - --provider.podman.uri=unix:///run/podman/podman.sock
+ volumes:
+ - /run/podman/podman.sock:/run/podman/podman.sock
+ networks:
+ - sablier-network
+ ports:
+ - "10000:10000"
+
+ myapp:
+ image: docker.io/myapp:latest
+ labels:
+ - sablier.enable=true
+ - sablier.group=myapp
+ networks:
+ - sablier-network
+```
+
+### Installing podman-compose
+
+```bash
+# Using pip
+pip install podman-compose
+
+# Or using pipx
+pipx install podman-compose
+
+# Verify installation
+podman-compose --version
+```
+
+## Full Example
+
+See the [Podman provider example](../../examples/podman/) for a complete, working setup.
+
+## Rootless vs Rootful
+
+### Rootless Mode (Recommended)
+
+**Advantages:**
+- Better security (runs as regular user)
+- No root privileges required
+- User-specific containers
+
+**Considerations:**
+- Socket path is user-specific
+- Must configure `$XDG_RUNTIME_DIR` correctly
+- May need to handle user namespaces
+
+### Rootful Mode
+
+**Advantages:**
+- Traditional Docker-like behavior
+- System-wide containers
+- Simpler socket path
+
+**Considerations:**
+- Requires root privileges
+- Less isolated
+- System-wide impact
+
+## Limitations
+
+- Requires Podman socket to be enabled and accessible
+- Cannot manage containers on remote Podman hosts
+- Healthchecks must be defined in the container image or compose file
+- podman-compose has some limitations compared to docker-compose
+
+## SELinux Considerations
+
+On systems with SELinux enabled (like Red Hat, Fedora, CentOS):
+
+### For Rootful Mode
+
+Set the correct SELinux context:
+```bash
+sudo chcon -t container_runtime_exec_t /run/podman/podman.sock
+```
+
+### For Rootless Mode
+
+Usually works without additional configuration, but if you encounter issues:
+```bash
+semanage fcontext -a -t container_runtime_exec_t "$XDG_RUNTIME_DIR/podman/podman.sock"
+restorecon -v "$XDG_RUNTIME_DIR/podman/podman.sock"
+```
+
+## Troubleshooting
+
+### Socket Not Found
+
+```bash
+# Check if socket service is running
+systemctl --user status podman.socket # For rootless
+sudo systemctl status podman.socket # For rootful
+
+# Enable and start if not running
+systemctl --user enable --now podman.socket # For rootless
+sudo systemctl enable --now podman.socket # For rootful
+```
+
+### Permission Denied
+
+For rootless mode, ensure `$XDG_RUNTIME_DIR` is set:
+```bash
+echo $XDG_RUNTIME_DIR
+# Should output something like: /run/user/1000
+```
+
+For rootful mode, ensure Sablier has access to the socket:
+```bash
+sudo ls -l /run/podman/podman.sock
+```
+
+### Container Not Starting
+
+1. Check Sablier logs
+2. Verify container labels
+3. Test socket connectivity:
+ ```bash
+ curl --unix-socket /run/podman/podman.sock http://localhost/_ping
+ ```
+
+### podman-compose Not Working
+
+Ensure you're using compatible compose file syntax:
+```yaml
+version: '3.8' # Supported version
+
+services:
+  sablier:
+    # Use the docker.io/ prefix for images
+    image: docker.io/sablierapp/sablier:1.10.1
+```
+
+### Rootless Networking Issues
+
+Older Podman releases use `slirp4netns` for rootless networking by default; Podman 5.0 and later default to `pasta`. To use `pasta` explicitly:
+```bash
+# Run a container with pasta networking (Podman 4.4+)
+podman run --network pasta ...
+```
\ No newline at end of file
diff --git a/go.mod b/go.mod
index a7280b93..d6e3c9e0 100644
--- a/go.mod
+++ b/go.mod
@@ -29,7 +29,10 @@ require (
k8s.io/client-go v0.34.1
)
-require github.com/containers/image/v5 v5.36.2
+require (
+ github.com/containers/image/v5 v5.36.2
+ github.com/digitalocean/godo v1.169.0
+)
require (
dario.cat/mergo v1.0.2 // indirect
@@ -113,11 +116,14 @@ require (
github.com/google/gnostic-models v0.7.0 // indirect
github.com/google/go-containerregistry v0.20.3 // indirect
github.com/google/go-intervals v0.0.2 // indirect
+ github.com/google/go-querystring v1.1.0 // indirect
github.com/google/uuid v1.6.0 // indirect
github.com/gorilla/mux v1.8.1 // indirect
github.com/gorilla/schema v1.4.1 // indirect
github.com/hashicorp/errwrap v1.1.0 // indirect
+ github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
github.com/hashicorp/go-multierror v1.1.1 // indirect
+ github.com/hashicorp/go-retryablehttp v0.7.8 // indirect
github.com/hpcloud/tail v1.0.0 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/jedib0t/go-pretty/v6 v6.6.6 // indirect
diff --git a/go.sum b/go.sum
index 1838cd7d..e2aed474 100644
--- a/go.sum
+++ b/go.sum
@@ -123,6 +123,8 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/digitalocean/godo v1.169.0 h1:Wp9UrtIAgpFEEuY4ifWwq8JHJh7mFKPBXnkRv2Wf0Bw=
+github.com/digitalocean/godo v1.169.0/go.mod h1:xQsWpVCCbkDrWisHA72hPzPlnC+4W5w/McZY5ij9uvU=
github.com/disiqueira/gotree/v3 v3.0.2 h1:ik5iuLQQoufZBNPY518dXhiO5056hyNBIK9lWhkNRq8=
github.com/disiqueira/gotree/v3 v3.0.2/go.mod h1:ZuyjE4+mUQZlbpkI24AmruZKhg3VHEgPLDY8Qk+uUu8=
github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk=
@@ -153,6 +155,8 @@ github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1m
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f h1:Y/CXytFA4m6baUTXGLOoWe4PQhGxaX0KpnayAqC48p4=
github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f/go.mod h1:vw97MGsxSvLiUE2X8qFplwetxpGLQrlU1Q9AUEIzCaM=
+github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM=
+github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE=
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
@@ -234,6 +238,7 @@ github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
@@ -243,6 +248,8 @@ github.com/google/go-containerregistry v0.20.3 h1:oNx7IdTI936V8CQRveCjaxOiegWwvM
github.com/google/go-containerregistry v0.20.3/go.mod h1:w00pIgBRDVUDFM6bq+Qx8lwNWK+cxgCuX1vd3PIBDNI=
github.com/google/go-intervals v0.0.2 h1:FGrVEiUnTRKR8yE04qzXYaJMtnIYqobR5QbblK3ixcM=
github.com/google/go-intervals v0.0.2/go.mod h1:MkaR3LNRfeKLPmqgJYs4E66z5InYjmCjbbr4TQlcT6Y=
+github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8=
+github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 h1:BHT72Gu3keYf3ZEu2J0b1vyeLSOYI8bm5wbJM/8yDe8=
github.com/google/pprof v0.0.0-20250403155104-27863c87afa6/go.mod h1:boTsfXsheKC2y+lKOCMpSfarhxDeIzfZG1jqGcPl3cA=
@@ -259,8 +266,14 @@ github.com/grpc-ecosystem/grpc-gateway/v2 v2.25.1/go.mod h1:RBRO7fro65R6tjKzYgLA
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
+github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ=
+github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48=
+github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k=
+github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M=
github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
+github.com/hashicorp/go-retryablehttp v0.7.8 h1:ylXZWnqa7Lhqpk0L1P1LzDtGcCR0rPVUrx/c8Unxc48=
+github.com/hashicorp/go-retryablehttp v0.7.8/go.mod h1:rjiScheydd+CxvumBsIrFKlx3iS0jrZ7LvzFGFmuKbw=
github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM=
github.com/hexops/gotextdiff v1.0.3/go.mod h1:pSWU5MAI3yDq+fZBTazCSJysOMbxWL1BSow5/V2vxeg=
github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
@@ -313,6 +326,8 @@ github.com/mailru/easyjson v0.9.0 h1:PrnmzHw7262yW8sTBwxi1PdJA3Iw/EKBa8psRf7d9a4
github.com/mailru/easyjson v0.9.0/go.mod h1:1+xMtQp2MRNVL/V1bOzuP3aP8VNwRW55fQUto+XFtTU=
github.com/manifoldco/promptui v0.9.0 h1:3V4HzJk1TtXW1MTZMP7mdlwbBpIinw3HztaIlYthEiA=
github.com/manifoldco/promptui v0.9.0/go.mod h1:ka04sppxSGFAtxX0qhlYQjISsg9mR4GWtQEhdbn6Pgg=
+github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
+github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-localereader v0.0.1 h1:ygSAOl7ZXTx4RdPYinUpg6W99U8jWvWi9Ye2JC/oIi4=
diff --git a/pkg/config/provider.go b/pkg/config/provider.go
index 98337004..befb7626 100644
--- a/pkg/config/provider.go
+++ b/pkg/config/provider.go
@@ -12,6 +12,7 @@ type Provider struct {
AutoStopOnStartup bool `yaml:"auto-stop-on-startup,omitempty" default:"true"`
Kubernetes Kubernetes
Podman Podman
+ DigitalOcean DigitalOcean
}
type Kubernetes struct {
@@ -34,7 +35,14 @@ type Podman struct {
Uri string `mapstructure:"URI" yaml:"uri,omitempty" default:"unix:///run/podman/podman.sock"`
}
-var providers = []string{"docker", "docker_swarm", "swarm", "kubernetes", "podman"}
+type DigitalOcean struct {
+ // Token is the Digital Ocean API token for authentication
+ Token string `mapstructure:"TOKEN" yaml:"token,omitempty"`
+ // Region is the Digital Ocean region. Defaults to "nyc1"
+ Region string `mapstructure:"REGION" yaml:"region,omitempty" default:"nyc1"`
+}
+
+var providers = []string{"docker", "docker_swarm", "swarm", "kubernetes", "podman", "digitalocean"}
func NewProviderConfig() Provider {
return Provider{
@@ -48,6 +56,9 @@ func NewProviderConfig() Provider {
Podman: Podman{
Uri: "unix:///run/podman/podman.sock",
},
+ DigitalOcean: DigitalOcean{
+ Region: "nyc1",
+ },
}
}
diff --git a/pkg/provider/digitalocean/README_TESTS.md b/pkg/provider/digitalocean/README_TESTS.md
new file mode 100644
index 00000000..b2d437bc
--- /dev/null
+++ b/pkg/provider/digitalocean/README_TESTS.md
@@ -0,0 +1,91 @@
+# Digital Ocean Provider Integration Tests
+
+This directory contains integration tests for the Digital Ocean provider. These tests interact with the real Digital Ocean API and require valid credentials.
+
+## Prerequisites
+
+1. **Digital Ocean Account**: You need an active Digital Ocean account
+2. **API Token**: Generate a personal access token with read/write permissions for App Platform
+3. **Test App**: Create a minimal Digital Ocean app with the following environment variables:
+ - `SABLIER_ENABLE=true`
+ - `SABLIER_GROUP=test` (optional)
+
+## Running the Tests
+
+### Set Environment Variables
+
+**PowerShell:**
+```powershell
+$env:DIGITALOCEAN_TOKEN="your-digitalocean-api-token"
+$env:DIGITALOCEAN_TEST_APP_ID="your-app-id"
+```
+
+**Bash/Linux:**
+```bash
+export DIGITALOCEAN_TOKEN="your-digitalocean-api-token"
+export DIGITALOCEAN_TEST_APP_ID="your-app-id"
+```
+
+### Run Tests
+
+```bash
+# Run all Digital Ocean provider tests
+go test ./pkg/provider/digitalocean -v
+
+# Run specific test
+go test ./pkg/provider/digitalocean -v -run TestDigitalOceanProvider_InstanceStart
+
+# Skip integration tests (runs only unit tests if any)
+go test ./pkg/provider/digitalocean -v -short
+```
+
+## Test Behavior
+
+- **Skipped if credentials not provided**: Tests will be skipped if `DIGITALOCEAN_TOKEN` or `DIGITALOCEAN_TEST_APP_ID` are not set
+- **Cleanup**: Tests automatically clean up by scaling the app to 0 instances after completion
+- **Deployment wait times**: Tests may take several minutes due to Digital Ocean deployment times
+- **Polling-based events**: The `NotifyInstanceStopped` test may take up to 90 seconds due to 30-second polling interval
+
+## Creating a Test App
+
+The simplest way to create a test app:
+
+1. Go to Digital Ocean Console → Apps
+2. Create a new app (use a static site or simple container)
+3. Add environment variables:
+ - `SABLIER_ENABLE=true`
+ - `SABLIER_GROUP=test`
+4. Deploy the app
+5. Copy the App ID from the URL or app details
+
+**Note**: The test app should be minimal to reduce costs and deployment times.
+
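+The spec for such an app only needs the Sablier environment variables, mirroring the example from the provider documentation (the app and service names are illustrative):
+
+```yaml
+spec:
+  name: sablier-test-app
+  services:
+    - name: web
+      instance_count: 1
+      envs:
+        - key: SABLIER_ENABLE
+          value: "true"
+        - key: SABLIER_GROUP
+          value: "test"
+```
+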
+## Cost Considerations
+
+⚠️ **Warning**: Running these tests will:
+- Trigger deployments on your Digital Ocean account
+- Potentially incur charges based on Digital Ocean App Platform pricing
+- Scale apps up and down, which may result in brief compute charges
+
+It's recommended to:
+- Use the smallest instance size for your test app
+- Run tests sparingly
+- Use a test/development Digital Ocean account if possible
+
+## Troubleshooting
+
+### Tests are skipped
+- Verify environment variables are set correctly
+- Check that the API token is valid
+
+### Tests timeout
+- Digital Ocean deployments can take 3-5 minutes
+- Increase timeout if needed for slower apps
+
+### Authentication errors
+- Verify your token has the correct permissions
+- Check that the token hasn't expired
+
+### App not found
+- Verify the App ID is correct
+- Ensure the app exists in your Digital Ocean account
diff --git a/pkg/provider/digitalocean/app_inspect.go b/pkg/provider/digitalocean/app_inspect.go
new file mode 100644
index 00000000..95c9fe4a
--- /dev/null
+++ b/pkg/provider/digitalocean/app_inspect.go
@@ -0,0 +1,70 @@
+package digitalocean
+
+import (
+ "context"
+ "fmt"
+ "log/slog"
+
+ "github.com/sablierapp/sablier/pkg/sablier"
+)
+
+func (p *Provider) InstanceInspect(ctx context.Context, name string) (sablier.InstanceInfo, error) {
+ appID := name
+
+ app, _, err := p.Client.Apps.Get(ctx, appID)
+ if err != nil {
+ return sablier.InstanceInfo{}, fmt.Errorf("cannot inspect app: %w", err)
+ }
+
+ // ActiveDeployment can be nil for apps that have not deployed yet
+ if app.ActiveDeployment != nil {
+ p.l.DebugContext(ctx, "app inspected",
+ slog.String("app", name),
+ slog.String("phase", string(app.ActiveDeployment.Phase)),
+ )
+ }
+
+ // Calculate total current and desired instances
+ var currentReplicas, desiredReplicas int32
+
+ // Check services
+ for _, service := range app.Spec.Services {
+ desiredReplicas += int32(service.InstanceCount)
+ }
+
+ // Check workers
+ for _, worker := range app.Spec.Workers {
+ desiredReplicas += int32(worker.InstanceCount)
+ }
+
+ // Count running instances from active deployment
+ if app.ActiveDeployment != nil {
+ // Deployment phases: "PENDING_BUILD", "BUILDING", "PENDING_DEPLOY", "DEPLOYING", "ACTIVE", "SUPERSEDED", "ERROR", "CANCELED"
+ switch app.ActiveDeployment.Phase {
+ case "ACTIVE":
+ // Count actual running instances
+ for _, service := range app.Spec.Services {
+ currentReplicas += int32(service.InstanceCount)
+ }
+ for _, worker := range app.Spec.Workers {
+ currentReplicas += int32(worker.InstanceCount)
+ }
+
+ if currentReplicas > 0 {
+ return sablier.ReadyInstanceState(name, desiredReplicas), nil
+ }
+ return sablier.NotReadyInstanceState(name, currentReplicas, desiredReplicas), nil
+
+ case "BUILDING", "PENDING_BUILD", "DEPLOYING", "PENDING_DEPLOY":
+ return sablier.NotReadyInstanceState(name, currentReplicas, desiredReplicas), nil
+
+ case "ERROR", "CANCELED":
+ return sablier.UnrecoverableInstanceState(name, fmt.Sprintf("deployment in %s state", app.ActiveDeployment.Phase), desiredReplicas), nil
+
+ case "SUPERSEDED":
+ return sablier.NotReadyInstanceState(name, currentReplicas, desiredReplicas), nil
+
+ default:
+ return sablier.UnrecoverableInstanceState(name, fmt.Sprintf("deployment phase \"%s\" not handled", app.ActiveDeployment.Phase), desiredReplicas), nil
+ }
+ }
+
+ return sablier.NotReadyInstanceState(name, 0, desiredReplicas), nil
+}
diff --git a/pkg/provider/digitalocean/app_inspect_test.go b/pkg/provider/digitalocean/app_inspect_test.go
new file mode 100644
index 00000000..50b49b25
--- /dev/null
+++ b/pkg/provider/digitalocean/app_inspect_test.go
@@ -0,0 +1,100 @@
+package digitalocean_test
+
+import (
+ "context"
+ "testing"
+ "time"
+
+ "github.com/digitalocean/godo"
+ "github.com/sablierapp/sablier/pkg/sablier"
+ "gotest.tools/v3/assert"
+)
+
+func TestDigitalOceanProvider_InstanceInspect(t *testing.T) {
+ if testing.Short() {
+ t.Skip("skipping integration test in short mode")
+ }
+
+ ctx := context.Background()
+ provider, client := setupDigitalOcean(t)
+ appID := getTestAppID(t, client)
+
+ t.Cleanup(func() {
+ cleanupApp(t, client, appID)
+ })
+
+ tests := []struct {
+ name string
+ setup func(t *testing.T)
+ wantStatus sablier.InstanceStatus
+ }{
+ {
+ name: "inspect running app",
+ setup: func(t *testing.T) {
+ // Ensure app has 1 instance
+ app, _, err := client.Apps.Get(ctx, appID)
+ assert.NilError(t, err)
+
+ needsUpdate := false
+ for i := range app.Spec.Services {
+ if app.Spec.Services[i].InstanceCount == 0 {
+ app.Spec.Services[i].InstanceCount = 1
+ needsUpdate = true
+ }
+ }
+
+ if needsUpdate {
+ _, _, err = client.Apps.Update(ctx, appID, &godo.AppUpdateRequest{Spec: app.Spec})
+ assert.NilError(t, err)
+ // Wait for deployment to be active
+ _ = waitForDeployment(ctx, t, client, appID, 5*time.Minute)
+ }
+ },
+ wantStatus: sablier.InstanceStatusReady,
+ },
+ {
+ name: "inspect stopped app",
+ setup: func(t *testing.T) {
+ // Ensure app has 0 instances
+ app, _, err := client.Apps.Get(ctx, appID)
+ assert.NilError(t, err)
+
+ needsUpdate := false
+ for i := range app.Spec.Services {
+ if app.Spec.Services[i].InstanceCount > 0 {
+ app.Spec.Services[i].InstanceCount = 0
+ needsUpdate = true
+ }
+ }
+ for i := range app.Spec.Workers {
+ if app.Spec.Workers[i].InstanceCount > 0 {
+ app.Spec.Workers[i].InstanceCount = 0
+ needsUpdate = true
+ }
+ }
+
+ if needsUpdate {
+ _, _, err = client.Apps.Update(ctx, appID, &godo.AppUpdateRequest{Spec: app.Spec})
+ assert.NilError(t, err)
+ // Wait for deployment
+ _ = waitForDeployment(ctx, t, client, appID, 3*time.Minute)
+ }
+ },
+ wantStatus: sablier.InstanceStatusNotReady,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ tt.setup(t)
+
+ info, err := provider.InstanceInspect(ctx, appID)
+ assert.NilError(t, err)
+ assert.Equal(t, appID, info.Name)
+ assert.Equal(t, tt.wantStatus, info.Status)
+
+ t.Logf("App status: %s, Current: %d, Desired: %d",
+ info.Status, info.CurrentReplicas, info.DesiredReplicas)
+ })
+ }
+}
diff --git a/pkg/provider/digitalocean/app_list.go b/pkg/provider/digitalocean/app_list.go
new file mode 100644
index 00000000..fc2872ac
--- /dev/null
+++ b/pkg/provider/digitalocean/app_list.go
@@ -0,0 +1,100 @@
+package digitalocean
+
+import (
+ "context"
+ "fmt"
+ "log/slog"
+
+ "github.com/digitalocean/godo"
+ "github.com/sablierapp/sablier/pkg/provider"
+ "github.com/sablierapp/sablier/pkg/sablier"
+)
+
+func (p *Provider) InstanceList(ctx context.Context, options provider.InstanceListOptions) ([]sablier.InstanceConfiguration, error) {
+ p.l.DebugContext(ctx, "listing apps", slog.Group("options", slog.Bool("all", options.All)))
+
+ // List all apps
+ apps, _, err := p.Client.Apps.List(ctx, &godo.ListOptions{})
+ if err != nil {
+ return nil, fmt.Errorf("cannot list apps: %w", err)
+ }
+
+ p.l.DebugContext(ctx, "apps listed", slog.Int("count", len(apps)))
+
+ instances := make([]sablier.InstanceConfiguration, 0, len(apps))
+ for _, app := range apps {
+ // Filter by sablier.enable label if not listing all
+ if !options.All {
+ enabled := false
+ for _, env := range app.Spec.Envs {
+ if env.Key == "SABLIER_ENABLE" && env.Value == "true" {
+ enabled = true
+ break
+ }
+ }
+ if !enabled {
+ continue
+ }
+ }
+
+ instance := appToInstance(app)
+ instances = append(instances, instance)
+ }
+
+ return instances, nil
+}
+
+func appToInstance(app *godo.App) sablier.InstanceConfiguration {
+ group := "default"
+
+ // Look for sablier.group in environment variables
+ for _, env := range app.Spec.Envs {
+ if env.Key == "SABLIER_GROUP" {
+ group = env.Value
+ break
+ }
+ }
+
+ return sablier.InstanceConfiguration{
+ Name: app.ID,
+ Group: group,
+ }
+}
+
+func (p *Provider) InstanceGroups(ctx context.Context) (map[string][]string, error) {
+ p.l.DebugContext(ctx, "listing apps for groups")
+
+ // List all apps
+ apps, _, err := p.Client.Apps.List(ctx, &godo.ListOptions{})
+ if err != nil {
+ return nil, fmt.Errorf("cannot list apps: %w", err)
+ }
+
+ p.l.DebugContext(ctx, "apps listed", slog.Int("count", len(apps)))
+
+ groups := make(map[string][]string)
+ for _, app := range apps {
+ // Only include apps with sablier.enable
+ enabled := false
+ groupName := "default"
+
+ for _, env := range app.Spec.Envs {
+ if env.Key == "SABLIER_ENABLE" && env.Value == "true" {
+ enabled = true
+ }
+ if env.Key == "SABLIER_GROUP" {
+ groupName = env.Value
+ }
+ }
+
+ if !enabled {
+ continue
+ }
+
+ group := groups[groupName]
+ group = append(group, app.ID)
+ groups[groupName] = group
+ }
+
+ return groups, nil
+}
diff --git a/pkg/provider/digitalocean/app_list_test.go b/pkg/provider/digitalocean/app_list_test.go
new file mode 100644
index 00000000..9093d559
--- /dev/null
+++ b/pkg/provider/digitalocean/app_list_test.go
@@ -0,0 +1,79 @@
+package digitalocean_test
+
+import (
+ "context"
+ "testing"
+
+ "github.com/sablierapp/sablier/pkg/provider"
+ "gotest.tools/v3/assert"
+)
+
+func TestDigitalOceanProvider_InstanceList(t *testing.T) {
+ if testing.Short() {
+ t.Skip("skipping integration test in short mode")
+ }
+
+ ctx := context.Background()
+ p, _ := setupDigitalOcean(t)
+
+ tests := []struct {
+ name string
+ options provider.InstanceListOptions
+ }{
+ {
+ name: "list all apps",
+ options: provider.InstanceListOptions{
+ All: true,
+ },
+ },
+ {
+ name: "list only sablier-enabled apps",
+ options: provider.InstanceListOptions{
+ All: false,
+ },
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ instances, err := p.InstanceList(ctx, tt.options)
+ assert.NilError(t, err)
+
+ t.Logf("Found %d apps", len(instances))
+ for _, instance := range instances {
+ t.Logf("App: %s, Group: %s", instance.Name, instance.Group)
+ }
+
+ // If we're filtering, verify all instances have proper configuration
+ if !tt.options.All {
+ for _, instance := range instances {
+ assert.Assert(t, instance.Name != "", "instance name should not be empty")
+ assert.Assert(t, instance.Group != "", "instance group should not be empty")
+ }
+ }
+ })
+ }
+}
+
+func TestDigitalOceanProvider_InstanceGroups(t *testing.T) {
+ if testing.Short() {
+ t.Skip("skipping integration test in short mode")
+ }
+
+ ctx := context.Background()
+ p, _ := setupDigitalOcean(t)
+
+ groups, err := p.InstanceGroups(ctx)
+ assert.NilError(t, err)
+
+ t.Logf("Found %d groups", len(groups))
+ for groupName, appIDs := range groups {
+ t.Logf("Group '%s' has %d apps: %v", groupName, len(appIDs), appIDs)
+ }
+
+ // Verify groups structure
+ for groupName, appIDs := range groups {
+ assert.Assert(t, groupName != "", "group name should not be empty")
+ assert.Assert(t, len(appIDs) > 0, "group should have at least one app")
+ }
+}
diff --git a/pkg/provider/digitalocean/app_start.go b/pkg/provider/digitalocean/app_start.go
new file mode 100644
index 00000000..bca2083e
--- /dev/null
+++ b/pkg/provider/digitalocean/app_start.go
@@ -0,0 +1,59 @@
+package digitalocean
+
+import (
+ "context"
+ "fmt"
+ "log/slog"
+
+ "github.com/digitalocean/godo"
+)
+
+func (p *Provider) InstanceStart(ctx context.Context, name string) error {
+ p.l.DebugContext(ctx, "starting app", "name", name)
+
+ appID := name
+
+ // Get current app to check deployment status
+ app, _, err := p.Client.Apps.Get(ctx, appID)
+ if err != nil {
+ p.l.ErrorContext(ctx, "cannot get app", slog.String("name", name), slog.Any("error", err))
+ return fmt.Errorf("cannot get app %s: %w", name, err)
+ }
+
+ // Check if app components need to be scaled up
+ needsUpdate := false
+ updateRequest := &godo.AppUpdateRequest{
+ Spec: app.Spec,
+ }
+
+ // Scale up services
+ for i, service := range updateRequest.Spec.Services {
+ if service.InstanceCount == 0 {
+ updateRequest.Spec.Services[i].InstanceCount = 1
+ needsUpdate = true
+ }
+ }
+
+ // Scale up workers
+ for i, worker := range updateRequest.Spec.Workers {
+ if worker.InstanceCount == 0 {
+ updateRequest.Spec.Workers[i].InstanceCount = 1
+ needsUpdate = true
+ }
+ }
+
+ if !needsUpdate {
+ p.l.DebugContext(ctx, "app already running", slog.String("name", name))
+ return nil
+ }
+
+ // Update the app to scale it up
+ _, _, err = p.Client.Apps.Update(ctx, appID, updateRequest)
+ if err != nil {
+ p.l.ErrorContext(ctx, "cannot start app", slog.String("name", name), slog.Any("error", err))
+ return fmt.Errorf("cannot start app %s: %w", name, err)
+ }
+
+ p.l.InfoContext(ctx, "app started", slog.String("name", name))
+ return nil
+}
diff --git a/pkg/provider/digitalocean/app_start_test.go b/pkg/provider/digitalocean/app_start_test.go
new file mode 100644
index 00000000..a29d3e53
--- /dev/null
+++ b/pkg/provider/digitalocean/app_start_test.go
@@ -0,0 +1,119 @@
+package digitalocean_test
+
+import (
+ "context"
+ "testing"
+ "time"
+
+ "github.com/digitalocean/godo"
+ "gotest.tools/v3/assert"
+)
+
+func TestDigitalOceanProvider_InstanceStart(t *testing.T) {
+ if testing.Short() {
+ t.Skip("skipping integration test in short mode")
+ }
+
+ ctx := context.Background()
+ provider, client := setupDigitalOcean(t)
+ appID := getTestAppID(t, client)
+
+ t.Cleanup(func() {
+ cleanupApp(t, client, appID)
+ })
+
+ tests := []struct {
+ name string
+ setup func(t *testing.T)
+ wantErr bool
+ }{
+ {
+ name: "start app scaled to 0",
+ setup: func(t *testing.T) {
+ // Ensure app is at 0 instances
+ app, _, err := client.Apps.Get(ctx, appID)
+ assert.NilError(t, err)
+
+ // Scale to 0 if needed
+ needsUpdate := false
+ for i := range app.Spec.Services {
+ if app.Spec.Services[i].InstanceCount > 0 {
+ app.Spec.Services[i].InstanceCount = 0
+ needsUpdate = true
+ }
+ }
+ for i := range app.Spec.Workers {
+ if app.Spec.Workers[i].InstanceCount > 0 {
+ app.Spec.Workers[i].InstanceCount = 0
+ needsUpdate = true
+ }
+ }
+
+ if needsUpdate {
+ _, _, err = client.Apps.Update(ctx, appID, &godo.AppUpdateRequest{Spec: app.Spec})
+ assert.NilError(t, err)
+ // Wait for deployment
+ _ = waitForDeployment(ctx, t, client, appID, 3*time.Minute)
+ }
+ },
+ wantErr: false,
+ },
+ {
+ name: "start app already running",
+ setup: func(t *testing.T) {
+ // Ensure app is at 1 instance
+ app, _, err := client.Apps.Get(ctx, appID)
+ assert.NilError(t, err)
+
+ needsUpdate := false
+ for i := range app.Spec.Services {
+ if app.Spec.Services[i].InstanceCount == 0 {
+ app.Spec.Services[i].InstanceCount = 1
+ needsUpdate = true
+ }
+ }
+
+ if needsUpdate {
+ _, _, err = client.Apps.Update(ctx, appID, &godo.AppUpdateRequest{Spec: app.Spec})
+ assert.NilError(t, err)
+ // Wait for deployment
+ _ = waitForDeployment(ctx, t, client, appID, 3*time.Minute)
+ }
+ },
+ wantErr: false,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ tt.setup(t)
+
+ err := provider.InstanceStart(ctx, appID)
+ if tt.wantErr {
+ assert.Assert(t, err != nil)
+ } else {
+ assert.NilError(t, err)
+
+ // Verify app was started
+ app, _, err := client.Apps.Get(ctx, appID)
+ assert.NilError(t, err)
+
+ hasInstances := false
+ for _, service := range app.Spec.Services {
+ if service.InstanceCount > 0 {
+ hasInstances = true
+ break
+ }
+ }
+ for _, worker := range app.Spec.Workers {
+ if worker.InstanceCount > 0 {
+ hasInstances = true
+ break
+ }
+ }
+
+ assert.Assert(t, hasInstances, "app should have at least one instance after start")
+ }
+ })
+ }
+}
diff --git a/pkg/provider/digitalocean/app_stop.go b/pkg/provider/digitalocean/app_stop.go
new file mode 100644
index 00000000..e6c62a5b
--- /dev/null
+++ b/pkg/provider/digitalocean/app_stop.go
@@ -0,0 +1,47 @@
+package digitalocean
+
+import (
+ "context"
+ "fmt"
+ "log/slog"
+
+ "github.com/digitalocean/godo"
+)
+
+func (p *Provider) InstanceStop(ctx context.Context, name string) error {
+ p.l.DebugContext(ctx, "stopping app", slog.String("name", name))
+
+ appID := name
+
+ // Get current app
+ app, _, err := p.Client.Apps.Get(ctx, appID)
+ if err != nil {
+ p.l.ErrorContext(ctx, "cannot get app", slog.String("name", name), slog.Any("error", err))
+ return fmt.Errorf("cannot get app %s: %w", name, err)
+ }
+
+ // Scale down all components to 0
+ updateRequest := &godo.AppUpdateRequest{
+ Spec: app.Spec,
+ }
+
+ // Scale down services
+ for i := range updateRequest.Spec.Services {
+ updateRequest.Spec.Services[i].InstanceCount = 0
+ }
+
+ // Scale down workers
+ for i := range updateRequest.Spec.Workers {
+ updateRequest.Spec.Workers[i].InstanceCount = 0
+ }
+
+ // Update the app to scale it down
+ _, _, err = p.Client.Apps.Update(ctx, appID, updateRequest)
+ if err != nil {
+ p.l.ErrorContext(ctx, "cannot stop app", slog.String("name", name), slog.Any("error", err))
+ return fmt.Errorf("cannot stop app %s: %w", name, err)
+ }
+
+ p.l.InfoContext(ctx, "app stopped", slog.String("name", name))
+ return nil
+}
diff --git a/pkg/provider/digitalocean/app_stop_test.go b/pkg/provider/digitalocean/app_stop_test.go
new file mode 100644
index 00000000..65937ac5
--- /dev/null
+++ b/pkg/provider/digitalocean/app_stop_test.go
@@ -0,0 +1,112 @@
+package digitalocean_test
+
+import (
+ "context"
+ "testing"
+ "time"
+
+ "github.com/digitalocean/godo"
+ "gotest.tools/v3/assert"
+)
+
+func TestDigitalOceanProvider_InstanceStop(t *testing.T) {
+ if testing.Short() {
+ t.Skip("skipping integration test in short mode")
+ }
+
+ ctx := context.Background()
+ provider, client := setupDigitalOcean(t)
+ appID := getTestAppID(t, client)
+
+ t.Cleanup(func() {
+ cleanupApp(t, client, appID)
+ })
+
+ tests := []struct {
+ name string
+ setup func(t *testing.T)
+ wantErr bool
+ }{
+ {
+ name: "stop running app",
+ setup: func(t *testing.T) {
+ // Ensure app has at least 1 instance
+ app, _, err := client.Apps.Get(ctx, appID)
+ assert.NilError(t, err)
+
+ needsUpdate := false
+ for i := range app.Spec.Services {
+ if app.Spec.Services[i].InstanceCount == 0 {
+ app.Spec.Services[i].InstanceCount = 1
+ needsUpdate = true
+ }
+ }
+
+ if needsUpdate {
+ _, _, err = client.Apps.Update(ctx, appID, &godo.AppUpdateRequest{Spec: app.Spec})
+ assert.NilError(t, err)
+ // Wait for deployment
+ _ = waitForDeployment(ctx, t, client, appID, 3*time.Minute)
+ }
+ },
+ wantErr: false,
+ },
+ {
+ name: "stop already stopped app",
+ setup: func(t *testing.T) {
+ // Ensure app is at 0
+ app, _, err := client.Apps.Get(ctx, appID)
+ assert.NilError(t, err)
+
+ needsUpdate := false
+ for i := range app.Spec.Services {
+ if app.Spec.Services[i].InstanceCount > 0 {
+ app.Spec.Services[i].InstanceCount = 0
+ needsUpdate = true
+ }
+ }
+ for i := range app.Spec.Workers {
+ if app.Spec.Workers[i].InstanceCount > 0 {
+ app.Spec.Workers[i].InstanceCount = 0
+ needsUpdate = true
+ }
+ }
+
+ if needsUpdate {
+ _, _, err = client.Apps.Update(ctx, appID, &godo.AppUpdateRequest{Spec: app.Spec})
+ assert.NilError(t, err)
+ // Wait for deployment
+ _ = waitForDeployment(ctx, t, client, appID, 3*time.Minute)
+ }
+ },
+ wantErr: false,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ tt.setup(t)
+
+ err := provider.InstanceStop(ctx, appID)
+ if tt.wantErr {
+ assert.Assert(t, err != nil)
+ } else {
+ assert.NilError(t, err)
+
+ // Verify app was stopped
+ app, _, err := client.Apps.Get(ctx, appID)
+ assert.NilError(t, err)
+
+ totalInstances := 0
+ for _, service := range app.Spec.Services {
+ totalInstances += int(service.InstanceCount)
+ }
+ for _, worker := range app.Spec.Workers {
+ totalInstances += int(worker.InstanceCount)
+ }
+
+ assert.Equal(t, 0, totalInstances, "app should have 0 instances after stop")
+ }
+ })
+ }
+}
diff --git a/pkg/provider/digitalocean/digitalocean.go b/pkg/provider/digitalocean/digitalocean.go
new file mode 100644
index 00000000..c751a23d
--- /dev/null
+++ b/pkg/provider/digitalocean/digitalocean.go
@@ -0,0 +1,40 @@
+package digitalocean
+
+import (
+ "context"
+ "fmt"
+ "log/slog"
+
+ "github.com/digitalocean/godo"
+ "github.com/sablierapp/sablier/pkg/sablier"
+)
+
+// Interface guard
+var _ sablier.Provider = (*Provider)(nil)
+
+type Provider struct {
+ Client *godo.Client
+ desiredReplicas int32
+ l *slog.Logger
+}
+
+func New(ctx context.Context, client *godo.Client, logger *slog.Logger) (*Provider, error) {
+ logger = logger.With(slog.String("provider", "digitalocean"))
+
+ // Test connection by getting account info
+ account, _, err := client.Account.Get(ctx)
+ if err != nil {
+ return nil, fmt.Errorf("cannot connect to Digital Ocean API: %w", err)
+ }
+
+ logger.InfoContext(ctx, "connection established with Digital Ocean",
+ slog.String("email", account.Email),
+ slog.String("status", account.Status),
+ )
+
+ return &Provider{
+ Client: client,
+ desiredReplicas: 1,
+ l: logger,
+ }, nil
+}
diff --git a/pkg/provider/digitalocean/events.go b/pkg/provider/digitalocean/events.go
new file mode 100644
index 00000000..782fd823
--- /dev/null
+++ b/pkg/provider/digitalocean/events.go
@@ -0,0 +1,66 @@
+package digitalocean
+
+import (
+ "context"
+ "log/slog"
+ "time"
+)
+
+func (p *Provider) NotifyInstanceStopped(ctx context.Context, instance chan<- string) {
+ // Digital Ocean doesn't provide a native event stream API like Docker
+ // We need to poll for changes in app deployments
+ ticker := time.NewTicker(30 * time.Second)
+ defer ticker.Stop()
+
+ // Keep track of last known state
+ lastState := make(map[string]int32)
+
+ for {
+ select {
+ case <-ticker.C:
+ apps, _, err := p.Client.Apps.List(ctx, nil)
+ if err != nil {
+ p.l.ErrorContext(ctx, "failed to list apps", slog.Any("error", err))
+ continue
+ }
+
+ for _, app := range apps {
+ // Check if this app has sablier enabled
+ enabled := false
+ for _, env := range app.Spec.Envs {
+ if env.Key == "SABLIER_ENABLE" && env.Value == "true" {
+ enabled = true
+ break
+ }
+ }
+
+ if !enabled {
+ continue
+ }
+
+ // Calculate current instance count
+ var currentCount int32
+ for _, service := range app.Spec.Services {
+ currentCount += int32(service.InstanceCount)
+ }
+ for _, worker := range app.Spec.Workers {
+ currentCount += int32(worker.InstanceCount)
+ }
+
+ // Check if app was stopped (went from running to 0 instances)
+ if lastCount, exists := lastState[app.ID]; exists {
+ if lastCount > 0 && currentCount == 0 {
+ p.l.DebugContext(ctx, "app stopped detected", slog.String("app_id", app.ID))
+ instance <- app.ID
+ }
+ }
+
+ lastState[app.ID] = currentCount
+ }
+
+ case <-ctx.Done():
+ close(instance)
+ return
+ }
+ }
+}
diff --git a/pkg/provider/digitalocean/events_test.go b/pkg/provider/digitalocean/events_test.go
new file mode 100644
index 00000000..8a919f5b
--- /dev/null
+++ b/pkg/provider/digitalocean/events_test.go
@@ -0,0 +1,70 @@
+package digitalocean_test
+
+import (
+ "context"
+ "testing"
+ "time"
+
+ "github.com/digitalocean/godo"
+ "gotest.tools/v3/assert"
+)
+
+func TestDigitalOceanProvider_NotifyInstanceStopped(t *testing.T) {
+ if testing.Short() {
+ t.Skip("skipping integration test in short mode")
+ }
+
+ ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
+ defer cancel()
+
+ provider, client := setupDigitalOcean(t)
+ appID := getTestAppID(t, client)
+
+ t.Cleanup(func() {
+ cleanupApp(t, client, appID)
+ })
+
+ // Ensure app is running first
+ app, _, err := client.Apps.Get(ctx, appID)
+ assert.NilError(t, err)
+
+ needsUpdate := false
+ for i := range app.Spec.Services {
+ if app.Spec.Services[i].InstanceCount == 0 {
+ app.Spec.Services[i].InstanceCount = 1
+ needsUpdate = true
+ }
+ }
+
+ if needsUpdate {
+ _, _, err = client.Apps.Update(ctx, appID, &godo.AppUpdateRequest{Spec: app.Spec})
+ assert.NilError(t, err)
+ // Wait for deployment
+ _ = waitForDeployment(ctx, t, client, appID, 3*time.Minute)
+ }
+
+ // Start listening for stopped events
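+	// The channel is buffered so the polling goroutine can send without blocking.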
+ instanceChan := make(chan string, 1)
+ go provider.NotifyInstanceStopped(ctx, instanceChan)
+
+ // Give the polling goroutine time to start
+ time.Sleep(2 * time.Second)
+
+ // Stop the app
+ t.Log("Stopping app to trigger event...")
+ err = provider.InstanceStop(ctx, appID)
+ assert.NilError(t, err)
+
+ // Wait for the notification (with timeout)
+ // Note: Since polling is every 30s, this might take a while
+ select {
+ case stoppedAppID := <-instanceChan:
+ t.Logf("Received stop notification for app: %s", stoppedAppID)
+ assert.Equal(t, appID, stoppedAppID, "should receive notification for stopped app")
+ case <-time.After(90 * time.Second):
+ t.Log("Timeout waiting for stop notification - this is expected with 30s polling interval")
+ t.Log("The event system works but may take up to 30s to detect changes")
+ case <-ctx.Done():
+ t.Fatal("Context cancelled while waiting for notification")
+ }
+}
diff --git a/pkg/provider/digitalocean/testhelpers_test.go b/pkg/provider/digitalocean/testhelpers_test.go
new file mode 100644
index 00000000..cb163ae9
--- /dev/null
+++ b/pkg/provider/digitalocean/testhelpers_test.go
@@ -0,0 +1,125 @@
+package digitalocean_test
+
+import (
+ "context"
+ "os"
+ "testing"
+ "time"
+
+ "github.com/digitalocean/godo"
+ "github.com/neilotoole/slogt"
+ "github.com/sablierapp/sablier/pkg/provider/digitalocean"
+)
+
+// setupDigitalOcean creates a Digital Ocean provider and client for testing.
+// It skips the test if DIGITALOCEAN_TOKEN is not set.
+func setupDigitalOcean(t *testing.T) (*digitalocean.Provider, *godo.Client) {
+ t.Helper()
+
+ token := os.Getenv("DIGITALOCEAN_TOKEN")
+ if token == "" {
+ t.Skip("DIGITALOCEAN_TOKEN environment variable not set, skipping Digital Ocean integration test")
+ }
+
+ ctx := context.Background()
+ client := godo.NewFromToken(token)
+
+ provider, err := digitalocean.New(ctx, client, slogt.New(t))
+ if err != nil {
+ t.Fatalf("failed to create Digital Ocean provider: %s", err)
+ }
+
+ return provider, client
+}
+
+// getTestAppID returns the app ID to use for testing.
+// It reads the DIGITALOCEAN_TEST_APP_ID env var and skips the test if it is not set.
+func getTestAppID(t *testing.T, client *godo.Client) string {
+ t.Helper()
+
+ // Check if test app ID is provided
+ appID := os.Getenv("DIGITALOCEAN_TEST_APP_ID")
+ if appID != "" {
+ return appID
+ }
+
+ t.Skip("DIGITALOCEAN_TEST_APP_ID environment variable not set. Please provide an existing app ID for testing, or create a minimal app with SABLIER_ENABLE=true environment variable.")
+ return ""
+}
+
+// cleanupApp scales the app's services and workers back to zero instances after testing
+func cleanupApp(t *testing.T, client *godo.Client, appID string) {
+ t.Helper()
+ ctx := context.Background()
+
+ // Get the app
+ app, _, err := client.Apps.Get(ctx, appID)
+ if err != nil {
+ t.Logf("failed to get app for cleanup: %s", err)
+ return
+ }
+
+ // Scale down to 0 if needed
+ needsUpdate := false
+ updateRequest := &godo.AppUpdateRequest{
+ Spec: app.Spec,
+ }
+
+ for i := range updateRequest.Spec.Services {
+ if updateRequest.Spec.Services[i].InstanceCount > 0 {
+ updateRequest.Spec.Services[i].InstanceCount = 0
+ needsUpdate = true
+ }
+ }
+
+ for i := range updateRequest.Spec.Workers {
+ if updateRequest.Spec.Workers[i].InstanceCount > 0 {
+ updateRequest.Spec.Workers[i].InstanceCount = 0
+ needsUpdate = true
+ }
+ }
+
+ if needsUpdate {
+ _, _, err = client.Apps.Update(ctx, appID, updateRequest)
+ if err != nil {
+ t.Logf("failed to cleanup app: %s", err)
+ }
+ }
+}
+
+// waitForDeployment waits until the app's active deployment reaches a terminal phase (ACTIVE, ERROR, or CANCELED) or the timeout elapses
+func waitForDeployment(ctx context.Context, t *testing.T, client *godo.Client, appID string, timeout time.Duration) error {
+ t.Helper()
+
+ deadline := time.Now().Add(timeout)
+ ticker := time.NewTicker(5 * time.Second)
+ defer ticker.Stop()
+
+ for {
+ select {
+ case <-ticker.C:
+ app, _, err := client.Apps.Get(ctx, appID)
+ if err != nil {
+ return err
+ }
+
+ if app.ActiveDeployment != nil {
+ phase := app.ActiveDeployment.Phase
+ t.Logf("Current deployment phase: %s", phase)
+
+ switch phase {
+ case "ACTIVE", "ERROR", "CANCELED":
+ return nil
+ }
+ }
+
+ if time.Now().After(deadline) {
+ t.Log("Timeout waiting for deployment, continuing anyway...")
+ return nil
+ }
+
+ case <-ctx.Done():
+ return ctx.Err()
+ }
+ }
+}
diff --git a/pkg/sabliercmd/provider.go b/pkg/sabliercmd/provider.go
index 6010a84f..a07f8669 100644
--- a/pkg/sabliercmd/provider.go
+++ b/pkg/sabliercmd/provider.go
@@ -6,8 +6,10 @@ import (
"log/slog"
"github.com/containers/podman/v5/pkg/bindings"
+ "github.com/digitalocean/godo"
"github.com/docker/docker/client"
"github.com/sablierapp/sablier/pkg/config"
+ "github.com/sablierapp/sablier/pkg/provider/digitalocean"
"github.com/sablierapp/sablier/pkg/provider/docker"
"github.com/sablierapp/sablier/pkg/provider/dockerswarm"
"github.com/sablierapp/sablier/pkg/provider/kubernetes"
@@ -54,6 +56,12 @@ func setupProvider(ctx context.Context, logger *slog.Logger, config config.Provi
return nil, fmt.Errorf("cannot create podman connection: %w", err)
}
return podman.New(connText, logger)
+ case "digitalocean":
+ if config.DigitalOcean.Token == "" {
+ return nil, fmt.Errorf("Digital Ocean token is required")
+ }
+ cli := godo.NewFromToken(config.DigitalOcean.Token)
+ return digitalocean.New(ctx, cli, logger)
}
return nil, fmt.Errorf("unimplemented provider %s", config.Name)
}