From 1e7dea6f2f3c951df89aa871f59623a7d5a5ba52 Mon Sep 17 00:00:00 2001 From: Dijar Llozana Date: Tue, 9 Dec 2025 15:34:53 +0100 Subject: [PATCH 1/5] Added elaborate Helm chart support for all the components converted from Docker Compose; supports multiple configuration options via values --- helm/commonware-restaking/Chart.yaml | 13 ++ helm/commonware-restaking/templates/NOTES.txt | 61 ++++++ .../templates/_helpers.tpl | 109 +++++++++++ .../templates/configmap.yaml | 35 ++++ .../templates/ethereum-deployment.yaml | 69 +++++++ .../templates/ingress.yaml | 41 ++++ .../templates/node-deployment.yaml | 141 ++++++++++++++ helm/commonware-restaking/templates/pvc.yaml | 15 ++ .../templates/router-deployment.yaml | 176 ++++++++++++++++++ .../templates/secret.yaml | 20 ++ .../templates/setup-job.yaml | 120 ++++++++++++ .../templates/signer-deployment.yaml | 115 ++++++++++++ helm/commonware-restaking/values.yaml | 176 ++++++++++++++++++ 13 files changed, 1091 insertions(+) create mode 100644 helm/commonware-restaking/Chart.yaml create mode 100644 helm/commonware-restaking/templates/NOTES.txt create mode 100644 helm/commonware-restaking/templates/_helpers.tpl create mode 100644 helm/commonware-restaking/templates/configmap.yaml create mode 100644 helm/commonware-restaking/templates/ethereum-deployment.yaml create mode 100644 helm/commonware-restaking/templates/ingress.yaml create mode 100644 helm/commonware-restaking/templates/node-deployment.yaml create mode 100644 helm/commonware-restaking/templates/pvc.yaml create mode 100644 helm/commonware-restaking/templates/router-deployment.yaml create mode 100644 helm/commonware-restaking/templates/secret.yaml create mode 100644 helm/commonware-restaking/templates/setup-job.yaml create mode 100644 helm/commonware-restaking/templates/signer-deployment.yaml create mode 100644 helm/commonware-restaking/values.yaml diff --git a/helm/commonware-restaking/Chart.yaml b/helm/commonware-restaking/Chart.yaml new file mode 100644 index 
0000000..ebf980a --- /dev/null +++ b/helm/commonware-restaking/Chart.yaml @@ -0,0 +1,13 @@ +apiVersion: v2 +name: commonware-restaking +description: A Helm chart for deploying Commonware Restaking AVS with EigenLayer integration +type: application +version: 0.1.0 +appVersion: "1.0.0" +keywords: + - commonware + - avs + - eigenlayer + - ethereum +maintainers: + - name: Breadchain \ No newline at end of file diff --git a/helm/commonware-restaking/templates/NOTES.txt b/helm/commonware-restaking/templates/NOTES.txt new file mode 100644 index 0000000..4afb34e --- /dev/null +++ b/helm/commonware-restaking/templates/NOTES.txt @@ -0,0 +1,61 @@ +Thank you for installing {{ .Chart.Name }}! + +Your release is named {{ .Release.Name }}. + +== DEPLOYMENT STATUS == + +The Commonware AVS stack is being deployed with the following components: + +1. Ethereum (Anvil) - Local blockchain with forked Holesky state +2. Signer (Cerberus) - BLS signature service +3. Setup Job - EigenLayer contract deployment +4. {{ .Values.global.nodeCount }} AVS Nodes - Operator nodes +5. Router - Request routing and aggregation + +== SETUP JOB == + +A setup job will run after installation to: +- Deploy EigenLayer contracts +- Create {{ .Values.global.nodeCount }} test operator accounts + +Monitor the setup job: + kubectl get jobs -l app.kubernetes.io/instance={{ .Release.Name }} + kubectl logs job/{{ include "commonware-avs.setup.fullname" . }} + +== ACCESSING SERVICES == + +Ethereum RPC: + kubectl port-forward svc/{{ include "commonware-avs.ethereum.fullname" . }} 8545:{{ .Values.ethereum.service.port }} + +Router: + kubectl port-forward svc/{{ include "commonware-avs.router.fullname" . }} 3000:{{ .Values.router.service.port }} + +Router Ingress (if enabled): + kubectl port-forward svc/{{ include "commonware-avs.router.fullname" . 
}} 8080:{{ .Values.router.service.ingressPort }} + +{{- if .Values.ingress.enabled }} + +Ingress: +{{- range .Values.ingress.hosts }} + http{{ if $.Values.ingress.tls }}s{{ end }}://{{ .host }} +{{- end }} +{{- end }} + +== TROUBLESHOOTING == + +Check pod status: + kubectl get pods -l app.kubernetes.io/instance={{ .Release.Name }} + +View logs for a specific component: + kubectl logs -l app.kubernetes.io/instance={{ .Release.Name }},app.kubernetes.io/component=ethereum + kubectl logs -l app.kubernetes.io/instance={{ .Release.Name }},app.kubernetes.io/component=node + kubectl logs -l app.kubernetes.io/instance={{ .Release.Name }},app.kubernetes.io/component=router + +Check shared data volume: + kubectl get pvc {{ include "commonware-avs.shareddata.fullname" . }} + +== CONFIGURATION == + +Environment: {{ .Values.global.environment }} +Node Count: {{ .Values.global.nodeCount }} +Usecase: {{ .Values.node.usecase }} \ No newline at end of file diff --git a/helm/commonware-restaking/templates/_helpers.tpl b/helm/commonware-restaking/templates/_helpers.tpl new file mode 100644 index 0000000..3ba0f9e --- /dev/null +++ b/helm/commonware-restaking/templates/_helpers.tpl @@ -0,0 +1,109 @@ +{{/* +Expand the name of the chart. +*/}} +{{- define "commonware-avs.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +*/}} +{{- define "commonware-avs.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. 
+*/}} +{{- define "commonware-avs.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "commonware-avs.labels" -}} +helm.sh/chart: {{ include "commonware-avs.chart" . }} +{{ include "commonware-avs.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "commonware-avs.selectorLabels" -}} +app.kubernetes.io/name: {{ include "commonware-avs.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Ethereum service name +*/}} +{{- define "commonware-avs.ethereum.fullname" -}} +{{- printf "%s-ethereum" (include "commonware-avs.fullname" .) }} +{{- end }} + +{{/* +Signer service name +*/}} +{{- define "commonware-avs.signer.fullname" -}} +{{- printf "%s-signer" (include "commonware-avs.fullname" .) }} +{{- end }} + +{{/* +Router service name +*/}} +{{- define "commonware-avs.router.fullname" -}} +{{- printf "%s-router" (include "commonware-avs.fullname" .) }} +{{- end }} + +{{/* +Node name helper +*/}} +{{- define "commonware-avs.node.fullname" -}} +{{- printf "%s-node" (include "commonware-avs.fullname" .) }} +{{- end }} + +{{/* +Setup job name +*/}} +{{- define "commonware-avs.setup.fullname" -}} +{{- printf "%s-setup" (include "commonware-avs.fullname" .) }} +{{- end }} + +{{/* +Shared data PVC name +*/}} +{{- define "commonware-avs.shareddata.fullname" -}} +{{- printf "%s-shared-data" (include "commonware-avs.fullname" .) }} +{{- end }} + +{{/* +Config ConfigMap name +*/}} +{{- define "commonware-avs.config.fullname" -}} +{{- printf "%s-config" (include "commonware-avs.fullname" .) 
}} +{{- end }} + +{{/* +Secret name - supports existing secret or creates new one +*/}} +{{- define "commonware-avs.secret.fullname" -}} +{{- if .Values.secrets.existingSecret }} +{{- .Values.secrets.existingSecret }} +{{- else }} +{{- printf "%s-secret" (include "commonware-avs.fullname" .) }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/helm/commonware-restaking/templates/configmap.yaml b/helm/commonware-restaking/templates/configmap.yaml new file mode 100644 index 0000000..d09ddb5 --- /dev/null +++ b/helm/commonware-restaking/templates/configmap.yaml @@ -0,0 +1,35 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "commonware-avs.config.fullname" . }} + labels: + {{- include "commonware-avs.labels" . | nindent 4 }} +data: + # EigenLayer setup config - dynamically generates operator socket addresses + # This replaces config/config.example.json with Kubernetes service names + config.json: | + { + "quorum": { + "minimumStake": "1", + "maxOperatorCount": 32, + "kickBIPsOfOperatorStake": 10000, + "kickBIPsOfTotalStake": 100 + }, + "metadata": { + "uri": "metadataURI" + }, + "operators": { + {{- $nodeCount := int .Values.global.nodeCount }} + {{- range $i := until $nodeCount }} + "testacc{{ add $i 1 }}": { + "socketAddress": "{{ include "commonware-avs.node.fullname" $ }}-{{ add $i 1 }}:{{ add (int $.Values.node.basePort) $i }}" + }{{ if lt (add $i 1) $nodeCount }},{{ end }} + {{- end }} + } + } + {{- if .Values.orchestrator.publicConfig }} + # Public orchestrator config - optional override + # If not provided, will use the file from the image at /app/config/public_orchestrator.json + public_orchestrator.json: | +{{ .Values.orchestrator.publicConfig | indent 4 }} + {{- end }} \ No newline at end of file diff --git a/helm/commonware-restaking/templates/ethereum-deployment.yaml b/helm/commonware-restaking/templates/ethereum-deployment.yaml new file mode 100644 index 0000000..18fda7d --- /dev/null +++ 
b/helm/commonware-restaking/templates/ethereum-deployment.yaml @@ -0,0 +1,69 @@ +{{- if and .Values.ethereum.enabled .Values.secrets.forkUrl }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "commonware-avs.ethereum.fullname" . }} + labels: + {{- include "commonware-avs.labels" . | nindent 4 }} + app.kubernetes.io/component: ethereum +spec: + replicas: 1 + selector: + matchLabels: + {{- include "commonware-avs.selectorLabels" . | nindent 6 }} + app.kubernetes.io/component: ethereum + template: + metadata: + labels: + {{- include "commonware-avs.selectorLabels" . | nindent 8 }} + app.kubernetes.io/component: ethereum + spec: + # Prioritize keeping ethereum pod running - it holds critical state + priorityClassName: system-cluster-critical + containers: + - name: ethereum + image: "{{ .Values.ethereum.image.repository }}:{{ .Values.ethereum.image.tag }}" + imagePullPolicy: {{ .Values.ethereum.image.pullPolicy }} + env: + - name: FORK_URL + valueFrom: + secretKeyRef: + name: {{ include "commonware-avs.secret.fullname" . }} + key: FORK_URL + ports: + - name: rpc + containerPort: 8545 + protocol: TCP + # Use TCP socket probe on the RPC port - more reliable than pgrep which may not exist in container + livenessProbe: + tcpSocket: + port: rpc + initialDelaySeconds: 60 + periodSeconds: 30 + failureThreshold: 5 + readinessProbe: + tcpSocket: + port: rpc + initialDelaySeconds: 10 + periodSeconds: 5 + resources: + {{- toYaml .Values.ethereum.resources | nindent 12 }} +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ include "commonware-avs.ethereum.fullname" . }} + labels: + {{- include "commonware-avs.labels" . | nindent 4 }} + app.kubernetes.io/component: ethereum +spec: + type: {{ .Values.ethereum.service.type }} + ports: + - port: {{ .Values.ethereum.service.port }} + targetPort: rpc + protocol: TCP + name: rpc + selector: + {{- include "commonware-avs.selectorLabels" . 
| nindent 4 }} + app.kubernetes.io/component: ethereum +{{- end }} \ No newline at end of file diff --git a/helm/commonware-restaking/templates/ingress.yaml b/helm/commonware-restaking/templates/ingress.yaml new file mode 100644 index 0000000..16fee27 --- /dev/null +++ b/helm/commonware-restaking/templates/ingress.yaml @@ -0,0 +1,41 @@ +{{- if .Values.ingress.enabled }} +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: {{ include "commonware-avs.fullname" . }} + labels: + {{- include "commonware-avs.labels" . | nindent 4 }} + {{- with .Values.ingress.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + {{- if .Values.ingress.className }} + ingressClassName: {{ .Values.ingress.className }} + {{- end }} + {{- if .Values.ingress.tls }} + tls: + {{- range .Values.ingress.tls }} + - hosts: + {{- range .hosts }} + - {{ . | quote }} + {{- end }} + secretName: {{ .secretName }} + {{- end }} + {{- end }} + rules: + {{- range .Values.ingress.hosts }} + - host: {{ .host | quote }} + http: + paths: + {{- range .paths }} + - path: {{ .path }} + pathType: {{ .pathType }} + backend: + service: + name: {{ include "commonware-avs.router.fullname" $ }} + port: + name: ingress + {{- end }} + {{- end }} +{{- end }} \ No newline at end of file diff --git a/helm/commonware-restaking/templates/node-deployment.yaml b/helm/commonware-restaking/templates/node-deployment.yaml new file mode 100644 index 0000000..f6634a7 --- /dev/null +++ b/helm/commonware-restaking/templates/node-deployment.yaml @@ -0,0 +1,141 @@ +{{- $root := . 
-}} +{{- $nodeCount := int .Values.global.nodeCount -}} +{{- range $i := until $nodeCount }} +{{- $nodeIndex := add $i 1 }} +{{- $nodePort := add (int $root.Values.node.basePort) $i }} +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "commonware-avs.node.fullname" $root }}-{{ $nodeIndex }} + labels: + {{- include "commonware-avs.labels" $root | nindent 4 }} + app.kubernetes.io/component: node + commonware-avs/node-index: {{ $nodeIndex | quote }} +spec: + replicas: 1 + selector: + matchLabels: + {{- include "commonware-avs.selectorLabels" $root | nindent 6 }} + app.kubernetes.io/component: node + commonware-avs/node-index: {{ $nodeIndex | quote }} + template: + metadata: + labels: + {{- include "commonware-avs.selectorLabels" $root | nindent 8 }} + app.kubernetes.io/component: node + commonware-avs/node-index: {{ $nodeIndex | quote }} + spec: + initContainers: + # Wait for setup job to fully complete (check for .setup_complete marker) + # This ensures operators are registered in the quorum before nodes start + - name: wait-for-setup + image: busybox:1.36 + command: + - sh + - -c + - | + echo "Waiting for setup to complete..." + KEY_FILE="/app/.nodes/operator_keys/testacc{{ $nodeIndex }}.private.bls.key.json" + until [ -f /app/.nodes/.setup_complete ] && [ -f "$KEY_FILE" ]; do + echo "Setup not complete, waiting for .setup_complete marker and $KEY_FILE..." + sleep 10 + done + echo "Setup complete! Key file found at $KEY_FILE" + volumeMounts: + - name: shared-data + mountPath: /app/.nodes + readOnly: true + + # Wait for ethereum RPC to be accessible before starting the node + - name: wait-for-ethereum + image: busybox:1.36 + command: + - sh + - -c + - | + echo "Waiting for Ethereum RPC..." + until nc -z {{ include "commonware-avs.ethereum.fullname" $root }} {{ $root.Values.ethereum.service.port }}; do + echo "Ethereum not ready, waiting..." + sleep 5 + done + echo "Ethereum is ready!" 
+ + containers: + - name: node + image: "{{ $root.Values.node.image.repository }}:{{ $root.Values.node.image.tag }}" + imagePullPolicy: {{ $root.Values.node.image.pullPolicy }} + args: + - "--key-file" + - "/app/.nodes/operator_keys/testacc{{ $nodeIndex }}.private.bls.key.json" + - "--port" + - {{ $nodePort | quote }} + - "--orchestrator" + {{- if $root.Values.orchestrator.publicConfig }} + # Use public orchestrator from configmap if provided + - "/app/public_orchestrator.json" + {{- else }} + # Use public orchestrator from the image if not provided via configmap + - "/app/config/public_orchestrator.json" + {{- end }} + env: + - name: HTTP_RPC + value: "http://{{ include "commonware-avs.ethereum.fullname" $root }}:{{ $root.Values.ethereum.service.port }}" + - name: WS_RPC + value: "ws://{{ include "commonware-avs.ethereum.fullname" $root }}:{{ $root.Values.ethereum.service.port }}" + - name: AVS_DEPLOYMENT_PATH + value: "/app/.nodes/avs_deploy.json" + ports: + - name: node + containerPort: {{ $nodePort }} + protocol: TCP + # Note: No liveness/readiness probes on P2P ports + # TCP socket probes cause noisy "failed to upgrade connection" logs + # The node process will be restarted by Kubernetes if it crashes + volumeMounts: + - name: shared-data + mountPath: /app/.nodes + readOnly: true + {{- if $root.Values.orchestrator.publicConfig }} + - name: config + mountPath: /app/public_orchestrator.json + subPath: public_orchestrator.json + readOnly: true + {{- end }} + resources: + {{- toYaml $root.Values.node.resources | nindent 12 }} + + volumes: + - name: shared-data + persistentVolumeClaim: + claimName: {{ include "commonware-avs.shareddata.fullname" $root }} + {{- if $root.Values.orchestrator.publicConfig }} + - name: config + configMap: + name: {{ include "commonware-avs.config.fullname" $root }} + {{- end }} +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ include "commonware-avs.node.fullname" $root }}-{{ $nodeIndex }} + labels: + {{- include 
"commonware-avs.labels" $root | nindent 4 }} + app.kubernetes.io/component: node + commonware-avs/node-index: {{ $nodeIndex | quote }} +spec: + type: ClusterIP + # CRITICAL: Allow traffic to pods before they pass readiness probes + # This is required because nodes need to dial each other during P2P startup + # Without this, services have no endpoints until pods are ready, creating a deadlock + publishNotReadyAddresses: true + ports: + - port: {{ $nodePort }} + targetPort: node + protocol: TCP + name: node + selector: + {{- include "commonware-avs.selectorLabels" $root | nindent 4 }} + app.kubernetes.io/component: node + commonware-avs/node-index: {{ $nodeIndex | quote }} +{{- end }} \ No newline at end of file diff --git a/helm/commonware-restaking/templates/pvc.yaml b/helm/commonware-restaking/templates/pvc.yaml new file mode 100644 index 0000000..7c1e0ab --- /dev/null +++ b/helm/commonware-restaking/templates/pvc.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: {{ include "commonware-avs.shareddata.fullname" . }} + labels: + {{- include "commonware-avs.labels" . | nindent 4 }} +spec: + accessModes: + - {{ .Values.sharedData.accessMode }} + {{- if .Values.sharedData.storageClass }} + storageClassName: {{ .Values.sharedData.storageClass }} + {{- end }} + resources: + requests: + storage: {{ .Values.sharedData.size }} \ No newline at end of file diff --git a/helm/commonware-restaking/templates/router-deployment.yaml b/helm/commonware-restaking/templates/router-deployment.yaml new file mode 100644 index 0000000..38e31f0 --- /dev/null +++ b/helm/commonware-restaking/templates/router-deployment.yaml @@ -0,0 +1,176 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "commonware-avs.router.fullname" . }} + labels: + {{- include "commonware-avs.labels" . | nindent 4 }} + app.kubernetes.io/component: router +spec: + replicas: 1 + selector: + matchLabels: + {{- include "commonware-avs.selectorLabels" . 
| nindent 6 }} + app.kubernetes.io/component: router + template: + metadata: + labels: + {{- include "commonware-avs.selectorLabels" . | nindent 8 }} + app.kubernetes.io/component: router + spec: + initContainers: + # Wait for setup job to fully complete (check for .setup_complete marker) + - name: wait-for-setup + image: busybox:1.36 + command: + - sh + - -c + - | + echo "Waiting for setup to complete..." + until [ -f /app/.nodes/.setup_complete ]; do + echo "Setup not complete, waiting for .setup_complete marker..." + sleep 10 + done + echo "Setup complete! Contracts deployed and operators registered." + volumeMounts: + - name: shared-data + mountPath: /app/.nodes + readOnly: true + + # Wait for ethereum RPC to be accessible + - name: wait-for-ethereum + image: busybox:1.36 + command: + - sh + - -c + - | + echo "Waiting for Ethereum RPC..." + until nc -z {{ include "commonware-avs.ethereum.fullname" . }} {{ .Values.ethereum.service.port }}; do + echo "Ethereum not ready, waiting..." + sleep 5 + done + echo "Ethereum is ready!" + + # Wait for nodes to be ready + - name: wait-for-nodes + image: busybox:1.36 + command: + - sh + - -c + - | + echo "Waiting for nodes to be ready..." + {{- $nodeCount := int .Values.global.nodeCount }} + {{- range $i := until $nodeCount }} + {{- $nodeIndex := add $i 1 }} + {{- $nodePort := add (int $.Values.node.basePort) $i }} + until nc -z {{ include "commonware-avs.node.fullname" $ }}-{{ $nodeIndex }} {{ $nodePort }}; do + echo "Node {{ $nodeIndex }} not ready, waiting..." + sleep 5 + done + echo "Node {{ $nodeIndex }} is ready!" + {{- end }} + echo "All nodes are ready!" 
+ + {{- if .Values.orchestrator.routerPrivateKey }} + # Create router_orchestrator.json from secret if provided + - name: setup-router-key + image: busybox:1.36 + command: + - sh + - -c + - | + echo '{"privateKey":"'$ROUTER_KEY'"}' > /app/router_orchestrator.json + echo "Router orchestrator key file created from secret" + env: + - name: ROUTER_KEY + valueFrom: + secretKeyRef: + name: {{ include "commonware-avs.secret.fullname" . }} + key: ROUTER_ORCHESTRATOR_PRIVATE_KEY + volumeMounts: + - name: router-key + mountPath: /app + {{- end }} + + containers: + - name: router + image: "{{ .Values.router.image.repository }}:{{ .Values.router.image.tag }}" + imagePullPolicy: {{ .Values.router.image.pullPolicy }} + args: + - "--key-file" + {{- if .Values.orchestrator.routerPrivateKey }} + - "/app/router_orchestrator.json" + {{- else }} + # Use the orchestrator key from the image if not provided via secret + - "/app/config/router_orchestrator.json" + {{- end }} + - "--port" + - {{ .Values.router.port | quote }} + env: + - name: HTTP_RPC + value: "http://{{ include "commonware-avs.ethereum.fullname" . }}:{{ .Values.ethereum.service.port }}" + - name: WS_RPC + value: "ws://{{ include "commonware-avs.ethereum.fullname" . }}:{{ .Values.ethereum.service.port }}" + - name: AVS_DEPLOYMENT_PATH + value: "/app/.nodes/avs_deploy.json" + - name: INGRESS + value: {{ .Values.router.ingress.enabled | quote }} + - name: INGRESS_TIMEOUT_MS + value: {{ .Values.router.ingress.timeoutMs | quote }} + - name: PRIVATE_KEY + valueFrom: + secretKeyRef: + name: {{ include "commonware-avs.secret.fullname" . 
}} + key: PRIVATE_KEY + ports: + - name: router + containerPort: {{ .Values.router.port }} + protocol: TCP + - name: ingress + containerPort: {{ .Values.router.ingressPort }} + protocol: TCP + # Note: No liveness/readiness probes on P2P ports + # TCP socket probes cause noisy "failed to upgrade connection" logs + # The router process will be restarted by Kubernetes if it crashes + volumeMounts: + - name: shared-data + mountPath: /app/.nodes + readOnly: true + {{- if .Values.orchestrator.routerPrivateKey }} + - name: router-key + mountPath: /app/router_orchestrator.json + subPath: router_orchestrator.json + readOnly: true + {{- end }} + resources: + {{- toYaml .Values.router.resources | nindent 12 }} + + volumes: + - name: shared-data + persistentVolumeClaim: + claimName: {{ include "commonware-avs.shareddata.fullname" . }} + {{- if .Values.orchestrator.routerPrivateKey }} + - name: router-key + emptyDir: {} + {{- end }} +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ include "commonware-avs.router.fullname" . }} + labels: + {{- include "commonware-avs.labels" . | nindent 4 }} + app.kubernetes.io/component: router +spec: + type: {{ .Values.router.service.type }} + ports: + - port: {{ .Values.router.service.port }} + targetPort: router + protocol: TCP + name: router + - port: {{ .Values.router.service.ingressPort }} + targetPort: ingress + protocol: TCP + name: ingress + selector: + {{- include "commonware-avs.selectorLabels" . | nindent 4 }} + app.kubernetes.io/component: router \ No newline at end of file diff --git a/helm/commonware-restaking/templates/secret.yaml b/helm/commonware-restaking/templates/secret.yaml new file mode 100644 index 0000000..7d5d7c9 --- /dev/null +++ b/helm/commonware-restaking/templates/secret.yaml @@ -0,0 +1,20 @@ +{{- if not .Values.secrets.existingSecret }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "commonware-avs.secret.fullname" . }} + labels: + {{- include "commonware-avs.labels" . 
| nindent 4 }} +type: Opaque +stringData: + PRIVATE_KEY: {{ required "secrets.privateKey is required" .Values.secrets.privateKey | quote }} + FUNDED_KEY: {{ required "secrets.fundedKey is required" .Values.secrets.fundedKey | quote }} + {{- if .Values.secrets.forkUrl }} + FORK_URL: {{ .Values.secrets.forkUrl | quote }} + {{- end }} + {{- if .Values.orchestrator.routerPrivateKey }} + # Router orchestrator private key (BLS private key for the router) + # If not provided, the router will use /app/config/router_orchestrator.json from the image + ROUTER_ORCHESTRATOR_PRIVATE_KEY: {{ .Values.orchestrator.routerPrivateKey | quote }} + {{- end }} +{{- end }} \ No newline at end of file diff --git a/helm/commonware-restaking/templates/setup-job.yaml b/helm/commonware-restaking/templates/setup-job.yaml new file mode 100644 index 0000000..07e9b30 --- /dev/null +++ b/helm/commonware-restaking/templates/setup-job.yaml @@ -0,0 +1,120 @@ +{{- if and .Values.ethereum.enabled .Values.secrets.forkUrl }} +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ include "commonware-avs.setup.fullname" . }} + labels: + {{- include "commonware-avs.labels" . | nindent 4 }} + app.kubernetes.io/component: setup + annotations: + # Only run on fresh install, not on upgrades (to preserve existing deployment state) + "helm.sh/hook": post-install + "helm.sh/hook-weight": "0" + "helm.sh/hook-delete-policy": before-hook-creation +spec: + backoffLimit: 3 + ttlSecondsAfterFinished: 3600 + template: + metadata: + labels: + {{- include "commonware-avs.selectorLabels" . | nindent 8 }} + app.kubernetes.io/component: setup + spec: + restartPolicy: OnFailure + initContainers: + # Wait for ethereum to be ready + - name: wait-for-ethereum + image: busybox:1.36 + command: + - sh + - -c + - | + echo "Waiting for Ethereum RPC..." + until nc -z {{ include "commonware-avs.ethereum.fullname" . }} {{ .Values.ethereum.service.port }}; do + echo "Ethereum not ready, waiting..." 
+ sleep 5 + done + echo "Ethereum is ready!" + + containers: + # Run EigenLayer setup (creates operator keys and avs_deploy.json) + # Note: The eigenlayer container may exit with non-zero code even on success, + # so we wrap it to check if files were created and exit 0 if so + - name: eigenlayer-setup + image: "{{ .Values.eigenlayer.image.repository }}:{{ .Values.eigenlayer.image.tag }}" + imagePullPolicy: {{ .Values.eigenlayer.image.pullPolicy }} + command: + - /bin/bash + - -c + - | + set +e + # Run the eigenlayer setup (it may exit with non-zero even on success) + /main.sh || true + + # Verify that required files were created + if [ -f /root/.nodes/avs_deploy.json ]; then + echo "avs_deploy.json created successfully" + # Check if all operator keys exist + {{- $nodeCount := int .Values.global.nodeCount }} + {{- range $i := until $nodeCount }} + {{- $nodeIndex := add $i 1 }} + if [ ! -f "/root/.nodes/operator_keys/testacc{{ $nodeIndex }}.private.bls.key.json" ]; then + echo "Error: testacc{{ $nodeIndex }}.private.bls.key.json not found" + exit 1 + fi + {{- end }} + echo "All operator keys created successfully" + # Create marker file to signal that setup is fully complete + # Nodes and router wait for this file instead of just avs_deploy.json + touch /root/.nodes/.setup_complete + echo "Setup completion marker created" + echo "EigenLayer setup completed successfully" + exit 0 + else + echo "Error: avs_deploy.json was not created" + exit 1 + fi + env: + - name: RPC_URL + value: "http://{{ include "commonware-avs.ethereum.fullname" . 
}}:{{ .Values.ethereum.service.port }}" + - name: TEST_ACCOUNTS + value: {{ .Values.global.nodeCount | quote }} + - name: ENVIRONMENT + value: {{ .Values.global.environment | lower | quote }} + - name: DELEGATION_MANAGER_ADDRESS + value: {{ .Values.eigenlayer.contracts.delegationManager | quote }} + - name: STRATEGY_MANAGER_ADDRESS + value: {{ .Values.eigenlayer.contracts.strategyManager | quote }} + - name: LST_CONTRACT_ADDRESS + value: {{ .Values.eigenlayer.contracts.lstContract | quote }} + - name: LST_STRATEGY_ADDRESS + value: {{ .Values.eigenlayer.contracts.lstStrategy | quote }} + - name: ALLOCATION_MANAGER_ADDRESS + value: {{ .Values.eigenlayer.contracts.allocationManager | quote }} + - name: PRIVATE_KEY + valueFrom: + secretKeyRef: + name: {{ include "commonware-avs.secret.fullname" . }} + key: PRIVATE_KEY + - name: FUNDED_KEY + valueFrom: + secretKeyRef: + name: {{ include "commonware-avs.secret.fullname" . }} + key: FUNDED_KEY + volumeMounts: + - name: shared-data + mountPath: /root/.nodes + - name: config + mountPath: /bls-middleware/contracts/docker/eigenlayer/config.json + subPath: config.json + resources: + {{- toYaml .Values.eigenlayer.resources | nindent 12 }} + + volumes: + - name: shared-data + persistentVolumeClaim: + claimName: {{ include "commonware-avs.shareddata.fullname" . }} + - name: config + configMap: + name: {{ include "commonware-avs.config.fullname" . }} +{{- end }} \ No newline at end of file diff --git a/helm/commonware-restaking/templates/signer-deployment.yaml b/helm/commonware-restaking/templates/signer-deployment.yaml new file mode 100644 index 0000000..1ff240d --- /dev/null +++ b/helm/commonware-restaking/templates/signer-deployment.yaml @@ -0,0 +1,115 @@ +{{- if .Values.signer.enabled }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "commonware-avs.signer.fullname" . }} + labels: + {{- include "commonware-avs.labels" . 
| nindent 4 }} + app.kubernetes.io/component: signer +spec: + replicas: 1 + selector: + matchLabels: + {{- include "commonware-avs.selectorLabels" . | nindent 6 }} + app.kubernetes.io/component: signer + template: + metadata: + labels: + {{- include "commonware-avs.selectorLabels" . | nindent 8 }} + app.kubernetes.io/component: signer + spec: + initContainers: + # Wait for setup job to complete before starting signer + # Signer needs the operator keys created by the eigenlayer setup + - name: wait-for-setup + image: busybox:1.36 + command: + - sh + - -c + - | + echo "Waiting for setup to complete..." + until [ -f /app/.nodes/.setup_complete ]; do + echo "Setup not complete, waiting for .setup_complete marker..." + sleep 10 + done + echo "Setup complete! Starting signer..." + volumeMounts: + - name: shared-data + mountPath: /app/.nodes + readOnly: true + containers: + - name: signer + image: "{{ .Values.signer.image.repository }}:{{ .Values.signer.image.tag }}" + imagePullPolicy: {{ .Values.signer.image.pullPolicy }} + env: + - name: METRICS_PORT + value: {{ .Values.signer.service.metricsPort | quote }} + ports: + - name: grpc + containerPort: {{ .Values.signer.service.grpcPort }} + protocol: TCP + - name: metrics + containerPort: {{ .Values.signer.service.metricsPort }} + protocol: TCP + livenessProbe: + tcpSocket: + port: grpc + initialDelaySeconds: 30 + periodSeconds: 15 + failureThreshold: 3 + readinessProbe: + tcpSocket: + port: grpc + initialDelaySeconds: 10 + periodSeconds: 5 + failureThreshold: 3 + resources: + {{- toYaml .Values.signer.resources | nindent 12 }} + + volumes: + - name: shared-data + persistentVolumeClaim: + claimName: {{ include "commonware-avs.shareddata.fullname" . }} +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ include "commonware-avs.signer.fullname" . }} + labels: + {{- include "commonware-avs.labels" . 
| nindent 4 }} + app.kubernetes.io/component: signer +spec: + type: {{ .Values.signer.service.type }} + ports: + - port: {{ .Values.signer.service.grpcPort }} + targetPort: grpc + protocol: TCP + name: grpc + - port: {{ .Values.signer.service.metricsPort }} + targetPort: metrics + protocol: TCP + name: metrics + selector: + {{- include "commonware-avs.selectorLabels" . | nindent 4 }} + app.kubernetes.io/component: signer +--- +# Alias service named "signer" for eigenlayer compatibility +# The eigenlayer image expects to connect to signer:50051 +apiVersion: v1 +kind: Service +metadata: + name: signer + labels: + {{- include "commonware-avs.labels" . | nindent 4 }} + app.kubernetes.io/component: signer +spec: + type: ClusterIP + ports: + - port: {{ .Values.signer.service.grpcPort }} + targetPort: grpc + protocol: TCP + name: grpc + selector: + {{- include "commonware-avs.selectorLabels" . | nindent 4 }} + app.kubernetes.io/component: signer +{{- end }} \ No newline at end of file diff --git a/helm/commonware-restaking/values.yaml b/helm/commonware-restaking/values.yaml new file mode 100644 index 0000000..e929114 --- /dev/null +++ b/helm/commonware-restaking/values.yaml @@ -0,0 +1,176 @@ +# Default values for commonware-avs Helm chart + +# Global settings +global: + # Environment mode: LOCAL or TESTNET + # LOCAL: Uses Anvil with forked Holesky, auto-deploys contracts + # TESTNET: Connects to real Holesky testnet + environment: LOCAL + + # Number of operator nodes to deploy + nodeCount: 3 + +# Secrets configuration +# All sensitive values should be provided via --set or existingSecret +secrets: + # Use an existing Kubernetes secret instead of creating one + # Required keys: PRIVATE_KEY, FUNDED_KEY, FORK_URL + existingSecret: "" + + # Anvil default key - NEVER use in production! + # Override with: --set secrets.privateKey= + privateKey: "" + fundedKey: "" + + # Fork URL for Anvil (contains API key - sensitive!) 
+ # REQUIRED for LOCAL mode - the chart will not work without this! + # Without forkUrl, ethereum and setup jobs are not deployed. + # Override with: --set secrets.forkUrl= + # Example: --set secrets.forkUrl=https://eth-holesky.g.alchemy.com/v2/YOUR_API_KEY + forkUrl: "" + +# Ethereum (Anvil) configuration - only used in LOCAL mode +# TODO: Anvil stores state in-memory. If this pod restarts, all deployed contracts are lost! +ethereum: + enabled: true + image: + repository: ghcr.io/breadchaincoop/ethereum + tag: dev + pullPolicy: IfNotPresent + service: + type: ClusterIP + port: 8545 + resources: + requests: + memory: "1Gi" + cpu: "500m" + limits: + memory: "2Gi" + cpu: "1000m" + +# EigenLayer setup job configuration +eigenlayer: + image: + repository: ghcr.io/breadchaincoop/eigenlayer + tag: dev + pullPolicy: IfNotPresent + # Holesky testnet contract addresses (forked by Anvil in LOCAL mode) + contracts: + delegationManager: "0xA44151489861Fe9e3055d95adC98FbD462B948e7" + strategyManager: "0xdfB5f6CE42aAA7830E94ECFCcAd411beF4d4D5b6" + lstContract: "0x3F1c547b21f65e10480dE3ad8E19fAAC46C95034" + lstStrategy: "0x7D704507b76571a51d9caE8AdDAbBFd0ba0e63d3" + allocationManager: "0x78469728304326CBc65f8f95FA756B0B73164462" + resources: + requests: + memory: "1Gi" + cpu: "500m" + limits: + memory: "2Gi" + cpu: "1000m" + +# Signer (Cerberus) configuration +signer: + enabled: true + image: + repository: ghcr.io/layr-labs/cerberus + tag: "0.0.2" + pullPolicy: IfNotPresent + service: + type: ClusterIP + grpcPort: 50051 + metricsPort: 9081 + resources: + requests: + memory: "128Mi" + cpu: "100m" + limits: + memory: "256Mi" + cpu: "200m" + +# AVS Node configuration +node: + image: + # For local development, build and load the image into minikube: + # docker build -t commonware-avs-node:local -f usecases/counter/node/Dockerfile . 
+ # minikube image load commonware-avs-node:local + repository: commonware-avs-node + tag: local + pullPolicy: IfNotPresent + # Base port for nodes (node-1 uses basePort, node-2 uses basePort+1, etc.) + basePort: 3001 + resources: + requests: + memory: "256Mi" + cpu: "100m" + limits: + memory: "512Mi" + cpu: "250m" + +# Router configuration +router: + image: + # For local development, build and load the image into minikube: + # docker build -t commonware-avs-router:local -f usecases/counter/router/Dockerfile . + # minikube image load commonware-avs-router:local + repository: commonware-avs-router + tag: local + pullPolicy: IfNotPresent + port: 3000 + ingressPort: 8080 + ingress: + enabled: false + timeoutMs: 120000 + service: + type: ClusterIP + port: 3000 + ingressPort: 8080 + resources: + requests: + memory: "256Mi" + cpu: "100m" + limits: + memory: "512Mi" + cpu: "250m" + +# Orchestrator configuration +# The orchestrator configs are included in the Docker image at /app/config/ +# You can override them here if needed, otherwise they will be used from the image +orchestrator: + # Public orchestrator config (JSON string) - optional override + # If not provided, will use /app/config/public_orchestrator.json from the image + # NOTE: The image file has "address": "router" which works for docker-compose. + # For K8s, you need to override with the correct service name. 
+ # The service name will be: -commonware-avs-router + publicConfig: | + { + "g2_x1": "20265730220917057623326116620721648047640065506233168445998945605458084341755", + "g2_x2": "1537141129484558011683382469842956131676085503509229854572844956364492197092", + "g2_y1": "4380068110839997539835821427545270098552639074995346826656804866303457881635", + "g2_y2": "479676018937294309080674601592141614301396550682703157902264620243097107417", + "port": "3000", + "address": "commonware-avs-router" + } + # Router private key (BLS private key) - stored in Secret for security + # If not provided, will use /app/config/router_orchestrator.json from the image + # WARNING: The image contains a default private key. For production, always provide your own! + # Format: "21757297277259392731337564964512162273983455244457107194284923166011566394548" + routerPrivateKey: "" + +# Shared data volume for keys and deployment configs +sharedData: + storageClass: "" + size: 1Gi + accessMode: ReadWriteOnce + +# Kubernetes Ingress for external access (optional) +ingress: + enabled: false + className: "" + annotations: {} + hosts: + - host: commonware-avs.local + paths: + - path: / + pathType: Prefix + tls: [] \ No newline at end of file From ee3550a8ce85c8f3486c2479cc65765034886ee8 Mon Sep 17 00:00:00 2001 From: Dijar Llozana Date: Tue, 9 Dec 2025 15:36:07 +0100 Subject: [PATCH 2/5] Added EOF --- helm/commonware-restaking/Chart.yaml | 2 +- helm/commonware-restaking/templates/NOTES.txt | 2 +- helm/commonware-restaking/templates/_helpers.tpl | 2 +- helm/commonware-restaking/templates/configmap.yaml | 2 +- helm/commonware-restaking/templates/ethereum-deployment.yaml | 2 +- helm/commonware-restaking/templates/ingress.yaml | 2 +- helm/commonware-restaking/templates/node-deployment.yaml | 2 +- helm/commonware-restaking/templates/pvc.yaml | 2 +- helm/commonware-restaking/templates/router-deployment.yaml | 2 +- helm/commonware-restaking/templates/secret.yaml | 2 +- 
helm/commonware-restaking/templates/setup-job.yaml | 2 +- helm/commonware-restaking/templates/signer-deployment.yaml | 2 +- helm/commonware-restaking/values.yaml | 2 +- 13 files changed, 13 insertions(+), 13 deletions(-) diff --git a/helm/commonware-restaking/Chart.yaml b/helm/commonware-restaking/Chart.yaml index ebf980a..c833872 100644 --- a/helm/commonware-restaking/Chart.yaml +++ b/helm/commonware-restaking/Chart.yaml @@ -10,4 +10,4 @@ keywords: - eigenlayer - ethereum maintainers: - - name: Breadchain \ No newline at end of file + - name: Breadchain diff --git a/helm/commonware-restaking/templates/NOTES.txt b/helm/commonware-restaking/templates/NOTES.txt index 4afb34e..430d741 100644 --- a/helm/commonware-restaking/templates/NOTES.txt +++ b/helm/commonware-restaking/templates/NOTES.txt @@ -58,4 +58,4 @@ Check shared data volume: Environment: {{ .Values.global.environment }} Node Count: {{ .Values.global.nodeCount }} -Usecase: {{ .Values.node.usecase }} \ No newline at end of file +Usecase: {{ .Values.node.usecase }} diff --git a/helm/commonware-restaking/templates/_helpers.tpl b/helm/commonware-restaking/templates/_helpers.tpl index 3ba0f9e..f55dfda 100644 --- a/helm/commonware-restaking/templates/_helpers.tpl +++ b/helm/commonware-restaking/templates/_helpers.tpl @@ -106,4 +106,4 @@ Secret name - supports existing secret or creates new one {{- else }} {{- printf "%s-secret" (include "commonware-avs.fullname" .) 
}} {{- end }} -{{- end }} \ No newline at end of file +{{- end }} diff --git a/helm/commonware-restaking/templates/configmap.yaml b/helm/commonware-restaking/templates/configmap.yaml index d09ddb5..05a5142 100644 --- a/helm/commonware-restaking/templates/configmap.yaml +++ b/helm/commonware-restaking/templates/configmap.yaml @@ -32,4 +32,4 @@ data: # If not provided, will use the file from the image at /app/config/public_orchestrator.json public_orchestrator.json: | {{ .Values.orchestrator.publicConfig | indent 4 }} - {{- end }} \ No newline at end of file + {{- end }} diff --git a/helm/commonware-restaking/templates/ethereum-deployment.yaml b/helm/commonware-restaking/templates/ethereum-deployment.yaml index 18fda7d..4a6361b 100644 --- a/helm/commonware-restaking/templates/ethereum-deployment.yaml +++ b/helm/commonware-restaking/templates/ethereum-deployment.yaml @@ -66,4 +66,4 @@ spec: selector: {{- include "commonware-avs.selectorLabels" . | nindent 4 }} app.kubernetes.io/component: ethereum -{{- end }} \ No newline at end of file +{{- end }} diff --git a/helm/commonware-restaking/templates/ingress.yaml b/helm/commonware-restaking/templates/ingress.yaml index 16fee27..1c9df88 100644 --- a/helm/commonware-restaking/templates/ingress.yaml +++ b/helm/commonware-restaking/templates/ingress.yaml @@ -38,4 +38,4 @@ spec: name: ingress {{- end }} {{- end }} -{{- end }} \ No newline at end of file +{{- end }} diff --git a/helm/commonware-restaking/templates/node-deployment.yaml b/helm/commonware-restaking/templates/node-deployment.yaml index f6634a7..79da44c 100644 --- a/helm/commonware-restaking/templates/node-deployment.yaml +++ b/helm/commonware-restaking/templates/node-deployment.yaml @@ -138,4 +138,4 @@ spec: {{- include "commonware-avs.selectorLabels" $root | nindent 4 }} app.kubernetes.io/component: node commonware-avs/node-index: {{ $nodeIndex | quote }} -{{- end }} \ No newline at end of file +{{- end }} diff --git a/helm/commonware-restaking/templates/pvc.yaml 
b/helm/commonware-restaking/templates/pvc.yaml index 7c1e0ab..c625583 100644 --- a/helm/commonware-restaking/templates/pvc.yaml +++ b/helm/commonware-restaking/templates/pvc.yaml @@ -12,4 +12,4 @@ spec: {{- end }} resources: requests: - storage: {{ .Values.sharedData.size }} \ No newline at end of file + storage: {{ .Values.sharedData.size }} diff --git a/helm/commonware-restaking/templates/router-deployment.yaml b/helm/commonware-restaking/templates/router-deployment.yaml index 38e31f0..ff910dd 100644 --- a/helm/commonware-restaking/templates/router-deployment.yaml +++ b/helm/commonware-restaking/templates/router-deployment.yaml @@ -173,4 +173,4 @@ spec: name: ingress selector: {{- include "commonware-avs.selectorLabels" . | nindent 4 }} - app.kubernetes.io/component: router \ No newline at end of file + app.kubernetes.io/component: router diff --git a/helm/commonware-restaking/templates/secret.yaml b/helm/commonware-restaking/templates/secret.yaml index 7d5d7c9..b2df635 100644 --- a/helm/commonware-restaking/templates/secret.yaml +++ b/helm/commonware-restaking/templates/secret.yaml @@ -17,4 +17,4 @@ stringData: # If not provided, the router will use /app/config/router_orchestrator.json from the image ROUTER_ORCHESTRATOR_PRIVATE_KEY: {{ .Values.orchestrator.routerPrivateKey | quote }} {{- end }} -{{- end }} \ No newline at end of file +{{- end }} diff --git a/helm/commonware-restaking/templates/setup-job.yaml b/helm/commonware-restaking/templates/setup-job.yaml index 07e9b30..9947743 100644 --- a/helm/commonware-restaking/templates/setup-job.yaml +++ b/helm/commonware-restaking/templates/setup-job.yaml @@ -117,4 +117,4 @@ spec: - name: config configMap: name: {{ include "commonware-avs.config.fullname" . 
}} -{{- end }} \ No newline at end of file +{{- end }} diff --git a/helm/commonware-restaking/templates/signer-deployment.yaml b/helm/commonware-restaking/templates/signer-deployment.yaml index 1ff240d..4e88db6 100644 --- a/helm/commonware-restaking/templates/signer-deployment.yaml +++ b/helm/commonware-restaking/templates/signer-deployment.yaml @@ -112,4 +112,4 @@ spec: selector: {{- include "commonware-avs.selectorLabels" . | nindent 4 }} app.kubernetes.io/component: signer -{{- end }} \ No newline at end of file +{{- end }} diff --git a/helm/commonware-restaking/values.yaml b/helm/commonware-restaking/values.yaml index e929114..76bbc5d 100644 --- a/helm/commonware-restaking/values.yaml +++ b/helm/commonware-restaking/values.yaml @@ -173,4 +173,4 @@ ingress: paths: - path: / pathType: Prefix - tls: [] \ No newline at end of file + tls: [] From 576b5bc9646fce651494aebc7a368e1c521a1236 Mon Sep 17 00:00:00 2001 From: Dijar Llozana Date: Wed, 10 Dec 2025 03:17:27 +0100 Subject: [PATCH 3/5] Added helm chart github integration tests --- .github/workflows/helm-integration-tests.yml | 448 +++++++++++++++++++ scripts/helm_test.sh | 359 +++++++++++++++ 2 files changed, 807 insertions(+) create mode 100644 .github/workflows/helm-integration-tests.yml create mode 100644 scripts/helm_test.sh diff --git a/.github/workflows/helm-integration-tests.yml b/.github/workflows/helm-integration-tests.yml new file mode 100644 index 0000000..2a00577 --- /dev/null +++ b/.github/workflows/helm-integration-tests.yml @@ -0,0 +1,448 @@ +name: Helm Integration Test + +on: + push: + branches: + - main + - dev + - staging + pull_request: + branches: + - main + - dev + - staging + +jobs: + helm-test: + name: Test with Helm Charts + runs-on: ubuntu-latest + + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + - name: Create kind cluster + uses: helm/kind-action@v1.10.0 + with: + cluster_name: commonware-test + 
wait: 120s + node_image: kindest/node:v1.27.3 + + - name: Install Helm + uses: azure/setup-helm@v4 + with: + version: 'latest' + + - name: Build Docker images + run: | + echo "Building router image..." + docker build -f ./usecases/counter/router/Dockerfile -t commonware-avs-router:local . + + echo "Building node image..." + docker build -f ./usecases/counter/node/Dockerfile -t commonware-avs-node:local . + + - name: Load images into kind cluster + run: | + echo "Loading router image into kind..." + kind load docker-image commonware-avs-router:local --name commonware-test + + echo "Loading node image into kind..." + kind load docker-image commonware-avs-node:local --name commonware-test + + - name: Verify cluster is ready + run: | + kubectl cluster-info + kubectl get nodes + kubectl get pods -A + + - name: Install Helm chart + run: | + # Set fork URL from secret or use public Holesky + if [ -n "${{ secrets.RPC_URL }}" ]; then + FORK_URL="${{ secrets.RPC_URL }}" + else + FORK_URL="https://ethereum-holesky.publicnode.com" + fi + + echo "Installing Helm chart with fork URL: $FORK_URL" + + # Install without --wait to avoid circular dependency + # (pods wait for setup job, setup job is post-install hook) + helm install commonware-avs ./helm/commonware-restaking \ + --set global.environment=LOCAL \ + --set global.nodeCount=3 \ + --set secrets.forkUrl="$FORK_URL" \ + --set secrets.privateKey="$PRIVATE_KEY" \ + --set secrets.fundedKey="$FUNDED_KEY" \ + --set node.image.repository=commonware-avs-node \ + --set node.image.tag=local \ + --set node.image.pullPolicy=Never \ + --set router.image.repository=commonware-avs-router \ + --set router.image.tag=local \ + --set router.image.pullPolicy=Never \ + --set sharedData.storageClass="" + + - name: Check deployment status + run: | + echo "=== Helm Release Status ===" + helm status commonware-avs + + echo "=== All Pods ===" + kubectl get pods -o wide + + echo "=== All Services ===" + kubectl get services + + - name: Wait for setup 
job to complete + run: | + echo "Waiting for setup job to complete..." + + # Find the actual setup job name (includes chart name) + SETUP_JOB=$(kubectl get jobs -o name | grep setup | head -1) + + if [ -z "$SETUP_JOB" ]; then + echo "Setup job not found!" + kubectl get jobs + exit 1 + fi + + echo "Found setup job: $SETUP_JOB" + kubectl wait --for=condition=complete "$SETUP_JOB" --timeout=300s + + echo "Setup job completed successfully" + kubectl logs "$SETUP_JOB" --tail=30 + + - name: Wait for all pods to be ready + run: | + echo "Waiting for ethereum pod..." + kubectl wait --for=condition=ready pod -l app.kubernetes.io/component=ethereum --timeout=180s + + echo "Waiting for signer pod..." + kubectl wait --for=condition=ready pod -l app.kubernetes.io/component=signer --timeout=180s + + echo "Waiting for node pods..." + kubectl wait --for=condition=ready pod -l app.kubernetes.io/component=node --timeout=300s + + echo "Waiting for router pod..." + kubectl wait --for=condition=ready pod -l app.kubernetes.io/component=router --timeout=300s + + echo "All pods are ready!" + + - name: Setup port forwarding for testing + run: | + # Find actual service names (they include the chart name) + ETHEREUM_SERVICE=$(kubectl get services -o name | grep ethereum | head -1 | sed 's|service/||') + ROUTER_SERVICE=$(kubectl get services -o name | grep router | head -1 | sed 's|service/||') + + echo "Ethereum service: $ETHEREUM_SERVICE" + echo "Router service: $ROUTER_SERVICE" + + # Port forward ethereum RPC + kubectl port-forward service/$ETHEREUM_SERVICE 8545:8545 & + + # Port forward router ingress + kubectl port-forward service/$ROUTER_SERVICE 8080:8080 & + + # Wait for port forwards to be ready + sleep 10 + + echo "Port forwarding established" + + - name: Verify shared data was created + run: | + echo "Checking if setup job created the required files..." 
+ + # Get the setup job pod name + SETUP_POD=$(kubectl get pods -l app.kubernetes.io/component=setup -o jsonpath='{.items[0].metadata.name}') + + echo "Setup pod: $SETUP_POD" + + # Check PVC + kubectl get pvc + + echo "Setup job logs:" + kubectl logs $SETUP_POD || true + + - name: Test counter increment functionality + run: | + echo "Testing counter increment..." + + # Wait for services to be fully ready + sleep 20 + + # Get the counter contract address from router pod + ROUTER_POD=$(kubectl get pods -l app.kubernetes.io/component=router -o jsonpath='{.items[0].metadata.name}') + + # Copy avs_deploy.json from router pod + kubectl cp $ROUTER_POD:/app/.nodes/avs_deploy.json ./avs_deploy.json + + if [ ! -f "./avs_deploy.json" ]; then + echo "AVS deployment file not found" + exit 1 + fi + + cat ./avs_deploy.json + + COUNTER_ADDRESS=$(cat ./avs_deploy.json | jq -r '.addresses.counter' || echo "") + if [ -z "$COUNTER_ADDRESS" ]; then + echo "Counter contract address not found in deployment file" + cat ./avs_deploy.json + exit 1 + fi + echo "Counter contract address: $COUNTER_ADDRESS" + + # Read the initial counter value from the smart contract + # The function signature for "number()" is 0x8381f58a + INITIAL_COUNT=$(curl -s -X POST http://localhost:8545 \ + -H "Content-Type: application/json" \ + -d '{ + "jsonrpc":"2.0", + "method":"eth_call", + "params":[{ + "to":"'$COUNTER_ADDRESS'", + "data":"0x8381f58a" + }, "latest"], + "id":1 + }' | jq -r '.result' | xargs printf "%d\n") + + echo "Initial counter value: $INITIAL_COUNT" + echo "INITIAL_COUNT=$INITIAL_COUNT" >> $GITHUB_ENV + echo "COUNTER_ADDRESS=$COUNTER_ADDRESS" >> $GITHUB_ENV + + # Wait for 5 aggregation cycles (30 seconds each by default = 150 seconds) + echo "Waiting for 5 aggregation cycles (150 seconds)..." 
+ sleep 150 + + # Read the final counter value from the smart contract + FINAL_COUNT=$(curl -s -X POST http://localhost:8545 \ + -H "Content-Type: application/json" \ + -d '{ + "jsonrpc":"2.0", + "method":"eth_call", + "params":[{ + "to":"'$COUNTER_ADDRESS'", + "data":"0x8381f58a" + }, "latest"], + "id":1 + }' | jq -r '.result' | xargs printf "%d\n") + + echo "Final counter value: $FINAL_COUNT" + + # Verify increment + if [ "$FINAL_COUNT" -gt "$INITIAL_COUNT" ]; then + echo "✓ Counter successfully incremented from $INITIAL_COUNT to $FINAL_COUNT" + echo "LAST_COUNT=$FINAL_COUNT" >> $GITHUB_ENV + else + echo "✗ Counter did not increment (still at $FINAL_COUNT)" + echo "=== Recent router logs ===" + kubectl logs -l app.kubernetes.io/component=router --tail 50 + exit 1 + fi + + - name: Test with fast aggregation frequency + if: github.event_name == 'push' && github.ref == 'refs/heads/dev' + run: | + echo "Testing with fast aggregation frequency (0.5 seconds)..." + + # Update router deployment with fast aggregation + kubectl set env deployment/commonware-avs-router AGGREGATION_FREQUENCY=0.5 + + # Wait for rollout + kubectl rollout status deployment/commonware-avs-router --timeout=120s + + # Re-establish port forwarding + pkill -f "port-forward.*8545" || true + pkill -f "port-forward.*8080" || true + sleep 5 + + # Find service names + ETHEREUM_SERVICE=$(kubectl get services -o name | grep ethereum | head -1 | sed 's|service/||') + ROUTER_SERVICE=$(kubectl get services -o name | grep router | head -1 | sed 's|service/||') + + kubectl port-forward service/$ETHEREUM_SERVICE 8545:8545 & + kubectl port-forward service/$ROUTER_SERVICE 8080:8080 & + sleep 10 + + # Get the starting counter value from previous step + START_COUNT=${{ env.LAST_COUNT }} + echo "Starting counter value: $START_COUNT" + + # Wait for 1 minute with fast aggregation (0.5 seconds each cycle) + echo "Waiting for 1 minute with fast aggregation..." 
+ sleep 60 + + # Read the counter value after fast aggregation + FAST_COUNT=$(curl -s -X POST http://localhost:8545 \ + -H "Content-Type: application/json" \ + -d '{ + "jsonrpc":"2.0", + "method":"eth_call", + "params":[{ + "to":"'${{ env.COUNTER_ADDRESS }}'", + "data":"0x8381f58a" + }, "latest"], + "id":1 + }' | jq -r '.result' | xargs printf "%d\n") + + echo "Counter value after fast aggregation: $FAST_COUNT" + + # Verify fast aggregation worked (should have multiple increments) + if [ "$FAST_COUNT" -gt "$START_COUNT" ]; then + INCREMENTS=$((FAST_COUNT - START_COUNT)) + echo "✓ Fast aggregation successful: $INCREMENTS increments in 1 minute" + echo "LAST_COUNT=$FAST_COUNT" >> $GITHUB_ENV + else + echo "✗ Fast aggregation failed (counter still at $FAST_COUNT)" + kubectl logs -l app.kubernetes.io/component=router --tail 50 + exit 1 + fi + + - name: Test with ingress enabled + if: github.event_name == 'push' && github.ref == 'refs/heads/dev' + run: | + echo "Testing with ingress enabled..." 
+ + # Find the actual router deployment name (includes chart name) + ROUTER_DEPLOYMENT=$(kubectl get deployments -o name | grep router | head -1 | sed 's|deployment.apps/||') + echo "Router deployment: $ROUTER_DEPLOYMENT" + + # Update router deployment with ingress enabled + kubectl set env deployment/$ROUTER_DEPLOYMENT INGRESS=true + + # Wait for rollout + kubectl rollout status deployment/$ROUTER_DEPLOYMENT --timeout=120s + + # Re-establish port forwarding + pkill -f "port-forward.*8545" || true + pkill -f "port-forward.*8080" || true + sleep 5 + + # Find service names + ETHEREUM_SERVICE=$(kubectl get services -o name | grep ethereum | head -1 | sed 's|service/||') + ROUTER_SERVICE=$(kubectl get services -o name | grep router | head -1 | sed 's|service/||') + + kubectl port-forward service/$ETHEREUM_SERVICE 8545:8545 & + kubectl port-forward service/$ROUTER_SERVICE 8080:8080 & + sleep 15 + + # Get the starting counter value from previous step + START_COUNT=${{ env.LAST_COUNT }} + echo "Starting counter value: $START_COUNT" + + # Send ingress requests to trigger increments + echo "Sending ingress requests to /trigger endpoint..." + for i in {1..5}; do + echo "=== Sending ingress request $i ===" + RESPONSE=$(curl -s -w "\nHTTP_STATUS:%{http_code}" -X POST http://localhost:8080/trigger \ + -H "Content-Type: application/json" \ + -d '{"body": {"metadata": {"request_id": "'$i'", "action": "increment"}}}') + HTTP_STATUS=$(echo "$RESPONSE" | tail -n 1 | cut -d: -f2) + BODY=$(echo "$RESPONSE" | head -n -1) + echo "Response: $BODY" + echo "HTTP Status: $HTTP_STATUS" + + if [ "$HTTP_STATUS" != "200" ]; then + echo "Warning: HTTP request failed with status $HTTP_STATUS" + fi + sleep 1 + done + + # Wait for aggregation to process the ingress requests + echo "Waiting for aggregation to process ingress requests..." 
+ sleep 15 + + # Read the counter value after ingress + COUNTER_ADDR="${{ env.COUNTER_ADDRESS }}" + COUNTER_RESPONSE=$(curl -s -X POST http://localhost:8545 \ + -H "Content-Type: application/json" \ + -d '{ + "jsonrpc":"2.0", + "method":"eth_call", + "params":[{ + "to":"'$COUNTER_ADDR'", + "data":"0x8381f58a" + }, "latest"], + "id":1 + }') + + COUNTER_HEX=$(echo "$COUNTER_RESPONSE" | jq -r '.result') + + # Handle empty or invalid response + if [ -z "$COUNTER_HEX" ] || [ "$COUNTER_HEX" = "null" ] || [ "$COUNTER_HEX" = "0x" ]; then + echo "Warning: Invalid counter response, defaulting to 0" + INGRESS_COUNT=0 + else + INGRESS_COUNT=$(printf "%d\n" "$COUNTER_HEX" 2>/dev/null || echo "0") + fi + + echo "Counter value after ingress: $INGRESS_COUNT" + + # Verify ingress increments worked + if [ "$INGRESS_COUNT" -gt "$START_COUNT" ]; then + INCREMENTS=$((INGRESS_COUNT - START_COUNT)) + echo "✓ Ingress test successful: $INCREMENTS increments after ingress requests" + else + echo "✗ Ingress test failed (counter still at $INGRESS_COUNT)" + kubectl logs -l app.kubernetes.io/component=router --tail 50 + exit 1 + fi + + # Final summary + echo "=== Test Summary ===" + echo "Initial count: ${{ env.INITIAL_COUNT }}" + echo "After default aggregation (5 cycles @ 30s): ${{ env.LAST_COUNT }}" + echo "After fast aggregation: $FAST_COUNT" + echo "After ingress requests (5 requests): $INGRESS_COUNT" + echo "Total increments: $((INGRESS_COUNT - ${{ env.INITIAL_COUNT }}))" + + - name: Collect logs on failure + if: failure() + run: | + echo "=== Helm Status ===" + helm status commonware-avs || true + + echo "=== All Pods Status ===" + kubectl get pods -o wide + + echo "=== PVC Status ===" + kubectl get pvc + + echo "=== Events ===" + kubectl get events --sort-by=.metadata.creationTimestamp + + echo "=== Ethereum Logs ===" + kubectl logs -l app.kubernetes.io/component=ethereum --tail 50 || true + + echo "=== Setup Job Logs ===" + kubectl logs -l app.kubernetes.io/component=setup --tail 100 
|| true + + echo "=== Router Logs ===" + kubectl logs -l app.kubernetes.io/component=router --tail 100 || true + + echo "=== Node-1 Logs ===" + NODE1_POD=$(kubectl get pods -l commonware-avs/node-index=1 -o jsonpath='{.items[0].metadata.name}') + kubectl logs $NODE1_POD --tail 100 || true + + echo "=== Node-2 Logs ===" + NODE2_POD=$(kubectl get pods -l commonware-avs/node-index=2 -o jsonpath='{.items[0].metadata.name}') + kubectl logs $NODE2_POD --tail 100 || true + + echo "=== Node-3 Logs ===" + NODE3_POD=$(kubectl get pods -l commonware-avs/node-index=3 -o jsonpath='{.items[0].metadata.name}') + kubectl logs $NODE3_POD --tail 100 || true + + - name: Cleanup + if: always() + run: | + # Kill port forwarding + pkill -f "port-forward" || true + + # Uninstall helm release + helm uninstall commonware-avs || true + + # Delete the kind cluster + kind delete cluster --name commonware-test || true + diff --git a/scripts/helm_test.sh b/scripts/helm_test.sh new file mode 100644 index 0000000..92da039 --- /dev/null +++ b/scripts/helm_test.sh @@ -0,0 +1,359 @@ +#!/bin/bash + +set -e + +# Colors for output +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +NC='\033[0m' # No Color + +# Configuration +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" &> /dev/null && pwd)" +PROJECT_ROOT="$(dirname "$SCRIPT_DIR")" + +# Default values +CLUSTER_NAME="${CLUSTER_NAME:-commonware-test}" +HELM_RELEASE="${HELM_RELEASE:-commonware-avs}" +FORK_URL="${FORK_URL:-https://ethereum-holesky.publicnode.com}" +NODE_COUNT="${NODE_COUNT:-3}" +PRIVATE_KEY="${PRIVATE_KEY}" +FUNDED_KEY="${FUNDED_KEY}" +TEST_FAST_AGGREGATION="${TEST_FAST_AGGREGATION:-false}" +TEST_INGRESS="${TEST_INGRESS:-false}" + +echo -e "${GREEN}Starting Helm-based Integration Test${NC}" +echo "Project root: $PROJECT_ROOT" +echo "Cluster name: $CLUSTER_NAME" +echo "Helm release: $HELM_RELEASE" + +# Function to read counter value from contract +read_counter() { + local counter_address=$1 + local response=$(curl -s -X POST 
http://localhost:8545 \ + -H "Content-Type: application/json" \ + -d '{ + "jsonrpc":"2.0", + "method":"eth_call", + "params":[{ + "to":"'$counter_address'", + "data":"0x8381f58a" + }, "latest"], + "id":1 + }') + + local hex_value=$(echo "$response" | jq -r '.result') + + if [ -z "$hex_value" ] || [ "$hex_value" = "null" ] || [ "$hex_value" = "0x" ]; then + echo "0" + else + printf "%d\n" "$hex_value" 2>/dev/null || echo "0" + fi +} + +# Step 1: Check if kubectl is available +echo -e "${YELLOW}Step 1: Checking prerequisites...${NC}" +if ! command -v kubectl &> /dev/null; then + echo -e "${RED}kubectl is not installed${NC}" + exit 1 +fi + +if ! command -v helm &> /dev/null; then + echo -e "${RED}helm is not installed${NC}" + exit 1 +fi + +# Step 2: Verify cluster is accessible +echo -e "${YELLOW}Step 2: Verifying cluster access...${NC}" +if ! kubectl cluster-info &> /dev/null; then + echo -e "${RED}Cannot access Kubernetes cluster${NC}" + exit 1 +fi + +echo -e "${GREEN}Cluster is accessible${NC}" +kubectl get nodes + +# Step 3: Build Docker images (if not already built) +echo -e "${YELLOW}Step 3: Building Docker images...${NC}" +cd "$PROJECT_ROOT" + +if [[ "${SKIP_BUILD:-false}" != "true" ]]; then + echo "Building router image..." + docker build -f ./usecases/counter/router/Dockerfile -t commonware-avs-router:local . + + echo "Building node image..." + docker build -f ./usecases/counter/node/Dockerfile -t commonware-avs-node:local . + + # Load images into cluster (kind-specific) + if command -v kind &> /dev/null; then + echo "Loading images into kind cluster..." 
+ kind load docker-image commonware-avs-router:local --name "$CLUSTER_NAME" || true + kind load docker-image commonware-avs-node:local --name "$CLUSTER_NAME" || true + fi +else + echo "Skipping Docker build (SKIP_BUILD=true)" +fi + +# Step 4: Install Helm chart +echo -e "${YELLOW}Step 4: Installing Helm chart...${NC}" + +# Check if release already exists and uninstall it +if helm list -q | grep -q "^${HELM_RELEASE}$"; then + echo "Release $HELM_RELEASE already exists. Uninstalling to ensure clean setup..." + helm uninstall "$HELM_RELEASE" || true + kubectl delete pvc -l app.kubernetes.io/instance="$HELM_RELEASE" || true + echo "Waiting for resources to be cleaned up..." + sleep 10 +fi + +# Run helm install +helm install "$HELM_RELEASE" ./helm/commonware-restaking \ + --set global.environment=LOCAL \ + --set global.nodeCount="$NODE_COUNT" \ + --set secrets.forkUrl="$FORK_URL" \ + --set secrets.privateKey="$PRIVATE_KEY" \ + --set secrets.fundedKey="$FUNDED_KEY" \ + --set node.image.repository=commonware-avs-node \ + --set node.image.tag=local \ + --set node.image.pullPolicy=Never \ + --set router.image.repository=commonware-avs-router \ + --set router.image.tag=local \ + --set router.image.pullPolicy=Never \ + --set sharedData.storageClass="" + +echo -e "${GREEN}Helm chart installed successfully${NC}" + +# Step 5: Wait for setup job +echo -e "${YELLOW}Step 5: Waiting for setup job to complete...${NC}" + +# Find the actual setup job name (includes chart name in it) +SETUP_JOB=$(kubectl get jobs -o name | grep setup | head -1) + +if [ -z "$SETUP_JOB" ]; then + echo -e "${RED}Setup job not found!${NC}" + echo "Available jobs:" + kubectl get jobs + exit 1 +fi + +echo "Found setup job: $SETUP_JOB" +kubectl wait --for=condition=complete "$SETUP_JOB" --timeout=300s + +echo "Setup job completed:" +kubectl logs "$SETUP_JOB" --tail=20 + +# Step 6: Wait for pods to be ready +echo -e "${YELLOW}Step 6: Waiting for all pods to be ready...${NC}" + +echo "Waiting for ethereum 
pod..." +kubectl wait --for=condition=ready pod -l app.kubernetes.io/component=ethereum --timeout=180s + +echo "Waiting for signer pod..." +kubectl wait --for=condition=ready pod -l app.kubernetes.io/component=signer --timeout=180s + +echo "Waiting for node pods..." +kubectl wait --for=condition=ready pod -l app.kubernetes.io/component=node --timeout=300s --all + +echo "Waiting for router pod..." +kubectl wait --for=condition=ready pod -l app.kubernetes.io/component=router --timeout=300s + +echo -e "${GREEN}All pods are ready!${NC}" + +# Step 7: Setup port forwarding +echo -e "${YELLOW}Step 7: Setting up port forwarding...${NC}" + +# Kill any existing port forwards +pkill -f "kubectl port-forward.*8545" 2>/dev/null || true +pkill -f "kubectl port-forward.*8080" 2>/dev/null || true +sleep 2 + +# Find actual service names (they include the chart name) +ETHEREUM_SERVICE=$(kubectl get services -o name | grep ethereum | head -1 | sed 's|service/||') +ROUTER_SERVICE=$(kubectl get services -o name | grep router | head -1 | sed 's|service/||') + +if [ -z "$ETHEREUM_SERVICE" ] || [ -z "$ROUTER_SERVICE" ]; then + echo -e "${RED}Required services not found!${NC}" + kubectl get services + exit 1 +fi + +echo "Ethereum service: $ETHEREUM_SERVICE" +echo "Router service: $ROUTER_SERVICE" + +# Start new port forwards +kubectl port-forward service/$ETHEREUM_SERVICE 8545:8545 & +ETHEREUM_PF_PID=$! + +kubectl port-forward service/$ROUTER_SERVICE 8080:8080 & +ROUTER_PF_PID=$! 
+ +# Wait for port forwards to be ready +sleep 10 + +# Cleanup function +cleanup_port_forwards() { + echo -e "${YELLOW}Cleaning up port forwards...${NC}" + kill $ETHEREUM_PF_PID 2>/dev/null || true + kill $ROUTER_PF_PID 2>/dev/null || true +} + +trap cleanup_port_forwards EXIT + +echo -e "${GREEN}Port forwarding established${NC}" + +# Step 8: Get counter contract address +echo -e "${YELLOW}Step 8: Retrieving contract address...${NC}" + +ROUTER_POD=$(kubectl get pods -l app.kubernetes.io/component=router -o jsonpath='{.items[0].metadata.name}') +kubectl cp $ROUTER_POD:/app/.nodes/avs_deploy.json ./avs_deploy.json + +if [ ! -f "./avs_deploy.json" ]; then + echo -e "${RED}AVS deployment file not found${NC}" + exit 1 +fi + +COUNTER_ADDRESS=$(cat ./avs_deploy.json | jq -r '.addresses.counter') +if [ -z "$COUNTER_ADDRESS" ] || [ "$COUNTER_ADDRESS" = "null" ]; then + echo -e "${RED}Counter contract address not found${NC}" + exit 1 +fi + +echo -e "${GREEN}Counter contract address: $COUNTER_ADDRESS${NC}" + +# Step 9: Test basic counter increment +echo -e "${YELLOW}Step 9: Testing counter increment (default aggregation)...${NC}" + +INITIAL_COUNT=$(read_counter "$COUNTER_ADDRESS") +echo "Initial counter value: $INITIAL_COUNT" + +echo "Waiting for 5 aggregation cycles (150 seconds)..." 
+sleep 150 + +FINAL_COUNT=$(read_counter "$COUNTER_ADDRESS") +echo "Final counter value: $FINAL_COUNT" + +if [ "$FINAL_COUNT" -gt "$INITIAL_COUNT" ]; then + INCREMENTS=$((FINAL_COUNT - INITIAL_COUNT)) + echo -e "${GREEN}✓ Counter successfully incremented from $INITIAL_COUNT to $FINAL_COUNT ($INCREMENTS increments)${NC}" + LAST_COUNT=$FINAL_COUNT +else + echo -e "${RED}✗ Counter did not increment (still at $FINAL_COUNT)${NC}" + echo "Router logs:" + kubectl logs -l app.kubernetes.io/component=router --tail 50 + exit 1 +fi + +# Step 10: Test fast aggregation (optional) +if [ "$TEST_FAST_AGGREGATION" = "true" ]; then + echo -e "${YELLOW}Step 10: Testing fast aggregation (0.5s frequency)...${NC}" + + # Find the actual router deployment name (it includes the chart name) + ROUTER_DEPLOYMENT=$(kubectl get deployments -o name | grep router | head -1 | sed 's|deployment.apps/||') + + if [ -z "$ROUTER_DEPLOYMENT" ]; then + echo -e "${RED}Router deployment not found!${NC}" + kubectl get deployments + exit 1 + fi + + echo "Router deployment: $ROUTER_DEPLOYMENT" + + # Update router deployment + kubectl set env deployment/$ROUTER_DEPLOYMENT AGGREGATION_FREQUENCY=0.5 + kubectl rollout status deployment/$ROUTER_DEPLOYMENT --timeout=120s + + # Re-establish port forwarding + cleanup_port_forwards + sleep 2 + kubectl port-forward service/$ETHEREUM_SERVICE 8545:8545 & + ETHEREUM_PF_PID=$! + kubectl port-forward service/$ROUTER_SERVICE 8080:8080 & + ROUTER_PF_PID=$! + sleep 10 + + START_COUNT=$LAST_COUNT + echo "Starting counter value: $START_COUNT" + + echo "Waiting for 1 minute with fast aggregation..." 
+ sleep 60 + + FAST_COUNT=$(read_counter "$COUNTER_ADDRESS") + echo "Counter value after fast aggregation: $FAST_COUNT" + + if [ "$FAST_COUNT" -gt "$START_COUNT" ]; then + INCREMENTS=$((FAST_COUNT - START_COUNT)) + echo -e "${GREEN}✓ Fast aggregation successful: $INCREMENTS increments in 1 minute${NC}" + LAST_COUNT=$FAST_COUNT + else + echo -e "${RED}✗ Fast aggregation failed (counter still at $FAST_COUNT)${NC}" + kubectl logs -l app.kubernetes.io/component=router --tail 50 + exit 1 + fi +fi + +# Step 11: Test ingress (optional) +if [ "$TEST_INGRESS" = "true" ]; then + echo -e "${YELLOW}Step 11: Testing ingress endpoint...${NC}" + + # Find the actual router deployment name (it includes the chart name) + ROUTER_DEPLOYMENT=$(kubectl get deployments -o name | grep router | head -1 | sed 's|deployment.apps/||') + + if [ -z "$ROUTER_DEPLOYMENT" ]; then + echo -e "${RED}Router deployment not found!${NC}" + kubectl get deployments + exit 1 + fi + + echo "Router deployment: $ROUTER_DEPLOYMENT" + + # Update router deployment + kubectl set env deployment/$ROUTER_DEPLOYMENT INGRESS=true + kubectl rollout status deployment/$ROUTER_DEPLOYMENT --timeout=120s + + # Re-establish port forwarding + cleanup_port_forwards + sleep 2 + kubectl port-forward service/$ETHEREUM_SERVICE 8545:8545 & + ETHEREUM_PF_PID=$! + kubectl port-forward service/$ROUTER_SERVICE 8080:8080 & + ROUTER_PF_PID=$! + sleep 15 + + START_COUNT=$LAST_COUNT + echo "Starting counter value: $START_COUNT" + + # Send ingress requests + echo "Sending 5 ingress requests..." + for i in {1..5}; do + echo "Sending request $i..." + curl -s -X POST http://localhost:8080/trigger \ + -H "Content-Type: application/json" \ + -d '{"body": {"metadata": {"request_id": "'$i'", "action": "increment"}}}' || true + sleep 1 + done + + echo "Waiting for aggregation to process requests..." 
+ sleep 15 + + INGRESS_COUNT=$(read_counter "$COUNTER_ADDRESS") + echo "Counter value after ingress: $INGRESS_COUNT" + + if [ "$INGRESS_COUNT" -gt "$START_COUNT" ]; then + INCREMENTS=$((INGRESS_COUNT - START_COUNT)) + echo -e "${GREEN}✓ Ingress test successful: $INCREMENTS increments${NC}" + else + echo -e "${RED}✗ Ingress test failed (counter still at $INGRESS_COUNT)${NC}" + kubectl logs -l app.kubernetes.io/component=router --tail 50 + exit 1 + fi +fi + +# Final summary +echo -e "${GREEN}=== Test Summary ===${NC}" +echo "Initial count: $INITIAL_COUNT" +echo "After default aggregation: $FINAL_COUNT" +[ "$TEST_FAST_AGGREGATION" = "true" ] && echo "After fast aggregation: $FAST_COUNT" +[ "$TEST_INGRESS" = "true" ] && echo "After ingress requests: $INGRESS_COUNT" +echo -e "${GREEN}✅ All tests passed!${NC}" + +exit 0 From cb08164960724a0eab4b1cf1dd93f0799e8e9d93 Mon Sep 17 00:00:00 2001 From: Dijar Llozana Date: Wed, 10 Dec 2025 03:45:16 +0100 Subject: [PATCH 4/5] Fixing secrets in helm install e2e tests --- .github/workflows/helm-integration-tests.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/helm-integration-tests.yml b/.github/workflows/helm-integration-tests.yml index 2a00577..c396d5d 100644 --- a/.github/workflows/helm-integration-tests.yml +++ b/.github/workflows/helm-integration-tests.yml @@ -74,9 +74,9 @@ jobs: helm install commonware-avs ./helm/commonware-restaking \ --set global.environment=LOCAL \ --set global.nodeCount=3 \ - --set secrets.forkUrl="$FORK_URL" \ - --set secrets.privateKey="$PRIVATE_KEY" \ - --set secrets.fundedKey="$FUNDED_KEY" \ + --set secrets.forkUrl="${{ secrets.RPC_URL }}" \ + --set secrets.privateKey="${{ secrets.PRIVATE_KEY }}" \ + --set secrets.fundedKey="${{ secrets.FUNDED_KEY }}" \ --set node.image.repository=commonware-avs-node \ --set node.image.tag=local \ --set node.image.pullPolicy=Never \ From 526a5f1a3b70db8fe7bf4bf11f440e97fa503f26 Mon Sep 17 00:00:00 2001 From: Dijar Llozana 
Date: Wed, 10 Dec 2025 04:15:01 +0100 Subject: [PATCH 5/5] Fixed integration tests to always run fast aggregations and ingress tests --- .github/workflows/helm-integration-tests.yml | 30 +++++++++++--------- 1 file changed, 17 insertions(+), 13 deletions(-) diff --git a/.github/workflows/helm-integration-tests.yml b/.github/workflows/helm-integration-tests.yml index c396d5d..fe79455 100644 --- a/.github/workflows/helm-integration-tests.yml +++ b/.github/workflows/helm-integration-tests.yml @@ -233,25 +233,28 @@ jobs: # Verify increment if [ "$FINAL_COUNT" -gt "$INITIAL_COUNT" ]; then - echo "? Counter successfully incremented from $INITIAL_COUNT to $FINAL_COUNT" + echo "✓ Counter successfully incremented from $INITIAL_COUNT to $FINAL_COUNT" echo "LAST_COUNT=$FINAL_COUNT" >> $GITHUB_ENV else - echo "? Counter did not increment (still at $FINAL_COUNT)" + echo "✗ Counter did not increment (still at $FINAL_COUNT)" echo "=== Recent router logs ===" kubectl logs -l app.kubernetes.io/component=router --tail 50 exit 1 fi - name: Test with fast aggregation frequency - if: github.event_name == 'push' && github.ref == 'refs/heads/dev' run: | echo "Testing with fast aggregation frequency (0.5 seconds)..." 
- # Update router deployment with fast aggregation - kubectl set env deployment/commonware-avs-router AGGREGATION_FREQUENCY=0.5 + # Find the actual router deployment name (includes chart name) + ROUTER_DEPLOYMENT=$(kubectl get deployments -o name | grep router | head -1 | sed 's|deployment.apps/||') + echo "Router deployment: $ROUTER_DEPLOYMENT" + + # Update router deployment with fast aggregation - DISABLE INGRESS to avoid timeout crashes + kubectl set env deployment/$ROUTER_DEPLOYMENT INGRESS=false AGGREGATION_FREQUENCY=0.5 # Wait for rollout - kubectl rollout status deployment/commonware-avs-router --timeout=120s + kubectl rollout status deployment/$ROUTER_DEPLOYMENT --timeout=120s # Re-establish port forwarding pkill -f "port-forward.*8545" || true @@ -264,7 +267,10 @@ jobs: kubectl port-forward service/$ETHEREUM_SERVICE 8545:8545 & kubectl port-forward service/$ROUTER_SERVICE 8080:8080 & - sleep 10 + + # Wait for router to fully initialize P2P connections (takes ~15-20 seconds) + echo "Waiting for router to establish P2P connections..." + sleep 20 # Get the starting counter value from previous step START_COUNT=${{ env.LAST_COUNT }} @@ -292,16 +298,15 @@ jobs: # Verify fast aggregation worked (should have multiple increments) if [ "$FAST_COUNT" -gt "$START_COUNT" ]; then INCREMENTS=$((FAST_COUNT - START_COUNT)) - echo "? Fast aggregation successful: $INCREMENTS increments in 1 minute" + echo "✓ Fast aggregation successful: $INCREMENTS increments in 1 minute" echo "LAST_COUNT=$FAST_COUNT" >> $GITHUB_ENV else - echo "? Fast aggregation failed (counter still at $FAST_COUNT)" + echo "✗ Fast aggregation failed (counter still at $FAST_COUNT)" kubectl logs -l app.kubernetes.io/component=router --tail 50 exit 1 fi - name: Test with ingress enabled - if: github.event_name == 'push' && github.ref == 'refs/heads/dev' run: | echo "Testing with ingress enabled..." 
@@ -383,9 +388,9 @@ jobs: # Verify ingress increments worked if [ "$INGRESS_COUNT" -gt "$START_COUNT" ]; then INCREMENTS=$((INGRESS_COUNT - START_COUNT)) - echo "? Ingress test successful: $INCREMENTS increments after ingress requests" + echo "✓ Ingress test successful: $INCREMENTS increments after ingress requests" else - echo "? Ingress test failed (counter still at $INGRESS_COUNT)" + echo "✗ Ingress test failed (counter still at $INGRESS_COUNT)" kubectl logs -l app.kubernetes.io/component=router --tail 50 exit 1 fi @@ -445,4 +450,3 @@ jobs: # Delete the kind cluster kind delete cluster --name commonware-test || true -