fix!: force containerd to pull already saved tagged images
During pod creation, images were being pulled from remote registries instead of from the images already saved from IPFS. The fix tags each saved image with its CID and replaces the image URL with that tag. This also fixes the case where the client uploads from a private registry that the provider cannot reach, such as the autoscaler in the test, which is stored in the local Docker registry.
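
For context, the retagging amounts to copying the image record in containerd's image store under the CID and dropping the original registry-style name. A minimal sketch of that idea, using the containerd client directly rather than the pkg/ipcr helpers; the function name is illustrative:

package retag

import (
	"context"

	"github.com/containerd/containerd"
)

// retagAsCID records the already-imported image under its CID and removes the
// original name, so nothing in the cluster references the remote URL anymore.
func retagAsCID(ctx context.Context, client *containerd.Client, source, cid string) error {
	store := client.ImageService()
	img, err := store.Get(ctx, source) // image previously imported from IPFS
	if err != nil {
		return err
	}
	img.Name = cid // same manifest digest, new name
	if _, err := store.Create(ctx, img); err != nil {
		return err
	}
	return store.Delete(ctx, source)
}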

The problem was that containerd always prefixes tagged images with docker.io/ in order to resolve them remotely again, which bypasses the images saved from IPFS. Unless the reference names a known registry such as gcr.io or docker.io, it always gets prefixed, even localhost.
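
The prefixing is standard Docker-style reference normalization; a quick illustration using the distribution reference library (the CID below is a made-up placeholder):

package main

import (
	"fmt"

	"github.com/distribution/reference"
)

func main() {
	// A bare CID tag carries no registry domain, so normalization turns it into
	// a docker.io reference, which containerd then tries to resolve remotely.
	named, err := reference.ParseDockerRef("bafybeifakecidfordemoonly")
	if err != nil {
		panic(err)
	}
	fmt.Println(named) // docker.io/library/bafybeifakecidfordemoonly:latest
}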

So the solution was simple: disable image pulling completely. The runtime then falls back to the already saved images instead of prefixing and re-resolving them, and just takes the reference as it is.
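
In the Kubernetes pod spec this translates to ImagePullPolicy: Never, so the kubelet only uses images already present in the node's containerd store. A minimal sketch mirroring the change in pkg/kubernetes/pods.go below; the names are illustrative:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	c := corev1.Container{
		Name:            "autoscaler",
		Image:           "bafybeifakecidfordemoonly", // CID tag of the image imported from IPFS
		ImagePullPolicy: corev1.PullNever,            // never contact a registry; fail if the image is missing locally
	}
	fmt.Printf("image=%s pullPolicy=%s\n", c.Image, c.ImagePullPolicy)
}
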
revoltez committed Jul 22, 2024
1 parent 9ed15bf commit 9512412
Showing 10 changed files with 124 additions and 40 deletions.
1 change: 1 addition & 0 deletions cmd/trustedpods/flags.go
@@ -58,6 +58,7 @@ var _ = func() error {
deploymentFlags.StringVar(&providerPeer, "provider", "", "provider peer id")
deploymentFlags.StringVar(&providerEthAddress, "provider-eth", "", "provider public address")
deploymentFlags.Int64Var(&expirationOffset, "token-expiration", 10, "authentication token expires after token-expiration seconds (expired after 10 seconds by default)")
deploymentFlags.StringVar(&ipfsApi, "ipfs", "/ip4/127.0.0.1/tcp/5001", "multiaddr where the ipfs/kubo api can be accessed")

uploadFlags.StringVar(&ipfsApi, "ipfs", "/ip4/127.0.0.1/tcp/5001", "multiaddr where the ipfs/kubo api can be accessed")
uploadFlags.BoolVar(&uploadImages, "upload-images", true, "upload images")
6 changes: 2 additions & 4 deletions pkg/ipcr/containerd.go
@@ -158,14 +158,12 @@ func PullImage(ctx context.Context, client *containerd.Client, ipfsAddr, image,
if err != nil {
return err
}

err = img.Tag(ctx, client, types.ImageTagOptions{Source: image, Target: target})
if err != nil {
return err
}
err = img.Remove(ctx, client, []string{image}, types.ImageRemoveOptions{Stdout: cmd.OutOrStdout()})
if err != nil {
return err
}

return nil
}

11 changes: 6 additions & 5 deletions pkg/kubernetes/pods.go
@@ -99,11 +99,12 @@ func ApplyPodRequest(

for cIdx, container := range podManifest.Containers {
containerSpec := corev1.Container{
Name: container.Name,
Image: images[container.Name],
Command: container.Entrypoint,
Args: container.Command,
WorkingDir: container.WorkingDir,
Name: container.Name,
Image: images[container.Name],
ImagePullPolicy: corev1.PullNever,
Command: container.Entrypoint,
Args: container.Command,
WorkingDir: container.WorkingDir,
}
for field, value := range container.Env {
containerSpec.Env = append(containerSpec.Env, corev1.EnvVar{Name: field, Value: value})
7 changes: 5 additions & 2 deletions pkg/provider/download.go
@@ -40,12 +40,15 @@ func DownloadImages(ctx context.Context, client *containerd.Client, ipfsAddress,
return nil, err
}
if !exists {
err = ipcr.PullImage(ctx, client, ipfsAddress, string(c.Image.Cid), c.Image.Url)
target := string(c.Image.Cid)
err = ipcr.PullImage(ctx, client, ipfsAddress, string(c.Image.Cid), target)
if err != nil {
return nil, err
}
// Update the url
c.Image.Url = target
log.Printf("Pulled Image %v Successfully, Decrypting ...\n", c.Image.Url)
err = ipcr.DecryptImage(ctx, client, "", c.Image.Url, c.Image.Key.Data)
err = ipcr.DecryptImage(ctx, client, "", target, c.Image.Key.Data)
if err != nil {
return nil, err
}
30 changes: 30 additions & 0 deletions test/e2e/autoscaler/redeploy-images.sh
@@ -0,0 +1,30 @@
#!/bin/bash

docker build -t comradecoop/apocryph/server:latest ../../.. --target server

docker build -t comradecoop/apocryph/p2p-helper:latest ../../.. --target p2p-helper

docker build -t comradecoop/apocryph/autoscaler:latest ../../.. --target autoscaler

docker run -d -p 5000:5000 --restart=always --name registry registry:2 || echo "Docker registry already running"

docker tag comradecoop/apocryph/server:latest localhost:5000/comradecoop/apocryph/server:latest
docker push localhost:5000/comradecoop/apocryph/server:latest

docker tag comradecoop/apocryph/p2p-helper:latest localhost:5000/comradecoop/apocryph/p2p-helper:latest
docker push localhost:5000/comradecoop/apocryph/p2p-helper:latest

docker tag comradecoop/apocryph/autoscaler:latest localhost:5000/comradecoop/apocryph/autoscaler:latest
docker push localhost:5000/comradecoop/apocryph/autoscaler:latest

minikube profile c1
kubectl delete namespace trustedpods
helmfile apply -f ../minikube -l name=trustedpods --skip-deps

minikube profile c2
kubectl delete namespace trustedpods
helmfile apply -f ../minikube -l name=trustedpods --skip-deps

minikube profile c3
kubectl delete namespace trustedpods
helmfile apply -f ../minikube -l name=trustedpods --skip-deps
58 changes: 34 additions & 24 deletions test/e2e/autoscaler/run-test.sh
@@ -27,31 +27,16 @@ sudo chmod o+rw /run/containerd/containerd.sock
## 0: Set up the external environment

## 0.1: Build/tag server and p2p-helper and autoscaler images
./redeploy-images.sh

docker build -t comradecoop/apocryph/server:latest ../../.. --target server

docker build -t comradecoop/apocryph/p2p-helper:latest ../../.. --target p2p-helper

docker build -t comradecoop/apocryph/autoscaler:latest ../../.. --target autoscaler

## 0.2: Create local registry and push server and p2p-helper images

docker run -d -p 5000:5000 --restart=always --name registry registry:2 || echo "Docker registry already running"

docker tag comradecoop/apocryph/server:latest localhost:5000/comradecoop/apocryph/server:latest
docker push localhost:5000/comradecoop/apocryph/server:latest

docker tag comradecoop/apocryph/p2p-helper:latest localhost:5000/comradecoop/apocryph/p2p-helper:latest
docker push localhost:5000/comradecoop/apocryph/p2p-helper:latest

## 0.3: Set up a local ethereum node and deploy contracts to it
## 0.2: Set up a local ethereum node and deploy contracts to it

./redeploy-contracts.sh

## 1.0 Starting the First Cluster
minikube start --insecure-registry='host.minikube.internal:5000' --container-runtime=containerd --driver=virtualbox -p c1
minikube profile c1
helmfile sync -f ../minikube || { while ! kubectl get -n keda endpoints ingress-nginx-controller -o json | jq '.subsets[].addresses[].ip' &>/dev/null; do sleep 1; done; helmfile apply; }
helmfile sync -f ../minikube || { while ! kubectl get -n keda endpoints ingress-nginx-controller -o json | jq '.subsets[].addresses[].ip' &>/dev/null; do sleep 1; done; helmfile apply -f ../minikube; }

# wait until all the deployments are ready
./wait-deployments.sh
@@ -60,7 +45,7 @@ helmfile sync -f ../minikube || { while ! kubectl get -n keda endpoints ingress-
## 2.0: Starting the second Cluster
minikube start --insecure-registry='host.minikube.internal:5000' --container-runtime=containerd --driver=virtualbox -p c2
minikube profile c2
helmfile sync -f ../minikube || { while ! kubectl get -n keda endpoints ingress-nginx-controller -o json | jq '.subsets[].addresses[].ip' &>/dev/null; do sleep 1; done; helmfile apply; }
helmfile sync -f ../minikube || { while ! kubectl get -n keda endpoints ingress-nginx-controller -o json | jq '.subsets[].addresses[].ip' &>/dev/null; do sleep 1; done; helmfile apply -f ../minikube; }

# wait until all the deployments are ready
./wait-deployments.sh
@@ -69,7 +54,7 @@ helmfile sync -f ../minikube || { while ! kubectl get -n keda endpoints ingress-
## 3.0: Starting the third Cluster
minikube start --insecure-registry='host.minikube.internal:5000' --container-runtime=containerd --driver=virtualbox -p c3
minikube profile c3
helmfile sync -f ../minikube || { while ! kubectl get -n keda endpoints ingress-nginx-controller -o json | jq '.subsets[].addresses[].ip' &>/dev/null; do sleep 1; done; helmfile apply; }
helmfile sync -f ../minikube || { while ! kubectl get -n keda endpoints ingress-nginx-controller -o json | jq '.subsets[].addresses[].ip' &>/dev/null; do sleep 1; done; helmfile apply -f ../minikube; }

# wait until all the deployments are ready
./wait-deployments.sh
@@ -115,15 +100,40 @@ go run ../../../cmd/tpodserver registry register \
--token-contract 0x5FbDB2315678afecb367f032d93F642f64180aa3 \
--registry-contract 0x9fe46736679d2d9a65f0992f2272de9f3c7fa6e0 \

# Connect the three ipfs nodes

## 4.1: Get the tables and the providers


pkill -f "kubectl port-forward"
kubectl port-forward --namespace ipfs svc/ipfs-rpc 5004:5001 & sleep 0.5;

ipfs daemon >/dev/null &
go run ../../../cmd/trustedpods registry get --config ../../integration/registry/config.yaml config.yaml \
--ethereum-key 0x5de4111afa1a4b94908f83103eb1f1706367c2e68ca870fc3fb9a804cdab365a \
--registry-contract 0x9fe46736679d2d9a65f0992f2272de9f3c7fa6e0 \
--token-contract 0x5FbDB2315678afecb367f032d93F642f64180aa3 \
--ipfs /ip4/127.0.0.1/tcp/5004 \

## 5.0: Deploy the autoscaler to the providers using their p2p multiaddr
minikube profile c1


source swarm-connect.sh

PROVIDER_ETH=0x70997970C51812dc3A010C7d01b50e0d17dc79C8 #TODO= anvil.accounts[1]
PUBLISHER_KEY=$(docker logs anvil | awk '/Private Keys/ {flag=1; next} flag && /^\(2\)/ {print $2; exit}')
PAYMENT_CONTRACT=$(cat ../../../contracts/broadcast/Deploy.s.sol/31337/run-latest.json | jq -r '.returns.payment.value')
REGISTRY_CONTRACT=$(cat ../../../contracts/broadcast/Deploy.s.sol/31337/run-latest.json | jq -r '.returns.registry.value')
FUNDS=10000000000000000000000

set +v
set -x

go run ../../../cmd/trustedpods/ pod deploy ../common/manifest-autoscaler.yaml \
--ethereum-key "$PUBLISHER_KEY" \
--payment-contract "$PAYMENT_CONTRACT" \
--registry-contract "$REGISTRY_CONTRACT" \
--funds "$FUNDS" \
--upload-images=true \
--mint-funds \
--provider /p2p/"$PROVIDER_IPFS" \
--provider-eth "$PROVIDER_ETH"


38 changes: 38 additions & 0 deletions test/e2e/autoscaler/swarm-connect.sh
@@ -0,0 +1,38 @@
#!/bin/bash

## Configure provider/in-cluster IPFS and publisher IPFS ##
pkill -f "kubectl port-forward"
pkill ipfs

{ while ! kubectl get -n ipfs endpoints ipfs-rpc -o json | jq '.subsets[].addresses[].ip' &>/dev/null; do sleep 1; done; }

O_IPFS_PATH=$IPFS_PATH
export IPFS_PATH=$(mktemp ipfs.XXXX --tmpdir -d)

[ "$PORT_5004" == "" ] && { PORT_5004="yes" ; kubectl port-forward --namespace ipfs svc/ipfs-rpc 5004:5001 & sleep 0.5; }
echo /ip4/127.0.0.1/tcp/5004 > $IPFS_PATH/api

SWARM_ADDRESSES=$(minikube service -n ipfs ipfs-swarm --url | head -n 1 | sed -E 's|http://(.+):(.+)|["/ip4/\1/tcp/\2", "/ip4/\1/udp/\2/quic", "/ip4/\1/udp/\2/quic-v1", "/ip4/\1/udp/\2/quic-v1/webtransport"]|')

PROVIDER_IPFS=$(curl -X POST "http://127.0.0.1:5004/api/v0/id" | jq '.ID' -r); echo $PROVIDER_IPFS

CONFIG_BEFORE=$(ipfs config Addresses.AppendAnnounce)
ipfs config Addresses.AppendAnnounce --json "$SWARM_ADDRESSES"
CONFIG_AFTER=$(ipfs config Addresses.AppendAnnounce)

[ "$CONFIG_BEFORE" = "$CONFIG_AFTER" ] || kubectl delete -n ipfs $(kubectl get po -o name -n ipfs) # Restart ipfs daemon

export IPFS_PATH=$O_IPFS_PATH

{ while ! kubectl get -n ipfs endpoints ipfs-rpc -o json | jq '.subsets[].addresses[].ip' &>/dev/null; do sleep 1; done; }

ipfs id &>/dev/null || ipfs init

ipfs config --json Experimental.Libp2pStreamMounting true

[ -n "$IPFS_DAEMON" ] || { IPFS_DAEMON=yes; ipfs daemon & { while ! [ -f ${IPFS_PATH:-~/.ipfs}/api ]; do sleep 0.1; done; } 2>/dev/null; }

echo "$SWARM_ADDRESSES" | jq -r '.[] + "/p2p/'"$PROVIDER_IPFS"'"' | xargs -n 1 ipfs swarm connect || true

sleep 5

2 changes: 1 addition & 1 deletion test/e2e/common/config3.yaml
@@ -16,7 +16,7 @@ pricing:
priceForUsage: 800
- resource: "bandwidth_egress"
priceForUsage: 800
cpu_model: 'Intel Xeon Platinum 8452Y Processor'
cpu_model: 'AMD EPIC 7742'
tee_type: 'CVM'
withdraw:
address: '0xa0Ee7A142d267C1f36714E4a8F75612F20a79720'
7 changes: 5 additions & 2 deletions test/e2e/common/manifest-autoscaler.yaml
@@ -1,11 +1,14 @@
containers:
- name: autoscaler
image:
url: autoscaler
url: localhost:5000/comradecoop/apocryph/autoscaler
ports:
- containerPort: '9999'
- containerPort: '8080'
hostHttpHost: autoscaler.local
name: internal
- containerPort: '32500'
hostTcpPort: 32500
name: raft
resourceRequests:
- amountMillis: '10'
resource: cpu
4 changes: 2 additions & 2 deletions test/integration/ipcr/main.go
@@ -52,14 +52,14 @@ func main() {

// pulling the ecnrypted image from ipfs
log.Println("Pulling Encrypted Image")
err = ipcr.PullImage(context.Background(), client, IPFS_ADDRESS, cid, IMAGE_NAME)
err = ipcr.PullImage(context.Background(), client, IPFS_ADDRESS, cid, cid)
if err != nil {
log.Panic(err)
}
printImages(client)

// decrypting pulled Image
err = ipcr.DecryptImage(context.Background(), client, PASSWORD, IMAGE_NAME, prvKey)
err = ipcr.DecryptImage(context.Background(), client, PASSWORD, cid, prvKey)
if err != nil {
log.Panic(err)
}