# Execute the skopeo-based end-to-end test suite against the KinD cluster.
- name: Run E2E tests
  run: |
    make test-e2e-skopeo
# Deploy cert-manager (a KServe prerequisite) and KServe itself, then switch
# KServe to RawDeployment mode and enable the Modelcar feature.
- name: Continue E2E by deploying KServe
  run: |
    kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.16.1/cert-manager.yaml
    # repeat.sh retries the apply until cert-manager's webhooks/CRDs are ready,
    # since the KServe manifests depend on them.
    e2e/repeat.sh kubectl apply --server-side -f https://github.com/kserve/kserve/releases/download/v0.14.0/kserve.yaml
    e2e/repeat.sh kubectl apply --server-side -f https://github.com/kserve/kserve/releases/download/v0.14.0/kserve-cluster-resources.yaml
    # Default to RawDeployment mode (plain Deployments, no Knative/Istio).
    kubectl patch configmap/inferenceservice-config -n kserve --type=strategic -p '{"data": {"deploy": "{\"defaultDeploymentMode\": \"RawDeployment\"}"}}'
    e2e/enable-modelcar.sh
# Resolve the amd64 manifest digest from the local registry, copy that
# single-arch image into the Docker daemon, then load it into the KinD node.
- name: Load image in KinD for amd64
  run: |
    digest=$(skopeo inspect --tls-verify=false --raw docker://localhost:5001/nstestorg/modelcar | jq -r '.manifests[] | select(.platform.architecture == "amd64") | .digest')
    # Quote the digest expansion so an empty/unexpected value cannot split the argument.
    skopeo copy --src-tls-verify=false "docker://localhost:5001/nstestorg/modelcar@${digest}" docker-daemon:localhost:5001/nstestorg/modelcar:v1
    kind load docker-image -n "kind" "localhost:5001/nstestorg/modelcar:v1"
# enable-modelcar.sh restarts the KServe controller pod, so guard the apply
# with repeat.sh until the admission webhook is reachable again.
- name: Apply Isvc using Modelcar
  run: |
    e2e/repeat.sh kubectl apply -f e2e/isvc-modelcar.yaml
    kubectl wait --for=condition=Ready isvc/my-inference-service --timeout=240s
# Smoke-test the InferenceService through the Open Inference Protocol (OIP):
# check the model is listed, then run three sample predictions.
- name: Basic testing of Isvc that has Modelcar
  run: |
    echo "Starting port-forward..."
    kubectl port-forward svc/my-inference-service-predictor 8080:80 &
    PID=$!
    # Give the port-forward a moment to establish before hitting it.
    sleep 2
    echo "I have launched port-forward in background with: $PID."
    echo "Check that OIP return the expected name"
    # jq -e makes the step fail when the model name is absent from the list.
    curl -s http://localhost:8080/v2/models | jq -e '.models | index("my-inference-service") != null'
    echo "Check that OIP produces an Inference Prediction"
    # curl -f: fail on HTTP errors — otherwise an error body would be
    # pretty-printed by jq and the step would pass despite a failed inference.
    curl -sf -H "Content-Type: application/json" -d @e2e/data/input0.json http://localhost:8080/v2/models/my-inference-service/infer | jq
    curl -sf -H "Content-Type: application/json" -d @e2e/data/input1.json http://localhost:8080/v2/models/my-inference-service/infer | jq
    curl -sf -H "Content-Type: application/json" -d @e2e/data/input4.json http://localhost:8080/v2/models/my-inference-service/infer | jq
40 | 69 | e2e-oras:
|
41 | 70 | name: E2E using Oras CP
|
42 | 71 | runs-on: ubuntu-24.04
|
|
0 commit comments