Skip to content

Commit

Permalink
Added github action to test kserve helm (kserve#2588)
Browse files Browse the repository at this point in the history
* added github action to test kserve helm

Signed-off-by: Suresh Nakkeran <[email protected]>

* added testcase for modelmesh helm installation

Signed-off-by: Suresh-Nakkeran <[email protected]>

* add portforward in modelmesh helm testcase

Signed-off-by: Suresh-Nakkeran <[email protected]>

* added wait flag in helm installation

Signed-off-by: Suresh-Nakkeran <[email protected]>

Signed-off-by: Suresh Nakkeran <[email protected]>
Signed-off-by: Suresh-Nakkeran <[email protected]>
  • Loading branch information
Suresh-Nakkeran authored Jan 2, 2023
1 parent 50648a5 commit 4778517
Show file tree
Hide file tree
Showing 11 changed files with 473 additions and 4 deletions.
28 changes: 28 additions & 0 deletions .github/workflows/e2e-test.yml
Original file line number Diff line number Diff line change
Expand Up @@ -387,3 +387,31 @@ jobs:
if: always()
run: |
./test/scripts/gh-actions/status-check.sh
test-with-helm:
runs-on: ubuntu-latest
needs: [kserve-image-build, predictor-runtime-build, explainer-runtime-build]
steps:
- uses: actions/checkout@v2
- uses: actions/setup-go@v2
with:
go-version: '1.17.3'
- uses: ./.github/actions/minikube-setup
- uses: ./.github/actions/base-download

- name: Install Kserve from helm
run: |
./test/scripts/gh-actions/setup-modelmesh-dep.sh
./test/scripts/gh-actions/setup-kserve-helm.sh
kubectl get pods -n kserve
kubectl describe pods -n kserve
- name: Run E2E tests
timeout-minutes: 40
run: |
./test/scripts/gh-actions/run-e2e-tests.sh "helm"
kubectl get pods -n kserve
- name: Check system status
if: always()
run: |
kubectl get pods --all-namespaces
./test/scripts/gh-actions/status-check.sh
4 changes: 4 additions & 0 deletions Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -119,6 +119,10 @@ deploy-ci: manifests
kubectl wait --for=condition=ready pod -l control-plane=kserve-controller-manager -n kserve --timeout=300s
kustomize build config/overlays/test/runtimes | kubectl apply -f -

# Install KServe from the local Helm charts: CRDs first, then the controller
# resources. --wait blocks until the release is ready (or the 180s timeout
# expires), so a successful make implies a running controller.
deploy-helm: manifests
	helm install kserve-crd charts/kserve-crd/ --wait --timeout 180s
	helm install kserve charts/kserve-resources/ --wait --timeout 180s

undeploy:
kustomize build config/default | kubectl delete -f -
kubectl delete validatingwebhookconfigurations.admissionregistration.k8s.io inferenceservice.serving.kserve.io
Expand Down
2 changes: 1 addition & 1 deletion charts/kserve-resources/templates/deployment.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -37,7 +37,7 @@ spec:
- command:
- /manager
image: "{{ .Values.kserve.controller.image }}:{{ .Values.kserve.controller.tag }}"
imagePullPolicy: Always
imagePullPolicy: IfNotPresent
name: manager
args:
- "--metrics-addr=127.0.0.1:8080"
Expand Down
18 changes: 15 additions & 3 deletions test/e2e/common/utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,7 @@
from urllib.parse import urlparse

import grpc
import portforward
import requests
from kubernetes import client

Expand Down Expand Up @@ -196,10 +197,9 @@ def explain_response(service_name, input_json):
return json_response


def get_cluster_ip():
def get_cluster_ip(name="istio-ingressgateway", namespace="istio-system"):
api_instance = client.CoreV1Api(client.ApiClient())
service = api_instance.read_namespaced_service("istio-ingressgateway",
"istio-system")
service = api_instance.read_namespaced_service(name, namespace)
if service.status.load_balancer.ingress is None:
cluster_ip = service.spec.cluster_ip
else:
Expand Down Expand Up @@ -233,3 +233,15 @@ def predict_grpc(service_name, payload, version=constants.KSERVE_V1BETA1_VERSION
options=(('grpc.ssl_target_name_override', host),))
stub = grpc_predict_v2_pb2_grpc.GRPCInferenceServiceStub(channel)
return stub.ModelInfer(pb.ModelInferRequest(model_name=model_name, inputs=payload))


def predict_modelmesh(service_name, input_json, pod_name, model_name=None):
    """Send a V2 inference request to a ModelMesh serving pod.

    Port-forwards local port 8008 to the given pod in the ``default``
    namespace, POSTs the JSON payload read from *input_json* to the V2
    ``infer`` endpoint, and returns the decoded JSON response.

    :param service_name: InferenceService name; used as the model name
        when *model_name* is not given.
    :param input_json: path to a JSON file holding the request payload.
    :param pod_name: name of the ModelMesh pod to port-forward to.
    :param model_name: optional explicit model name for the endpoint URL.
    :return: response body parsed from JSON (dict).
    """
    with open(input_json) as json_file:
        payload = json.load(json_file)

    target_model = service_name if model_name is None else model_name
    with portforward.forward("default", pod_name, 8008, 8008):
        endpoint = f"http://localhost:8008/v2/models/{target_model}/infer"
        response = requests.post(endpoint, json.dumps(payload))
        return json.loads(response.content.decode("utf-8"))
78 changes: 78 additions & 0 deletions test/e2e/data/mm_sklearn_input.json
Original file line number Diff line number Diff line change
@@ -0,0 +1,78 @@
{
"inputs": [
{
"name": "predict",
"shape": [
1,
64
],
"datatype": "FP32",
"data": [
0.0,
0.0,
1.0,
11.0,
14.0,
15.0,
3.0,
0.0,
0.0,
1.0,
13.0,
16.0,
12.0,
16.0,
8.0,
0.0,
0.0,
8.0,
16.0,
4.0,
6.0,
16.0,
5.0,
0.0,
0.0,
5.0,
15.0,
11.0,
13.0,
14.0,
0.0,
0.0,
0.0,
0.0,
2.0,
12.0,
16.0,
13.0,
0.0,
0.0,
0.0,
0.0,
0.0,
13.0,
16.0,
16.0,
6.0,
0.0,
0.0,
0.0,
0.0,
16.0,
16.0,
16.0,
7.0,
0.0,
0.0,
0.0,
0.0,
11.0,
13.0,
12.0,
1.0,
0.0
]
}
]
}
Empty file added test/e2e/helm/__init__.py
Empty file.
73 changes: 73 additions & 0 deletions test/e2e/helm/test_kserve_sklearn.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,73 @@
# Copyright 2022 The KServe Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os

import pytest
from kubernetes import client
from kubernetes.client import V1ResourceRequirements

from kserve import (
KServeClient,
V1beta1InferenceService,
V1beta1InferenceServiceSpec,
V1beta1ModelFormat,
V1beta1ModelSpec,
V1beta1PredictorSpec,
constants
)

from ..common.utils import KSERVE_TEST_NAMESPACE, predict


@pytest.mark.helm
def test_sklearn_kserve():
    """E2E: deploy a sklearn InferenceService on a helm-installed KServe.

    Creates an isvc backed by the kserve-mlserver runtime (V2 protocol),
    waits for readiness, runs a V2 predict call, and verifies the output.
    The isvc is always deleted, even when the test body fails, so repeated
    CI runs do not collide on a leftover resource.
    """
    service_name = "isvc-sklearn-kserve"

    predictor = V1beta1PredictorSpec(
        min_replicas=1,
        model=V1beta1ModelSpec(
            model_format=V1beta1ModelFormat(
                name="sklearn",
            ),
            runtime="kserve-mlserver",
            storage_uri="gs://seldon-models/sklearn/mms/lr_model",
            protocol_version="v2",
            resources=V1ResourceRequirements(
                requests={"cpu": "50m", "memory": "128Mi"},
                limits={"cpu": "100m", "memory": "512Mi"},
            ),
        ),
    )

    isvc = V1beta1InferenceService(
        api_version=constants.KSERVE_V1BETA1,
        kind=constants.KSERVE_KIND,
        metadata=client.V1ObjectMeta(
            name=service_name, namespace=KSERVE_TEST_NAMESPACE
        ),
        spec=V1beta1InferenceServiceSpec(predictor=predictor),
    )

    kserve_client = KServeClient(
        config_file=os.environ.get("KUBECONFIG", "~/.kube/config"))
    kserve_client.create(isvc)
    try:
        kserve_client.wait_isvc_ready(
            service_name, namespace=KSERVE_TEST_NAMESPACE)

        res = predict(service_name, "./data/iris_input_v2.json",
                      protocol_version="v2")
        assert res["outputs"][0]["data"] == [1, 1]
    finally:
        # Clean up even on failure so the namespace stays reusable.
        kserve_client.delete(service_name, KSERVE_TEST_NAMESPACE)
77 changes: 77 additions & 0 deletions test/e2e/helm/test_model_mesh_sklearn.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,77 @@
# Copyright 2022 The KServe Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os

import pytest
from kubernetes import client
from kubernetes.client import V1ResourceRequirements

from kserve import (
KServeClient,
V1beta1InferenceService,
V1beta1InferenceServiceSpec,
V1beta1ModelFormat,
V1beta1ModelSpec,
V1beta1PredictorSpec,
V1beta1StorageSpec,
constants
)

from ..common.utils import predict_modelmesh


@pytest.mark.helm
def test_sklearn_modelmesh():
    """E2E: deploy a sklearn InferenceService in ModelMesh deployment mode.

    Annotates the isvc with deploymentMode=ModelMesh, waits for readiness,
    then sends a V2 predict request directly to the mlserver ModelMesh pod
    via port-forwarding and verifies the output. The isvc is always
    deleted, even when the test body fails, so repeated CI runs do not
    collide on a leftover resource.
    """
    service_name = "isvc-sklearn-modelmesh"
    annotations = {"serving.kserve.io/deploymentMode": "ModelMesh"}
    predictor = V1beta1PredictorSpec(
        min_replicas=1,
        model=V1beta1ModelSpec(
            model_format=V1beta1ModelFormat(
                name="sklearn",
            ),
            resources=V1ResourceRequirements(
                requests={"cpu": "50m", "memory": "128Mi"},
                limits={"cpu": "100m", "memory": "512Mi"},
            ),
            storage=V1beta1StorageSpec(
                key="localMinIO",
                path="sklearn/mnist-svm.joblib"
            )
        ),
    )

    isvc = V1beta1InferenceService(
        api_version=constants.KSERVE_V1BETA1,
        kind=constants.KSERVE_KIND,
        metadata=client.V1ObjectMeta(
            name=service_name,
            annotations=annotations
        ),
        spec=V1beta1InferenceServiceSpec(predictor=predictor),
    )

    kserve_client = KServeClient(
        config_file=os.environ.get("KUBECONFIG", "~/.kube/config"))
    kserve_client.create(isvc)
    try:
        kserve_client.wait_isvc_ready(service_name)

        # ModelMesh routes through the shared mlserver serving-runtime pod;
        # pick the first pod matching the runtime's label selector.
        pods = kserve_client.core_api.list_namespaced_pod(
            "default", label_selector="name=modelmesh-serving-mlserver-0.x")
        pod_name = pods.items[0].metadata.name

        res = predict_modelmesh(
            service_name, "./data/mm_sklearn_input.json", pod_name)
        assert res["outputs"][0]["data"] == [8]
    finally:
        # Clean up even on failure so the namespace stays reusable.
        kserve_client.delete(service_name)
1 change: 1 addition & 0 deletions test/e2e/pytest.ini
Original file line number Diff line number Diff line change
Expand Up @@ -8,3 +8,4 @@ markers =
pmml: pmml e2e tests
grpc: grpc tests
graph: inference graph tests
helm: helm e2e tests
50 changes: 50 additions & 0 deletions test/scripts/gh-actions/setup-kserve-helm.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,50 @@
#!/bin/bash

# Copyright 2022 The KServe Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# The script installs KServe (CRDs + controller) from the local Helm charts
# and prepares the GH Actions cluster for the helm-based e2e tests:
# test models in minio, the test namespace and its secrets, and the
# KServe Python SDK.

set -o errexit
set -o nounset
set -o pipefail

# Pin the controller image tag in the chart values to the commit under test.
sed -i -e "s/*defaultVersion/${GITHUB_SHA}/g" charts/kserve-resources/values.yaml

cat ./charts/kserve-resources/values.yaml

make deploy-helm

echo "Updating modelmesh default replicas count..."
kubectl patch servingruntimes mlserver-0.x --type='merge' -p '{"spec":{"replicas":1}}'

echo "Get events of all pods ..."
kubectl get events -A

echo "Add testing models to minio storage ..."
kubectl apply -f config/overlays/test/minio/minio-init-job.yaml
kubectl wait --for=condition=complete --timeout=90s job/minio-init

echo "Creating a namespace kserve-ci-e2e-test ..."
kubectl create namespace kserve-ci-e2e-test

echo "Add storageSpec testing secrets ..."
kubectl apply -f config/overlays/test/minio/minio-user-secret.yaml -n kserve-ci-e2e-test

echo "Installing KServe Python SDK ..."
python3 -m pip install --upgrade pip
pushd python/kserve >/dev/null
pip3 install -e .[test] --user
popd
Loading

0 comments on commit 4778517

Please sign in to comment.