184 changes: 184 additions & 0 deletions ChatQnA/tests/test_compose_remote_on_xeon.sh
@@ -0,0 +1,184 @@
#!/bin/bash
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

set -xe
IMAGE_REPO=${IMAGE_REPO:-"opea"}
IMAGE_TAG=${IMAGE_TAG:-"latest"}
echo "REGISTRY=IMAGE_REPO=${IMAGE_REPO}"
echo "TAG=IMAGE_TAG=${IMAGE_TAG}"
Copilot AI (Aug 19, 2025):
The echo statements display the variable names instead of their values; they should be echo "REGISTRY=${IMAGE_REPO}" and echo "TAG=${IMAGE_TAG}".
Suggested change:
-echo "TAG=IMAGE_TAG=${IMAGE_TAG}"
+echo "REGISTRY=${IMAGE_REPO}"
+echo "TAG=${IMAGE_TAG}"

export REGISTRY=${IMAGE_REPO}
export TAG=${IMAGE_TAG}
export MODEL_CACHE=${model_cache:-"./data"}

WORKPATH=$(dirname "$PWD")
LOG_PATH="$WORKPATH/tests"
ip_address=$(hostname -I | awk '{print $1}')

function build_docker_images() {
opea_branch=${opea_branch:-"main"}
cd $WORKPATH/docker_image_build
git clone --depth 1 --branch ${opea_branch} https://github.com/opea-project/GenAIComps.git
pushd GenAIComps
echo "GenAIComps test commit is $(git rev-parse HEAD)"
docker build --no-cache -t ${REGISTRY}/comps-base:${TAG} --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile .
popd && sleep 1s

echo "Build all the images with --no-cache, check docker_image_build.log for details..."
service_list="chatqna chatqna-ui dataprep retriever nginx"
docker compose -f build.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log
docker run --env "VLLM_SKIP_WARMUP=true" -p 8000:8000 --ipc=host public.ecr.aws/q9t5s3a7/vllm-cpu-release-repo:v0.9.2 --model TinyLlama/TinyLlama-1.1B-Chat-v1.0 --api-key $TEST_KEY
Collaborator:
Why is this docker run command inside the build_docker_images function? You could move it to start_services and sleep in between; a sketch of that relocation follows.
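A minimal sketch of that move, not part of this PR (the container name vllm-remote-endpoint and the 10s settle time are illustrative assumptions; the matching teardown would belong in stop_docker):

    function start_services() {
        cd $WORKPATH/docker_compose/intel/cpu/xeon
        # Launch the remote vLLM endpoint detached so the test script is not blocked
        docker run -d --name vllm-remote-endpoint --env "VLLM_SKIP_WARMUP=true" -p 8000:8000 --ipc=host \
            public.ecr.aws/q9t5s3a7/vllm-cpu-release-repo:v0.9.2 \
            --model TinyLlama/TinyLlama-1.1B-Chat-v1.0 --api-key $TEST_KEY
        # Give the endpoint a moment before the compose stack that depends on it comes up
        sleep 10s
        # ...rest of start_services unchanged
    }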

docker images && sleep 1s
Copilot AI (Aug 19, 2025):
This docker run command is executed without backgrounding or container cleanup. Consider running it in detached mode with the -d flag and implementing proper container lifecycle management.
Suggested change:
-docker images && sleep 1s
+VLLM_CONTAINER_ID=$(docker run -d --env "VLLM_SKIP_WARMUP=true" -p 8000:8000 --ipc=host public.ecr.aws/q9t5s3a7/vllm-cpu-release-repo:v0.9.2 --model TinyLlama/TinyLlama-1.1B-Chat-v1.0 --api-key $TEST_KEY)
+docker images && sleep 1s
+# Stop and remove the container after use
+docker stop $VLLM_CONTAINER_ID
+docker rm $VLLM_CONTAINER_ID

}

function start_services() {
cd $WORKPATH/docker_compose/intel/cpu/xeon

source set_env.sh
export REMOTE_ENDPOINT=http://localhost:8000
export API_KEY=$TEST_KEY
export LLM_MODEL_ID=TinyLlama/TinyLlama-1.1B-Chat-v1.0

# Start Docker Containers
docker compose -f compose_remote.yaml -f compose.telemetry.yaml up -d --quiet-pull > ${LOG_PATH}/start_services_with_compose_remote.log
}
Collaborator:
Add logic to wait for the slowest container to spin up, usually the backend service, rather than relying on a fixed sleep; a sketch follows. Example: https://github.com/opea-project/GenAIExamples/blob/main/ChatQnA/tests/test_compose_on_xeon.sh#L47
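A minimal readiness loop along those lines, as a sketch only (the marker "Uvicorn running" is an assumption to verify against the backend's actual startup log; chatqna-xeon-backend-server is the backend container name used elsewhere in this script):

    # Poll the backend container's logs until a readiness marker appears (up to ~5 minutes)
    n=0
    until [[ "$n" -ge 60 ]]; do
        docker logs chatqna-xeon-backend-server > ${LOG_PATH}/backend_start.log 2>&1
        if grep -q "Uvicorn running" ${LOG_PATH}/backend_start.log; then
            break
        fi
        sleep 5s
        n=$((n+1))
    done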


function validate_service() {
local URL="$1"
local EXPECTED_RESULT="$2"
local SERVICE_NAME="$3"
local DOCKER_NAME="$4"
local INPUT_DATA="$5"

local HTTP_STATUS=$(curl -s -o /dev/null -w "%{http_code}" -X POST -d "$INPUT_DATA" -H 'Content-Type: application/json' "$URL")
if [ "$HTTP_STATUS" -eq 200 ]; then
echo "[ $SERVICE_NAME ] HTTP status is 200. Checking content..."

local CONTENT=$(curl -s -X POST -d "$INPUT_DATA" -H 'Content-Type: application/json' "$URL" | tee ${LOG_PATH}/${SERVICE_NAME}.log)

if echo "$CONTENT" | grep -q "$EXPECTED_RESULT"; then
echo "[ $SERVICE_NAME ] Content is as expected."
else
echo "[ $SERVICE_NAME ] Content does not match the expected result: $CONTENT"
docker logs ${DOCKER_NAME} >> ${LOG_PATH}/${SERVICE_NAME}.log
exit 1
fi
else
echo "[ $SERVICE_NAME ] HTTP status is not 200. Received status was $HTTP_STATUS"
docker logs ${DOCKER_NAME} >> ${LOG_PATH}/${SERVICE_NAME}.log
exit 1
fi
sleep 1s
}

function validate_microservices() {
# Check if the microservices are running correctly.
sleep 3m

# tei for embedding service
validate_service \
"${ip_address}:6006/embed" \
"\[\[" \
"tei-embedding" \
"tei-embedding-server" \
'{"inputs":"What is Deep Learning?"}'

# retrieval microservice
test_embedding=$(python3 -c "import random; embedding = [random.uniform(-1, 1) for _ in range(768)]; print(embedding)")
validate_service \
"${ip_address}:7000/v1/retrieval" \
" " \
"retrieval" \
"retriever-redis-server" \
"{\"text\":\"What is the revenue of Nike in 2023?\",\"embedding\":${test_embedding}}"

# tei for rerank microservice
validate_service \
"${ip_address}:8808/rerank" \
'{"index":1,"score":' \
"tei-rerank" \
"tei-reranking-server" \
'{"query":"What is Deep Learning?", "texts": ["Deep Learning is not...", "Deep learning is..."]}'

}

function validate_megaservice() {
# Curl the Mega Service
validate_service \
"${ip_address}:8888/v1/chatqna" \
"Nike" \
"mega-chatqna" \
"chatqna-xeon-backend-server" \
'{"messages": "What is the revenue of Nike in 2023?"}'

}

function validate_frontend() {
cd $WORKPATH/ui/svelte
local conda_env_name="OPEA_e2e"
export PATH=${HOME}/miniforge3/bin/:$PATH
if conda info --envs | grep -q "$conda_env_name"; then
echo "$conda_env_name exist!"
else
conda create -n ${conda_env_name} python=3.12 -y
fi

source activate ${conda_env_name}

sed -i "s/localhost/$ip_address/g" playwright.config.ts

conda install -c conda-forge nodejs=22.6.0 -y
npm install && npm ci && npx playwright install --with-deps
node -v && npm -v && pip list

exit_status=0
npx playwright test || exit_status=$?

if [ $exit_status -ne 0 ]; then
echo "[TEST INFO]: ---------frontend test failed---------"
exit $exit_status
else
echo "[TEST INFO]: ---------frontend test passed---------"
fi
}

function stop_docker() {
Collaborator:
Add a line to stop the vllm-cpu docker container; a sketch follows.
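One possible line, as a sketch (it filters by the image used above; alternatively, giving the docker run a fixed --name and stopping the container by name would work equally well):

    # Stop and remove the standalone vllm-cpu container started earlier
    docker ps -aq --filter "ancestor=public.ecr.aws/q9t5s3a7/vllm-cpu-release-repo:v0.9.2" | xargs -r docker rm -f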

cd $WORKPATH/docker_compose/intel/cpu/xeon
docker compose -f compose_remote.yaml -f compose.telemetry.yaml down
}

function main() {

echo "::group::stop_docker"
stop_docker
echo "::endgroup::"

echo "::group::build_docker_images"
if [[ "$IMAGE_REPO" == "opea" ]]; then build_docker_images; fi
echo "::endgroup::"

echo "::group::start_services"
start_services
echo "::endgroup::"

echo "::group::validate_microservices"
validate_microservices
echo "::endgroup::"

echo "::group::validate_megaservice"
validate_megaservice
echo "::endgroup::"

echo "::group::validate_frontend"
validate_frontend
echo "::endgroup::"

echo "::group::stop_docker"
stop_docker
echo "::endgroup::"

docker system prune -f

}

main