From 9c7a242abd5409cec08727de4a921b13e71acc0d Mon Sep 17 00:00:00 2001
From: Ubuntu
Date: Thu, 14 Aug 2025 03:46:48 +0000
Subject: [PATCH 1/2] add test script

Signed-off-by: Ubuntu
---
 ChatQnA/tests/test_compose_remote_on_xeon.sh | 184 ++++++++
 CodeGen/tests/test_compose_remote_on_xeon.sh | 218 ++++++++++
 DocSum/tests/test_compose_remote_on_xeon.sh  | 392 ++++++++++++++++++
 .../tests/test_compose_remote_on_xeon.sh     | 280 +++++++++++++
 4 files changed, 1074 insertions(+)
 create mode 100644 ChatQnA/tests/test_compose_remote_on_xeon.sh
 create mode 100644 CodeGen/tests/test_compose_remote_on_xeon.sh
 create mode 100644 DocSum/tests/test_compose_remote_on_xeon.sh
 create mode 100644 ProductivitySuite/tests/test_compose_remote_on_xeon.sh

diff --git a/ChatQnA/tests/test_compose_remote_on_xeon.sh b/ChatQnA/tests/test_compose_remote_on_xeon.sh
new file mode 100644
index 0000000000..bb01da6423
--- /dev/null
+++ b/ChatQnA/tests/test_compose_remote_on_xeon.sh
@@ -0,0 +1,184 @@
+#!/bin/bash
+# Copyright (C) 2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+set -xe
+IMAGE_REPO=${IMAGE_REPO:-"opea"}
+IMAGE_TAG=${IMAGE_TAG:-"latest"}
+echo "REGISTRY=IMAGE_REPO=${IMAGE_REPO}"
+echo "TAG=IMAGE_TAG=${IMAGE_TAG}"
+export REGISTRY=${IMAGE_REPO}
+export TAG=${IMAGE_TAG}
+export MODEL_CACHE=${model_cache:-"./data"}
+
+WORKPATH=$(dirname "$PWD")
+LOG_PATH="$WORKPATH/tests"
+ip_address=$(hostname -I | awk '{print $1}')
+
+function build_docker_images() {
+    opea_branch=${opea_branch:-"main"}
+    cd $WORKPATH/docker_image_build
+    git clone --depth 1 --branch ${opea_branch} https://github.com/opea-project/GenAIComps.git
+    pushd GenAIComps
+    echo "GenAIComps test commit is $(git rev-parse HEAD)"
+    docker build --no-cache -t ${REGISTRY}/comps-base:${TAG} --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile .
+    popd && sleep 1s
+
+    echo "Build all the images with --no-cache, check docker_image_build.log for details..."
+    service_list="chatqna chatqna-ui dataprep retriever nginx"
+    docker compose -f build.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log
+    docker run -d --env "VLLM_SKIP_WARMUP=true" -p 8000:8000 --ipc=host public.ecr.aws/q9t5s3a7/vllm-cpu-release-repo:v0.9.2 --model TinyLlama/TinyLlama-1.1B-Chat-v1.0 --api-key $TEST_KEY
+    docker images && sleep 1s
+}
+
+function start_services() {
+    cd $WORKPATH/docker_compose/intel/cpu/xeon
+
+    source set_env.sh
+    export REMOTE_ENDPOINT=http://localhost:8000
+    export API_KEY=$TEST_KEY
+    export LLM_MODEL_ID=TinyLlama/TinyLlama-1.1B-Chat-v1.0
+
+    # Start Docker Containers
+    docker compose -f compose_remote.yaml -f compose.telemetry.yaml up -d --quiet-pull > ${LOG_PATH}/start_services_with_compose_remote.log
+}
+
+function validate_service() {
+    local URL="$1"
+    local EXPECTED_RESULT="$2"
+    local SERVICE_NAME="$3"
+    local DOCKER_NAME="$4"
+    local INPUT_DATA="$5"
+
+    local HTTP_STATUS=$(curl -s -o /dev/null -w "%{http_code}" -X POST -d "$INPUT_DATA" -H 'Content-Type: application/json' "$URL")
+    if [ "$HTTP_STATUS" -eq 200 ]; then
+        echo "[ $SERVICE_NAME ] HTTP status is 200. Checking content..."
+
+        local CONTENT=$(curl -s -X POST -d "$INPUT_DATA" -H 'Content-Type: application/json' "$URL" | tee ${LOG_PATH}/${SERVICE_NAME}.log)
+
+        if echo "$CONTENT" | grep -q "$EXPECTED_RESULT"; then
+            echo "[ $SERVICE_NAME ] Content is as expected."
+        else
+            echo "[ $SERVICE_NAME ] Content does not match the expected result: $CONTENT"
+            docker logs ${DOCKER_NAME} >> ${LOG_PATH}/${SERVICE_NAME}.log
+            exit 1
+        fi
+    else
+        echo "[ $SERVICE_NAME ] HTTP status is not 200. Received status was $HTTP_STATUS"
+        docker logs ${DOCKER_NAME} >> ${LOG_PATH}/${SERVICE_NAME}.log
+        exit 1
+    fi
+    sleep 1s
+}
+
+function validate_microservices() {
+    # Check if the microservices are running correctly.
+    sleep 3m
+
+    # tei for embedding service
+    validate_service \
+        "${ip_address}:6006/embed" \
+        "\[\[" \
+        "tei-embedding" \
+        "tei-embedding-server" \
+        '{"inputs":"What is Deep Learning?"}'
+
+    # retrieval microservice
+    test_embedding=$(python3 -c "import random; embedding = [random.uniform(-1, 1) for _ in range(768)]; print(embedding)")
+    validate_service \
+        "${ip_address}:7000/v1/retrieval" \
+        " " \
+        "retrieval" \
+        "retriever-redis-server" \
+        "{\"text\":\"What is the revenue of Nike in 2023?\",\"embedding\":${test_embedding}}"
+
+    # tei for rerank microservice
+    validate_service \
+        "${ip_address}:8808/rerank" \
+        '{"index":1,"score":' \
+        "tei-rerank" \
+        "tei-reranking-server" \
+        '{"query":"What is Deep Learning?", "texts": ["Deep Learning is not...", "Deep learning is..."]}'
+
+}
+
+function validate_megaservice() {
+    # Curl the Mega Service
+    validate_service \
+        "${ip_address}:8888/v1/chatqna" \
+        "Nike" \
+        "mega-chatqna" \
+        "chatqna-xeon-backend-server" \
+        '{"messages": "What is the revenue of Nike in 2023?"}'
+
+}
+
+function validate_frontend() {
+    cd $WORKPATH/ui/svelte
+    local conda_env_name="OPEA_e2e"
+    export PATH=${HOME}/miniforge3/bin/:$PATH
+    if conda info --envs | grep -q "$conda_env_name"; then
+        echo "$conda_env_name exists!"
+    else
+        conda create -n ${conda_env_name} python=3.12 -y
+    fi
+
+    source activate ${conda_env_name}
+
+    sed -i "s/localhost/$ip_address/g" playwright.config.ts
+
+    conda install -c conda-forge nodejs=22.6.0 -y
+    npm install && npm ci && npx playwright install --with-deps
+    node -v && npm -v && pip list
+
+    exit_status=0
+    npx playwright test || exit_status=$?
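+    # The '|| exit_status=$?' above keeps 'set -e' from aborting immediately, so the result can be reported below.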
+
+    if [ $exit_status -ne 0 ]; then
+        echo "[TEST INFO]: ---------frontend test failed---------"
+        exit $exit_status
+    else
+        echo "[TEST INFO]: ---------frontend test passed---------"
+    fi
+}
+
+function stop_docker() {
+    cd $WORKPATH/docker_compose/intel/cpu/xeon
+    docker compose -f compose_remote.yaml -f compose.telemetry.yaml down
+}
+
+function main() {
+
+    echo "::group::stop_docker"
+    stop_docker
+    echo "::endgroup::"
+
+    echo "::group::build_docker_images"
+    if [[ "$IMAGE_REPO" == "opea" ]]; then build_docker_images; fi
+    echo "::endgroup::"
+
+    echo "::group::start_services"
+    start_services
+    echo "::endgroup::"
+
+    echo "::group::validate_microservices"
+    validate_microservices
+    echo "::endgroup::"
+
+    echo "::group::validate_megaservice"
+    validate_megaservice
+    echo "::endgroup::"
+
+    echo "::group::validate_frontend"
+    validate_frontend
+    echo "::endgroup::"
+
+    echo "::group::stop_docker"
+    stop_docker
+    echo "::endgroup::"
+
+    docker system prune -f
+
+}
+
+main
diff --git a/CodeGen/tests/test_compose_remote_on_xeon.sh b/CodeGen/tests/test_compose_remote_on_xeon.sh
new file mode 100644
index 0000000000..ea1a2126aa
--- /dev/null
+++ b/CodeGen/tests/test_compose_remote_on_xeon.sh
@@ -0,0 +1,218 @@
+#!/bin/bash
+# Copyright (C) 2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+set -xe
+IMAGE_REPO=${IMAGE_REPO:-"opea"}
+IMAGE_TAG=${IMAGE_TAG:-"latest"}
+echo "REGISTRY=IMAGE_REPO=${IMAGE_REPO}"
+echo "TAG=IMAGE_TAG=${IMAGE_TAG}"
+export REGISTRY=${IMAGE_REPO}
+export TAG=${IMAGE_TAG}
+export MODEL_CACHE=${model_cache:-"./data"}
+
+WORKPATH=$(dirname "$PWD")
+LOG_PATH="$WORKPATH/tests"
+ip_address=$(hostname -I | awk '{print $1}')
+source $WORKPATH/docker_compose/intel/set_env.sh
+
+function build_docker_images() {
+    opea_branch=${opea_branch:-"main"}
+
+    cd $WORKPATH/docker_image_build
+    git clone --depth 1 --branch ${opea_branch} https://github.com/opea-project/GenAIComps.git
+    pushd GenAIComps
+    echo "GenAIComps test commit is $(git rev-parse HEAD)"
+    docker build --no-cache -t ${REGISTRY}/comps-base:${TAG} --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile .
+    popd && sleep 1s
+
+    git clone https://github.com/vllm-project/vllm.git && cd vllm
+    VLLM_VER=v0.9.0.1
+    echo "Check out vLLM tag ${VLLM_VER}"
+    git checkout ${VLLM_VER} &> /dev/null
+    cd ../
+
+    echo "Build all the images with --no-cache, check docker_image_build.log for details..."
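+    # NOTE: the vLLM container started below is the local stand-in for the "remote"
+    # OpenAI-compatible endpoint (REMOTE_ENDPOINT) that this test exercises.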
+ service_list="codegen codegen-gradio-ui dataprep retriever embedding" + + docker compose -f build.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log + docker run --env "VLLM_SKIP_WARMUP=true" -p 8000:8000 --ipc=host public.ecr.aws/q9t5s3a7/vllm-cpu-release-repo:v0.9.2 --model TinyLlama/TinyLlama-1.1B-Chat-v1.0 --api-key $TEST_KEY + docker images && sleep 1s +} + +function start_services() { + + cd $WORKPATH/docker_compose/intel/cpu/xeon/ + export REMOTE_ENDPOINT=http://localhost:8000 + export API_KEY=$TEST_KEY + export LLM_MODEL_ID=TinyLlama/TinyLlama-1.1B-Chat-v1.0 + # Start Docker Containers + docker compose -f compose_remote.yaml up -d > ${LOG_PATH}/start_services_with_compose.log + +} + +function validate_services() { + local URL="$1" + local EXPECTED_RESULT="$2" + local SERVICE_NAME="$3" + local DOCKER_NAME="$4" + local INPUT_DATA="$5" + + if [[ "$SERVICE_NAME" == "ingest" ]]; then + local HTTP_STATUS=$(curl -s -o /dev/null -w "%{http_code}" -X POST -F "$INPUT_DATA" -F index_name=test_redis -H 'Content-Type: multipart/form-data' "$URL") + + if [ "$HTTP_STATUS" -eq 200 ]; then + echo "[ $SERVICE_NAME ] HTTP status is 200. Data preparation succeeded..." + else + echo "[ $SERVICE_NAME ] Data preparation failed..." + fi + + else + local HTTP_STATUS=$(curl -s -o /dev/null -w "%{http_code}" -X POST -d "$INPUT_DATA" -H 'Content-Type: application/json' "$URL") + if [ "$HTTP_STATUS" -eq 200 ]; then + echo "[ $SERVICE_NAME ] HTTP status is 200. Checking content..." + + local CONTENT=$(curl -s -X POST -d "$INPUT_DATA" -H 'Content-Type: application/json' "$URL" | tee ${LOG_PATH}/${SERVICE_NAME}.log) + + if echo "$CONTENT" | grep -q "$EXPECTED_RESULT"; then + echo "[ $SERVICE_NAME ] Content is as expected." + else + echo "[ $SERVICE_NAME ] Content does not match the expected result: $CONTENT" + docker logs ${DOCKER_NAME} >> ${LOG_PATH}/${SERVICE_NAME}.log + exit 1 + fi + else + echo "[ $SERVICE_NAME ] HTTP status is not 200. 
+            docker logs ${DOCKER_NAME} >> ${LOG_PATH}/${SERVICE_NAME}.log
+            exit 1
+        fi
+    fi
+    sleep 5s
+}
+
+function validate_microservices() {
+
+    # Data ingest microservice
+    validate_services \
+        "${ip_address}:6007/v1/dataprep/ingest" \
+        "Data preparation succeeded" \
+        "ingest" \
+        "dataprep-redis-server" \
+        'link_list=["https://modin.readthedocs.io/en/latest/index.html"]'
+}
+
+function validate_megaservice() {
+    # Curl the Mega Service
+    validate_services \
+        "${ip_address}:7778/v1/codegen" \
+        "print" \
+        "mega-codegen" \
+        "codegen-xeon-backend-server" \
+        '{"messages": "def print_hello_world():", "max_tokens": 256}'
+
+    # Curl the Mega Service with stream as false
+    validate_services \
+        "${ip_address}:7778/v1/codegen" \
+        "" \
+        "mega-codegen" \
+        "codegen-xeon-backend-server" \
+        '{ "messages": "def print_hello_world():", "max_tokens": 256, "stream": false}'
+
+    # Curl the Mega Service with index_name and agents_flag
+    validate_services \
+        "${ip_address}:7778/v1/codegen" \
+        "" \
+        "mega-codegen" \
+        "codegen-xeon-backend-server" \
+        '{ "index_name": "test_redis", "agents_flag": "True", "messages": "def print_hello_world():", "max_tokens": 256}'
+
+    validate_services \
+        "${ip_address}:7778/v1/codegen" \
+        "class" \
+        "mega-codegen" \
+        "codegen-xeon-backend-server" \
+        '{"model": "Qwen/Qwen2.5-Coder-7B-Instruct", "messages": [{"role": "user", "content": "Implement a basic Python class"}], "max_tokens":32}'
+
+}
+
+function validate_frontend() {
+    cd $WORKPATH/ui/svelte
+    local conda_env_name="OPEA_e2e"
+    export PATH=${HOME}/miniforge3/bin/:$PATH
+    if conda info --envs | grep -q "$conda_env_name"; then
+        echo "$conda_env_name exists!"
+    else
+        conda create -n ${conda_env_name} python=3.12 -y
+    fi
+    source activate ${conda_env_name}
+
+    sed -i "s/localhost/$ip_address/g" playwright.config.ts
+
+    conda install -c conda-forge nodejs=22.6.0 -y
+    npm install && npm ci && npx playwright install --with-deps
+    node -v && npm -v && pip list
+
+    exit_status=0
+    npx playwright test || exit_status=$?
+
+    if [ $exit_status -ne 0 ]; then
+        echo "[TEST INFO]: ---------frontend test failed---------"
+        exit $exit_status
+    else
+        echo "[TEST INFO]: ---------frontend test passed---------"
+    fi
+}
+
+function validate_gradio() {
+    local URL="http://${ip_address}:5173/health"
+    local HTTP_STATUS=$(curl "$URL")
+    local SERVICE_NAME="Gradio"
+
+    if [ "$HTTP_STATUS" = '{"status":"ok"}' ]; then
+        echo "[ $SERVICE_NAME ] HTTP status is 200. UI server is running successfully..."
+    else
+        echo "[ $SERVICE_NAME ] UI server has failed..."
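+        # Unlike validate_services, a failed UI health check is only logged here; it does not abort the run.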
+    fi
+}
+
+function stop_docker() {
+
+    cd $WORKPATH/docker_compose/intel/cpu/xeon/
+    docker compose -f compose_remote.yaml down
+}
+
+function main() {
+
+    echo "::group::stop_docker"
+    stop_docker
+    echo "::endgroup::"
+
+    echo "::group::build_docker_images"
+    if [[ "$IMAGE_REPO" == "opea" ]]; then build_docker_images; fi
+    echo "::endgroup::"
+
+    echo "::group::start_services"
+    start_services
+    echo "::endgroup::"
+
+    echo "::group::validate_microservices"
+    validate_microservices
+    echo "::endgroup::"
+
+    echo "::group::validate_megaservice"
+    validate_megaservice
+    echo "::endgroup::"
+
+    echo "::group::validate_gradio"
+    validate_gradio
+    echo "::endgroup::"
+
+    echo "::group::stop_docker"
+    stop_docker
+    echo "::endgroup::"
+
+    docker system prune -f
+}
+
+main
diff --git a/DocSum/tests/test_compose_remote_on_xeon.sh b/DocSum/tests/test_compose_remote_on_xeon.sh
new file mode 100644
index 0000000000..e0a7e76604
--- /dev/null
+++ b/DocSum/tests/test_compose_remote_on_xeon.sh
@@ -0,0 +1,392 @@
+#!/bin/bash
+# Copyright (C) 2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+set -xe
+
+IMAGE_REPO=${IMAGE_REPO:-"opea"}
+IMAGE_TAG=${IMAGE_TAG:-"latest"}
+export http_proxy=$http_proxy
+export https_proxy=$https_proxy
+export host_ip=$(hostname -I | awk '{print $1}')
+
+WORKPATH=$(dirname "$PWD")
+LOG_PATH="$WORKPATH/tests"
+echo "REGISTRY=IMAGE_REPO=${IMAGE_REPO}"
+echo "TAG=IMAGE_TAG=${IMAGE_TAG}"
+export REGISTRY=${IMAGE_REPO}
+export TAG=${IMAGE_TAG}
+
+source $WORKPATH/docker_compose/intel/set_env.sh
+export MODEL_CACHE=${model_cache:-"./data"}
+
+export MAX_INPUT_TOKENS=2048
+export MAX_TOTAL_TOKENS=4096
+#export REMOTE_ENDPOINT=
+#export API_KEY=
+#export LLM_MODEL_ID=
+
+# Get the root folder of the current script
+ROOT_FOLDER=$(dirname "$(readlink -f "$0")")
+
+function build_docker_images() {
+    opea_branch=${opea_branch:-"main"}
+
+    cd $WORKPATH/docker_image_build
+    git clone --depth 1 --branch ${opea_branch} https://github.com/opea-project/GenAIComps.git
+    pushd GenAIComps
+    echo "GenAIComps test commit is $(git rev-parse HEAD)"
+    docker build --no-cache -t ${REGISTRY}/comps-base:${TAG} --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile .
+    popd && sleep 1s
+
+    git clone https://github.com/vllm-project/vllm.git && cd vllm
+    VLLM_VER=v0.9.0.1
+    echo "Check out vLLM tag ${VLLM_VER}"
+    git checkout ${VLLM_VER} &> /dev/null
+    cd ../
+
+    echo "Build all the images with --no-cache, check docker_image_build.log for details..."
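+    # whisper is built here because DocSum's audio/video paths transcribe input with ASR before summarization.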
+ service_list="docsum docsum-gradio-ui whisper llm-docsum" + docker compose -f build.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log + docker run --env "VLLM_SKIP_WARMUP=true" -p 8000:8000 --ipc=host public.ecr.aws/q9t5s3a7/vllm-cpu-release-repo:v0.9.2 --model TinyLlama/TinyLlama-1.1B-Chat-v1.0 --api-key $TEST_KEY + docker images && sleep 1s +} + +function start_services() { + cd $WORKPATH/docker_compose/intel/cpu/xeon/ + export REMOTE_ENDPOINT=http://localhost:8000 + export API_KEY=$TEST_KEY + export LLM_MODEL_ID=TinyLlama/TinyLlama-1.1B-Chat-v1.0 + docker compose -f compose_remote.yaml up -d > ${LOG_PATH}/start_services_with_compose_remote.log + sleep 1m +} + +get_base64_str() { + local file_name=$1 + base64 -w 0 "$file_name" +} + +# Function to generate input data for testing based on the document type +input_data_for_test() { + local document_type=$1 + case $document_type in + ("text") + echo "THIS IS A TEST >>>> and a number of states are starting to adopt them voluntarily special correspondent john delenco of education week reports it takes just 10 minutes to cross through gillette wyoming this small city sits in the northeast corner of the state surrounded by 100s of miles of prairie but schools here in campbell county are on the edge of something big the next generation science standards you are going to build a strand of dna and you are going to decode it and figure out what that dna actually says for christy mathis at sage valley junior high school the new standards are about learning to think like a scientist there is a lot of really good stuff in them every standard is a performance task it is not you know the child needs to memorize these things it is the student needs to be able to do some pretty intense stuff we are analyzing we are critiquing we are." + ;; + ("audio") + get_base64_str "$ROOT_FOLDER/data/test.wav" + ;; + ("video") + get_base64_str "$ROOT_FOLDER/data/test.mp4" + ;; + (*) + echo "Invalid document type" >&2 + exit 1 + ;; + esac +} + +function validate_service() { + local URL="$1" + local EXPECTED_RESULT="$2" + local SERVICE_NAME="$3" + local DOCKER_NAME="$4" + local VALIDATE_TYPE="$5" + local INPUT_DATA="$6" + local FORM_DATA1="$7" + local FORM_DATA2="$8" + local FORM_DATA3="$9" + local FORM_DATA4="${10}" + local FORM_DATA5="${11}" + local FORM_DATA6="${12}" + + if [[ $VALIDATE_TYPE == *"json"* ]]; then + HTTP_RESPONSE=$(curl --silent --write-out "HTTPSTATUS:%{http_code}" -X POST -d "$INPUT_DATA" -H 'Content-Type: application/json' "$URL") + else + CURL_CMD=(curl --silent --write-out "HTTPSTATUS:%{http_code}" -X POST -F "$FORM_DATA1" -F "$FORM_DATA2" -F "$FORM_DATA3" -F "$FORM_DATA4" -F "$FORM_DATA5" -H 'Content-Type: multipart/form-data' "$URL") + if [[ -n "$FORM_DATA6" ]]; then + CURL_CMD+=(-F "$FORM_DATA6") + fi + HTTP_RESPONSE=$("${CURL_CMD[@]}") + fi + HTTP_STATUS=$(echo $HTTP_RESPONSE | tr -d '\n' | sed -e 's/.*HTTPSTATUS://') + RESPONSE_BODY=$(echo $HTTP_RESPONSE | sed -e 's/HTTPSTATUS\:.*//g') + + docker logs ${DOCKER_NAME} >> ${LOG_PATH}/${SERVICE_NAME}.log + + # check response status + if [ "$HTTP_STATUS" -ne "200" ]; then + echo "[ $SERVICE_NAME ] HTTP status is not 200. Received status was $HTTP_STATUS" + exit 1 + else + echo "[ $SERVICE_NAME ] HTTP status is 200. Checking content..." 
+    fi
+    # check response body
+    if [[ "$RESPONSE_BODY" != *"$EXPECTED_RESULT"* ]]; then
+        echo "EXPECTED_RESULT==> $EXPECTED_RESULT"
+        echo "RESPONSE_BODY==> $RESPONSE_BODY"
+        echo "[ $SERVICE_NAME ] Content does not match the expected result: $RESPONSE_BODY"
+        exit 1
+    else
+        echo "[ $SERVICE_NAME ] Content is as expected."
+    fi
+
+    sleep 1s
+}
+
+function validate_microservices() {
+    # Check if the microservices are running correctly.
+
+    # whisper microservice
+    ulimit -s 65536
+    validate_service \
+        "${host_ip}:7066/v1/asr" \
+        '{"asr_result":"well"}' \
+        "whisper" \
+        "docsum-xeon-whisper-server" \
+        "json" \
+        "{\"audio\": \"$(input_data_for_test "audio")\"}"
+
+}
+
+function validate_megaservice_text() {
+    echo ">>> Checking text data in json format"
+    validate_service \
+        "${host_ip}:${BACKEND_SERVICE_PORT}/v1/docsum" \
+        "[DONE]" \
+        "docsum-xeon-backend-server" \
+        "docsum-xeon-backend-server" \
+        "json" \
+        '{"type": "text", "messages": "Text Embeddings Inference (TEI) is a toolkit for deploying and serving open source text embeddings and sequence classification models. TEI enables high-performance extraction for the most popular models, including FlagEmbedding, Ember, GTE and E5."}'
+
+    echo ">>> Checking text data in form format, set language=en"
+    validate_service \
+        "${host_ip}:${BACKEND_SERVICE_PORT}/v1/docsum" \
+        "[DONE]" \
+        "docsum-xeon-backend-server" \
+        "docsum-xeon-backend-server" \
+        "media" "" \
+        "type=text" \
+        "messages=Text Embeddings Inference (TEI) is a toolkit for deploying and serving open source text embeddings and sequence classification models. TEI enables high-performance extraction for the most popular models, including FlagEmbedding, Ember, GTE and E5." \
+        "max_tokens=32" \
+        "language=en" \
+        "stream=True"
+
+    echo ">>> Checking text data in form format, set language=zh"
+    validate_service \
+        "${host_ip}:${BACKEND_SERVICE_PORT}/v1/docsum" \
+        "[DONE]" \
+        "docsum-xeon-backend-server" \
+        "docsum-xeon-backend-server" \
+        "media" "" \
+        "type=text" \
+        "messages=2024年9月26日,北京——今日,英特尔正式发布英特尔® 至强® 6性能核处理器(代号Granite Rapids),为AI、数据分析、科学计算等计算密集型业务提供卓越性能。" \
+        "max_tokens=32" \
+        "language=zh" \
+        "stream=True"
+
+    echo ">>> Checking text data in form format, upload file"
+    validate_service \
+        "${host_ip}:${BACKEND_SERVICE_PORT}/v1/docsum" \
+        "TEI" \
+        "docsum-xeon-backend-server" \
+        "docsum-xeon-backend-server" \
+        "media" "" \
+        "type=text" \
+        "messages=" \
+        "files=@$ROOT_FOLDER/data/short.txt" \
+        "max_tokens=32" \
+        "language=en" \
+        "stream=False"
+}
+
+function validate_megaservice_multimedia() {
+    echo ">>> Checking audio data in json format"
+    validate_service \
+        "${host_ip}:${BACKEND_SERVICE_PORT}/v1/docsum" \
+        "well" \
+        "docsum-xeon-backend-server" \
+        "docsum-xeon-backend-server" \
+        "json" \
+        "{\"type\": \"audio\", \"messages\": \"$(input_data_for_test "audio")\", \"stream\": \"False\"}"
+
+    echo ">>> Checking audio data in form format"
+    validate_service \
+        "${host_ip}:${BACKEND_SERVICE_PORT}/v1/docsum" \
+        "you" \
+        "docsum-xeon-backend-server" \
+        "docsum-xeon-backend-server" \
+        "media" "" \
+        "type=audio" \
+        "messages=UklGRigAAABXQVZFZm10IBIAAAABAAEARKwAAIhYAQACABAAAABkYXRhAgAAAAEA" \
+        "max_tokens=32" \
+        "language=en" \
+        "stream=False"
+
+    echo ">>> Checking audio data in form format, upload file"
+    validate_service \
+        "${host_ip}:${BACKEND_SERVICE_PORT}/v1/docsum" \
+        "well" \
+        "docsum-xeon-backend-server" \
+        "docsum-xeon-backend-server" \
+        "media" "" \
+        "type=audio" \
+        "messages=" \
+        "files=@$ROOT_FOLDER/data/test.wav" \
+        "max_tokens=32" \
+        "language=en" \
+        "stream=False"
+
+    echo ">>> Checking video data in json format"
+    validate_service \
+        "${host_ip}:${BACKEND_SERVICE_PORT}/v1/docsum" \
+        "bye" \
+        "docsum-xeon-backend-server" \
+        "docsum-xeon-backend-server" \
+        "json" \
+        "{\"type\": \"video\", \"messages\": \"$(input_data_for_test "video")\", \"stream\": \"False\"}"
+
+    echo ">>> Checking video data in form format"
+    validate_service \
+        "${host_ip}:${BACKEND_SERVICE_PORT}/v1/docsum" \
+        "bye" \
+        "docsum-xeon-backend-server" \
+        "docsum-xeon-backend-server" \
+        "media" "" \
+        "type=video" \
+        "messages=\"$(input_data_for_test "video")\"" \
+        "max_tokens=32" \
+        "language=en" \
+        "stream=False"
+
+    echo ">>> Checking video data in form format, upload file"
+    validate_service \
+        "${host_ip}:${BACKEND_SERVICE_PORT}/v1/docsum" \
+        "bye" \
+        "docsum-xeon-backend-server" \
+        "docsum-xeon-backend-server" \
+        "media" "" \
+        "type=video" \
+        "messages=" \
+        "files=@$ROOT_FOLDER/data/test.mp4" \
+        "max_tokens=32" \
+        "language=en" \
+        "stream=False"
+}
+
+function validate_megaservice_long_text() {
+    echo ">>> Checking long text data in form format, set summary_type=auto"
+    validate_service \
+        "${host_ip}:${BACKEND_SERVICE_PORT}/v1/docsum" \
+        "Intel" \
+        "docsum-xeon-backend-server" \
+        "docsum-xeon-backend-server" \
+        "media" "" \
+        "type=text" \
+        "messages=" \
+        "files=@$ROOT_FOLDER/data/long.txt" \
+        "max_tokens=128" \
+        "summary_type=auto" \
+        "stream=False"
+
+    echo ">>> Checking long text data in form format, set summary_type=stuff"
+    validate_service \
+        "${host_ip}:${BACKEND_SERVICE_PORT}/v1/docsum" \
+        "TEI" \
+        "docsum-xeon-backend-server" \
+        "docsum-xeon-backend-server" \
+        "media" "" \
+        "type=text" \
+        "messages=" \
+        "files=@$ROOT_FOLDER/data/short.txt" \
+        "max_tokens=128" \
+        "summary_type=stuff" \
+        "stream=False"
+
+    echo ">>> Checking long text data in form format, set summary_type=truncate"
+    validate_service \
+        "${host_ip}:${BACKEND_SERVICE_PORT}/v1/docsum" \
+        "Intel" \
+        "docsum-xeon-backend-server" \
+        "docsum-xeon-backend-server" \
+        "media" "" \
+        "type=text" \
"messages=" \ + "files=@$ROOT_FOLDER/data/long.txt" \ + "max_tokens=128" \ + "summary_type=truncate" \ + "stream=False" + + echo ">>> Checking long text data in form format, set summary_type=map_reduce" + validate_service \ + "${host_ip}:${BACKEND_SERVICE_PORT}/v1/docsum" \ + "Intel" \ + "docsum-xeon-backend-server" \ + "docsum-xeon-backend-server" \ + "media" "" \ + "type=text" \ + "messages=" \ + "files=@$ROOT_FOLDER/data/long.txt" \ + "max_tokens=128" \ + "summary_type=map_reduce" \ + "stream=False" + + echo ">>> Checking long text data in form format, set summary_type=refine" + validate_service \ + "${host_ip}:${BACKEND_SERVICE_PORT}/v1/docsum" \ + "Intel" \ + "docsum-xeon-backend-server" \ + "docsum-xeon-backend-server" \ + "media" "" \ + "type=text" \ + "messages=" \ + "files=@$ROOT_FOLDER/data/long.txt" \ + "max_tokens=128" \ + "summary_type=refine" \ + "stream=False" +} + +function stop_docker() { + cd $WORKPATH/docker_compose/intel/cpu/xeon/ + docker compose stop && docker compose rm -f +} + +function main() { + + echo "::group:: Stopping any running Docker containers..." + stop_docker + echo "::endgroup::" + + echo "::group::build_docker_images" + if [[ "$IMAGE_REPO" == "opea" ]]; then build_docker_images; fi + echo "::endgroup::" + + echo "::group::start_services" + start_services + echo "::endgroup::" + + echo "::group:: Validating microservices" + validate_microservices + echo "::endgroup::" + + echo "::group::validate_megaservice_text" + validate_megaservice_text + echo "::endgroup::" + + echo "::group::validate_megaservice_multimedia" + validate_megaservice_multimedia + echo "::endgroup::" + + echo "::group::validate_megaservice_long_text" + validate_megaservice_long_text + echo "::endgroup::" + + echo "::group::stop_docker" + stop_docker + echo "::endgroup::" + + docker system prune -f + +} + +main diff --git a/ProductivitySuite/tests/test_compose_remote_on_xeon.sh b/ProductivitySuite/tests/test_compose_remote_on_xeon.sh new file mode 100644 index 0000000000..24029d31d7 --- /dev/null +++ b/ProductivitySuite/tests/test_compose_remote_on_xeon.sh @@ -0,0 +1,280 @@ +#!/bin/bash +# Copyright (C) 2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +set -xe +IMAGE_REPO=${IMAGE_REPO:-"opea"} +IMAGE_TAG=${IMAGE_TAG:-"latest"} +echo "REGISTRY=IMAGE_REPO=${IMAGE_REPO}" +echo "TAG=IMAGE_TAG=${IMAGE_TAG}" +export REGISTRY=${IMAGE_REPO} +export TAG=${IMAGE_TAG} +export MODEL_CACHE=${model_cache:-"./data"} + +WORKPATH=$(dirname "$PWD") +LOG_PATH="$WORKPATH/tests" +ip_address=$(hostname -I | awk '{print $1}') + +function build_docker_images() { + opea_branch=${opea_branch:-"main"} + cd $WORKPATH/docker_image_build + git clone --depth 1 --branch ${opea_branch} https://github.com/opea-project/GenAIComps.git + pushd GenAIComps + echo "GenAIComps test commit is $(git rev-parse HEAD)" + docker build --no-cache -t ${REGISTRY}/comps-base:${TAG} --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile . + popd && sleep 1s + + echo "Build all the images with --no-cache, check docker_image_build.log for details..." 
+    docker compose -f build.yaml build --no-cache > ${LOG_PATH}/docker_image_build.log
+    docker run -d --env "VLLM_SKIP_WARMUP=true" -p 8000:8000 --ipc=host public.ecr.aws/q9t5s3a7/vllm-cpu-release-repo:v0.9.2 --model TinyLlama/TinyLlama-1.1B-Chat-v1.0 --api-key $TEST_KEY
+    docker images && sleep 1s
+}
+
+function start_services() {
+    cd $WORKPATH/docker_compose/intel/cpu/xeon/
+    export host_ip=${ip_address}
+    export LOGFLAG=True
+    export no_proxy="$no_proxy,tgi_service_codegen,llm_codegen,tei-embedding-service,tei-reranking-service,chatqna-xeon-backend-server,retriever,tgi-service,redis-vector-db,whisper,llm-docsum-tgi,docsum-xeon-backend-server,mongo,codegen"
+
+    source set_env.sh
+
+    export REMOTE_ENDPOINT=http://localhost:8000
+    export API_KEY=$TEST_KEY
+    export LLM_MODEL_ID=TinyLlama/TinyLlama-1.1B-Chat-v1.0
+    export LLM_MODEL_ID_CODEGEN=TinyLlama/TinyLlama-1.1B-Chat-v1.0
+
+    # Start Docker Containers
+    docker compose -f compose_remote.yaml up -d > ${LOG_PATH}/start_services_with_compose_remote.log
+    sleep 30s
+
+}
+
+function validate_service() {
+    local URL="$1"
+    local EXPECTED_RESULT="$2"
+    local SERVICE_NAME="$3"
+    local DOCKER_NAME="$4"
+    local INPUT_DATA="$5"
+
+    if [[ $SERVICE_NAME == *"dataprep_upload_file"* ]]; then
+        cd $LOG_PATH
+        HTTP_RESPONSE=$(curl --silent --write-out "HTTPSTATUS:%{http_code}" -X POST -F 'files=@./dataprep_file.txt' -H 'Content-Type: multipart/form-data' "$URL")
+    elif [[ $SERVICE_NAME == *"dataprep_upload_link"* ]]; then
+        HTTP_RESPONSE=$(curl --silent --write-out "HTTPSTATUS:%{http_code}" -X POST -F 'link_list=["https://www.ces.tech/"]' "$URL")
+    elif [[ $SERVICE_NAME == *"dataprep_get"* ]]; then
+        HTTP_RESPONSE=$(curl --silent --write-out "HTTPSTATUS:%{http_code}" -X POST -H 'Content-Type: application/json' "$URL")
+    elif [[ $SERVICE_NAME == *"dataprep_del"* ]]; then
+        HTTP_RESPONSE=$(curl --silent --write-out "HTTPSTATUS:%{http_code}" -X POST -d '{"file_path": "all"}' -H 'Content-Type: application/json' "$URL")
+    else
+        HTTP_RESPONSE=$(curl --silent --write-out "HTTPSTATUS:%{http_code}" -X POST -d "$INPUT_DATA" -H 'Content-Type: application/json' "$URL")
+    fi
+    HTTP_STATUS=$(echo $HTTP_RESPONSE | tr -d '\n' | sed -e 's/.*HTTPSTATUS://')
+    RESPONSE_BODY=$(echo $HTTP_RESPONSE | sed -e 's/HTTPSTATUS\:.*//g')
+
+    docker logs ${DOCKER_NAME} >> ${LOG_PATH}/${SERVICE_NAME}.log
+
+    # check response status
+    if [ "$HTTP_STATUS" -ne "200" ]; then
+        echo "[ $SERVICE_NAME ] HTTP status is not 200. Received status was $HTTP_STATUS"
+        exit 1
+    else
+        echo "[ $SERVICE_NAME ] HTTP status is 200. Checking content..."
+    fi
+    # check response body
+    if [[ "$RESPONSE_BODY" != *"$EXPECTED_RESULT"* ]]; then
+        echo "[ $SERVICE_NAME ] Content does not match the expected result: $RESPONSE_BODY"
+        exit 1
+    else
+        echo "[ $SERVICE_NAME ] Content is as expected."
+    fi
+
+    sleep 1s
+}
+
+function validate_microservices() {
+    # Check if the microservices are running correctly.
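+    # The checks below walk the pipeline in order: embedding, dataprep, retrieval, rerank,
+    # the CodeGen LLM, then the Mongo-backed chathistory and prompt services.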
+
+    # tei for embedding service
+    validate_service \
+        "${ip_address}:6006/embed" \
+        "[[" \
+        "tei-embedding" \
+        "tei-embedding-server" \
+        '{"inputs":"What is Deep Learning?"}'
+
+    sleep 1m # retrieval can't curl as expected, try to wait for more time
+
+    # test /v1/dataprep/delete
+    validate_service \
+        "http://${ip_address}:6007/v1/dataprep/delete" \
+        '{"status":true}' \
+        "dataprep_del" \
+        "dataprep-redis-server"
+
+    # test /v1/dataprep/ingest upload file
+    echo "Deep learning is a subset of machine learning that utilizes neural networks with multiple layers to analyze various levels of abstract data representations. It enables computers to identify patterns and make decisions with minimal human intervention by learning from large amounts of data." > $LOG_PATH/dataprep_file.txt
+    validate_service \
+        "http://${ip_address}:6007/v1/dataprep/ingest" \
+        "Data preparation succeeded" \
+        "dataprep_upload_file" \
+        "dataprep-redis-server"
+
+    # test /v1/dataprep upload link
+    validate_service \
+        "http://${ip_address}:6007/v1/dataprep/ingest" \
+        "Data preparation succeeded" \
+        "dataprep_upload_link" \
+        "dataprep-redis-server"
+
+    # test /v1/dataprep/get
+    validate_service \
+        "http://${ip_address}:6007/v1/dataprep/get" \
+        '{"name":' \
+        "dataprep_get" \
+        "dataprep-redis-server"
+
+    # retrieval microservice
+    test_embedding=$(python3 -c "import random; embedding = [random.uniform(-1, 1) for _ in range(768)]; print(embedding)")
+    validate_service \
+        "${ip_address}:7001/v1/retrieval" \
+        "retrieved_docs" \
+        "retrieval-microservice" \
+        "retriever-redis-server" \
+        "{\"text\":\"What is the revenue of Nike in 2023?\",\"embedding\":${test_embedding}}"
+
+    # tei for rerank microservice
+    validate_service \
+        "${ip_address}:8808/rerank" \
+        '{"index":1,"score":' \
+        "tei-rerank" \
+        "tei-reranking-server" \
+        '{"query":"What is Deep Learning?", "texts": ["Deep Learning is not...", "Deep learning is..."]}'
+
+    # CodeGen llm microservice
+    validate_service \
+        "${ip_address}:9001/v1/chat/completions" \
+        "data: " \
+        "llm_codegen" \
+        "llm-textgen-server-codegen" \
+        '{"query":"def print_hello_world():"}'
+
+    result=$(curl -X 'POST' \
+        http://${ip_address}:6012/v1/chathistory/create \
+        -H 'accept: application/json' \
+        -H 'Content-Type: application/json' \
+        -d '{
+        "data": {
+            "messages": "test Messages", "user": "test"
+        }
+    }')
+    echo $result
+    if [[ ${#result} -eq 26 ]]; then
+        echo "Correct result."
+    else
+        echo "Incorrect result."
+        exit 1
+    fi
+
+    result=$(curl -X 'POST' \
+        http://$ip_address:6018/v1/prompt/create \
+        -H 'accept: application/json' \
+        -H 'Content-Type: application/json' \
+        -d '{
+        "prompt_text": "test prompt", "user": "test"
+    }')
+    echo $result
+    if [[ ${#result} -eq 26 ]]; then
+        echo "Correct result."
+    else
+        echo "Incorrect result."
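+        # Assumption: a 26-character response is a quoted 24-hex-digit MongoDB ObjectId,
+        # the expected payload of a successful create call.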
+        exit 1
+    fi
+
+}
+
+function validate_megaservice() {
+
+    # Curl the ChatQnA Mega Service
+    validate_service \
+        "${ip_address}:8888/v1/chatqna" \
+        "data: " \
+        "chatqna-megaservice" \
+        "chatqna-xeon-backend-server" \
+        '{"messages": "What is the revenue of Nike in 2023?"}'
+
+    # Curl the CodeGen Mega Service
+    validate_service \
+        "${ip_address}:7778/v1/codegen" \
+        "print" \
+        "codegen-xeon-backend-server" \
+        "codegen-xeon-backend-server" \
+        '{"messages": "def print_hello_world():"}'
+}
+
+function validate_frontend() {
+    echo "[ TEST INFO ]: --------- frontend test started ---------"
+    cd $WORKPATH/ui/react
+    local conda_env_name="OPEA_e2e"
+    export PATH=${HOME}/miniforge3/bin/:$PATH
+
+    if conda info --envs | grep -q "^${conda_env_name}[[:space:]]"; then
+        echo "[ TEST INFO ]: Conda environment '${conda_env_name}' exists. Activating..."
+    else
+        echo "[ TEST INFO ]: Conda environment '${conda_env_name}' not found. Creating..."
+        conda create -n "${conda_env_name}" python=3.12 -y
+    fi
+
+    source activate ${conda_env_name}
+    echo "[ TEST INFO ]: --------- conda env activated ---------"
+
+    conda install -c conda-forge nodejs=22.6.0 -y
+    npm install && npm ci
+    node -v && npm -v && pip list
+
+    exit_status=0
+    npm run test || exit_status=$?
+
+    if [ $exit_status -ne 0 ]; then
+        echo "[TEST INFO]: ---------frontend test failed---------"
+        exit $exit_status
+    else
+        echo "[TEST INFO]: ---------frontend test passed---------"
+    fi
+}
+
+function stop_docker() {
+    cd $WORKPATH/docker_compose/intel/cpu/xeon/
+    docker compose -f compose_remote.yaml stop && docker compose -f compose_remote.yaml rm -f
+}
+
+function main() {
+
+    echo "::group::stop_docker"
+    stop_docker
+    echo "::endgroup::"
+
+    echo "::group::build_docker_images"
+    if [[ "$IMAGE_REPO" == "opea" ]]; then build_docker_images; fi
+    echo "::endgroup::"
+
+    echo "::group::start_services"
+    start_services
+    echo "::endgroup::"
+
+    echo "::group::validate_microservices"
+    validate_microservices
+    echo "::endgroup::"
+
+    echo "::group::validate_frontend"
+    validate_frontend
+    echo "::endgroup::"
+
+    echo "::group::stop_docker"
+    stop_docker
+    echo "::endgroup::"
+
+    docker system prune -f
+
+}
+
+main

From 3da05e719b75f072a09fe059865e8626c5bf602a Mon Sep 17 00:00:00 2001
From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Date: Thu, 14 Aug 2025 03:49:06 +0000
Subject: [PATCH 2/2] [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci
---
 ChatQnA/tests/test_compose_remote_on_xeon.sh | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/ChatQnA/tests/test_compose_remote_on_xeon.sh b/ChatQnA/tests/test_compose_remote_on_xeon.sh
index bb01da6423..56449033fd 100644
--- a/ChatQnA/tests/test_compose_remote_on_xeon.sh
+++ b/ChatQnA/tests/test_compose_remote_on_xeon.sh
@@ -38,7 +38,7 @@ function start_services() {
     export REMOTE_ENDPOINT=http://localhost:8000
     export API_KEY=$TEST_KEY
     export LLM_MODEL_ID=TinyLlama/TinyLlama-1.1B-Chat-v1.0
-
+
     # Start Docker Containers
     docker compose -f compose_remote.yaml -f compose.telemetry.yaml up -d --quiet-pull > ${LOG_PATH}/start_services_with_compose_remote.log
 }