12 changes: 6 additions & 6 deletions examples/nixl/run_accuracy_test.sh
@@ -2,12 +2,12 @@
#set -xe

# Models to run
MODELS=(
"Qwen/Qwen3-0.6B"
)
#MODELS=(
# "meta-llama/Llama-3.1-8B"
# "Qwen/Qwen3-0.6B"
#)
MODELS=(
"meta-llama/Llama-3.1-8B-Instruct"
)

export VLLM_USE_V1=1
export VLLM_SKIP_WARMUP="true"
@@ -103,7 +103,7 @@ run_tests_for_model() {
echo "Starting prefill instance $i on GPU $GPU_ID, port $PORT"

# Build the command with or without model-specific args
BASE_CMD="RANK=0 UCX_TLS=tcp VLLM_NIXL_SIDE_CHANNEL_PORT=$SIDE_CHANNEL_PORT vllm serve $model_name \
BASE_CMD="RANK=0 UCX_TLS=rc,ud,ib VLLM_NIXL_SIDE_CHANNEL_PORT=$SIDE_CHANNEL_PORT vllm serve $model_name \
--port $PORT \
--enforce-eager \
--max_num_batched_tokens 8192 \
@@ -136,7 +136,7 @@ run_tests_for_model() {
echo "Starting decode instance $i on GPU $GPU_ID, port $PORT"

# Build the command with or without model-specific args
BASE_CMD="RANK=1 UCX_TLS=tcp VLLM_NIXL_SIDE_CHANNEL_PORT=$SIDE_CHANNEL_PORT vllm serve $model_name \
BASE_CMD="RANK=1 UCX_TLS=rc,ud,ib VLLM_NIXL_SIDE_CHANNEL_PORT=$SIDE_CHANNEL_PORT vllm serve $model_name \
--port $PORT \
--enforce-eager \
--max_num_batched_tokens 8192 \
158 changes: 113 additions & 45 deletions examples/nixl/run_benchmark_test.sh
@@ -11,23 +11,34 @@ set -xe


MODELS=(
"/root/software/data/pytorch/huggingface/hub/models--meta-llama--Llama-3.1-8B-Instruct/snapshots/0e9e39f249a16976918f6564b8830bc894c89659/"
"/software/data/pytorch/huggingface/hub/models--meta-llama--Llama-3.1-8B-Instruct/snapshots/0e9e39f249a16976918f6564b8830bc894c89659/"
Review comment (Collaborator): let's not use an internal path here; I didn't realize that last time.

)
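One way to address the review comment above, shown only as a sketch (MODEL_PATH is a hypothetical override variable, not part of this PR), is to fall back to the public Hub ID unless a local snapshot path is supplied:

# Sketch only: prefer a caller-supplied local snapshot path, else use the public model ID.
MODELS=(
    "${MODEL_PATH:-meta-llama/Llama-3.1-8B-Instruct}"
)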

export VLLM_EXECUTE_MODEL_TIMEOUT_SECONDS=1000000
export VLLM_RPC_TIMEOUT=1000000000
export NIXL_LOG_LEVEL=debug
#export UCX_LOG_LEVEL=debug
export VLLM_USE_V1=1
#export VLLM_SKIP_WARMUP=True
export VLLM_SKIP_WARMUP=True
export PT_HPU_LAZY_MODE=1
export VLLM_EXPONENTIAL_BUCKETING=False
#export VLLM_PROMPT_BS_BUCKET_MIN=1
#export VLLM_PROMPT_SEQ_BUCKET_MIN=1
#export VLLM_PROMPT_SEQ_BUCKET_STEP=8192
#export VLLM_PROMPT_SEQ_BUCKET_MAX=8192
export VLLM_PROMPT_SEQ_BUCKET_MIN=8192
export VLLM_PROMPT_SEQ_BUCKET_STEP=8192
export VLLM_PROMPT_SEQ_BUCKET_MAX=8192
export VLLM_DECODE_BLOCK_BUCKET_MIN=1024
export VLLM_DECODE_BLOCK_BUCKET_MAX=1184
export VLLM_USE_PADDING_AWARE_SCHEDULING=1
export DECODER_TP_RATIO=2
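# (Explanatory comment added for this write-up, not part of the PR: the
#  VLLM_PROMPT_SEQ_BUCKET_* settings pin the HPU prompt warmup bucket to a single
#  8192-token shape, and VLLM_DECODE_BLOCK_BUCKET_MIN/MAX bound the decode block
#  buckets, which should limit graph recompilation for this fixed 8k-input benchmark.)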

# Number of prefill and decode instances to create
NUM_PREFILL_INSTANCES=${NUM_PREFILL_INSTANCES:-1} # Default to 1
NUM_DECODE_INSTANCES=${NUM_DECODE_INSTANCES:-1} # Default to 1
PREFILLER_TP_SIZE=${PREFILLER_TP_SIZE:-1}
DECODER_TP_SIZE=${DECODER_TP_SIZE:-1}
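# (Illustrative override, comment only; values are assumptions, not part of the PR.
#  With the hard-coded HABANA_VISIBLE_DEVICES=2,3 on the decode side below, a TP=2
#  decode run could be launched as:
#    DECODER_TP_SIZE=2 NUM_PREFILL_INSTANCES=1 NUM_DECODE_INSTANCES=1 bash run_benchmark_test.sh)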


# Find the git repository root directory
#GIT_ROOT=$(git rev-parse --show-toplevel)
GIT_ROOT="/home/vllm-nixl/vllm"
@@ -98,17 +109,17 @@ run_tests_for_model() {
# Calculate port number (base port + instance number)
PORT=$((8300 + i))
# Calculate side channel port. Avoid clash with TP workers.
SIDE_CHANNEL_PORT=$((6559 + i))
SIDE_CHANNEL_PORT=$((5559 + i))

echo "Starting prefill instance $i on GPU $GPU_ID, port $PORT"

# Build the command with or without model-specific args
BASE_CMD="RANK=0 UCX_TLS=rc,ud,ib VLLM_NIXL_SIDE_CHANNEL_PORT=$SIDE_CHANNEL_PORT vllm serve $model_name \
BASE_CMD="HABANA_VISIBLE_DEVICES=1 MY_ROLE=PREFILL UCX_TLS=rc,ud,ib VLLM_NIXL_SIDE_CHANNEL_PORT=$SIDE_CHANNEL_PORT vllm serve $model_name \
--port $PORT \
--long_prefill_token_threshold 8192 \
--max_num_batched_tokens 8192 \
--gpu-memory-utilization 0.3 \
--disable-log-requests \
--gpu-memory-utilization 0.3 \
--tensor-parallel-size $PREFILLER_TP_SIZE \
--kv-transfer-config '{\"kv_connector\":\"NixlConnector\",\"kv_role\":\"kv_both\",\"kv_buffer_device\":\"cpu\"}'"
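    # (Reference note added for this write-up, not in the PR: the escaped
    #  --kv-transfer-config value above is the JSON
    #  {"kv_connector": "NixlConnector", "kv_role": "kv_both", "kv_buffer_device": "cpu"}.)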

@@ -132,12 +143,12 @@ run_tests_for_model() {
# Calculate port number (base port + instance number)
PORT=$((8400 + i))
# Calculate side channel port
SIDE_CHANNEL_PORT=$((5659 + i * $DECODER_TP_SIZE))
SIDE_CHANNEL_PORT=$((4659 + i * $DECODER_TP_SIZE))
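    # (Note added for this write-up, not in the PR: spacing the base port by
    #  DECODER_TP_SIZE presumably reserves one side-channel port per decode TP worker,
    #  e.g. instance 0 with TP=2 would use 4659 and 4660, keeping clear of the
    #  prefill side-channel range that starts at 5559.)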

echo "Starting decode instance $i on GPU $GPU_ID, port $PORT"

# Build the command with or without model-specific args
BASE_CMD="RANK=1 UCX_TLS=rc,ud,ib VLLM_NIXL_SIDE_CHANNEL_PORT=$SIDE_CHANNEL_PORT vllm serve $model_name \
BASE_CMD="HABANA_VISIBLE_DEVICES=2,3 MY_ROLE=DECODE UCX_TLS=rc,ud,ib VLLM_NIXL_SIDE_CHANNEL_PORT=$SIDE_CHANNEL_PORT vllm serve $model_name \
--port $PORT \
--gpu-memory-utilization 0.3 \
--tensor-parallel-size $DECODER_TP_SIZE \
@@ -171,7 +182,7 @@ run_tests_for_model() {
done

# Build the command for the proxy server with all the hosts and ports
PROXY_CMD="python toy_proxy_server.py --port 9192"
PROXY_CMD="python toy_proxy_server.py --port 9111"

# Add all prefill hosts and ports
PROXY_CMD+=" --prefiller-hosts ${PREFILL_HOSTS[@]}"
@@ -188,7 +199,7 @@ run_tests_for_model() {
# Wait for the proxy to start
sleep 10

# curl -X POST -s http://localhost:9192/v1/completions \
# curl -X POST -s http://localhost:9111/v1/completions \
# -H "Content-Type: application/json" \
# -d '{
# "model": "meta-llama/Llama-3.1-8B",
@@ -198,42 +209,99 @@ run_tests_for_model() {
# }'
# sleep 5
# echo "--------------------===================-------------"
#curl -X POST -s http://localhost:9192/v1/completions \
# -H "Content-Type: application/json" \
# -d '{
# "model": "/root/software/data/pytorch/huggingface/hub/models--meta-llama--Llama-3.1-8B-Instruct/snapshots/0e9e39f249a16976918f6564b8830bc894c89659/",
# "prompt": "Mark Elliot Zuckerberg is an American businessman who co-founded the social media service Facebook and its parent company Meta Platforms, of which he is the chairman, chief executive officer, and controlling shareholder. Zuckerberg has been the subject of multiple lawsuits regarding the creation and ownership of the website as well as issues such as user privacy. Born in White Plains, New York, Zuckerberg briefly attended Harvard College, where he launched Facebook in February 2004 with his roommates Eduardo Saverin, Andrew McCollum, Dustin Moskovitz and Chris Hughes. Zuckerberg took the company public in May 2012 with majority shares. He became the worlds youngest self-made billionaire[a] in 2008, at age 23, and has consistently ranked among the worlds wealthiest individuals. According to Forbes, Zuckerbergs estimated net worth stood at US$221.2 billion as of May 2025, making him the second-richest individual in the world.[2] Intel opened its first international manufacturing facility in 1972, in Malaysia, which would host multiple Intel operations, before opening assembly facilities and semiconductor plants in Singapore and Jerusalem in the early 1980s, and manufacturing and development centers in China, India, and Costa Rica in the 1990s.[31] By the early 1980s, its business was dominated by DRAM chips. However, increased competition from Japanese semiconductor manufacturers had, by 1983, dramatically reduced the profitability of this market. The growing success of the IBM personal computer, based on an Intel microprocessor, was among factors that convinced Gordon Moore (CEO since 1975) to shift the companys focus to microprocessors and to change fundamental aspects of that business model. Moores decision to sole-source Intels 386 chip played into the companys continuing success.",
# "max_tokens": 5,
# "temperature": 0
# }'
#curl -X POST -s http://localhost:9192/v1/completions \
# -H "Content-Type: application/json" \
# -d '{
# "model": "/root/software/data/pytorch/huggingface/hub/models--meta-llama--Llama-3.1-8B-Instruct/snapshots/0e9e39f249a16976918f6564b8830bc894c89659/",
# "prompt": ["This was a few months ago. It was my day off and the only thing I had to do was pick my girlfriend up from work at 9:00 pm. Other than that, I was free to loaf on the couch from morning to night, which is what I did. Around 8:00, I decided to shower before I left the house. Now, I have short hair that dries pretty quickly, but I am deeply vain about it, so I always dry it with the hairdryer right after I shower to ensure my hair doesnt get flat and weird. I never skip this step. So, I get out of the shower, start drying my hair... And then I wake up in bed. Its half an hour later. I feel like garbage, my entire body mysteriously hurts, and I am slowly realizing that I dont remember exiting the bathroom. My only clear thought is: oh shit, its 9:00! I have to pick up my girlfriend! Better shake myself awake. I dragged my aching carcass back to the bathroom, and this was when I noticed the massive blisters forming all over my hand. I was still pretty out of it, but I knew that this was a hospital visit kind of burn. My girlfriend then called to check in because I was running late and, despite my undoubtedly convincing argument that I was still perfectly fine to drive, she immediately knew something was wrong. She cabbed home and we got a ride to the ER. Turns out, I had my first ever seizure! It seems like during the seizure, I clenched the hairdryer in my fist and had it pointed at my other hand long enough to thoroughly cook it. The tissue loss is pretty deep in some areas and there was concerns about me retaining my mobility, but its been healing well so far.",
# "Mark Elliot Zuckerberg is an American businessman who co-founded the social media service Facebook and its parent company Meta Platforms, of which he is the chairman, chief executive officer, and controlling shareholder. Zuckerberg has been the subject of multiple lawsuits regarding the creation and ownership of the website as well as issues such as user privacy. Born in White Plains, New York, Zuckerberg briefly attended Harvard College, where he launched Facebook in February 2004 with his roommates Eduardo Saverin, Andrew McCollum, Dustin Moskovitz and Chris Hughes. Zuckerberg took the company public in May 2012 with majority shares. He became the worlds youngest self-made billionaire[a] in 2008, at age 23, and has consistently ranked among the worlds wealthiest individuals. According to Forbes, Zuckerbergs estimated net worth stood at US$221.2 billion as of May 2025, making him the second-richest individual in the world.[2]"],
# "max_tokens": 2,
# "temperature": 0
# }'
sleep 2
curl -X POST -s http://localhost:9111/v1/completions \
-H "Content-Type: application/json" \
-d '{
"model": "/software/data/pytorch/huggingface/hub/models--meta-llama--Llama-3.1-8B-Instruct/snapshots/0e9e39f249a16976918f6564b8830bc894c89659/",
"prompt": "Mark Elliot Zuckerberg is an American businessman who co-founded the social media service Facebook and its parent company Meta Platforms, of which he is the chairman, chief executive officer, and controlling shareholder. Zuckerberg has been the subject of multiple lawsuits regarding the creation and ownership of the website as well as issues such as user privacy. Born in White Plains, New York, Zuckerberg briefly attended Harvard College, where he launched Facebook in February 2004 with his roommates Eduardo Saverin, Andrew McCollum, Dustin Moskovitz and Chris Hughes. Zuckerberg took the company public in May 2012 with majority shares. He became the worlds youngest self-made billionaire[a] in 2008, at age 23, and has consistently ranked among the worlds wealthiest individuals. According to Forbes, Zuckerbergs estimated net worth stood at US$221.2 billion as of May 2025, making him the second-richest individual in the world.[2] Intel opened its first international manufacturing facility in 1972, in Malaysia, which would host multiple Intel operations, before opening assembly facilities and semiconductor plants in Singapore and Jerusalem in the early 1980s, and manufacturing and development centers in China, India, and Costa Rica in the 1990s.[31] By the early 1980s, its business was dominated by DRAM chips. However, increased competition from Japanese semiconductor manufacturers had, by 1983, dramatically reduced the profitability of this market. The growing success of the IBM personal computer, based on an Intel microprocessor, was among factors that convinced Gordon Moore (CEO since 1975) to shift the companys focus to microprocessors and to change fundamental aspects of that business model. Moores decision to sole-source Intels 386 chip played into the companys continuing success.",
"max_tokens": 100,
"temperature": 0
}'
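# (Optional check, not part of the PR: pipe the curl response through
#  python3 -m json.tool to pretty-print it and confirm choices[0].text is populated.)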
#curl -X POST -s http://localhost:9111/v1/completions \
# -H "Content-Type: application/json" \
# -d '{
# "model": "/root/software/data/pytorch/huggingface/hub/models--meta-llama--Llama-3.1-8B-Instruct/snapshots/0e9e39f249a16976918f6564b8830bc894c89659/",
# "prompt": ["This was a few months ago. It was my day off and the only thing I had to do was pick my girlfriend up from work at 9:00 pm. Other than that, I was free to loaf on the couch from morning to night, which is what I did. Around 8:00, I decided to shower before I left the house. Now, I have short hair that dries pretty quickly, but I am deeply vain about it, so I always dry it with the hairdryer right after I shower to ensure my hair doesnt get flat and weird. I never skip this step. So, I get out of the shower, start drying my hair... And then I wake up in bed. Its half an hour later. I feel like garbage, my entire body mysteriously hurts, and I am slowly realizing that I dont remember exiting the bathroom. My only clear thought is: oh shit, its 9:00! I have to pick up my girlfriend! Better shake myself awake. I dragged my aching carcass back to the bathroom, and this was when I noticed the massive blisters forming all over my hand. I was still pretty out of it, but I knew that this was a hospital visit kind of burn. My girlfriend then called to check in because I was running late and, despite my undoubtedly convincing argument that I was still perfectly fine to drive, she immediately knew something was wrong. She cabbed home and we got a ride to the ER. Turns out, I had my first ever seizure! It seems like during the seizure, I clenched the hairdryer in my fist and had it pointed at my other hand long enough to thoroughly cook it. The tissue loss is pretty deep in some areas and there was concerns about me retaining my mobility, but its been healing well so far.",
# "Mark Elliot Zuckerberg is an American businessman who co-founded the social media service Facebook and its parent company Meta Platforms, of which he is the chairman, chief executive officer, and controlling shareholder. Zuckerberg has been the subject of multiple lawsuits regarding the creation and ownership of the website as well as issues such as user privacy. Born in White Plains, New York, Zuckerberg briefly attended Harvard College, where he launched Facebook in February 2004 with his roommates Eduardo Saverin, Andrew McCollum, Dustin Moskovitz and Chris Hughes. Zuckerberg took the company public in May 2012 with majority shares. He became the worlds youngest self-made billionaire[a] in 2008, at age 23, and has consistently ranked among the worlds wealthiest individuals. According to Forbes, Zuckerbergs estimated net worth stood at US$221.2 billion as of May 2025, making him the second-richest individual in the world.[2]"],
# "max_tokens": 100,
# "temperature": 0
# }'
#sleep 2
# Run lm eval for this model
echo "Running tests for $model_name"
#echo "Running tests for $model_name"
#TEST_MODEL=$model_name python -m pytest -s -x test_accuracy.py
python3 ../../../../benchmarks/benchmark_serving.py \
--port 9192 \
--seed "$(date +%s)" \
--model /root/software/data/pytorch/huggingface/hub/models--meta-llama--Llama-3.1-8B-Instruct/snapshots/0e9e39f249a16976918f6564b8830bc894c89659/ \
--dataset-name random \
--random-input-len 8192 \
--random-output-len 200 \
--num-prompts 100 \
--burstiness 100 \
--request-rate 3.6 \
--metric-percentiles 95 \
--percentile-metrics ttft,tpot,itl,e2el \
--backend openai \
--endpoint /v1/completions \
--ignore-eos
#python3 ../../../../benchmarks/benchmark_serving.py \
# --port 9111 \
# --seed "$(date +%s)" \
# --model /root/software/data/pytorch/huggingface/hub/models--meta-llama--Llama-3.1-8B-Instruct/snapshots/0e9e39f249a16976918f6564b8830bc894c89659/ \
# --dataset-name random \
# --random-input-len 8192 \
# --random-output-len 256 \
# --num-prompts 32 \
# --burstiness 100 \
# --request-rate 3.6 \
# --metric-percentiles 95 \
# --percentile-metrics ttft,tpot,itl,e2el \
# --backend openai \
# --endpoint /v1/completions \
# --ignore-eos

#sleep 100
#python3 ../../../../benchmarks/benchmark_serving.py \
# --port 8300 \
# --seed "$(date +%s)" \
# --model /root/software/data/pytorch/huggingface/hub/models--meta-llama--Llama-3.1-8B-Instruct/snapshots/0e9e39f249a16976918f6564b8830bc894c89659/ \
# --dataset-name random \
# --random-input-len 8192 \
# --random-output-len 200 \
# --num-prompts 100 \
# --burstiness 100 \
# --request-rate 3.6 \
# --metric-percentiles 95 \
# --percentile-metrics ttft,tpot,itl,e2el \
# --backend openai \
# --endpoint /v1/completions \
# --ignore-eos
qps=(0.5) #(0.1 0.25 0.5 1 2 3 4) # 5)
# explicit num_prompts mapping (must have same length as qps[])
num_prompts=(32) #(32 64 128 256 256 256 256) # 256)
input_len=8192
output_len=256 #56

# just sanity-check lengths
#if [ "${#qps[@]}" -ne "${#num_prompts[@]}" ]; then
# echo "❌ qps[] and num_prompts[] must be the same length"
# exit 1
#fi

#for i in "${!qps[@]}"; do
#q=${qps[$i]}
#np=${num_prompts[$i]}

#ts=$(date +"%Y%m%d_%H%M%S")
#logf="./nixlresult/run_in${input_len}_out${output_len}_qps${q//./p}_$ts.log"

#echo "[$(date +"%Y-%m-%d %H:%M:%S")] input=${input_len}, output=${output_len}, qps=${q}, num_prompts=${np}" \
# | tee "$logf"

#python3 ../../../../benchmarks/benchmark_serving.py \
# --port 9111 \
# --seed "$(date +%s)" \
# --model /root/software/data/pytorch/huggingface/hub/models--meta-llama--Llama-3.1-8B-Instruct/snapshots/0e9e39f249a16976918f6564b8830bc894c89659/ \
# --tokenizer /root/software/data/pytorch/huggingface/hub/models--meta-llama--Llama-3.1-8B-Instruct/snapshots/0e9e39f249a16976918f6564b8830bc894c89659/ \
# --dataset-name random \
# --random-input-len "$input_len" \
# --random-output-len 256 \
# --num-prompts "$np" \
# --request-rate "$q" \
# --percentile-metrics ttft,tpot,itl,e2el \
# --burstiness 100 \
# --backend openai \
# --endpoint /v1/completions \
# --ignore-eos \
# 2>&1 | tee -a "$logf"

#done

# Clean up before running next model
cleanup_instances