Commit a38a694

Benchmark optimum-executorch

Author: Guang Yang (committed)
1 parent 4457cf6, commit a38a694

File tree

6 files changed: +189, -50 lines changed

.ci/scripts/gather_benchmark_configs.py

Lines changed: 7 additions & 5 deletions
@@ -32,7 +32,8 @@
 BENCHMARK_CONFIGS = {
     "xplat": [
         "xnnpack_q8",
-        "hf_xnnpack_fp32",
+        "hf_xnnpack_custom_spda_kv_cache_8da4w",
+        "et_xnnpack_custom_spda_kv_cache_8da4w",
         "llama3_fb16",
         "llama3_spinquant",
         "llama3_qlora",
@@ -129,25 +130,26 @@ def generate_compatible_configs(model_name: str, target_os=None) -> List[str]:
     """
     configs = []
     if is_valid_huggingface_model_id(model_name):
+        configs.append("hf_xnnpack_custom_spda_kv_cache_8da4w")
         if model_name.startswith("meta-llama/"):
-            # LLaMA models
+            # etLLM recipes for Llama
             repo_name = model_name.split("meta-llama/")[1]
             if "qlora" in repo_name.lower():
                 configs.append("llama3_qlora")
             elif "spinquant" in repo_name.lower():
                 configs.append("llama3_spinquant")
             else:
                 configs.append("llama3_fb16")
+                configs.append("et_xnnpack_custom_spda_kv_cache_8da4w")
             configs.extend(
                 [
                     config
                     for config in BENCHMARK_CONFIGS.get(target_os, [])
                     if config.startswith("llama")
                 ]
             )
-        else:
-            # Non-LLaMA models
-            configs.append("hf_xnnpack_fp32")
+        if model_name.startswith("Qwen/Qwen3"):
+            configs.append("et_xnnpack_custom_spda_kv_cache_8da4w")
     elif model_name in MODEL_NAME_TO_MODEL:
         # ExecuTorch in-tree non-GenAI models
         configs.append("xnnpack_q8")

.github/workflows/android-perf-private-device-experiment.yml

Lines changed: 3 additions & 3 deletions
@@ -18,7 +18,7 @@ on:
         description: Models to be benchmarked
         required: false
         type: string
-        default: mv3,meta-llama/Llama-3.2-1B-Instruct-SpinQuant_INT4_EO8,meta-llama/Llama-3.2-1B-Instruct-QLORA_INT4_EO8
+        default: mv3,meta-llama/Llama-3.2-1B-Instruct-SpinQuant_INT4_EO8,meta-llama/Llama-3.2-1B-Instruct-QLORA_INT4_EO8,google/gemma-3-1b-it,Qwen/Qwen3-0.6B,HuggingFaceTB/SmolLM2-135M,meta-llama/Llama-3.2-1B,allenai/OLMo-1B-hf
       devices:
         description: Target devices to run benchmark
         required: false
@@ -34,7 +34,7 @@ on:
         description: Models to be benchmarked
         required: false
         type: string
-        default: mv3,meta-llama/Llama-3.2-1B-Instruct-SpinQuant_INT4_EO8,meta-llama/Llama-3.2-1B-Instruct-QLORA_INT4_EO8
+        default: mv3,meta-llama/Llama-3.2-1B-Instruct-SpinQuant_INT4_EO8,meta-llama/Llama-3.2-1B-Instruct-QLORA_INT4_EO8,google/gemma-3-1b-it,Qwen/Qwen3-0.6B,HuggingFaceTB/SmolLM2-135M,meta-llama/Llama-3.2-1B,allenai/OLMo-1B-hf
       devices:
         description: Target devices to run benchmark
         required: false
@@ -57,6 +57,6 @@ jobs:
       id-token: write
       contents: read
     with:
-      models: ${{ inputs.models || 'mv3,meta-llama/Llama-3.2-1B-Instruct-SpinQuant_INT4_EO8,meta-llama/Llama-3.2-1B-Instruct-QLORA_INT4_EO8' }}
+      models: ${{ inputs.models || 'mv3,meta-llama/Llama-3.2-1B-Instruct-SpinQuant_INT4_EO8,meta-llama/Llama-3.2-1B-Instruct-QLORA_INT4_EO8,google/gemma-3-1b-it,Qwen/Qwen3-0.6B,HuggingFaceTB/SmolLM2-135M,meta-llama/Llama-3.2-1B,allenai/OLMo-1B-hf' }}
       devices: samsung_galaxy_s22_private
       benchmark_configs: ${{ inputs.benchmark_configs }}

.github/workflows/android-perf.yml

Lines changed: 76 additions & 11 deletions
@@ -70,7 +70,7 @@ jobs:
           # Separate default values from the workflow dispatch. To ensure defaults are accessible
           # during scheduled runs and to provide flexibility for different defaults between
           # on-demand and periodic benchmarking.
-          CRON_DEFAULT_MODELS: ${{ github.event_name == 'schedule' && 'llama,mv3,mv2,ic4,ic3,resnet50,edsr,mobilebert,w2l,meta-llama/Llama-3.2-1B,meta-llama/Llama-3.2-1B-Instruct-SpinQuant_INT4_EO8,meta-llama/Llama-3.2-1B-Instruct-QLORA_INT4_EO8' || 'llama' }}
+          CRON_DEFAULT_MODELS: ${{ github.event_name == 'schedule' && 'llama,mv3,mv2,ic4,ic3,resnet50,edsr,mobilebert,w2l,meta-llama/Llama-3.2-1B,meta-llama/Llama-3.2-1B-Instruct-SpinQuant_INT4_EO8,meta-llama/Llama-3.2-1B-Instruct-QLORA_INT4_EO8,google/gemma-3-1b-it,Qwen/Qwen3-0.6B,HuggingFaceTB/SmolLM2-135M,allenai/OLMo-1B-hf' || 'llama' }}
          CRON_DEFAULT_DEVICES: samsung_galaxy_s22
        run: |
          set -eux
@@ -201,8 +201,8 @@ jobs:
          HF_MODEL_REPO=${{ matrix.model }}
          OUT_ET_MODEL_NAME="$(echo "$HF_MODEL_REPO" | awk -F'/' '{print $2}' | sed 's/_/-/g' | tr '[:upper:]' '[:lower:]')_${{ matrix.config }}"

+         # Convert HF checkpoint to ET via etLLM path
          if [[ "$HF_MODEL_REPO" == meta-llama/* ]]; then
-           # Llama models on Hugging Face
            if [[ ${{ matrix.config }} == "llama3_spinquant" ]]; then
              # SpinQuant
              # Download prequantized chceckpoint from Hugging Face
@@ -272,6 +272,21 @@ jobs:
                --metadata '{"get_bos_id":128000, "get_eos_ids":[128009, 128001]}' \
                --output_name="${OUT_ET_MODEL_NAME}.pte"
              ls -lh "${OUT_ET_MODEL_NAME}.pte"
+           elif [[ ${{ matrix.config }} == "et_xnnpack_custom_spda_kv_cache_8da4w" ]]; then
+             DOWNLOADED_PATH=$(bash .ci/scripts/download_hf_hub.sh --model_id "${HF_MODEL_REPO}" --subdir "original" --files "tokenizer.model" "params.json" "consolidated.00.pth")
+             python -m examples.models.llama.export_llama \
+               --model llama3_2 \
+               --checkpoint "${DOWNLOADED_PATH}/consolidated.00.pth" \
+               --params "${DOWNLOADED_PATH}/params.json" \
+               -kv \
+               --use_sdpa_with_kv_cache \
+               -d fp32 \
+               -X \
+               --xnnpack-extended-ops \
+               -qmode 8da4w -G 32 -E 8,0 \
+               --metadata '{"get_bos_id":128000, "get_eos_ids":[128009, 128001]}' \
+               --output_name="${OUT_ET_MODEL_NAME}.pte"
+             ls -lh "${OUT_ET_MODEL_NAME}.pte"
            elif [[ ${{ matrix.config }} == "llama3_qnn_htp" ]]; then
              export QNN_SDK_ROOT=/tmp/qnn/2.28.0.241029
              export LD_LIBRARY_PATH=$QNN_SDK_ROOT/lib/x86_64-linux-clang/
@@ -292,18 +307,68 @@ jobs:
              OUT_ET_MODEL_NAME="llama3_2_qnn" # Qualcomm hard-coded it in their script
              find . -name "${OUT_ET_MODEL_NAME}.pte" -not -path "./${OUT_ET_MODEL_NAME}.pte" -exec mv {} ./ \;
              ls -lh "${OUT_ET_MODEL_NAME}.pte"
-           else
-             # By default, test with the Hugging Face model and the xnnpack recipe
-             DOWNLOADED_PATH=$(bash .ci/scripts/download_hf_hub.sh --model_id "${HF_MODEL_REPO}" --subdir "original" --files "tokenizer.model")
-             python -m extension.export_util.export_hf_model -hfm="$HF_MODEL_REPO" -o "$OUT_ET_MODEL_NAME"
-             ls -lh "${OUT_ET_MODEL_NAME}.pte"
            fi
-         else
-           echo "Unsupported model ${{ matrix.model }}"
-           exit 1
+         elif [[ "$HF_MODEL_REPO" == "Qwen/Qwen3-0.6B" ]]; then
+           if [[ ${{ matrix.config }} == "et_xnnpack_custom_spda_kv_cache_8da4w" ]]; then
+             DOWNLOADED_PATH=$(bash .ci/scripts/download_hf_hub.sh --model_id "${HF_MODEL_REPO}" --subdir "." --files "tokenizer.json")
+             python -m examples.models.llama.export_llama \
+               --model qwen3-0_6b \
+               --params examples/models/qwen3/0_6b_config.json \
+               -kv \
+               --use_sdpa_with_kv_cache \
+               -d fp32 \
+               -X \
+               --xnnpack-extended-ops \
+               -qmode 8da4w -G 32 -E 8,0 \
+               --metadata '{"get_bos_id": 151644, "get_eos_ids":[151645]}' \
+               --output_name="${OUT_ET_MODEL_NAME}.pte"
+             ls -lh "${OUT_ET_MODEL_NAME}.pte"
+           fi
+         fi
+
+         if [[ ${{ matrix.config }} == "hf_xnnpack_custom_spda_kv_cache_8da4w" ]]; then
+           DOWNLOADED_PATH=$(
+             bash .ci/scripts/download_hf_hub.sh \
+               --model_id "${HF_MODEL_REPO}" \
+               --files "tokenizer.json"
+           )
+           echo "tokenizer.json is downloaded to $DOWNLOADED_PATH"
+
+           # Install optimum-executorch
+           git clone https://github.com/huggingface/optimum-executorch
+           pushd optimum-executorch
+           # There is no release yet, for CI stability, always test from the same commit on main
+           git checkout 1c653dc49812fc431a22312c7295d97005d22e12
+           python install_dev.py
+           pip list
+
+           ARGS=(
+             "--model" "${HF_MODEL_REPO}"
+             "--task" "text-generation"
+             "--recipe" "xnnpack"
+             "--use_custom_sdpa"
+             "--qlinear"
+             "--qembedding"
+             "--output_dir" "."
+           )
+
+           # Add conditional arguments based on model
+           case "${HF_MODEL_REPO}" in
+             *"google/gemma-3-1b-it"*)
+               echo "--use_custom_kv_cache can not be used for HybridCache"
+               ;;
+             *)
+               ARGS+=("--use_custom_kv_cache")
+               ;;
+           esac
+
+           optimum-cli export executorch "${ARGS[@]}"
+
+           mv model.pte ${OUT_ET_MODEL_NAME}.pte
+           ls -lh "${OUT_ET_MODEL_NAME}.pte"
          fi

-         zip -j model.zip "${OUT_ET_MODEL_NAME}.pte" "${DOWNLOADED_PATH}/tokenizer.model"
+         zip -j model.zip ${OUT_ET_MODEL_NAME}.pte ${DOWNLOADED_PATH}/tokenizer.*
          ls -lh model.zip
          mkdir -p "${ARTIFACTS_DIR_NAME}"
          mv model.zip "${ARTIFACTS_DIR_NAME}"
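
For clarity, the conditional argument assembly in the new optimum-executorch export step, re-expressed as a small Python sketch (illustrative only: the workflow itself uses the bash ARGS array and case statement shown above, and the repo id at the bottom is just an example):

# Illustrative sketch of the optimum-cli argument selection used in this workflow.
def build_export_args(hf_model_repo: str) -> list[str]:
    args = [
        "--model", hf_model_repo,
        "--task", "text-generation",
        "--recipe", "xnnpack",
        "--use_custom_sdpa",
        "--qlinear",
        "--qembedding",
        "--output_dir", ".",
    ]
    # gemma-3-1b-it uses HybridCache, which the custom KV cache does not support,
    # so --use_custom_kv_cache is only added for the other models.
    if "google/gemma-3-1b-it" not in hf_model_repo:
        args.append("--use_custom_kv_cache")
    return args

# The workflow then runs: optimum-cli export executorch "${ARGS[@]}"
print("optimum-cli export executorch " + " ".join(build_export_args("Qwen/Qwen3-0.6B")))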

.github/workflows/apple-perf-private-device-experiment.yml

Lines changed: 3 additions & 3 deletions
@@ -18,7 +18,7 @@ on:
         description: Models to be benchmarked
         required: false
         type: string
-        default: mv3,meta-llama/Llama-3.2-1B-Instruct-SpinQuant_INT4_EO8,meta-llama/Llama-3.2-1B-Instruct-QLORA_INT4_EO8
+        default: mv3,meta-llama/Llama-3.2-1B-Instruct-SpinQuant_INT4_EO8,meta-llama/Llama-3.2-1B-Instruct-QLORA_INT4_EO8,google/gemma-3-1b-it,Qwen/Qwen3-0.6B,HuggingFaceTB/SmolLM2-135M,meta-llama/Llama-3.2-1B,allenai/OLMo-1B-hf
       devices:
         description: Target devices to run benchmark
         required: false
@@ -34,7 +34,7 @@ on:
         description: Models to be benchmarked
         required: false
         type: string
-        default: mv3,meta-llama/Llama-3.2-1B-Instruct-SpinQuant_INT4_EO8,meta-llama/Llama-3.2-1B-Instruct-QLORA_INT4_EO8
+        default: mv3,meta-llama/Llama-3.2-1B-Instruct-SpinQuant_INT4_EO8,meta-llama/Llama-3.2-1B-Instruct-QLORA_INT4_EO8,google/gemma-3-1b-it,Qwen/Qwen3-0.6B,HuggingFaceTB/SmolLM2-135M,meta-llama/Llama-3.2-1B,allenai/OLMo-1B-hf
       devices:
         description: Target devices to run benchmark
         required: false
@@ -57,6 +57,6 @@ jobs:
       id-token: write
       contents: read
     with:
-      models: ${{ inputs.models || 'mv3,meta-llama/Llama-3.2-1B-Instruct-SpinQuant_INT4_EO8,meta-llama/Llama-3.2-1B-Instruct-QLORA_INT4_EO8' }}
+      models: ${{ inputs.models || 'mv3,meta-llama/Llama-3.2-1B-Instruct-SpinQuant_INT4_EO8,meta-llama/Llama-3.2-1B-Instruct-QLORA_INT4_EO8,google/gemma-3-1b-it,Qwen/Qwen3-0.6B,HuggingFaceTB/SmolLM2-135M,meta-llama/Llama-3.2-1B,allenai/OLMo-1B-hf' }}
       devices: apple_iphone_15_private
       benchmark_configs: ${{ inputs.benchmark_configs }}

.github/workflows/apple-perf.yml

Lines changed: 76 additions & 10 deletions
@@ -70,7 +70,7 @@ jobs:
           # Separate default values from the workflow dispatch. To ensure defaults are accessible
           # during scheduled runs and to provide flexibility for different defaults between
           # on-demand and periodic benchmarking.
-          CRON_DEFAULT_MODELS: ${{ github.event_name == 'schedule' && 'llama,mv3,mv2,ic4,ic3,resnet50,edsr,mobilebert,w2l,meta-llama/Llama-3.2-1B,meta-llama/Llama-3.2-1B-Instruct-SpinQuant_INT4_EO8,meta-llama/Llama-3.2-1B-Instruct-QLORA_INT4_EO8' || 'llama' }}
+          CRON_DEFAULT_MODELS: ${{ github.event_name == 'schedule' && 'llama,mv3,mv2,ic4,ic3,resnet50,edsr,mobilebert,w2l,meta-llama/Llama-3.2-1B-Instruct-SpinQuant_INT4_EO8,meta-llama/Llama-3.2-1B-Instruct-QLORA_INT4_EO8,google/gemma-3-1b-it,Qwen/Qwen3-0.6B,HuggingFaceTB/SmolLM2-135M,meta-llama/Llama-3.2-1B,allenai/OLMo-1B-hf' || 'llama' }}
          CRON_DEFAULT_DEVICES: apple_iphone_15
        run: |
          set -eux
@@ -207,6 +207,7 @@ jobs:
          HF_MODEL_REPO=${{ matrix.model }}
          OUT_ET_MODEL_NAME="$(echo "$HF_MODEL_REPO" | awk -F'/' '{print $2}' | sed 's/_/-/g' | tr '[:upper:]' '[:lower:]')_${{ matrix.config }}"

+         # Convert HF checkpoint to ET via etLLM path
          if [[ "$HF_MODEL_REPO" == meta-llama/* ]]; then
            # Llama models on Hugging Face
            if [[ ${{ matrix.config }} == "llama3_spinquant" ]]; then
@@ -278,6 +279,21 @@ jobs:
                --metadata '{"get_bos_id":128000, "get_eos_ids":[128009, 128001]}' \
                --output_name="${OUT_ET_MODEL_NAME}.pte"
              ls -lh "${OUT_ET_MODEL_NAME}.pte"
+           elif [[ ${{ matrix.config }} == "et_xnnpack_custom_spda_kv_cache_8da4w" ]]; then
+             DOWNLOADED_PATH=$(bash .ci/scripts/download_hf_hub.sh --model_id "${HF_MODEL_REPO}" --subdir "original" --files "tokenizer.model" "params.json" "consolidated.00.pth")
+             python -m examples.models.llama.export_llama \
+               --model llama3_2 \
+               --checkpoint "${DOWNLOADED_PATH}/consolidated.00.pth" \
+               --params "${DOWNLOADED_PATH}/params.json" \
+               -kv \
+               --use_sdpa_with_kv_cache \
+               -d fp32 \
+               -X \
+               --xnnpack-extended-ops \
+               -qmode 8da4w -G 32 -E 8,0 \
+               --metadata '{"get_bos_id":128000, "get_eos_ids":[128009, 128001]}' \
+               --output_name="${OUT_ET_MODEL_NAME}.pte"
+             ls -lh "${OUT_ET_MODEL_NAME}.pte"
            elif [[ ${{ matrix.config }} == "llama3_coreml_ane" ]]; then
              # ANE
              DOWNLOADED_PATH=$(bash .ci/scripts/download_hf_hub.sh --model_id "${HF_MODEL_REPO}" --subdir "original" --files "tokenizer.model" "params.json" "consolidated.00.pth")
@@ -293,18 +309,68 @@ jobs:
                --coreml-compute-units cpu_and_ne \
                --output_name="${OUT_ET_MODEL_NAME}.pte"
              ls -lh "${OUT_ET_MODEL_NAME}.pte"
-           else
-             # By default, test with the Hugging Face model and the xnnpack recipe
-             DOWNLOADED_PATH=$(bash .ci/scripts/download_hf_hub.sh --model_id "${HF_MODEL_REPO}" --subdir "original" --files "tokenizer.model")
-             ${CONDA_RUN} python -m extension.export_util.export_hf_model -hfm="$HF_MODEL_REPO" -o "$OUT_ET_MODEL_NAME"
-             ls -lh "${OUT_ET_MODEL_NAME}.pte"
            fi
-         else
-           echo "Unsupported model ${{ matrix.model }}"
-           exit 1
+         elif [[ "$HF_MODEL_REPO" == "Qwen/Qwen3-0.6B" ]]; then
+           if [[ ${{ matrix.config }} == "et_xnnpack_custom_spda_kv_cache_8da4w" ]]; then
+             DOWNLOADED_PATH=$(bash .ci/scripts/download_hf_hub.sh --model_id "${HF_MODEL_REPO}" --subdir "." --files "tokenizer.json")
+             python -m examples.models.llama.export_llama \
+               --model qwen3-0_6b \
+               --params examples/models/qwen3/0_6b_config.json \
+               -kv \
+               --use_sdpa_with_kv_cache \
+               -d fp32 \
+               -X \
+               --xnnpack-extended-ops \
+               -qmode 8da4w -G 32 -E 8,0 \
+               --metadata '{"get_bos_id": 151644, "get_eos_ids":[151645]}' \
+               --output_name="${OUT_ET_MODEL_NAME}.pte"
+             ls -lh "${OUT_ET_MODEL_NAME}.pte"
+           fi
+         fi
+
+         if [[ ${{ matrix.config }} == "hf_xnnpack_custom_spda_kv_cache_8da4w" ]]; then
+           DOWNLOADED_PATH=$(
+             bash .ci/scripts/download_hf_hub.sh \
+               --model_id "${HF_MODEL_REPO}" \
+               --files "tokenizer.json"
+           )
+           echo "tokenizer.json is downloaded to $DOWNLOADED_PATH"
+
+           # Install optimum-executorch
+           git clone https://github.com/huggingface/optimum-executorch
+           pushd optimum-executorch
+           # There is no release yet, for CI stability, always test from the same commit on main
+           git checkout 1c653dc49812fc431a22312c7295d97005d22e12
+           python install_dev.py
+           pip list
+
+           ARGS=(
+             "--model" "${HF_MODEL_REPO}"
+             "--task" "text-generation"
+             "--recipe" "xnnpack"
+             "--use_custom_sdpa"
+             "--qlinear"
+             "--qembedding"
+             "--output_dir" "."
+           )
+
+           # Add conditional arguments based on model
+           case "${HF_MODEL_REPO}" in
+             *"google/gemma-3-1b-it"*)
+               echo "--use_custom_kv_cache can not be used for HybridCache"
+               ;;
+             *)
+               ARGS+=("--use_custom_kv_cache")
+               ;;
+           esac
+
+           optimum-cli export executorch "${ARGS[@]}"
+
+           mv model.pte ${OUT_ET_MODEL_NAME}.pte
+           ls -lh "${OUT_ET_MODEL_NAME}.pte"
          fi

-         zip -j model.zip "${OUT_ET_MODEL_NAME}.pte" "${DOWNLOADED_PATH}/tokenizer.model"
+         zip -j model.zip ${OUT_ET_MODEL_NAME}.pte ${DOWNLOADED_PATH}/tokenizer.*
          ls -lh model.zip
          mkdir -p "${ARTIFACTS_DIR_NAME}"
          mv model.zip "${ARTIFACTS_DIR_NAME}"

.github/workflows/trunk.yml

Lines changed: 24 additions & 18 deletions
@@ -570,34 +570,40 @@ jobs:
          git clone https://github.com/huggingface/optimum-executorch
          pushd optimum-executorch
          # There is no release yet, for CI stability, always test from the same commit on main
-         git checkout da80c9e35b3db5c7eea8731b7d660482fb4870a8
+         git checkout 1c653dc49812fc431a22312c7295d97005d22e12
          pip install .[tests]
+         pip install transformers==4.52.4
          popd
-
-         if [ "${{ matrix.hf_model_id }}" == "google/gemma-3-1b-it" ]; then
-           # Fixes for gemma-3 is not available in the released version
-           git clone https://github.com/huggingface/transformers.git
-           pushd transformers
-           git checkout a57274466f7f72efaa2662d1738cdaf28ae8071f
-           pip install -e .
-           popd
-         fi
          pip list
          echo "::endgroup::"

          echo "::group::Export to ExecuTorch"
          # Pass matrix variable as environment variable
          export MODEL_ID="${{ matrix.hf_model_id }}"
-         export OUTPUT_DIR="$(pwd)/${MODEL_ID}_custom_sdpa_8da4w"
+         export OUTPUT_DIR="$(pwd)/${MODEL_ID}_custom_sdpa_kv_cache_8da4w"
          pushd optimum-executorch

-         optimum-cli export executorch \
-           --model ${MODEL_ID} \
-           --task text-generation \
-           --recipe xnnpack \
-           --use_custom_sdpa \
-           --output_dir ${OUTPUT_DIR} \
-           --qlinear
+         ARGS=(
+           "--model" "${MODEL_ID}"
+           "--task" "text-generation"
+           "--recipe" "xnnpack"
+           "--use_custom_sdpa"
+           "--qlinear"
+           "--qembedding"
+           "--output_dir" "."
+         )
+
+         # Add conditional arguments based on model
+         case "${MODEL_ID}" in
+           *"google/gemma-3-1b-it"*)
+             echo "--use_custom_kv_cache can not be used for HybridCache"
+             ;;
+           *)
+             ARGS+=("--use_custom_kv_cache")
+             ;;
+         esac
+
+         optimum-cli export executorch "${ARGS[@]}"

          ls -FlAGhp ${OUTPUT_DIR}
          popd
