#!/usr/bin/env bash
# llada_long_gsm8k.sh
#
# GSM8K evaluation recipes: Qwen2.5-7B-Instruct (AR baseline), vanilla LLaDA,
# Fast-dLLM LLaDA, dParallel-LLaDA, D2F, and d3LLM-LLaDA.
#
# NOTE: each section below is an independent, self-contained recipe. Run the
# sections individually (copy-paste one section at a time); executing the whole
# file sequentially would launch every evaluation back to back.
# Qwen2.5-7B-Instruct autoregressive baseline on GSM8K (5-shot).
# Greedy decoding: temperature=0.0 plus do_sample=False; generation capped at
# 256 new tokens to match the diffusion-model recipes below.
export CUDA_VISIBLE_DEVICES="0,1,2,3"
export HF_ALLOW_CODE_EVAL=1
# Guard the cd: without it, a missing checkout would launch from the wrong dir.
cd ~/Codes/d3LLM/utils/lm-evaluation-harness || exit 1
PYTHONPATH=~/Codes/d3LLM/utils/lm-evaluation-harness:$PYTHONPATH \
accelerate launch -m lm_eval \
  --model hf \
  --model_args "pretrained=Qwen/Qwen2.5-7B-Instruct,temperature=0.0" \
  --tasks gsm8k \
  --num_fewshot 5 \
  --batch_size 32 \
  --output_path evals_results/gsm8k \
  --log_samples \
  --confirm_run_unsafe_code \
  --gen_kwargs do_sample=False,max_gen_toks=256
# Vanilla LLaDA-8B-Instruct, TPF=1.0 (steps=256 equals gen_length=256, i.e.
# one denoising step per generated token).
cd ~/Codes/d3LLM/utils/utils_LLaDA || exit 1
CUDA_VISIBLE_DEVICES=0,1,2,3 accelerate launch --main_process_port 29600 eval_llada.py --tasks gsm8k --num_fewshot 5 \
  --confirm_run_unsafe_code --model llada_dist \
  --model_args model_path='GSAI-ML/LLaDA-8B-Instruct',gen_length=256,steps=256,block_length=32,show_speed=True,task="gsm8k" --batch_size 1
# Fast-dLLM LLaDA: parallel confidence-thresholded decoding (threshold=0.9)
# with prefix + dual KV cache; steps=8 per 32-token block.
# Guard the cd BEFORE rm -rf: if the cd failed, the rm would otherwise delete
# 'null/' in whatever directory we happened to be in.
cd ~/Codes/d3LLM/utils/utils_LLaDA || exit 1
rm -rf -- null/
accelerate launch --main_process_port 12335 eval_llada.py --model llada_dist --model_args model_path=GSAI-ML/LLaDA-8B-Instruct,steps=8,gen_length=256,block_length=32,remasking=low_confidence,threshold=0.9,save_dir=null,show_speed=True,task=gsm8k,generation_method=Fast_dllm_v1,use_cache=True,dual_cache=True --tasks gsm8k --num_fewshot 5 --batch_size 1 --output_path evals_results/llada_fast_dllm_dual_cache
# dParallel-LLaDA, TPF=1.0 (steps == gen_length; no parallel-commit threshold).
cd ~/Codes/d3LLM/utils/utils_LLaDA || exit 1
CUDA_VISIBLE_DEVICES=0,1,2,3 accelerate launch --main_process_port 29600 eval_llada.py --tasks gsm8k --num_fewshot 5 \
  --confirm_run_unsafe_code --model llada_dist \
  --model_args model_path='Zigeng/dParallel-LLaDA-8B-instruct',gen_length=256,steps=256,block_length=32,show_speed=True,task="gsm8k" --batch_size 1
# dParallel-LLaDA with entropy-threshold decoding (threshold=0.5).
# Uses port 29601 so it can coexist with the TPF=1.0 run on 29600.
cd ~/Codes/d3LLM/utils/utils_LLaDA || exit 1
CUDA_VISIBLE_DEVICES=0,1,2,3 accelerate launch --main_process_port 29601 eval_llada.py --tasks gsm8k --num_fewshot 5 \
  --confirm_run_unsafe_code --model llada_dist \
  --model_args model_path='Zigeng/dParallel-LLaDA-8B-instruct',gen_length=256,steps=256,block_length=32,show_speed=True,threshold=0.5,task="gsm8k" --batch_size 1
# D2F (LLaDA base + D2F LoRA), TPF=1.0: all thresholds pinned to 1.0 so every
# token is fully decoded before the next block is added (no pipelining).
cd ~/Codes/d3LLM/utils/utils_LLaDA || exit 1
export HF_ALLOW_CODE_EVAL=1
export HF_DATASETS_TRUST_REMOTE_CODE=true
CUDA_VISIBLE_DEVICES=0,1,2,3 accelerate launch \
  --main_process_port 29520 \
  --num_processes 4 \
  eval_d2f.py \
  --model dream_lora \
  --model_args pretrained=GSAI-ML/LLaDA-8B-Instruct,lora_path=SJTU-Deng-Lab/D2F_LLaDA_Instruct_8B_Lora,max_new_tokens=256,diffusion_steps=256,add_bos_token=true,temperature=0,block_size=32,block_add_threshold=1.0,skip_threshold=1.0,decoded_token_threshold=1.0,dtype=bfloat16,sampling_strategy=default,save_dir=eval_tmp \
  --tasks gsm8k \
  --num_fewshot 5 \
  --batch_size 1 \
  --output_path eval_tmp \
  --log_samples \
  --confirm_run_unsafe_code \
  --apply_chat_template \
  --fewshot_as_multiturn
# D2F (LLaDA base + D2F LoRA), accelerated setting: relaxed thresholds
# (block_add=0.7, skip=0.9, decoded_token=0.95) enable pipelined block decoding.
# NOTE: shares port 29520 with the D2F TPF=1.0 recipe above — do not run both
# concurrently on the same host.
cd ~/Codes/d3LLM/utils/utils_LLaDA || exit 1
export HF_ALLOW_CODE_EVAL=1
export HF_DATASETS_TRUST_REMOTE_CODE=true
CUDA_VISIBLE_DEVICES=0,1,2,3 accelerate launch \
  --main_process_port 29520 \
  --num_processes 4 \
  eval_d2f.py \
  --model dream_lora \
  --model_args pretrained=GSAI-ML/LLaDA-8B-Instruct,lora_path=SJTU-Deng-Lab/D2F_LLaDA_Instruct_8B_Lora,max_new_tokens=256,diffusion_steps=256,add_bos_token=true,temperature=0,block_size=32,block_add_threshold=0.7,skip_threshold=0.9,decoded_token_threshold=0.95,dtype=bfloat16,sampling_strategy=default,save_dir=eval_tmp \
  --tasks gsm8k \
  --num_fewshot 5 \
  --batch_size 1 \
  --output_path eval_tmp \
  --log_samples \
  --confirm_run_unsafe_code \
  --apply_chat_template \
  --fewshot_as_multiturn
# d3LLM-LLaDA, TPF=1.0 (steps == gen_length; default generation method).
cd ~/Codes/d3LLM/utils/utils_LLaDA || exit 1
CUDA_VISIBLE_DEVICES=0,1,2,3 accelerate launch --main_process_port 29600 eval_llada.py --tasks gsm8k --num_fewshot 5 \
  --confirm_run_unsafe_code --model llada_dist \
  --model_args model_path='d3LLM/d3LLM_LLaDA',gen_length=256,steps=256,block_length=32,show_speed=True,task="gsm8k" --batch_size 1
# d3LLM-LLaDA: generate_multi_block with threshold 0.4 and early stopping.
cd ~/Codes/d3LLM/utils/utils_LLaDA || exit 1
export HF_ALLOW_CODE_EVAL=1
export HF_DATASETS_TRUST_REMOTE_CODE=true
# Recipe parameters (hoisted so they are easy to sweep).
task=gsm8k
length=256
block_length=32
num_fewshot=5
steps=256
accelerate launch --main_process_port 29601 eval_llada.py --tasks "${task}" --num_fewshot "${num_fewshot}" \
  --confirm_run_unsafe_code --model llada_dist \
  --model_args model_path='d3LLM/d3LLM_LLaDA',gen_length=${length},steps=${steps},block_length=${block_length},show_speed=True,threshold=0.4,task="gsm8k",generation_method="generate_multi_block",early_stop=True
# d3LLM-LLaDA: generate_multi_block_kv_cache — KV-cached variant of the
# multi-block method; cache_delay_iter=2, refresh effectively disabled
# (refresh_interval=10000 exceeds the 256 steps).
cd ~/Codes/d3LLM/utils/utils_LLaDA || exit 1
export HF_ALLOW_CODE_EVAL=1
export HF_DATASETS_TRUST_REMOTE_CODE=true
# Recipe parameters (hoisted so they are easy to sweep).
task=gsm8k
length=256
block_length=32
num_fewshot=5
steps=256
accelerate launch --main_process_port 29601 eval_llada.py --tasks "${task}" --num_fewshot "${num_fewshot}" \
  --confirm_run_unsafe_code --model llada_dist \
  --model_args model_path='d3LLM/d3LLM_LLaDA',gen_length=${length},steps=${steps},block_length=${block_length},show_speed=True,threshold=0.4,task="gsm8k",generation_method="generate_multi_block_kv_cache",cache_delay_iter=2,refresh_interval=10000,early_stop=True