
Commit 98a88c0

DOC Rename max_seq_length argument (#2862)

The argument was renamed to `max_length`.

1 parent: fff52ab

13 files changed: 15 additions, 15 deletions
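All 13 files receive the same mechanical change. As a quick reference, here is a minimal sketch of the updated `SFTConfig`/`SFTTrainer` usage; it assumes a TRL version that already uses the renamed argument, and the model id is an illustrative placeholder that is not part of this commit:

```python
from datasets import load_dataset
from trl import SFTConfig, SFTTrainer

# The same 1% IMDB slice the example READMEs use
dataset = load_dataset("imdb", split="train[:1%]")

# Previously: SFTConfig(dataset_text_field="text", max_seq_length=128)
training_args = SFTConfig(dataset_text_field="text", max_length=128)

trainer = SFTTrainer(
    model="facebook/opt-350m",  # placeholder model id for illustration
    args=training_args,
    train_dataset=dataset,
)
trainer.train()
```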

examples/alora_finetuning/README.md (1 addition, 1 deletion)

@@ -32,7 +32,7 @@ trainer = Trainer(
     model=peft_model,
     train_dataset=dataset,
     dataset_text_field="text",
-    max_seq_length=2048,
+    max_length=2048,
     tokenizer=tokenizer,
     data_collator=data_collator,
 )

examples/bone_finetuning/README.md (1 addition, 1 deletion)

@@ -28,7 +28,7 @@ peft_model.print_trainable_parameters()
 
 dataset = load_dataset("imdb", split="train[:1%]")
 
-training_args = SFTConfig(dataset_text_field="text", max_seq_length=128)
+training_args = SFTConfig(dataset_text_field="text", max_length=128)
 trainer = SFTTrainer(
     model=peft_model,
     args=training_args,

examples/corda_finetuning/README.md (1 addition, 1 deletion)

@@ -109,7 +109,7 @@ preprocess_corda(model, lora_config, run_model=run_model)
 peft_model = get_peft_model(model, lora_config)
 peft_model.print_trainable_parameters()
 
-training_args = SFTConfig(dataset_text_field="text", max_seq_length=128)
+training_args = SFTConfig(dataset_text_field="text", max_length=128)
 trainer = SFTTrainer(
     model=peft_model,
     args=training_args,

examples/delora_finetuning/README.md (3 additions, 3 deletions)

@@ -26,7 +26,7 @@ peft_model.print_trainable_parameters()
 
 dataset = load_dataset("imdb", split="train[:1%]")
 
-training_args = SFTConfig(dataset_text_field="text", max_seq_length=128)
+training_args = SFTConfig(dataset_text_field="text", max_length=128)
 trainer = SFTTrainer(
     model=peft_model,
     args=training_args,
@@ -52,7 +52,7 @@ peft_model = PeftModel.from_pretrained(model, "delora-llama-3-8b")
 ## Advanced Usage
 In this script the default DeLoRA layers are the query and value layers of the Llama model. Adding adapters on more layers will increase memory usage. If you wish to choose a different set of layers for DeLoRA to be applied on, you can simply define it using:
 ```bash
-python examples/delora_finetuning/delora_finetuning.py --base_model meta-llama/Meta-Llama-3-8B --delora_target_modules "q_proj,k_proj,v_proj,o_proj"
+python examples/delora_finetuning/delora_finetuning.py --base_model meta-llama/Meta-Llama-3-8B --target_modules "q_proj,k_proj,v_proj,o_proj"
 ```
 
 Using different lambdas for different layers is also possible by setting `lambda_pattern`.
@@ -74,7 +74,7 @@ python delora_finetuning.py \
     --rank 32 \
     --delora_lambda 15 \
     --module_dropout 0.1 \
-    --delora_target_modules "q_proj,v_proj" \
+    --target_modules "q_proj,v_proj" \
     --hub_model_id "YOUR_HF_REPO" \
     --push_to_hub
 ```

examples/dora_finetuning/README.md (1 addition, 1 deletion)

@@ -24,7 +24,7 @@ trainer = transformers.Trainer(
     model=peft_model,
     train_dataset=dataset,
     dataset_text_field="text",
-    max_seq_length=2048,
+    max_length=2048,
     tokenizer=tokenizer,
 )
 trainer.train()

examples/lorafa_finetune/README.md (1 addition, 1 deletion)

@@ -40,7 +40,7 @@ trainer = transformers.Trainer(
     model=peft_model,
     train_dataset=dataset,
     dataset_text_field="text",
-    max_seq_length=2048,
+    max_length=2048,
     processing_class=tokenizer,
     optimizers=(optimizer, None),
 )

examples/miss_finetuning/README.md (1 addition, 1 deletion)

@@ -36,7 +36,7 @@ peft_model.print_trainable_parameters()
 
 dataset = load_dataset("imdb", split="train[:1%]")
 
-training_args = SFTConfig(dataset_text_field="text", max_seq_length=128)
+training_args = SFTConfig(dataset_text_field="text", max_length=128)
 trainer = SFTTrainer(
     model=peft_model,
     args=training_args,

examples/olora_finetuning/README.md (1 addition, 1 deletion)

@@ -18,7 +18,7 @@ lora_config = LoraConfig(
     init_lora_weights="olora"
 )
 peft_model = get_peft_model(model, lora_config)
-training_args = SFTConfig(dataset_text_field="text", max_seq_length=128)
+training_args = SFTConfig(dataset_text_field="text", max_length=128)
 trainer = SFTTrainer(
     model=peft_model,
     train_dataset=dataset,

examples/pissa_finetuning/README.md (1 addition, 1 deletion)

@@ -23,7 +23,7 @@ peft_model.print_trainable_parameters()
 
 dataset = load_dataset("imdb", split="train[:1%]")
 
-training_args = SFTConfig(dataset_text_field="text", max_seq_length=128)
+training_args = SFTConfig(dataset_text_field="text", max_length=128)
 trainer = SFTTrainer(
     model=peft_model,
     args=training_args,

examples/randlora_finetuning/README.md (1 addition, 1 deletion)

@@ -20,7 +20,7 @@ trainer = transformers.Trainer(
     model=peft_model,
     train_dataset=dataset,
     dataset_text_field="text",
-    max_seq_length=2048,
+    max_length=2048,
     processing_class=tokenizer,
 )
 trainer.train()
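If a script has to run against both older and newer TRL releases, one defensive option is to pick the keyword at runtime. This is a sketch under the assumption that only the keyword name changed, not its meaning, and that `SFTConfig` exposes its fields through its generated `__init__`:

```python
import inspect

from trl import SFTConfig

# Check which spelling this TRL version accepts
# (the argument was renamed from max_seq_length to max_length).
accepted = inspect.signature(SFTConfig.__init__).parameters
length_kwarg = "max_length" if "max_length" in accepted else "max_seq_length"

training_args = SFTConfig(dataset_text_field="text", **{length_kwarg: 128})
```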
