@@ -48,6 +48,9 @@ def createFinetuneRequest(
     lora_trainable_modules: str | None = "all-linear",
     suffix: str | None = None,
     wandb_api_key: str | None = None,
+    wandb_base_url: str | None = None,
+    wandb_project_name: str | None = None,
+    wandb_name: str | None = None,
     train_on_inputs: bool | Literal["auto"] = "auto",
 ) -> FinetuneRequest:
     if batch_size == "max":
@@ -118,6 +121,9 @@ def createFinetuneRequest(
         training_type=training_type,
         suffix=suffix,
         wandb_key=wandb_api_key,
+        wandb_base_url=wandb_base_url,
+        wandb_project_name=wandb_project_name,
+        wandb_name=wandb_name,
         train_on_inputs=train_on_inputs,
     )
 
@@ -150,6 +156,9 @@ def create(
         lora_trainable_modules: str | None = "all-linear",
         suffix: str | None = None,
         wandb_api_key: str | None = None,
+        wandb_base_url: str | None = None,
+        wandb_project_name: str | None = None,
+        wandb_name: str | None = None,
         verbose: bool = False,
         model_limits: FinetuneTrainingLimits | None = None,
         train_on_inputs: bool | Literal["auto"] = "auto",
@@ -182,6 +191,12 @@ def create(
                 Defaults to None.
             wandb_api_key (str, optional): API key for Weights & Biases integration.
                 Defaults to None.
+            wandb_base_url (str, optional): Base URL for Weights & Biases integration.
+                Defaults to None.
+            wandb_project_name (str, optional): Project name for Weights & Biases integration.
+                Defaults to None.
+            wandb_name (str, optional): Run name for Weights & Biases integration.
+                Defaults to None.
             verbose (bool, optional): whether to print the job parameters before submitting a request.
                 Defaults to False.
             model_limits (FinetuneTrainingLimits, optional): Limits for the hyperparameters the model in Fine-tuning.
@@ -225,6 +240,9 @@ def create(
             lora_trainable_modules=lora_trainable_modules,
             suffix=suffix,
             wandb_api_key=wandb_api_key,
+            wandb_base_url=wandb_base_url,
+            wandb_project_name=wandb_project_name,
+            wandb_name=wandb_name,
             train_on_inputs=train_on_inputs,
         )
 
@@ -479,6 +497,9 @@ async def create(
         lora_trainable_modules: str | None = "all-linear",
         suffix: str | None = None,
         wandb_api_key: str | None = None,
+        wandb_base_url: str | None = None,
+        wandb_project_name: str | None = None,
+        wandb_name: str | None = None,
         verbose: bool = False,
         model_limits: FinetuneTrainingLimits | None = None,
         train_on_inputs: bool | Literal["auto"] = "auto",
@@ -511,6 +532,12 @@ async def create(
                 Defaults to None.
             wandb_api_key (str, optional): API key for Weights & Biases integration.
                 Defaults to None.
+            wandb_base_url (str, optional): Base URL for Weights & Biases integration.
+                Defaults to None.
+            wandb_project_name (str, optional): Project name for Weights & Biases integration.
+                Defaults to None.
+            wandb_name (str, optional): Run name for Weights & Biases integration.
+                Defaults to None.
             verbose (bool, optional): whether to print the job parameters before submitting a request.
                 Defaults to False.
             model_limits (FinetuneTrainingLimits, optional): Limits for the hyperparameters the model in Fine-tuning.
@@ -554,6 +581,9 @@ async def create(
             lora_trainable_modules=lora_trainable_modules,
             suffix=suffix,
             wandb_api_key=wandb_api_key,
+            wandb_base_url=wandb_base_url,
+            wandb_project_name=wandb_project_name,
+            wandb_name=wandb_name,
             train_on_inputs=train_on_inputs,
         )
 
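For context, a minimal usage sketch of the new keyword arguments through the public create method. This assumes a configured Together client; the file ID, model name, and all W&B values below are placeholders, and only the wandb_* parameter names themselves come from this change.

from together import Together

client = Together()  # assumes TOGETHER_API_KEY is set in the environment

# Hypothetical values throughout: route W&B logging for this fine-tuning job
# to a specific W&B host, project, and run name via the new parameters.
job = client.fine_tuning.create(
    training_file="file-placeholder",                 # placeholder training file ID
    model="model-placeholder",                        # placeholder base model name
    wandb_api_key="wandb-key-placeholder",
    wandb_base_url="https://wandb.example.com",       # optional; only for self-hosted/dedicated W&B
    wandb_project_name="finetune-experiments",        # W&B project to log under
    wandb_name="lora-run-1",                          # display name for the W&B run
)
print(job.id)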