Commit 49b8c28

Merge pull request #217 from togethercomputer/arsh/new-wandb-params
Add WandB base url, project name, run name
2 parents: 2467de2 + 507cd57

4 files changed: +53 −2 lines

pyproject.toml

Lines changed: 1 addition & 1 deletion
@@ -12,7 +12,7 @@ build-backend = "poetry.masonry.api"
 
 [tool.poetry]
 name = "together"
-version = "1.3.5"
+version = "1.3.6"
 authors = [
     "Together AI <[email protected]>"
 ]

src/together/cli/api/finetune.py

Lines changed: 9 additions & 0 deletions
@@ -108,6 +108,9 @@ def fine_tuning(ctx: click.Context) -> None:
     "--suffix", type=str, default=None, help="Suffix for the fine-tuned model name"
 )
 @click.option("--wandb-api-key", type=str, default=None, help="Wandb API key")
+@click.option("--wandb-base-url", type=str, default=None, help="Wandb base URL")
+@click.option("--wandb-project-name", type=str, default=None, help="Wandb project name")
+@click.option("--wandb-name", type=str, default=None, help="Wandb run name")
 @click.option(
     "--confirm",
     "-y",
@@ -144,6 +147,9 @@ def create(
     lora_trainable_modules: str,
     suffix: str,
     wandb_api_key: str,
+    wandb_base_url: str,
+    wandb_project_name: str,
+    wandb_name: str,
     confirm: bool,
     train_on_inputs: bool | Literal["auto"],
 ) -> None:
@@ -170,6 +176,9 @@ def create(
         lora_trainable_modules=lora_trainable_modules,
         suffix=suffix,
         wandb_api_key=wandb_api_key,
+        wandb_base_url=wandb_base_url,
+        wandb_project_name=wandb_project_name,
+        wandb_name=wandb_name,
         train_on_inputs=train_on_inputs,
     )

src/together/resources/finetune.py

Lines changed: 30 additions & 0 deletions
@@ -48,6 +48,9 @@ def createFinetuneRequest(
     lora_trainable_modules: str | None = "all-linear",
     suffix: str | None = None,
     wandb_api_key: str | None = None,
+    wandb_base_url: str | None = None,
+    wandb_project_name: str | None = None,
+    wandb_name: str | None = None,
     train_on_inputs: bool | Literal["auto"] = "auto",
 ) -> FinetuneRequest:
     if batch_size == "max":
@@ -118,6 +121,9 @@ def createFinetuneRequest(
         training_type=training_type,
         suffix=suffix,
         wandb_key=wandb_api_key,
+        wandb_base_url=wandb_base_url,
+        wandb_project_name=wandb_project_name,
+        wandb_name=wandb_name,
         train_on_inputs=train_on_inputs,
     )
@@ -150,6 +156,9 @@ def create(
         lora_trainable_modules: str | None = "all-linear",
         suffix: str | None = None,
         wandb_api_key: str | None = None,
+        wandb_base_url: str | None = None,
+        wandb_project_name: str | None = None,
+        wandb_name: str | None = None,
         verbose: bool = False,
         model_limits: FinetuneTrainingLimits | None = None,
         train_on_inputs: bool | Literal["auto"] = "auto",
@@ -182,6 +191,12 @@ def create(
                 Defaults to None.
             wandb_api_key (str, optional): API key for Weights & Biases integration.
                 Defaults to None.
+            wandb_base_url (str, optional): Base URL for Weights & Biases integration.
+                Defaults to None.
+            wandb_project_name (str, optional): Project name for Weights & Biases integration.
+                Defaults to None.
+            wandb_name (str, optional): Run name for Weights & Biases integration.
+                Defaults to None.
             verbose (bool, optional): whether to print the job parameters before submitting a request.
                 Defaults to False.
             model_limits (FinetuneTrainingLimits, optional): Limits for the hyperparameters the model in Fine-tuning.
@@ -225,6 +240,9 @@ def create(
             lora_trainable_modules=lora_trainable_modules,
             suffix=suffix,
             wandb_api_key=wandb_api_key,
+            wandb_base_url=wandb_base_url,
+            wandb_project_name=wandb_project_name,
+            wandb_name=wandb_name,
             train_on_inputs=train_on_inputs,
         )
@@ -479,6 +497,9 @@ async def create(
         lora_trainable_modules: str | None = "all-linear",
         suffix: str | None = None,
         wandb_api_key: str | None = None,
+        wandb_base_url: str | None = None,
+        wandb_project_name: str | None = None,
+        wandb_name: str | None = None,
         verbose: bool = False,
         model_limits: FinetuneTrainingLimits | None = None,
         train_on_inputs: bool | Literal["auto"] = "auto",
@@ -511,6 +532,12 @@ async def create(
                 Defaults to None.
             wandb_api_key (str, optional): API key for Weights & Biases integration.
                 Defaults to None.
+            wandb_base_url (str, optional): Base URL for Weights & Biases integration.
+                Defaults to None.
+            wandb_project_name (str, optional): Project name for Weights & Biases integration.
+                Defaults to None.
+            wandb_name (str, optional): Run name for Weights & Biases integration.
+                Defaults to None.
             verbose (bool, optional): whether to print the job parameters before submitting a request.
                 Defaults to False.
             model_limits (FinetuneTrainingLimits, optional): Limits for the hyperparameters the model in Fine-tuning.
@@ -554,6 +581,9 @@ async def create(
             lora_trainable_modules=lora_trainable_modules,
             suffix=suffix,
             wandb_api_key=wandb_api_key,
+            wandb_base_url=wandb_base_url,
+            wandb_project_name=wandb_project_name,
+            wandb_name=wandb_name,
             train_on_inputs=train_on_inputs,
         )
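
For context, a minimal usage sketch of how the new parameters could be passed through the Python client after this change. It is not taken from the commit: the model name, training file ID, and Weights & Biases values below are illustrative placeholders; only the parameter names come from the `create` signature shown above.

from together import Together

client = Together()  # assumes TOGETHER_API_KEY is set in the environment

job = client.fine_tuning.create(
    training_file="file-abc123",        # placeholder uploaded file ID
    model="example-org/example-model",  # placeholder base model name
    suffix="wandb-demo",
    wandb_api_key="...",                            # forwarded as wandb_key in FinetuneRequest
    wandb_base_url="https://wandb.example.com",     # new in this commit
    wandb_project_name="finetune-experiments",      # new in this commit
    wandb_name="example-run-1",                     # new in this commit
)
print(job.id)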

src/together/types/finetune.py

Lines changed: 13 additions & 1 deletion
@@ -168,7 +168,15 @@ class FinetuneRequest(BaseModel):
     suffix: str | None = None
     # weights & biases api key
     wandb_key: str | None = None
+    # weights & biases base url
+    wandb_base_url: str | None = None
+    # wandb project name
+    wandb_project_name: str | None = None
+    # wandb run name
+    wandb_name: str | None = None
+    # training type
     training_type: FullTrainingType | LoRATrainingType | None = None
+    # train on inputs
     train_on_inputs: StrictBool | Literal["auto"] = "auto"
@@ -236,8 +244,12 @@ class FinetuneResponse(BaseModel):
     evals_completed: int | None = None
     # place in job queue (decrementing counter)
     queue_depth: int | None = None
-    # weights & biases project name
+    # weights & biases base url
+    wandb_base_url: str | None = None
+    # wandb project name
     wandb_project_name: str | None = None
+    # wandb run name
+    wandb_name: str | None = None
     # weights & biases job url
     wandb_url: str | None = None
     # training file metadata
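
Since FinetuneResponse now carries the same W&B fields, they can be read back from a submitted job. Continuing the sketch above, and assuming the client's standard retrieve call with a placeholder job ID:

# Hypothetical: inspect the W&B settings on a retrieved fine-tuning job.
job = client.fine_tuning.retrieve("ft-00000000-0000-0000-0000-000000000000")

print(job.wandb_project_name)  # project name, new field in FinetuneResponse
print(job.wandb_name)          # run name, new field
print(job.wandb_base_url)      # base URL, new field
print(job.wandb_url)           # W&B job URL, present before this change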
