From a08cc1be5a283233c2832b79a50ebf9a9b2b4e1d Mon Sep 17 00:00:00 2001
From: Ihar Hrachyshka
Date: Mon, 9 Jun 2025 18:41:06 -0400
Subject: [PATCH] chore: Remove hf_format= argument for save_checkpoint

It's always True at every call site (and is the default), so the
conditional wrapper around save_hf_format_accelerate() is dead code.

Signed-off-by: Ihar Hrachyshka
---
 src/instructlab/training/main_ds.py |  2 --
 src/instructlab/training/utils.py   | 18 ++++++++----------
 2 files changed, 8 insertions(+), 12 deletions(-)

diff --git a/src/instructlab/training/main_ds.py b/src/instructlab/training/main_ds.py
index 4ca638d0..5ee29f0f 100644
--- a/src/instructlab/training/main_ds.py
+++ b/src/instructlab/training/main_ds.py
@@ -228,7 +228,6 @@ def train(
                 tokenizer=model.tokenizer,
                 samples_seen=samples_seen,
                 is_lora=bool(args.lora_r),
-                hf_format=True,
             )
             base_logger.debug("RANK (%d) waiting at post-save barrier.", local_rank)
             torch.distributed.barrier()
@@ -247,7 +246,6 @@ def train(
             samples_seen=samples_seen,
             is_lora=bool(args.lora_r),
             full_state=args.accelerate_full_state_at_epoch,
-            hf_format=True,
             epoch=epoch,
         )
         base_logger.debug("RANK (%d) waiting at post-save barrier.", local_rank)
diff --git a/src/instructlab/training/utils.py b/src/instructlab/training/utils.py
index 9472884e..993d29e4 100644
--- a/src/instructlab/training/utils.py
+++ b/src/instructlab/training/utils.py
@@ -805,18 +805,16 @@ def save_checkpoint(
     samples_seen,
     is_lora: bool,
     epoch: int = None,
-    hf_format: bool = True,
     full_state: bool = False,
 ) -> None:
-    if hf_format:
-        save_hf_format_accelerate(
-            args=args,
-            model=model,
-            accelerator=accelerator,
-            tokenizer=tokenizer,
-            samples_seen=samples_seen,
-            is_lora=is_lora,
-        )
+    save_hf_format_accelerate(
+        args=args,
+        model=model,
+        accelerator=accelerator,
+        tokenizer=tokenizer,
+        samples_seen=samples_seen,
+        is_lora=is_lora,
+    )
 
     if full_state:
         save_full_state(
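
Note: after this change, call sites simply drop the hf_format=True keyword;
the HF-format save always runs inside save_checkpoint(), and only full_state
still gates the extra save_full_state() call. A minimal sketch of the
resulting epoch-end call, assuming the surrounding train() loop provides the
same variables named in the diff (args, model, accelerator, samples_seen,
epoch):

    # Epoch-end checkpoint after the patch: no hf_format= argument.
    # save_hf_format_accelerate() now runs unconditionally inside
    # save_checkpoint(); save_full_state() runs only when full_state is True.
    save_checkpoint(
        args=args,
        model=model,
        accelerator=accelerator,
        tokenizer=model.tokenizer,
        samples_seen=samples_seen,
        is_lora=bool(args.lora_r),
        full_state=args.accelerate_full_state_at_epoch,
        epoch=epoch,
    )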