
Commit ee588a7

chinamaoge and maoge authored
[None][fix] Fix the error where checkpoint_dir is assigned as NONE wh… (#8401)
Signed-off-by: maoge <[email protected]>
Co-authored-by: maoge <[email protected]>
1 parent 0a0159f commit ee588a7

File tree

1 file changed: +16, -9 lines


tensorrt_llm/_torch/model_config.py

Lines changed: 16 additions & 9 deletions
```diff
@@ -414,16 +414,23 @@ def from_pretrained(cls,
         # Use file lock to prevent race conditions when multiple processes
         # try to import/cache the same remote model config file
         with config_file_lock():
-            pretrained_config = transformers.AutoConfig.from_pretrained(
-                checkpoint_dir,
-                trust_remote_code=trust_remote_code,
-            )
+            # When model_format is TLLM_ENGINE, checkpoint_dir can be None;
+            # guard here to avoid sending cyclic requests to the None URL.
+            if checkpoint_dir is not None:
+                pretrained_config = transformers.AutoConfig.from_pretrained(
+                    checkpoint_dir,
+                    trust_remote_code=trust_remote_code,
+                )

-            # Find the cache path by looking for the config.json file which should be in all
-            # huggingface models
-            model_dir = Path(
-                transformers.utils.hub.cached_file(checkpoint_dir,
-                                                   'config.json')).parent
+                # Find the cache path by looking for the config.json file which should be in all
+                # huggingface models
+                model_dir = Path(
+                    transformers.utils.hub.cached_file(checkpoint_dir,
+                                                       'config.json')).parent
+            else:
+                raise ValueError(
+                    "checkpoint_dir is None. Cannot load model config without a valid checkpoint directory."
+                )

         quant_config = QuantConfig()
         layer_quant_config = None
```
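
For reference, the guarded loading path reads as the short sketch below. `load_pretrained_config` is a hypothetical standalone helper, not a TensorRT-LLM API; it mirrors the check this commit adds inside `ModelConfig.from_pretrained`, using the same `transformers` calls that appear in the diff.

```python
from pathlib import Path

import transformers


def load_pretrained_config(checkpoint_dir, trust_remote_code=False):
    """Hypothetical helper mirroring the guard added in this commit."""
    if checkpoint_dir is None:
        # Fail fast: with model_format == TLLM_ENGINE there may be no HF
        # checkpoint, and calling AutoConfig.from_pretrained(None) would
        # otherwise lead to cyclic requests against a None URL.
        raise ValueError(
            "checkpoint_dir is None. Cannot load model config without a "
            "valid checkpoint directory.")

    pretrained_config = transformers.AutoConfig.from_pretrained(
        checkpoint_dir, trust_remote_code=trust_remote_code)

    # config.json ships with every Hugging Face model, so the cached copy's
    # parent directory locates the local snapshot of the checkpoint.
    model_dir = Path(
        transformers.utils.hub.cached_file(checkpoint_dir,
                                           'config.json')).parent
    return pretrained_config, model_dir
```

Raising `ValueError` up front turns the None case into a single descriptive failure at the call site, rather than repeated requests surfacing later from inside `transformers`.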
