ignore issues with calculating # params when printing (#1493)
src/axolotl/utils/models.py
CHANGED
@@ -902,7 +902,12 @@ def load_lora(model, cfg, inference=False, config_only=False):
         model = get_peft_model(model, lora_config)
 
     if rank == 0:
-        model.print_trainable_parameters()
+        try:
+            model.print_trainable_parameters()
+        except AttributeError as exc:
+            LOG.warning(
+                "Exception caught during model.print_trainable_parameters(): %s", exc
+            )
     elif cfg.fsdp and cfg.adapter == "qlora":
         setup_quantized_peft_meta_for_training(model)
 
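For reference, the guarded call added by this commit can be sketched as a small standalone helper. This is a minimal sketch, assuming any PEFT-wrapped model that exposes print_trainable_parameters() and a module-level logger; the safe_print_trainable_parameters name is illustrative and not part of axolotl.

```python
import logging

LOG = logging.getLogger(__name__)


def safe_print_trainable_parameters(model) -> None:
    """Log trainable-parameter counts without letting the call abort training."""
    try:
        model.print_trainable_parameters()
    except AttributeError as exc:
        # Counting parameters can raise AttributeError for some wrapped or
        # quantized modules; warn and continue instead of failing the run.
        LOG.warning(
            "Exception caught during model.print_trainable_parameters(): %s", exc
        )
```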