Fix Loading Model with Modified Config File
#51
by SamMaggioli - opened

modeling_lora.py CHANGED (+17 -3)
@@ -1,6 +1,6 @@
 import math
 import os
-from functools import partial
+from functools import partial, cache
 from typing import Iterator, List, Optional, Tuple, Union
 
 import numpy as np
@@ -335,9 +335,23 @@ class XLMRobertaLoRA(XLMRobertaPreTrainedModel):
         use_safetensors: bool = None,
         **kwargs,
     ):
-        if config.load_trained_adapters: # checkpoint already contains LoRA adapters
+        if config.load_trained_adapters : # checkpoint already contains LoRA adapters
+            # model = super().from_pretrained(
+            #     pretrained_model_name_or_path, *model_args, **kwargs
+            # )
+            # return cls(config, roberta=roberta)
             return super().from_pretrained(
-                pretrained_model_name_or_path, *model_args, **kwargs
+                pretrained_model_name_or_path,
+                *model_args,
+                config=config,
+                cache_dir=cache_dir,
+                ignore_mismatched_sizes=ignore_mismatched_sizes,
+                force_download=force_download,
+                local_files_only=local_files_only,
+                token=token,
+                revision=revision,
+                use_safetensors=use_safetensors,
+                **kwargs
             )
         else: # initializing new adapters
             roberta = XLMRobertaModel.from_pretrained(
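For context, here is a minimal, hypothetical usage sketch of what this fix enables: a config modified in Python is now actually forwarded to `super().from_pretrained()` along with the other loading kwargs, instead of being dropped. The repository id and the `trust_remote_code=True` flag below are assumptions for illustration and are not taken from the diff above.

```python
# Hypothetical sketch (not part of the PR): load the model with a modified config.
from transformers import AutoConfig, AutoModel

repo_id = "jinaai/jina-embeddings-v3"  # placeholder repo id, an assumption

# Modify the config before loading, e.g. the adapter flag checked in the diff.
config = AutoConfig.from_pretrained(repo_id, trust_remote_code=True)
config.load_trained_adapters = True  # checkpoint already contains LoRA adapters

# With this change, `config=config` (and cache_dir, revision, etc.) reach
# super().from_pretrained() inside XLMRobertaLoRA.from_pretrained.
model = AutoModel.from_pretrained(
    repo_id,
    config=config,
    trust_remote_code=True,
)
```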