set default for merge (#1044)
Files changed:
- README.md (+2 -2)
- src/axolotl/cli/merge_lora.py (+8 -1)
README.md
@@ -1036,10 +1036,10 @@ Please use `--sample_packing False` if you have it on and receive the error simi
 
 ### Merge LORA to base
 
-
+The following command will merge your LORA adapter with your base model. You can optionally pass the argument `--lora_model_dir` to specify the directory where your LORA adapter was saved; otherwise, this will be inferred from `output_dir` in your axolotl config file. The merged model is saved in the sub-directory `{lora_model_dir}/merged`.
 
 ```bash
-python3 -m axolotl.cli.merge_lora
+python3 -m axolotl.cli.merge_lora your_config.yml --lora_model_dir="./completed-model"
 ```
 
 If you run out of CUDA memory, you can try to merge in system RAM with
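For context, here is a minimal sketch of driving the merge command above from Python, e.g. at the end of a training script. The subprocess wrapper is illustrative only; `your_config.yml` and `./completed-model` are placeholder paths, and omitting `--lora_model_dir` falls back to the config's `output_dir` as described in the new README text.

```python
# Hedged sketch: invoke the merge command shown above via subprocess.
# "your_config.yml" and "./completed-model" are placeholder paths.
import subprocess

subprocess.run(
    [
        "python3", "-m", "axolotl.cli.merge_lora",
        "your_config.yml",
        # Optional: point at the saved adapter explicitly; if omitted,
        # the directory is inferred from `output_dir` in the config.
        "--lora_model_dir=./completed-model",
    ],
    check=True,
)
# Per the README text above, the merged model lands in ./completed-model/merged.
```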
src/axolotl/cli/merge_lora.py
@@ -25,9 +25,16 @@ def do_cli(config: Path = Path("examples/"), **kwargs):
         load_in_8bit=False,
         load_in_4bit=False,
         flash_attention=False,
-        **kwargs
+        **kwargs,
     )
 
+    if not parsed_cfg.lora_model_dir and parsed_cfg.output_dir:
+        parsed_cfg.lora_model_dir = parsed_cfg.output_dir
+    if not Path(parsed_cfg.lora_model_dir).exists():
+        raise ValueError(
+            f"Target directory for merge: `{parsed_cfg.lora_model_dir}` does not exist."
+        )
+
     do_merge_lora(cfg=parsed_cfg, cli_args=parsed_cli_args)
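As a rough illustration of the new default (not part of the change itself), the snippet below mirrors the added lines with a stand-in config object: an unset `lora_model_dir` falls back to `output_dir`, and a missing target directory raises immediately instead of failing later in the merge. `apply_merge_default` and the `SimpleNamespace` config are hypothetical names used only for this demo.

```python
# Illustration only: mimic the fallback added above with a dummy config object.
from pathlib import Path
from types import SimpleNamespace


def apply_merge_default(cfg):
    # Same logic as the lines added to do_cli, for demonstration.
    if not cfg.lora_model_dir and cfg.output_dir:
        cfg.lora_model_dir = cfg.output_dir
    if not Path(cfg.lora_model_dir).exists():
        raise ValueError(
            f"Target directory for merge: `{cfg.lora_model_dir}` does not exist."
        )
    return cfg


# Unset lora_model_dir falls back to output_dir.
cfg = apply_merge_default(SimpleNamespace(lora_model_dir=None, output_dir="."))
print(cfg.lora_model_dir)  # "."

# A non-existent target directory fails fast with a clear message.
try:
    apply_merge_default(SimpleNamespace(lora_model_dir="./no-such-dir", output_dir="."))
except ValueError as err:
    print(err)
```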