add e2e tests for checking functionality of resume from checkpoint (#865)
* use tensorboard to see if resume from checkpoint works
* make sure e2e test is either fp16 or bf16
* set max_steps and save limit so we have the checkpoint when testing resuming
* fix test parameters
- requirements.txt +1 -0
- tests/e2e/test_lora_llama.py +1 -0
- tests/e2e/test_resume.py +95 -0
- tests/e2e/utils.py +12 -1
requirements.txt CHANGED
@@ -32,3 +32,4 @@ pynvml
 art
 fschat==0.2.29
 gradio
+tensorboard
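The tensorboard dependency is what lets the new test shell out to `tensorboard --inspect`. For reference, the same event files can also be read in-process via tensorboard's EventAccumulator; a minimal sketch, where the run directory path is a placeholder rather than one the tests actually use:

# Sketch of reading TensorBoard event files in-process instead of via the
# CLI; "out/runs/some_run" is a placeholder path, not one used by the tests.
from tensorboard.backend.event_processing.event_accumulator import (
    EventAccumulator,
)

acc = EventAccumulator("out/runs/some_run")
acc.Reload()  # parse the event files found on disk

# Which scalar tags exist depends on what the trainer logged.
for tag in acc.Tags()["scalars"]:
    events = acc.Scalars(tag)
    if events:
        print(tag, "first logged step:", events[0].step)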
tests/e2e/test_lora_llama.py CHANGED
@@ -101,6 +101,7 @@ class TestLoraLlama(unittest.TestCase):
                 "learning_rate": 0.00001,
                 "optimizer": "adamw_torch",
                 "lr_scheduler": "cosine",
+                "bf16": True,
             }
         )
         normalize_config(cfg)
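This pins the existing LoRA test to bf16. The new resume test below instead selects precision at runtime, per the "either fp16 or bf16" commit note, which keeps it runnable on GPUs without bf16 support. In isolation that pattern looks like this:

# Runtime precision selection, as used by the new resume test: prefer bf16
# when the GPU supports it, otherwise fall back to fp16.
from transformers.utils import is_torch_bf16_gpu_available

precision = {"bf16": True} if is_torch_bf16_gpu_available() else {"fp16": True}
print(precision)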
tests/e2e/test_resume.py ADDED
@@ -0,0 +1,95 @@
+"""
+E2E tests for resuming training
+"""
+
+import logging
+import os
+import re
+import subprocess
+import unittest
+from pathlib import Path
+
+from transformers.utils import is_torch_bf16_gpu_available
+
+from axolotl.cli import load_datasets
+from axolotl.common.cli import TrainerCliArgs
+from axolotl.train import train
+from axolotl.utils.config import normalize_config
+from axolotl.utils.dict import DictDefault
+
+from .utils import most_recent_subdir, with_temp_dir
+
+LOG = logging.getLogger("axolotl.tests.e2e")
+os.environ["WANDB_DISABLED"] = "true"
+
+
+class TestResumeLlama(unittest.TestCase):
+    """
+    Test case for resuming training of llama models
+    """
+
+    @with_temp_dir
+    def test_resume_qlora(self, temp_dir):
+        # pylint: disable=duplicate-code
+        cfg = DictDefault(
+            {
+                "base_model": "JackFram/llama-68m",
+                "tokenizer_type": "LlamaTokenizer",
+                "sequence_len": 1024,
+                "sample_packing": True,
+                "flash_attention": True,
+                "load_in_4bit": True,
+                "adapter": "qlora",
+                "lora_r": 32,
+                "lora_alpha": 64,
+                "lora_dropout": 0.05,
+                "lora_target_linear": True,
+                "val_set_size": 0.1,
+                "special_tokens": {},
+                "datasets": [
+                    {
+                        "path": "vicgalle/alpaca-gpt4",
+                        "type": "alpaca",
+                    },
+                ],
+                "num_epochs": 2,
+                "micro_batch_size": 1,
+                "gradient_accumulation_steps": 1,
+                "output_dir": temp_dir,
+                "learning_rate": 0.00001,
+                "optimizer": "adamw_torch",
+                "lr_scheduler": "cosine",
+                "save_steps": 10,
+                "save_total_limit": 5,
+                "max_steps": 40,
+            }
+        )
+        if is_torch_bf16_gpu_available():
+            cfg.bf16 = True
+        else:
+            cfg.fp16 = True
+        normalize_config(cfg)
+        cli_args = TrainerCliArgs()
+        dataset_meta = load_datasets(cfg=cfg, cli_args=cli_args)
+
+        train(cfg=cfg, cli_args=cli_args, dataset_meta=dataset_meta)
+
+        resume_cfg = cfg | DictDefault(
+            {
+                "resume_from_checkpoint": f"{temp_dir}/checkpoint-30/",
+            }
+        )
+        normalize_config(resume_cfg)
+        cli_args = TrainerCliArgs()
+
+        train(cfg=resume_cfg, cli_args=cli_args, dataset_meta=dataset_meta)
+        assert (Path(temp_dir) / "adapter_model.bin").exists()
+
+        tb_log_path_1 = most_recent_subdir(temp_dir + "/runs")
+        cmd = f"tensorboard --inspect --logdir {tb_log_path_1}"
+        res = subprocess.run(
+            cmd, shell=True, text=True, capture_output=True, check=True
+        )
+        pattern = r"first_step\s+(\d+)"
+        first_steps = int(re.findall(pattern, res.stdout)[0])
+        assert first_steps == 31
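The final assertion is the heart of the test: with max_steps=40 and save_steps=10, checkpoints land at steps 10 through 40, so a run resumed from checkpoint-30 should log step 31 to TensorBoard first. A self-contained sketch of that extraction, run against an illustrative (not captured) excerpt of `tensorboard --inspect` output:

import re

# Illustrative excerpt of `tensorboard --inspect --logdir <dir>` output;
# the numbers are what a successful resume from checkpoint-30 should yield.
sample_stdout = """
scalars
   first_step           31
   last_step            40
   num_steps            10
"""

# Same regex the test uses to pull out the first logged step.
pattern = r"first_step\s+(\d+)"
first_step = int(re.findall(pattern, sample_stdout)[0])
assert first_step == 31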
tests/e2e/utils.py CHANGED
@@ -1,10 +1,11 @@
 """
 helper utils for tests
 """
-
+import os
 import shutil
 import tempfile
 from functools import wraps
+from pathlib import Path
 
 
 def with_temp_dir(test_func):
@@ -20,3 +21,13 @@ def with_temp_dir(test_func):
         shutil.rmtree(temp_dir)
 
     return wrapper
+
+
+def most_recent_subdir(path):
+    base_path = Path(path)
+    subdirectories = [d for d in base_path.iterdir() if d.is_dir()]
+    if not subdirectories:
+        return None
+    subdir = max(subdirectories, key=os.path.getctime)
+
+    return subdir
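most_recent_subdir picks the newest directory by ctime, which suits this test because each training run creates a fresh run directory under output_dir/runs. A quick usage sketch; the import path assumes the repo root is on sys.path so the tests package resolves, and the sleep guards against coarse filesystem timestamp resolution:

# Hypothetical usage of the new helper outside the test itself.
import tempfile
import time
from pathlib import Path

from tests.e2e.utils import most_recent_subdir

with tempfile.TemporaryDirectory() as tmp:
    (Path(tmp) / "runs" / "run_a").mkdir(parents=True)
    time.sleep(0.01)  # ensure run_b gets a strictly later ctime
    (Path(tmp) / "runs" / "run_b").mkdir()
    assert most_recent_subdir(tmp + "/runs").name == "run_b"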