hllj committed on
Commit
710ea23
1 Parent(s): 0604c20

Model save

README.md ADDED
@@ -0,0 +1,60 @@
+ ---
+ base_model: hllj/zephyr-7b-beta-vi-math
+ tags:
+ - generated_from_trainer
+ model-index:
+ - name: sft-zephyr-7b-beta-vi-math-v1
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # sft-zephyr-7b-beta-vi-math-v1
+
+ This model is a fine-tuned version of [hllj/zephyr-7b-beta-vi-math](https://huggingface.co/hllj/zephyr-7b-beta-vi-math) on an unknown dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 0.3393
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 2e-05
+ - train_batch_size: 4
+ - eval_batch_size: 4
+ - seed: 42
+ - distributed_type: multi-GPU
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: cosine
+ - lr_scheduler_warmup_ratio: 0.05
+ - num_epochs: 1
+ - mixed_precision_training: Native AMP
+
+ ### Training results
+
+ | Training Loss | Epoch | Step | Validation Loss |
+ |:-------------:|:-----:|:----:|:---------------:|
+ | 0.5124        | 0.17  | 50   | 0.4338          |
+ | 0.3876        | 0.34  | 100  | 0.3593          |
+
+
+ ### Framework versions
+
+ - Transformers 4.35.2
+ - Pytorch 2.1.0
+ - Datasets 2.15.0
+ - Tokenizers 0.15.0
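The card above omits a usage section. A minimal inference sketch, assuming the adapter is published as `hllj/sft-zephyr-7b-beta-vi-math-v1` (repo id inferred from the model-index name, not confirmed by the card) and that `peft` and `accelerate` are installed:

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

BASE_ID = "hllj/zephyr-7b-beta-vi-math"            # base model named in the card
ADAPTER_ID = "hllj/sft-zephyr-7b-beta-vi-math-v1"  # assumed repo id for this adapter

tokenizer = AutoTokenizer.from_pretrained(ADAPTER_ID)
base = AutoModelForCausalLM.from_pretrained(
    BASE_ID, torch_dtype=torch.float16, device_map="auto"
)
model = PeftModel.from_pretrained(base, ADAPTER_ID)  # attach the LoRA weights

# Zephyr-style chat formatting (see tokenizer_config.json below).
prompt = tokenizer.apply_chat_template(
    [{"role": "user", "content": "Giải phương trình x^2 - 5x + 6 = 0."}],
    tokenize=False,
    add_generation_prompt=True,
)
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
out = model.generate(**inputs, max_new_tokens=256)
print(tokenizer.decode(out[0], skip_special_tokens=True))
```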
adapter_config.json ADDED
@@ -0,0 +1,25 @@
+ {
+   "alpha_pattern": {},
+   "auto_mapping": null,
+   "base_model_name_or_path": "hllj/zephyr-7b-beta-vi-math",
+   "bias": "none",
+   "fan_in_fan_out": false,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "lora_alpha": 32,
+   "lora_dropout": 0.05,
+   "modules_to_save": null,
+   "peft_type": "LORA",
+   "r": 16,
+   "rank_pattern": {},
+   "revision": null,
+   "target_modules": [
+     "q_proj",
+     "o_proj",
+     "k_proj",
+     "v_proj"
+   ],
+   "task_type": "CAUSAL_LM"
+ }
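For reference, the same adapter settings expressed as a `peft` `LoraConfig` — a sketch of how an equivalent file would be produced, not the actual training script from this repo:

```python
from peft import LoraConfig

# Mirrors adapter_config.json above: rank-16 LoRA on the attention projections.
# Passing this to get_peft_model(...) and calling save_pretrained() writes an
# equivalent adapter_config.json.
lora_config = LoraConfig(
    r=16,
    lora_alpha=32,
    lora_dropout=0.05,
    bias="none",
    target_modules=["q_proj", "o_proj", "k_proj", "v_proj"],
    task_type="CAUSAL_LM",
)
```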
adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d11134749e2c5b60bff943689f10202986b71035eb637e347309e490a24faee9
+ size 54560368
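The file is stored as a Git LFS pointer; the `oid` is the SHA-256 of the real ~54 MB weight file. A minimal sketch for verifying a downloaded copy against the pointer:

```python
import hashlib

# Expected digest comes from the LFS pointer's oid field above.
EXPECTED = "d11134749e2c5b60bff943689f10202986b71035eb637e347309e490a24faee9"

h = hashlib.sha256()
with open("adapter_model.safetensors", "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # read in 1 MiB chunks
        h.update(chunk)
assert h.hexdigest() == EXPECTED, "downloaded file does not match the LFS pointer"
```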
all_results.json ADDED
@@ -0,0 +1,13 @@
+ {
+   "epoch": 0.5,
+   "eval_loss": 0.339265376329422,
+   "eval_runtime": 1.0935,
+   "eval_samples": 12,
+   "eval_samples_per_second": 10.974,
+   "eval_steps_per_second": 2.744,
+   "train_loss": 0.5100626462981814,
+   "train_runtime": 502.0996,
+   "train_samples": 1184,
+   "train_samples_per_second": 2.358,
+   "train_steps_per_second": 0.59
+ }
eval_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+   "epoch": 0.5,
+   "eval_loss": 0.339265376329422,
+   "eval_runtime": 1.0935,
+   "eval_samples": 12,
+   "eval_samples_per_second": 10.974,
+   "eval_steps_per_second": 2.744
+ }
runs/Nov17_18-47-06_7a59b30c842e/events.out.tfevents.1700246832.7a59b30c842e.2079.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:25479244ca132824359da3651844a5e3a48fb025b797d04046f19a6938df1ae9
+ size 7534
runs/Nov17_18-47-06_7a59b30c842e/events.out.tfevents.1700247335.7a59b30c842e.2079.1 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b451d90d3a1ca153da586e760798a22570642d9fe5c903518f01fe8fdd0ee12e
+ size 359
special_tokens_map.json ADDED
@@ -0,0 +1,35 @@
+ {
+   "additional_special_tokens": [
+     "<unk>",
+     "<s>",
+     "</s>"
+   ],
+   "bos_token": {
+     "content": "<s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "<unk>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
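Note that `pad_token` reuses `</s>`, so padding and end-of-sequence share one token id. A quick sanity check after loading (repo id assumed, as above):

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("hllj/sft-zephyr-7b-beta-vi-math-v1")  # assumed repo id
print(tok.bos_token, tok.eos_token, tok.pad_token, tok.unk_token)
# Per the map above: <s> </s> </s> <unk> -- since pad and EOS coincide,
# labels on padding positions should be masked during training so the
# model's ability to emit EOS is not suppressed.
```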
tokenizer.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dadfd56d766715c61d2ef780a525ab43b8e6da4de6865bda3d95fdef5e134055
+ size 493443
tokenizer_config.json ADDED
@@ -0,0 +1,48 @@
+ {
+   "add_bos_token": true,
+   "add_eos_token": false,
+   "added_tokens_decoder": {
+     "0": {
+       "content": "<unk>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1": {
+       "content": "<s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "2": {
+       "content": "</s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "additional_special_tokens": [
+     "<unk>",
+     "<s>",
+     "</s>"
+   ],
+   "bos_token": "<s>",
+   "chat_template": "{% for message in messages %}\n{% if message['role'] == 'user' %}\n{{ '<|user|>\n' + message['content'] + eos_token }}\n{% elif message['role'] == 'system' %}\n{{ '<|system|>\n' + message['content'] + eos_token }}\n{% elif message['role'] == 'assistant' %}\n{{ '<|assistant|>\n' + message['content'] + eos_token }}\n{% endif %}\n{% if loop.last and add_generation_prompt %}\n{{ '<|assistant|>' }}\n{% endif %}\n{% endfor %}",
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "</s>",
+   "legacy": true,
+   "model_max_length": 1000000000000000019884624838656,
+   "pad_token": "</s>",
+   "sp_model_kwargs": {},
+   "spaces_between_special_tokens": false,
+   "tokenizer_class": "LlamaTokenizer",
+   "truncation_side": "left",
+   "unk_token": "<unk>",
+   "use_default_system_prompt": true
+ }
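The `chat_template` is the Zephyr format: `<|system|>`, `<|user|>`, and `<|assistant|>` turns, each closed by the EOS token. A short rendering sketch; `apply_chat_template` is available in the pinned Transformers 4.35.2, and the repo id is assumed as before:

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("hllj/sft-zephyr-7b-beta-vi-math-v1")  # assumed repo id

messages = [
    {"role": "system", "content": "Bạn là trợ lý giải toán."},  # "You are a math assistant."
    {"role": "user", "content": "Tính 12 x 8."},                # "Compute 12 x 8."
]
text = tok.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(text)
# Renders roughly as:
# <|system|>
# Bạn là trợ lý giải toán.</s>
# <|user|>
# Tính 12 x 8.</s>
# <|assistant|>
```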
train_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+   "epoch": 0.5,
+   "train_loss": 0.5100626462981814,
+   "train_runtime": 502.0996,
+   "train_samples": 1184,
+   "train_samples_per_second": 2.358,
+   "train_steps_per_second": 0.59
+ }
trainer_state.json ADDED
@@ -0,0 +1,134 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 0.4966216216216216,
+   "eval_steps": 50,
+   "global_step": 147,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.0,
+       "learning_rate": 1.3333333333333334e-06,
+       "loss": 0.9977,
+       "step": 1
+     },
+     {
+       "epoch": 0.03,
+       "learning_rate": 1.3333333333333333e-05,
+       "loss": 0.9343,
+       "step": 10
+     },
+     {
+       "epoch": 0.07,
+       "learning_rate": 1.998437989229673e-05,
+       "loss": 0.8302,
+       "step": 20
+     },
+     {
+       "epoch": 0.1,
+       "learning_rate": 1.9859711663543573e-05,
+       "loss": 0.706,
+       "step": 30
+     },
+     {
+       "epoch": 0.14,
+       "learning_rate": 1.961193185426459e-05,
+       "loss": 0.5885,
+       "step": 40
+     },
+     {
+       "epoch": 0.17,
+       "learning_rate": 1.9244134324096223e-05,
+       "loss": 0.5124,
+       "step": 50
+     },
+     {
+       "epoch": 0.17,
+       "eval_loss": 0.43378910422325134,
+       "eval_runtime": 1.092,
+       "eval_samples_per_second": 10.989,
+       "eval_steps_per_second": 2.747,
+       "step": 50
+     },
+     {
+       "epoch": 0.2,
+       "learning_rate": 1.876091151314196e-05,
+       "loss": 0.4707,
+       "step": 60
+     },
+     {
+       "epoch": 0.24,
+       "learning_rate": 1.8168297099265094e-05,
+       "loss": 0.427,
+       "step": 70
+     },
+     {
+       "epoch": 0.27,
+       "learning_rate": 1.747369065961599e-05,
+       "loss": 0.4299,
+       "step": 80
+     },
+     {
+       "epoch": 0.3,
+       "learning_rate": 1.6685765277094702e-05,
+       "loss": 0.4137,
+       "step": 90
+     },
+     {
+       "epoch": 0.34,
+       "learning_rate": 1.5814359245404818e-05,
+       "loss": 0.3876,
+       "step": 100
+     },
+     {
+       "epoch": 0.34,
+       "eval_loss": 0.3593088984489441,
+       "eval_runtime": 1.0931,
+       "eval_samples_per_second": 10.978,
+       "eval_steps_per_second": 2.745,
+       "step": 100
+     },
+     {
+       "epoch": 0.37,
+       "learning_rate": 1.4870353224904572e-05,
+       "loss": 0.3912,
+       "step": 110
+     },
+     {
+       "epoch": 0.41,
+       "learning_rate": 1.3865534383127406e-05,
+       "loss": 0.3932,
+       "step": 120
+     },
+     {
+       "epoch": 0.44,
+       "learning_rate": 1.2812449216357863e-05,
+       "loss": 0.3705,
+       "step": 130
+     },
+     {
+       "epoch": 0.47,
+       "learning_rate": 1.1724246889980638e-05,
+       "loss": 0.371,
+       "step": 140
+     },
+     {
+       "epoch": 0.5,
+       "step": 147,
+       "total_flos": 2.560641466368e+16,
+       "train_loss": 0.5100626462981814,
+       "train_runtime": 502.0996,
+       "train_samples_per_second": 2.358,
+       "train_steps_per_second": 0.59
+     }
+   ],
+   "logging_steps": 10,
+   "max_steps": 296,
+   "num_train_epochs": 1,
+   "save_steps": 500,
+   "total_flos": 2.560641466368e+16,
+   "trial_name": null,
+   "trial_params": null
+ }
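`log_history` interleaves training entries (keyed by `loss`) and evaluation entries (keyed by `eval_loss`); the final entry carries the run summary. A minimal sketch for extracting the two loss curves from a local copy of this file:

```python
import json

with open("trainer_state.json") as f:
    state = json.load(f)

# Training points carry a "loss" key; eval points carry "eval_loss".
train = [(e["step"], e["loss"]) for e in state["log_history"] if "loss" in e]
evals = [(e["step"], e["eval_loss"]) for e in state["log_history"] if "eval_loss" in e]

print(train[-3:])  # last few logged training losses
print(evals)       # [(50, 0.43378...), (100, 0.35930...)]
```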
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2578ab79106ee1b3a538815434bca273c625fc184b3a3dfc61a2a0f5c422fbb3
+ size 4664