WangXFng committed
Commit f7fc55c
1 Parent(s): b4fa4b4

Model save

.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+tokenizer.json filter=lfs diff=lfs merge=lfs -text
README.md CHANGED
@@ -1,7 +1,7 @@
 ---
-base_model: meta-llama/Llama-2-7b-hf
+base_model: meta-llama/Llama-3.1-8B-Instruct
 library_name: peft
-license: llama2
+license: llama3.1
 tags:
 - generated_from_trainer
 model-index:
@@ -14,7 +14,7 @@ should probably proofread and complete it, then remove this comment. -->
 
 # Instruments-8bit-8B-4Epoch
 
-This model is a fine-tuned version of [meta-llama/Llama-2-7b-hf](https://huggingface.co/meta-llama/Llama-2-7b-hf) on an unknown dataset.
+This model is a fine-tuned version of [meta-llama/Llama-3.1-8B-Instruct](https://huggingface.co/meta-llama/Llama-3.1-8B-Instruct) on an unknown dataset.
 
 ## Model description
 
@@ -34,14 +34,13 @@ More information needed
 
 The following hyperparameters were used during training:
 - learning_rate: 0.0001
-- train_batch_size: 16
+- train_batch_size: 4
 - eval_batch_size: 8
 - seed: 42
 - gradient_accumulation_steps: 16
-- total_train_batch_size: 256
+- total_train_batch_size: 64
 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
 - lr_scheduler_type: linear
-- lr_scheduler_warmup_steps: 2
 - num_epochs: 4
 
 ### Training results
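
For context, the updated hyperparameter list maps directly onto `transformers.TrainingArguments`. A minimal sketch, not the author's actual script: the `output_dir` name is assumed from the card title, the effective batch of 64 assumes a single device (4 × 16 accumulation steps), and warmup is omitted because this commit drops the warmup line from the card.

```python
from transformers import TrainingArguments

# Hedged reconstruction of the card's hyperparameters; values mirror the
# README list above, names marked "assumed" are not in the source.
training_args = TrainingArguments(
    output_dir="Instruments-8bit-8B-4Epoch",  # assumed, from the card title
    learning_rate=1e-4,
    per_device_train_batch_size=4,
    per_device_eval_batch_size=8,
    gradient_accumulation_steps=16,  # 4 x 16 = effective batch of 64
    lr_scheduler_type="linear",
    num_train_epochs=4,
    seed=42,
    logging_steps=250,   # matches trainer_state.json below
    save_steps=500,
)
```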
adapter_config.json CHANGED
@@ -1,7 +1,7 @@
 {
   "alpha_pattern": {},
   "auto_mapping": null,
-  "base_model_name_or_path": "meta-llama/Llama-2-7b-hf",
+  "base_model_name_or_path": "meta-llama/Llama-3.1-8B-Instruct",
   "bias": "none",
   "fan_in_fan_out": false,
   "inference_mode": true,
@@ -16,17 +16,17 @@
   "megatron_core": "megatron.core",
   "modules_to_save": null,
   "peft_type": "LORA",
-  "r": 8,
+  "r": 16,
   "rank_pattern": {},
   "revision": null,
   "target_modules": [
     "o_proj",
-    "q_proj",
-    "down_proj",
     "up_proj",
-    "v_proj",
     "k_proj",
-    "gate_proj"
+    "q_proj",
+    "gate_proj",
+    "down_proj",
+    "v_proj"
   ],
   "task_type": "CAUSAL_LM",
   "use_dora": false,
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:74b824b773ff9a426b5ebaaad8c4f64308d00fe2897db2575050ef68d0592497
-size 619341864
+oid sha256:cf1905bea0c65207cdfdf2b1d49e32940fe592d564ce02ca66cd8390a35bfbf5
+size 2284219288
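
The adapter payload grows from roughly 619 MB to 2.28 GB with the new base model and doubled rank. A hedged loading sketch: the repo id `WangXFng/Instruments-8bit-8B-4Epoch` is an assumption inferred from the card title, and the base checkpoint is gated.

```python
import torch
from transformers import AutoModelForCausalLM
from peft import PeftModel

# Load the (gated) base model, then attach the published adapter.
base = AutoModelForCausalLM.from_pretrained(
    "meta-llama/Llama-3.1-8B-Instruct", torch_dtype=torch.bfloat16
)
# config.json below shows vocab_size 129174, larger than the stock 128256,
# so the embeddings are resized before attaching the adapter (assumption:
# the fine-tune added tokens rather than using a custom base checkpoint).
base.resize_token_embeddings(129174)
model = PeftModel.from_pretrained(base, "WangXFng/Instruments-8bit-8B-4Epoch")
```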
config.json CHANGED
@@ -1,30 +1,39 @@
 {
-  "_name_or_path": "meta-llama/Llama-2-7b-hf",
   "architectures": [
     "LlamaForCausalLM"
   ],
   "attention_bias": false,
   "attention_dropout": 0.0,
-  "bos_token_id": 1,
-  "eos_token_id": 2,
+  "bos_token_id": 128000,
+  "eos_token_id": [
+    128001,
+    128008,
+    128009
+  ],
   "head_dim": 128,
   "hidden_act": "silu",
   "hidden_size": 4096,
   "initializer_range": 0.02,
-  "intermediate_size": 11008,
-  "max_position_embeddings": 4096,
+  "intermediate_size": 14336,
+  "max_position_embeddings": 131072,
   "mlp_bias": false,
   "model_type": "llama",
   "num_attention_heads": 32,
   "num_hidden_layers": 32,
-  "num_key_value_heads": 32,
+  "num_key_value_heads": 8,
   "pretraining_tp": 1,
   "rms_norm_eps": 1e-05,
-  "rope_scaling": null,
-  "rope_theta": 10000.0,
+  "rope_scaling": {
+    "factor": 8.0,
+    "high_freq_factor": 4.0,
+    "low_freq_factor": 1.0,
+    "original_max_position_embeddings": 8192,
+    "rope_type": "llama3"
+  },
+  "rope_theta": 500000.0,
   "tie_word_embeddings": false,
-  "torch_dtype": "float16",
+  "torch_dtype": "bfloat16",
   "transformers_version": "4.45.2",
   "use_cache": true,
-  "vocab_size": 32918
+  "vocab_size": 129174
 }
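
These config changes are exactly the Llama-2-7B → Llama-3.1-8B deltas: grouped-query attention (8 KV heads against 32 attention heads), the wider 14336 MLP, the 131072-token context with `llama3` RoPE scaling, and the 128000-range special tokens. A small verification sketch, assuming access to the gated base repo:

```python
from transformers import AutoConfig

# Spot-check the fields changed in this diff against the base model's config.
cfg = AutoConfig.from_pretrained("meta-llama/Llama-3.1-8B-Instruct")
assert cfg.num_attention_heads == 32 and cfg.num_key_value_heads == 8
assert cfg.intermediate_size == 14336
assert cfg.rope_scaling["rope_type"] == "llama3"
print(cfg.max_position_embeddings)  # 131072
```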
special_tokens_map.json CHANGED
@@ -1,24 +1,17 @@
 {
   "bos_token": {
-    "content": "<s>",
+    "content": "<|begin_of_text|>",
     "lstrip": false,
     "normalized": false,
     "rstrip": false,
     "single_word": false
   },
   "eos_token": {
-    "content": "</s>",
+    "content": "<|eot_id|>",
     "lstrip": false,
     "normalized": false,
     "rstrip": false,
     "single_word": false
   },
-  "pad_token": "<unk>",
-  "unk_token": {
-    "content": "<unk>",
-    "lstrip": false,
-    "normalized": false,
-    "rstrip": false,
-    "single_word": false
-  }
+  "pad_token": "!"
 }
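
Note the unusual pad token: instead of the former `<unk>` (which the Llama 3 tokenizer does not have) or a dedicated `<pad>`, the map reuses the literal string `"!"`. A sketch of the equivalent tokenizer setup:

```python
from transformers import AutoTokenizer

# Mirror the new special_tokens_map.json; Llama 3 tokenizers ship without
# unk/pad tokens, so training code must choose a pad token explicitly.
tok = AutoTokenizer.from_pretrained("meta-llama/Llama-3.1-8B-Instruct")
tok.pad_token = "!"       # as committed here; "!" is an existing vocab token
print(tok.pad_token_id)   # resolves to the id of "!"
```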
tokenizer.json ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6cdb5a41c4ff365aaf322d31b46b6ce2c9f8493fa398bd5ad4fbe6179ebfa53e
+size 17378691
tokenizer_config.json CHANGED
The diff for this file is too large to render. See raw diff
 
trainer_state.json CHANGED
@@ -3,79 +3,247 @@
   "best_model_checkpoint": null,
   "epoch": 4.0,
   "eval_steps": 500,
-  "global_step": 2060,
+  "global_step": 8240,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
   "log_history": [
     {
-      "epoch": 0.4854368932038835,
-      "grad_norm": 1.139075756072998,
-      "learning_rate": 8.794946550048592e-05,
-      "loss": 3.1617,
+      "epoch": 0.12135922330097088,
+      "grad_norm": 1.1636821031570435,
+      "learning_rate": 9.696601941747573e-05,
+      "loss": 1.1222,
       "step": 250
     },
     {
-      "epoch": 0.970873786407767,
-      "grad_norm": 0.9628340005874634,
-      "learning_rate": 7.580174927113704e-05,
-      "loss": 2.3986,
+      "epoch": 0.24271844660194175,
+      "grad_norm": 1.0256236791610718,
+      "learning_rate": 9.393203883495146e-05,
+      "loss": 0.595,
       "step": 500
     },
     {
-      "epoch": 1.4563106796116505,
-      "grad_norm": 0.7678180932998657,
-      "learning_rate": 6.365403304178815e-05,
-      "loss": 2.3268,
+      "epoch": 0.3640776699029126,
+      "grad_norm": 0.8023567795753479,
+      "learning_rate": 9.089805825242718e-05,
+      "loss": 0.5214,
       "step": 750
     },
     {
-      "epoch": 1.941747572815534,
-      "grad_norm": 0.735146701335907,
-      "learning_rate": 5.150631681243926e-05,
-      "loss": 2.3146,
+      "epoch": 0.4854368932038835,
+      "grad_norm": 0.6630896925926208,
+      "learning_rate": 8.786407766990292e-05,
+      "loss": 0.4947,
       "step": 1000
     },
     {
-      "epoch": 2.4271844660194173,
-      "grad_norm": 0.6907908320426941,
-      "learning_rate": 3.9358600583090386e-05,
-      "loss": 2.2942,
+      "epoch": 0.6067961165048543,
+      "grad_norm": 0.621222198009491,
+      "learning_rate": 8.483009708737865e-05,
+      "loss": 0.4853,
       "step": 1250
     },
     {
-      "epoch": 2.912621359223301,
-      "grad_norm": 0.7977621555328369,
-      "learning_rate": 2.72108843537415e-05,
-      "loss": 2.286,
+      "epoch": 0.7281553398058253,
+      "grad_norm": 0.6639849543571472,
+      "learning_rate": 8.179611650485438e-05,
+      "loss": 0.4713,
       "step": 1500
     },
     {
-      "epoch": 3.3980582524271843,
-      "grad_norm": 0.7419993281364441,
-      "learning_rate": 1.5063168124392615e-05,
-      "loss": 2.2667,
+      "epoch": 0.8495145631067961,
+      "grad_norm": 0.696992039680481,
+      "learning_rate": 7.87621359223301e-05,
+      "loss": 0.4594,
       "step": 1750
     },
     {
-      "epoch": 3.883495145631068,
-      "grad_norm": 0.7748873233795166,
-      "learning_rate": 2.915451895043732e-06,
-      "loss": 2.2535,
+      "epoch": 0.970873786407767,
+      "grad_norm": 0.6508496403694153,
+      "learning_rate": 7.572815533980583e-05,
+      "loss": 0.4454,
       "step": 2000
     },
+    {
+      "epoch": 1.0922330097087378,
+      "grad_norm": 0.7967496514320374,
+      "learning_rate": 7.269417475728155e-05,
+      "loss": 0.4208,
+      "step": 2250
+    },
+    {
+      "epoch": 1.2135922330097086,
+      "grad_norm": 0.8370482921600342,
+      "learning_rate": 6.966019417475728e-05,
+      "loss": 0.3955,
+      "step": 2500
+    },
+    {
+      "epoch": 1.3349514563106797,
+      "grad_norm": 0.8135976791381836,
+      "learning_rate": 6.662621359223301e-05,
+      "loss": 0.3725,
+      "step": 2750
+    },
+    {
+      "epoch": 1.4563106796116505,
+      "grad_norm": 0.884570837020874,
+      "learning_rate": 6.359223300970875e-05,
+      "loss": 0.3436,
+      "step": 3000
+    },
+    {
+      "epoch": 1.5776699029126213,
+      "grad_norm": 0.908380925655365,
+      "learning_rate": 6.055825242718447e-05,
+      "loss": 0.3208,
+      "step": 3250
+    },
+    {
+      "epoch": 1.6990291262135924,
+      "grad_norm": 0.9605095982551575,
+      "learning_rate": 5.752427184466019e-05,
+      "loss": 0.2998,
+      "step": 3500
+    },
+    {
+      "epoch": 1.820388349514563,
+      "grad_norm": 0.8754192590713501,
+      "learning_rate": 5.4490291262135926e-05,
+      "loss": 0.2857,
+      "step": 3750
+    },
+    {
+      "epoch": 1.941747572815534,
+      "grad_norm": 0.7980837225914001,
+      "learning_rate": 5.145631067961165e-05,
+      "loss": 0.2708,
+      "step": 4000
+    },
+    {
+      "epoch": 2.063106796116505,
+      "grad_norm": 0.8806191682815552,
+      "learning_rate": 4.8422330097087385e-05,
+      "loss": 0.2443,
+      "step": 4250
+    },
+    {
+      "epoch": 2.1844660194174756,
+      "grad_norm": 0.9350611567497253,
+      "learning_rate": 4.538834951456311e-05,
+      "loss": 0.2288,
+      "step": 4500
+    },
+    {
+      "epoch": 2.3058252427184467,
+      "grad_norm": 0.8377496600151062,
+      "learning_rate": 4.235436893203884e-05,
+      "loss": 0.2249,
+      "step": 4750
+    },
+    {
+      "epoch": 2.4271844660194173,
+      "grad_norm": 0.9385828971862793,
+      "learning_rate": 3.9320388349514564e-05,
+      "loss": 0.22,
+      "step": 5000
+    },
+    {
+      "epoch": 2.5485436893203883,
+      "grad_norm": 0.7633670568466187,
+      "learning_rate": 3.62864077669903e-05,
+      "loss": 0.2149,
+      "step": 5250
+    },
+    {
+      "epoch": 2.6699029126213594,
+      "grad_norm": 0.8883378505706787,
+      "learning_rate": 3.325242718446602e-05,
+      "loss": 0.2086,
+      "step": 5500
+    },
+    {
+      "epoch": 2.79126213592233,
+      "grad_norm": 0.8694002032279968,
+      "learning_rate": 3.0218446601941746e-05,
+      "loss": 0.207,
+      "step": 5750
+    },
+    {
+      "epoch": 2.912621359223301,
+      "grad_norm": 0.7862321138381958,
+      "learning_rate": 2.7184466019417475e-05,
+      "loss": 0.2013,
+      "step": 6000
+    },
+    {
+      "epoch": 3.033980582524272,
+      "grad_norm": 0.6997531056404114,
+      "learning_rate": 2.4150485436893205e-05,
+      "loss": 0.1921,
+      "step": 6250
+    },
+    {
+      "epoch": 3.1553398058252426,
+      "grad_norm": 0.700462281703949,
+      "learning_rate": 2.111650485436893e-05,
+      "loss": 0.1789,
+      "step": 6500
+    },
+    {
+      "epoch": 3.2766990291262137,
+      "grad_norm": 0.7700124979019165,
+      "learning_rate": 1.808252427184466e-05,
+      "loss": 0.1774,
+      "step": 6750
+    },
+    {
+      "epoch": 3.3980582524271843,
+      "grad_norm": 0.8625558018684387,
+      "learning_rate": 1.5048543689320387e-05,
+      "loss": 0.1764,
+      "step": 7000
+    },
+    {
+      "epoch": 3.5194174757281553,
+      "grad_norm": 0.722398042678833,
+      "learning_rate": 1.2014563106796117e-05,
+      "loss": 0.175,
+      "step": 7250
+    },
+    {
+      "epoch": 3.6407766990291264,
+      "grad_norm": 0.8033301830291748,
+      "learning_rate": 8.980582524271845e-06,
+      "loss": 0.1733,
+      "step": 7500
+    },
+    {
+      "epoch": 3.762135922330097,
+      "grad_norm": 0.7849830985069275,
+      "learning_rate": 5.946601941747574e-06,
+      "loss": 0.1718,
+      "step": 7750
+    },
+    {
+      "epoch": 3.883495145631068,
+      "grad_norm": 0.7299964427947998,
+      "learning_rate": 2.912621359223301e-06,
+      "loss": 0.1717,
+      "step": 8000
+    },
     {
       "epoch": 4.0,
-      "step": 2060,
-      "total_flos": 3.616227843169714e+18,
-      "train_loss": 2.4079057119425062,
-      "train_runtime": 29920.0718,
-      "train_samples_per_second": 17.625,
-      "train_steps_per_second": 0.069
+      "step": 8240,
+      "total_flos": 2.943708398017659e+18,
+      "train_loss": 0.3226171414828995,
+      "train_runtime": 43740.1611,
+      "train_samples_per_second": 12.056,
+      "train_steps_per_second": 0.188
     }
   ],
   "logging_steps": 250,
-  "max_steps": 2060,
+  "max_steps": 8240,
   "num_input_tokens_seen": 0,
   "num_train_epochs": 4,
   "save_steps": 500,
@@ -91,8 +259,8 @@
       "attributes": {}
     }
   },
-  "total_flos": 3.616227843169714e+18,
-  "train_batch_size": 16,
+  "total_flos": 2.943708398017659e+18,
+  "train_batch_size": 4,
   "trial_name": null,
   "trial_params": null
 }
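
The new step count is internally consistent with the README changes: the effective batch shrank from 256 to 64, so the same dataset takes four times as many optimizer steps (8240 vs. 2060), and both runs imply a training set of about 131,840 samples per epoch. A quick arithmetic check:

```python
# Pure arithmetic on values taken from the two trainer_state.json versions.
epochs = 4
old_steps, old_batch = 2060, 256  # previous run: batch 16 x 16 accumulation
new_steps, new_batch = 8240, 64   # this run: batch 4 x 16 accumulation

assert old_steps // epochs * old_batch == 131840  # samples/epoch, old run
assert new_steps // epochs * new_batch == 131840  # same dataset, new run
assert new_steps == old_steps * (old_batch // new_batch)  # 4x the steps
```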
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:7730c2aeda0ef80b88155d30afa262f9a7e3cb30b892963a7a33bb8c63f05086
+oid sha256:22bf41754b661eab03139ce27d8b6fa7ff961f56dbe30af269f06d11b812936a
 size 5240