ZeroUniqueness committed
Commit 5c3ef38 · 1 parent: 35aec0e

Training in progress step 5900

This view is limited to 50 files because it contains too many changes. See the raw diff.
Files changed (50)
  1. adapter_config.json +5 -5
  2. checkpoint-5400/README.md +0 -20
  3. checkpoint-5400/adapter_config.json +0 -26
  4. checkpoint-5400/adapter_model.bin +0 -3
  5. checkpoint-5400/adapter_model/README.md +0 -20
  6. checkpoint-5400/adapter_model/adapter_config.json +0 -26
  7. checkpoint-5400/adapter_model/adapter_model.bin +0 -3
  8. checkpoint-5400/optimizer.pt +0 -3
  9. checkpoint-5400/rng_state_0.pth +0 -3
  10. checkpoint-5400/rng_state_1.pth +0 -3
  11. checkpoint-5400/rng_state_10.pth +0 -3
  12. checkpoint-5400/rng_state_11.pth +0 -3
  13. checkpoint-5400/rng_state_12.pth +0 -3
  14. checkpoint-5400/rng_state_13.pth +0 -3
  15. checkpoint-5400/rng_state_2.pth +0 -3
  16. checkpoint-5400/rng_state_3.pth +0 -3
  17. checkpoint-5400/rng_state_4.pth +0 -3
  18. checkpoint-5400/rng_state_5.pth +0 -3
  19. checkpoint-5400/rng_state_6.pth +0 -3
  20. checkpoint-5400/rng_state_7.pth +0 -3
  21. checkpoint-5400/rng_state_8.pth +0 -3
  22. checkpoint-5400/rng_state_9.pth +0 -3
  23. checkpoint-5400/scheduler.pt +0 -3
  24. checkpoint-5400/trainer_state.json +0 -1328
  25. checkpoint-5400/training_args.bin +0 -3
  26. checkpoint-5500/README.md +0 -20
  27. checkpoint-5500/adapter_config.json +0 -26
  28. checkpoint-5500/adapter_model.bin +0 -3
  29. checkpoint-5500/adapter_model/README.md +0 -20
  30. checkpoint-5500/adapter_model/adapter_config.json +0 -26
  31. checkpoint-5500/adapter_model/adapter_model.bin +0 -3
  32. checkpoint-5500/optimizer.pt +0 -3
  33. checkpoint-5500/rng_state_0.pth +0 -3
  34. checkpoint-5500/rng_state_1.pth +0 -3
  35. checkpoint-5500/rng_state_10.pth +0 -3
  36. checkpoint-5500/rng_state_11.pth +0 -3
  37. checkpoint-5500/rng_state_12.pth +0 -3
  38. checkpoint-5500/rng_state_13.pth +0 -3
  39. checkpoint-5500/rng_state_2.pth +0 -3
  40. checkpoint-5500/rng_state_3.pth +0 -3
  41. checkpoint-5500/rng_state_4.pth +0 -3
  42. checkpoint-5500/rng_state_5.pth +0 -3
  43. checkpoint-5500/rng_state_6.pth +0 -3
  44. checkpoint-5500/rng_state_7.pth +0 -3
  45. checkpoint-5500/rng_state_8.pth +0 -3
  46. checkpoint-5500/rng_state_9.pth +0 -3
  47. checkpoint-5500/scheduler.pt +0 -3
  48. checkpoint-5500/trainer_state.json +0 -1352
  49. checkpoint-5500/training_args.bin +0 -3
  50. checkpoint-5600/README.md +0 -20
adapter_config.json CHANGED
@@ -3,7 +3,7 @@
  "base_model_name_or_path": "/workspace/webui/models/TheBloke_Llama-2-13B-fp16",
  "bias": "none",
  "fan_in_fan_out": null,
- "inference_mode": true,
+ "inference_mode": false,
  "init_lora_weights": true,
  "layers_pattern": null,
  "layers_to_transform": null,
@@ -14,13 +14,13 @@
  "r": 32,
  "revision": null,
  "target_modules": [
- "q_proj",
  "v_proj",
- "gate_proj",
- "up_proj",
  "o_proj",
  "k_proj",
- "down_proj"
+ "q_proj",
+ "up_proj",
+ "down_proj",
+ "gate_proj"
  ],
  "task_type": "CAUSAL_LM"
 }
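For orientation, a minimal sketch of a `peft` LoRA configuration equivalent to the updated adapter_config.json above. The values are copied from the diff; the variable name and the call itself are illustrative and not part of this repository.

```python
from peft import LoraConfig

# Mirrors the updated adapter_config.json: training mode (inference_mode=False),
# rank-32 LoRA on every attention and MLP projection of the Llama-2-13B base model.
lora_config = LoraConfig(
    r=32,
    lora_alpha=16,
    lora_dropout=0.05,
    bias="none",
    inference_mode=False,
    target_modules=["v_proj", "o_proj", "k_proj", "q_proj", "up_proj", "down_proj", "gate_proj"],
    task_type="CAUSAL_LM",
)
```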
checkpoint-5400/README.md DELETED
@@ -1,20 +0,0 @@
- ---
- library_name: peft
- ---
- ## Training procedure
-
-
- The following `bitsandbytes` quantization config was used during training:
- - load_in_8bit: False
- - load_in_4bit: True
- - llm_int8_threshold: 6.0
- - llm_int8_skip_modules: None
- - llm_int8_enable_fp32_cpu_offload: False
- - llm_int8_has_fp16_weight: False
- - bnb_4bit_quant_type: nf4
- - bnb_4bit_use_double_quant: True
- - bnb_4bit_compute_dtype: bfloat16
- ### Framework versions
-
-
- - PEFT 0.5.0.dev0
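The quantization settings listed in this README correspond roughly to the `transformers` BitsAndBytesConfig sketched below. This is illustrative only; the variable names and the model-loading call are assumptions, not taken from this commit.

```python
import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

# 4-bit NF4 quantization with double quantization and bfloat16 compute,
# mirroring the bitsandbytes settings recorded in the deleted README.
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_use_double_quant=True,
    bnb_4bit_compute_dtype=torch.bfloat16,
)

base_model = AutoModelForCausalLM.from_pretrained(
    "/workspace/webui/models/TheBloke_Llama-2-13B-fp16",
    quantization_config=bnb_config,
    device_map="auto",
)
```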
checkpoint-5400/adapter_config.json DELETED
@@ -1,26 +0,0 @@
- {
- "auto_mapping": null,
- "base_model_name_or_path": "/workspace/webui/models/TheBloke_Llama-2-13B-fp16",
- "bias": "none",
- "fan_in_fan_out": null,
- "inference_mode": true,
- "init_lora_weights": true,
- "layers_pattern": null,
- "layers_to_transform": null,
- "lora_alpha": 16,
- "lora_dropout": 0.05,
- "modules_to_save": null,
- "peft_type": "LORA",
- "r": 32,
- "revision": null,
- "target_modules": [
- "q_proj",
- "gate_proj",
- "o_proj",
- "down_proj",
- "k_proj",
- "v_proj",
- "up_proj"
- ],
- "task_type": "CAUSAL_LM"
- }
checkpoint-5400/adapter_model.bin DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:a996a2cd6511d12fe1f6c74e5551595b29964345d9c5913a1440514d20e74909
- size 500897101
checkpoint-5400/adapter_model/README.md DELETED
@@ -1,20 +0,0 @@
- ---
- library_name: peft
- ---
- ## Training procedure
-
-
- The following `bitsandbytes` quantization config was used during training:
- - load_in_8bit: False
- - load_in_4bit: True
- - llm_int8_threshold: 6.0
- - llm_int8_skip_modules: None
- - llm_int8_enable_fp32_cpu_offload: False
- - llm_int8_has_fp16_weight: False
- - bnb_4bit_quant_type: nf4
- - bnb_4bit_use_double_quant: True
- - bnb_4bit_compute_dtype: bfloat16
- ### Framework versions
-
-
- - PEFT 0.5.0.dev0
checkpoint-5400/adapter_model/adapter_config.json DELETED
@@ -1,26 +0,0 @@
- {
- "auto_mapping": null,
- "base_model_name_or_path": "/workspace/webui/models/TheBloke_Llama-2-13B-fp16",
- "bias": "none",
- "fan_in_fan_out": null,
- "inference_mode": true,
- "init_lora_weights": true,
- "layers_pattern": null,
- "layers_to_transform": null,
- "lora_alpha": 16,
- "lora_dropout": 0.05,
- "modules_to_save": null,
- "peft_type": "LORA",
- "r": 32,
- "revision": null,
- "target_modules": [
- "q_proj",
- "gate_proj",
- "o_proj",
- "down_proj",
- "k_proj",
- "v_proj",
- "up_proj"
- ],
- "task_type": "CAUSAL_LM"
- }
checkpoint-5400/adapter_model/adapter_model.bin DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:a996a2cd6511d12fe1f6c74e5551595b29964345d9c5913a1440514d20e74909
- size 500897101
checkpoint-5400/optimizer.pt DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:a049c191517e4cc2442d5e383237bf35c7e90159b14ee20f4ef3f1e917c94f7d
- size 1001752701
checkpoint-5400/rng_state_0.pth DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:7bb5c299ce12f96c89868d521ed520c7ace1a7a288b7b7a826d6e603aeb79a09
- size 27772
checkpoint-5400/rng_state_1.pth DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:7c1b9ccab0a7c765a8ccd991c977a9ffc75c10546853ccb44210b37c9347c640
- size 27772
checkpoint-5400/rng_state_10.pth DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:8a9597f7462f3230a6cbef5e02b36f74b70ccfcdcc850d8c7e131d92db196783
- size 27789
checkpoint-5400/rng_state_11.pth DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:c8af70b0d7c5428460585eb71f0762f807411f8fcb9745d5f3d67da929bebac6
- size 27789
checkpoint-5400/rng_state_12.pth DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:5e6923f879a023dad7d6429c5f4ae2372fbd5ee6569ab6149a7cdae5b14fb3c3
- size 27789
checkpoint-5400/rng_state_13.pth DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:9c5a179975104cf77a9e13a52a805da05b0e24a85d3849b0f5c3e5420f17eea8
- size 27789
checkpoint-5400/rng_state_2.pth DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:739e768c2ad37f578b983337dedf3c7558c279d10f81301e90d17df333839571
- size 27772
checkpoint-5400/rng_state_3.pth DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:5ef2ea61eeb1cf909c75298fb11154372946533cb23888c32bb3d5dbf8f450be
- size 27772
checkpoint-5400/rng_state_4.pth DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:6a0df83ddd7c2adb2cfaa37c8e8ef974c3ee5b10b047c9debb55e91b9c6abfa0
- size 27772
checkpoint-5400/rng_state_5.pth DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:28d8090af65c1044a84f75fca5aaad6150338b890ba2ff8a597eb42e98730156
- size 27772
checkpoint-5400/rng_state_6.pth DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:39eb6a8461a3f8f64867ca24431f39e0c189f967e51fc06b1cd04b61557b7ab5
- size 27772
checkpoint-5400/rng_state_7.pth DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:c082109f0ab8381e4f9518c09cb1c66ba874df40524ab79671e907b2bae40124
- size 27772
checkpoint-5400/rng_state_8.pth DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:e09522662a1c7e523f238f6591ac23db965da78361158348b4370a4a0e5cadc9
- size 27772
checkpoint-5400/rng_state_9.pth DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:0b144a36861671d0f406faba8754207d253d86202309d2a0cf2f6675277d4179
- size 27772
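The fourteen rng_state_*.pth files above hold one random-number-generator snapshot per training process (14 ranks here). The sketch below assumes the usual PyTorch pattern of saving the Python, NumPy, CPU and CUDA generator states together; the exact keys and layout are an assumption, not confirmed by this commit.

```python
import random

import numpy as np
import torch

# Assumed layout only: a per-rank snapshot of every RNG a resumed run would
# need in order to reproduce data shuffling and dropout exactly.
rng_state = {
    "python": random.getstate(),
    "numpy": np.random.get_state(),
    "cpu": torch.random.get_rng_state(),
    "cuda": torch.cuda.random.get_rng_state_all() if torch.cuda.is_available() else None,
}
torch.save(rng_state, "rng_state_0.pth")
```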
checkpoint-5400/scheduler.pt DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:ced8c2de33809dd534295dd5742d2284a2f142b85e0e85a7f49385eb1bef7354
- size 627
checkpoint-5400/trainer_state.json DELETED
@@ -1,1328 +0,0 @@
- {
- "best_metric": null,
- "best_model_checkpoint": null,
- "epoch": 2.093834819697557,
- "global_step": 5400,
- "is_hyper_param_search": false,
- "is_local_process_zero": true,
- "is_world_process_zero": true,
- "log_history": [
-   ... training-log entries from step 50 through step 5400 omitted here: loss and learning rate are logged every 25-50 steps, with the training loss falling from 1.0435 (step 50) to 0.766 (step 5400) and eval_loss of 0.8522 at step 1000, 0.8340 at step 2000, 0.8229 at step 3000, 0.8143 at step 4000 and 0.8078 at step 5000 ...
- ],
- "max_steps": 7737,
- "num_train_epochs": 3,
- "total_flos": 2.325582880769573e+19,
- "trial_name": null,
- "trial_params": null
- }
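For reference, the log_history of a trainer_state.json like the one deleted above can be read with plain json; a minimal sketch follows (the path assumes the checkpoint layout used in this repository).

```python
import json

# Pull the evaluation-loss curve out of a saved Trainer state file.
with open("checkpoint-5400/trainer_state.json") as f:
    state = json.load(f)

eval_curve = [(entry["step"], entry["eval_loss"])
              for entry in state["log_history"] if "eval_loss" in entry]
print(eval_curve)  # e.g. [(1000, 0.8522...), (2000, 0.8340...), ..., (5000, 0.8078...)]
```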
checkpoint-5400/training_args.bin DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:689d52379bcc7c50e04c40b22a97b473b8de3f17b4096bebf81eb9f37e1dafa6
- size 4027
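Taken together, optimizer.pt, scheduler.pt, the rng_state_*.pth files, trainer_state.json and training_args.bin are what the `transformers` Trainer needs to resume a run from this directory. A minimal sketch, assuming an already-configured Trainer instance is passed in:

```python
from transformers import Trainer

def resume_training(trainer: Trainer, checkpoint_dir: str = "checkpoint-5400"):
    # Trainer restores the adapter weights plus optimizer, scheduler, RNG and
    # trainer state from checkpoint_dir before continuing the run.
    return trainer.train(resume_from_checkpoint=checkpoint_dir)
```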
checkpoint-5500/README.md DELETED
@@ -1,20 +0,0 @@
- ---
- library_name: peft
- ---
- ## Training procedure
-
-
- The following `bitsandbytes` quantization config was used during training:
- - load_in_8bit: False
- - load_in_4bit: True
- - llm_int8_threshold: 6.0
- - llm_int8_skip_modules: None
- - llm_int8_enable_fp32_cpu_offload: False
- - llm_int8_has_fp16_weight: False
- - bnb_4bit_quant_type: nf4
- - bnb_4bit_use_double_quant: True
- - bnb_4bit_compute_dtype: bfloat16
- ### Framework versions
-
-
- - PEFT 0.5.0.dev0
checkpoint-5500/adapter_config.json DELETED
@@ -1,26 +0,0 @@
- {
- "auto_mapping": null,
- "base_model_name_or_path": "/workspace/webui/models/TheBloke_Llama-2-13B-fp16",
- "bias": "none",
- "fan_in_fan_out": null,
- "inference_mode": true,
- "init_lora_weights": true,
- "layers_pattern": null,
- "layers_to_transform": null,
- "lora_alpha": 16,
- "lora_dropout": 0.05,
- "modules_to_save": null,
- "peft_type": "LORA",
- "r": 32,
- "revision": null,
- "target_modules": [
- "q_proj",
- "gate_proj",
- "o_proj",
- "down_proj",
- "k_proj",
- "v_proj",
- "up_proj"
- ],
- "task_type": "CAUSAL_LM"
- }
checkpoint-5500/adapter_model.bin DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:8c7243d5bc7b28d5d5a4bafb40fe933dd1f5d2b41c4b35311c4562dd50c5883a
- size 500897101
checkpoint-5500/adapter_model/README.md DELETED
@@ -1,20 +0,0 @@
- ---
- library_name: peft
- ---
- ## Training procedure
-
-
- The following `bitsandbytes` quantization config was used during training:
- - load_in_8bit: False
- - load_in_4bit: True
- - llm_int8_threshold: 6.0
- - llm_int8_skip_modules: None
- - llm_int8_enable_fp32_cpu_offload: False
- - llm_int8_has_fp16_weight: False
- - bnb_4bit_quant_type: nf4
- - bnb_4bit_use_double_quant: True
- - bnb_4bit_compute_dtype: bfloat16
- ### Framework versions
-
-
- - PEFT 0.5.0.dev0
checkpoint-5500/adapter_model/adapter_config.json DELETED
@@ -1,26 +0,0 @@
- {
- "auto_mapping": null,
- "base_model_name_or_path": "/workspace/webui/models/TheBloke_Llama-2-13B-fp16",
- "bias": "none",
- "fan_in_fan_out": null,
- "inference_mode": true,
- "init_lora_weights": true,
- "layers_pattern": null,
- "layers_to_transform": null,
- "lora_alpha": 16,
- "lora_dropout": 0.05,
- "modules_to_save": null,
- "peft_type": "LORA",
- "r": 32,
- "revision": null,
- "target_modules": [
- "q_proj",
- "gate_proj",
- "o_proj",
- "down_proj",
- "k_proj",
- "v_proj",
- "up_proj"
- ],
- "task_type": "CAUSAL_LM"
- }
checkpoint-5500/adapter_model/adapter_model.bin DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:8c7243d5bc7b28d5d5a4bafb40fe933dd1f5d2b41c4b35311c4562dd50c5883a
- size 500897101
checkpoint-5500/optimizer.pt DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:80adf9c30a8a3ebab96b13b5000ed1b3eb4306228d36a12dfadc4e7cc433a4ec
- size 1001752701
checkpoint-5500/rng_state_0.pth DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:30e0a79fb2da13095943ed5d6ff4010e829f95de5b910b4f831e610487314e6f
- size 27772
checkpoint-5500/rng_state_1.pth DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:5459f0eefaad357478856f70446e39da7b343e17571839caaa8a02c194f7d669
- size 27772
checkpoint-5500/rng_state_10.pth DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:386be381ea58870ea29c419ee56fe49473c2a98987edeacf220a45b1a2223941
- size 27789
checkpoint-5500/rng_state_11.pth DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:d4e40b2e53f6a7d00bfd79e9648b446e522e5f1b008a72d0ecf49f8afaa037c8
- size 27789
checkpoint-5500/rng_state_12.pth DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:800d188559c734beb02115217b063e8bb6cb7259475ac5e72045222c4e97d3ae
- size 27789
checkpoint-5500/rng_state_13.pth DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:675b5fc0a7deb4335cae143f3a24d62ea65052ce52feb5d15793418be4662ac8
- size 27789
checkpoint-5500/rng_state_2.pth DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:c6acf616edcf44f8c9ecee1675d30b9f4f2d2de918203c9ebb49e8a44b17d57c
- size 27772
checkpoint-5500/rng_state_3.pth DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:b2a7c589905769b0dbc43278783063253c4361403cb5e93a12c744d0c001f090
- size 27772
checkpoint-5500/rng_state_4.pth DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:3eca9eeb1ff3b7857657d5e2e3990c3de3e6f2dc13c394c9ae206a0dc471b87a
- size 27772
checkpoint-5500/rng_state_5.pth DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:b17e2394c1de98b9b25d7d58415327217900745e09f7ab9276a36f8c4ea50f4f
- size 27772
checkpoint-5500/rng_state_6.pth DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:a1131467c982915441e4d4625565d4d9601fd3f17521d48e909b1e53bfaff94c
- size 27772
checkpoint-5500/rng_state_7.pth DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:293b86ebb5ed10caa66ef5329f9bec84416b4637fe61389a51d73c4026771d2c
- size 27772
checkpoint-5500/rng_state_8.pth DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:686f925d57ef5c5206a88fc40e5fc96231e3bf44a2a0b7213509ab2181ee4b8f
- size 27772
checkpoint-5500/rng_state_9.pth DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:0f2aba6953f527967beec2652c8b9c0bb338e770c0158e5e0f3e5100d0f7bc63
- size 27772
checkpoint-5500/scheduler.pt DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:ceeb3b5fc20812592b97207e07386031eb6265ebb23202022487dfc5a6412586
- size 627
checkpoint-5500/trainer_state.json DELETED
@@ -1,1352 +0,0 @@
- {
- "best_metric": null,
- "best_model_checkpoint": null,
- "epoch": 2.1326095385808452,
- "global_step": 5500,
- "is_hyper_param_search": false,
- "is_local_process_zero": true,
- "is_world_process_zero": true,
- "log_history": [
-   ... training-log entries omitted: the same history as checkpoint-5400/trainer_state.json (loss 1.0435 at step 50; eval_loss 0.8522 at step 1000 and 0.8340 at step 2000), extended through step 5500; the rendered diff is cut off partway through this file ...
- },
614
- {
615
- "epoch": 1.0,
616
- "learning_rate": 0.00015037510504941303,
617
- "loss": 0.8259,
618
- "step": 2575
619
- },
620
- {
621
- "epoch": 1.01,
622
- "learning_rate": 0.00014949447303800695,
623
- "loss": 0.8133,
624
- "step": 2600
625
- },
626
- {
627
- "epoch": 1.02,
628
- "learning_rate": 0.00014860872761321593,
629
- "loss": 0.8139,
630
- "step": 2625
631
- },
632
- {
633
- "epoch": 1.03,
634
- "learning_rate": 0.00014771796028389405,
635
- "loss": 0.804,
636
- "step": 2650
637
- },
638
- {
639
- "epoch": 1.04,
640
- "learning_rate": 0.0001468222630777225,
641
- "loss": 0.8011,
642
- "step": 2675
643
- },
644
- {
645
- "epoch": 1.05,
646
- "learning_rate": 0.00014592172853170193,
647
- "loss": 0.8037,
648
- "step": 2700
649
- },
650
- {
651
- "epoch": 1.06,
652
- "learning_rate": 0.00014501644968259212,
653
- "loss": 0.8063,
654
- "step": 2725
655
- },
656
- {
657
- "epoch": 1.07,
658
- "learning_rate": 0.00014410652005730025,
659
- "loss": 0.8155,
660
- "step": 2750
661
- },
662
- {
663
- "epoch": 1.08,
664
- "learning_rate": 0.00014319203366321826,
665
- "loss": 0.8066,
666
- "step": 2775
667
- },
668
- {
669
- "epoch": 1.09,
670
- "learning_rate": 0.0001422730849785107,
671
- "loss": 0.8091,
672
- "step": 2800
673
- },
674
- {
675
- "epoch": 1.1,
676
- "learning_rate": 0.0001413497689423539,
677
- "loss": 0.8067,
678
- "step": 2825
679
- },
680
- {
681
- "epoch": 1.11,
682
- "learning_rate": 0.00014042218094512755,
683
- "loss": 0.8046,
684
- "step": 2850
685
- },
686
- {
687
- "epoch": 1.11,
688
- "learning_rate": 0.00013949041681855985,
689
- "loss": 0.8053,
690
- "step": 2875
691
- },
692
- {
693
- "epoch": 1.12,
694
- "learning_rate": 0.0001385545728258264,
695
- "loss": 0.8075,
696
- "step": 2900
697
- },
698
- {
699
- "epoch": 1.13,
700
- "learning_rate": 0.0001376147456516055,
701
- "loss": 0.8015,
702
- "step": 2925
703
- },
704
- {
705
- "epoch": 1.14,
706
- "learning_rate": 0.00013667103239208903,
707
- "loss": 0.8016,
708
- "step": 2950
709
- },
710
- {
711
- "epoch": 1.15,
712
- "learning_rate": 0.00013572353054495126,
713
- "loss": 0.8029,
714
- "step": 2975
715
- },
716
- {
717
- "epoch": 1.16,
718
- "learning_rate": 0.0001347723379992762,
719
- "loss": 0.8017,
720
- "step": 3000
721
- },
722
- {
723
- "epoch": 1.16,
724
- "eval_loss": 0.8229297995567322,
725
- "eval_runtime": 59.3398,
726
- "eval_samples_per_second": 12.302,
727
- "eval_steps_per_second": 0.893,
728
- "step": 3000
729
- },
730
- {
731
- "epoch": 1.17,
732
- "learning_rate": 0.0001338175530254443,
733
- "loss": 0.8049,
734
- "step": 3025
735
- },
736
- {
737
- "epoch": 1.18,
738
- "learning_rate": 0.00013285927426497985,
739
- "loss": 0.8027,
740
- "step": 3050
741
- },
742
- {
743
- "epoch": 1.19,
744
- "learning_rate": 0.00013189760072036008,
745
- "loss": 0.8028,
746
- "step": 3075
747
- },
748
- {
749
- "epoch": 1.2,
750
- "learning_rate": 0.0001309326317447869,
751
- "loss": 0.8021,
752
- "step": 3100
753
- },
754
- {
755
- "epoch": 1.21,
756
- "learning_rate": 0.00012996446703192257,
757
- "loss": 0.8033,
758
- "step": 3125
759
- },
760
- {
761
- "epoch": 1.22,
762
- "learning_rate": 0.00012899320660558986,
763
- "loss": 0.8016,
764
- "step": 3150
765
- },
766
- {
767
- "epoch": 1.23,
768
- "learning_rate": 0.00012801895080943846,
769
- "loss": 0.7995,
770
- "step": 3175
771
- },
772
- {
773
- "epoch": 1.24,
774
- "learning_rate": 0.0001270418002965782,
775
- "loss": 0.799,
776
- "step": 3200
777
- },
778
- {
779
- "epoch": 1.25,
780
- "learning_rate": 0.0001260618560191802,
781
- "loss": 0.8002,
782
- "step": 3225
783
- },
784
- {
785
- "epoch": 1.26,
786
- "learning_rate": 0.00012507921921804717,
787
- "loss": 0.8068,
788
- "step": 3250
789
- },
790
- {
791
- "epoch": 1.27,
792
- "learning_rate": 0.00012409399141215423,
793
- "loss": 0.8041,
794
- "step": 3275
795
- },
796
- {
797
- "epoch": 1.28,
798
- "learning_rate": 0.0001231062743881603,
799
- "loss": 0.7999,
800
- "step": 3300
801
- },
802
- {
803
- "epoch": 1.29,
804
- "learning_rate": 0.0001221161701898926,
805
- "loss": 0.7995,
806
- "step": 3325
807
- },
808
- {
809
- "epoch": 1.3,
810
- "learning_rate": 0.00012112378110780391,
811
- "loss": 0.7959,
812
- "step": 3350
813
- },
814
- {
815
- "epoch": 1.31,
816
- "learning_rate": 0.00012012920966840486,
817
- "loss": 0.7999,
818
- "step": 3375
819
- },
820
- {
821
- "epoch": 1.32,
822
- "learning_rate": 0.00011913255862367151,
823
- "loss": 0.8016,
824
- "step": 3400
825
- },
826
- {
827
- "epoch": 1.33,
828
- "learning_rate": 0.00011813393094042993,
829
- "loss": 0.7944,
830
- "step": 3425
831
- },
832
- {
833
- "epoch": 1.34,
834
- "learning_rate": 0.0001171334297897181,
835
- "loss": 0.8026,
836
- "step": 3450
837
- },
838
- {
839
- "epoch": 1.35,
840
- "learning_rate": 0.00011613115853612734,
841
- "loss": 0.8004,
842
- "step": 3475
843
- },
844
- {
845
- "epoch": 1.36,
846
- "learning_rate": 0.00011512722072712321,
847
- "loss": 0.7992,
848
- "step": 3500
849
- },
850
- {
851
- "epoch": 1.37,
852
- "learning_rate": 0.00011412172008234785,
853
- "loss": 0.8004,
854
- "step": 3525
855
- },
856
- {
857
- "epoch": 1.38,
858
- "learning_rate": 0.0001131147604829043,
859
- "loss": 0.8009,
860
- "step": 3550
861
- },
862
- {
863
- "epoch": 1.39,
864
- "learning_rate": 0.00011210644596062439,
865
- "loss": 0.7993,
866
- "step": 3575
867
- },
868
- {
869
- "epoch": 1.4,
870
- "learning_rate": 0.00011109688068732081,
871
- "loss": 0.7965,
872
- "step": 3600
873
- },
874
- {
875
- "epoch": 1.41,
876
- "learning_rate": 0.00011008616896402482,
877
- "loss": 0.7991,
878
- "step": 3625
879
- },
880
- {
881
- "epoch": 1.42,
882
- "learning_rate": 0.00010907441521021072,
883
- "loss": 0.8026,
884
- "step": 3650
885
- },
886
- {
887
- "epoch": 1.42,
888
- "learning_rate": 0.00010806172395300789,
889
- "loss": 0.7941,
890
- "step": 3675
891
- },
892
- {
893
- "epoch": 1.43,
894
- "learning_rate": 0.00010704819981640186,
895
- "loss": 0.7989,
896
- "step": 3700
897
- },
898
- {
899
- "epoch": 1.44,
900
- "learning_rate": 0.00010603394751042522,
901
- "loss": 0.7981,
902
- "step": 3725
903
- },
904
- {
905
- "epoch": 1.45,
906
- "learning_rate": 0.00010501907182033979,
907
- "loss": 0.7985,
908
- "step": 3750
909
- },
910
- {
911
- "epoch": 1.46,
912
- "learning_rate": 0.000104003677595811,
913
- "loss": 0.7921,
914
- "step": 3775
915
- },
916
- {
917
- "epoch": 1.47,
918
- "learning_rate": 0.00010298786974007555,
919
- "loss": 0.8012,
920
- "step": 3800
921
- },
922
- {
923
- "epoch": 1.48,
924
- "learning_rate": 0.00010197175319910343,
925
- "loss": 0.7906,
926
- "step": 3825
927
- },
928
- {
929
- "epoch": 1.49,
930
- "learning_rate": 0.00010095543295075593,
931
- "loss": 0.7928,
932
- "step": 3850
933
- },
934
- {
935
- "epoch": 1.5,
936
- "learning_rate": 9.993901399393979e-05,
937
- "loss": 0.8018,
938
- "step": 3875
939
- },
940
- {
941
- "epoch": 1.51,
942
- "learning_rate": 9.892260133775968e-05,
943
- "loss": 0.7991,
944
- "step": 3900
945
- },
946
- {
947
- "epoch": 1.52,
948
- "learning_rate": 9.79062999906693e-05,
949
- "loss": 0.795,
950
- "step": 3925
951
- },
952
- {
953
- "epoch": 1.53,
954
- "learning_rate": 9.68902149496227e-05,
955
- "loss": 0.7977,
956
- "step": 3950
957
- },
958
- {
959
- "epoch": 1.54,
960
- "learning_rate": 9.587445118922674e-05,
961
- "loss": 0.8013,
962
- "step": 3975
963
- },
964
- {
965
- "epoch": 1.55,
966
- "learning_rate": 9.485911365089589e-05,
967
- "loss": 0.7978,
968
- "step": 4000
969
- },
970
- {
971
- "epoch": 1.55,
972
- "eval_loss": 0.8142631649971008,
973
- "eval_runtime": 59.4108,
974
- "eval_samples_per_second": 12.287,
975
- "eval_steps_per_second": 0.892,
976
- "step": 4000
977
- },
978
- {
979
- "epoch": 1.56,
980
- "learning_rate": 9.384430723201036e-05,
981
- "loss": 0.7912,
982
- "step": 4025
983
- },
984
- {
985
- "epoch": 1.57,
986
- "learning_rate": 9.283013677507902e-05,
987
- "loss": 0.7919,
988
- "step": 4050
989
- },
990
- {
991
- "epoch": 1.58,
992
- "learning_rate": 9.181670705690761e-05,
993
- "loss": 0.7919,
994
- "step": 4075
995
- },
996
- {
997
- "epoch": 1.59,
998
- "learning_rate": 9.080412277777413e-05,
999
- "loss": 0.8018,
1000
- "step": 4100
1001
- },
1002
- {
1003
- "epoch": 1.6,
1004
- "learning_rate": 8.979248855061188e-05,
1005
- "loss": 0.7811,
1006
- "step": 4125
1007
- },
1008
- {
1009
- "epoch": 1.61,
1010
- "learning_rate": 8.878190889020159e-05,
1011
- "loss": 0.7919,
1012
- "step": 4150
1013
- },
1014
- {
1015
- "epoch": 1.62,
1016
- "learning_rate": 8.777248820237376e-05,
1017
- "loss": 0.7994,
1018
- "step": 4175
1019
- },
1020
- {
1021
- "epoch": 1.63,
1022
- "learning_rate": 8.676433077322215e-05,
1023
- "loss": 0.7956,
1024
- "step": 4200
1025
- },
1026
- {
1027
- "epoch": 1.64,
1028
- "learning_rate": 8.575754075832973e-05,
1029
- "loss": 0.7968,
1030
- "step": 4225
1031
- },
1032
- {
1033
- "epoch": 1.65,
1034
- "learning_rate": 8.475222217200801e-05,
1035
- "loss": 0.7905,
1036
- "step": 4250
1037
- },
1038
- {
1039
- "epoch": 1.66,
1040
- "learning_rate": 8.374847887655112e-05,
1041
- "loss": 0.7889,
1042
- "step": 4275
1043
- },
1044
- {
1045
- "epoch": 1.67,
1046
- "learning_rate": 8.274641457150543e-05,
1047
- "loss": 0.7988,
1048
- "step": 4300
1049
- },
1050
- {
1051
- "epoch": 1.68,
1052
- "learning_rate": 8.174613278295608e-05,
1053
- "loss": 0.7947,
1054
- "step": 4325
1055
- },
1056
- {
1057
- "epoch": 1.69,
1058
- "learning_rate": 8.074773685283137e-05,
1059
- "loss": 0.7929,
1060
- "step": 4350
1061
- },
1062
- {
1063
- "epoch": 1.7,
1064
- "learning_rate": 7.97513299282264e-05,
1065
- "loss": 0.7949,
1066
- "step": 4375
1067
- },
1068
- {
1069
- "epoch": 1.71,
1070
- "learning_rate": 7.875701495074638e-05,
1071
- "loss": 0.7925,
1072
- "step": 4400
1073
- },
1074
- {
1075
- "epoch": 1.72,
1076
- "learning_rate": 7.776489464587158e-05,
1077
- "loss": 0.7917,
1078
- "step": 4425
1079
- },
1080
- {
1081
- "epoch": 1.73,
1082
- "learning_rate": 7.677507151234448e-05,
1083
- "loss": 0.7905,
1084
- "step": 4450
1085
- },
1086
- {
1087
- "epoch": 1.74,
1088
- "learning_rate": 7.578764781158034e-05,
1089
- "loss": 0.7912,
1090
- "step": 4475
1091
- },
1092
- {
1093
- "epoch": 1.74,
1094
- "learning_rate": 7.480272555710227e-05,
1095
- "loss": 0.8006,
1096
- "step": 4500
1097
- },
1098
- {
1099
- "epoch": 1.75,
1100
- "learning_rate": 7.382040650400185e-05,
1101
- "loss": 0.7937,
1102
- "step": 4525
1103
- },
1104
- {
1105
- "epoch": 1.76,
1106
- "learning_rate": 7.28407921384267e-05,
1107
- "loss": 0.794,
1108
- "step": 4550
1109
- },
1110
- {
1111
- "epoch": 1.77,
1112
- "learning_rate": 7.186398366709545e-05,
1113
- "loss": 0.7931,
1114
- "step": 4575
1115
- },
1116
- {
1117
- "epoch": 1.78,
1118
- "learning_rate": 7.089008200684197e-05,
1119
- "loss": 0.7982,
1120
- "step": 4600
1121
- },
1122
- {
1123
- "epoch": 1.79,
1124
- "learning_rate": 6.991918777418928e-05,
1125
- "loss": 0.7916,
1126
- "step": 4625
1127
- },
1128
- {
1129
- "epoch": 1.8,
1130
- "learning_rate": 6.895140127495455e-05,
1131
- "loss": 0.7919,
1132
- "step": 4650
1133
- },
1134
- {
1135
- "epoch": 1.81,
1136
- "learning_rate": 6.798682249388631e-05,
1137
- "loss": 0.7863,
1138
- "step": 4675
1139
- },
1140
- {
1141
- "epoch": 1.82,
1142
- "learning_rate": 6.702555108433461e-05,
1143
- "loss": 0.789,
1144
- "step": 4700
1145
- },
1146
- {
1147
- "epoch": 1.83,
1148
- "learning_rate": 6.606768635795574e-05,
1149
- "loss": 0.7902,
1150
- "step": 4725
1151
- },
1152
- {
1153
- "epoch": 1.84,
1154
- "learning_rate": 6.511332727445191e-05,
1155
- "loss": 0.7924,
1156
- "step": 4750
1157
- },
1158
- {
1159
- "epoch": 1.85,
1160
- "learning_rate": 6.416257243134747e-05,
1161
- "loss": 0.7957,
1162
- "step": 4775
1163
- },
1164
- {
1165
- "epoch": 1.86,
1166
- "learning_rate": 6.321552005380256e-05,
1167
- "loss": 0.7916,
1168
- "step": 4800
1169
- },
1170
- {
1171
- "epoch": 1.87,
1172
- "learning_rate": 6.22722679844652e-05,
1173
- "loss": 0.7867,
1174
- "step": 4825
1175
- },
1176
- {
1177
- "epoch": 1.88,
1178
- "learning_rate": 6.133291367336284e-05,
1179
- "loss": 0.7944,
1180
- "step": 4850
1181
- },
1182
- {
1183
- "epoch": 1.89,
1184
- "learning_rate": 6.039755416783457e-05,
1185
- "loss": 0.7982,
1186
- "step": 4875
1187
- },
1188
- {
1189
- "epoch": 1.9,
1190
- "learning_rate": 5.946628610250484e-05,
1191
- "loss": 0.7918,
1192
- "step": 4900
1193
- },
1194
- {
1195
- "epoch": 1.91,
1196
- "learning_rate": 5.853920568929996e-05,
1197
- "loss": 0.7921,
1198
- "step": 4925
1199
- },
1200
- {
1201
- "epoch": 1.92,
1202
- "learning_rate": 5.761640870750799e-05,
1203
- "loss": 0.7878,
1204
- "step": 4950
1205
- },
1206
- {
1207
- "epoch": 1.93,
1208
- "learning_rate": 5.669799049388375e-05,
1209
- "loss": 0.7901,
1210
- "step": 4975
1211
- },
1212
- {
1213
- "epoch": 1.94,
1214
- "learning_rate": 5.578404593279911e-05,
1215
- "loss": 0.7858,
1216
- "step": 5000
1217
- },
1218
- {
1219
- "epoch": 1.94,
1220
- "eval_loss": 0.807844877243042,
1221
- "eval_runtime": 59.586,
1222
- "eval_samples_per_second": 12.251,
1223
- "eval_steps_per_second": 0.889,
1224
- "step": 5000
1225
- },
1226
- {
1227
- "epoch": 1.95,
1228
- "learning_rate": 5.487466944644033e-05,
1229
- "loss": 0.7902,
1230
- "step": 5025
1231
- },
1232
- {
1233
- "epoch": 1.96,
1234
- "learning_rate": 5.3969954985052996e-05,
1235
- "loss": 0.7979,
1236
- "step": 5050
1237
- },
1238
- {
1239
- "epoch": 1.97,
1240
- "learning_rate": 5.306999601723579e-05,
1241
- "loss": 0.7931,
1242
- "step": 5075
1243
- },
1244
- {
1245
- "epoch": 1.98,
1246
- "learning_rate": 5.21748855202839e-05,
1247
- "loss": 0.7868,
1248
- "step": 5100
1249
- },
1250
- {
1251
- "epoch": 1.99,
1252
- "learning_rate": 5.128471597058342e-05,
1253
- "loss": 0.7993,
1254
- "step": 5125
1255
- },
1256
- {
1257
- "epoch": 2.0,
1258
- "learning_rate": 5.03995793340572e-05,
1259
- "loss": 0.7892,
1260
- "step": 5150
1261
- },
1262
- {
1263
- "epoch": 2.01,
1264
- "learning_rate": 4.9519567056663694e-05,
1265
- "loss": 0.7788,
1266
- "step": 5175
1267
- },
1268
- {
1269
- "epoch": 2.02,
1270
- "learning_rate": 4.864477005494938e-05,
1271
- "loss": 0.7654,
1272
- "step": 5200
1273
- },
1274
- {
1275
- "epoch": 2.03,
1276
- "learning_rate": 4.777527870665592e-05,
1277
- "loss": 0.7468,
1278
- "step": 5225
1279
- },
1280
- {
1281
- "epoch": 2.04,
1282
- "learning_rate": 4.691118284138296e-05,
1283
- "loss": 0.7359,
1284
- "step": 5250
1285
- },
1286
- {
1287
- "epoch": 2.05,
1288
- "learning_rate": 4.605257173130763e-05,
1289
- "loss": 0.7422,
1290
- "step": 5275
1291
- },
1292
- {
1293
- "epoch": 2.06,
1294
- "learning_rate": 4.519953408196152e-05,
1295
- "loss": 0.7424,
1296
- "step": 5300
1297
- },
1298
- {
1299
- "epoch": 2.06,
1300
- "learning_rate": 4.435215802306635e-05,
1301
- "loss": 0.7521,
1302
- "step": 5325
1303
- },
1304
- {
1305
- "epoch": 2.07,
1306
- "learning_rate": 4.351053109942894e-05,
1307
- "loss": 0.7477,
1308
- "step": 5350
1309
- },
1310
- {
1311
- "epoch": 2.08,
1312
- "learning_rate": 4.2674740261896776e-05,
1313
- "loss": 0.7456,
1314
- "step": 5375
1315
- },
1316
- {
1317
- "epoch": 2.09,
1318
- "learning_rate": 4.1844871858374844e-05,
1319
- "loss": 0.766,
1320
- "step": 5400
1321
- },
1322
- {
1323
- "epoch": 2.1,
1324
- "learning_rate": 4.1021011624904814e-05,
1325
- "loss": 0.7664,
1326
- "step": 5425
1327
- },
1328
- {
1329
- "epoch": 2.11,
1330
- "learning_rate": 4.0203244676807353e-05,
1331
- "loss": 0.7703,
1332
- "step": 5450
1333
- },
1334
- {
1335
- "epoch": 2.12,
1336
- "learning_rate": 3.939165549988873e-05,
1337
- "loss": 0.7674,
1338
- "step": 5475
1339
- },
1340
- {
1341
- "epoch": 2.13,
1342
- "learning_rate": 3.858632794171222e-05,
1343
- "loss": 0.7722,
1344
- "step": 5500
1345
- }
1346
- ],
1347
- "max_steps": 7737,
1348
- "num_train_epochs": 3,
1349
- "total_flos": 2.3687017848899633e+19,
1350
- "trial_name": null,
1351
- "trial_params": null
1352
- }
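Note: the `trainer_state.json` deleted above is the Trainer's running log for this checkpoint (per-step learning rate and loss, periodic eval metrics, and the run totals such as `max_steps` and `num_train_epochs`). Below is a minimal sketch, not part of this commit, for summarizing such a file before it is removed. It assumes the standard Hugging Face Trainer layout, where the step records shown in the diff sit under a `log_history` key; the checkpoint path is illustrative.

```python
# Summarize a checkpoint's trainer_state.json (sketch; path is illustrative).
import json

with open("checkpoint-5500/trainer_state.json") as f:
    state = json.load(f)

# Training records carry a "loss" key; evaluation records carry "eval_loss".
train_logs = [r for r in state["log_history"] if "loss" in r]
eval_logs = [r for r in state["log_history"] if "eval_loss" in r]

last_train, last_eval = train_logs[-1], eval_logs[-1]
print(f"progress: step {last_train['step']} of {state['max_steps']} "
      f"({state['num_train_epochs']} epochs planned)")
print(f"last train loss {last_train['loss']:.4f} at lr {last_train['learning_rate']:.2e}")
print(f"last eval loss  {last_eval['eval_loss']:.4f} at step {last_eval['step']}")
```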
checkpoint-5500/training_args.bin DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:689d52379bcc7c50e04c40b22a97b473b8de3f17b4096bebf81eb9f37e1dafa6
3
- size 4027
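Note: only the Git LFS pointer (version, oid, size) appears in the diff; the deleted `training_args.bin` itself is a small pickled object, conventionally a `transformers.TrainingArguments`. A sketch of how it could be inspected once the actual LFS blob is available; the path and the assumption that it unpickles to `TrainingArguments` are not confirmed by this commit.

```python
# Inspect a checkpoint's training_args.bin (sketch; requires the real LFS blob).
import torch

# Note: very recent PyTorch versions default to weights_only loading and may
# need torch.load(..., weights_only=False) for a pickled TrainingArguments.
args = torch.load("checkpoint-5500/training_args.bin")
print(type(args).__name__)
print(args.learning_rate, args.num_train_epochs, args.max_steps)
```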
 
 
 
 
checkpoint-5600/README.md DELETED
@@ -1,20 +0,0 @@
1
- ---
2
- library_name: peft
3
- ---
4
- ## Training procedure
5
-
6
-
7
- The following `bitsandbytes` quantization config was used during training:
8
- - load_in_8bit: False
9
- - load_in_4bit: True
10
- - llm_int8_threshold: 6.0
11
- - llm_int8_skip_modules: None
12
- - llm_int8_enable_fp32_cpu_offload: False
13
- - llm_int8_has_fp16_weight: False
14
- - bnb_4bit_quant_type: nf4
15
- - bnb_4bit_use_double_quant: True
16
- - bnb_4bit_compute_dtype: bfloat16
17
- ### Framework versions
18
-
19
-
20
- - PEFT 0.5.0.dev0
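Note: the deleted checkpoint README records the `bitsandbytes` quantization settings the adapter was trained with (4-bit NF4, double quantization, bfloat16 compute). A sketch of the equivalent `transformers.BitsAndBytesConfig`; the base-model path is a placeholder, and the remaining listed fields (llm_int8_* options) are left at their defaults, which match the values shown above.

```python
# Reconstruct the quantization config listed in the deleted README (sketch).
import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,                      # load_in_4bit: True / load_in_8bit: False
    bnb_4bit_quant_type="nf4",              # bnb_4bit_quant_type: nf4
    bnb_4bit_use_double_quant=True,         # bnb_4bit_use_double_quant: True
    bnb_4bit_compute_dtype=torch.bfloat16,  # bnb_4bit_compute_dtype: bfloat16
)

model = AutoModelForCausalLM.from_pretrained(
    "path/to/llama-2-13b-base",  # placeholder; substitute the actual base model
    quantization_config=bnb_config,
    device_map="auto",
)
```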