ssunbear committed on
Commit
b5f6707
1 Parent(s): 2ee856e

Upload 7 files

Files changed (7)
  1. config.json +44 -0
  2. model.safetensors +3 -0
  3. optimizer.pt +3 -0
  4. rng_state.pth +3 -0
  5. scheduler.pt +3 -0
  6. trainer_state.json +570 -0
  7. training_args.bin +3 -0
config.json ADDED
@@ -0,0 +1,44 @@
+ {
+ "_name_or_path": "klue/bert-base",
+ "architectures": [
+ "BertForSequenceClassification"
+ ],
+ "attention_probs_dropout_prob": 0.1,
+ "classifier_dropout": null,
+ "hidden_act": "gelu",
+ "hidden_dropout_prob": 0.1,
+ "hidden_size": 768,
+ "id2label": {
+ "0": "LABEL_0",
+ "1": "LABEL_1",
+ "2": "LABEL_2",
+ "3": "LABEL_3",
+ "4": "LABEL_4",
+ "5": "LABEL_5",
+ "6": "LABEL_6"
+ },
+ "initializer_range": 0.02,
+ "intermediate_size": 3072,
+ "label2id": {
+ "LABEL_0": 0,
+ "LABEL_1": 1,
+ "LABEL_2": 2,
+ "LABEL_3": 3,
+ "LABEL_4": 4,
+ "LABEL_5": 5,
+ "LABEL_6": 6
+ },
+ "layer_norm_eps": 1e-12,
+ "max_position_embeddings": 512,
+ "model_type": "bert",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 12,
+ "pad_token_id": 0,
+ "position_embedding_type": "absolute",
+ "problem_type": "single_label_classification",
+ "torch_dtype": "float32",
+ "transformers_version": "4.46.1",
+ "type_vocab_size": 2,
+ "use_cache": true,
+ "vocab_size": 32000
+ }
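
The config above is klue/bert-base fine-tuned with a 7-class sequence-classification head (labels are left as the generic LABEL_0 through LABEL_6). A minimal loading sketch, assuming the uploaded files sit in a local checkpoint directory (the name checkpoint-600 below is taken from trainer_state.json and is otherwise hypothetical) and that the original klue/bert-base tokenizer is used, since no tokenizer files are part of this commit:

import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

model_dir = "./checkpoint-600"  # hypothetical local path holding config.json + model.safetensors
tokenizer = AutoTokenizer.from_pretrained("klue/bert-base")  # tokenizer files are not included in this commit
model = AutoModelForSequenceClassification.from_pretrained(model_dir)
model.eval()

inputs = tokenizer("예시 뉴스 제목", return_tensors="pt")  # "example news headline"
with torch.no_grad():
    predicted = model(**inputs).logits.argmax(dim=-1).item()
print(model.config.id2label[predicted])  # one of LABEL_0 .. LABEL_6
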
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c6d69fd3b07839a370f696636a37793b8baade9734b03e9841ad7b96da88c3dd
+ size 442514444
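
model.safetensors (like optimizer.pt, rng_state.pth, scheduler.pt, and training_args.bin below) is stored as a Git LFS pointer: the three lines record the LFS spec version, the SHA-256 of the real file, and its size in bytes. A minimal sketch for checking a resolved download against this pointer, using only the standard library:

import hashlib
import os

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    # Stream the file so large weight files do not have to fit in memory.
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        while block := f.read(chunk_size):
            digest.update(block)
    return digest.hexdigest()

path = "model.safetensors"  # the resolved binary, not the pointer text
assert os.path.getsize(path) == 442514444
assert sha256_of(path) == "c6d69fd3b07839a370f696636a37793b8baade9734b03e9841ad7b96da88c3dd"
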
optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e9095c169227d79339439d844811a97cfaa2e12577be53f46f05cca7e41e4b8b
+ size 885149946
rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cbccc70bd33155ac584a271ef918f2788d349e8731ac943c0a3e0a7534674813
+ size 14244
scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:54c7bfe41b244ff7fac493dc230b65609829d3986f8020d396a6f4190d3e5594
+ size 1064
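
optimizer.pt, scheduler.pt, and rng_state.pth are the Trainer checkpoint state that lets training resume deterministically (for example via trainer.train(resume_from_checkpoint=...)). A minimal sketch for inspecting them, assuming the resolved files are in the current directory; newer PyTorch releases need weights_only=False here because these are pickled Python objects rather than plain tensors:

import torch

optimizer_state = torch.load("optimizer.pt", map_location="cpu", weights_only=False)
scheduler_state = torch.load("scheduler.pt", map_location="cpu", weights_only=False)
rng_state = torch.load("rng_state.pth", map_location="cpu", weights_only=False)

print(optimizer_state["param_groups"][0]["lr"])  # learning rate recorded in the optimizer's param groups
print(scheduler_state.get("last_epoch"), scheduler_state.get("_last_lr"))  # position in the LR schedule
print(sorted(rng_state.keys()))  # python / numpy / cpu (and cuda, if saved) RNG snapshots
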
trainer_state.json ADDED
@@ -0,0 +1,570 @@
+ {
+ "best_metric": 0.840443960959029,
+ "best_model_checkpoint": "/data/ephemeral/home/level2-nlp-datacentric-nlp-15/models/train_new_8564_7000fconcat+deepl_15000.csv_20241107_163124/checkpoint-600",
+ "epoch": 2.0,
+ "eval_steps": 100,
+ "global_step": 692,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.028901734104046242,
+ "grad_norm": 5.670835018157959,
+ "learning_rate": 1.971098265895954e-05,
+ "loss": 1.8852,
+ "step": 10
+ },
+ {
+ "epoch": 0.057803468208092484,
+ "grad_norm": 5.40836763381958,
+ "learning_rate": 1.9421965317919077e-05,
+ "loss": 1.6768,
+ "step": 20
+ },
+ {
+ "epoch": 0.08670520231213873,
+ "grad_norm": 6.977480888366699,
+ "learning_rate": 1.9132947976878615e-05,
+ "loss": 1.4293,
+ "step": 30
+ },
+ {
+ "epoch": 0.11560693641618497,
+ "grad_norm": 6.8324995040893555,
+ "learning_rate": 1.8843930635838153e-05,
+ "loss": 1.2372,
+ "step": 40
+ },
+ {
+ "epoch": 0.14450867052023122,
+ "grad_norm": 4.969491481781006,
+ "learning_rate": 1.855491329479769e-05,
+ "loss": 1.0231,
+ "step": 50
+ },
+ {
+ "epoch": 0.17341040462427745,
+ "grad_norm": 7.948673725128174,
+ "learning_rate": 1.8265895953757225e-05,
+ "loss": 0.9941,
+ "step": 60
+ },
+ {
+ "epoch": 0.2023121387283237,
+ "grad_norm": 7.314361572265625,
+ "learning_rate": 1.7976878612716763e-05,
+ "loss": 0.8929,
+ "step": 70
+ },
+ {
+ "epoch": 0.23121387283236994,
+ "grad_norm": 7.860761642456055,
+ "learning_rate": 1.76878612716763e-05,
+ "loss": 0.7236,
+ "step": 80
+ },
+ {
+ "epoch": 0.26011560693641617,
+ "grad_norm": 8.400164604187012,
+ "learning_rate": 1.739884393063584e-05,
+ "loss": 0.7342,
+ "step": 90
+ },
+ {
+ "epoch": 0.28901734104046245,
+ "grad_norm": 5.281578063964844,
+ "learning_rate": 1.7109826589595377e-05,
+ "loss": 0.7285,
+ "step": 100
+ },
+ {
+ "epoch": 0.28901734104046245,
+ "eval_f1": 0.7855931775121717,
+ "eval_loss": 0.6838143467903137,
+ "eval_runtime": 43.4732,
+ "eval_samples_per_second": 108.895,
+ "eval_steps_per_second": 3.404,
+ "step": 100
+ },
+ {
+ "epoch": 0.3179190751445087,
+ "grad_norm": 6.559142589569092,
+ "learning_rate": 1.6820809248554915e-05,
+ "loss": 0.767,
+ "step": 110
+ },
+ {
+ "epoch": 0.3468208092485549,
+ "grad_norm": 6.961650371551514,
+ "learning_rate": 1.6531791907514452e-05,
+ "loss": 0.72,
+ "step": 120
+ },
+ {
+ "epoch": 0.37572254335260113,
+ "grad_norm": 7.83851432800293,
+ "learning_rate": 1.624277456647399e-05,
+ "loss": 0.6204,
+ "step": 130
+ },
+ {
+ "epoch": 0.4046242774566474,
+ "grad_norm": 5.967935562133789,
+ "learning_rate": 1.5953757225433528e-05,
+ "loss": 0.6811,
+ "step": 140
+ },
+ {
+ "epoch": 0.43352601156069365,
+ "grad_norm": 7.195727348327637,
+ "learning_rate": 1.5664739884393066e-05,
+ "loss": 0.7254,
+ "step": 150
+ },
+ {
+ "epoch": 0.4624277456647399,
+ "grad_norm": 6.0163750648498535,
+ "learning_rate": 1.5375722543352604e-05,
+ "loss": 0.5248,
+ "step": 160
+ },
+ {
+ "epoch": 0.4913294797687861,
+ "grad_norm": 7.599509239196777,
+ "learning_rate": 1.508670520231214e-05,
+ "loss": 0.6095,
+ "step": 170
+ },
+ {
+ "epoch": 0.5202312138728323,
+ "grad_norm": 11.452792167663574,
+ "learning_rate": 1.4797687861271676e-05,
+ "loss": 0.6339,
+ "step": 180
+ },
+ {
+ "epoch": 0.5491329479768786,
+ "grad_norm": 9.863639831542969,
+ "learning_rate": 1.4508670520231216e-05,
+ "loss": 0.5901,
+ "step": 190
+ },
+ {
+ "epoch": 0.5780346820809249,
+ "grad_norm": 6.931671619415283,
+ "learning_rate": 1.4219653179190754e-05,
+ "loss": 0.5106,
+ "step": 200
+ },
+ {
+ "epoch": 0.5780346820809249,
+ "eval_f1": 0.8163391427279263,
+ "eval_loss": 0.604805588722229,
+ "eval_runtime": 43.5017,
+ "eval_samples_per_second": 108.823,
+ "eval_steps_per_second": 3.402,
+ "step": 200
+ },
+ {
+ "epoch": 0.6069364161849711,
+ "grad_norm": 7.751237869262695,
+ "learning_rate": 1.393063583815029e-05,
+ "loss": 0.6615,
+ "step": 210
+ },
+ {
+ "epoch": 0.6358381502890174,
+ "grad_norm": 7.981545925140381,
+ "learning_rate": 1.3641618497109828e-05,
+ "loss": 0.6194,
+ "step": 220
+ },
+ {
+ "epoch": 0.6647398843930635,
+ "grad_norm": 8.75810432434082,
+ "learning_rate": 1.3352601156069365e-05,
+ "loss": 0.6756,
+ "step": 230
+ },
+ {
+ "epoch": 0.6936416184971098,
+ "grad_norm": 7.4759521484375,
+ "learning_rate": 1.3063583815028902e-05,
+ "loss": 0.6079,
+ "step": 240
+ },
+ {
+ "epoch": 0.7225433526011561,
+ "grad_norm": 9.445594787597656,
+ "learning_rate": 1.2774566473988441e-05,
+ "loss": 0.5329,
+ "step": 250
+ },
+ {
+ "epoch": 0.7514450867052023,
+ "grad_norm": 7.227514266967773,
+ "learning_rate": 1.2485549132947979e-05,
+ "loss": 0.5342,
+ "step": 260
+ },
+ {
+ "epoch": 0.7803468208092486,
+ "grad_norm": 6.781445503234863,
+ "learning_rate": 1.2196531791907515e-05,
+ "loss": 0.5455,
+ "step": 270
+ },
+ {
+ "epoch": 0.8092485549132948,
+ "grad_norm": 7.4944353103637695,
+ "learning_rate": 1.1907514450867053e-05,
+ "loss": 0.5171,
+ "step": 280
+ },
+ {
+ "epoch": 0.838150289017341,
+ "grad_norm": 12.261595726013184,
+ "learning_rate": 1.161849710982659e-05,
+ "loss": 0.5793,
+ "step": 290
+ },
+ {
+ "epoch": 0.8670520231213873,
+ "grad_norm": 7.033445835113525,
+ "learning_rate": 1.1329479768786129e-05,
+ "loss": 0.6664,
+ "step": 300
+ },
+ {
+ "epoch": 0.8670520231213873,
+ "eval_f1": 0.8202014204332396,
+ "eval_loss": 0.5958309769630432,
+ "eval_runtime": 43.4645,
+ "eval_samples_per_second": 108.916,
+ "eval_steps_per_second": 3.405,
+ "step": 300
+ },
+ {
+ "epoch": 0.8959537572254336,
+ "grad_norm": 16.04691505432129,
+ "learning_rate": 1.1040462427745667e-05,
+ "loss": 0.5736,
+ "step": 310
+ },
+ {
+ "epoch": 0.9248554913294798,
+ "grad_norm": 6.906232833862305,
+ "learning_rate": 1.0751445086705203e-05,
+ "loss": 0.5677,
+ "step": 320
+ },
+ {
+ "epoch": 0.953757225433526,
+ "grad_norm": 7.995438098907471,
+ "learning_rate": 1.046242774566474e-05,
+ "loss": 0.5872,
+ "step": 330
+ },
+ {
+ "epoch": 0.9826589595375722,
+ "grad_norm": 7.967160701751709,
+ "learning_rate": 1.0173410404624278e-05,
+ "loss": 0.5363,
+ "step": 340
+ },
+ {
+ "epoch": 1.0115606936416186,
+ "grad_norm": 7.901902198791504,
+ "learning_rate": 9.884393063583816e-06,
+ "loss": 0.4558,
+ "step": 350
+ },
+ {
+ "epoch": 1.0404624277456647,
+ "grad_norm": 3.8353588581085205,
+ "learning_rate": 9.595375722543352e-06,
+ "loss": 0.563,
+ "step": 360
+ },
+ {
+ "epoch": 1.069364161849711,
+ "grad_norm": 10.367301940917969,
+ "learning_rate": 9.306358381502892e-06,
+ "loss": 0.4669,
+ "step": 370
+ },
+ {
+ "epoch": 1.0982658959537572,
+ "grad_norm": 3.960563898086548,
+ "learning_rate": 9.017341040462428e-06,
+ "loss": 0.4186,
+ "step": 380
+ },
+ {
+ "epoch": 1.1271676300578035,
+ "grad_norm": 5.368332862854004,
+ "learning_rate": 8.728323699421966e-06,
+ "loss": 0.4852,
+ "step": 390
+ },
+ {
+ "epoch": 1.1560693641618498,
+ "grad_norm": 4.498876094818115,
+ "learning_rate": 8.439306358381504e-06,
+ "loss": 0.5515,
+ "step": 400
+ },
+ {
+ "epoch": 1.1560693641618498,
+ "eval_f1": 0.832606512739663,
+ "eval_loss": 0.5639938116073608,
+ "eval_runtime": 43.4855,
+ "eval_samples_per_second": 108.864,
+ "eval_steps_per_second": 3.403,
+ "step": 400
+ },
+ {
+ "epoch": 1.1849710982658959,
+ "grad_norm": 6.101068496704102,
+ "learning_rate": 8.150289017341042e-06,
+ "loss": 0.5084,
+ "step": 410
+ },
+ {
+ "epoch": 1.2138728323699421,
+ "grad_norm": 8.67908000946045,
+ "learning_rate": 7.86127167630058e-06,
+ "loss": 0.4328,
+ "step": 420
+ },
+ {
+ "epoch": 1.2427745664739884,
+ "grad_norm": 6.932382106781006,
+ "learning_rate": 7.5722543352601166e-06,
+ "loss": 0.3798,
+ "step": 430
+ },
+ {
+ "epoch": 1.2716763005780347,
+ "grad_norm": 6.64376163482666,
+ "learning_rate": 7.283236994219654e-06,
+ "loss": 0.4763,
+ "step": 440
+ },
+ {
+ "epoch": 1.300578034682081,
+ "grad_norm": 5.648944854736328,
+ "learning_rate": 6.9942196531791914e-06,
+ "loss": 0.4008,
+ "step": 450
+ },
+ {
+ "epoch": 1.3294797687861273,
+ "grad_norm": 8.3818998336792,
+ "learning_rate": 6.7052023121387284e-06,
+ "loss": 0.4959,
+ "step": 460
+ },
+ {
+ "epoch": 1.3583815028901733,
+ "grad_norm": 6.271963119506836,
+ "learning_rate": 6.416184971098266e-06,
+ "loss": 0.4123,
+ "step": 470
+ },
+ {
+ "epoch": 1.3872832369942196,
+ "grad_norm": 10.868525505065918,
+ "learning_rate": 6.127167630057804e-06,
+ "loss": 0.4478,
+ "step": 480
+ },
+ {
+ "epoch": 1.416184971098266,
+ "grad_norm": 4.8018012046813965,
+ "learning_rate": 5.838150289017341e-06,
+ "loss": 0.3475,
+ "step": 490
+ },
+ {
+ "epoch": 1.4450867052023122,
+ "grad_norm": 7.195227146148682,
+ "learning_rate": 5.549132947976878e-06,
+ "loss": 0.5246,
+ "step": 500
+ },
+ {
+ "epoch": 1.4450867052023122,
+ "eval_f1": 0.8394367921843663,
+ "eval_loss": 0.5455829501152039,
+ "eval_runtime": 43.4865,
+ "eval_samples_per_second": 108.861,
+ "eval_steps_per_second": 3.403,
+ "step": 500
+ },
+ {
+ "epoch": 1.4739884393063583,
+ "grad_norm": 7.47812557220459,
+ "learning_rate": 5.260115606936417e-06,
+ "loss": 0.4008,
+ "step": 510
+ },
+ {
+ "epoch": 1.5028901734104045,
+ "grad_norm": 3.8493812084198,
+ "learning_rate": 4.971098265895954e-06,
+ "loss": 0.4166,
+ "step": 520
+ },
+ {
+ "epoch": 1.5317919075144508,
+ "grad_norm": 6.382811069488525,
+ "learning_rate": 4.682080924855492e-06,
+ "loss": 0.4287,
+ "step": 530
+ },
+ {
+ "epoch": 1.560693641618497,
+ "grad_norm": 4.259267330169678,
+ "learning_rate": 4.3930635838150296e-06,
+ "loss": 0.4493,
+ "step": 540
+ },
+ {
+ "epoch": 1.5895953757225434,
+ "grad_norm": 5.338929653167725,
+ "learning_rate": 4.1040462427745666e-06,
+ "loss": 0.4488,
+ "step": 550
+ },
+ {
+ "epoch": 1.6184971098265897,
+ "grad_norm": 4.65845251083374,
+ "learning_rate": 3.815028901734104e-06,
+ "loss": 0.4649,
+ "step": 560
+ },
+ {
+ "epoch": 1.647398843930636,
+ "grad_norm": 8.946381568908691,
+ "learning_rate": 3.526011560693642e-06,
+ "loss": 0.4826,
+ "step": 570
+ },
+ {
+ "epoch": 1.6763005780346822,
+ "grad_norm": 10.277519226074219,
+ "learning_rate": 3.2369942196531797e-06,
+ "loss": 0.4061,
+ "step": 580
+ },
+ {
+ "epoch": 1.7052023121387283,
+ "grad_norm": 9.004323959350586,
+ "learning_rate": 2.947976878612717e-06,
+ "loss": 0.463,
+ "step": 590
+ },
+ {
+ "epoch": 1.7341040462427746,
+ "grad_norm": 9.768237113952637,
+ "learning_rate": 2.658959537572254e-06,
+ "loss": 0.407,
+ "step": 600
+ },
+ {
+ "epoch": 1.7341040462427746,
+ "eval_f1": 0.840443960959029,
+ "eval_loss": 0.5418282151222229,
+ "eval_runtime": 43.4552,
+ "eval_samples_per_second": 108.94,
+ "eval_steps_per_second": 3.406,
+ "step": 600
+ },
+ {
+ "epoch": 1.7630057803468207,
+ "grad_norm": 3.791731119155884,
+ "learning_rate": 2.369942196531792e-06,
+ "loss": 0.4195,
+ "step": 610
+ },
+ {
+ "epoch": 1.791907514450867,
+ "grad_norm": 9.772268295288086,
+ "learning_rate": 2.08092485549133e-06,
+ "loss": 0.4644,
+ "step": 620
+ },
+ {
+ "epoch": 1.8208092485549132,
+ "grad_norm": 5.360107421875,
+ "learning_rate": 1.791907514450867e-06,
+ "loss": 0.4315,
+ "step": 630
+ },
+ {
+ "epoch": 1.8497109826589595,
+ "grad_norm": 9.654316902160645,
+ "learning_rate": 1.502890173410405e-06,
+ "loss": 0.435,
+ "step": 640
+ },
+ {
+ "epoch": 1.8786127167630058,
+ "grad_norm": 6.9839768409729,
+ "learning_rate": 1.2138728323699423e-06,
+ "loss": 0.387,
+ "step": 650
+ },
+ {
+ "epoch": 1.907514450867052,
+ "grad_norm": 8.008018493652344,
+ "learning_rate": 9.248554913294798e-07,
+ "loss": 0.4039,
+ "step": 660
+ },
+ {
+ "epoch": 1.9364161849710984,
+ "grad_norm": 7.851306438446045,
+ "learning_rate": 6.358381502890174e-07,
+ "loss": 0.4879,
+ "step": 670
+ },
+ {
+ "epoch": 1.9653179190751446,
+ "grad_norm": 10.8189115524292,
+ "learning_rate": 3.468208092485549e-07,
+ "loss": 0.3706,
+ "step": 680
+ },
+ {
+ "epoch": 1.9942196531791907,
+ "grad_norm": 8.877676963806152,
+ "learning_rate": 5.7803468208092485e-08,
+ "loss": 0.3861,
+ "step": 690
+ }
+ ],
+ "logging_steps": 10,
+ "max_steps": 692,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 2,
+ "save_steps": 100,
+ "stateful_callbacks": {
+ "TrainerControl": {
+ "args": {
+ "should_epoch_stop": false,
+ "should_evaluate": false,
+ "should_log": false,
+ "should_save": true,
+ "should_training_stop": true
+ },
+ "attributes": {}
+ }
+ },
+ "total_flos": 5812910382182400.0,
+ "train_batch_size": 32,
+ "trial_name": null,
+ "trial_params": null
+ }
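
trainer_state.json logs the training loss every 10 steps and an evaluation every 100 steps over the 692-step (2-epoch) run; the best eval_f1 of about 0.8404 is reached at step 600, which is why best_model_checkpoint points at checkpoint-600. A minimal sketch for pulling the evaluation curve back out of this file:

import json

with open("trainer_state.json") as f:
    state = json.load(f)

# Evaluation entries are the log_history records that carry eval_* metrics.
evals = [(entry["step"], entry["eval_f1"]) for entry in state["log_history"] if "eval_f1" in entry]
for step, f1 in evals:
    print(f"step {step:>3}: eval_f1 = {f1:.4f}")

best_step, best_f1 = max(evals, key=lambda item: item[1])
print(best_step, best_f1)  # 600, 0.840443960959029, matching best_metric above
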
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7df685089f1fda54845a56c2b08b81de31e89a39f766a00e859a228c00d1693e
+ size 5432
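
training_args.bin is the pickled TrainingArguments object used for this run. A minimal sketch for reading the hyperparameters back, assuming a compatible transformers version (4.46.x here) so the pickle resolves; as with the other checkpoint pickles, newer PyTorch releases need weights_only=False:

import torch

training_args = torch.load("training_args.bin", map_location="cpu", weights_only=False)
print(training_args.learning_rate)
print(training_args.per_device_train_batch_size)  # trainer_state.json reports train_batch_size 32
print(training_args.num_train_epochs)             # trainer_state.json reports 2 epochs
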