Dev176 committed on
Commit f118752
Parent: ccc7934

End of training

README.md CHANGED
@@ -0,0 +1,83 @@
+ ---
+ library_name: transformers
+ license: apache-2.0
+ base_model: google/vit-base-patch16-224
+ tags:
+ - generated_from_trainer
+ metrics:
+ - accuracy
+ model-index:
+ - name: 21BAI1229
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # 21BAI1229
+
+ This model is a fine-tuned version of [google/vit-base-patch16-224](https://huggingface.co/google/vit-base-patch16-224) on an unknown dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 0.4078
+ - Accuracy: 0.8734
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
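+
+ As a usage starting point, the sketch below runs single-image classification with the `transformers` pipeline. It is a minimal example, not part of the generated card: the repo id `Dev176/21BAI1229` is an assumption inferred from this commit's author and model name, and `photo.jpg` is a placeholder path.
+
+ ```python
+ from transformers import pipeline
+
+ # Assumed repo id, inferred from the commit author and model name.
+ classifier = pipeline("image-classification", model="Dev176/21BAI1229")
+
+ # "photo.jpg" is a placeholder; any RGB image path or PIL image works.
+ print(classifier("photo.jpg"))  # top predicted action labels with scores
+ ```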
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 5e-05
+ - train_batch_size: 64
+ - eval_batch_size: 64
+ - seed: 42
+ - gradient_accumulation_steps: 4
+ - total_train_batch_size: 256
+ - optimizer: adamw_torch with betas=(0.9, 0.999) and epsilon=1e-08; no additional optimizer arguments
+ - lr_scheduler_type: linear
+ - lr_scheduler_warmup_ratio: 0.1
+ - num_epochs: 20
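+
+ For reference, here is a minimal sketch of how these values map onto `transformers.TrainingArguments`; it is not part of the generated card, and `output_dir` and the per-epoch evaluation setting are assumptions. Note that total_train_batch_size is the effective batch: 64 (train_batch_size) × 4 (gradient_accumulation_steps) = 256.
+
+ ```python
+ from transformers import TrainingArguments
+
+ training_args = TrainingArguments(
+     output_dir="21BAI1229",          # assumption: reuses the model name
+     learning_rate=5e-5,
+     per_device_train_batch_size=64,
+     per_device_eval_batch_size=64,
+     seed=42,
+     gradient_accumulation_steps=4,   # effective train batch: 64 * 4 = 256
+     optim="adamw_torch",
+     adam_beta1=0.9,
+     adam_beta2=0.999,
+     adam_epsilon=1e-8,
+     lr_scheduler_type="linear",
+     warmup_ratio=0.1,
+     num_train_epochs=20,
+     eval_strategy="epoch",           # assumption: the results table logs once per epoch
+ )
+ ```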
+
+ ### Training results
+
+ | Training Loss | Epoch | Step | Validation Loss | Accuracy |
+ |:-------------:|:-------:|:----:|:---------------:|:--------:|
+ | 2.6034 | 0.9873 | 39 | 2.0544 | 0.4520 |
+ | 1.4429 | 2.0 | 79 | 0.7736 | 0.7849 |
+ | 0.8307 | 2.9873 | 118 | 0.5456 | 0.8413 |
+ | 0.6814 | 4.0 | 158 | 0.4881 | 0.8516 |
+ | 0.6199 | 4.9873 | 197 | 0.4614 | 0.8528 |
+ | 0.5578 | 6.0 | 237 | 0.4419 | 0.8615 |
+ | 0.5198 | 6.9873 | 276 | 0.4485 | 0.8603 |
+ | 0.4811 | 8.0 | 316 | 0.4355 | 0.8659 |
+ | 0.4568 | 8.9873 | 355 | 0.4182 | 0.8651 |
+ | 0.4268 | 10.0 | 395 | 0.4094 | 0.8702 |
+ | 0.4281 | 10.9873 | 434 | 0.4158 | 0.8706 |
+ | 0.4143 | 12.0 | 474 | 0.4078 | 0.8734 |
+ | 0.4009 | 12.9873 | 513 | 0.4066 | 0.8714 |
+ | 0.3642 | 14.0 | 553 | 0.4131 | 0.8683 |
+ | 0.3659 | 14.9873 | 592 | 0.4047 | 0.8726 |
+ | 0.3487 | 16.0 | 632 | 0.4054 | 0.8710 |
+ | 0.35 | 16.9873 | 671 | 0.4107 | 0.8722 |
+ | 0.3291 | 18.0 | 711 | 0.4099 | 0.8698 |
+ | 0.338 | 18.9873 | 750 | 0.4063 | 0.8718 |
+ | 0.3419 | 19.7468 | 780 | 0.4066 | 0.8702 |
+
+
+ ### Framework versions
+
+ - Transformers 4.46.2
+ - PyTorch 2.5.0+cu121
+ - Datasets 3.1.0
+ - Tokenizers 0.20.3
all_results.json ADDED
@@ -0,0 +1,13 @@
+ {
+     "epoch": 19.746835443037973,
+     "eval_accuracy": 0.8734126984126984,
+     "eval_loss": 0.40782999992370605,
+     "eval_runtime": 35.112,
+     "eval_samples_per_second": 71.77,
+     "eval_steps_per_second": 1.139,
+     "total_flos": 1.5428282771770638e+19,
+     "train_loss": 0.6176073722350292,
+     "train_runtime": 7965.5555,
+     "train_samples_per_second": 25.309,
+     "train_steps_per_second": 0.098
+ }
config.json ADDED
@@ -0,0 +1,58 @@
+ {
+   "_name_or_path": "google/vit-base-patch16-224",
+   "architectures": [
+     "ViTForImageClassification"
+   ],
+   "attention_probs_dropout_prob": 0.0,
+   "encoder_stride": 16,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.0,
+   "hidden_size": 768,
+   "id2label": {
+     "0": "calling",
+     "1": "clapping",
+     "2": "cycling",
+     "3": "dancing",
+     "4": "drinking",
+     "5": "eating",
+     "6": "fighting",
+     "7": "hugging",
+     "8": "laughing",
+     "9": "listening_to_music",
+     "10": "running",
+     "11": "sitting",
+     "12": "sleeping",
+     "13": "texting",
+     "14": "using_laptop"
+   },
+   "image_size": 224,
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "label2id": {
+     "calling": 0,
+     "clapping": 1,
+     "cycling": 2,
+     "dancing": 3,
+     "drinking": 4,
+     "eating": 5,
+     "fighting": 6,
+     "hugging": 7,
+     "laughing": 8,
+     "listening_to_music": 9,
+     "running": 10,
+     "sitting": 11,
+     "sleeping": 12,
+     "texting": 13,
+     "using_laptop": 14
+   },
+   "layer_norm_eps": 1e-12,
+   "model_type": "vit",
+   "num_attention_heads": 12,
+   "num_channels": 3,
+   "num_hidden_layers": 12,
+   "patch_size": 16,
+   "problem_type": "single_label_classification",
+   "qkv_bias": true,
+   "torch_dtype": "float32",
+   "transformers_version": "4.46.2"
+ }
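The `id2label` table above is what turns the classifier's 15 output logits into human-action names. A minimal sketch of that lookup without the pipeline wrapper, again assuming the hypothetical repo id `Dev176/21BAI1229` and a placeholder image path:

```python
import torch
from PIL import Image
from transformers import AutoImageProcessor, AutoModelForImageClassification

repo_id = "Dev176/21BAI1229"  # assumption: inferred from commit author and model name
processor = AutoImageProcessor.from_pretrained(repo_id)
model = AutoModelForImageClassification.from_pretrained(repo_id)

image = Image.open("photo.jpg")  # placeholder path
inputs = processor(images=image, return_tensors="pt")

with torch.no_grad():
    logits = model(**inputs).logits  # shape: (1, 15), one logit per action class

# argmax over the 15 classes, then look up the name via config.id2label
predicted_id = logits.argmax(-1).item()
print(model.config.id2label[predicted_id])
```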
eval_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+     "epoch": 19.746835443037973,
+     "eval_accuracy": 0.8734126984126984,
+     "eval_loss": 0.40782999992370605,
+     "eval_runtime": 35.112,
+     "eval_samples_per_second": 71.77,
+     "eval_steps_per_second": 1.139
+ }
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b26f3402a9152e42107553fd514e55ab8e64a82692e28366ba23cd1d130247aa
+ size 343263964
preprocessor_config.json ADDED
@@ -0,0 +1,22 @@
+ {
+   "do_normalize": true,
+   "do_rescale": true,
+   "do_resize": true,
+   "image_mean": [
+     0.5,
+     0.5,
+     0.5
+   ],
+   "image_processor_type": "ViTImageProcessor",
+   "image_std": [
+     0.5,
+     0.5,
+     0.5
+   ],
+   "resample": 2,
+   "rescale_factor": 0.00392156862745098,
+   "size": {
+     "height": 224,
+     "width": 224
+   }
+ }
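These settings resize to 224×224 (`"resample": 2` is PIL's bilinear filter), rescale pixels by `rescale_factor` = 1/255, then normalize each channel with mean 0.5 and std 0.5, mapping values into [-1, 1]. A minimal hand-rolled equivalent, shown only to make the settings concrete (the `ViTImageProcessor` does this for you):

```python
import numpy as np
from PIL import Image

# A hand-rolled equivalent of the ViTImageProcessor settings above;
# "photo.jpg" is a placeholder path.
image = Image.open("photo.jpg").convert("RGB")
image = image.resize((224, 224), Image.BILINEAR)  # "resample": 2 == bilinear

pixels = np.asarray(image).astype(np.float32)
pixels = pixels * 0.00392156862745098             # rescale by 1/255 -> [0, 1]
pixels = (pixels - 0.5) / 0.5                     # mean/std 0.5 -> [-1, 1]
pixels = pixels.transpose(2, 0, 1)                # HWC -> CHW, shape (3, 224, 224)
```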
runs/Nov14_22-59-20_24484a158615/events.out.tfevents.1731625166.24484a158615.168.1 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b73f7e706417ed98f4bcc9bdb3ab38bc5be539d9855ef6b0e7a2448aed06660c
+ size 16632
runs/Nov14_22-59-20_24484a158615/events.out.tfevents.1731633168.24484a158615.168.2 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7557bd00bd01e5e2a650902e2408110eb44c937039e20ab2849ba3f682d1caba
+ size 411
train_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+     "epoch": 19.746835443037973,
+     "total_flos": 1.5428282771770638e+19,
+     "train_loss": 0.6176073722350292,
+     "train_runtime": 7965.5555,
+     "train_samples_per_second": 25.309,
+     "train_steps_per_second": 0.098
+ }
trainer_state.json ADDED
@@ -0,0 +1,362 @@
+ {
+   "best_metric": 0.8734126984126984,
+   "best_model_checkpoint": "21BAI1229/checkpoint-474",
+   "epoch": 19.746835443037973,
+   "eval_steps": 500,
+   "global_step": 780,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.9873417721518988,
+       "grad_norm": 11.754476547241211,
+       "learning_rate": 2.5e-05,
+       "loss": 2.6034,
+       "step": 39
+     },
+     {
+       "epoch": 0.9873417721518988,
+       "eval_accuracy": 0.451984126984127,
+       "eval_loss": 2.054410696029663,
+       "eval_runtime": 36.1954,
+       "eval_samples_per_second": 69.622,
+       "eval_steps_per_second": 1.105,
+       "step": 39
+     },
+     {
+       "epoch": 2.0,
+       "grad_norm": 7.275434970855713,
+       "learning_rate": 4.992877492877493e-05,
+       "loss": 1.4429,
+       "step": 79
+     },
+     {
+       "epoch": 2.0,
+       "eval_accuracy": 0.7849206349206349,
+       "eval_loss": 0.7735527157783508,
+       "eval_runtime": 35.4184,
+       "eval_samples_per_second": 71.149,
+       "eval_steps_per_second": 1.129,
+       "step": 79
+     },
+     {
+       "epoch": 2.9873417721518987,
+       "grad_norm": 7.623991012573242,
+       "learning_rate": 4.7150997150997157e-05,
+       "loss": 0.8307,
+       "step": 118
+     },
+     {
+       "epoch": 2.9873417721518987,
+       "eval_accuracy": 0.8412698412698413,
+       "eval_loss": 0.5455929636955261,
+       "eval_runtime": 35.3707,
+       "eval_samples_per_second": 71.245,
+       "eval_steps_per_second": 1.131,
+       "step": 118
+     },
+     {
+       "epoch": 4.0,
+       "grad_norm": 8.973851203918457,
+       "learning_rate": 4.4301994301994304e-05,
+       "loss": 0.6814,
+       "step": 158
+     },
+     {
+       "epoch": 4.0,
+       "eval_accuracy": 0.8515873015873016,
+       "eval_loss": 0.48805657029151917,
+       "eval_runtime": 35.4085,
+       "eval_samples_per_second": 71.169,
+       "eval_steps_per_second": 1.13,
+       "step": 158
+     },
+     {
+       "epoch": 4.987341772151899,
+       "grad_norm": 8.185949325561523,
+       "learning_rate": 4.152421652421652e-05,
+       "loss": 0.6199,
+       "step": 197
+     },
+     {
+       "epoch": 4.987341772151899,
+       "eval_accuracy": 0.8527777777777777,
+       "eval_loss": 0.46135592460632324,
+       "eval_runtime": 35.2536,
+       "eval_samples_per_second": 71.482,
+       "eval_steps_per_second": 1.135,
+       "step": 197
+     },
+     {
+       "epoch": 6.0,
+       "grad_norm": 11.136569023132324,
+       "learning_rate": 3.867521367521368e-05,
+       "loss": 0.5578,
+       "step": 237
+     },
+     {
+       "epoch": 6.0,
+       "eval_accuracy": 0.8615079365079366,
+       "eval_loss": 0.44191327691078186,
+       "eval_runtime": 35.2038,
+       "eval_samples_per_second": 71.583,
+       "eval_steps_per_second": 1.136,
+       "step": 237
+     },
+     {
+       "epoch": 6.987341772151899,
+       "grad_norm": 6.935160160064697,
+       "learning_rate": 3.58974358974359e-05,
+       "loss": 0.5198,
+       "step": 276
+     },
+     {
+       "epoch": 6.987341772151899,
+       "eval_accuracy": 0.8603174603174604,
+       "eval_loss": 0.4485108256340027,
+       "eval_runtime": 35.2921,
+       "eval_samples_per_second": 71.404,
+       "eval_steps_per_second": 1.133,
+       "step": 276
+     },
+     {
+       "epoch": 8.0,
+       "grad_norm": 7.163381576538086,
+       "learning_rate": 3.304843304843305e-05,
+       "loss": 0.4811,
+       "step": 316
+     },
+     {
+       "epoch": 8.0,
+       "eval_accuracy": 0.8658730158730159,
+       "eval_loss": 0.4355041980743408,
+       "eval_runtime": 35.4396,
+       "eval_samples_per_second": 71.107,
+       "eval_steps_per_second": 1.129,
+       "step": 316
+     },
+     {
+       "epoch": 8.987341772151899,
+       "grad_norm": 7.22255277633667,
+       "learning_rate": 3.0270655270655275e-05,
+       "loss": 0.4568,
+       "step": 355
+     },
+     {
+       "epoch": 8.987341772151899,
+       "eval_accuracy": 0.8650793650793651,
+       "eval_loss": 0.4182125926017761,
+       "eval_runtime": 35.5074,
+       "eval_samples_per_second": 70.971,
+       "eval_steps_per_second": 1.127,
+       "step": 355
+     },
+     {
+       "epoch": 10.0,
+       "grad_norm": 7.7428879737854,
+       "learning_rate": 2.7421652421652423e-05,
+       "loss": 0.4268,
+       "step": 395
+     },
+     {
+       "epoch": 10.0,
+       "eval_accuracy": 0.8702380952380953,
+       "eval_loss": 0.4093915522098541,
+       "eval_runtime": 35.1709,
+       "eval_samples_per_second": 71.65,
+       "eval_steps_per_second": 1.137,
+       "step": 395
+     },
+     {
+       "epoch": 10.987341772151899,
+       "grad_norm": 8.56812572479248,
+       "learning_rate": 2.4643874643874645e-05,
+       "loss": 0.4281,
+       "step": 434
+     },
+     {
+       "epoch": 10.987341772151899,
+       "eval_accuracy": 0.8706349206349207,
+       "eval_loss": 0.41577932238578796,
+       "eval_runtime": 35.2893,
+       "eval_samples_per_second": 71.41,
+       "eval_steps_per_second": 1.133,
+       "step": 434
+     },
+     {
+       "epoch": 12.0,
+       "grad_norm": 9.711762428283691,
+       "learning_rate": 2.1794871794871795e-05,
+       "loss": 0.4143,
+       "step": 474
+     },
+     {
+       "epoch": 12.0,
+       "eval_accuracy": 0.8734126984126984,
+       "eval_loss": 0.40782999992370605,
+       "eval_runtime": 35.0211,
+       "eval_samples_per_second": 71.957,
+       "eval_steps_per_second": 1.142,
+       "step": 474
+     },
+     {
+       "epoch": 12.987341772151899,
+       "grad_norm": 7.874723434448242,
+       "learning_rate": 1.9017094017094017e-05,
+       "loss": 0.4009,
+       "step": 513
+     },
+     {
+       "epoch": 12.987341772151899,
+       "eval_accuracy": 0.8714285714285714,
+       "eval_loss": 0.4066493511199951,
+       "eval_runtime": 35.2449,
+       "eval_samples_per_second": 71.5,
+       "eval_steps_per_second": 1.135,
+       "step": 513
+     },
+     {
+       "epoch": 14.0,
+       "grad_norm": 8.416353225708008,
+       "learning_rate": 1.6168091168091168e-05,
+       "loss": 0.3642,
+       "step": 553
+     },
+     {
+       "epoch": 14.0,
+       "eval_accuracy": 0.8682539682539683,
+       "eval_loss": 0.4131360352039337,
+       "eval_runtime": 35.3914,
+       "eval_samples_per_second": 71.204,
+       "eval_steps_per_second": 1.13,
+       "step": 553
+     },
+     {
+       "epoch": 14.987341772151899,
+       "grad_norm": 8.845190048217773,
+       "learning_rate": 1.3390313390313392e-05,
+       "loss": 0.3659,
+       "step": 592
+     },
+     {
+       "epoch": 14.987341772151899,
+       "eval_accuracy": 0.8726190476190476,
+       "eval_loss": 0.40469926595687866,
+       "eval_runtime": 35.2434,
+       "eval_samples_per_second": 71.503,
+       "eval_steps_per_second": 1.135,
+       "step": 592
+     },
+     {
+       "epoch": 16.0,
+       "grad_norm": 7.056828022003174,
+       "learning_rate": 1.0541310541310543e-05,
+       "loss": 0.3487,
+       "step": 632
+     },
+     {
+       "epoch": 16.0,
+       "eval_accuracy": 0.871031746031746,
+       "eval_loss": 0.4053677022457123,
+       "eval_runtime": 35.2106,
+       "eval_samples_per_second": 71.569,
+       "eval_steps_per_second": 1.136,
+       "step": 632
+     },
+     {
+       "epoch": 16.9873417721519,
+       "grad_norm": 7.8862199783325195,
+       "learning_rate": 7.763532763532765e-06,
+       "loss": 0.35,
+       "step": 671
+     },
+     {
+       "epoch": 16.9873417721519,
+       "eval_accuracy": 0.8722222222222222,
+       "eval_loss": 0.41073036193847656,
+       "eval_runtime": 35.125,
+       "eval_samples_per_second": 71.744,
+       "eval_steps_per_second": 1.139,
+       "step": 671
+     },
+     {
+       "epoch": 18.0,
+       "grad_norm": 9.344978332519531,
+       "learning_rate": 4.914529914529915e-06,
+       "loss": 0.3291,
+       "step": 711
+     },
+     {
+       "epoch": 18.0,
+       "eval_accuracy": 0.8698412698412699,
+       "eval_loss": 0.40985915064811707,
+       "eval_runtime": 35.2658,
+       "eval_samples_per_second": 71.457,
+       "eval_steps_per_second": 1.134,
+       "step": 711
+     },
+     {
+       "epoch": 18.9873417721519,
+       "grad_norm": 6.548698902130127,
+       "learning_rate": 2.136752136752137e-06,
+       "loss": 0.338,
+       "step": 750
+     },
+     {
+       "epoch": 18.9873417721519,
+       "eval_accuracy": 0.8718253968253968,
+       "eval_loss": 0.40625905990600586,
+       "eval_runtime": 35.4023,
+       "eval_samples_per_second": 71.182,
+       "eval_steps_per_second": 1.13,
+       "step": 750
+     },
+     {
+       "epoch": 19.746835443037973,
+       "grad_norm": 6.30403470993042,
+       "learning_rate": 0.0,
+       "loss": 0.3419,
+       "step": 780
+     },
+     {
+       "epoch": 19.746835443037973,
+       "eval_accuracy": 0.8702380952380953,
+       "eval_loss": 0.4066447913646698,
+       "eval_runtime": 35.3364,
+       "eval_samples_per_second": 71.315,
+       "eval_steps_per_second": 1.132,
+       "step": 780
+     },
+     {
+       "epoch": 19.746835443037973,
+       "step": 780,
+       "total_flos": 1.5428282771770638e+19,
+       "train_loss": 0.6176073722350292,
+       "train_runtime": 7965.5555,
+       "train_samples_per_second": 25.309,
+       "train_steps_per_second": 0.098
+     }
+   ],
+   "logging_steps": 500,
+   "max_steps": 780,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 20,
+   "save_steps": 500,
+   "stateful_callbacks": {
+     "TrainerControl": {
+       "args": {
+         "should_epoch_stop": false,
+         "should_evaluate": false,
+         "should_log": false,
+         "should_save": true,
+         "should_training_stop": true
+       },
+       "attributes": {}
+     }
+   },
+   "total_flos": 1.5428282771770638e+19,
+   "train_batch_size": 64,
+   "trial_name": null,
+   "trial_params": null
+ }
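The `log_history` list above interleaves training-loss records and evaluation records, keyed by `step`. A minimal sketch of pulling the evaluation curve back out, assuming a local copy of this file named `trainer_state.json`:

```python
import json

with open("trainer_state.json") as f:
    state = json.load(f)

# Evaluation records are the entries carrying an eval_accuracy key;
# the final summary entry (total_flos, train_loss, ...) is skipped.
for entry in state["log_history"]:
    if "eval_accuracy" in entry:
        print(f"epoch {entry['epoch']:>7.4f}  step {entry['step']:>3}  "
              f"acc {entry['eval_accuracy']:.4f}  loss {entry['eval_loss']:.4f}")
```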
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f2ed54e17dab6d903d84db67320e8c81985da9a8f2c71c234b43a7fd55efd247
+ size 5240