yuanhuaisen committed
Commit b8f6064
1 Parent(s): c49658f

Upload folder using huggingface_hub
README.md ADDED
@@ -0,0 +1,32 @@
+
+---
+tags:
+- autotrain
+- image-classification
+widget:
+- src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/tiger.jpg
+  example_title: Tiger
+- src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/teapot.jpg
+  example_title: Teapot
+- src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/palace.jpg
+  example_title: Palace
+datasets:
+- yuanhuaisen/autotrain-data-autotrain-khvt4-4vmox
+---
+
+# Model Trained Using AutoTrain
+
+- Problem type: Image Classification
+
+## Validation Metrics
+loss: 0.18480469286441803
+
+f1: 0.962962962962963
+
+precision: 1.0
+
+recall: 0.9285714285714286
+
+auc: 1.0
+
+accuracy: 0.96
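The trained model is pushed to the Hub as `yuanhuaisen/autotrain-khvt4-4vmox` (the `repo_id` in `training_params.json` below). A minimal inference sketch using the standard `transformers` pipeline API; the image path is a placeholder:

```python
from transformers import pipeline

# Load the fine-tuned ViT classifier from the Hub.
# Repo id taken from training_params.json in this commit.
classifier = pipeline(
    "image-classification",
    model="yuanhuaisen/autotrain-khvt4-4vmox",
)

# "photo.jpg" is a placeholder path; any RGB image works.
for pred in classifier("photo.jpg"):
    print(f"{pred['label']}: {pred['score']:.3f}")
```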
checkpoint-87/config.json ADDED
@@ -0,0 +1,33 @@
+{
+  "_name_or_path": "google/vit-large-patch16-224",
+  "_num_labels": 2,
+  "architectures": [
+    "ViTForImageClassification"
+  ],
+  "attention_probs_dropout_prob": 0.0,
+  "encoder_stride": 16,
+  "hidden_act": "gelu",
+  "hidden_dropout_prob": 0.0,
+  "hidden_size": 1024,
+  "id2label": {
+    "0": "11train_covered_with_a_quilt_and_only_the_head_exposed",
+    "1": "12train_covered_with_a_quilt_and_exposed_other_parts_of_the_body"
+  },
+  "image_size": 224,
+  "initializer_range": 0.02,
+  "intermediate_size": 4096,
+  "label2id": {
+    "11train_covered_with_a_quilt_and_only_the_head_exposed": 0,
+    "12train_covered_with_a_quilt_and_exposed_other_parts_of_the_body": 1
+  },
+  "layer_norm_eps": 1e-12,
+  "model_type": "vit",
+  "num_attention_heads": 16,
+  "num_channels": 3,
+  "num_hidden_layers": 24,
+  "patch_size": 16,
+  "problem_type": "single_label_classification",
+  "qkv_bias": true,
+  "torch_dtype": "float32",
+  "transformers_version": "4.36.1"
+}
checkpoint-87/model.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0cb9173521abe5a76936d89d765100ebb21a51f28c3451526a9b60fa11ba06dc
+size 1213261264
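The large binaries in this commit are stored via Git LFS, so the repository itself holds only three-line pointer files: spec version, sha256 object id, and byte size. As a sketch, a downloaded blob can be checked against its pointer like this (file paths are illustrative, not part of the commit):

```python
import hashlib
from pathlib import Path

def verify_lfs_file(pointer_path: str, data_path: str) -> bool:
    """Check a downloaded blob against its Git LFS pointer file."""
    # Pointer lines are "key value" pairs, e.g. "oid sha256:<hex>".
    fields = {}
    for line in Path(pointer_path).read_text().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    expected_oid = fields["oid"].removeprefix("sha256:")
    expected_size = int(fields["size"])

    data = Path(data_path).read_bytes()
    return (
        hashlib.sha256(data).hexdigest() == expected_oid
        and len(data) == expected_size
    )

# Placeholder paths for a local checkout.
print(verify_lfs_file("model.safetensors.pointer", "model.safetensors"))
```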
checkpoint-87/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d24f5dfc110186f9900cdb2b48247bfa2a477628585f4d3ebb64edcaacc31693
+size 2426757546
checkpoint-87/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e3a407268a2d2833730e866a3c84f40a9efdea033897151b0f4507e6401dbf33
+size 14244
checkpoint-87/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4242efb28570acb4f30dd67f3a85011ce77064c7fcad186e493c47739ad898eb
+size 1064
checkpoint-87/trainer_state.json ADDED
@@ -0,0 +1,582 @@
+{
+  "best_metric": 0.18480469286441803,
+  "best_model_checkpoint": "/tmp/model/checkpoint-87",
+  "epoch": 3.0,
+  "eval_steps": 500,
+  "global_step": 87,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 0.03,
+      "learning_rate": 0.0,
+      "loss": 0.6934,
+      "step": 1
+    },
+    {
+      "epoch": 0.07,
+      "learning_rate": 5.555555555555556e-06,
+      "loss": 0.6934,
+      "step": 2
+    },
+    {
+      "epoch": 0.1,
+      "learning_rate": 1.1111111111111112e-05,
+      "loss": 0.6934,
+      "step": 3
+    },
+    {
+      "epoch": 0.14,
+      "learning_rate": 1.6666666666666667e-05,
+      "loss": 0.6931,
+      "step": 4
+    },
+    {
+      "epoch": 0.17,
+      "learning_rate": 2.2222222222222223e-05,
+      "loss": 0.6923,
+      "step": 5
+    },
+    {
+      "epoch": 0.21,
+      "learning_rate": 2.777777777777778e-05,
+      "loss": 0.694,
+      "step": 6
+    },
+    {
+      "epoch": 0.24,
+      "learning_rate": 3.3333333333333335e-05,
+      "loss": 0.6877,
+      "step": 7
+    },
+    {
+      "epoch": 0.28,
+      "learning_rate": 3.888888888888889e-05,
+      "loss": 0.688,
+      "step": 8
+    },
+    {
+      "epoch": 0.31,
+      "learning_rate": 4.4444444444444447e-05,
+      "loss": 0.6873,
+      "step": 9
+    },
+    {
+      "epoch": 0.34,
+      "learning_rate": 5e-05,
+      "loss": 0.6959,
+      "step": 10
+    },
+    {
+      "epoch": 0.38,
+      "learning_rate": 4.935897435897436e-05,
+      "loss": 0.6808,
+      "step": 11
+    },
+    {
+      "epoch": 0.41,
+      "learning_rate": 4.871794871794872e-05,
+      "loss": 0.6835,
+      "step": 12
+    },
+    {
+      "epoch": 0.45,
+      "learning_rate": 4.8076923076923084e-05,
+      "loss": 0.6755,
+      "step": 13
+    },
+    {
+      "epoch": 0.48,
+      "learning_rate": 4.7435897435897435e-05,
+      "loss": 0.684,
+      "step": 14
+    },
+    {
+      "epoch": 0.52,
+      "learning_rate": 4.67948717948718e-05,
+      "loss": 0.6845,
+      "step": 15
+    },
+    {
+      "epoch": 0.55,
+      "learning_rate": 4.615384615384616e-05,
+      "loss": 0.6609,
+      "step": 16
+    },
+    {
+      "epoch": 0.59,
+      "learning_rate": 4.5512820512820516e-05,
+      "loss": 0.6962,
+      "step": 17
+    },
+    {
+      "epoch": 0.62,
+      "learning_rate": 4.4871794871794874e-05,
+      "loss": 0.6797,
+      "step": 18
+    },
+    {
+      "epoch": 0.66,
+      "learning_rate": 4.423076923076923e-05,
+      "loss": 0.657,
+      "step": 19
+    },
+    {
+      "epoch": 0.69,
+      "learning_rate": 4.358974358974359e-05,
+      "loss": 0.6794,
+      "step": 20
+    },
+    {
+      "epoch": 0.72,
+      "learning_rate": 4.294871794871795e-05,
+      "loss": 0.629,
+      "step": 21
+    },
+    {
+      "epoch": 0.76,
+      "learning_rate": 4.230769230769231e-05,
+      "loss": 0.6429,
+      "step": 22
+    },
+    {
+      "epoch": 0.79,
+      "learning_rate": 4.166666666666667e-05,
+      "loss": 0.6003,
+      "step": 23
+    },
+    {
+      "epoch": 0.83,
+      "learning_rate": 4.1025641025641023e-05,
+      "loss": 0.58,
+      "step": 24
+    },
+    {
+      "epoch": 0.86,
+      "learning_rate": 4.038461538461539e-05,
+      "loss": 0.6459,
+      "step": 25
+    },
+    {
+      "epoch": 0.9,
+      "learning_rate": 3.974358974358974e-05,
+      "loss": 0.5719,
+      "step": 26
+    },
+    {
+      "epoch": 0.93,
+      "learning_rate": 3.9102564102564105e-05,
+      "loss": 0.5321,
+      "step": 27
+    },
+    {
+      "epoch": 0.97,
+      "learning_rate": 3.846153846153846e-05,
+      "loss": 0.6402,
+      "step": 28
+    },
+    {
+      "epoch": 1.0,
+      "learning_rate": 3.782051282051282e-05,
+      "loss": 0.4468,
+      "step": 29
+    },
+    {
+      "epoch": 1.0,
+      "eval_accuracy": 0.76,
+      "eval_auc": 1.0,
+      "eval_f1": 0.8235294117647058,
+      "eval_loss": 0.5563281178474426,
+      "eval_precision": 0.7,
+      "eval_recall": 1.0,
+      "eval_runtime": 1.6051,
+      "eval_samples_per_second": 15.576,
+      "eval_steps_per_second": 1.246,
+      "step": 29
+    },
+    {
+      "epoch": 1.03,
+      "learning_rate": 3.717948717948718e-05,
+      "loss": 0.5103,
+      "step": 30
+    },
+    {
+      "epoch": 1.07,
+      "learning_rate": 3.653846153846154e-05,
+      "loss": 0.5385,
+      "step": 31
+    },
+    {
+      "epoch": 1.1,
+      "learning_rate": 3.58974358974359e-05,
+      "loss": 0.5151,
+      "step": 32
+    },
+    {
+      "epoch": 1.14,
+      "learning_rate": 3.525641025641026e-05,
+      "loss": 0.4949,
+      "step": 33
+    },
+    {
+      "epoch": 1.17,
+      "learning_rate": 3.461538461538462e-05,
+      "loss": 0.5086,
+      "step": 34
+    },
+    {
+      "epoch": 1.21,
+      "learning_rate": 3.397435897435898e-05,
+      "loss": 0.4416,
+      "step": 35
+    },
+    {
+      "epoch": 1.24,
+      "learning_rate": 3.3333333333333335e-05,
+      "loss": 0.4374,
+      "step": 36
+    },
+    {
+      "epoch": 1.28,
+      "learning_rate": 3.269230769230769e-05,
+      "loss": 0.5711,
+      "step": 37
+    },
+    {
+      "epoch": 1.31,
+      "learning_rate": 3.205128205128206e-05,
+      "loss": 0.462,
+      "step": 38
+    },
+    {
+      "epoch": 1.34,
+      "learning_rate": 3.141025641025641e-05,
+      "loss": 0.4268,
+      "step": 39
+    },
+    {
+      "epoch": 1.38,
+      "learning_rate": 3.0769230769230774e-05,
+      "loss": 0.473,
+      "step": 40
+    },
+    {
+      "epoch": 1.41,
+      "learning_rate": 3.012820512820513e-05,
+      "loss": 0.4651,
+      "step": 41
+    },
+    {
+      "epoch": 1.45,
+      "learning_rate": 2.948717948717949e-05,
+      "loss": 0.3712,
+      "step": 42
+    },
+    {
+      "epoch": 1.48,
+      "learning_rate": 2.8846153846153845e-05,
+      "loss": 0.6356,
+      "step": 43
+    },
+    {
+      "epoch": 1.52,
+      "learning_rate": 2.8205128205128207e-05,
+      "loss": 0.3557,
+      "step": 44
+    },
+    {
+      "epoch": 1.55,
+      "learning_rate": 2.756410256410257e-05,
+      "loss": 0.3891,
+      "step": 45
+    },
+    {
+      "epoch": 1.59,
+      "learning_rate": 2.6923076923076923e-05,
+      "loss": 0.412,
+      "step": 46
+    },
+    {
+      "epoch": 1.62,
+      "learning_rate": 2.6282051282051285e-05,
+      "loss": 0.2874,
+      "step": 47
+    },
+    {
+      "epoch": 1.66,
+      "learning_rate": 2.564102564102564e-05,
+      "loss": 0.3086,
+      "step": 48
+    },
+    {
+      "epoch": 1.69,
+      "learning_rate": 2.5e-05,
+      "loss": 0.384,
+      "step": 49
+    },
+    {
+      "epoch": 1.72,
+      "learning_rate": 2.435897435897436e-05,
+      "loss": 0.2773,
+      "step": 50
+    },
+    {
+      "epoch": 1.76,
+      "learning_rate": 2.3717948717948718e-05,
+      "loss": 0.3755,
+      "step": 51
+    },
+    {
+      "epoch": 1.79,
+      "learning_rate": 2.307692307692308e-05,
+      "loss": 0.507,
+      "step": 52
+    },
+    {
+      "epoch": 1.83,
+      "learning_rate": 2.2435897435897437e-05,
+      "loss": 0.3976,
+      "step": 53
+    },
+    {
+      "epoch": 1.86,
+      "learning_rate": 2.1794871794871795e-05,
+      "loss": 0.3925,
+      "step": 54
+    },
+    {
+      "epoch": 1.9,
+      "learning_rate": 2.1153846153846154e-05,
+      "loss": 0.5349,
+      "step": 55
+    },
+    {
+      "epoch": 1.93,
+      "learning_rate": 2.0512820512820512e-05,
+      "loss": 0.2437,
+      "step": 56
+    },
+    {
+      "epoch": 1.97,
+      "learning_rate": 1.987179487179487e-05,
+      "loss": 0.3496,
+      "step": 57
+    },
+    {
+      "epoch": 2.0,
+      "learning_rate": 1.923076923076923e-05,
+      "loss": 0.1945,
+      "step": 58
+    },
+    {
+      "epoch": 2.0,
+      "eval_accuracy": 0.84,
+      "eval_auc": 1.0,
+      "eval_f1": 0.8750000000000001,
+      "eval_loss": 0.3492187559604645,
+      "eval_precision": 0.7777777777777778,
+      "eval_recall": 1.0,
+      "eval_runtime": 1.5992,
+      "eval_samples_per_second": 15.633,
+      "eval_steps_per_second": 1.251,
+      "step": 58
+    },
+    {
+      "epoch": 2.03,
+      "learning_rate": 1.858974358974359e-05,
+      "loss": 0.2392,
+      "step": 59
+    },
+    {
+      "epoch": 2.07,
+      "learning_rate": 1.794871794871795e-05,
+      "loss": 0.2135,
+      "step": 60
+    },
+    {
+      "epoch": 2.1,
+      "learning_rate": 1.730769230769231e-05,
+      "loss": 0.4027,
+      "step": 61
+    },
+    {
+      "epoch": 2.14,
+      "learning_rate": 1.6666666666666667e-05,
+      "loss": 0.3798,
+      "step": 62
+    },
+    {
+      "epoch": 2.17,
+      "learning_rate": 1.602564102564103e-05,
+      "loss": 0.2134,
+      "step": 63
+    },
+    {
+      "epoch": 2.21,
+      "learning_rate": 1.5384615384615387e-05,
+      "loss": 0.3116,
+      "step": 64
+    },
+    {
+      "epoch": 2.24,
+      "learning_rate": 1.4743589743589745e-05,
+      "loss": 0.2248,
+      "step": 65
+    },
+    {
+      "epoch": 2.28,
+      "learning_rate": 1.4102564102564104e-05,
+      "loss": 0.1883,
+      "step": 66
+    },
+    {
+      "epoch": 2.31,
+      "learning_rate": 1.3461538461538462e-05,
+      "loss": 0.1935,
+      "step": 67
+    },
+    {
+      "epoch": 2.34,
+      "learning_rate": 1.282051282051282e-05,
+      "loss": 0.1912,
+      "step": 68
+    },
+    {
+      "epoch": 2.38,
+      "learning_rate": 1.217948717948718e-05,
+      "loss": 0.1999,
+      "step": 69
+    },
+    {
+      "epoch": 2.41,
+      "learning_rate": 1.153846153846154e-05,
+      "loss": 0.2033,
+      "step": 70
+    },
+    {
+      "epoch": 2.45,
+      "learning_rate": 1.0897435897435898e-05,
+      "loss": 0.2038,
+      "step": 71
+    },
+    {
+      "epoch": 2.48,
+      "learning_rate": 1.0256410256410256e-05,
+      "loss": 0.1647,
+      "step": 72
+    },
+    {
+      "epoch": 2.52,
+      "learning_rate": 9.615384615384616e-06,
+      "loss": 0.1812,
+      "step": 73
+    },
+    {
+      "epoch": 2.55,
+      "learning_rate": 8.974358974358976e-06,
+      "loss": 0.1766,
+      "step": 74
+    },
+    {
+      "epoch": 2.59,
+      "learning_rate": 8.333333333333334e-06,
+      "loss": 0.2137,
+      "step": 75
+    },
+    {
+      "epoch": 2.62,
+      "learning_rate": 7.692307692307694e-06,
+      "loss": 0.3739,
+      "step": 76
+    },
+    {
+      "epoch": 2.66,
+      "learning_rate": 7.051282051282052e-06,
+      "loss": 0.301,
+      "step": 77
+    },
+    {
+      "epoch": 2.69,
+      "learning_rate": 6.41025641025641e-06,
+      "loss": 0.1771,
+      "step": 78
+    },
+    {
+      "epoch": 2.72,
+      "learning_rate": 5.76923076923077e-06,
+      "loss": 0.2224,
+      "step": 79
+    },
+    {
+      "epoch": 2.76,
+      "learning_rate": 5.128205128205128e-06,
+      "loss": 0.2905,
+      "step": 80
+    },
+    {
+      "epoch": 2.79,
+      "learning_rate": 4.487179487179488e-06,
+      "loss": 0.1836,
+      "step": 81
+    },
+    {
+      "epoch": 2.83,
+      "learning_rate": 3.846153846153847e-06,
+      "loss": 0.1592,
+      "step": 82
+    },
+    {
+      "epoch": 2.86,
+      "learning_rate": 3.205128205128205e-06,
+      "loss": 0.1648,
+      "step": 83
+    },
+    {
+      "epoch": 2.9,
+      "learning_rate": 2.564102564102564e-06,
+      "loss": 0.1626,
+      "step": 84
+    },
+    {
+      "epoch": 2.93,
+      "learning_rate": 1.9230769230769234e-06,
+      "loss": 0.1826,
+      "step": 85
+    },
+    {
+      "epoch": 2.97,
+      "learning_rate": 1.282051282051282e-06,
+      "loss": 0.3295,
+      "step": 86
+    },
+    {
+      "epoch": 3.0,
+      "learning_rate": 6.41025641025641e-07,
+      "loss": 0.1466,
+      "step": 87
+    },
+    {
+      "epoch": 3.0,
+      "eval_accuracy": 0.96,
+      "eval_auc": 1.0,
+      "eval_f1": 0.962962962962963,
+      "eval_loss": 0.18480469286441803,
+      "eval_precision": 1.0,
+      "eval_recall": 0.9285714285714286,
+      "eval_runtime": 1.5881,
+      "eval_samples_per_second": 15.742,
+      "eval_steps_per_second": 1.259,
+      "step": 87
+    }
+  ],
+  "logging_steps": 1,
+  "max_steps": 87,
+  "num_input_tokens_seen": 0,
+  "num_train_epochs": 3,
+  "save_steps": 500,
+  "total_flos": 1.849055714085888e+17,
+  "train_batch_size": 8,
+  "trial_name": null,
+  "trial_params": null
+}
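The `log_history` above interleaves per-step training losses with one evaluation record per epoch (validation accuracy climbs 0.76 → 0.84 → 0.96). A small sketch for pulling the epoch-level eval metrics out of a local copy of this file:

```python
import json

with open("checkpoint-87/trainer_state.json") as f:
    state = json.load(f)

# Eval records are the log entries that carry eval_* keys.
evals = [e for e in state["log_history"] if "eval_loss" in e]
for e in evals:
    print(
        f"epoch {e['epoch']:.0f}: loss={e['eval_loss']:.4f} "
        f"acc={e['eval_accuracy']:.2f} f1={e['eval_f1']:.4f}"
    )
# epoch 1: loss=0.5563 acc=0.76 f1=0.8235
# epoch 2: loss=0.3492 acc=0.84 f1=0.8750
# epoch 3: loss=0.1848 acc=0.96 f1=0.9630
```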
checkpoint-87/training_args.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4a030c7106d2282775ad7da76b9cb3d7d6f07de1ef536e7079ed136311be4071
+size 4728
config.json ADDED
@@ -0,0 +1,33 @@
+{
+  "_name_or_path": "google/vit-large-patch16-224",
+  "_num_labels": 2,
+  "architectures": [
+    "ViTForImageClassification"
+  ],
+  "attention_probs_dropout_prob": 0.0,
+  "encoder_stride": 16,
+  "hidden_act": "gelu",
+  "hidden_dropout_prob": 0.0,
+  "hidden_size": 1024,
+  "id2label": {
+    "0": "11train_covered_with_a_quilt_and_only_the_head_exposed",
+    "1": "12train_covered_with_a_quilt_and_exposed_other_parts_of_the_body"
+  },
+  "image_size": 224,
+  "initializer_range": 0.02,
+  "intermediate_size": 4096,
+  "label2id": {
+    "11train_covered_with_a_quilt_and_only_the_head_exposed": 0,
+    "12train_covered_with_a_quilt_and_exposed_other_parts_of_the_body": 1
+  },
+  "layer_norm_eps": 1e-12,
+  "model_type": "vit",
+  "num_attention_heads": 16,
+  "num_channels": 3,
+  "num_hidden_layers": 24,
+  "patch_size": 16,
+  "problem_type": "single_label_classification",
+  "qkv_bias": true,
+  "torch_dtype": "float32",
+  "transformers_version": "4.36.1"
+}
model.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0cb9173521abe5a76936d89d765100ebb21a51f28c3451526a9b60fa11ba06dc
+size 1213261264
preprocessor_config.json ADDED
@@ -0,0 +1,22 @@
+{
+  "do_normalize": true,
+  "do_rescale": true,
+  "do_resize": true,
+  "image_mean": [
+    0.5,
+    0.5,
+    0.5
+  ],
+  "image_processor_type": "ViTImageProcessor",
+  "image_std": [
+    0.5,
+    0.5,
+    0.5
+  ],
+  "resample": 2,
+  "rescale_factor": 0.00392156862745098,
+  "size": {
+    "height": 224,
+    "width": 224
+  }
+}
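This processor config describes the standard ViT input pipeline: bilinear resize (`resample: 2` is PIL's BILINEAR filter) to 224×224, rescale by 1/255, then per-channel normalization with mean and std 0.5. A hand-rolled equivalent, as a sketch mirroring the values above (the image path is a placeholder):

```python
import numpy as np
from PIL import Image

def preprocess(path: str) -> np.ndarray:
    """Mirror preprocessor_config.json by hand: resize, rescale, normalize."""
    img = Image.open(path).convert("RGB")
    img = img.resize((224, 224), resample=Image.BILINEAR)  # "resample": 2
    x = np.asarray(img, dtype=np.float32) * 0.00392156862745098  # 1/255
    x = (x - 0.5) / 0.5                 # image_mean / image_std per channel
    return x.transpose(2, 0, 1)[None]   # HWC -> NCHW batch for the model

batch = preprocess("photo.jpg")  # placeholder path
print(batch.shape)               # (1, 3, 224, 224)
```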
training_args.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4a030c7106d2282775ad7da76b9cb3d7d6f07de1ef536e7079ed136311be4071
+size 4728
training_params.json ADDED
@@ -0,0 +1,29 @@
+{
+  "data_path": "yuanhuaisen/autotrain-data-autotrain-khvt4-4vmox",
+  "model": "google/vit-large-patch16-224",
+  "username": "yuanhuaisen",
+  "lr": 5e-05,
+  "epochs": 3,
+  "batch_size": 8,
+  "warmup_ratio": 0.1,
+  "gradient_accumulation": 1,
+  "optimizer": "adamw_torch",
+  "scheduler": "linear",
+  "weight_decay": 0.0,
+  "max_grad_norm": 1.0,
+  "seed": 42,
+  "train_split": "train",
+  "valid_split": "validation",
+  "logging_steps": -1,
+  "project_name": "/tmp/model",
+  "auto_find_batch_size": false,
+  "mixed_precision": "fp16",
+  "save_total_limit": 1,
+  "save_strategy": "epoch",
+  "push_to_hub": true,
+  "repo_id": "yuanhuaisen/autotrain-khvt4-4vmox",
+  "evaluation_strategy": "epoch",
+  "image_column": "autotrain_image",
+  "target_column": "autotrain_label",
+  "log": "none"
+}
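With `"scheduler": "linear"` and `"warmup_ratio": 0.1` over 87 total steps, the learning rates logged in `trainer_state.json` are consistent with a 9-step linear warmup to `lr = 5e-05` followed by linear decay to zero (9 warmup steps is an inference from the logs, roughly `0.1 * 87` rounded up, not stated in the config). A closed-form sketch under that assumption:

```python
def linear_lr(current_step: int, base_lr: float = 5e-05,
              warmup_steps: int = 9, total_steps: int = 87) -> float:
    """Linear warmup then linear decay, in the style of transformers'
    get_linear_schedule_with_warmup. warmup_steps=9 is inferred from
    the logged values, not from the config."""
    if current_step < warmup_steps:
        return base_lr * current_step / max(1, warmup_steps)
    return base_lr * (total_steps - current_step) / max(1, total_steps - warmup_steps)

# Reproduce some logged values (scheduler step = trainer step - 1):
print(linear_lr(1))   # 5.555...e-06 -> logged at step 2
print(linear_lr(9))   # 5e-05        -> logged at step 10
print(linear_lr(10))  # 4.9358...e-05 -> logged at step 11
print(linear_lr(86))  # 6.4102...e-07 -> logged at step 87
```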