robertsw committed
Commit 8490378
1 Parent(s): 35321c4

End of training

Files changed (5):
  1. README.md +48 -0
  2. config.json +125 -0
  3. model.safetensors +3 -0
  4. preprocessor_config.json +44 -0
  5. training_args.bin +3 -0
README.md ADDED
@@ -0,0 +1,48 @@
+ ---
+ tags:
+ - generated_from_trainer
+ datasets:
+ - imagefolder
+ model-index:
+ - name: tmp_trainer
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # tmp_trainer
+
+ This model was trained from scratch on the imagefolder dataset.
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 5e-05
+ - train_batch_size: 8
+ - eval_batch_size: 8
+ - seed: 42
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: linear
+ - num_epochs: 3.0
+
+ ### Framework versions
+
+ - Transformers 4.41.2
+ - Pytorch 2.2.0
+ - Datasets 2.19.2
+ - Tokenizers 0.19.1
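
For reference, a minimal inference sketch against this checkpoint, assuming a hypothetical hub repo id `robertsw/tmp_trainer` (the commit does not state the final path; substitute the real one). The model and processor classes follow from the config.json and preprocessor_config.json added below.

```python
from PIL import Image
import torch
from transformers import AutoImageProcessor, AutoModelForImageClassification

repo_id = "robertsw/tmp_trainer"  # assumption: the actual repo id may differ

processor = AutoImageProcessor.from_pretrained(repo_id)
model = AutoModelForImageClassification.from_pretrained(repo_id)

image = Image.open("outfit.jpg").convert("RGB")  # any RGB image
inputs = processor(images=image, return_tensors="pt")

with torch.no_grad():
    logits = model(**inputs).logits

predicted_id = logits.argmax(-1).item()
# transformers converts id2label keys to ints on load, so index with an int
print(model.config.id2label[predicted_id])  # one of the 36 style labels
```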
config.json ADDED
@@ -0,0 +1,125 @@
+ {
+   "_name_or_path": "checkpoint-400",
+   "apply_layernorm": true,
+   "architectures": [
+     "Dinov2ForImageClassification"
+   ],
+   "attention_probs_dropout_prob": 0.0,
+   "drop_path_rate": 0.0,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.0,
+   "hidden_size": 768,
+   "id2label": {
+     "0": "Alternative",
+     "1": "Animecore",
+     "10": "Dark_Fantasy",
+     "11": "E-Girl",
+     "12": "Ethereal",
+     "13": "Fairycore",
+     "14": "Goblincore",
+     "15": "Goth",
+     "16": "Grunge",
+     "17": "Hypebeast",
+     "18": "Jersey_Shore",
+     "19": "Kawaii",
+     "2": "Art_Hoe",
+     "20": "Kidcore",
+     "21": "Lagenlook",
+     "22": "Lolita",
+     "23": "Mob_Wife",
+     "24": "Normcore",
+     "25": "Old_Money",
+     "26": "Pastel_Goth",
+     "27": "Preppy",
+     "28": "Punk",
+     "29": "Soft_Grunge",
+     "3": "Avant-garde",
+     "30": "Steampunk",
+     "31": "Tomato_Girl_Summer",
+     "32": "Twee",
+     "33": "VSCO",
+     "34": "Vaporwave",
+     "35": "Y2K",
+     "4": "Baddie",
+     "5": "Bohemian",
+     "6": "Cottagecore",
+     "7": "Country",
+     "8": "Cyberpunk",
+     "9": "Dark_Academia"
+   },
+   "image_size": 518,
+   "initializer_range": 0.02,
+   "label2id": {
+     "Alternative": "0",
+     "Animecore": "1",
+     "Art_Hoe": "2",
+     "Avant-garde": "3",
+     "Baddie": "4",
+     "Bohemian": "5",
+     "Cottagecore": "6",
+     "Country": "7",
+     "Cyberpunk": "8",
+     "Dark_Academia": "9",
+     "Dark_Fantasy": "10",
+     "E-Girl": "11",
+     "Ethereal": "12",
+     "Fairycore": "13",
+     "Goblincore": "14",
+     "Goth": "15",
+     "Grunge": "16",
+     "Hypebeast": "17",
+     "Jersey_Shore": "18",
+     "Kawaii": "19",
+     "Kidcore": "20",
+     "Lagenlook": "21",
+     "Lolita": "22",
+     "Mob_Wife": "23",
+     "Normcore": "24",
+     "Old_Money": "25",
+     "Pastel_Goth": "26",
+     "Preppy": "27",
+     "Punk": "28",
+     "Soft_Grunge": "29",
+     "Steampunk": "30",
+     "Tomato_Girl_Summer": "31",
+     "Twee": "32",
+     "VSCO": "33",
+     "Vaporwave": "34",
+     "Y2K": "35"
+   },
+   "layer_norm_eps": 1e-06,
+   "layerscale_value": 1.0,
+   "mlp_ratio": 4,
+   "model_type": "dinov2",
+   "num_attention_heads": 12,
+   "num_channels": 3,
+   "num_hidden_layers": 12,
+   "out_features": [
+     "stage12"
+   ],
+   "out_indices": [
+     12
+   ],
+   "patch_size": 14,
+   "problem_type": "single_label_classification",
+   "qkv_bias": true,
+   "reshape_hidden_states": true,
+   "stage_names": [
+     "stem",
+     "stage1",
+     "stage2",
+     "stage3",
+     "stage4",
+     "stage5",
+     "stage6",
+     "stage7",
+     "stage8",
+     "stage9",
+     "stage10",
+     "stage11",
+     "stage12"
+   ],
+   "torch_dtype": "float32",
+   "transformers_version": "4.41.2",
+   "use_swiglu_ffn": false
+ }
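
A quick sanity check on the label maps above: the config defines 36 classes, and `id2label` and `label2id` should be mutually inverse (note that both sides store ids as strings in the raw JSON; transformers converts them to ints on load).

```python
import json

with open("config.json") as f:
    cfg = json.load(f)

# 36 classes, and the two maps invert each other (string ids on both sides)
assert len(cfg["id2label"]) == len(cfg["label2id"]) == 36
assert all(cfg["label2id"][name] == idx for idx, name in cfg["id2label"].items())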
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:48a941270203dcdf3d95bfe16e20258e66dea8c2dacf7ca4a6c01cf71f405e62
+ size 346568968
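
This is a Git LFS pointer, not the weights themselves; the `oid` is the SHA-256 of the real file. A locally downloaded model.safetensors can be checked against it:

```python
import hashlib

# Stream the file and compare its digest to the LFS pointer's oid above
h = hashlib.sha256()
with open("model.safetensors", "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        h.update(chunk)

assert h.hexdigest() == "48a941270203dcdf3d95bfe16e20258e66dea8c2dacf7ca4a6c01cf71f405e62"
```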
preprocessor_config.json ADDED
@@ -0,0 +1,44 @@
+ {
+   "_valid_processor_keys": [
+     "images",
+     "do_resize",
+     "size",
+     "resample",
+     "do_center_crop",
+     "crop_size",
+     "do_rescale",
+     "rescale_factor",
+     "do_normalize",
+     "image_mean",
+     "image_std",
+     "do_convert_rgb",
+     "return_tensors",
+     "data_format",
+     "input_data_format"
+   ],
+   "crop_size": {
+     "height": 224,
+     "width": 224
+   },
+   "do_center_crop": true,
+   "do_convert_rgb": true,
+   "do_normalize": true,
+   "do_rescale": true,
+   "do_resize": true,
+   "image_mean": [
+     0.485,
+     0.456,
+     0.406
+   ],
+   "image_processor_type": "BitImageProcessor",
+   "image_std": [
+     0.229,
+     0.224,
+     0.225
+   ],
+   "resample": 3,
+   "rescale_factor": 0.00392156862745098,
+   "size": {
+     "shortest_edge": 256
+   }
+ }
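
This is the standard ImageNet preprocessing pipeline: resize the shortest edge to 256, center-crop to 224x224, rescale to [0, 1], and normalize with the ImageNet mean/std. As a sketch (the BitImageProcessor above is the canonical path), an equivalent torchvision pipeline would look like:

```python
from torchvision import transforms

# Values mirror preprocessor_config.json: resample 3 is PIL bicubic,
# and rescale_factor 1/255 is what ToTensor() applies.
preprocess = transforms.Compose([
    transforms.Resize(256, interpolation=transforms.InterpolationMode.BICUBIC),  # "shortest_edge": 256
    transforms.CenterCrop(224),                        # crop_size 224x224
    transforms.ToTensor(),                             # rescale to [0, 1]
    transforms.Normalize(mean=[0.485, 0.456, 0.406],   # image_mean
                         std=[0.229, 0.224, 0.225]),   # image_std
])
```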
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8e7c8308c22aed37672a0cdaf57e6e5dd82acfcd69de76eaa8957ea2c325c14d
+ size 5048
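
training_args.bin is a pickled `TrainingArguments` object, so it can be inspected after download, assuming a compatible transformers install is available to unpickle the class:

```python
import torch

# On recent PyTorch, full pickle loading must be opted into explicitly
args = torch.load("training_args.bin", weights_only=False)
print(args.learning_rate, args.num_train_epochs)  # 5e-05 and 3.0, per the README
```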