SmilingWolf committed
Commit fa2b83f
Parent(s): b74d104

Add model files

- README.md +40 -3
- config.json +39 -0
- model.msgpack +3 -0
- model.onnx +3 -0
- model.safetensors +3 -0
- selected_tags.csv +0 -0
- sw_jax_cv_config.json +19 -0
README.md
CHANGED
@@ -1,3 +1,40 @@
----
-license: apache-2.0
----
+---
+license: apache-2.0
+library_name: timm
+---
+# WD EVA02-Large Tagger v3
+
+Supports ratings, characters and general tags.
+
+Trained using https://github.com/SmilingWolf/JAX-CV.
+TPUs used for training kindly provided by the [TRC program](https://sites.research.google/trc/about/).
+
+## Dataset
+Last image id: 7220105
+Trained on Danbooru images with IDs modulo 0000-0899.
+Validated on images with IDs modulo 0950-0999.
+Images with fewer than 10 general tags were filtered out.
+Tags with fewer than 600 images were filtered out.
+
+## Validation results
+`v1.0: P=R: threshold = 0.xxxx, F1 = 0.xxxx`
+
+## What's new
+Model v1.0/Dataset v3:
+More training images, more and up-to-date tags (up to 2024-02-28).
+Now `timm` compatible! Load it up and give it a spin using the canonical one-liner!
+The ONNX model is compatible with code developed for the v2 series of models.
+The batch dimension of the ONNX model is no longer fixed to 1. Now you can go crazy with batch inference.
+Switched to Macro-F1 to measure model performance, since it gives me a better gauge of overall training progress.
+
+## Runtime deps
+The ONNX model requires `onnxruntime >= 1.17.0`.
+
+## Inference code examples
+For timm: https://github.com/neggles/wdv3-timm
+For ONNX: https://huggingface.co/spaces/SmilingWolf/wd-tagger
+For JAX: https://github.com/SmilingWolf/wdv3-jax
+
+## Final words
+Subject to change and updates.
+Downstream users are encouraged to use tagged releases rather than relying on the head of the repo.
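As a quick illustration of the "canonical one-liner" mentioned in the README, here is a minimal sketch of loading the tagger through `timm`. The Hub repo id `SmilingWolf/wd-eva02-large-tagger-v3` is an assumption; substitute the actual repository name.

```python
import timm

# Minimal sketch: load the tagger straight from the Hub via timm.
# The repo id below is an assumption, not confirmed by this commit.
model = timm.create_model(
    "hf-hub:SmilingWolf/wd-eva02-large-tagger-v3", pretrained=True
)
model.eval()
```

The `hf-hub:` prefix tells `timm` to pull `config.json` and the weights from the repository rather than from a built-in pretrained entry.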
config.json
ADDED
@@ -0,0 +1,39 @@
+{
+  "architecture": "eva02_large_patch14_448",
+  "num_classes": 10861,
+  "num_features": 1024,
+  "global_pool": "avg",
+  "model_args": {
+    "img_size": 448,
+    "ref_feat_shape": [
+      32,
+      32
+    ]
+  },
+  "pretrained_cfg": {
+    "custom_load": false,
+    "input_size": [
+      3,
+      448,
+      448
+    ],
+    "fixed_input_size": false,
+    "interpolation": "bicubic",
+    "crop_pct": 1.0,
+    "crop_mode": "center",
+    "mean": [
+      0.5,
+      0.5,
+      0.5
+    ],
+    "std": [
+      0.5,
+      0.5,
+      0.5
+    ],
+    "num_classes": 10861,
+    "pool_size": null,
+    "first_conv": null,
+    "classifier": null
+  }
+}
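The `pretrained_cfg` above (3x448x448 input, bicubic interpolation, `crop_pct` 1.0, mean/std of 0.5) is what `timm` resolves into its evaluation transform. Below is a sketch of that path, under the same assumed repo id as before; note that the reference wdv3-timm script linked in the README also handles padding to square and channel order, which this sketch leaves out.

```python
import timm
import torch
from PIL import Image

# Sketch only: build the eval preprocessing timm derives from pretrained_cfg.
# The repo id and image path are placeholders.
model = timm.create_model(
    "hf-hub:SmilingWolf/wd-eva02-large-tagger-v3", pretrained=True
).eval()

data_cfg = timm.data.resolve_model_data_config(model)  # input_size, mean, std, interpolation
transform = timm.data.create_transform(**data_cfg, is_training=False)

image = Image.open("example.jpg").convert("RGB")
with torch.no_grad():
    probs = model(transform(image).unsqueeze(0)).sigmoid()  # multi-label head: sigmoid, not softmax
```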
model.msgpack
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0843c6a7706f52ec55c97c1eab40753928937883312361f1dda899abd5076c15
+size 1260921286
model.onnx
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9e768793060c7939b277ccb382783e8670e8a042d29d77aa736be0c8cc898bfc
+size 1260435999
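Since the README notes the ONNX export now has a dynamic batch axis and needs `onnxruntime >= 1.17.0`, here is a hedged sketch of batched inference. The preprocessing details (448x448, BGR channel order, raw float32 pixels in the 0-255 range, NHWC layout) are assumptions carried over from the wd-tagger Space linked in the README, which also pads images to square; verify against that code.

```python
import numpy as np
import onnxruntime as ort
from PIL import Image

# Hedged sketch of batched ONNX inference with the dynamic batch axis.
# Preprocessing assumptions: 448x448, BGR order, raw 0-255 float32, NHWC.
def preprocess(path: str, size: int = 448) -> np.ndarray:
    img = Image.open(path).convert("RGB").resize((size, size), Image.BICUBIC)
    return np.asarray(img, dtype=np.float32)[:, :, ::-1]  # RGB -> BGR

session = ort.InferenceSession("model.onnx", providers=["CPUExecutionProvider"])
input_name = session.get_inputs()[0].name
output_name = session.get_outputs()[0].name

batch = np.stack([preprocess(p) for p in ["a.jpg", "b.jpg"]])  # (N, 448, 448, 3)
probs = session.run([output_name], {input_name: batch})[0]     # (N, num_tags)
```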
model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:74f05b0aad869d9f91fbc597bc8d157d98abdead573d5c23509a195dbb8a7ef5
+size 1260796004
selected_tags.csv
ADDED
The diff for this file is too large to render.
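`selected_tags.csv` is the index-to-tag mapping for the classifier head (10861 classes per `config.json`). A small sketch of turning scores into tags follows; the column names (`tag_id`, `name`, `category`, `count`) and the assumption that row order matches the model's output order are carried over from earlier WD tagger releases, so check the file itself.

```python
import csv

# Hedged sketch: map output indices to tag names via selected_tags.csv.
# Column names and row ordering are assumptions from earlier WD tagger releases.
with open("selected_tags.csv", newline="", encoding="utf-8") as f:
    tag_names = [row["name"] for row in csv.DictReader(f)]

def decode(scores, threshold=0.35):
    """Return (tag, score) pairs above an illustrative cutoff.

    0.35 is a placeholder, not the P=R threshold reported in the README.
    """
    return [(tag_names[i], float(s)) for i, s in enumerate(scores) if s > threshold]
```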
sw_jax_cv_config.json
ADDED
@@ -0,0 +1,19 @@
+{
+  "image_size": 448,
+  "model_name": "eva02_large",
+  "model_args": {
+    "image_size": 448,
+    "patch_size": 14,
+    "num_classes": 10861,
+    "num_layers": 24,
+    "embed_dim": 1024,
+    "mlp_dim": 2730,
+    "num_heads": 16,
+    "scale_mlp": true,
+    "drop_path_rate": 0.1,
+    "use_norm_bias": false,
+    "use_linear_bias": true,
+    "norm_layer": "reparam_layernorm",
+    "layer_norm_eps": 1e-06
+  }
+}
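For the JAX side, `sw_jax_cv_config.json` records the hyperparameters used by the JAX-CV trainer, and `model.msgpack` is presumably a Flax msgpack checkpoint. Below is a minimal sketch of reading both; building the actual EVA02 module from these arguments is left to the JAX-CV / wdv3-jax repositories linked in the README.

```python
import json

from flax import serialization

# Hedged sketch: read the training config and restore the raw parameter pytree.
# Assumes model.msgpack is a standard Flax msgpack checkpoint; constructing the
# EVA02 module itself is handled by the JAX-CV / wdv3-jax code.
with open("sw_jax_cv_config.json") as f:
    cfg = json.load(f)
print(cfg["model_name"], cfg["model_args"]["num_classes"])  # eva02_large 10861

with open("model.msgpack", "rb") as f:
    params = serialization.msgpack_restore(f.read())
```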