svjack committed

Commit afb27bf · verified · 1 Parent(s): 6225f38

Upload 8 files
README.md CHANGED
@@ -1,12 +1,13 @@
  ---
- title: Genshin Impact Ccip
- emoji:
- colorFrom: purple
- colorTo: red
+ title: Genshin-Impact-Character-CCIP
+ emoji: 🐱🏆
+ colorFrom: red
+ colorTo: blue
  sdk: gradio
  sdk_version: 5.10.0
  app_file: app.py
- pinned: false
+ pinned: true
+ license: mit
  ---

- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,57 @@
+ import os
+ import gradio as gr
+ from ccip import _VALID_MODEL_NAMES, _DEFAULT_MODEL_NAMES, ccip_difference, ccip_default_threshold
+ from datasets import load_dataset
+ import pathlib
+
+ # Load the character illustration dataset
+ Genshin_Impact_Illustration_ds = load_dataset("svjack/Genshin-Impact-Illustration")["train"]
+ ds_size = len(Genshin_Impact_Illustration_ds)
+ name_image_dict = {}
+ for i in range(ds_size):
+     row_dict = Genshin_Impact_Illustration_ds[i]
+     name_image_dict[row_dict["name"]] = row_dict["image"]
+
+ # Pick some images to use as clickable examples
+ #example_images = list(name_image_dict.values())[:5]  # use the first 5 dataset images as examples
+ example_images = list(map(str, pathlib.Path(".").rglob("*.png")))
+
+ def _compare_with_dataset(imagex, model_name):
+     threshold = ccip_default_threshold(model_name)
+     results = []
+
+     for name, imagey in name_image_dict.items():
+         diff = ccip_difference(imagex, imagey)
+         result = (diff, 'Same' if diff <= threshold else 'Not Same', name)
+         results.append(result)
+
+     # Sort by difference value (most similar characters first)
+     results.sort(key=lambda x: x[0])
+
+     return results
+
+ if __name__ == '__main__':
+     with gr.Blocks() as demo:
+         with gr.Row():
+             with gr.Column():
+                 # Add example images
+                 gr_input_x = gr.Image(type='pil', label='Upload Image')
+                 gr_model_name = gr.Dropdown(_VALID_MODEL_NAMES, value=_DEFAULT_MODEL_NAMES, label='Model')
+                 gr_button = gr.Button(value='Compare with Dataset', variant='primary')
+
+                 gr.Examples(
+                     examples=example_images,  # example data
+                     inputs=[gr_input_x],  # input component the examples feed into
+                     label="Click on an example to load it into the input."
+                 )
+
+             with gr.Column():
+                 gr_results = gr.Dataframe(headers=["Difference", "Prediction", "Name"], label='Comparison Results')
+
+         gr_button.click(
+             _compare_with_dataset,
+             inputs=[gr_input_x, gr_model_name],
+             outputs=gr_results,
+         )
+
+     demo.queue(os.cpu_count()).launch(share=True)
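
For reference, each row the app produces comes down to one `ccip_difference` call checked against the model's default threshold. The sketch below is illustrative only (not part of the commit) and assumes it runs from the Space root, where `ccip.py` and the bundled example PNGs live:

```python
# Minimal sketch of a single comparison from app.py, outside Gradio.
# Assumes ccip.py from this commit is importable and the listed files exist locally.
from PIL import Image

from ccip import _DEFAULT_MODEL_NAMES, ccip_default_threshold, ccip_difference

query = Image.open("行秋.png")        # one of the example images added in this commit
reference = Image.open("班尼特.png")  # another bundled example, used here as the reference
threshold = ccip_default_threshold(_DEFAULT_MODEL_NAMES)
diff = ccip_difference(query, reference)
print(diff, "Same" if diff <= threshold else "Not Same")
```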
ccip.py ADDED
@@ -0,0 +1,238 @@
+ import json
+ import os.path
+ from functools import lru_cache
+ from typing import Union, List
+
+ import numpy as np
+ from PIL import Image
+ from huggingface_hub import hf_hub_download, HfFileSystem
+
+ try:
+     from typing import Literal
+ except (ModuleNotFoundError, ImportError):
+     from typing_extensions import Literal
+
+ from imgutils.data import MultiImagesTyping, load_images, ImageTyping
+ from imgutils.utils import open_onnx_model
+
+ hf_fs = HfFileSystem()
+
+
+ def _normalize(data, mean=(0.48145466, 0.4578275, 0.40821073), std=(0.26862954, 0.26130258, 0.27577711)):
+     mean, std = np.asarray(mean), np.asarray(std)
+     return (data - mean[:, None, None]) / std[:, None, None]
+
+
+ def _preprocess_image(image: Image.Image, size: int = 384):
+     image = image.resize((size, size), resample=Image.BILINEAR)
+     # noinspection PyTypeChecker
+     data = np.array(image).transpose(2, 0, 1).astype(np.float32) / 255.0
+     data = _normalize(data)
+
+     return data
+
+
+ @lru_cache()
+ def _open_feat_model(model):
+     return open_onnx_model(hf_hub_download(
+         f'deepghs/ccip_onnx',
+         f'{model}/model_feat.onnx',
+     ))
+
+
+ @lru_cache()
+ def _open_metric_model(model):
+     return open_onnx_model(hf_hub_download(
+         f'deepghs/ccip_onnx',
+         f'{model}/model_metrics.onnx',
+     ))
+
+
+ @lru_cache()
+ def _open_metrics(model):
+     with open(hf_hub_download(f'deepghs/ccip_onnx', f'{model}/metrics.json'), 'r') as f:
+         return json.load(f)
+
+
+ @lru_cache()
+ def _open_cluster_metrics(model):
+     with open(hf_hub_download(f'deepghs/ccip_onnx', f'{model}/cluster.json'), 'r') as f:
+         return json.load(f)
+
+
+ _VALID_MODEL_NAMES = [
+     os.path.basename(os.path.dirname(file)) for file in
+     hf_fs.glob('deepghs/ccip_onnx/*/model.ckpt')
+ ]
+ _DEFAULT_MODEL_NAMES = 'ccip-caformer-24-randaug-pruned'
+
+
+ def ccip_extract_feature(image: ImageTyping, size: int = 384, model: str = _DEFAULT_MODEL_NAMES):
+     """
+     Extracts the feature vector of the character from the given anime image.
+
+     :param image: The anime image containing a single character.
+     :type image: ImageTyping
+
+     :param size: The size of the input image to be used for feature extraction. (default: ``384``)
+     :type size: int
+
+     :param model: The name of the model to use for feature extraction. (default: ``ccip-caformer-24-randaug-pruned``)
+         The available model names are: ``ccip-caformer-24-randaug-pruned``,
+         ``ccip-caformer-6-randaug-pruned_fp32``, ``ccip-caformer-5_fp32``.
+     :type model: str
+
+     :return: The feature vector of the character.
+     :rtype: numpy.ndarray
+
+     Examples::
+         >>> from imgutils.metrics import ccip_extract_feature
+         >>>
+         >>> feat = ccip_extract_feature('ccip/1.jpg')
+         >>> feat.shape, feat.dtype
+         ((768,), dtype('float32'))
+     """
+     return ccip_batch_extract_features([image], size, model)[0]
+
+
+ def ccip_batch_extract_features(images: MultiImagesTyping, size: int = 384, model: str = _DEFAULT_MODEL_NAMES):
+     """
+     Extracts the feature vectors of multiple images using the specified model.
+
+     :param images: The input images from which to extract the feature vectors.
+     :type images: MultiImagesTyping
+
+     :param size: The size of the input image to be used for feature extraction. (default: ``384``)
+     :type size: int
+
+     :param model: The name of the model to use for feature extraction. (default: ``ccip-caformer-24-randaug-pruned``)
+         The available model names are: ``ccip-caformer-24-randaug-pruned``,
+         ``ccip-caformer-6-randaug-pruned_fp32``, ``ccip-caformer-5_fp32``.
+     :type model: str
+
+     :return: The feature vectors of the input images.
+     :rtype: numpy.ndarray
+
+     Examples::
+         >>> from imgutils.metrics import ccip_batch_extract_features
+         >>>
+         >>> feat = ccip_batch_extract_features(['ccip/1.jpg', 'ccip/2.jpg', 'ccip/6.jpg'])
+         >>> feat.shape, feat.dtype
+         ((3, 768), dtype('float32'))
+     """
+     images = load_images(images, mode='RGB')
+     data = np.stack([_preprocess_image(item, size=size) for item in images]).astype(np.float32)
+     output, = _open_feat_model(model).run(['output'], {'input': data})
+     return output
+
+
+ _FeatureOrImage = Union[ImageTyping, np.ndarray]
+
+
+ def _p_feature(x: _FeatureOrImage, size: int = 384, model: str = _DEFAULT_MODEL_NAMES):
+     if isinstance(x, np.ndarray):  # if feature
+         return x
+     else:  # is image or path
+         return ccip_extract_feature(x, size, model)
+
+
+ def ccip_default_threshold(model: str = _DEFAULT_MODEL_NAMES) -> float:
+     """
+     Retrieves the default threshold value obtained from model metrics in the Hugging Face model repository.
+
+     :param model: The name of the model to use for feature extraction. (default: ``ccip-caformer-24-randaug-pruned``)
+         The available model names are: ``ccip-caformer-24-randaug-pruned``,
+         ``ccip-caformer-6-randaug-pruned_fp32``, ``ccip-caformer-5_fp32``.
+     :type model: str
+
+     :return: The default threshold value obtained from model metrics.
+     :rtype: float
+
+     Examples::
+         >>> from imgutils.metrics import ccip_default_threshold
+         >>>
+         >>> ccip_default_threshold()
+         0.17847511429108218
+         >>> ccip_default_threshold('ccip-caformer-6-randaug-pruned_fp32')
+         0.1951224011983088
+         >>> ccip_default_threshold('ccip-caformer-5_fp32')
+         0.18397327797685215
+     """
+     return _open_metrics(model)['threshold']
+
+
+ def ccip_difference(x: _FeatureOrImage, y: _FeatureOrImage,
+                     size: int = 384, model: str = _DEFAULT_MODEL_NAMES) -> float:
+     """
+     Calculates the difference value between two anime characters based on their images or feature vectors.
+
+     :param x: The image or feature vector of the first anime character.
+     :type x: Union[ImageTyping, np.ndarray]
+
+     :param y: The image or feature vector of the second anime character.
+     :type y: Union[ImageTyping, np.ndarray]
+
+     :param size: The size of the input image to be used for feature extraction. (default: ``384``)
+     :type size: int
+
+     :param model: The name of the model to use for feature extraction. (default: ``ccip-caformer-24-randaug-pruned``)
+         The available model names are: ``ccip-caformer-24-randaug-pruned``,
+         ``ccip-caformer-6-randaug-pruned_fp32``, ``ccip-caformer-5_fp32``.
+     :type model: str
+
+     :return: The difference value between the two anime characters.
+     :rtype: float
+
+     Examples::
+         >>> from imgutils.metrics import ccip_difference
+         >>>
+         >>> ccip_difference('ccip/1.jpg', 'ccip/2.jpg')  # same character
+         0.16583099961280823
+         >>>
+         >>> # different characters
+         >>> ccip_difference('ccip/1.jpg', 'ccip/6.jpg')
+         0.42947039008140564
+         >>> ccip_difference('ccip/1.jpg', 'ccip/7.jpg')
+         0.4037521779537201
+         >>> ccip_difference('ccip/2.jpg', 'ccip/6.jpg')
+         0.4371533691883087
+         >>> ccip_difference('ccip/2.jpg', 'ccip/7.jpg')
+         0.40748104453086853
+         >>> ccip_difference('ccip/6.jpg', 'ccip/7.jpg')
+         0.392294704914093
+     """
+     return ccip_batch_differences([x, y], size, model)[0, 1].item()
+
+
+ def ccip_batch_differences(images: List[_FeatureOrImage],
+                            size: int = 384, model: str = _DEFAULT_MODEL_NAMES) -> np.ndarray:
+     """
+     Calculates the pairwise differences between a given list of images or feature vectors representing anime characters.
+
+     :param images: The list of images or feature vectors representing anime characters.
+     :type images: List[Union[ImageTyping, np.ndarray]]
+
+     :param size: The size of the input image to be used for feature extraction. (default: ``384``)
+     :type size: int
+
+     :param model: The name of the model to use for feature extraction. (default: ``ccip-caformer-24-randaug-pruned``)
+         The available model names are: ``ccip-caformer-24-randaug-pruned``,
+         ``ccip-caformer-6-randaug-pruned_fp32``, ``ccip-caformer-5_fp32``.
+     :type model: str
+
+     :return: The matrix of pairwise differences between the given images or feature vectors.
+     :rtype: np.ndarray
+
+     Examples::
+         >>> from imgutils.metrics import ccip_batch_differences
+         >>>
+         >>> ccip_batch_differences(['ccip/1.jpg', 'ccip/2.jpg', 'ccip/6.jpg', 'ccip/7.jpg'])
+         array([[6.5350548e-08, 1.6583106e-01, 4.2947042e-01, 4.0375218e-01],
+                [1.6583106e-01, 9.8025822e-08, 4.3715334e-01, 4.0748104e-01],
+                [4.2947042e-01, 4.3715334e-01, 3.2675274e-08, 3.9229470e-01],
+                [4.0375218e-01, 4.0748104e-01, 3.9229470e-01, 6.5350548e-08]],
+               dtype=float32)
+     """
+     input_ = np.stack([_p_feature(img, size, model) for img in images]).astype(np.float32)
+     output, = _open_metric_model(model).run(['output'], {'input': input_})
+     return output
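
The docstrings above show the individual calls; as a rough usage sketch (illustrative only, assuming the example files referenced in the docstrings, e.g. 'ccip/1.jpg', are available locally), pairwise same/not-same decisions follow from the difference matrix and the model's default threshold:

```python
# Illustrative sketch, not part of the commit: turn the pairwise difference
# matrix into boolean "same character" predictions using the default threshold.
import numpy as np

from ccip import ccip_batch_differences, ccip_default_threshold

paths = ['ccip/1.jpg', 'ccip/2.jpg', 'ccip/6.jpg', 'ccip/7.jpg']  # docstring example files (assumed present)
diffs = ccip_batch_differences(paths)       # shape (4, 4); near-zero values on the diagonal
same = diffs <= ccip_default_threshold()    # True where the pair is predicted to be the same character
print(np.round(diffs, 3))
print(same)
```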
ccip_app.py ADDED
@@ -0,0 +1,41 @@
+ import os
+
+ import gradio as gr
+
+ from ccip import _VALID_MODEL_NAMES, _DEFAULT_MODEL_NAMES, ccip_difference, ccip_default_threshold
+
+
+ def _compare(imagex, imagey, model_name):
+     threshold = ccip_default_threshold(model_name)
+     diff = ccip_difference(imagex, imagey)
+
+     return diff, 'Same' if diff <= threshold else 'Not Same'
+
+
+ if __name__ == '__main__':
+     with gr.Blocks() as demo:
+         with gr.Row():
+             with gr.Column():
+                 with gr.Row():
+                     with gr.Column():
+                         gr_input_x = gr.Image(type='pil', label='Image X')
+                     with gr.Column():
+                         gr_input_y = gr.Image(type='pil', label='Image Y')
+                 with gr.Row():
+                     gr_model_name = gr.Dropdown(_VALID_MODEL_NAMES, value=_DEFAULT_MODEL_NAMES, label='Model')
+
+                 gr_button = gr.Button(value='Compare', variant='primary')
+
+             with gr.Column():
+                 with gr.Row():
+                     gr_diff = gr.Number(value=0.0, label='Difference')
+                 with gr.Row():
+                     gr_prediction = gr.Text(value='', label='Prediction')
+
+         gr_button.click(
+             _compare,
+             inputs=[gr_input_x, gr_input_y, gr_model_name],
+             outputs=[gr_diff, gr_prediction],
+         )
+
+     demo.queue(os.cpu_count()).launch(share=True)
requirements.txt ADDED
@@ -0,0 +1,17 @@
+ #gradio==3.41.2
+ gradio==5.10.0
+ numpy
+ pillow
+ huggingface_hub
+ scikit-image
+ pandas
+ opencv-python>=4.6.0
+ hbutils>=0.9.0
+ dghs-imgutils[gpu]>=0.2.3
+ #### pip install onnxruntime-gpu==1.17.0 --index-url=https://pkgs.dev.azure.com/onnxruntime/onnxruntime/_packaging/onnxruntime-cuda-12/pypi/simple
+ onnxruntime-gpu==1.17.0
+ #dghs-imgutils>=0.2.3
+ #onnxruntime-cpu
+ httpx
+
+ datasets
班尼特.png ADDED
芭芭拉.png ADDED
行秋.png ADDED