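"""Gradio demo for a full-body anime GAN.

Three tabs: generate an image from a seed, encode an uploaded image back into the
generator's latent space (after background removal and character detection), and
render an interpolation video between two latents. All models are ONNX graphs
downloaded from the skytnt/fbanime-gan and skytnt/anime-seg repos on Hugging Face
Hub and run with onnxruntime.
"""
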
import random
import gradio as gr
import imageio
import numpy as np
import onnx
import onnxruntime as rt
import huggingface_hub
from numpy.random import RandomState
from skimage import transform


def get_inter(r1, r2):
    # Intersection area of two xyxy boxes
    h_inter = max(min(r1[3], r2[3]) - max(r1[1], r2[1]), 0)
    w_inter = max(min(r1[2], r2[2]) - max(r1[0], r2[0]), 0)
    return h_inter * w_inter


def iou(r1, r2):
    # Intersection over union of two xyxy boxes
    s1 = (r1[2] - r1[0]) * (r1[3] - r1[1])
    s2 = (r2[2] - r2[0]) * (r2[3] - r2[1])
    i = get_inter(r1, r2)
    return i / (s1 + s2 - i)
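# Worked example: iou((0, 0, 2, 2), (1, 1, 3, 3)) == 1 / 7 (intersection 1, union 4 + 4 - 1).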


def letterbox(im, new_shape=(640, 640), color=(0.5, 0.5, 0.5), stride=32):
    # Resize and pad image while meeting stride-multiple constraints
    shape = im.shape[:2]  # current shape [height, width]
    # Scale ratio (new / old)
    r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
    # Compute padding
    new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
    dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1]  # wh padding
    dw, dh = np.mod(dw, stride), np.mod(dh, stride)  # wh padding
    dw /= 2  # divide padding into 2 sides
    dh /= 2
    if shape[::-1] != new_unpad:  # resize (compare (w, h) with (w, h))
        im = transform.resize(im, (new_unpad[1], new_unpad[0]))
    top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
    left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
    im_new = np.full((new_unpad[1] + top + bottom, new_unpad[0] + left + right, 3), color, dtype=np.float32)
    im_new[top:new_unpad[1] + top, left:new_unpad[0] + left] = im
    return im_new
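# Note: padding is taken modulo `stride`, so the output is only padded up to the nearest
# stride multiple rather than all the way to new_shape (standard YOLO-style letterboxing).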


def nms(pred, conf_thres, iou_thres, max_instance=20):  # pred (anchor_num, 5 + cls_num)
    nc = pred.shape[1] - 5
    candidates = [list() for x in range(nc)]
    for x in pred:
        if x[4] < conf_thres:
            continue
        cls = np.argmax(x[5:])
        p = x[4] * x[5 + cls]
        if conf_thres <= p:
            box = (x[0] - x[2] / 2, x[1] - x[3] / 2, x[0] + x[2] / 2, x[1] + x[3] / 2)  # xywh2xyxy
            candidates[cls].append([p, box])
    result = [list() for x in range(nc)]
    for i, candidate in enumerate(candidates):
        candidate = sorted(candidate, key=lambda a: a[0], reverse=True)
        candidate = candidate[:max_instance]
        for x in candidate:
            ok = True
            for r in result[i]:
                if iou(r[1], x[1]) > iou_thres:
                    ok = False
                    break
            if ok:
                result[i].append(x)
    return result
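# nms() returns one list per class; each kept detection is [score, (x1, y1, x2, y2)]
# in the coordinates of the letterboxed detector input.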


class Model:
    def __init__(self):
        self.detector = None
        self.encoder = None
        self.g_synthesis = None
        self.g_mapping = None
        self.detector_stride = None
        self.detector_imgsz = None
        self.detector_class_names = None
        self.anime_seg = None
        self.w_avg = None
        self.load_models()

    def load_models(self):
        g_mapping_path = huggingface_hub.hf_hub_download("skytnt/fbanime-gan", "g_mapping.onnx")
        g_synthesis_path = huggingface_hub.hf_hub_download("skytnt/fbanime-gan", "g_synthesis.onnx")
        encoder_path = huggingface_hub.hf_hub_download("skytnt/fbanime-gan", "encoder.onnx")
        detector_path = huggingface_hub.hf_hub_download("skytnt/fbanime-gan", "waifu_dect.onnx")
        anime_seg_path = huggingface_hub.hf_hub_download("skytnt/anime-seg", "isnetis.onnx")
        providers = ['CPUExecutionProvider']
        gpu_providers = ['CUDAExecutionProvider']  # unused here; swap in for GPU inference
        # Read the average latent (w_avg) directly from the mapping network's initializers
        g_mapping = onnx.load(g_mapping_path)
        w_avg = [x for x in g_mapping.graph.initializer if x.name == "w_avg"][0]
        w_avg = np.frombuffer(w_avg.raw_data, dtype=np.float32)[np.newaxis, :]
        w_avg = w_avg.repeat(16, axis=0)[np.newaxis, :]
        self.w_avg = w_avg
        self.g_mapping = rt.InferenceSession(g_mapping_path, providers=providers)
        self.g_synthesis = rt.InferenceSession(g_synthesis_path, providers=providers)
        self.encoder = rt.InferenceSession(encoder_path, providers=providers)
        self.detector = rt.InferenceSession(detector_path, providers=providers)
        detector_meta = self.detector.get_modelmeta().custom_metadata_map
        self.detector_stride = int(detector_meta['stride'])
        self.detector_imgsz = 1088
        self.detector_class_names = eval(detector_meta['names'])  # class names stored as a Python literal in the metadata
        self.anime_seg = rt.InferenceSession(anime_seg_path, providers=providers)

    def get_img(self, w, noise=0):
        img = self.g_synthesis.run(None, {'w': w.astype(np.float32),
                                          'noise': np.asarray([noise], dtype=np.float32)})[0]
        return (img.transpose(0, 2, 3, 1) * 127.5 + 128).clip(0, 255).astype(np.uint8)[0]

    def get_w(self, z, psi1, psi2):
        return self.g_mapping.run(None, {'z': z.astype(np.float32),
                                         'psi': np.asarray([psi1, psi2], dtype=np.float32)})[0]

    def remove_bg(self, img, s=1024):
        # Pad the input to an s x s square, run the segmentation model, then composite onto white
        img0 = img
        img = (img / 255).astype(np.float32)
        h, w = h0, w0 = img.shape[:-1]
        h, w = (s, int(s * w / h)) if h > w else (int(s * h / w), s)
        ph, pw = s - h, s - w
        img_input = np.zeros([s, s, 3], dtype=np.float32)
        img_input[ph // 2:ph // 2 + h, pw // 2:pw // 2 + w] = transform.resize(img, (h, w))
        img_input = np.transpose(img_input, (2, 0, 1))
        img_input = img_input[np.newaxis, :]
        mask = self.anime_seg.run(None, {'img': img_input})[0][0]
        mask = np.transpose(mask, (1, 2, 0))
        mask = mask[ph // 2:ph // 2 + h, pw // 2:pw // 2 + w]
        mask = transform.resize(mask, (h0, w0))
        img0 = (img0 * mask + 255 * (1 - mask)).astype(np.uint8)
        return img0

    def encode_img(self, img):
        img = transform.resize(((img / 255 - 0.5) / 0.5), (256, 256)).transpose(2, 0, 1)[np.newaxis, :].astype(
            np.float32)
        return self.encoder.run(None, {'img': img})[0] + self.w_avg

    def detect(self, im0, conf_thres, iou_thres, detail=False):
        if im0 is None:
            return []
        img = letterbox((im0 / 255).astype(np.float32), (self.detector_imgsz, self.detector_imgsz),
                        stride=self.detector_stride)
        # Convert HWC to NCHW
        img = img.transpose(2, 0, 1)
        img = img[np.newaxis, :]
        pred = self.detector.run(None, {'images': img})[0][0]
        dets = nms(pred, conf_thres, iou_thres)
        imgs = []
        # Print results
        s = '%gx%g ' % img.shape[2:]  # print string
        for i, det in enumerate(dets):
            n = len(det)
            s += f"{n} {self.detector_class_names[i]}{'s' * (n > 1)}, "  # add to string
        if detail:
            print(s)
        waifu_rects = []
        head_rects = []
        body_rects = []
        for i, det in enumerate(dets):
            for x in det:
                # Rescale boxes from img_size to im0 size
                wr = im0.shape[1] / img.shape[3]
                hr = im0.shape[0] / img.shape[2]
                x[1] = (int(x[1][0] * wr), int(x[1][1] * hr),
                        int(x[1][2] * wr), int(x[1][3] * hr))
                if i == 0:
                    head_rects.append(x[1])
                elif i == 1:
                    body_rects.append(x[1])
                elif i == 2:
                    waifu_rects.append(x[1])
        for j, waifu_rect in enumerate(waifu_rects):
            msg = f'waifu {j + 1} '
            head_num = 0
            body_num = 0
            hr, br = None, None
            for r in head_rects:
                if get_inter(r, waifu_rect) / ((r[2] - r[0]) * (r[3] - r[1])) > 0.75:
                    hr = r
                    head_num += 1
            if head_num != 1:
                if detail:
                    print(msg + f'head num error: {head_num}')
                continue
            for r in body_rects:
                if get_inter(r, waifu_rect) / ((r[2] - r[0]) * (r[3] - r[1])) > 0.65:
                    br = r
                    body_num += 1
            if body_num != 1:
                if detail:
                    print(msg + f'body num error: {body_num}')
                continue
            bounds = (min(waifu_rect[0], hr[0], br[0]),
                      min(waifu_rect[1], hr[1], br[1]),
                      max(waifu_rect[2], hr[2], br[2]),
                      max(waifu_rect[3], hr[3], br[3]))
            if (bounds[2] - bounds[0]) / (bounds[3] - bounds[1]) > 0.7:
                if detail:
                    print(msg + "ratio out of limit")
                continue
            expand_pixel = (bounds[3] - bounds[1]) // 20
            bounds = [max(bounds[0] - expand_pixel // 2, 0),
                      max(bounds[1] - expand_pixel, 0),
                      min(bounds[2] + expand_pixel // 2, im0.shape[1]),
                      min(bounds[3] + expand_pixel, im0.shape[0]),
                      ]
            # crop and resize
            w = bounds[2] - bounds[0]
            h = bounds[3] - bounds[1]
            bounds[3] += h % 2
            h += h % 2
            r = min(512 / w, 1024 / h)
            pw, ph = int(512 / r - w), int(1024 / r - h)
            bounds_tmp = (bounds[0] - pw // 2, bounds[1] - ph // 2,
                          bounds[2] + pw // 2 + pw % 2, bounds[3] + ph // 2 + ph % 2)
            bounds = (max(0, bounds_tmp[0]), max(0, bounds_tmp[1]),
                      min(im0.shape[1], bounds_tmp[2]), min(im0.shape[0], bounds_tmp[3]))
            dl = bounds[0] - bounds_tmp[0]
            dr = bounds[2] - bounds_tmp[2]
            dt = bounds[1] - bounds_tmp[1]
            db = bounds[3] - bounds_tmp[3]
            w = bounds_tmp[2] - bounds_tmp[0]
            h = bounds_tmp[3] - bounds_tmp[1]
            temp_img = np.full((h, w, 3), 255, dtype=np.uint8)
            temp_img[dt:h + db, dl:w + dr] = im0[bounds[1]:bounds[3], bounds[0]:bounds[2]]
            temp_img = transform.resize(temp_img, (1024, 512), preserve_range=True).astype(np.uint8)
            imgs.append(temp_img)
        return imgs
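    # detect() returns a list of 1024x512 uint8 crops, one per character whose head,
    # body, and whole-body boxes were matched consistently and passed the aspect-ratio check.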

    def gen_video(self, w1, w2, noise, path, frame_num=10):
        video = imageio.get_writer(path, mode='I', fps=frame_num // 2, codec='libx264', bitrate='16M')
        lin = np.linspace(0, 1, frame_num)
        frames = []
        for i in range(0, frame_num):
            img = self.get_img(((1 - lin[i]) * w1) + (lin[i] * w2), noise)
            frames.append(img)
            video.append_data(img)
        for i in reversed(range(0, frame_num)):
            video.append_data(frames[i])
        video.close()
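# Minimal usage sketch outside Gradio (illustration only; assumes the ONNX downloads
# in load_models() succeed):
#   model = Model()
#   z = RandomState(0).randn(1, 1024).astype(np.float32)
#   w = model.get_w(z, 0.7, 1.0)       # map z -> w with the two truncation psi values
#   img = model.get_img(w, noise=1.0)  # HWC uint8 RGB image from the synthesis network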


def get_thumbnail(img):
    img_new = np.full((256, 384, 3), 200, dtype=np.uint8)
    img_new[:, 128:256] = transform.resize(img, (256, 128), preserve_range=True)
    return img_new


def gen_fn(seed, random_seed, psi1, psi2, noise):
    if random_seed:
        seed = random.randint(0, 2 ** 32 - 1)
    z = RandomState(int(seed)).randn(1, 1024)
    w = model.get_w(z.astype(dtype=np.float32), psi1, psi2)
    img_out = model.get_img(w, noise)
    return img_out, seed, w, get_thumbnail(img_out)


def encode_img_fn(img, noise):
    if img is None:
        return "please upload an image", None, None, None, None
    img = model.remove_bg(img)
    imgs = model.detect(img, 0.2, 0.03)
    if len(imgs) == 0:
        return "failed to detect an anime character", None, None, None, None
    w = model.encode_img(imgs[0])
    img_out = model.get_img(w, noise)
    return "success", imgs[0], img_out, w, get_thumbnail(img_out)


def gen_video_fn(w1, w2, noise, frame):
    if w1 is None or w2 is None:
        return None
    model.gen_video(w1, w2, noise, "video.mp4", int(frame))
    return "video.mp4"
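# UI wiring below: gen_input5 (noise strength) is shared by all three tabs, and gen_fn
# writes the drawn seed back into the seed slider so a random result can be reproduced.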


if __name__ == '__main__':
    model = Model()
    app = gr.Blocks()
    with app:
        gr.Markdown("# full-body anime GAN\n\n"
                    "![visitor badge](https://api.visitorbadge.io/api/visitors?path=skytnt.full-body-anime-gan&countColor=%23263759&style=flat&labelStyle=lower)\n\n")
        with gr.Tabs():
            with gr.TabItem("generate image"):
                with gr.Row():
                    with gr.Column():
                        gr.Markdown("generate image")
                        with gr.Row():
                            gen_input1 = gr.Slider(minimum=0, maximum=2 ** 32 - 1, step=1, value=0, label="seed")
                            gen_input2 = gr.Checkbox(label="Random", value=True)
                        gen_input3 = gr.Slider(minimum=0, maximum=1, step=0.01, value=0.7, label="truncation psi 1")
                        gen_input4 = gr.Slider(minimum=0, maximum=1, step=0.01, value=1, label="truncation psi 2")
                        gen_input5 = gr.Slider(minimum=0, maximum=1, step=0.01, value=1, label="noise strength")
                        with gr.Group():
                            gen_submit = gr.Button("Generate", variant="primary")
                    with gr.Column():
                        gen_output1 = gr.Image(label="output image")
                        select_img_input_w1 = gr.State()
                        select_img_input_img1 = gr.State()
            with gr.TabItem("encode image"):
                with gr.Row():
                    with gr.Column():
                        gr.Markdown("for best results, upload a standing full-body image")
                        encode_img_input = gr.Image(label="input image")
                        examples_data = [[f"examples/{x:02d}.jpg"] for x in range(1, 5)]
                        encode_img_examples = gr.Examples(examples=examples_data, inputs=[encode_img_input],
                                                          cache_examples=False)
                        with gr.Group():
                            encode_img_submit = gr.Button("Run", variant="primary")
                    with gr.Column():
                        encode_img_output1 = gr.Textbox(label="output message")
                        with gr.Row():
                            encode_img_output2 = gr.Image(label="detected")
                            encode_img_output3 = gr.Image(label="encoded")
                        select_img_input_w2 = gr.State()
                        select_img_input_img2 = gr.State()
            with gr.TabItem("generate video"):
                with gr.Row():
                    with gr.Column():
                        gr.Markdown("generate video between 2 images")
                        with gr.Row():
                            with gr.Column():
                                select_img1_dropdown = gr.Radio(label="Select image 1",
                                                                value="current generated image",
                                                                choices=["current generated image",
                                                                         "current encoded image"], type="index")
                                with gr.Group():
                                    select_img1_button = gr.Button("Select", variant="primary")
                                select_img1_output_img = gr.Image(label="selected image 1")
                                select_img1_output_w = gr.State()
                            with gr.Column():
                                select_img2_dropdown = gr.Radio(label="Select image 2",
                                                                value="current generated image",
                                                                choices=["current generated image",
                                                                         "current encoded image"], type="index")
                                with gr.Group():
                                    select_img2_button = gr.Button("Select", variant="primary")
                                select_img2_output_img = gr.Image(label="selected image 2")
                                select_img2_output_w = gr.State()
                        generate_video_frame = gr.Slider(minimum=3, maximum=5, step=1, label="frame", value=3)
                        with gr.Group():
                            generate_video_button = gr.Button("Generate", variant="primary")
                    with gr.Column():
                        generate_video_output = gr.Video(label="output video")
        gen_submit.click(gen_fn, [gen_input1, gen_input2, gen_input3, gen_input4, gen_input5],
                         [gen_output1, gen_input1, select_img_input_w1, select_img_input_img1])
        encode_img_submit.click(encode_img_fn, [encode_img_input, gen_input5],
                                [encode_img_output1, encode_img_output2, encode_img_output3, select_img_input_w2,
                                 select_img_input_img2])
        select_img1_button.click(lambda i, img1, img2, w1, w2: (img1, w1) if i == 0 else (img2, w2),
                                 [select_img1_dropdown, select_img_input_img1, select_img_input_img2,
                                  select_img_input_w1, select_img_input_w2],
                                 [select_img1_output_img, select_img1_output_w])
        select_img2_button.click(lambda i, img1, img2, w1, w2: (img1, w1) if i == 0 else (img2, w2),
                                 [select_img2_dropdown, select_img_input_img1, select_img_input_img2,
                                  select_img_input_w1, select_img_input_w2],
                                 [select_img2_output_img, select_img2_output_w])
        generate_video_button.click(gen_video_fn,
                                    [select_img1_output_w, select_img2_output_w, gen_input5, generate_video_frame],
                                    [generate_video_output])
    app.launch()