import os
import cv2
import gradio as gr
import torch
from basicsr.archs.srvgg_arch import SRVGGNetCompact
from gfpgan.utils import GFPGANer
from huggingface_hub import snapshot_download, hf_hub_download
from realesrgan.utils import RealESRGANer
import examples
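# Hugging Face Hub repositories hosting the pretrained Real-ESRGAN and GFPGAN weights used below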
REALESRGAN_REPO_ID = 'leonelhs/realesrgan'
GFPGAN_REPO_ID = 'leonelhs/gfpgan'
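# Log the installed package versions (useful when inspecting the Space logs) and let the local
# `examples` helper download the sample images referenced in the demo examples at the bottom.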
os.system("pip freeze")
examples.download()
# background enhancer with RealESRGAN
model = SRVGGNetCompact(num_in_ch=3, num_out_ch=3, num_feat=64, num_conv=32, upscale=4, act_type='prelu')
model_path = hf_hub_download(repo_id=REALESRGAN_REPO_ID, filename='realesr-general-x4v3.pth')
half = torch.cuda.is_available()  # fp16 inference only makes sense on a CUDA GPU
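# tile=0 upscales the whole image in one pass; a positive tile size trades speed for lower GPU memory use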
upsampler = RealESRGANer(scale=4, model_path=model_path, model=model, tile=0, tile_pad=10, pre_pad=0, half=half)
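# restored images are written to this folder and returned through the gr.File output for download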
os.makedirs('output', exist_ok=True)
def predict(img, version, scale):
    print(img, version, scale)
    if scale > 4:
        scale = 4  # avoid excessively large scale values
    try:
        extension = os.path.splitext(os.path.basename(str(img)))[1]
        img = cv2.imread(img, cv2.IMREAD_UNCHANGED)
        if len(img.shape) == 3 and img.shape[2] == 4:
            img_mode = 'RGBA'
        elif len(img.shape) == 2:  # grayscale input: convert to 3-channel BGR
            img_mode = None
            img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
        else:
            img_mode = None

        h, w = img.shape[0:2]
        if h > 3500 or w > 3500:
            print('image is too large')
            return None, None

        if h < 300:
            # upsample very small inputs before restoration
            img = cv2.resize(img, (w * 2, h * 2), interpolation=cv2.INTER_LANCZOS4)
        # download the face-restoration checkpoints and pick the one matching the requested version
        snapshot_folder = snapshot_download(repo_id=GFPGAN_REPO_ID)
        versions = {
            'v1.2': ('GFPGANv1.2.pth', 'clean'),
            'v1.3': ('GFPGANv1.3.pth', 'clean'),
            'v1.4': ('GFPGANv1.4.pth', 'clean'),
            'RestoreFormer': ('RestoreFormer.pth', 'RestoreFormer'),
        }
        if version not in versions:
            print('unknown version', version)
            return None, None
        filename, arch = versions[version]
        face_enhancer = GFPGANer(
            model_path=os.path.join(snapshot_folder, filename),
            upscale=2, arch=arch, channel_multiplier=2, bg_upsampler=upsampler)
        try:
            _, _, output = face_enhancer.enhance(
                img, has_aligned=False, only_center_face=False, paste_back=True)
        except RuntimeError as error:
            print('Error', error)
            return None, None

        try:
            if scale != 2:
                # the restored image is already 2x the input; adjust it towards the requested factor
                interpolation = cv2.INTER_AREA if scale < 2 else cv2.INTER_LANCZOS4
                h, w = img.shape[0:2]
                output = cv2.resize(output, (int(w * scale / 2), int(h * scale / 2)), interpolation=interpolation)
        except Exception as error:
            print('wrong scale input.', error)

        # RGBA images must be saved as PNG to keep the alpha channel
        extension = 'png' if img_mode == 'RGBA' else 'jpg'
        save_path = f'output/out.{extension}'
        cv2.imwrite(save_path, output)

        # OpenCV returns BGR; Gradio displays RGB
        output = cv2.cvtColor(output, cv2.COLOR_BGR2RGB)
        return output, save_path
    except Exception as error:
        print('global exception', error)
        return None, None
title = "GFPGAN: Practical Face Restoration Algorithm"
description = r"""Gradio demo for GFPGAN: Towards Real-World Blind Face Restoration with Generative Facial Prior.
It can be used to restore your **old photos** or improve **AI-generated faces**.
To use it, simply upload your image.
If GFPGAN is helpful, please ⭐ the GitHub repo and recommend it to your friends 😊
"""
article = r"""
[![download](https://img.shields.io/github/downloads/TencentARC/GFPGAN/total.svg)](https://github.com/TencentARC/GFPGAN/releases)
[![GitHub Stars](https://img.shields.io/github/stars/TencentARC/GFPGAN?style=social)](https://github.com/TencentARC/GFPGAN)
[![arXiv](https://img.shields.io/badge/arXiv-Paper-.svg)](https://arxiv.org/abs/2101.04061)
If you have any questions, please email 📧 `xintao.wang@outlook.com` or `xintaowang@tencent.com`.
"""
demo = gr.Interface(
    predict,
    inputs=[
        gr.Image(type="filepath", label="Input"),
        gr.Radio(['v1.2', 'v1.3', 'v1.4', 'RestoreFormer'], type="value", value='v1.4', label='version'),
        gr.Number(label="Rescaling factor", value=2),
    ],
    outputs=[
        gr.Image(type="numpy", label="Output (The whole image)"),
        gr.File(label="Download the output image"),
    ],
    title=title,
    description=description,
    article=article,
    examples=[
        ['AI-generate.jpg', 'v1.4', 2],
        ['lincoln.jpg', 'v1.4', 2],
        ['Blake_Lively.jpg', 'v1.4', 2],
        ['10045.png', 'v1.4', 2],
    ])
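# queue() lets long-running restorations wait in line instead of hitting the request timeout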
demo.queue().launch()