Upload project
A demo for my self-trained models
- app.py +240 -0
- requirements.txt +11 -0
- weights/2xHFA2kCompact.pth +3 -0
- weights/2xParimgCompact.pth +3 -0
- weights/4xHFA2k.pth +3 -0
- weights/4xLSDIR.pth +3 -0
- weights/4xLSDIRCompactC3.pth +3 -0
- weights/4xLSDIRCompactN.pth +3 -0
- weights/4xLSDIRCompactR3.pth +3 -0
- weights/4xLSDIRplusC.pth +3 -0
- weights/4xLSDIRplusN.pth +3 -0
- weights/4xLSDIRplusR.pth +3 -0
- weights/4xNomos8kSC.pth +3 -0
app.py
ADDED
@@ -0,0 +1,240 @@
# Code taken (and slightly adapted) from https://huggingface.co/spaces/havas79/Real-ESRGAN_Demo/blob/main/app.py - credit where credit is due. I am not showcasing code here, but demoing my own trained models ;)

import gradio as gr
import cv2
import numpy
import os
import random
from basicsr.archs.rrdbnet_arch import RRDBNet
from basicsr.utils.download_util import load_file_from_url

from realesrgan import RealESRGANer
from realesrgan.archs.srvgg_arch import SRVGGNetCompact

last_file = None
img_mode = "RGBA"


def realesrgan(img, model_name, face_enhance):

    if not img:
        return

    imgwidth, imgheight = img.size

    if imgwidth > 1000 or imgheight > 1000:
        raise gr.Error("Input image too big (width or height > 1000)")

    # Define model parameters
    if model_name == '4xNomos8kSC':
        model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=4)
        netscale = 4
    elif model_name == '4xHFA2k':
        model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=4)
        netscale = 4
    elif model_name == '4xLSDIR':
        model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=4)
        netscale = 4
    elif model_name == '4xLSDIRplusN':
        model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=4)
        netscale = 4
    elif model_name == '4xLSDIRplusC':
        model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=4)
        netscale = 4
    elif model_name == '4xLSDIRplusR':
        model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=4)
        netscale = 4
    elif model_name == '2xParimgCompact':
        model = SRVGGNetCompact(num_in_ch=3, num_out_ch=3, num_feat=64, num_conv=16, upscale=2, act_type='prelu')
        netscale = 2
    elif model_name == '2xHFA2kCompact':
        model = SRVGGNetCompact(num_in_ch=3, num_out_ch=3, num_feat=64, num_conv=16, upscale=2, act_type='prelu')
        netscale = 2
    elif model_name == '4xLSDIRCompactN':
        model = SRVGGNetCompact(num_in_ch=3, num_out_ch=3, num_feat=64, num_conv=16, upscale=4, act_type='prelu')
        netscale = 4
    elif model_name == '4xLSDIRCompactC3':
        model = SRVGGNetCompact(num_in_ch=3, num_out_ch=3, num_feat=64, num_conv=16, upscale=4, act_type='prelu')
        netscale = 4
    elif model_name == '4xLSDIRCompactR3':
        model = SRVGGNetCompact(num_in_ch=3, num_out_ch=3, num_feat=64, num_conv=16, upscale=4, act_type='prelu')
        netscale = 4

    # Determine model path
    model_path = os.path.join('weights', model_name + '.pth')

    # Restorer class
    upsampler = RealESRGANer(
        scale=netscale,
        model_path=model_path,
        dni_weight=None,
        model=model,
        tile=0,
        tile_pad=10,
        pre_pad=10,
        half=False,
        gpu_id=None,
    )

    # Use GFPGAN for face enhancement
    if face_enhance:
        from gfpgan import GFPGANer
        face_enhancer = GFPGANer(
            model_path='https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.4.pth',
            upscale=netscale,
            arch='clean',
            channel_multiplier=2,
            bg_upsampler=upsampler)

    # Convert the input PIL image to a cv2 image so that it can be processed by realesrgan
    cv_img = numpy.array(img)
    img = cv2.cvtColor(cv_img, cv2.COLOR_RGBA2BGRA)

    # Apply restoration
    try:
        if face_enhance:
            _, _, output = face_enhancer.enhance(img, has_aligned=False, only_center_face=False, paste_back=True)
        else:
            output, _ = upsampler.enhance(img, outscale=netscale)
    except RuntimeError as error:
        print('Error', error)
        print('If you encounter CUDA out of memory, try a smaller tile size (the tile argument of RealESRGANer).')
    else:
        # Save the restored image and return it to the output Image component
        if img_mode == 'RGBA':  # RGBA images should be saved in png format
            extension = 'png'
        else:
            extension = 'jpg'

        out_filename = f"output_{rnd_string(16)}.{extension}"
        cv2.imwrite(out_filename, output)
        global last_file
        last_file = out_filename
        return out_filename


def rnd_string(x):
    """Returns a string of 'x' random characters."""
    characters = "abcdefghijklmnopqrstuvwxyz_0123456789"
    result = "".join(random.choice(characters) for i in range(x))
    return result


def reset():
    """Resets the Image components of the Gradio interface and deletes
    the last processed image.
    """
    global last_file
    if last_file:
        print(f"Deleting {last_file} ...")
        os.remove(last_file)
        last_file = None
    return gr.update(value=None), gr.update(value=None)


def has_transparency(img):
    """This function works by first checking to see if a "transparency" property is defined
    in the image's info -- if so, we return "True". Then, if the image is using indexed colors
    (such as in GIFs), it gets the index of the transparent color in the palette
    (img.info.get("transparency", -1)) and checks if it's used anywhere in the canvas
    (img.getcolors()). If the image is in RGBA mode, then presumably it has transparency in
    it, but it double-checks by getting the minimum and maximum values of every color channel
    (img.getextrema()), and checks if the alpha channel's smallest value falls below 255.
    https://stackoverflow.com/questions/43864101/python-pil-check-if-image-is-transparent
    """
    if img.info.get("transparency", None) is not None:
        return True
    if img.mode == "P":
        transparent = img.info.get("transparency", -1)
        for _, index in img.getcolors():
            if index == transparent:
                return True
    elif img.mode == "RGBA":
        extrema = img.getextrema()
        if extrema[3][0] < 255:
            return True
    return False


def image_properties(img):
    """Returns the dimensions (width and height) and color mode of the input image and
    also sets the global img_mode variable to be used by the realesrgan function.
    """
    global img_mode
    if img:
        if has_transparency(img):
            img_mode = "RGBA"
        else:
            img_mode = "RGB"
        properties = f"Width: {img.size[0]}, Height: {img.size[1]} | Color Mode: {img_mode}"
        return properties


def main():
    # Gradio Interface
    with gr.Blocks(title="Self-trained ESRGAN models demo", theme="dark") as demo:

        gr.Markdown(
            """# <div align="center"> Upscale image </div>
            Here I demo my self-trained models. The models with their corresponding infos can be found on [my github repo](https://github.com/phhofm/models).
            """
        )

        with gr.Group():
            with gr.Group():
                model_name = gr.Dropdown(label="Model to be used",
                                         choices=["2xHFA2kCompact", "2xParimgCompact", "4xLSDIRCompactN", "4xLSDIRCompactC3", "4xLSDIRCompactR3", "4xNomos8kSC", "4xHFA2k", "4xLSDIR", "4xLSDIRplusN", "4xLSDIRplusC", "4xLSDIRplusR"], value="4xLSDIRCompactC3",
                                         info="See model infos at the bottom of this page")
                face_enhance = gr.Checkbox(label="Face Enhancement using GFPGAN (doesn't work for anime images)", value=False, show_label=True)

        with gr.Row():
            with gr.Group():
                input_image = gr.Image(label="Source Image", type="pil", image_mode="RGBA")
                input_image_properties = gr.Textbox(label="Image Properties - The demo will throw an error if the input image has either width or height > 1000. Output download is jpg for smaller size. Use the models locally to circumvent these limits.", max_lines=1)
            output_image = gr.Image(label="Upscaled Image", image_mode="RGBA")
        with gr.Row():
            upscale_btn = gr.Button("Upscale")
            reset_btn = gr.Button("Reset")
        with gr.Group():
            gr.Markdown(
                """
                **Model infos**
                *SRVGGNetCompact models - in general faster, but less powerful, than RRDBNet*
                2xHFA2kCompact - upscale anime images 2x, faster than 4xHFA2k but less powerful (SRVGGNetCompact)
                2xParimgCompact - upscale photos 2x, fast (SRVGGNetCompact)
                4xLSDIRCompactN - upscale a good-quality photo (no degradations) 4x, faster than 4xLSDIRN but less powerful (SRVGGNetCompact)
                4xLSDIRCompactC3 - upscale a jpg-compressed photo 4x, fast (SRVGGNetCompact)
                4xLSDIRCompactR3 - upscale a degraded photo 4x, fast (SRVGGNetCompact) (too strong on its own; best used in an interpolation such as 4xLSDIRCompactN (or C) 75% with 4xLSDIRCompactR3 25%, to add a little degradation handling to the former)

                *RRDBNet models - in general more powerful than SRVGGNetCompact, but very slow in this demo*
                4xNomos8kSC - upscale photos 4x
                4xHFA2k - upscale anime images 4x
                4xLSDIR - upscale a good-quality photo (no degradation) 4x
                4xLSDIRplusN - upscale a good-quality photo (no degradation) 4x
                4xLSDIRplusC - upscale a jpg-compressed photo 4x
                4xLSDIRplusR - upscale a degraded photo 4x (too strong on its own; best used in an interpolation such as 4xLSDIRplusN (or C) 75% with 4xLSDIRplusR 25%, to add a little degradation handling to the former)

                *The following are not models I trained, but interpolations I created; they are available on my [repo](https://github.com/phhofm/models) and can be tried out locally with chaiNNer:*
                4xLSDIRCompact3 (4xLSDIRCompactC3 + 4xLSDIRCompactR3)
                4xLSDIRCompact2 (4xLSDIRCompactC2 + 4xLSDIRCompactR2)
                4xInt-Ultracri (UltraSharp + Remacri)
                4xInt-Superscri (Superscale + Remacri)
                4xInt-Siacri (Siax + Remacri)
                4xInt-RemDF2K (Remacri + RealSR_DF2K_JPEG)
                4xInt-RemArt (Remacri + VolArt)
                4xInt-RemAnime (Remacri + AnimeSharp)
                4xInt-RemacRestore (Remacri + UltraMix_Restore)
                4xInt-AnimeArt (AnimeSharp + VolArt)
                2xInt-LD-AnimeJaNai (LD-Anime + AnimeJaNai)
                """)

        # Event listeners:
        input_image.change(fn=image_properties, inputs=input_image, outputs=input_image_properties)
        upscale_btn.click(fn=realesrgan, inputs=[input_image, model_name, face_enhance], outputs=output_image)
        reset_btn.click(fn=reset, inputs=[], outputs=[output_image, input_image])

    demo.launch()


if __name__ == "__main__":
    main()
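The model notes in app.py mention interpolated models (for example blending 4xLSDIRCompactN 75% with 4xLSDIRCompactR3 25%), which chaiNNer can do locally. A minimal sketch of that weighted state-dict blend in plain PyTorch; the output file name is a placeholder, and the 'params'/'params_ema' unwrapping is an assumption about how the checkpoints may be nested.

```python
# Sketch of the 75/25 model interpolation referenced in the model notes above.
# The output file name is a placeholder, not a file shipped in this repo.
import torch

def state_dict(ckpt):
    # basicsr-style checkpoints are sometimes nested under 'params'/'params_ema'
    for key in ("params_ema", "params"):
        if isinstance(ckpt, dict) and key in ckpt:
            return ckpt[key]
    return ckpt

a = state_dict(torch.load("weights/4xLSDIRCompactN.pth", map_location="cpu"))
b = state_dict(torch.load("weights/4xLSDIRCompactR3.pth", map_location="cpu"))

alpha = 0.75  # weight of the first model
blended = {k: alpha * a[k] + (1 - alpha) * b[k] for k in a}
torch.save(blended, "4xLSDIRCompactN75_R325.pth")
```

Both checkpoints share the same SRVGGNetCompact 4x architecture, so their state-dict keys line up and a per-tensor weighted average is all the interpolation amounts to.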
requirements.txt
ADDED
@@ -0,0 +1,11 @@
torch
torchvision
numpy
opencv-python
Pillow
basicsr
facexlib
gfpgan
tqdm
gradio
realesrgan
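The properties textbox in app.py points out that the 1000-pixel limit only applies to the hosted demo and can be avoided by running the models locally. A minimal local-use sketch with the same RealESRGANer API as app.py; 'input.png' and 'output.png' are placeholders, and tile=512 is an assumed value chosen to keep memory use bounded on large images.

```python
# Local-use sketch (assumes the repo's weights/ directory and an image of your own).
import cv2
from realesrgan import RealESRGANer
from realesrgan.archs.srvgg_arch import SRVGGNetCompact

model = SRVGGNetCompact(num_in_ch=3, num_out_ch=3, num_feat=64, num_conv=16,
                        upscale=4, act_type='prelu')
upsampler = RealESRGANer(scale=4, model_path='weights/4xLSDIRCompactC3.pth',
                         model=model, tile=512, tile_pad=10, pre_pad=10, half=False)

img = cv2.imread('input.png', cv2.IMREAD_UNCHANGED)  # no 1000 px limit when run locally
output, _ = upsampler.enhance(img, outscale=4)
cv2.imwrite('output.png', output)
```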
weights/2xHFA2kCompact.pth
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:78da2d5c636f868f7741d5c736b34cdfc9fae3f2f104bc9dc655b963def784dc
size 4838301
weights/2xParimgCompact.pth
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:b8222a24a2549189e78501811f0652c92d1460351c8c083df55ba49edcc86913
size 4839603
weights/4xHFA2k.pth
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:f9d713f33ba671da364164117c271ff4866217420d5b20a220c79171467b2576
size 134057493
weights/4xLSDIR.pth
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:d1a3f88b923df74014b7531268e9a48b3755bc7b627fd96994f8cf8f64a4dba5
size 134070901
weights/4xLSDIRCompactC3.pth
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a330cac38d956c7c3d98cd477f51b9f69df0f92f6c04d6f22998370718f846b9
size 5004681
weights/4xLSDIRCompactN.pth
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:9bc7ed761954a0ffb5ac48f34021334253fddb17fc62b94392762a013fb30d7d
size 5005877
weights/4xLSDIRCompactR3.pth
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:5b7d8bccfee8897fb72126f05de83e1bdf36e7dc2b17023b074fc1c97580e568
size 5004681
weights/4xLSDIRplusC.pth
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:bea179c9c1dbf8979a7eb000efd9bb0f016542a58968db6ecf3fbadf776a1e78
size 134057493
weights/4xLSDIRplusN.pth
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:f2e60334068e1ac72909f97d7316b39a8b43b0201561a0ff8013a1d6285c1cf5
size 134068099
weights/4xLSDIRplusR.pth
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:351bfd7f7eed1a42b8fa6dbcc4700cfec859c206861b6557d8d0cf222d41293e
size 134070921
weights/4xNomos8kSC.pth
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:5c690dbb755a5a24839754abb5c6d6cfbb7f14dc992dea54a38921b06f9b5e1a
size 134057493
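The weight entries above are Git LFS pointer files: the oid is the SHA-256 of the actual checkpoint, which is downloaded when the repo is cloned with LFS support (e.g. `git lfs pull`). A small sketch, assuming the object has been fetched, that checks a downloaded file against the hash recorded in its pointer.

```python
# Sketch: verify a fetched checkpoint against the sha256 from its LFS pointer above.
import hashlib

with open("weights/4xNomos8kSC.pth", "rb") as f:
    digest = hashlib.sha256(f.read()).hexdigest()

expected = "5c690dbb755a5a24839754abb5c6d6cfbb7f14dc992dea54a38921b06f9b5e1a"
print("OK" if digest == expected else "hash mismatch")
```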