Update app.py
app.py
CHANGED
@@ -13,7 +13,6 @@ from transformers import (
 from diffusers import DDPMScheduler,AutoencoderKL
 from typing import List
 
-
 import torch
 import os
 from transformers import AutoTokenizer
@@ -27,7 +26,6 @@ from preprocess.openpose.run_openpose import OpenPose
 from detectron2.data.detection_utils import convert_PIL_to_numpy,_apply_exif_orientation
 from torchvision.transforms.functional import to_pil_image
 
-
 def pil_to_binary_mask(pil_image, threshold=0):
     np_image = np.array(pil_image)
     grayscale_image = Image.fromarray(np_image).convert("L")
@@ -41,7 +39,6 @@ def pil_to_binary_mask(pil_image, threshold=0):
     output_mask = Image.fromarray(mask)
     return output_mask
 
-
 base_path = 'yisol/IDM-VTON'
 example_path = os.path.join(os.path.dirname(__file__), 'example')
 
@@ -85,7 +82,6 @@ vae = AutoencoderKL.from_pretrained(base_path,
     torch_dtype=torch.float16,
 )
 
-# "stabilityai/stable-diffusion-xl-base-1.0",
 UNet_Encoder = UNet2DConditionModel_ref.from_pretrained(
     base_path,
     subfolder="unet_encoder",
@@ -124,14 +120,14 @@ pipe = TryonPipeline.from_pretrained(
 pipe.unet_encoder = UNet_Encoder
 
 @spaces.GPU
-def start_tryon(dict,garm_img,garment_des,is_checked,is_checked_crop,denoise_steps,seed,category):
+def start_tryon(dict, garm_img, garment_des, is_checked, is_checked_crop, denoise_steps, seed, category):
     device = "cuda"
 
     openpose_model.preprocessor.body_estimation.model.to(device)
     pipe.to(device)
     pipe.unet_encoder.to(device)
 
-    garm_img= garm_img.convert("RGB").resize((768,1024))
+    garm_img = garm_img.convert("RGB").resize((768,1024))
     human_img_orig = dict["background"].convert("RGB")
 
     if is_checked_crop:
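Note: the `.to(device)` calls at the top of `start_tryon` (rather than at import time) follow the ZeroGPU pattern implied by the `@spaces.GPU` decorator: on a ZeroGPU Space, a GPU is attached only while a decorated function runs. A minimal sketch of that pattern, assuming the `spaces` package of a Hugging Face Space (the `linear`/`run` names are illustrative, not from app.py):

    import torch
    import spaces

    linear = torch.nn.Linear(4, 4)  # built on CPU at import time; CUDA is unavailable here

    @spaces.GPU
    def run(x: torch.Tensor) -> torch.Tensor:
        linear.to("cuda")           # safe: the GPU is attached for the duration of this call
        return linear(x.to("cuda")).cpu()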
@@ -148,33 +144,31 @@ def start_tryon(dict,garm_img,garment_des,is_checked,is_checked_crop,denoise_ste
     else:
         human_img = human_img_orig.resize((768,1024))
 
-
+    status_message = ""
     if is_checked:
-        keypoints = openpose_model(human_img.resize((384,512)))
-        model_parse, _ = parsing_model(human_img.resize((384,512)))
-        mask, mask_gray = get_mask_location('hd', "upper_body", model_parse, keypoints)
-        mask = mask.resize((768,1024))
+        try:
+            keypoints = openpose_model(human_img.resize((384,512)))
+            model_parse, _ = parsing_model(human_img.resize((384,512)))
+            mask, mask_gray = get_mask_location('hd', category, model_parse, keypoints)
+            mask = mask.resize((768,1024))
+        except NotImplementedError:
+            status_message = f"Automatic mask generation is not implemented for category {category}; using the user-drawn mask."
+            mask = pil_to_binary_mask(dict['layers'][0].convert("RGB").resize((768, 1024)))
     else:
         mask = pil_to_binary_mask(dict['layers'][0].convert("RGB").resize((768, 1024)))
-
-    # mask = mask.unsqueeze(0)
+
     mask_gray = (1-transforms.ToTensor()(mask)) * tensor_transfrom(human_img)
     mask_gray = to_pil_image((mask_gray+1.0)/2.0)
 
-
     human_img_arg = _apply_exif_orientation(human_img.resize((384,512)))
     human_img_arg = convert_PIL_to_numpy(human_img_arg, format="BGR")
 
-
-
     args = apply_net.create_argument_parser().parse_args(('show', './configs/densepose_rcnn_R_50_FPN_s1x.yaml', './ckpt/densepose/model_final_162be9.pkl', 'dp_segm', '-v', '--opts', 'MODEL.DEVICE', 'cuda'))
-    # verbosity = getattr(args, "verbosity", None)
    pose_img = args.func(args,human_img_arg)
     pose_img = pose_img[:,:,::-1]
     pose_img = Image.fromarray(pose_img).resize((768,1024))
 
     with torch.no_grad():
-        # Extract the images
         with torch.cuda.amp.autocast():
             with torch.no_grad():
                 prompt = "((best quality, masterpiece, ultra-detailed, high quality photography, photo realistic)), the model is wearing " + garment_des
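The behavioral change in this hunk is the masking fallback: automatic mask generation is attempted first, and a NotImplementedError from `get_mask_location` downgrades to the mask the user painted in the editor layer while recording a status string for the UI. A minimal, self-contained sketch of the same control flow (the `auto_mask`/`user_mask` callables are hypothetical stand-ins, not names from app.py):

    def resolve_mask(category, auto_mask, user_mask):
        """Return (mask, status): the auto mask when supported, the user-drawn mask otherwise."""
        status = ""
        try:
            mask = auto_mask(category)   # stand-in for the openpose/parsing/get_mask_location path
        except NotImplementedError:
            status = (f"Automatic mask generation is not implemented for "
                      f"category {category}; using the user-drawn mask.")
            mask = user_mask()           # stand-in for pil_to_binary_mask(dict['layers'][0] ...)
        return mask, status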
@@ -211,8 +205,6 @@ def start_tryon(dict,garm_img,garment_des,is_checked,is_checked_crop,denoise_ste
                     negative_prompt=negative_prompt,
                 )
 
-
-
                 pose_img = tensor_transfrom(pose_img).unsqueeze(0).to(device,torch.float16)
                 garm_tensor = tensor_transfrom(garm_img).unsqueeze(0).to(device,torch.float16)
                 generator = torch.Generator(device).manual_seed(seed) if seed is not None else None
@@ -238,10 +230,9 @@ def start_tryon(dict,garm_img,garment_des,is_checked,is_checked_crop,denoise_ste
     if is_checked_crop:
         out_img = images[0].resize(crop_size)
         human_img_orig.paste(out_img, (int(left), int(top)))
-        return human_img_orig, mask_gray
+        return human_img_orig, mask_gray, status_message
     else:
-        return images[0], mask_gray
-        # return images[0], mask_gray
+        return images[0], mask_gray, status_message
 
 garm_list = os.listdir(os.path.join(example_path,"cloth"))
 garm_list_path = [os.path.join(example_path,"cloth",garm) for garm in garm_list]
@@ -257,10 +248,8 @@ for ex_human in human_list_path:
     ex_dict['composite'] = None
     human_ex_list.append(ex_dict)
 
-
 image_blocks = gr.Blocks(theme="Nymbo/Nymbo_Theme").queue(max_size=12)
 with image_blocks as demo:
-
     with gr.Column():
         try_button = gr.Button(value="Start Virtual Try-on")
         with gr.Accordion(label="Advanced Settings", open=False):
@@ -301,7 +290,13 @@ with image_blocks as demo:
             masked_img = gr.Image(label="Masked Image", elem_id="masked-img",show_share_button=False)
         with gr.Column():
             image_out = gr.Image(label="Result", elem_id="output-img",show_share_button=False)
+
+    with gr.Column():
+        status_message = gr.Textbox(label="Status", interactive=False)
 
-    try_button.click(fn=start_tryon, inputs=[imgs, garm_img, prompt, is_checked, is_checked_crop, denoise_steps, seed, category], outputs=[image_out, masked_img], api_name='tryon')
+    try_button.click(fn=start_tryon,
+                     inputs=[imgs, garm_img, prompt, is_checked, is_checked_crop, denoise_steps, seed, category],
+                     outputs=[image_out, masked_img, status_message],
+                     api_name='tryon')
 
 image_blocks.launch(auth=("gini","pick"))
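Since the click handler is exposed with api_name='tryon' and the app launches behind basic auth, the endpoint can be exercised programmatically. A hedged sketch using gradio_client (the URL, file names, and the ImageEditor dict layout are assumptions, and "upper_body" is only a guess at an accepted category value):

    from gradio_client import Client, handle_file

    client = Client("http://localhost:7860", auth=("gini", "pick"))  # assumed local URL
    result = client.predict(
        {   # ImageEditor value: background photo plus one painted mask layer
            "background": handle_file("person.jpg"),
            "layers": [handle_file("mask.png")],
            "composite": None,
        },
        handle_file("garment.jpg"),         # garm_img
        "short sleeve round neck t-shirt",  # garment_des
        True,                               # is_checked: try automatic masking first
        False,                              # is_checked_crop
        30,                                 # denoise_steps
        42,                                 # seed
        "upper_body",                       # category
        api_name="/tryon",
    )
    out_path, masked_path, status = result  # three values, matching outputs=[image_out, masked_img, status_message]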