import spaces import gradio as gr from PIL import Image from src.tryon_pipeline import StableDiffusionXLInpaintPipeline as TryonPipeline from src.unet_hacked_garmnet import UNet2DConditionModel as UNet2DConditionModel_ref from src.unet_hacked_tryon import UNet2DConditionModel from transformers import ( CLIPImageProcessor, CLIPVisionModelWithProjection, CLIPTextModel, CLIPTextModelWithProjection, ) from diffusers import DDPMScheduler,AutoencoderKL from typing import List import torch import os from transformers import AutoTokenizer import numpy as np from utils_mask import get_mask_location from torchvision import transforms import apply_net from preprocess.humanparsing.run_parsing import Parsing from preprocess.openpose.run_openpose import OpenPose from detectron2.data.detection_utils import convert_PIL_to_numpy,_apply_exif_orientation from torchvision.transforms.functional import to_pil_image def pil_to_binary_mask(pil_image, threshold=0): np_image = np.array(pil_image) grayscale_image = Image.fromarray(np_image).convert("L") binary_mask = np.array(grayscale_image) > threshold mask = np.zeros(binary_mask.shape, dtype=np.uint8) for i in range(binary_mask.shape[0]): for j in range(binary_mask.shape[1]): if binary_mask[i,j] == True : mask[i,j] = 1 mask = (mask*255).astype(np.uint8) output_mask = Image.fromarray(mask) return output_mask import numpy as np from PIL import Image def get_mask_location(mode, category, parsing, keypoints): parsing = np.array(parsing) mask = np.zeros_like(parsing) print(f"Selected category: {category}") print(f"Unique values in parsing: {np.unique(parsing)}") if category == "상의": # 상의에 해당하는 부분만 마스킹 (상체, 팔) upper_body = [5, 6, 7] mask[np.isin(parsing, upper_body)] = 255 print(f"Masking upper body parts: {upper_body}") elif category == "하의": # 하의에 해당하는 부분만 마스킹 (하체) lower_body = [9, 12, 13, 14, 15, 16, 17, 18, 19] mask[np.isin(parsing, lower_body)] = 255 print(f"Masking lower body parts: {lower_body}") elif category == "드레스": # 드레스에 해당하는 부분 마스킹 (상체와 하체) full_body = [5, 6, 7, 9, 12, 13, 14, 15, 16, 17, 18, 19] mask[np.isin(parsing, full_body)] = 255 print(f"Masking full body parts: {full_body}") else: raise ValueError(f"Unknown category: {category}") print(f"Mask shape: {mask.shape}, Unique values in mask: {np.unique(mask)}") print(f"Number of masked pixels: {np.sum(mask == 255)}") mask_gray = Image.fromarray(mask) return mask_gray, mask_gray base_path = 'yisol/IDM-VTON' example_path = os.path.join(os.path.dirname(__file__), 'example') unet = UNet2DConditionModel.from_pretrained( base_path, subfolder="unet", torch_dtype=torch.float16, ) unet.requires_grad_(False) tokenizer_one = AutoTokenizer.from_pretrained( base_path, subfolder="tokenizer", revision=None, use_fast=False, ) tokenizer_two = AutoTokenizer.from_pretrained( base_path, subfolder="tokenizer_2", revision=None, use_fast=False, ) noise_scheduler = DDPMScheduler.from_pretrained(base_path, subfolder="scheduler") text_encoder_one = CLIPTextModel.from_pretrained( base_path, subfolder="text_encoder", torch_dtype=torch.float16, ) text_encoder_two = CLIPTextModelWithProjection.from_pretrained( base_path, subfolder="text_encoder_2", torch_dtype=torch.float16, ) image_encoder = CLIPVisionModelWithProjection.from_pretrained( base_path, subfolder="image_encoder", torch_dtype=torch.float16, ) vae = AutoencoderKL.from_pretrained(base_path, subfolder="vae", torch_dtype=torch.float16, ) UNet_Encoder = UNet2DConditionModel_ref.from_pretrained( base_path, subfolder="unet_encoder", torch_dtype=torch.float16, ) parsing_model = Parsing(0) openpose_model = OpenPose(0) UNet_Encoder.requires_grad_(False) image_encoder.requires_grad_(False) vae.requires_grad_(False) unet.requires_grad_(False) text_encoder_one.requires_grad_(False) text_encoder_two.requires_grad_(False) tensor_transfrom = transforms.Compose( [ transforms.ToTensor(), transforms.Normalize([0.5], [0.5]), ] ) pipe = TryonPipeline.from_pretrained( base_path, unet=unet, vae=vae, feature_extractor= CLIPImageProcessor(), text_encoder = text_encoder_one, text_encoder_2 = text_encoder_two, tokenizer = tokenizer_one, tokenizer_2 = tokenizer_two, scheduler = noise_scheduler, image_encoder=image_encoder, torch_dtype=torch.float16, ) pipe.unet_encoder = UNet_Encoder @spaces.GPU def start_tryon(dict, garm_img, garment_des, is_checked, is_checked_crop, denoise_steps, seed, category): device = "cuda" openpose_model.preprocessor.body_estimation.model.to(device) pipe.to(device) pipe.unet_encoder.to(device) garm_img = garm_img.convert("RGB").resize((768,1024)) human_img_orig = dict["background"].convert("RGB") if is_checked_crop: width, height = human_img_orig.size target_width = int(min(width, height * (3 / 4))) target_height = int(min(height, width * (4 / 3))) left = (width - target_width) / 2 top = (height - target_height) / 2 right = (width + target_width) / 2 bottom = (height + target_height) / 2 cropped_img = human_img_orig.crop((left, top, right, bottom)) crop_size = cropped_img.size human_img = cropped_img.resize((768,1024)) else: human_img = human_img_orig.resize((768,1024)) status_message = "" if is_checked: try: print(f"Processing category: {category}") keypoints = openpose_model(human_img.resize((384,512))) model_parse, _ = parsing_model(human_img.resize((384,512))) # 파싱 모델의 출력 확인 print(f"Parsing model output shape: {model_parse.shape}") print(f"Unique values in parsing model output: {np.unique(model_parse)}") mask, mask_gray = get_mask_location('hd', category, model_parse, keypoints) mask = mask.resize((768,1024)) print(f"Mask created for category {category}") # 마스크 확인 print(f"Final mask shape: {mask.size}") mask_array = np.array(mask) print(f"Unique values in final mask: {np.unique(mask_array)}") print(f"Number of masked pixels in final mask: {np.sum(mask_array == 255)}") except Exception as e: status_message = f"자동 마스크 생성 중 오류가 발생했습니다: {str(e)}. 기본 마스크를 사용합니다." print(f"Error in mask creation: {str(e)}") mask = Image.new('L', (768, 1024), 255) else: if dict['layers'] and dict['layers'][0]: mask = pil_to_binary_mask(dict['layers'][0].convert("RGB").resize((768, 1024))) else: mask = Image.new('L', (768, 1024), 255) mask_gray = (1-transforms.ToTensor()(mask)) * tensor_transfrom(human_img) mask_gray = to_pil_image((mask_gray+1.0)/2.0) human_img_arg = _apply_exif_orientation(human_img.resize((384,512))) human_img_arg = convert_PIL_to_numpy(human_img_arg, format="BGR") args = apply_net.create_argument_parser().parse_args(('show', './configs/densepose_rcnn_R_50_FPN_s1x.yaml', './ckpt/densepose/model_final_162be9.pkl', 'dp_segm', '-v', '--opts', 'MODEL.DEVICE', 'cuda')) pose_img = args.func(args,human_img_arg) pose_img = pose_img[:,:,::-1] pose_img = Image.fromarray(pose_img).resize((768,1024)) with torch.no_grad(): with torch.cuda.amp.autocast(): with torch.no_grad(): prompt = "((best quality, masterpiece, ultra-detailed, high quality photography, photo realistic)), the model is wearing " + garment_des negative_prompt = "monochrome, lowres, bad anatomy, worst quality, normal quality, low quality, blurry, jpeg artifacts, sketch" with torch.inference_mode(): ( prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds, ) = pipe.encode_prompt( prompt, num_images_per_prompt=1, do_classifier_free_guidance=True, negative_prompt=negative_prompt, ) prompt = "((best quality, masterpiece, ultra-detailed, high quality photography, photo realistic)), a photo of " + garment_des negative_prompt = "monochrome, lowres, bad anatomy, worst quality, normal quality, low quality, blurry, jpeg artifacts, sketch" if not isinstance(prompt, List): prompt = [prompt] * 1 if not isinstance(negative_prompt, List): negative_prompt = [negative_prompt] * 1 with torch.inference_mode(): ( prompt_embeds_c, _, _, _, ) = pipe.encode_prompt( prompt, num_images_per_prompt=1, do_classifier_free_guidance=False, negative_prompt=negative_prompt, ) pose_img = tensor_transfrom(pose_img).unsqueeze(0).to(device,torch.float16) garm_tensor = tensor_transfrom(garm_img).unsqueeze(0).to(device,torch.float16) generator = torch.Generator(device).manual_seed(seed) if seed is not None else None result = pipe( prompt_embeds=prompt_embeds.to(device,torch.float16), negative_prompt_embeds=negative_prompt_embeds.to(device,torch.float16), pooled_prompt_embeds=pooled_prompt_embeds.to(device,torch.float16), negative_pooled_prompt_embeds=negative_pooled_prompt_embeds.to(device,torch.float16), num_inference_steps=denoise_steps, generator=generator, strength = 1.0, pose_img = pose_img.to(device,torch.float16), text_embeds_cloth=prompt_embeds_c.to(device,torch.float16), cloth = garm_tensor.to(device,torch.float16), mask_image=mask, image=human_img, height=1024, width=768, ip_adapter_image = garm_img.resize((768,1024)), guidance_scale=2.0, ) # 결과 형태 확인 및 처리 if isinstance(result, tuple): images = result[0] elif hasattr(result, 'images'): images = result.images else: raise ValueError(f"Unexpected result type: {type(result)}") print(f"Result type: {type(result)}") print(f"Result content: {result}") print(f"Mask shape: {mask.size}") print(f"Human image shape: {human_img.size}") print(f"Garment image shape: {garm_img.size}") print(f"Output image shape: {images[0].size}") if is_checked_crop: out_img = images[0].resize(crop_size) human_img_orig.paste(out_img, (int(left), int(top))) return human_img_orig, mask_gray, status_message else: return images[0], mask_gray, status_message garm_list = os.listdir(os.path.join(example_path,"cloth")) garm_list_path = [os.path.join(example_path,"cloth",garm) for garm in garm_list] human_list = os.listdir(os.path.join(example_path,"human")) human_list_path = [os.path.join(example_path,"human",human) for human in human_list] human_ex_list = [] for ex_human in human_list_path: ex_dict= {} ex_dict['background'] = ex_human ex_dict['layers'] = None ex_dict['composite'] = None human_ex_list.append(ex_dict) image_blocks = gr.Blocks(theme="Nymbo/Nymbo_Theme").queue(max_size=12) with image_blocks as demo: with gr.Column(): try_button = gr.Button(value="가상 피팅 시작") with gr.Accordion(label="고급 설정", open=False): with gr.Row(): denoise_steps = gr.Number(label="디노이징 단계", minimum=20, maximum=40, value=30, step=1) seed = gr.Number(label="시드", minimum=-1, maximum=2147483647, step=1, value=-1) with gr.Row(): with gr.Column(): imgs = gr.ImageEditor(sources='upload', type="pil", label='인물 사진. 펜으로 마스크 또는 자동 마스킹 사용', interactive=True) with gr.Row(): is_checked = gr.Checkbox(label="예", info="자동 생성 마스크 사용 (5초 소요)",value=True) with gr.Row(): category = gr.Dropdown( choices=["상의", "하의", "드레스"], label="카테고리", value="상의" ) with gr.Row(): is_checked_crop = gr.Checkbox(label="예", info="자동 자르기 및 크기 조정 사용",value=False) example = gr.Examples( inputs=imgs, examples_per_page=15, examples=human_ex_list ) with gr.Column(): garm_img = gr.Image(label="의류", sources='upload', type="pil") with gr.Row(elem_id="prompt-container"): with gr.Row(): prompt = gr.Textbox(label="의류 설명", placeholder="반소매 라운드넥 티셔츠", show_label=True, elem_id="prompt") example = gr.Examples( inputs=garm_img, examples_per_page=16, examples=garm_list_path) with gr.Column(): masked_img = gr.Image(label="마스크 적용 이미지", elem_id="masked-img",show_share_button=False) with gr.Column(): image_out = gr.Image(label="결과", elem_id="output-img",show_share_button=False) with gr.Column(): status_message = gr.Textbox(label="상태", interactive=False) try_button.click(fn=start_tryon, inputs=[imgs, garm_img, prompt, is_checked, is_checked_crop, denoise_steps, seed, category], outputs=[image_out, masked_img, status_message], api_name='tryon') image_blocks.launch(auth=("gini","pick"))