aiqcamp committed
Commit 1f54bd2
1 Parent(s): d095bd7

Update app.py

Files changed (1)
  1. app.py +13 -17
app.py CHANGED
@@ -211,7 +211,7 @@ def start_tryon(dict, garm_img, garment_des, is_checked, is_checked_crop, denois
     pose_img = tensor_transfrom(pose_img).unsqueeze(0).to(device,torch.float16)
     garm_tensor = tensor_transfrom(garm_img).unsqueeze(0).to(device,torch.float16)
     generator = torch.Generator(device).manual_seed(seed) if seed is not None else None
-    images = pipe(
+    result = pipe(
         prompt_embeds=prompt_embeds.to(device,torch.float16),
         negative_prompt_embeds=negative_prompt_embeds.to(device,torch.float16),
         pooled_prompt_embeds=pooled_prompt_embeds.to(device,torch.float16),
@@ -228,8 +228,18 @@ def start_tryon(dict, garm_img, garment_des, is_checked, is_checked_crop, denois
         width=768,
         ip_adapter_image = garm_img.resize((768,1024)),
         guidance_scale=2.0,
-    ).images
-
+    )
+
+    # Check the result format and handle it accordingly
+    if isinstance(result, tuple):
+        images = result[0]
+    elif hasattr(result, 'images'):
+        images = result.images
+    else:
+        raise ValueError(f"Unexpected result type: {type(result)}")
+
+    print(f"Result type: {type(result)}")
+    print(f"Result content: {result}")
     print(f"Mask shape: {mask.size}")
     print(f"Human image shape: {human_img.size}")
     print(f"Garment image shape: {garm_img.size}")
@@ -242,20 +252,6 @@ def start_tryon(dict, garm_img, garment_des, is_checked, is_checked_crop, denois
     else:
         return images[0], mask_gray, status_message
 
-garm_list = os.listdir(os.path.join(example_path,"cloth"))
-garm_list_path = [os.path.join(example_path,"cloth",garm) for garm in garm_list]
-
-human_list = os.listdir(os.path.join(example_path,"human"))
-human_list_path = [os.path.join(example_path,"human",human) for human in human_list]
-
-human_ex_list = []
-for ex_human in human_list_path:
-    ex_dict= {}
-    ex_dict['background'] = ex_human
-    ex_dict['layers'] = None
-    ex_dict['composite'] = None
-    human_ex_list.append(ex_dict)
-
 image_blocks = gr.Blocks(theme="Nymbo/Nymbo_Theme").queue(max_size=12)
 with image_blocks as demo:
     with gr.Column():
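For context, the new branching mirrors the two return conventions of diffusers-style pipelines: by default a call returns an output object exposing an `.images` attribute, while calling with `return_dict=False` yields a plain tuple whose first element is the image list. The sketch below is illustrative only and not part of this commit; the helper name `extract_images` and the usage lines are assumptions.

# Illustrative helper (not in app.py): unpack images from either return form
# of a diffusers-style pipeline.
from typing import Any, List
from PIL import Image

def extract_images(result: Any) -> List[Image.Image]:
    # return_dict=False: the pipeline returns a plain tuple; the image list
    # is its first element.
    if isinstance(result, tuple):
        return result[0]
    # Default call: the pipeline returns an output object with `.images`.
    if hasattr(result, "images"):
        return result.images
    raise ValueError(f"Unexpected result type: {type(result)}")

# Assumed usage, mirroring the diff:
# result = pipe(prompt_embeds=..., guidance_scale=2.0)
# images = extract_images(result)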