JoPmt committed
Commit a13051b
1 Parent(s): 3016d83

Update app.py

Files changed (1)
app.py +46 -35
app.py CHANGED
@@ -93,7 +93,7 @@ face_clip_model.to(device, dtype=dtype)
 face_helper.face_det.to(device)
 face_helper.face_parse.to(device)
 transformer.to(device, dtype=dtype)
-free_memory()
+##free_memory()
 
 pipe = ConsisIDPipeline.from_pretrained(model_path, transformer=transformer, scheduler=scheduler, torch_dtype=dtype)
 # If you're using with lora, add this code
@@ -140,49 +140,60 @@ def delete_old_files():
                         os.remove(file_path)
         time.sleep(600)
 
-
-##threading.Thread(target=delete_old_files, daemon=True).start()
-@spaces.GPU(duration=70)
-def generate(
-    prompt,
-    image_input,
-    seed_value,
-    scale_status,
-    rife_status,
-    progress=gr.Progress(track_tqdm=True)
+def infer(
+    prompt: str,
+    image_input: str,
+    num_inference_steps: int,
+    guidance_scale: float,
+    seed: int = 42,
+    progress=gr.Progress(track_tqdm=True),
 ):
-    def infer(prompt: str,image_input: str,num_inference_steps: int,guidance_scale: float,seed: int = 42,progress=gr.Progress(track_tqdm=True),):
-        if seed == -1:
-            seed = random.randint(0, 2**8 - 1)
+    if seed == -1:
+        seed = random.randint(0, 2**8 - 1)
 
-        id_image = np.array(ImageOps.exif_transpose(Image.fromarray(image_input)).convert("RGB"))
-        id_image = resize_numpy_image_long(id_image, 1024)
-        id_cond, id_vit_hidden, align_crop_face_image, face_kps = process_face_embeddings(face_helper, face_clip_model, handler_ante,
+    id_image = np.array(ImageOps.exif_transpose(Image.fromarray(image_input)).convert("RGB"))
+    id_image = resize_numpy_image_long(id_image, 1024)
+    id_cond, id_vit_hidden, align_crop_face_image, face_kps = process_face_embeddings(face_helper, face_clip_model, handler_ante,
                                                                                       eva_transform_mean, eva_transform_std,
                                                                                       face_main_model, device, dtype, id_image,
                                                                                       original_id_image=id_image, is_align_face=True,
                                                                                       cal_uncond=False)
-
-        if is_kps:
-            kps_cond = face_kps
-        else:
-            kps_cond = None
-
-        tensor = align_crop_face_image.cpu().detach()
-        tensor = tensor.squeeze()
-        tensor = tensor.permute(1, 2, 0)
-        tensor = tensor.numpy() * 255
-        tensor = tensor.astype(np.uint8)
-        image = ImageOps.exif_transpose(Image.fromarray(tensor))
-
-        prompt = prompt.strip('"')
+    if is_kps:
+        kps_cond = face_kps
+    else:
+        kps_cond = None
+
+    tensor = align_crop_face_image.cpu().detach()
+    tensor = tensor.squeeze()
+    tensor = tensor.permute(1, 2, 0)
+    tensor = tensor.numpy() * 255
+    tensor = tensor.astype(np.uint8)
+    image = ImageOps.exif_transpose(Image.fromarray(tensor))
+
+    prompt = prompt.strip('"')
 
-        generator = torch.Generator(device).manual_seed(seed) if seed else None
+    generator = torch.Generator(device).manual_seed(seed) if seed else None
 
-        video_pt = pipe(prompt=prompt,image=image,num_videos_per_prompt=1,num_inference_steps=num_inference_steps,num_frames=49,use_dynamic_cfg=False,guidance_scale=guidance_scale,generator=generator,id_vit_hidden=id_vit_hidden,id_cond=id_cond,kps_cond=kps_cond,output_type="pt",).frames
-
-        ##free_memory()
-        return video_pt, seed
+    video_pt = pipe(
+        prompt=prompt,
+        image=image,
+        num_videos_per_prompt=1,
+        num_inference_steps=num_inference_steps,
+        num_frames=49,
+        use_dynamic_cfg=False,
+        guidance_scale=guidance_scale,
+        generator=generator,
+        id_vit_hidden=id_vit_hidden,
+        id_cond=id_cond,
+        kps_cond=kps_cond,
+        output_type="pt",
+    ).frames
 
+    ##free_memory()
+    return video_pt, seed
+
+##threading.Thread(target=delete_old_files, daemon=True).start()
+@spaces.GPU(duration=70)
+def generate(prompt,image_input,seed_value,scale_status,rife_status,progress=gr.Progress(track_tqdm=True)):
 
     latents, seed = infer(prompt,image_input,num_inference_steps=4,guidance_scale=7.0,seed=seed_value,progress=progress,)
    if scale_status:
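
After this commit, infer() is a top-level function and the @spaces.GPU-decorated generate() is only a thin wrapper around it, so infer() no longer closes over generate()'s locals and can be exercised on its own. Two of the steps inside the new infer() are self-contained and worth seeing in isolation: seeding the torch.Generator and converting the aligned face crop back to a PIL image. A minimal, runnable sketch of just those two steps (the helper names make_generator and tensor_to_pil are illustrative, not part of the Space):

    import random

    import numpy as np
    import torch
    from PIL import Image, ImageOps

    def make_generator(seed: int, device: str = "cpu"):
        # -1 means "pick a random seed"; note the committed code samples only 0..255
        if seed == -1:
            seed = random.randint(0, 2**8 - 1)
        # As in the commit, a seed of 0 is falsy and yields no fixed generator
        generator = torch.Generator(device).manual_seed(seed) if seed else None
        return generator, seed

    def tensor_to_pil(face: torch.Tensor) -> Image.Image:
        # Mirrors the committed conversion: float CHW in [0, 1] -> uint8 HWC -> PIL
        t = face.cpu().detach().squeeze()  # drop a leading batch dimension, if any
        t = t.permute(1, 2, 0)             # CHW -> HWC
        arr = (t.numpy() * 255).astype(np.uint8)
        return ImageOps.exif_transpose(Image.fromarray(arr))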