JoPmt committed on
Commit
d49a0b0
1 Parent(s): fc4f323

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +12 -14
app.py CHANGED
@@ -1,4 +1,5 @@
1
- import os, json, re, sys, subprocess, gc, tqdm, math, time, random, threading, spaces, numpy, torch
 
2
  import gradio as gr
3
  from PIL import Image, ImageOps
4
  from moviepy import VideoFileClip
@@ -115,7 +116,7 @@ def convert_to_gif(video_path):
115
  return gif_path
116
 
117
  @spaces.GPU(duration=180)
118
- def plex(prompt,image_input,seed_value,scale_status,rife_status,progress=gr.Progress(track_tqdm=True)):
119
  seed = seed_value
120
  if seed == -1:
121
  seed = random.randint(0, 2**8 - 1)
@@ -134,7 +135,7 @@ def plex(prompt,image_input,seed_value,scale_status,rife_status,progress=gr.Prog
134
  image = ImageOps.exif_transpose(Image.fromarray(tensor))
135
  prompt = prompt.strip('"')
136
  generator = torch.Generator(device).manual_seed(seed) if seed else None
137
- video_pt = pipe(prompt=prompt,image=image,num_videos_per_prompt=1,num_inference_steps=10,num_frames=49,use_dynamic_cfg=False,guidance_scale=7.0,generator=generator,id_vit_hidden=id_vit_hidden,id_cond=id_cond,kps_cond=kps_cond,output_type="pt",)
138
  latents = video_pt.frames
139
  ##free_memory()
140
  if scale_status:
@@ -169,7 +170,7 @@ examples_images = [
169
  with gr.Blocks() as demo:
170
  gr.Markdown("""
171
  <div style="text-align: center; font-size: 32px; font-weight: bold; margin-bottom: 20px;">
172
- ConsisID Space🤗
173
  </div>
174
  <div style="text-align: center;">
175
  <a href="https://huggingface.co/BestWishYsh/ConsisID">🤗 Model Hub</a> |
@@ -192,20 +193,17 @@ with gr.Blocks() as demo:
192
  with gr.Column():
193
  with gr.Accordion("IPT2V: Face Input", open=True):
194
  image_input = gr.Image(label="Input Image (should contain clear face)")
195
- prompt = gr.Textbox(label="Prompt (Less than 200 Words)", placeholder="Enter your prompt here", lines=5)
196
 
197
  with gr.Group():
198
  with gr.Column():
199
  with gr.Row():
200
- seed_param = gr.Number(
201
- label="Inference Seed (Enter a positive number, -1 for random)", value=42
202
- )
203
  with gr.Row():
204
- enable_scale = gr.Checkbox(label="Super-Resolution (720 × 480 -> 2880 × 1920)", value=False)
205
- enable_rife = gr.Checkbox(label="Frame Interpolation (8fps -> 16fps)", value=True)
206
- gr.Markdown(
207
- "✨In this demo, we use [RIFE](https://github.com/hzwer/ECCV2022-RIFE) for frame interpolation and [Real-ESRGAN](https://github.com/xinntao/Real-ESRGAN) for upscaling(Super-Resolution)."
208
- )
209
 
210
  generate_button = gr.Button("🎬 Generate Video")
211
 
@@ -223,7 +221,7 @@ with gr.Blocks() as demo:
223
  )
224
  generate_button.click(
225
  fn=plex,
226
- inputs=[prompt, image_input, seed_param, enable_scale, enable_rife],
227
  outputs=[video_output, download_video_button, download_gif_button, seed_text],
228
  )
229
 
 
1
+ import os, json, re, sys, subprocess, gc, tqdm, math, time, random, threading, spaces, torch
2
+ import numpy as np
3
  import gradio as gr
4
  from PIL import Image, ImageOps
5
  from moviepy import VideoFileClip
 
116
  return gif_path
117
 
118
  @spaces.GPU(duration=180)
119
+ def plex(prompt,image_input,stips,gscale,seed_value,scale_status,rife_status,progress=gr.Progress(track_tqdm=True)):
120
  seed = seed_value
121
  if seed == -1:
122
  seed = random.randint(0, 2**8 - 1)
 
135
  image = ImageOps.exif_transpose(Image.fromarray(tensor))
136
  prompt = prompt.strip('"')
137
  generator = torch.Generator(device).manual_seed(seed) if seed else None
138
+ video_pt = pipe(prompt=prompt,image=image,num_videos_per_prompt=1,num_inference_steps=stips,num_frames=49,use_dynamic_cfg=False,guidance_scale=gscale,generator=generator,id_vit_hidden=id_vit_hidden,id_cond=id_cond,kps_cond=kps_cond,output_type="pt",)
139
  latents = video_pt.frames
140
  ##free_memory()
141
  if scale_status:
 
170
  with gr.Blocks() as demo:
171
  gr.Markdown("""
172
  <div style="text-align: center; font-size: 32px; font-weight: bold; margin-bottom: 20px;">
173
+ 🤗ConsisID Space🤗
174
  </div>
175
  <div style="text-align: center;">
176
  <a href="https://huggingface.co/BestWishYsh/ConsisID">🤗 Model Hub</a> |
 
193
  with gr.Column():
194
  with gr.Accordion("IPT2V: Face Input", open=True):
195
  image_input = gr.Image(label="Input Image (should contain clear face)")
196
+ prompt = gr.Textbox(label="Prompt (Less than 200 Words)", placeholder="Enter your prompt here", lines=3)
197
 
198
  with gr.Group():
199
  with gr.Column():
200
  with gr.Row():
201
+ stips = gr.Slider(label="Steps", minimum=6, step=1, maximum=10, value=10)
202
+ gscale = gr.Slider(label="Guidance scale", minimum=1, step=0.1, maximum=20, value=7.0)
203
+ seed_param = gr.Slider(label="Inference Seed (Leave -1 for random)", minimum=0, step=32, maximum=2**8 - 1, value=-1)
204
  with gr.Row():
205
+ enable_scale = gr.Checkbox(label="Super-Resolution (720 × 480 -> 2880 × 1920) [Real-ESRGAN](https://github.com/xinntao/Real-ESRGAN)", value=False)
206
+ enable_rife = gr.Checkbox(label="Frame Interpolation (8fps -> 16fps) [RIFE](https://github.com/hzwer/ECCV2022-RIFE)", value=True)
 
 
 
207
 
208
  generate_button = gr.Button("🎬 Generate Video")
209
 
 
221
  )
222
  generate_button.click(
223
  fn=plex,
224
+ inputs=[prompt, image_input, stips, gscale, seed_param, enable_scale, enable_rife],
225
  outputs=[video_output, download_video_button, download_gif_button, seed_text],
226
  )
227