artificialguybr committed
Commit 5d311f1 (1 parent: b1f60b9)

Update app.py

Files changed (1): app.py (+6 -1)
app.py CHANGED

@@ -110,7 +110,10 @@ def process_video(radio, video, target_language):
 
     video_path_fix = video_path
 
-    has_face = check_for_faces(video_path)
+    if has_closeup_face:
+        has_face = True
+    else:
+        has_face = check_for_faces(video_path)
 
     if has_face:
         cmd = f"python Wav2Lip/inference.py --checkpoint_path 'Wav2Lip/checkpoints/wav2lip_gan.pth' --face {shlex.quote(video_path)} --audio '{run_uuid}_output_synth.wav' --pads {pad_top} {pad_bottom} {pad_left} {pad_right} --resize_factor {rescaleFactor} --nosmooth --outfile '{run_uuid}_output_video.mp4'"
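The new branch reads has_closeup_face, but the hunk header still shows the old signature def process_video(radio, video, target_language):, so the flag has to reach the function through a change outside this hunk. Below is a minimal sketch of how that wiring could look, assuming the checkbox value is simply passed as an extra parameter; the parameter name mirrors the variable in the diff, and check_for_faces is the helper already defined in app.py.

def process_video(radio, video, target_language, has_closeup_face=False):
    # Sketch only: the body is trimmed to the face-detection step touched by this commit.
    video_path = video  # assumption: in app.py the path is derived from the Gradio video input
    # When the user asserts a close-up face is present, skip the detection pass
    # and go straight to Wav2Lip lip-syncing.
    if has_closeup_face:
        has_face = True
    else:
        has_face = check_for_faces(video_path)  # existing helper in app.py
    return has_face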
@@ -155,6 +158,7 @@ iface = gr.Interface(
         radio,
         video,
         gr.Dropdown(choices=["English", "Spanish", "French", "German", "Italian", "Portuguese", "Polish", "Turkish", "Russian", "Dutch", "Czech", "Arabic", "Chinese (Simplified)"], label="Target Language for Dubbing", value="Spanish")
+        checkbox = gr.Checkbox(label="Video has a close-up face", default=False)
     ],
     outputs=gr.Video(),
     live=False,
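As committed, the added line sits inside the inputs=[...] list right after a gr.Dropdown(...) with no trailing comma, and it assigns to a name (checkbox = ...) inside the list literal, which Python rejects; default= is also the older Gradio 2.x keyword, while the surrounding components use the 3.x style (value=, gr.Video()). A minimal corrected sketch, assuming Gradio 3.x and that the checkbox is meant to be the fourth input feeding has_closeup_face; radio, video, and process_video are the objects already defined in app.py.

import gradio as gr

# Sketch, assuming Gradio 3.x: the checkbox becomes the fourth positional input,
# so its boolean value arrives in process_video as has_closeup_face.
iface = gr.Interface(
    fn=process_video,  # existing function in app.py
    inputs=[
        radio,  # existing gr.Radio component defined earlier in app.py
        video,  # existing gr.Video component defined earlier in app.py
        gr.Dropdown(
            choices=["English", "Spanish", "French", "German", "Italian",
                     "Portuguese", "Polish", "Turkish", "Russian", "Dutch",
                     "Czech", "Arabic", "Chinese (Simplified)"],
            label="Target Language for Dubbing",
            value="Spanish",
        ),
        gr.Checkbox(label="Video has a close-up face", value=False),
    ],
    outputs=gr.Video(),
    live=False,
)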
@@ -172,6 +176,7 @@ with gr.Blocks() as demo:
     - The tool uses open-source models for every step. It's an alpha version.
     - Quality could be improved, but that would require more processing time per video. Given scalability and hardware limitations, speed was prioritized over maximum quality.
     - If you need more than 1 minute, duplicate the Space and change the limit in app.py.
+    - If you incorrectly mark the 'Video has a close-up face' checkbox, the dubbing may not work as expected.
     """)
     demo.queue(concurrency_count=1, max_size=15)
     demo.launch()
 