import os
import stat
import uuid
import subprocess
import tempfile
from zipfile import ZipFile
import gradio as gr
import spaces
from googletrans import Translator
from TTS.api import TTS
from faster_whisper import WhisperModel
import soundfile as sf
import numpy as np
import cv2
from huggingface_hub import HfApi
HF_TOKEN = os.environ.get("HF_TOKEN")
os.environ["COQUI_TOS_AGREED"] = "1"
api = HfApi(token=HF_TOKEN)
repo_id = "artificialguybr/video-dubbing"
# Extract the bundled FFmpeg build and make the binary executable so the
# subprocess calls below can invoke it
ZipFile("ffmpeg.zip").extractall()
st = os.stat('ffmpeg')
os.chmod('ffmpeg', st.st_mode | stat.S_IEXEC)
# Whisper model initialization
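# The "small" checkpoint on CPU with int8 quantization trades a little accuracy
# for speed and low memory use, in line with the Space's focus on fast turnaround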
model_size = "small"
model = WhisperModel(model_size, device="cpu", compute_type="int8")
def check_for_faces(video_path):
    """Scan a video frame by frame and report whether any frontal face is detected."""
    face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
    cap = cv2.VideoCapture(video_path)
    try:
        while True:
            ret, frame = cap.read()
            if not ret:
                break
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            faces = face_cascade.detectMultiScale(gray, 1.1, 4)
            if len(faces) > 0:
                return True
        return False
    finally:
        cap.release()  # release the capture handle on every exit path
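# Used by process_video below: when the close-up checkbox is unchecked, the video
# is scanned for a face (the result is currently informational only)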
@spaces.GPU
def process_video(radio, video, target_language, has_closeup_face):
    if target_language is None:
        raise gr.Error("Please select a target language for dubbing.")
    run_uuid = uuid.uuid4().hex[:6]
    output_filename = f"{run_uuid}_resized_video.mp4"
    # Resize the video to 720p with FFmpeg
    subprocess.run(['ffmpeg', '-i', video, '-vf', 'scale=-2:720', output_filename])
    video_path = output_filename
    if not os.path.exists(video_path):
        raise gr.Error(f"Error: {video_path} does not exist.")
    # Check the video duration; the demo caps input at 1 minute
    # (raise this threshold if you duplicate the Space)
    video_info = subprocess.run(['ffprobe', '-v', 'error', '-show_entries', 'format=duration', '-of', 'default=noprint_wrappers=1:nokey=1', video_path], capture_output=True, text=True)
    video_duration = float(video_info.stdout)
    if video_duration > 60:
        os.remove(video_path)
        raise gr.Error("Video duration exceeds 1 minute. Please upload a shorter video.")
    # Extract the audio track as 48 kHz 24-bit PCM
    subprocess.run(['ffmpeg', '-i', video_path, '-acodec', 'pcm_s24le', '-ar', '48000', '-map', 'a', f"{run_uuid}_output_audio.wav"])
    # Band-pass the audio (100 Hz to 3 kHz) to clean it up for transcription and voice cloning
    subprocess.run(['ffmpeg', '-y', '-i', f"{run_uuid}_output_audio.wav", '-af', 'lowpass=3000,highpass=100', f"{run_uuid}_output_audio_final.wav"])
    print("Attempting to transcribe with Whisper...")
    try:
        segments, info = model.transcribe(f"{run_uuid}_output_audio_final.wav", beam_size=5)
        whisper_text = " ".join(segment.text for segment in segments)
        whisper_language = info.language
        print(f"Transcription successful: {whisper_text}")
    except RuntimeError as e:
        print(f"RuntimeError encountered: {str(e)}")
        if "CUDA failed with error device-side assert triggered" in str(e):
            gr.Warning("Error. Space needs to restart. Please retry in a minute.")
            api.restart_space(repo_id=repo_id)
    language_mapping = {'English': 'en', 'Spanish': 'es', 'French': 'fr', 'German': 'de', 'Italian': 'it', 'Portuguese': 'pt', 'Polish': 'pl', 'Turkish': 'tr', 'Russian': 'ru', 'Dutch': 'nl', 'Czech': 'cs', 'Arabic': 'ar', 'Chinese (Simplified)': 'zh-cn'}
    target_language_code = language_mapping[target_language]
    translator = Translator()
    translated_text = translator.translate(whisper_text, src=whisper_language, dest=target_language_code).text
    print(translated_text)
    # Synthesize the translated speech with XTTS v2, cloning the voice from the extracted audio
    tts = TTS("tts_models/multilingual/multi-dataset/xtts_v2")
    tts.tts_to_file(translated_text, speaker_wav=f"{run_uuid}_output_audio_final.wav", file_path=f"{run_uuid}_output_synth.wav", language=target_language_code)
    # The detection result is informational; the Wav2Lip branch follows the user's checkbox
    has_face = check_for_faces(video_path) if not has_closeup_face else True
    if has_closeup_face:
        try:
            # Lip-sync the new audio onto the face with Wav2Lip; stderr is captured
            # so the "face not detected" failure can be recognized below
            subprocess.run(['python', 'Wav2Lip/inference.py', '--checkpoint_path', 'Wav2Lip/checkpoints/wav2lip_gan.pth', '--face', video_path, '--audio', f'{run_uuid}_output_synth.wav', '--pads', '0', '15', '0', '0', '--resize_factor', '1', '--nosmooth', '--outfile', f'{run_uuid}_output_video.mp4'], check=True, capture_output=True, text=True)
        except subprocess.CalledProcessError as e:
            if "Face not detected! Ensure the video contains a face in all the frames." in str(e.stderr):
                gr.Warning("Wav2Lip didn't detect a face. Please try again with the option disabled.")
                # Fall back to muxing the synthesized audio over the original video
                subprocess.run(['ffmpeg', '-i', video_path, '-i', f'{run_uuid}_output_synth.wav', '-c:v', 'copy', '-c:a', 'aac', '-strict', 'experimental', '-map', '0:v:0', '-map', '1:a:0', f'{run_uuid}_output_video.mp4'])
    else:
        # No close-up face: replace the audio track without lip-syncing
        subprocess.run(['ffmpeg', '-i', video_path, '-i', f'{run_uuid}_output_synth.wav', '-c:v', 'copy', '-c:a', 'aac', '-strict', 'experimental', '-map', '0:v:0', '-map', '1:a:0', f'{run_uuid}_output_video.mp4'])
    if not os.path.exists(f"{run_uuid}_output_video.mp4"):
        raise FileNotFoundError(f"Error: {run_uuid}_output_video.mp4 was not generated.")
    output_video_path = f"{run_uuid}_output_video.mp4"
    # Clean up intermediate files
    files_to_delete = [
        f"{run_uuid}_resized_video.mp4",
        f"{run_uuid}_output_audio.wav",
        f"{run_uuid}_output_audio_final.wav",
        f"{run_uuid}_output_synth.wav"
    ]
    for file in files_to_delete:
        try:
            os.remove(file)
        except FileNotFoundError:
            print(f"File {file} not found for deletion.")
    return output_video_path
def swap(radio):
    if radio == "Upload":
        return gr.update(source="upload")
    else:
        return gr.update(source="webcam")
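# swap switches the Video input between file upload and webcam recording when
# the radio selection changes (wired up via radio.change in the Blocks below)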
video = gr.Video()
radio = gr.Radio(["Upload", "Record"], value="Upload", show_label=False)
iface = gr.Interface(
    fn=process_video,
    inputs=[
        radio,
        video,
        gr.Dropdown(choices=["English", "Spanish", "French", "German", "Italian", "Portuguese", "Polish", "Turkish", "Russian", "Dutch", "Czech", "Arabic", "Chinese (Simplified)"], label="Target Language for Dubbing", value="Spanish"),
        gr.Checkbox(
            label="Video has a close-up face. Use Wav2Lip.",
            value=False,
            info="Check this if the video shows a close-up face, so Wav2Lip can lip-sync the dub. Dubbing will not work as expected if set incorrectly.")
    ],
    outputs=gr.Video(),
    live=False,
    title="AI Video Dubbing",
    description="""This tool was developed by [@artificialguybr](https://twitter.com/artificialguybr) using entirely open-source tools. Special thanks to Hugging Face for the GPU support. Thanks to [@yeswondwerr](https://twitter.com/yeswondwerr) for the original code. Test the [Video Transcription and Translation](https://huggingface.co/spaces/artificialguybr/VIDEO-TRANSLATION-TRANSCRIPTION) space!""",
    allow_flagging="never"
)
with gr.Blocks() as demo:
    iface.render()
    radio.change(swap, inputs=[radio], outputs=video)
    gr.Markdown("""
    **Note:**
    - Video limit is 1 minute. All speakers are dubbed with a single voice.
    - Generation may take up to 5 minutes.
    - By using this demo you agree to the terms of the Coqui Public Model License at https://coqui.ai/cpml
    - The tool relies on open-source models throughout. It's an alpha version.
    - Quality could be improved, but that would require more processing time per video. Given scalability and hardware constraints, speed was prioritized over quality alone.
    - If you need more than 1 minute, duplicate the Space and change the limit in app.py.
    - If you check the 'Video has a close-up face' box incorrectly, the dubbing may not work as expected.
    """)
demo.queue(concurrency_count=1, max_size=15)
demo.launch()