|
import gradio as gr |
|
import requests |
|
from PIL import Image |
|
import os |
|
# Hugging Face API token, used to authorize the talking-face Space and the
# final media download. May be None if HF_TOKEN is not set in the environment.
token = os.environ.get('HF_TOKEN')

# Remote Space: transcribes microphone audio with Whisper and gets a chatGPT reply.
whisper_to_gpt = gr.Blocks.load(name="spaces/fffiloni/whisper-to-chatGPT")

# Remote Space: IMS-Toucan text-to-speech (called with English text/accent/voice below).
tts = gr.Interface.load(name="spaces/Flux9665/IMS-Toucan")

# Remote Space: animates a still portrait to lip-sync the synthesized audio.
# NOTE(review): api_key is passed here but not to the other two loads — presumably
# only this Space is private/gated; confirm.
talking_face = gr.Blocks.load(name="spaces/fffiloni/one-shot-talking-face", api_key=token)
|
|
|
def infer(audio):
    """Run the full voice-chat pipeline for one recording.

    Pipeline: microphone audio -> Whisper+chatGPT reply text -> synthesized
    English speech -> talking-face video of a portrait speaking the reply.

    Parameters
    ----------
    audio : str
        Filepath of the recorded audio (gr.Audio with type="filepath").

    Returns
    -------
    tuple
        (audio_response, portrait_link): the TTS audio and a link to the
        generated talking-face video.
    """
    # Transcribe and chat; index 1 of the Space's output tuple is the reply text.
    gpt_response = whisper_to_gpt(audio, "translate", fn_index=0)

    # Synthesize the reply with an English voice.
    audio_response = tts(gpt_response[1], "English Text", "English Accent", "English Speaker's Voice", fn_index=0)

    # Animate the stock portrait to lip-sync the synthesized audio.
    portrait_link = talking_face("wise_woman_portrait.png", audio_response, fn_index=0)

    # Fetch the generated media for debugging. Only attach the auth header when a
    # token exists ('Bearer ' + None would raise TypeError), and bound the request
    # with a timeout so a stalled server cannot hang the app indefinitely.
    headers = {'Authorization': 'Bearer ' + token} if token else {}
    portrait_response = requests.get(portrait_link, headers=headers, timeout=60)
    print(portrait_response.text)

    return audio_response, portrait_link
|
|
|
# Wire the pipeline into a simple Gradio interface: one microphone input,
# two outputs (the synthesized reply audio and the talking-face video).
mic = gr.Audio(source="microphone", type="filepath")
reply_audio = gr.Audio()
reply_video = gr.Video()

demo = gr.Interface(fn=infer, inputs=mic, outputs=[reply_audio, reply_video])

demo.launch()
|
|
|
|