import gradio as gr
import torch
from datasets import load_dataset
from transformers import SpeechT5ForTextToSpeech, SpeechT5HifiGan, SpeechT5Processor

model_id = "gitgato/tts-model-v2"  # update with your model id
model = SpeechT5ForTextToSpeech.from_pretrained(model_id)
vocoder = SpeechT5HifiGan.from_pretrained("microsoft/speecht5_hifigan")
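# A single x-vector from the CMU ARCTIC dataset is used as the speaker embedding
# that conditions the voice of the generated speech.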
embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
speaker_embeddings = torch.tensor(embeddings_dataset[7440]["xvector"]).unsqueeze(0)

# checkpoint = "microsoft/speecht5_tts"
processor = SpeechT5Processor.from_pretrained(model_id)

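# Strip accents from French characters; the fine-tuned tokenizer's vocabulary
# may not include the accented variants directly.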
replacements = [
    ("à", "a"),
    ("â", "a"),
    ("ç", "c"),
    ("è", "e"),
    ("ë", "e"),
    ("î", "i"),
    ("ï", "i"),
    ("ô", "o"),
    ("ù", "u"),
    ("û", "u"),
    ("ü", "u"),
]


title = "Text-to-Speech"
description = """
Demo for French text-to-speech. It uses the [gitgato/trtts](https://huggingface.co) checkpoint, which is based on Microsoft's
[SpeechT5 TTS](https://huggingface.co/microsoft/speecht5_tts) model and fine-tuned on a French audio dataset.
![Text-to-Speech (TTS)](https://geekflare.com/wp-content/uploads/2021/07/texttospeech-1200x385.png "Diagram of Text-to-Speech (TTS)")
"""

def cleanup_text(text):
    for src, dst in replacements:
        text = text.replace(src, dst)
    return text

def synthesize_speech(text):
    text = cleanup_text(text)
    inputs = processor(text=text, return_tensors="pt")

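    # generate_speech returns a 1-D waveform tensor sampled at 16 kHz.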
    speech = model.generate_speech(inputs["input_ids"], speaker_embeddings, vocoder=vocoder)

    # Return (sample_rate, waveform) so the gr.Audio output component can play it
    return (16000, speech.cpu().numpy())

# Define the Gradio interface
synthesize_speech_gradio = gr.Interface(
    synthesize_speech,
    inputs=gr.Textbox(label="Text", placeholder="Type something here..."),
    outputs=gr.Audio(label="Speech"),
    examples=["Hola, probando audio."],
    title=title,
    description=description,
)

# Launch the interface
synthesize_speech_gradio.launch()