File size: 4,753 Bytes
b970552
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
622bd75
b970552
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
0085b92
 
b970552
 
 
 
0085b92
 
b970552
 
 
 
0085b92
b970552
 
 
 
 
 
 
 
 
 
0085b92
b970552
 
0085b92
b970552
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
0085b92
 
 
 
f0c14a6
0085b92
 
 
 
b970552
 
 
 
 
 
 
 
c2fa5b6
 
0085b92
b970552
 
0085b92
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
import gradio as gr
import torch
import soundfile as sf
import spaces
import os
import numpy as np
import re
from transformers import SpeechT5Processor, SpeechT5ForTextToSpeech, SpeechT5HifiGan
from speechbrain.pretrained import EncoderClassifier
from datasets import load_dataset

# Pick the GPU when available; every model and tensor below is moved to this device.
device = "cuda" if torch.cuda.is_available() else "cpu"

def load_models_and_data():
    """Load the TTS stack, the x-vector speaker encoder, and a reference dataset row.

    Returns:
        Tuple ``(model, processor, vocoder, speaker_model, example)`` where
        ``example`` is one record from a Turkish speech dataset, later used
        to build the default speaker embedding.
    """
    base_checkpoint = "microsoft/speecht5_tts"
    processor = SpeechT5Processor.from_pretrained(base_checkpoint)
    model = SpeechT5ForTextToSpeech.from_pretrained(
        "Omarrran/turkish_finetuned_speecht5_tts"
    ).to(device)
    vocoder = SpeechT5HifiGan.from_pretrained("microsoft/speecht5_hifigan").to(device)

    # Speaker encoder that turns raw waveforms into x-vector embeddings.
    xvector_source = "speechbrain/spkrec-xvect-voxceleb"
    speaker_model = EncoderClassifier.from_hparams(
        source=xvector_source,
        run_opts={"device": device},
        savedir=os.path.join("/tmp", xvector_source),
    )

    # A fixed dataset row (index 304) supplies the default voice's audio.
    dataset = load_dataset("erenfazlioglu/turkishvoicedataset", split="train")
    example = dataset[304]

    return model, processor, vocoder, speaker_model, example

# Load everything once at import time so every Gradio request reuses the same models.
model, processor, vocoder, speaker_model, default_example = load_models_and_data()

def create_speaker_embedding(waveform):
    """Encode a raw waveform into an L2-normalized x-vector speaker embedding.

    Args:
        waveform: 1-D array-like of audio samples.

    Returns:
        A torch tensor with the batch/utterance dimensions squeezed away.
    """
    batch = torch.tensor(waveform).unsqueeze(0).to(device)
    with torch.no_grad():
        embedding = speaker_model.encode_batch(batch)
        embedding = torch.nn.functional.normalize(embedding, dim=2)
    return embedding.squeeze()

def prepare_default_embedding(example):
    """Build the default speaker embedding from a dataset record's audio array."""
    samples = example["audio"]["array"]
    return create_speaker_embedding(samples)

# Precompute the default voice embedding once; text_to_speech reuses it on every call.
default_embedding = prepare_default_embedding(default_example)

# Transliteration pairs mapping Turkish-specific characters to ASCII approximations
# before tokenization — presumably because these characters are missing from the
# processor's vocabulary (TODO confirm against the tokenizer).
replacements = [
    ("â", "a"), ("ç", "ch"), ("ğ", "gh"), ("ı", "i"), ("î", "i"),
    ("ö", "oe"), ("ş", "sh"), ("ü", "ue"), ("û", "u"),
]

# Turkish words for digits, tens, and the scale units used by number_to_words.
number_words = {
    0: "sıfır", 1: "bir", 2: "iki", 3: "üç", 4: "dört", 5: "beş", 6: "altı", 7: "yedi", 8: "sekiz", 9: "dokuz",
    10: "on", 20: "yirmi", 30: "otuz", 40: "kırk", 50: "elli", 60: "altmış", 70: "yetmiş", 80: "seksen", 90: "doksan",
    100: "yüz", 1000: "bin"
}

def number_to_words(number):
    """Spell a non-negative integer in Turkish words.

    Supports 0..999999; larger values are returned unchanged as digit strings.

    Fixes a bug in the original ``number < 20`` branch: 11-19 are not keys in
    ``number_words``, so ``.get(number, str(number))`` silently returned raw
    digit strings (e.g. "11" instead of "on bir"). They are now composed as
    tens + unit like every other two-digit number.

    Args:
        number: Non-negative int to spell out.

    Returns:
        The Turkish spelling, e.g. 11 -> "on bir", 150 -> "yüz elli".
    """
    if number < 10:
        return number_words[number]
    elif number < 100:
        # 11-19 compose as "on" + unit, the same as 21-99.
        tens, unit = divmod(number, 10)
        return number_words[tens * 10] + (" " + number_words[unit] if unit else "")
    elif number < 1000:
        hundreds, remainder = divmod(number, 100)
        # Turkish omits "bir" before "yüz": 100 is just "yüz".
        prefix = number_words[hundreds] + " yüz" if hundreds > 1 else "yüz"
        return prefix + (" " + number_to_words(remainder) if remainder else "")
    elif number < 1000000:
        thousands, remainder = divmod(number, 1000)
        # Likewise "bin", not "bir bin", for exactly 1000.
        prefix = number_to_words(thousands) + " bin" if thousands > 1 else "bin"
        return prefix + (" " + number_to_words(remainder) if remainder else "")
    else:
        return str(number)  # Out of supported range: leave as digits.

def replace_numbers_with_words(text):
    """Replace every standalone digit run in *text* with its Turkish spelling."""
    def spell(match):
        return number_to_words(int(match.group()))

    return re.sub(r'\b\d+\b', spell, text)

def normalize_text(text):
    """Lower-case, spell out numbers, transliterate Turkish letters, strip punctuation."""
    lowered = text.lower()
    worded = replace_numbers_with_words(lowered)
    for src, dst in replacements:
        worded = worded.replace(src, dst)
    # Drop every character that is neither a word character nor whitespace.
    return re.sub(r'[^\w\s]', '', worded)

@spaces.GPU(duration=60)
def text_to_speech(text, audio_file=None):
    """Synthesize Turkish speech for *text* using the default speaker voice.

    Args:
        text: Raw Turkish input text.
        audio_file: Currently unused; kept so the call signature stays stable.

    Returns:
        A ``(sample_rate, samples)`` tuple for ``gr.Audio`` — 16 kHz mono numpy array.
    """
    cleaned = normalize_text(text)
    encoded = processor(text=cleaned, return_tensors="pt").to(device)

    # NOTE(review): always the precomputed default voice; audio_file is ignored.
    embedding = default_embedding.unsqueeze(0)
    with torch.no_grad():
        waveform = model.generate_speech(encoded["input_ids"], embedding, vocoder=vocoder)

    return (16000, waveform.cpu().numpy())

# Clickable example inputs shown beneath the Gradio interface.
example_sentences = [
    "Merhaba, nasılsın?",
    "Bugün hava çok güzel. Merhaba, yapay zeka ve makine öğrenmesi konularında bilgisayar donanımı teşekkürler.",
    "Türk kahvesi içmeyi seviyorum.",
    "İstanbul Boğazı'nda yürüyüş yapmak harika."
]

# Build the Gradio UI; `share=True` exposes a public tunnel URL when launched.
iface = gr.Interface(
    fn=text_to_speech,
    inputs=[gr.Textbox(label="Enter Turkish text to convert to speech")],
    outputs=[gr.Audio(label="Generated Speech", type="numpy")],
    title="Fine-tuned Turkish SpeechT5 Text-to-Speech Demo",
    description="This demo uses a fine-tuned model based on microsoft/speecht5_tts for Turkish text-to-speech. Enter Turkish text and listen to the generated speech.\n\nNote: This report was prepared as a task given by the IIT Roorkee PARIMAL intern program. This space demonstrates the demo version of Omarrran/turkish_finetuned_speecht5_tts version for the Turkish language.",
    examples=example_sentences,
)

iface.launch(share=True)