import os

import gradio as gr
import spaces
import torch
import torchaudio
from huggingface_hub import login
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
from xcodec2.modeling_xcodec2 import XCodec2Model

# Authenticate against the Hugging Face Hub; HF_TOKEN must be set in the environment
api_key = os.getenv("HF_TOKEN")
login(token=api_key)

llasa_3b = 'Steveeeeeeen/Llasagna-v0.1'

tokenizer = AutoTokenizer.from_pretrained(llasa_3b)

model = AutoModelForCausalLM.from_pretrained(
    llasa_3b,
    trust_remote_code=True,
    device_map='cuda',
)
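# Llasagna is a Llasa-style model: a causal LM whose vocabulary is extended with
# discrete <|s_k|> speech tokens, so a single decoder handles both text and speech.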

# XCodec2: the neural codec that maps 16 kHz waveforms to/from discrete speech tokens
model_path = "srinivasbilla/xcodec2"
Codec_model = XCodec2Model.from_pretrained(model_path)
Codec_model.eval().cuda()

# Whisper transcribes the reference audio; its transcript is prepended to the
# target text so the model continues speaking in the reference voice
whisper_turbo_pipe = pipeline(
    "automatic-speech-recognition",
    model="openai/whisper-large-v3-turbo",
    torch_dtype=torch.float16,
    device='cuda',
)

SPEAKERS = {
    "Female 1": {
        "path": "speakers/female_0.wav",
        "transcript": "e lo stesso alessi che andò ad aprire non riconobbe antoni il quale tornava con la sporta sotto il braccio tanto era mutato coperto di polvere e con la barba lungacome fu entrato e si fu messo a sedere in un cantuccio non osavano quasi fargli festa.",
    },
    "Male 1": {
        "path": "speakers/male_0.wav",
        "transcript": "cadeva la sera smorto in un gran silenzio poi si udirono lontano le chiese di francoforte che scampanavanola bella vigilia di natale che mi mandò dome dio balbettò compare cosimo con la lingua grossa dello spasimo",
    },
    "Female 2": {
        "path": "speakers/female_2.wav",
        "transcript": "la zia baronessa che aveva il cacciatore con le penne i cugini del babbo che possedevano cinque feudi l'uno attaccato all'altro nello stato di caltagirone",
    },
    "Male 2": {
        "path": "speakers/male_1.wav",
        "transcript": "solo è abbandonato come uno che non ha né possiede chi vi siete trovato accanto nel bisogno ditelo vostra figlia vi manda soltanto belle parole",
    },
}
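# Note: the stored transcripts are only used for previewing a speaker; `infer`
# re-transcribes the selected reference audio with Whisper at synthesis time.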

banner_url = "https://huggingface.co/datasets/Steveeeeeeen/random_images/resolve/main/llasagna.png"
BANNER = f'<div style="display: flex; justify-content: space-around;"><img src="{banner_url}" alt="Banner" style="width: 40vw; min-width: 150px; max-width: 300px;"> </div>'

def preview_speaker(display_name):
    """Return the reference audio and transcript of a predefined speaker."""
    # The dropdown choices are the SPEAKERS keys themselves, so look them up directly
    if display_name in SPEAKERS:
        waveform, sample_rate = torchaudio.load(SPEAKERS[display_name]["path"])
        return (sample_rate, waveform[0].numpy()), SPEAKERS[display_name]["transcript"]
    return None, ""


def ids_to_speech_tokens(speech_ids):
    """Convert codec ids (e.g. 12345) into token strings (e.g. '<|s_12345|>')."""
    speech_tokens_str = []
    for speech_id in speech_ids:
        speech_tokens_str.append(f"<|s_{speech_id}|>")
    return speech_tokens_str


def extract_speech_ids(speech_tokens_str):
    """Convert token strings (e.g. '<|s_23456|>') back into codec ids (e.g. 23456)."""
    speech_ids = []
    for token_str in speech_tokens_str:
        if token_str.startswith('<|s_') and token_str.endswith('|>'):
            num_str = token_str[4:-2]
            speech_ids.append(int(num_str))
        else:
            print(f"Unexpected token: {token_str}")
    return speech_ids
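
# Round-trip example:
#   ids_to_speech_tokens([23456])       -> ['<|s_23456|>']
#   extract_speech_ids(['<|s_23456|>']) -> [23456]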

@spaces.GPU(duration=60)
def infer(sample_audio_path, target_text, progress=gr.Progress()):
    # Validate inputs before doing any expensive work
    if sample_audio_path is None:
        gr.Warning("Please upload a reference audio or select a predefined speaker.")
        return None
    if len(target_text) == 0:
        return None
    elif len(target_text) > 300:
        gr.Warning("Text is too long. Please keep it under 300 characters.")
        target_text = target_text[:300]

    progress(0, 'Loading and trimming audio...')
    waveform, sample_rate = torchaudio.load(sample_audio_path)
    if len(waveform[0]) / sample_rate > 15:
        gr.Warning("Trimming audio to the first 15 seconds.")
        waveform = waveform[:, :sample_rate * 15]

    # Convert stereo to mono by averaging the channels
    if waveform.size(0) > 1:
        waveform_mono = torch.mean(waveform, dim=0, keepdim=True)
    else:
        waveform_mono = waveform

    # Resample to the 16 kHz expected by both XCodec2 and Whisper
    prompt_wav = torchaudio.transforms.Resample(orig_freq=sample_rate, new_freq=16000)(waveform_mono)
    prompt_text = whisper_turbo_pipe(prompt_wav[0].numpy())['text'].strip()
    progress(0.5, 'Transcribed! Generating speech...')

    input_text = prompt_text + ' ' + target_text

    # TTS start!
    with torch.no_grad():
        # Encode the prompt wav into discrete codec ids
        vq_code_prompt = Codec_model.encode_code(input_waveform=prompt_wav)
        vq_code_prompt = vq_code_prompt[0, 0, :]

        # Convert int 12345 to token <|s_12345|>
        speech_ids_prefix = ids_to_speech_tokens(vq_code_prompt)

        formatted_text = f"<|TEXT_UNDERSTANDING_START|>{input_text}<|TEXT_UNDERSTANDING_END|>"

        # Tokenize the text and the speech prefix; continuing the assistant turn
        # makes the model extend the prompt speech instead of starting from scratch
        chat = [
            {"role": "user", "content": "Convert the text to speech:" + formatted_text},
            {"role": "assistant", "content": "<|SPEECH_GENERATION_START|>" + ''.join(speech_ids_prefix)}
        ]

        input_ids = tokenizer.apply_chat_template(
            chat,
            tokenize=True,
            return_tensors='pt',
            continue_final_message=True
        )
        input_ids = input_ids.to('cuda')
        speech_end_id = tokenizer.convert_tokens_to_ids('<|SPEECH_GENERATION_END|>')

        # Generate the speech tokens autoregressively
        outputs = model.generate(
            input_ids,
            max_length=2048,  # We trained our model with a max length of 2048
            eos_token_id=speech_end_id,
            do_sample=True,
            top_p=1,
            temperature=0.8,
        )

        # Extract the speech tokens (keep the prompt prefix, drop the end token)
        generated_ids = outputs[0][input_ids.shape[1] - len(speech_ids_prefix):-1]
        speech_tokens = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)

        # Convert token <|s_23456|> back to int 23456
        speech_tokens = extract_speech_ids(speech_tokens)
        speech_tokens = torch.tensor(speech_tokens).cuda().unsqueeze(0).unsqueeze(0)

        # Decode the speech tokens back to a waveform
        gen_wav = Codec_model.decode_code(speech_tokens)

        # Keep only the newly generated part (drop the reconstructed prompt)
        gen_wav = gen_wav[:, :, prompt_wav.shape[1]:]

        progress(1, 'Synthesized!')

    return (16000, gen_wav[0, 0, :].cpu().numpy())

with gr.Blocks() as app_tts:
    gr.Markdown("# Zero-Shot Voice Cloning TTS")
    
    with gr.Row():
        ref_audio_input = gr.Audio(label="Reference Audio", type="filepath")
        speaker_dropdown = gr.Dropdown(
            choices=list(SPEAKERS.keys()),
            label="Or select a predefined speaker",
            value=None
        )
    
    gen_text_input = gr.Textbox(label="Text to Generate", lines=10)
    generate_btn = gr.Button("Synthesize", variant="primary")
    audio_output = gr.Audio(label="Synthesized Audio")

    def update_audio(speaker):
        if speaker in SPEAKERS:
            return SPEAKERS[speaker]["path"]
        return None

    speaker_dropdown.change(
        fn=update_audio,
        inputs=[speaker_dropdown],
        outputs=[ref_audio_input]
    )
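    # Note: selecting a predefined speaker just fills the reference-audio field,
    # so `infer` treats it exactly like an uploaded file.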

    generate_btn.click(
        infer,
        inputs=[
            ref_audio_input,
            gen_text_input,
        ],
        outputs=[audio_output],
    )

with gr.Blocks() as app_credits:
    gr.Markdown("""
# Credits

* [zhenye234](https://github.com/zhenye234) for the original [repo](https://github.com/zhenye234/LLaSA_training)
* [mrfakename](https://huggingface.co/mrfakename) for the [gradio demo code](https://huggingface.co/spaces/mrfakename/E2-F5-TTS)        
""")

with gr.Blocks() as app:
    gr.HTML(BANNER, elem_id="banner")
    gr.Markdown(
        """
# Llasagna v0.1 1b TTS

This is a web UI for Llasagna 1b, a zero-shot voice cloning and TTS model.

It is a fine-tuned version of Llasa-1b that supports Italian.

If you're having issues, try converting your reference audio to WAV or MP3, clipping it to 15 seconds, and shortening your prompt.
"""
    )
    gr.TabbedInterface([app_tts], ["TTS"])


app.launch(ssr_mode=False, share=True)