from parler_tts import ParlerTTSForConditionalGeneration
from transformers import AutoTokenizer
import soundfile as sf
import pygame

from dora import DoraStatus

# Load the Parler-TTS model and its tokenizer once at node start-up.
model = ParlerTTSForConditionalGeneration.from_pretrained(
    "parler-tts/parler_tts_mini_v0.1"
).to("cuda:0")
tokenizer = AutoTokenizer.from_pretrained("parler-tts/parler_tts_mini_v0.1")

# Initialize pygame's audio mixer for playing back the generated speech.
pygame.mixer.init()

# Voice description conditioning the model; tokenized once and reused for every request.
input_ids = tokenizer(
    "A female speaker with a slightly low-pitched voice delivers her words quite "
    "expressively, in a very confined sounding environment with clear audio quality. "
    "She speaks very fast.",
    return_tensors="pt",
).input_ids.to("cuda:0")


class Operator:
    def on_event(
        self,
        dora_event,
        send_output,
    ):
        if dora_event["type"] == "INPUT":
            # The incoming dora value is a pyarrow array; its first element is the text to speak.
            text = dora_event["value"][0].as_py()

            # Generate speech conditioned on the fixed voice description and the input text.
            generation = model.generate(
                max_new_tokens=300,
                input_ids=input_ids,
                prompt_input_ids=tokenizer(text, return_tensors="pt").input_ids.to(
                    "cuda:0"
                ),
            )
            print(text, flush=True)

            # Write the waveform to disk so pygame can stream it.
            sf.write(
                "parler_tts_out.wav",
                generation.cpu().numpy().squeeze(),
                model.config.sampling_rate,
            )

            # Wait for any previous utterance to finish before starting the new one.
            while pygame.mixer.music.get_busy():
                pygame.time.wait(50)
            pygame.mixer.music.load("parler_tts_out.wav")
            pygame.mixer.music.play()
        return DoraStatus.CONTINUE
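

# Minimal local smoke test, a sketch rather than part of the dora dataflow: it assumes
# dora delivers the text payload as a pyarrow array (which is what
# dora_event["value"][0].as_py() above expects) and stubs out send_output, since this
# operator never calls it.
if __name__ == "__main__":
    import pyarrow as pa

    operator = Operator()
    operator.on_event(
        {"type": "INPUT", "value": pa.array(["Hello from Parler-TTS."])},
        send_output=lambda *args, **kwargs: None,
    )

    # Keep the process alive until playback finishes; otherwise it exits immediately.
    while pygame.mixer.music.get_busy():
        pygame.time.wait(100)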