import torch
from transformers import pipeline
from parler_tts import ParlerTTSForConditionalGeneration
from transformers import AutoTokenizer, AutoFeatureExtractor

# ArticMonkey:19.03.24:1700 is an example of a version name in plaintext; it is
# converted to hex using this site -> https://magictool.ai/tool/text-to-hex-converter/
# Here "ArticMonkey" is the version name and the rest is the date and time.

device = 0 if torch.cuda.is_available() else "cpu"

checkpoint_whisper = "openai/whisper-medium"
pipe = pipeline(
    "automatic-speech-recognition",
    model=checkpoint_whisper,
    device=device,
    chunk_length_s=30,
)

checkpoint_parler = "parler-tts/parler_tts_mini_v0.1"
model_parler = ParlerTTSForConditionalGeneration.from_pretrained(checkpoint_parler).to(device)
tokenizer = AutoTokenizer.from_pretrained(checkpoint_parler)
feature_extractor = AutoFeatureExtractor.from_pretrained(checkpoint_parler)

SAMPLE_RATE = feature_extractor.sampling_rate
SEED = 42
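
# --- Usage sketch (not part of the original setup above) ---
# A minimal, hedged example of wiring the two models together: transcribe a
# hypothetical local file "sample.wav" with the Whisper pipeline, then read the
# transcript back with Parler-TTS. The file names, the speak() helper, and the
# voice description text are illustrative assumptions, not taken from the source.
import soundfile as sf
from transformers import set_seed

set_seed(SEED)  # make Parler-TTS generation reproducible

def speak(text: str, description: str = "A female speaker with a calm, clear voice."):
    # Parler-TTS conditions on two inputs: a voice description and the text prompt,
    # tokenized separately and passed as input_ids and prompt_input_ids.
    input_ids = tokenizer(description, return_tensors="pt").input_ids.to(device)
    prompt_input_ids = tokenizer(text, return_tensors="pt").input_ids.to(device)
    generation = model_parler.generate(input_ids=input_ids, prompt_input_ids=prompt_input_ids)
    audio = generation.cpu().numpy().squeeze()
    sf.write("reply.wav", audio, SAMPLE_RATE)  # "reply.wav" is a placeholder path

transcript = pipe("sample.wav")["text"]  # "sample.wav" is a placeholder path
speak(transcript)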