from transformers import (
    AutomaticSpeechRecognitionPipeline,
    BitsAndBytesConfig,
    WhisperForConditionalGeneration,
    WhisperTokenizer,
    WhisperProcessor,
)
from peft import PeftModel, PeftConfig
import torch
from huggingface_hub import login
import gradio as gr

# Authenticate once; the stored token is picked up by all Hub downloads below.
login()

peft_model_id = "aisha-org/faster-whisper-uz"
language = "uz"
task = "transcribe"

# Read the PEFT adapter config to find the base Whisper checkpoint it was trained from.
peft_config = PeftConfig.from_pretrained(peft_model_id)

# Load the base model in 8-bit and attach the fine-tuned adapter weights.
model = WhisperForConditionalGeneration.from_pretrained(
    peft_config.base_model_name_or_path,
    quantization_config=BitsAndBytesConfig(load_in_8bit=True),
    device_map="auto",
)
model = PeftModel.from_pretrained(model, peft_model_id)

tokenizer = WhisperTokenizer.from_pretrained(
    peft_config.base_model_name_or_path, language=language, task=task
)
processor = WhisperProcessor.from_pretrained(
    peft_config.base_model_name_or_path, language=language, task=task
)
feature_extractor = processor.feature_extractor

# Force decoding in Uzbek transcription mode.
forced_decoder_ids = processor.get_decoder_prompt_ids(language=language, task=task)

pipe = AutomaticSpeechRecognitionPipeline(
    model=model, tokenizer=tokenizer, feature_extractor=feature_extractor
)


def transcribe(audio):
    with torch.autocast("cuda"):
        text = pipe(
            audio,
            generate_kwargs={"forced_decoder_ids": forced_decoder_ids},
            max_new_tokens=255,
        )["text"]
    return text


# Gradio app: one tab for microphone input, one for uploaded audio files.
demo = gr.Blocks()

mic_transcribe = gr.Interface(
    fn=transcribe,
    inputs=gr.Audio(sources="microphone", type="filepath"),
    outputs=gr.Textbox(),
)

file_transcribe = gr.Interface(
    fn=transcribe,
    inputs=gr.Audio(sources="upload", type="filepath"),
    outputs=gr.Textbox(),
)

with demo:
    gr.TabbedInterface(
        [mic_transcribe, file_transcribe],
        ["Transcribe Microphone", "Transcribe Audio File"],
    )

demo.launch(debug=True)