#!/usr/local/bin/python3
# -*- coding: utf-8 -*-

import gradio as gr
import librosa
import torch

from transformers import AutoProcessor, AutoModelForSpeechSeq2Seq

checkpoint = "openai/whisper-large-v2"
processor = AutoProcessor.from_pretrained(checkpoint)
model = AutoModelForSpeechSeq2Seq.from_pretrained(checkpoint)


def process_audio(sampling_rate, waveform):
    # convert from int16 to floating point (int16 full scale is 32768)
    waveform = waveform / 32768.0

    # convert to mono if stereo
    if len(waveform.shape) > 1:
        waveform = librosa.to_mono(waveform.T)

    # resample to 16 kHz if necessary
    if sampling_rate != 16000:
        waveform = librosa.resample(waveform, orig_sr=sampling_rate, target_sr=16000)

    # limit to 30 seconds (Whisper's input window)
    waveform = waveform[:16000 * 30]

    # make PyTorch tensor
    waveform = torch.tensor(waveform)
    return waveform


def predict(audio, mic_audio=None):
    # audio = tuple (sample_rate, frames) or (sample_rate, (frames, channels))
    if mic_audio is not None:
        sampling_rate, waveform = mic_audio
    elif audio is not None:
        sampling_rate, waveform = audio
    else:
        return "(please provide audio)"

    waveform = process_audio(sampling_rate, waveform)

    # extract log-Mel input features for Whisper
    inputs = processor(audio=waveform, sampling_rate=16000, return_tensors="pt")

    # generate token IDs and decode them to text
    predicted_ids = model.generate(**inputs, max_length=400)
    transcription = processor.batch_decode(predicted_ids, skip_special_tokens=True)
    return transcription[0]


title = "OpenAI Whisper Large v2"

description = """
This demo showcases the speech recognition (ASR) capability of the openai/whisper-large-v2 model. The model has not been fine-tuned; the demo runs on the original checkpoint.

The original Whisper model mainly supports recognition of English speech, where it performs best; Chinese speech is transcribed only as Hanyu Pinyin.

For more information, see: openai/whisper-large-v2.

Usage: upload an audio file or record audio directly on the page. The audio is converted to mono and resampled to 16 kHz before being passed to the model.
"""

article = """
References: OpenAI Whisper Large v2 | Innev GitHub

Audio examples:
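"""

# Minimal Gradio wiring (a sketch only): the original launch code and examples list are
# not shown above, so none of this is the author's code. It assumes a Gradio 3.x API
# (`source=` on gr.Audio); with type="numpy", each component passes a
# (sample_rate, data) tuple, matching what `predict` expects.
demo = gr.Interface(
    fn=predict,
    inputs=[
        gr.Audio(label="Upload Speech", source="upload", type="numpy"),
        gr.Audio(label="Record Speech", source="microphone", type="numpy"),
    ],
    outputs=gr.Textbox(label="Transcription"),
    title=title,
    description=description,
    article=article,
)

demo.launch()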