from time import time

import numpy as np
from datasets import load_dataset
from faster_whisper import WhisperModel

# Alternative backend (Transformers on Apple Silicon), kept here for comparison:
# from transformers import WhisperForConditionalGeneration, WhisperProcessor

# Small dummy LibriSpeech split for a quick smoke test.
ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation", cache_dir=".")

# processor = WhisperProcessor.from_pretrained("openai/whisper-large-v3")
# model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-large-v3").to("mps")
model = WhisperModel("large-v3", device="cuda", compute_type="float16", download_root=".")

audio_sample = ds[0]["audio"]
# faster-whisper expects mono float32 PCM at 16 kHz; LibriSpeech is already 16 kHz.
waveform = audio_sample["array"].astype(np.float32)
sampling_rate = audio_sample["sampling_rate"]

tic = time()
# input_features = processor(
#     waveform, sampling_rate=sampling_rate, return_tensors="pt"
# ).input_features
segments, info = model.transcribe(waveform, beam_size=5)
# predicted_ids = model.generate(input_features.to("mps"))
# transcription = processor.batch_decode(predicted_ids, skip_special_tokens=True)
# print(transcription[0])

# `segments` is a lazy generator: the actual decoding happens while iterating,
# so the timer is stopped only after the loop has consumed it.
for segment in segments:
    print("[%.2fs -> %.2fs] %s" % (segment.start, segment.end, segment.text))
toc = time()
print(toc - tic)
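
# --- Optional comparison: the Transformers path from the comments above. ---
# A minimal sketch, not part of the original benchmark: it reuses `waveform` and
# `sampling_rate` from above and assumes a CUDA device (the original comments target "mps").
from transformers import WhisperForConditionalGeneration, WhisperProcessor

processor = WhisperProcessor.from_pretrained("openai/whisper-large-v3")
hf_model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-large-v3").to("cuda")

input_features = processor(
    waveform, sampling_rate=sampling_rate, return_tensors="pt"
).input_features

tic = time()
# num_beams=5 roughly matches the beam_size=5 used with faster-whisper above.
predicted_ids = hf_model.generate(input_features.to("cuda"), num_beams=5)
transcription = processor.batch_decode(predicted_ids, skip_special_tokens=True)
toc = time()

print(transcription[0])
print(toc - tic)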