import os
import time
import gradio as gr
from pydub import AudioSegment
from transformers import pipeline
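
# Heuristic for "are we on Hugging Face Spaces": the platform sets SYSTEM="spaces"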
is_hf = os.getenv("SYSTEM") == "spaces"
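
# Decoding settings shared by every model: greedy search with n-gram repetition blocking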
generate_kwargs = {
    "language": "Japanese",
    "do_sample": False,
    "num_beams": 1,
    "no_repeat_ngram_size": 3,
}
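
# Display name -> Hugging Face repo id (or a local checkout when not running on Spaces)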
model_dict = {
    "whisper-large-v2": "openai/whisper-large-v2",
    "whisper-large-v3": "openai/whisper-large-v3",
    "whisper-large-v3-turbo": "openai/whisper-large-v3-turbo",
    "kotoba-whisper-v1.0": "kotoba-tech/kotoba-whisper-v1.0",
    "kotoba-whisper-v2.0": "kotoba-tech/kotoba-whisper-v2.0",
    "galgame-whisper-wip": (
        "litagin/galgame-whisper-wip"
        if is_hf
        else "../whisper_finetune/galgame-whisper"
    ),
}

# Download (and cache) every model up front so the first click is not a cold start
for model in model_dict.values():
    pipeline("automatic-speech-recognition", model=model)
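
# Shared helper: enforce the 15-second limit, build the pipeline, run it,
# and return the transcription together with the elapsed wall-clock time.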
def transcribe_common(audio: str, model: str) -> tuple[str, float]:
    # Get duration of audio
    duration = AudioSegment.from_file(audio).duration_seconds
    if duration > 15:
        return "Audio too long, limit is 15 seconds", 0
    start_time = time.time()
    pipe = pipeline("automatic-speech-recognition", model=model)
    # Time both pipeline construction and the actual transcription
    result = pipe(audio, generate_kwargs=generate_kwargs)["text"]
    end_time = time.time()
    return result, end_time - start_time


def transcribe_large_v2(audio) -> tuple[str, float]:
    return transcribe_common(audio, model_dict["whisper-large-v2"])


def transcribe_large_v3(audio) -> tuple[str, float]:
    return transcribe_common(audio, model_dict["whisper-large-v3"])


def transcribe_large_v3_turbo(audio) -> tuple[str, float]:
    return transcribe_common(audio, model_dict["whisper-large-v3-turbo"])


def transcribe_kotoba_v1(audio) -> tuple[str, float]:
    return transcribe_common(audio, model_dict["kotoba-whisper-v1.0"])


def transcribe_kotoba_v2(audio) -> tuple[str, float]:
    return transcribe_common(audio, model_dict["kotoba-whisper-v2.0"])


def transcribe_galgame_whisper(audio) -> tuple[str, float]:
    return transcribe_common(audio, model_dict["galgame-whisper-wip"])
initial_md = """
# Galgame-Whisper (WIP) Demo
- 日本語のみ対応
- 他の書き起こしとついでに比較できるようにいろいろ入れた
- 現在0.1エポックくらい
- 速度はCPUです
- 音声は15秒まで
pipeのハイパラ:
```python
generate_kwargs = {
"language": "Japanese",
"do_sample": False,
"num_beams": 1,
"no_repeat_ngram_size": 3,
}
```
"""
with gr.Blocks() as app:
    gr.Markdown(initial_md)
    audio = gr.Audio(type="filepath")
    with gr.Row():
        with gr.Column():
            gr.Markdown("### Whisper-Large-V2")
            button_v2 = gr.Button("Transcribe with Whisper-Large-V2")
            output_v2 = gr.Textbox()
            time_v2 = gr.Textbox(label="Time taken")
        with gr.Column():
            gr.Markdown("### Whisper-Large-V3")
            button_v3 = gr.Button("Transcribe with Whisper-Large-V3")
            output_v3 = gr.Textbox()
            time_v3 = gr.Textbox(label="Time taken")
        with gr.Column():
            gr.Markdown("### Whisper-Large-V3-Turbo")
            button_v3_turbo = gr.Button("Transcribe with Whisper-Large-V3-Turbo")
            output_v3_turbo = gr.Textbox()
            time_v3_turbo = gr.Textbox(label="Time taken")
    with gr.Row():
        with gr.Column():
            gr.Markdown("### Kotoba-Whisper-V1.0")
            button_kotoba_v1 = gr.Button("Transcribe with Kotoba-Whisper-V1.0")
            output_kotoba_v1 = gr.Textbox()
            time_kotoba_v1 = gr.Textbox(label="Time taken")
        with gr.Column():
            gr.Markdown("### Kotoba-Whisper-V2.0")
            button_kotoba_v2 = gr.Button("Transcribe with Kotoba-Whisper-V2.0")
            output_kotoba_v2 = gr.Textbox()
            time_kotoba_v2 = gr.Textbox(label="Time taken")
    with gr.Row():
        with gr.Column():
            gr.Markdown("### Galgame-Whisper (WIP)")
            button_galgame = gr.Button("Transcribe with Galgame-Whisper (WIP)")
            output_galgame = gr.Textbox()
            time_galgame = gr.Textbox(label="Time taken")
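
    # Wire each button to its model-specific transcription function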
    button_v2.click(transcribe_large_v2, inputs=audio, outputs=[output_v2, time_v2])
    button_v3.click(transcribe_large_v3, inputs=audio, outputs=[output_v3, time_v3])
    button_v3_turbo.click(
        transcribe_large_v3_turbo,
        inputs=audio,
        outputs=[output_v3_turbo, time_v3_turbo],
    )
    button_kotoba_v1.click(
        transcribe_kotoba_v1, inputs=audio, outputs=[output_kotoba_v1, time_kotoba_v1]
    )
    button_kotoba_v2.click(
        transcribe_kotoba_v2, inputs=audio, outputs=[output_kotoba_v2, time_kotoba_v2]
    )
    button_galgame.click(
        transcribe_galgame_whisper,
        inputs=audio,
        outputs=[output_galgame, time_galgame],
    )

app.launch(inbrowser=True)