ylacombe (HF staff) committed on
Commit 8e73e42
1 Parent(s): 934b11d

Create app.py

Files changed (1): app.py (+218, -0)
app.py ADDED

import torch

import gradio as gr
import yt_dlp as youtube_dl
import numpy as np
from datasets import Dataset, Audio
from scipy.io import wavfile

from transformers import pipeline
from transformers.pipelines.audio_utils import ffmpeg_read

import tempfile
import os
import time

os.environ["GRADIO_TEMP_DIR"] = "/home/yoach/spaces/tmp"
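# Note: GRADIO_TEMP_DIR above is hardcoded to the author's machine; adjust or drop
# this line when running the Space elsewhere.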

MODEL_NAME = "openai/whisper-large-v3"
BATCH_SIZE = 8
FILE_LIMIT_MB = 1000
YT_LENGTH_LIMIT_S = 3600  # limit YouTube videos to 1 hour

device = 0 if torch.cuda.is_available() else "cpu"

pipe = pipeline(
    task="automatic-speech-recognition",
    model=MODEL_NAME,
    chunk_length_s=30,
    device=device,
)
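# With chunk_length_s=30, the pipeline splits long inputs into 30-second windows,
# transcribes them BATCH_SIZE at a time, and stitches the results back together,
# so audio of arbitrary length can be handled.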


def transcribe(inputs_path, task, dataset_name, oauth_token: gr.OAuthToken):
    if inputs_path is None:
        raise gr.Error("No audio file submitted! Please upload or record an audio file before submitting your request.")

    sampling_rate, inputs = wavfile.read(inputs_path)

    out = pipe(inputs_path, batch_size=BATCH_SIZE, generate_kwargs={"task": task}, return_timestamps=True)

    text = out["text"]

    chunks = naive_postprocess_whisper_chunks(out["chunks"])

    # cut the original audio into one file per postprocessed chunk and pair each
    # snippet with its transcript
    transcripts = []
    audios = []
    with tempfile.TemporaryDirectory() as tmpdirname:
        for i, chunk in enumerate(chunks):
            begin, end = chunk["timestamp"]
            begin, end = int(begin * sampling_rate), int(end * sampling_rate)
            # TODO: make sure 1D or 2D?
            arr = inputs[begin:end]
            path = os.path.join(tmpdirname, f"{i}.wav")
            wavfile.write(path, sampling_rate, arr)
            audios.append(path)
            transcripts.append(chunk["text"])

        dataset = Dataset.from_dict({"audio": audios, "transcript": transcripts}).cast_column("audio", Audio())

        # push while the temporary WAV files still exist; oauth_token.token is the
        # logged-in user's access token string
        dataset.push_to_hub(dataset_name, token=oauth_token.token)

    return text
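
# For reference (not executed here): a dataset pushed this way can be reloaded with
#   from datasets import load_dataset
#   ds = load_dataset("<username>/<dataset_name>", split="train")
# where each row holds a decoded "audio" sample and its "transcript" string.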


def _return_yt_html_embed(yt_url):
    video_id = yt_url.split("?v=")[-1]
    HTML_str = (
        f'<center> <iframe width="500" height="320" src="https://www.youtube.com/embed/{video_id}"> </iframe>'
        " </center>"
    )
    return HTML_str
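
# Note: splitting on "?v=" assumes a standard watch URL; short youtu.be links or
# URLs with extra query parameters would need more robust parsing.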


def download_yt_audio(yt_url, filename):
    info_loader = youtube_dl.YoutubeDL()

    try:
        info = info_loader.extract_info(yt_url, download=False)
    except youtube_dl.utils.DownloadError as err:
        raise gr.Error(str(err))

    file_length = info["duration_string"]
    file_h_m_s = file_length.split(":")
    file_h_m_s = [int(sub_length) for sub_length in file_h_m_s]

    # pad the parsed duration to [hours, minutes, seconds]
    if len(file_h_m_s) == 1:
        file_h_m_s.insert(0, 0)
    if len(file_h_m_s) == 2:
        file_h_m_s.insert(0, 0)
    file_length_s = file_h_m_s[0] * 3600 + file_h_m_s[1] * 60 + file_h_m_s[2]

    if file_length_s > YT_LENGTH_LIMIT_S:
        yt_length_limit_hms = time.strftime("%H:%M:%S", time.gmtime(YT_LENGTH_LIMIT_S))
        file_length_hms = time.strftime("%H:%M:%S", time.gmtime(file_length_s))
        raise gr.Error(f"Maximum YouTube video length is {yt_length_limit_hms}, but got a video of length {file_length_hms}.")

    ydl_opts = {"outtmpl": filename, "format": "worstvideo[ext=mp4]+bestaudio[ext=m4a]/best[ext=mp4]/best"}

    with youtube_dl.YoutubeDL(ydl_opts) as ydl:
        try:
            ydl.download([yt_url])
        except youtube_dl.utils.ExtractorError as err:
            raise gr.Error(str(err))
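
# Example of the duration parsing above: "1:02:03" splits into [1, 2, 3] and gives
# 1 * 3600 + 2 * 60 + 3 = 3723 seconds, while a bare "45" is padded to [0, 0, 45]
# and gives 45 seconds.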


def yt_transcribe(yt_url, task, dataset_name, oauth_token: gr.OAuthToken, max_filesize=75.0, dataset_sampling_rate=24000):
    html_embed_str = _return_yt_html_embed(yt_url)

    with tempfile.TemporaryDirectory() as tmpdirname:
        filepath = os.path.join(tmpdirname, "video.mp4")
        download_yt_audio(yt_url, filepath)
        with open(filepath, "rb") as f:
            inputs_bytes = f.read()

    # decode at the model's sampling rate (16 kHz for Whisper) for transcription
    inputs = ffmpeg_read(inputs_bytes, pipe.feature_extractor.sampling_rate)
    inputs = {"array": inputs, "sampling_rate": pipe.feature_extractor.sampling_rate}

    out = pipe(inputs, batch_size=BATCH_SIZE, generate_kwargs={"task": task}, return_timestamps=True)

    text = out["text"]

    chunks = naive_postprocess_whisper_chunks(out["chunks"])

    # decode a second time at the dataset sampling rate for the audio that gets pushed
    inputs = ffmpeg_read(inputs_bytes, dataset_sampling_rate)

    transcripts = []
    audios = []
    with tempfile.TemporaryDirectory() as tmpdirname:
        for i, chunk in enumerate(chunks):
            begin, end = chunk["timestamp"]
            begin, end = int(begin * dataset_sampling_rate), int(end * dataset_sampling_rate)
            # TODO: make sure 1D or 2D?
            arr = inputs[begin:end]
            path = os.path.join(tmpdirname, f"{i}.wav")
            wavfile.write(path, dataset_sampling_rate, arr)
            audios.append(path)
            transcripts.append(chunk["text"])

        dataset = Dataset.from_dict({"audio": audios, "transcript": transcripts}).cast_column("audio", Audio())

        dataset.push_to_hub(dataset_name, token=oauth_token.token)

    return html_embed_str, text
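
# Note: max_filesize is accepted but not used yet; the FILE_LIMIT_MB constant above
# is likewise not enforced anywhere in this version.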


def naive_postprocess_whisper_chunks(chunks, stop_chars=".!:;?", min_duration=5):
    new_chunks = []

    while chunks:
        current_chunk = chunks.pop(0)
        begin, end = current_chunk["timestamp"]
        text = current_chunk["text"]

        # keep absorbing the following chunks until the text ends in a stop character
        # and the merged chunk lasts at least min_duration seconds
        while chunks and (text[-1] not in stop_chars or (end - begin < min_duration)):
            ch = chunks.pop(0)
            end = ch["timestamp"][1]
            text = "".join([text, ch["text"]])

        new_chunks.append({
            "text": text.strip(),
            "timestamp": (begin, end),
        })
        print(f"LENGTH CHUNK #{len(new_chunks)}: {end - begin}s")

    return new_chunks
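
# Worked example: chunks with texts "Hello", " world.", " Bye." and timestamps
# (0.0, 3.0), (3.0, 6.0), (6.0, 9.0) merge into "Hello world." (0.0, 6.0) and
# "Bye." (6.0, 9.0): "Hello" lacks a stop character so it absorbs the next chunk,
# while "Bye." is shorter than min_duration but has no chunks left to absorb.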


demo = gr.Blocks()

mf_transcribe = gr.Interface(
    fn=transcribe,
    inputs=[
        gr.Audio(type="filepath"),
        gr.Radio(["transcribe", "translate"], label="Task", value="transcribe"),
        gr.Textbox(lines=1, placeholder="Place your new dataset name here", label="Dataset name"),
    ],
    outputs="text",
    theme="huggingface",
    title="Whisper Large V3: Transcribe Audio",
    description=(
        "Transcribe long-form microphone or audio inputs with the click of a button! Demo uses the"
        f" checkpoint [{MODEL_NAME}](https://huggingface.co/{MODEL_NAME}) and 🤗 Transformers to transcribe audio files"
        " of arbitrary length."
    ),
    allow_flagging="never",
)
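
# Note: theme="huggingface" and allow_flagging are Gradio 3-era arguments; newer
# Gradio releases may warn about or ignore them.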

yt_transcribe = gr.Interface(
    fn=yt_transcribe,
    inputs=[
        gr.Textbox(lines=1, placeholder="Paste the URL to a YouTube video here", label="YouTube URL"),
        gr.Radio(["transcribe", "translate"], label="Task", value="transcribe"),
        gr.Textbox(lines=1, placeholder="Place your new dataset name here", label="Dataset name"),
    ],
    outputs=["html", "text"],
    theme="huggingface",
    title="Whisper Large V3: Transcribe YouTube",
    description=(
        "Transcribe long-form YouTube videos with the click of a button! Demo uses the checkpoint"
        f" [{MODEL_NAME}](https://huggingface.co/{MODEL_NAME}) and 🤗 Transformers to transcribe video files of"
        " arbitrary length."
    ),
    allow_flagging="never",
)
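
# Note: this rebinds the name yt_transcribe from the function to its Interface;
# fn=yt_transcribe above was evaluated before the assignment, so the wiring is intact.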

with demo:
    # signing in provides the gr.OAuthToken that push_to_hub uses on the user's behalf
    with gr.Row():
        gr.LoginButton()
        gr.LogoutButton()
    gr.TabbedInterface([mf_transcribe, yt_transcribe], ["Microphone or Audio file", "YouTube"])

demo.launch(debug=True)