bofenghuang committed · Commit 5140605 · 1 Parent(s): 0585497

up

Browse files:
- README.md (+1 -1)
- run_demo_ct2.py (+11 -15)
README.md CHANGED

```diff
@@ -4,7 +4,7 @@ emoji: 🤫
 colorFrom: indigo
 colorTo: red
 sdk: gradio
-sdk_version:
+sdk_version: 4.16.0
 app_file: app.py
 pinned: false
 tags:
```
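The README change is a one-line bump of the Space's pinned Gradio SDK to 4.16.0; most of the edits to run_demo_ct2.py below appear to be the matching migration of the demo code from the Gradio 3 API to Gradio 4.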
run_demo_ct2.py CHANGED

```diff
@@ -27,7 +27,7 @@ warnings.filterwarnings("ignore")
 disable_progress_bar()
 
 # DEFAULT_MODEL_NAME = "bofenghuang/whisper-large-v2-cv11-french"
-DEFAULT_MODEL_NAME = "bofenghuang/whisper-large-
+DEFAULT_MODEL_NAME = "bofenghuang/whisper-large-v3-french"
 # CHECKPOINT_FILENAME = "checkpoint_openai.pt"
 
 GEN_KWARGS = {
```
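The default checkpoint now points at bofenghuang/whisper-large-v3-french; judging by the download change further down, that repository ships a CTranslate2 export alongside the original weights, which is what the faster-whisper backend consumes.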
```diff
@@ -35,13 +35,15 @@ GEN_KWARGS = {
     "language": "fr",
     # "without_timestamps": True,
     # decode options
-    # "beam_size":
+    # "beam_size": 1,
     # "patience": 2,
     # disable fallback
     # "compression_ratio_threshold": None,
     # "logprob_threshold": None,
     # vad threshold
     # "no_speech_threshold": None,
+    # "condition_on_previous_text": False,  # todo: only for distilled version
+    "vad_filter": True,
 }
 
 logging.basicConfig(
```
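GEN_KWARGS holds the decoding options, presumably unpacked into faster-whisper's `WhisperModel.transcribe`. The newly enabled `vad_filter=True` runs the library's bundled Silero VAD so non-speech stretches are dropped before decoding. A minimal sketch of how these options plug in, with a placeholder model directory and audio file:

```python
from faster_whisper import WhisperModel

GEN_KWARGS = {
    "language": "fr",
    "vad_filter": True,  # pre-filter silence/noise with the bundled Silero VAD
}

# "model_dir" and "audio.wav" are placeholders for illustration.
model = WhisperModel("model_dir", device="cuda", compute_type="float16")
segments, info = model.transcribe("audio.wav", **GEN_KWARGS)
for segment in segments:  # segments is a generator; decoding runs lazily
    print(f"[{segment.start:.2f}s -> {segment.end:.2f}s] {segment.text}")
```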
```diff
@@ -110,7 +112,8 @@ def maybe_load_cached_pipeline(model_name):
     model = cached_models.get(model_name)
     if model is None:
         # downloaded_model_path = hf_hub_download(repo_id=model_name, filename=CHECKPOINT_FILENAME)
-        downloaded_model_path = snapshot_download(repo_id=model_name)
+        # downloaded_model_path = snapshot_download(repo_id=model_name)
+        downloaded_model_path = snapshot_download(repo_id=model_name, allow_patterns="ctranslate2/*")
 
         # model = whisper.load_model(downloaded_model_path, device=device)
         model = WhisperModel(downloaded_model_path, device=device, compute_type="float16")
```
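huggingface_hub's `snapshot_download` accepts an `allow_patterns` glob (a string or a list) that restricts which repo files are fetched, so the Space now downloads only the CTranslate2 folder rather than the whole repository. A sketch with the new default repo id:

```python
from huggingface_hub import snapshot_download

# Only files matching the glob are fetched; the call still returns the
# local snapshot root, with the ctranslate2/ subfolder inside it.
downloaded_model_path = snapshot_download(
    repo_id="bofenghuang/whisper-large-v3-french",
    allow_patterns="ctranslate2/*",
)
```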
```diff
@@ -233,8 +236,8 @@ with gr.Blocks() as demo:
         """
     )
 
-    microphone_input = gr.
-    upload_input = gr.
+    microphone_input = gr.Audio(sources="microphone", type="filepath", label="Record")
+    upload_input = gr.Audio(sources="upload", type="filepath", label="Upload File")
     with_timestamps_input = gr.Checkbox(label="With timestamps?")
 
     microphone_transcribe_btn = gr.Button("Transcribe Audio")
```
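In Gradio 4, `gr.Audio` takes a `sources` argument (Gradio 3 named it `source`), and `type="filepath"` hands the recording to the event handler as the path of a temporary file. A minimal sketch wiring one such input to a stand-in callback:

```python
import gradio as gr

def transcribe(audio_path):  # stand-in for the Space's real transcription function
    return f"received: {audio_path}"

with gr.Blocks() as demo:
    microphone_input = gr.Audio(sources="microphone", type="filepath", label="Record")
    text_output = gr.Textbox(label="Transcription")
    gr.Button("Transcribe Audio").click(transcribe, inputs=[microphone_input], outputs=[text_output])

demo.launch()
```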
```diff
@@ -247,10 +250,7 @@ with gr.Blocks() as demo:
     text_output_df2 = gr.DataFrame(
         value=default_text_output_df,
         label="Transcription",
-        row_count=(0, "dynamic"),
-        max_rows=10,
         wrap=True,
-        overflow_row_behaviour="paginate",
     )
 
     microphone_transcribe_btn.click(
```
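`gr.DataFrame` in Gradio 4 no longer accepts `max_rows` or `overflow_row_behaviour` (passing them would raise a TypeError at construction), so both are dropped here and in the second table below; `row_count` is removed along with them rather than migrated.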
```diff
@@ -301,7 +301,7 @@ with gr.Blocks() as demo:
         """
     )
 
-    yt_link_input = gr.
+    yt_link_input = gr.Textbox(lines=1, placeholder="Paste the URL to a YouTube video here", label="YouTube URL")
     download_youtube_btn = gr.Button("Download Youtube video")
     downloaded_video_output = gr.Video(label="Video file", mirror_webcam=False)
     download_youtube_btn.click(download_video_from_youtube, inputs=[yt_link_input], outputs=[downloaded_video_output])
```
```diff
@@ -311,14 +311,10 @@ with gr.Blocks() as demo:
     text_output_df = gr.DataFrame(
         value=default_text_output_df,
         label="Transcription",
-        row_count=(0, "dynamic"),
-        max_rows=10,
         wrap=True,
-        overflow_row_behaviour="paginate",
     )
 
     video_transcribe_btn.click(video_transcribe, inputs=[downloaded_video_output, with_timestamps_input3], outputs=[text_output_df])
 
-# demo.launch(server_name="0.0.0.0", debug=True)
-
-demo.launch(enable_queue=True)
+# demo.queue(max_size=10).launch(server_name="0.0.0.0", debug=True, ssl_certfile="/home/bhuang/tools/cert.pem", ssl_keyfile="/home/bhuang/tools/key.pem", ssl_verify=False)
+demo.queue(max_size=10).launch()
```
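`launch(enable_queue=...)` is gone in Gradio 4: queueing is configured on the Blocks object itself via `.queue()`, whose `max_size` caps how many events may wait before new ones are rejected. The new launch pattern in brief:

```python
import gradio as gr

with gr.Blocks() as demo:
    gr.Markdown("Whisper French demo")  # placeholder content

# Gradio 4 style: configure the queue on the Blocks object, then launch.
demo.queue(max_size=10).launch()
```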