Ziyou Li Chenzhou committed on
Commit
ae3911f
0 Parent(s):

Duplicate from Chenzhou/Whisper-zh-HK

Browse files

Co-authored-by: Chenzhou Huang <Chenzhou@users.noreply.huggingface.co>

Files changed (4) hide show
  1. .gitattributes +34 -0
  2. README.md +13 -0
  3. app.py +94 -0
  4. requirements.txt +5 -0
.gitattributes ADDED
@@ -0,0 +1,34 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
+ *.model filter=lfs diff=lfs merge=lfs -text
13
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
14
+ *.npy filter=lfs diff=lfs merge=lfs -text
15
+ *.npz filter=lfs diff=lfs merge=lfs -text
16
+ *.onnx filter=lfs diff=lfs merge=lfs -text
17
+ *.ot filter=lfs diff=lfs merge=lfs -text
18
+ *.parquet filter=lfs diff=lfs merge=lfs -text
19
+ *.pb filter=lfs diff=lfs merge=lfs -text
20
+ *.pickle filter=lfs diff=lfs merge=lfs -text
21
+ *.pkl filter=lfs diff=lfs merge=lfs -text
22
+ *.pt filter=lfs diff=lfs merge=lfs -text
23
+ *.pth filter=lfs diff=lfs merge=lfs -text
24
+ *.rar filter=lfs diff=lfs merge=lfs -text
25
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
26
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
28
+ *.tflite filter=lfs diff=lfs merge=lfs -text
29
+ *.tgz filter=lfs diff=lfs merge=lfs -text
30
+ *.wasm filter=lfs diff=lfs merge=lfs -text
31
+ *.xz filter=lfs diff=lfs merge=lfs -text
32
+ *.zip filter=lfs diff=lfs merge=lfs -text
33
+ *.zst filter=lfs diff=lfs merge=lfs -text
34
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ title: UITest
3
+ emoji: 👁
4
+ colorFrom: indigo
5
+ colorTo: green
6
+ sdk: gradio
7
+ sdk_version: 3.12.0
8
+ app_file: app.py
9
+ pinned: false
10
+ duplicated_from: Chenzhou/Whisper-zh-HK
11
+ ---
12
+
13
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,94 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from transformers import pipeline
2
+ import gradio as gr
3
+ import os
4
+ import subprocess
5
+ from pytube import YouTube
6
+
7
# Load the ASR pipeline once at module import time. The checkpoint is a
# Whisper-small model fine-tuned for Cantonese (zh-HK); swap in your own
# model id to reuse this app.
pipe = pipeline(model="tilos/whisper-small-zh-HK")  # change to "your-username/the-name-you-picked"
8
+
9
def video2mp3(video_file, output_ext="mp3"):
    """Extract the audio track of *video_file* into a sibling audio file.

    Parameters
    ----------
    video_file : str
        Path to the input video.
    output_ext : str
        Extension (and implied format) of the audio file to write; default "mp3".

    Returns
    -------
    str
        Path of the extracted audio file (input path with its extension swapped).
    """
    filename, _ext = os.path.splitext(video_file)
    # BUG FIX: the output path was the literal string "(unknown).<ext>" instead
    # of being derived from the input's basename, so every conversion wrote to
    # and returned the same bogus file. Build it from `filename` instead.
    output_file = f"{filename}.{output_ext}"
    # -y: overwrite an existing output without prompting; ffmpeg chatter is
    # discarded (stdout -> DEVNULL, stderr folded into stdout).
    subprocess.call(
        ["ffmpeg", "-y", "-i", video_file, output_file],
        stdout=subprocess.DEVNULL,
        stderr=subprocess.STDOUT,
    )
    return output_file
15
+
16
def transcribe(audio):
    """Run the Whisper pipeline on an audio file path and return the transcript text."""
    return pipe(audio)["text"]
19
+
20
+
21
def get_text(url):
    """Transcribe the audio of a YouTube video given its URL.

    Downloads the audio via get_audio(), runs the ASR pipeline on it, and
    returns the transcript with surrounding whitespace stripped.
    """
    audio_path = get_audio(url)
    transcript = pipe(audio_path)
    return transcript['text'].strip()
24
+
25
def get_audio(url):
    """Download the audio-only stream of a YouTube video into the current directory.

    The file pytube saves (with the stream's native extension) is renamed to
    carry a .mp3 suffix; the bytes themselves are left untouched. Returns the
    path of the renamed file.
    """
    yt = YouTube(url)
    audio_stream = yt.streams.filter(only_audio=True).first()
    downloaded = audio_stream.download(output_path=".")
    root, _ = os.path.splitext(downloaded)
    mp3_path = root + '.mp3'
    # NOTE(review): only the extension changes here — presumably the pipeline
    # accepts the original container despite the .mp3 name; verify if formats
    # other than mp4 audio appear.
    os.rename(downloaded, mp3_path)
    return mp3_path
34
+
35
def offline_video(video):
    """Transcribe an uploaded video file: extract its audio track, then run Whisper."""
    return transcribe(video2mp3(video))
39
+
40
+
41
# Assemble the demo UI: one Interface for video upload plus three manual
# Row/Column sections (audio upload, microphone, YouTube URL), each wired to
# the matching transcription function above.
# NOTE(review): indentation was reconstructed from a formatting-mangled dump —
# confirm nesting against the original file. The gr.Audio(source=...) argument
# is Gradio 3.x API (sdk_version 3.12.0 per README).
with gr.Blocks() as demo:

    # video file input
    gr.Interface(
        title="Whisper: Real Time Cantonese Recognition",
        description="Realtime demo for Cantonese speech recognition using a fine-tuned Whisper small model. "
                    "Generate zh-HK subtitle from video file, audio file, your microphone, and Youtube URL",
        fn=offline_video,
        inputs="video",
        outputs="text",
        allow_flagging="never",
    )

    # audio file input
    with gr.Row():
        with gr.Column():
            input_audio = gr.Audio(source="upload", type="filepath")
            micro_btn = gr.Button('Generate Voice Subtitles')
        with gr.Column():
            output_audio = gr.Textbox(placeholder='Transcript from audio', label='Subtitles')
    # Bind the upload button before `micro_btn` is rebound by the microphone
    # section below — the click handler keeps a reference to this button.
    micro_btn.click(transcribe, inputs=input_audio, outputs=output_audio)
    # Dead code kept as a bare (no-op) string: an earlier Interface-based
    # variant of the audio section.
    """
    gr.Interface(
        fn=transcribe,
        title="Whisper: zh-HK Subtitle Generator",
        description="Generate zh-HK subtitle from audio file, your microphone and Youtube",
        inputs = gr.Audio(source="upload", type="filepath", optional=True),
        outputs = "text",
        allow_flagging= "never",
    )
    """

    # microphone input
    with gr.Row():
        with gr.Column():
            # NOTE(review): "mircro" is a typo in the local variable name;
            # harmless, but worth fixing alongside a behavior change.
            input_mircro = gr.Audio(source="microphone", type="filepath")
            micro_btn = gr.Button('Generate Voice Subtitles')
        with gr.Column():
            output_micro = gr.Textbox(placeholder='Transcript from mic', label='Subtitles')
    micro_btn.click(transcribe, inputs=input_mircro, outputs=output_micro)

    # Youtube url input
    with gr.Row():
        with gr.Column():
            inputs_url = gr.Textbox(placeholder='Youtube URL', label='URL')
            url_btn = gr.Button('Generate Youtube Video Subtitles')
            examples = gr.Examples(examples=["https://www.youtube.com/watch?v=Yw4EoGWe0vw"],inputs=[inputs_url])
        with gr.Column():
            output_url = gr.Textbox(placeholder='Transcript from video.', label='Transcript')
    url_btn.click(get_text, inputs=inputs_url, outputs=output_url )


# debug=True surfaces tracebacks in the UI — useful for a demo Space.
demo.launch(debug=True)
requirements.txt ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ torch
2
+ numpy
3
+ transformers
4
+ gradio
5
+ pytube