Sandiago21 committed
Commit
9d2c3b2
1 Parent(s): cee3120

Upload folder using huggingface_hub

Files changed (2)
  1. app.py +12 -1
  2. example.wav +0 -0
app.py CHANGED
@@ -5,6 +5,14 @@ from transformers import pipeline
 model_id = "Sandiago21/whisper-large-v2-greek" # update with your model id
 pipe = pipeline("automatic-speech-recognition", model=model_id)
 
+
+title = "Automatic Speech Recognition (ASR)"
+description = """
+Demo for automatic speech recognition in Greek. Demo uses [Sandiago21/whisper-large-v2-greek](https://huggingface.co/Sandiago21/whisper-large-v2-greek) checkpoint, which is based on OpenAI's
+[Whisper](https://huggingface.co/openai/whisper-large-v2) model and is fine-tuned in Greek Audio dataset
+![Automatic Speech Recognition (ASR)"](https://datasets-server.huggingface.co/assets/huggingface-course/audio-course-images/--/huggingface-course--audio-course-images/train/2/image/image.png "Diagram of Automatic Speech Recognition (ASR)")
+"""
+
 def transcribe_speech(filepath):
     output = pipe(
         filepath,
@@ -36,6 +44,9 @@ with demo:
     gr.TabbedInterface(
         [mic_transcribe, file_transcribe],
         ["Transcribe Microphone", "Transcribe Audio File"],
-    )
+    ),
+    examples=[["./example.wav"]],
+    tilte=title,
+    description=description,
 
 demo.launch()
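For readers applying this commit locally, the sketch below shows how the hunks above could fit into a complete app.py. The `mic_transcribe` / `file_transcribe` interfaces and the arguments after `filepath` in the `pipe(...)` call are not visible in the diff, so those parts are assumptions based on a typical Gradio ASR demo rather than the file's actual contents; the commit itself passes `examples`, `tilte`, and `description` alongside `gr.TabbedInterface`, while this sketch attaches them to `gr.Interface`, which accepts them directly.

```python
# A sketch, not the commit's exact file: how the pieces shown in the hunks above
# could fit together. mic_transcribe / file_transcribe and the full pipe(...) call
# are assumptions, since the diff cuts off before them.
import gradio as gr
from transformers import pipeline

model_id = "Sandiago21/whisper-large-v2-greek"
pipe = pipeline("automatic-speech-recognition", model=model_id)

title = "Automatic Speech Recognition (ASR)"
description = "Demo for automatic speech recognition in Greek."


def transcribe_speech(filepath):
    # max_new_tokens is an assumed argument; the hunk ends right after `filepath,`.
    output = pipe(filepath, max_new_tokens=256)
    return output["text"]


# Assumed tab interfaces. The example clip, title, and description are attached
# here rather than to gr.TabbedInterface as in the commit.
mic_transcribe = gr.Interface(
    fn=transcribe_speech,
    inputs=gr.Audio(sources=["microphone"], type="filepath"),  # source="microphone" on Gradio 3.x
    outputs="text",
)
file_transcribe = gr.Interface(
    fn=transcribe_speech,
    inputs=gr.Audio(sources=["upload"], type="filepath"),  # source="upload" on Gradio 3.x
    outputs="text",
    examples=[["./example.wav"]],
    title=title,
    description=description,
)

demo = gr.Blocks()
with demo:
    gr.TabbedInterface(
        [mic_transcribe, file_transcribe],
        ["Transcribe Microphone", "Transcribe Audio File"],
    )

demo.launch()
```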
example.wav ADDED
Binary file (603 kB).
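A quick way to sanity-check the newly added example clip locally, assuming the soundfile package is installed:

```python
import soundfile as sf

# Load the committed example clip and print its basic properties.
audio, sample_rate = sf.read("example.wav")
print(f"{len(audio)} samples at {sample_rate} Hz "
      f"({len(audio) / sample_rate:.2f} s of audio)")
```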