anyantudre committed on
Commit
ee5f3cc
1 Parent(s): 5ef186d

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +15 -16
app.py CHANGED
@@ -1,33 +1,31 @@
1
  import torch
2
  import scipy
3
  import gradio as gr
4
-
5
- from transformers import set_seed, pipeline
6
- from transformers import VitsTokenizer, VitsModel
7
- from transformers import pipeline, AutoTokenizer, AutoModelForSeq2SeqLM
8
  from datasets import load_dataset, Audio
9
 
10
- import speech_to_text, text_to_speech, translation
11
 
12
- language_list = ['mos', 'fra', 'eng']
 
13
 
14
  demo = gr.Blocks()
15
 
16
- mms_stt = gr.Interface(
17
- fn=speech_to_text.transcribe,
18
  inputs=[
19
  gr.Audio(sources=["microphone", "upload"], type="filepath"),
20
- gr.Dropdown(language_list, label="Language")
21
  ],
22
  outputs="text",
23
  title="Speech-to-text"
24
  )
25
 
26
- mms_tts = gr.Interface(
27
- fn=text_to_speech.synthesize_facebook,
28
  inputs=[
29
  gr.Text(label="Input text"),
30
- gr.Dropdown(language_list, label="Language")
31
  ],
32
  outputs=[
33
  gr.Audio(label="Generated Audio", type="numpy")
@@ -35,21 +33,22 @@ mms_tts = gr.Interface(
35
  title="Text-to-speech"
36
  )
37
 
38
- mms_translate = gr.Interface(
39
- fn=translation.translation,
40
  inputs=[
41
  gr.Textbox(label="Text", placeholder="Yaa sõama"),
42
  gr.Dropdown(label="Source Language", choices=["eng_Latn", "fra_Latn", "mos_Latn"]),
43
  gr.Dropdown(label="Target Language", choices=["eng_Latn", "fra_Latn", "mos_Latn"])
44
  ],
45
  outputs=["text"],
46
- examples=[["Building a translation demo with Gradio is so easy!", "eng_Latn", "mos_Latn"]],
47
  title="Translation Demo",
48
  )
49
 
 
50
  with demo:
51
  gr.TabbedInterface(
52
- [mms_translate, mms_tts, mms_stt],
53
  ["Translation", "Text-to-speech", "Speech-to-text"],
54
  )
55
 
 
"""Gradio demo app exposing three tabs: translation, text-to-speech and
speech-to-text for Mooré (mos), French (fra) and English (eng), backed by
the project-local goai_* modules."""

import torch
import scipy
import gradio as gr
from transformers import set_seed

from datasets import load_dataset, Audio

import goai_stt, goai_tts, goai_traduction

#language_list = ['mos', 'fra', 'eng']
# transformers pipelines accept a GPU index (int) or the string "cpu".
device = 0 if torch.cuda.is_available() else "cpu"

demo = gr.Blocks()

# NOTE(review): the previous revision rebound the imported module names
# (goai_stt = gr.Interface(fn=goai_stt.goai_stt, ...)), shadowing the
# modules after assignment. It only worked because fn= is evaluated before
# the rebind; distinct interface names remove that trap.
stt_interface = gr.Interface(
    fn=goai_stt.goai_stt,
    inputs=[
        gr.Audio(sources=["microphone", "upload"], type="filepath"),
    ],
    outputs="text",
    title="Speech-to-text",
)

tts_interface = gr.Interface(
    fn=goai_tts.goai_tts,
    inputs=[
        gr.Text(label="Input text"),
    ],
    outputs=[
        gr.Audio(label="Generated Audio", type="numpy"),
    ],
    title="Text-to-speech",
)

translation_interface = gr.Interface(
    fn=goai_traduction.goai_traduction,
    inputs=[
        gr.Textbox(label="Text", placeholder="Yaa sõama"),
        gr.Dropdown(label="Source Language", choices=["eng_Latn", "fra_Latn", "mos_Latn"]),
        gr.Dropdown(label="Target Language", choices=["eng_Latn", "fra_Latn", "mos_Latn"]),
    ],
    outputs=["text"],
    examples=[["Yʋʋm a wãn la b kẽesd biig lekolle?", "mos_Latn", "fra_Latn"]],
    title="Translation Demo",
)

with demo:
    gr.TabbedInterface(
        [translation_interface, tts_interface, stt_interface],
        ["Translation", "Text-to-speech", "Speech-to-text"],
    )
54