pedropauletti committed on
Commit
bf61dc0
1 Parent(s): 111bf40

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +12 -12
app.py CHANGED
@@ -89,8 +89,8 @@ def to_chatbot():
89
 
90
  with gr.Blocks() as demo:
91
 
92
- # with gr.Accordion("Settings", open=True):
93
- # language = gr.Radio(["en-us", "pt-br"], label="Language", info="Choose the language to display the classification result and audio", value='en-us', interactive=True)
94
 
95
  with gr.Row():
96
  btn0 = gr.Button("Classificação de áudio", scale=1, icon='content/Audio Classification.png', size='lg')
@@ -111,9 +111,9 @@ with gr.Blocks() as demo:
111
  audioOutput = gr.Audio(label="Saída de áudio", interactive=False)
112
 
113
 
114
- inputRecord.stop_recording(interface, [inputRecord, "pt-br"], [output])
115
- inputUpload.upload(interface, [inputUpload, "pt-br"], [output])
116
- btn.click(fn=TTS, inputs=[output, "pt-br"], outputs=audioOutput)
117
 
118
  examples = gr.Examples(fn=interface, examples=examples_audio_classification, inputs=[inputRecord], outputs=[output], run_on_click=True)
119
 
@@ -125,7 +125,7 @@ with gr.Blocks() as demo:
125
  with gr.Column(min_width=700):
126
  output = gr.Label(label="Classificação de Áudio")
127
 
128
- input.change(interface_realtime, [input, "pt-br"], output)
129
  input.change(updateHistory, None, historyOutput)
130
  input.start_recording(clearHistory, None, historyOutput)
131
 
@@ -141,8 +141,8 @@ with gr.Blocks() as demo:
141
  output = gr.Label(label="Transcrição")
142
 
143
 
144
- inputRecord.stop_recording(transcribe_speech, [inputRecord, "pt-br"], [output])
145
- inputUpload.upload(transcribe_speech, [inputUpload, "pt-br"], [output])
146
 
147
  # examplesSpeechEn = gr.Examples(fn=transcribe_speech, examples=examples_speech_recognition_en, inputs=[inputRecord], outputs=[output], run_on_click=True, label="Examples")
148
  examplesSpeechPtbr = gr.Examples(fn=transcribe_speech, examples=examples_speech_recognition_ptbr, inputs=[inputRecord], outputs=[output], run_on_click=True, label="Portuguese Examples")
@@ -171,12 +171,12 @@ with gr.Blocks() as demo:
171
  audioOutput = gr.Audio(interactive=False, min_width=600)
172
 
173
  txt_msg = txt.submit(add_text, [chatbot, txt], [chatbot, txt], queue=False).then(
174
- chatbot_response, [chatbot, "pt-br"], chatbot)
175
  txt_msg.then(lambda: gr.Textbox(interactive=True), None, [txt], queue=False)
176
  submit.click(add_text, [chatbot, txt], [chatbot, txt], queue=False).then(
177
- chatbot_response, [chatbot, "pt-br"], chatbot).then(lambda: gr.Textbox(interactive=True), None, [txt], queue=False)
178
- inputRecord.stop_recording(transcribe_speech, [inputRecord, "pt-br"], [txt])
179
- btn.click(fn=TTS_chatbot, inputs=["pt-br"], outputs=audioOutput)
180
 
181
  with gr.Row(min_width=2000):
182
  # examplesChatbotEn = gr.Examples(examples=examples_chatbot_en, inputs=[txt], label="English Examples")
 
89
 
90
  with gr.Blocks() as demo:
91
 
92
+ with gr.Accordion("Idioma de saída", open=False):
93
+ language = gr.Radio(["en-us", "pt-br"], label="Language", info="Choose the language to display the classification result and audio", value='pt-br', interactive=True)
94
 
95
  with gr.Row():
96
  btn0 = gr.Button("Classificação de áudio", scale=1, icon='content/Audio Classification.png', size='lg')
 
111
  audioOutput = gr.Audio(label="Saída de áudio", interactive=False)
112
 
113
 
114
+ inputRecord.stop_recording(interface, [inputRecord, language], [output])
115
+ inputUpload.upload(interface, [inputUpload, language], [output])
116
+ btn.click(fn=TTS, inputs=[output, language], outputs=audioOutput)
117
 
118
  examples = gr.Examples(fn=interface, examples=examples_audio_classification, inputs=[inputRecord], outputs=[output], run_on_click=True)
119
 
 
125
  with gr.Column(min_width=700):
126
  output = gr.Label(label="Classificação de Áudio")
127
 
128
+ input.change(interface_realtime, [input, language], output)
129
  input.change(updateHistory, None, historyOutput)
130
  input.start_recording(clearHistory, None, historyOutput)
131
 
 
141
  output = gr.Label(label="Transcrição")
142
 
143
 
144
+ inputRecord.stop_recording(transcribe_speech, [inputRecord, language], [output])
145
+ inputUpload.upload(transcribe_speech, [inputUpload, language], [output])
146
 
147
  # examplesSpeechEn = gr.Examples(fn=transcribe_speech, examples=examples_speech_recognition_en, inputs=[inputRecord], outputs=[output], run_on_click=True, label="Examples")
148
  examplesSpeechPtbr = gr.Examples(fn=transcribe_speech, examples=examples_speech_recognition_ptbr, inputs=[inputRecord], outputs=[output], run_on_click=True, label="Portuguese Examples")
 
171
  audioOutput = gr.Audio(interactive=False, min_width=600)
172
 
173
  txt_msg = txt.submit(add_text, [chatbot, txt], [chatbot, txt], queue=False).then(
174
+ chatbot_response, [chatbot, language], chatbot)
175
  txt_msg.then(lambda: gr.Textbox(interactive=True), None, [txt], queue=False)
176
  submit.click(add_text, [chatbot, txt], [chatbot, txt], queue=False).then(
177
+ chatbot_response, [chatbot, language], chatbot).then(lambda: gr.Textbox(interactive=True), None, [txt], queue=False)
178
+ inputRecord.stop_recording(transcribe_speech, [inputRecord, language], [txt])
179
+ btn.click(fn=TTS_chatbot, inputs=[language], outputs=audioOutput)
180
 
181
  with gr.Row(min_width=2000):
182
  # examplesChatbotEn = gr.Examples(examples=examples_chatbot_en, inputs=[txt], label="English Examples")