pedropauletti committed
Commit 111bf40
Parent: 2b93794

Update app.py

Files changed (1):
  1. app.py +38 -38
app.py CHANGED
@@ -89,63 +89,63 @@ def to_chatbot():

 with gr.Blocks() as demo:

-    with gr.Accordion("Settings", open=True):
-        language = gr.Radio(["en-us", "pt-br"], label="Language", info="Choose the language to display the classification result and audio", value='en-us', interactive=True)
+    # with gr.Accordion("Settings", open=True):
+    #     language = gr.Radio(["en-us", "pt-br"], label="Language", info="Choose the language to display the classification result and audio", value='en-us', interactive=True)

     with gr.Row():
-        btn0 = gr.Button("Audio Classification", scale=1, icon='content/Audio Classification.png', size='lg')
-        btn1 = gr.Button("Realtime Audio Classification", scale=1, icon='content/Realtime Audio Classification.png', size='lg')
-        btn2 = gr.Button("Speech Recognition", scale=1, icon='content/Speech Recognition.png', size='lg')
-        btn3 = gr.Button("Help", scale=1, icon='content/Chatbot.png', size='lg')
+        btn0 = gr.Button("Classificação de áudio", scale=1, icon='content/Audio Classification.png', size='lg')
+        btn1 = gr.Button("Classificação de áudio em tempo real", scale=1, icon='content/Realtime Audio Classification.png', size='lg')
+        btn2 = gr.Button("Reconhecimento de Fala", scale=1, icon='content/Speech Recognition.png', size='lg')
+        btn3 = gr.Button("Ajuda Q&A", scale=1, icon='content/Chatbot.png', size='lg')

     with gr.Row(visible=False) as audio_classification:
         with gr.Column(min_width=700):
-            with gr.Accordion("Record an Audio", open=True):
-                inputRecord = gr.Audio(label="Audio Input", source="microphone", type="filepath")
-            with gr.Accordion("Upload a file", open=False):
-                inputUpload = gr.Audio(label="Audio Input", source="upload", type="filepath")
+            with gr.Accordion("Grave um áudio", open=True):
+                inputRecord = gr.Audio(label="Entrada de áudio", source="microphone", type="filepath")
+            with gr.Accordion("Carregue um arquivo", open=False):
+                inputUpload = gr.Audio(label="Entrada de áudio", source="upload", type="filepath")
             clearBtn = gr.ClearButton([inputRecord, inputUpload])
         with gr.Column(min_width=700):
-            output = gr.Label(label="Audio Classification")
-            btn = gr.Button(value="Generate Audio")
-            audioOutput = gr.Audio(label="Audio Output", interactive=False)
+            output = gr.Label(label="Classificação de Áudio")
+            btn = gr.Button(value="Gerar áudio")
+            audioOutput = gr.Audio(label="Saída de áudio", interactive=False)


-    inputRecord.stop_recording(interface, [inputRecord, language], [output])
-    inputUpload.upload(interface, [inputUpload, language], [output])
-    btn.click(fn=TTS, inputs=[output, language], outputs=audioOutput)
+    inputRecord.stop_recording(interface, [inputRecord, "pt-br"], [output])
+    inputUpload.upload(interface, [inputUpload, "pt-br"], [output])
+    btn.click(fn=TTS, inputs=[output, "pt-br"], outputs=audioOutput)

     examples = gr.Examples(fn=interface, examples=examples_audio_classification, inputs=[inputRecord], outputs=[output], run_on_click=True)

     with gr.Row(visible=False) as realtime_classification:
         with gr.Column(min_width=700):
-            input = gr.Audio(label="Audio Input", source="microphone", type="filepath", streaming=True, every=10)
-            historyOutput = gr.Textbox(label="History", interactive=False)
+            input = gr.Audio(label="Entrada de áudio", source="microphone", type="filepath", streaming=True, every=10)
+            historyOutput = gr.Textbox(label="Histórico", interactive=False)
             # historyOutput = gr.Label(label="History")
         with gr.Column(min_width=700):
-            output = gr.Label(label="Audio Classification")
+            output = gr.Label(label="Classificação de Áudio")

-    input.change(interface_realtime, [input, language], output)
+    input.change(interface_realtime, [input, "pt-br"], output)
     input.change(updateHistory, None, historyOutput)
     input.start_recording(clearHistory, None, historyOutput)


     with gr.Row(visible=False) as speech_recognition:
         with gr.Column(min_width=700):
-            with gr.Accordion("Record an Audio", open=True):
-                inputRecord = gr.Audio(label="Audio Input", source="microphone", type="filepath")
-            with gr.Accordion("Upload a file", open=False):
-                inputUpload = gr.Audio(label="Audio Input", source="upload", type="filepath")
+            with gr.Accordion("Grave um áudio", open=True):
+                inputRecord = gr.Audio(label="Entrada de áudio", source="microphone", type="filepath")
+            with gr.Accordion("Carregue um arquivo", open=False):
+                inputUpload = gr.Audio(label="Entrada de áudio", source="upload", type="filepath")
             clearBtn = gr.ClearButton([inputRecord])
         with gr.Column(min_width=700):
-            output = gr.Label(label="Transcription")
+            output = gr.Label(label="Transcrição")


-    inputRecord.stop_recording(transcribe_speech, [inputRecord, language], [output])
-    inputUpload.upload(transcribe_speech, [inputUpload, language], [output])
+    inputRecord.stop_recording(transcribe_speech, [inputRecord, "pt-br"], [output])
+    inputUpload.upload(transcribe_speech, [inputUpload, "pt-br"], [output])

-    examplesSpeechEn = gr.Examples(fn=transcribe_speech, examples=examples_speech_recognition_en, inputs=[inputRecord], outputs=[output], run_on_click=True, label="Examples")
-    # examplesSpeechPtbr = gr.Examples(fn=transcribe_speech, examples=examples_speech_recognition_ptbr, inputs=[inputRecord], outputs=[output], run_on_click=True, label="Portuguese Examples")
+    # examplesSpeechEn = gr.Examples(fn=transcribe_speech, examples=examples_speech_recognition_en, inputs=[inputRecord], outputs=[output], run_on_click=True, label="Examples")
+    examplesSpeechPtbr = gr.Examples(fn=transcribe_speech, examples=examples_speech_recognition_ptbr, inputs=[inputRecord], outputs=[output], run_on_click=True, label="Portuguese Examples")

     with gr.Row(visible=False) as chatbot_qa:
         chatbot = gr.Chatbot(
@@ -159,28 +159,28 @@ with gr.Blocks() as demo:
         txt = gr.Textbox(
             scale=4,
             show_label=False,
-            placeholder="Enter text and press enter",
+            placeholder="Escreva o texto e pressione enter",
             container=False,
             min_width=1000
         )
         submit = gr.Button(value="", size='sm', scale=1, icon='content/send-icon.png')


-        inputRecord = gr.Audio(label="Record a question", source="microphone", type="filepath", min_width=600)
-        btn = gr.Button(value="Listen the answer")
+        inputRecord = gr.Audio(label="Grave uma pergunta", source="microphone", type="filepath", min_width=600)
+        btn = gr.Button(value="Escute a resposta")
         audioOutput = gr.Audio(interactive=False, min_width=600)

     txt_msg = txt.submit(add_text, [chatbot, txt], [chatbot, txt], queue=False).then(
-        chatbot_response, [chatbot, language], chatbot)
+        chatbot_response, [chatbot, "pt-br"], chatbot)
     txt_msg.then(lambda: gr.Textbox(interactive=True), None, [txt], queue=False)
     submit.click(add_text, [chatbot, txt], [chatbot, txt], queue=False).then(
-        chatbot_response, [chatbot, language], chatbot).then(lambda: gr.Textbox(interactive=True), None, [txt], queue=False)
-    inputRecord.stop_recording(transcribe_speech, [inputRecord, language], [txt])
-    btn.click(fn=TTS_chatbot, inputs=[language], outputs=audioOutput)
+        chatbot_response, [chatbot, "pt-br"], chatbot).then(lambda: gr.Textbox(interactive=True), None, [txt], queue=False)
+    inputRecord.stop_recording(transcribe_speech, [inputRecord, "pt-br"], [txt])
+    btn.click(fn=TTS_chatbot, inputs=["pt-br"], outputs=audioOutput)

     with gr.Row(min_width=2000):
-        examplesChatbotEn = gr.Examples(examples=examples_chatbot_en, inputs=[txt], label="English Examples")
-        examplesChatbotPtbr = gr.Examples(examples=examples_chatbot_ptbr, inputs=[txt], label="Portuguese Examples")
+        # examplesChatbotEn = gr.Examples(examples=examples_chatbot_en, inputs=[txt], label="English Examples")
+        examplesChatbotPtbr = gr.Examples(examples=examples_chatbot_ptbr, inputs=[txt], label="Exemplos")


     btn0.click(fn=to_audioClassification, outputs=[audio_classification, realtime_classification, speech_recognition, chatbot_qa])
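
A note on the hardcoded language values above: the removed gr.Radio was a Gradio component, so passing it in an inputs list such as [inputRecord, language] was valid, but the bare string "pt-br" that replaces it is not a component, and Blocks event listeners expect components in their inputs, so these calls will likely fail when the app is built. Below is a minimal sketch of one way to keep the language fixed to "pt-br" while still satisfying the API, using gr.State as a non-rendered value holder. It assumes the same Gradio 3.x API the file already uses, and the interface stub is a hypothetical placeholder for the app's real classifier, not code from this repository.

import gradio as gr

def interface(audio_path, language):
    # Hypothetical stand-in for the app's real audio classifier; returns
    # the {label: confidence} mapping that gr.Label expects.
    return {f"placeholder ({language})": 1.0}

with gr.Blocks() as demo:
    # gr.State is a non-rendered component that always holds "pt-br",
    # standing in for the removed gr.Radio language selector.
    language = gr.State("pt-br")

    inputRecord = gr.Audio(label="Entrada de áudio", source="microphone", type="filepath")
    output = gr.Label(label="Classificação de Áudio")

    # The listener receives a component here, and Gradio unpacks it to
    # the string "pt-br" when the handler runs.
    inputRecord.stop_recording(interface, [inputRecord, language], [output])

demo.launch()

The same State instance could be reused in every listener that previously received the radio component ([input, language], [chatbot, language], inputs=[language], and so on), leaving the rest of the change untouched.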