neke-leo committed on
Commit 81bc8b3
1 Parent(s): 42b9715

FIX: Add small fixes

Files changed (2)
  1. app.py +3 -2
  2. bot_gradio.ipynb +13 -72
app.py CHANGED
@@ -114,7 +114,7 @@ def bot_respond(user_query, history: list):
     context = context_format(qnas)
     prompt += context
 
-    chat_messages.append({"role": "user", "content": user_query})
+    chat_messages.append({"role": "user", "content": prompt})
 
     completion = openai.ChatCompletion.create(
         model="gpt-4", messages=chat_messages, temperature=0
@@ -196,6 +196,7 @@ def handle_audiofile(audio_filepath: str, history: list):
             res["prompt"],
             display_history(history["chat_messages"]),
             res["mp3_path"],
+            "runtime_questions.json",
         )
     else:
         bot_response_text = res["bot_response"]
@@ -260,7 +261,7 @@ with gr.Blocks() as demo:
     conversation_history = gr.Textbox(label="Conversation history")
 
     with gr.Row():
-        file_output = gr.File(label="Download questions file", download=True)
+        file_output = gr.File(label="Download questions file")
 
     # when the audio input is stopped, run the transcribe function
     audio_input.stop_recording(
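Note on the app.py changes: in bot_respond, the retrieved Q&A context is concatenated into prompt, but the message appended to chat_messages previously carried the raw user_query, so the retrieved context never reached the model; the fix appends the context-augmented prompt instead. The other two edits are mechanical: handle_audiofile returns "runtime_questions.json" as an extra output (presumably wired to the questions-file download component), and the unsupported download=True kwarg is dropped from gr.File, matching the GradioUnusedKwargWarning visible in the notebook output removed below. A minimal sketch of the corrected prompt flow follows; the signature is simplified and context_format is a stand-in for the app's own helper, not the actual implementation:

# Sketch of the corrected flow in bot_respond (simplified, not the full app).
import openai

def context_format(qnas: list) -> str:
    # Stand-in for the app's helper: format retrieved Q&A pairs as context text.
    return "\n\nContext:\n" + "\n".join(
        f"Q: {q['question']}\nA: {q['answer']}" for q in qnas
    )

def bot_respond(user_query: str, chat_messages: list, qnas: list) -> str:
    prompt = user_query
    prompt += context_format(qnas)

    # The fix: send the context-augmented prompt, not the raw user_query,
    # so GPT-4 actually sees the retrieved context.
    chat_messages.append({"role": "user", "content": prompt})

    completion = openai.ChatCompletion.create(
        model="gpt-4", messages=chat_messages, temperature=0
    )
    return completion["choices"][0]["message"]["content"]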
bot_gradio.ipynb CHANGED
@@ -2,7 +2,7 @@
  "cells": [
   {
    "cell_type": "code",
-   "execution_count": 1,
+   "execution_count": null,
    "metadata": {},
    "outputs": [],
    "source": [
@@ -17,7 +17,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 2,
+   "execution_count": null,
    "metadata": {},
    "outputs": [],
    "source": [
@@ -34,7 +34,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 3,
+   "execution_count": null,
    "metadata": {},
    "outputs": [],
    "source": [
@@ -49,7 +49,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 4,
+   "execution_count": null,
    "metadata": {},
    "outputs": [],
    "source": [
@@ -64,17 +64,9 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 5,
+   "execution_count": null,
    "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "Conversation conversations/3331\n"
-     ]
-    }
-   ],
+   "outputs": [],
    "source": [
     "from langdetect import detect\n",
     "import random\n",
@@ -158,7 +150,7 @@
     " context = context_format(qnas)\n",
     " prompt += context\n",
     "\n",
-    " chat_messages.append({\"role\": \"user\", \"content\": user_query})\n",
+    " chat_messages.append({\"role\": \"user\", \"content\": prompt})\n",
     "\n",
     " completion = openai.ChatCompletion.create(\n",
     " model=\"gpt-4\", messages=chat_messages, temperature=0\n",
@@ -198,7 +190,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 6,
+   "execution_count": null,
    "metadata": {},
    "outputs": [],
    "source": [
@@ -218,7 +210,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 7,
+   "execution_count": null,
    "metadata": {},
    "outputs": [],
    "source": [
@@ -251,6 +243,7 @@
     " res[\"prompt\"],\n",
     " display_history(history[\"chat_messages\"]),\n",
     " res[\"mp3_path\"],\n",
+    " \"runtime_questions.json\",\n",
     " )\n",
     " else:\n",
     " bot_response_text = res[\"bot_response\"]\n",
@@ -288,54 +281,9 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 8,
+   "execution_count": null,
    "metadata": {},
-   "outputs": [
-    {
-     "name": "stderr",
-     "output_type": "stream",
-     "text": [
-      "c:\\Users\\LeonidTanas\\source\\BiogenaProject\\PhoneBot\\.env\\lib\\site-packages\\tqdm\\auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
-      " from .autonotebook import tqdm as notebook_tqdm\n",
-      "C:\\Users\\LeonidTanas\\AppData\\Local\\Temp\\ipykernel_20200\\2772520277.py:26: GradioUnusedKwargWarning: You have unused kwarg parameters in File, please remove them: {'download': True}\n",
-      " file_output = gr.File(label=\"Download questions file\", download=True)\n"
-     ]
-    },
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "Running on local URL: http://127.0.0.1:7860\n",
-      "Running on public URL: https://21d8b4f54c5ce2bb30.gradio.live\n",
-      "\n",
-      "This share link expires in 72 hours. For free permanent hosting and GPU upgrades, run `gradio deploy` from Terminal to deploy to Spaces (https://huggingface.co/spaces)\n"
-     ]
-    },
-    {
-     "data": {
-      "text/plain": []
-     },
-     "execution_count": 8,
-     "metadata": {},
-     "output_type": "execute_result"
-    },
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "Transcription Hello, my name is Leo.\n",
-      "Querying database for question: Hello, my name is Leo.\n",
-      "\n",
-      "Total_qnas: 3 [0.43892043828964233, 0.44170859456062317, 0.4578746557235718]\n",
-      "Filtered_qnas: 3\n",
-      "Querying database for question: Hello, my name is Leo.\n",
-      "source == 'base'\n",
-      "Total_qnas: 1 [0.43892043828964233]\n",
-      "Filtered_qnas: 1\n",
-      "Detected language: en for text: Hello Leo! How can I assist you today?\n"
-     ]
-    }
-   ],
+   "outputs": [],
    "source": [
     "import gradio as gr\n",
     "\n",
@@ -362,7 +310,7 @@
     " conversation_history = gr.Textbox(label=\"Conversation history\")\n",
     "\n",
     " with gr.Row():\n",
-    " file_output = gr.File(label=\"Download questions file\", download=True)\n",
+    " file_output = gr.File(label=\"Download questions file\")\n",
     "\n",
     " # when the audio input is stopped, run the transcribe function\n",
     " audio_input.stop_recording(\n",
@@ -381,13 +329,6 @@
     "\n",
     "demo.launch(share=True, inbrowser=True, inline=False)"
    ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": []
   }
  ],
 "metadata": {