Spaces:
Paused
Paused
Update app.py
Browse files
app.py
CHANGED
|
@@ -50,10 +50,13 @@ def predict(_chatbot, task_history_str, user_input):
|
|
| 50 |
task_history = parse_task_history(task_history_str)
|
| 51 |
print("Type of user_input:", type(user_input))
|
| 52 |
print("Type of task_history:", type(task_history))
|
|
|
|
| 53 |
if not isinstance(task_history, list):
|
| 54 |
task_history = []
|
|
|
|
| 55 |
query = user_input if user_input else (task_history[-1][0] if task_history else "")
|
| 56 |
print("User: " + _parse_text(query))
|
|
|
|
| 57 |
if not task_history:
|
| 58 |
return _chatbot
|
| 59 |
history_cp = copy.deepcopy(task_history)
|
|
@@ -79,7 +82,7 @@ def predict(_chatbot, task_history_str, user_input):
|
|
| 79 |
response, history = model.chat(tokenizer, message, history=history)
|
| 80 |
ts_pattern = r"<\|\d{1,2}\.\d+\|>"
|
| 81 |
all_time_stamps = re.findall(ts_pattern, response)
|
| 82 |
-
if
|
| 83 |
ts_float = [ float(t.replace("<|","").replace("|>","")) for t in all_time_stamps]
|
| 84 |
ts_float_pair = [ts_float[i:i + 2] for i in range(0,len(all_time_stamps),2)]
|
| 85 |
# 读取音频文件  (read the audio file)
|
|
@@ -99,9 +102,11 @@ def predict(_chatbot, task_history_str, user_input):
|
|
| 99 |
audio_clip.export(filename, format=format)
|
| 100 |
_chatbot[-1] = (_parse_text(query), chat_response)
|
| 101 |
_chatbot.append((None, (str(filename),)))
|
| 102 |
-
|
| 103 |
-
|
| 104 |
-
|
|
|
|
|
|
|
| 105 |
print("Predict - End: task_history =", task_history)
|
| 106 |
return _chatbot[-1][1], _chatbot
|
| 107 |
|
|
|
|
| 50 |
task_history = parse_task_history(task_history_str)
|
| 51 |
print("Type of user_input:", type(user_input))
|
| 52 |
print("Type of task_history:", type(task_history))
|
| 53 |
+
|
| 54 |
if not isinstance(task_history, list):
|
| 55 |
task_history = []
|
| 56 |
+
|
| 57 |
query = user_input if user_input else (task_history[-1][0] if task_history else "")
|
| 58 |
print("User: " + _parse_text(query))
|
| 59 |
+
|
| 60 |
if not task_history:
|
| 61 |
return _chatbot
|
| 62 |
history_cp = copy.deepcopy(task_history)
|
|
|
|
| 82 |
response, history = model.chat(tokenizer, message, history=history)
|
| 83 |
ts_pattern = r"<\|\d{1,2}\.\d+\|>"
|
| 84 |
all_time_stamps = re.findall(ts_pattern, response)
|
| 85 |
+
if all_time_stamps and last_audio:
|
| 86 |
ts_float = [ float(t.replace("<|","").replace("|>","")) for t in all_time_stamps]
|
| 87 |
ts_float_pair = [ts_float[i:i + 2] for i in range(0,len(all_time_stamps),2)]
|
| 88 |
# 读取音频文件  (read the audio file)
|
|
|
|
| 102 |
audio_clip.export(filename, format=format)
|
| 103 |
_chatbot[-1] = (_parse_text(query), chat_response)
|
| 104 |
_chatbot.append((None, (str(filename),)))
|
| 105 |
+
return str(filename), _chatbot
|
| 106 |
+
else:
|
| 107 |
+
_chatbot.append((query, response))
|
| 108 |
+
return response, _chatbot
|
| 109 |
+
|
| 110 |
print("Predict - End: task_history =", task_history)
|
| 111 |
return _chatbot[-1][1], _chatbot
|
| 112 |
|