Update app.py
app.py CHANGED
@@ -49,8 +49,20 @@ def chatbot_response(user_message, history=[]):
         # If the user input is a question, use TAPEx for question-answering
         #inputs = user_query
         encoding = sql_tokenizer(table=table, query=user_message, return_tensors="pt")
-        outputs = sql_model.generate(**encoding)
-        response = sql_tokenizer.batch_decode(outputs, skip_special_tokens=True)
+        #outputs = sql_model.generate(**encoding)
+        #response = sql_tokenizer.batch_decode(outputs, skip_special_tokens=True)
+
+        # append the new user input tokens to the chat history
+        bot_input_ids = torch.cat([torch.LongTensor(history), **encoding], dim=-1)
+
+        # generate a response
+        history = sql_model.generate(bot_input_ids, max_length=1000, pad_token_id=sql_tokenizer.eos_token_id).tolist()
+
+        # convert the tokens to text, and then split the responses into the right format
+        response = sql_tokenizer.decode(history[0]).split("<|endoftext|>")
+        response = [(response[i], response[i + 1]) for i in range(0, len(response) - 1, 2)]  # convert to tuples of list
+
+
     else:
         # Generate chatbot response using the chatbot model
         '''
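For reference, the chat-history branch added here will not run as committed: `**encoding` is dict unpacking, which Python does not allow inside a list literal, so the `torch.cat([torch.LongTensor(history), **encoding], dim=-1)` line is a SyntaxError. The comments and the `<|endoftext|>` separator are borrowed from the DialoGPT chat recipe, while `sql_model`/`sql_tokenizer` are the TAPEx table-QA pair, whose BART tokenizer uses `</s>` rather than `<|endoftext|>`. Below is a minimal sketch of what the branch appears to intend, assuming `encoding["input_ids"]` is the tensor meant for concatenation; the `microsoft/tapex-base` checkpoint and the sample table are illustrative stand-ins, not necessarily what this Space actually loads.

import pandas as pd
import torch
from transformers import BartForConditionalGeneration, TapexTokenizer

# Assumed stand-ins for the Space's real model, tokenizer, and table.
sql_tokenizer = TapexTokenizer.from_pretrained("microsoft/tapex-base")
sql_model = BartForConditionalGeneration.from_pretrained("microsoft/tapex-base")
table = pd.DataFrame({"city": ["Paris", "London"], "population": ["2148000", "8982000"]})

def chatbot_response(user_message, history=[]):
    # Encode the table together with the question, as in the diff.
    encoding = sql_tokenizer(table=table, query=user_message, return_tensors="pt")

    # Assumption: concatenate the input-id tensor, not the whole encoding dict.
    new_input_ids = encoding["input_ids"]
    if history:
        # history is kept as a nested list of token ids, so LongTensor(history)
        # already has shape (1, seq_len) and concatenates directly.
        bot_input_ids = torch.cat([torch.LongTensor(history), new_input_ids], dim=-1)
    else:
        bot_input_ids = new_input_ids

    # Generate a response and keep the raw token ids as the new history.
    history = sql_model.generate(
        bot_input_ids, max_length=1000, pad_token_id=sql_tokenizer.eos_token_id
    ).tolist()

    # TAPEx's BART tokenizer ends sequences with </s>, not GPT-2's
    # <|endoftext|>; split on its eos token and pair up (user, bot) turns.
    response = sql_tokenizer.decode(history[0]).split(sql_tokenizer.eos_token)
    response = [(response[i], response[i + 1]) for i in range(0, len(response) - 1, 2)]
    return response, history

Note that this history-accumulation pattern only really makes sense for a decoder-only model: with the encoder-decoder TAPEx checkpoint, `generate` returns just the answer ids, so a Space that wants genuine multi-turn chat would typically route this branch to a conversational model such as `microsoft/DialoGPT-medium` instead.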