Update app.py
app.py
CHANGED
@@ -97,14 +97,14 @@ def predict(input, history=[]):
    return response, history


-def sqlquery(input
+def sqlquery(input):

    #input_text = " ".join(conversation_history) + " " + input
    sql_encoding = sql_tokenizer(table=table, query=input + sql_tokenizer.eos_token, return_tensors="pt")
    sql_outputs = sql_model.generate(**sql_encoding)
    sql_response = sql_tokenizer.batch_decode(sql_outputs, skip_special_tokens=True)

-
+    global conversation_history

    # Maintain the conversation history
    conversation_history.append("User: " + input)

@@ -129,7 +129,7 @@ sql_interface = gr.Interface(
    fn=sqlquery,
    theme="default",
    #inputs=gr.Textbox(prompt="You:"),
-    inputs=
+    inputs=gr.Textbox(prompt="You:"),
    outputs=gr.Textbox(),
    live=True,
    capture_session=True,
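For context, the hunks above assume objects defined elsewhere in app.py that the diff does not show: a table-QA tokenizer/model pair (sql_tokenizer, sql_model), a pandas table, and a module-level conversation_history list. The sketch below is a minimal, self-contained illustration of how the corrected sqlquery function and Gradio interface could fit together; the TAPEX checkpoint name, the example table, and the Textbox label are assumptions, not values taken from this commit.

# Minimal sketch only: the checkpoint, table contents, and labels below are
# assumptions, not values from app.py.
import gradio as gr
import pandas as pd
from transformers import BartForConditionalGeneration, TapexTokenizer

# Assumed table-QA checkpoint; app.py may load a different model.
sql_tokenizer = TapexTokenizer.from_pretrained("microsoft/tapex-base-finetuned-wtq")
sql_model = BartForConditionalGeneration.from_pretrained("microsoft/tapex-base-finetuned-wtq")

# Assumed example table; TAPEX works on a pandas DataFrame of strings.
table = pd.DataFrame({"city": ["Paris", "Berlin"], "population": ["2.1", "3.6"]})

conversation_history = []

def sqlquery(input):
    # Encode the table together with the user's question and generate an answer.
    sql_encoding = sql_tokenizer(table=table, query=input, return_tensors="pt")
    sql_outputs = sql_model.generate(**sql_encoding)
    sql_response = sql_tokenizer.batch_decode(sql_outputs, skip_special_tokens=True)

    # Maintain the conversation history, as the patched function does.
    conversation_history.append("User: " + input)
    conversation_history.append("Bot: " + " ".join(sql_response))
    return " ".join(sql_response)

sql_interface = gr.Interface(
    fn=sqlquery,
    inputs=gr.Textbox(label="You:"),  # using label here; the commit passes prompt="You:"
    outputs=gr.Textbox(),
    live=True,
)

if __name__ == "__main__":
    sql_interface.launch()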