Update app.py
app.py CHANGED
@@ -11,16 +11,16 @@ import random
 
 #import pyodbc
 
+'''
 import pkg_resources
 
-
 # Get a list of installed packages and their versions
 installed_packages = {pkg.key: pkg.version for pkg in pkg_resources.working_set}
 
 # Print the list of packages
 for package, version in installed_packages.items():
     print(f"{package}=={version}")
-
+'''
 
 '''
 # Replace the connection parameters with your SQL Server information
@@ -47,8 +47,8 @@ df = pd.read_sql_query(query, conn)
 conn.close()
 '''
 
-# Create a sample DataFrame with 3,000 records and 20 columns
 '''
+# Create a sample DataFrame with 3,000 records and 20 columns
 num_records = 100
 num_columns = 20
 
@@ -133,17 +133,6 @@ def sqlquery(input): #, history=[]):
     sql_outputs = sql_model.generate(**sql_encoding)
     sql_response = sql_tokenizer.batch_decode(sql_outputs, skip_special_tokens=True)
 
-
-    #global conversation_history
-    '''
-    # Maintain the conversation history
-    conversation_history.append("User: " + input + "\n")
-    conversation_history.append("Bot: " + " ".join(sql_response) + "\n" )
-
-    output = " ".join(conversation_history)
-    return output
-    '''
-
     #history.append((input, sql_response))
     conversation_history.append(("User", input))
     conversation_history.append(("Bot", sql_response))
@@ -153,7 +142,7 @@ def sqlquery(input): #, history=[]):
     conversation = "\n".join([f"{sender}: {msg}" for sender, msg in conversation_history])
 
     return conversation
-
+    #return sql_response
     #return sql_response, history
 
     '''
@@ -164,8 +153,6 @@ def sqlquery(input): #, history=[]):
     html += "</div>"
     return html
     '''
-    #return sql_response
-
 
 chat_interface = gr.Interface(
     fn=chat,
@@ -211,4 +198,37 @@ combine_interface = gr.TabbedInterface(
 
 if __name__ == '__main__':
     combine_interface.launch()
-#iface.launch(debug=True)
+#iface.launch(debug=True)
+
+
+'''
+batch_size = 10  # Number of records in each batch
+num_records = 3000  # Total number of records in the dataset
+
+for start_idx in range(0, num_records, batch_size):
+    end_idx = min(start_idx + batch_size, num_records)
+
+    # Get a batch of records
+    batch_data = dataset[start_idx:end_idx]  # Replace with your dataset
+
+    # Tokenize the batch
+    tokenized_batch = tokenizer.batch_encode_plus(
+        batch_data, padding=True, truncation=True, return_tensors="pt"
+    )
+
+    # Perform inference
+    with torch.no_grad():
+        output = model.generate(
+            input_ids=tokenized_batch["input_ids"],
+            max_length=1024,
+            pad_token_id=tokenizer.eos_token_id,
+        )
+
+    # Decode the output and process the responses
+    responses = [tokenizer.decode(ids, skip_special_tokens=True) for ids in output]
+
+    # Process responses and maintain conversation context
+    # ...
+
+
+'''
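The `sqlquery` handler in the diff keeps a running transcript by appending `("User", input)` and `("Bot", sql_response)` tuples to `conversation_history` and joining them into one string. A minimal self-contained sketch of that pattern, with the model call stubbed out (the stub reply is illustrative, not the app's real model output):

```python
conversation_history = []

def respond(user_input: str) -> str:
    # Stand-in for the SQL model call in app.py; only the history
    # handling below mirrors the actual code.
    bot_reply = f"SELECT ...  -- generated for: {user_input}"

    conversation_history.append(("User", user_input))
    conversation_history.append(("Bot", bot_reply))

    # Render the whole history as one transcript string, as the app does.
    return "\n".join(f"{sender}: {msg}" for sender, msg in conversation_history)

print(respond("show all customers"))
print(respond("now only the active ones"))
```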
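The block added at the bottom of the file (inside a `'''` string, so currently inert) sketches batched generation over a 3,000-record dataset, but it references undefined names (`dataset`, `tokenizer`, `model`, `torch`) and `batch_encode_plus` is deprecated in recent transformers releases. A runnable version of the same idea, assuming an illustrative small causal LM (`gpt2`) and a toy in-memory list in place of the real dataset:

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Illustrative stand-ins for the names the commented-out sketch leaves undefined.
model_name = "gpt2"  # assumption: any small causal LM works for the demo
dataset = [f"Record {i}: example text" for i in range(30)]  # toy stand-in for 3,000 records

tokenizer = AutoTokenizer.from_pretrained(model_name)
tokenizer.pad_token = tokenizer.eos_token  # gpt2 defines no pad token by default
tokenizer.padding_side = "left"            # left padding for decoder-only generation
model = AutoModelForCausalLM.from_pretrained(model_name)
model.eval()

batch_size = 10  # number of records in each batch
for start_idx in range(0, len(dataset), batch_size):
    batch_data = dataset[start_idx:start_idx + batch_size]

    # Tokenize the batch; calling the tokenizer directly replaces
    # the deprecated batch_encode_plus.
    tokenized_batch = tokenizer(
        batch_data, padding=True, truncation=True, return_tensors="pt"
    )

    # Perform inference without gradient tracking.
    with torch.no_grad():
        output = model.generate(
            input_ids=tokenized_batch["input_ids"],
            attention_mask=tokenized_batch["attention_mask"],
            max_length=64,
            pad_token_id=tokenizer.eos_token_id,
        )

    # Decode the generated ids back to text.
    responses = [tokenizer.decode(ids, skip_special_tokens=True) for ids in output]
    # ... process responses / maintain conversation context here
```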
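For orientation, the launch block at the end wires two `gr.Interface` objects into the `gr.TabbedInterface` named `combine_interface`. A minimal sketch of that wiring, with placeholder handlers and tab labels (the real app's handlers and labels differ):

```python
import gradio as gr

def chat(message):
    return f"echo: {message}"  # placeholder for the app's chat handler

def sqlquery(question):
    return f"SELECT ...  -- {question}"  # placeholder for the SQL handler

chat_interface = gr.Interface(fn=chat, inputs="text", outputs="text")
sql_interface = gr.Interface(fn=sqlquery, inputs="text", outputs="text")

combine_interface = gr.TabbedInterface(
    [chat_interface, sql_interface], ["Chat", "SQL"]
)

if __name__ == '__main__':
    combine_interface.launch()
```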