Update app.py
app.py CHANGED
@@ -48,7 +48,7 @@ conn.close()
 '''

 # Create a sample DataFrame with 3,000 records and 20 columns
-num_records =
+num_records = 30
 num_columns = 20

 data = {
@@ -59,8 +59,8 @@ data = {
 years = list(range(2000, 2023)) # Range of years
 cities = ["New York", "Los Angeles", "Chicago", "Houston", "Miami"] # List of cities

-data["year"] = [random.choice(years) for _ in range(num_records)]
-data["city"] = [random.choice(cities) for _ in range(num_records)]
+#data["year"] = [random.choice(years) for _ in range(num_records)]
+#data["city"] = [random.choice(cities) for _ in range(num_records)]

 table = pd.DataFrame(data)

@@ -76,11 +76,17 @@ chatbot_model_name = "microsoft/DialoGPT-medium"
 tokenizer = AutoTokenizer.from_pretrained(chatbot_model_name)
 model = AutoModelForCausalLM.from_pretrained(chatbot_model_name)

+cmax_token_limit = tokenizer.max_model_input_sizes[chatbot_model_name]
+print(f"Chat bot Maximum token limit for {chatbot_model_name}: {cmax_token_limit}")
+
 # Load the SQL Model
 sql_model_name = "microsoft/tapex-large-finetuned-wtq"
 sql_tokenizer = TapexTokenizer.from_pretrained(sql_model_name)
 sql_model = BartForConditionalGeneration.from_pretrained(sql_model_name)

+max_token_limit = TapexTokenizer.max_model_input_sizes[sql_model_name]
+print(f"SQL Maximum token limit for {sql_model_name}: {max_token_limit}")
+
 #sql_response = None
 conversation_history = []

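Note on the added lookups: max_model_input_sizes is a class-level dict keyed by the checkpoint names the tokenizer class ships with, so indexing it with these exact model ids can raise a KeyError if the id is not listed. A minimal, more defensive sketch of the same check, assuming the model names used in the diff (this is not part of the commit):

from transformers import AutoTokenizer, TapexTokenizer

chatbot_model_name = "microsoft/DialoGPT-medium"
sql_model_name = "microsoft/tapex-large-finetuned-wtq"

tokenizer = AutoTokenizer.from_pretrained(chatbot_model_name)
sql_tokenizer = TapexTokenizer.from_pretrained(sql_model_name)

# Fall back to the instance-level model_max_length when the checkpoint id
# is not a key in the class-level max_model_input_sizes table.
cmax_token_limit = tokenizer.max_model_input_sizes.get(
    chatbot_model_name, tokenizer.model_max_length
)
max_token_limit = sql_tokenizer.max_model_input_sizes.get(
    sql_model_name, sql_tokenizer.model_max_length
)

print(f"Chat bot maximum token limit for {chatbot_model_name}: {cmax_token_limit}")
print(f"SQL maximum token limit for {sql_model_name}: {max_token_limit}")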
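For reference, the TAPEX tokenizer and model loaded above are typically queried against a pandas DataFrame as sketched below, using the standard transformers TAPEX API. The table contents and question here are hypothetical stand-ins, not values from app.py:

import pandas as pd
from transformers import TapexTokenizer, BartForConditionalGeneration

sql_model_name = "microsoft/tapex-large-finetuned-wtq"
sql_tokenizer = TapexTokenizer.from_pretrained(sql_model_name)
sql_model = BartForConditionalGeneration.from_pretrained(sql_model_name)

# TAPEX expects every table cell to be a string.
table = pd.DataFrame(
    {"city": ["New York", "Chicago", "Chicago"], "year": ["2001", "2005", "2010"]}
)
question = "how many rows are from Chicago?"  # hypothetical example query

encoding = sql_tokenizer(table=table, query=question, return_tensors="pt")
outputs = sql_model.generate(**encoding)
answer = sql_tokenizer.batch_decode(outputs, skip_special_tokens=True)[0]
print(answer)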