teaevo committed
Commit 6de04aa · 1 Parent(s): b84957a

Update app.py

Files changed (1)
  1. app.py +6 -4
app.py CHANGED
@@ -48,10 +48,10 @@ conn.close()
 '''
 
 # Create a sample DataFrame with 3,000 records and 20 columns
+'''
 num_records = 20
 num_columns = 20
 
-
 data = {
     f"column_{i}": np.random.randint(0, 100, num_records) for i in range(num_columns)
 }
@@ -64,12 +64,13 @@ cities = ["New York", "Los Angeles", "Chicago", "Houston", "Miami"] # List of c
 #data["city"] = [random.choice(cities) for _ in range(num_records)]
 
 table = pd.DataFrame(data)
+'''
 
 data = {
     "year": [1896, 1900, 1904, 2004, 2008, 2012],
     "city": ["athens", "paris", "st. louis", "athens", "beijing", "london"]
 }
-#table = pd.DataFrame.from_dict(data)
+table = pd.DataFrame.from_dict(data)
 
 
 # Load the chatbot model
@@ -85,8 +86,9 @@ sql_model_name = "microsoft/tapex-large-finetuned-wtq"
 sql_tokenizer = TapexTokenizer.from_pretrained(sql_model_name)
 sql_model = BartForConditionalGeneration.from_pretrained(sql_model_name)
 
-#max_token_limit = sql_tokenizer.max_model_input_sizes[sql_model_name]
-#print(f"SQL Maximum token limit for {sql_model_name}: {max_token_limit}")
+stokenizer = AutoTokenizer.from_pretrained(model_name)
+max_token_limit = stokenizer.max_model_input_sizes[sql_model_name]
+print(f"SQL Maximum token limit for {sql_model_name}: {max_token_limit}")
 
 #sql_response = None
 conversation_history = []
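
For reference, the `table` this commit now builds from the Olympic `year`/`city` dict is the same sample table shown in the TAPEX documentation, and it can be queried with the `sql_tokenizer`/`sql_model` pair loaded above. A minimal standalone sketch of that usage follows; the question string is illustrative, the cells are cast to str as a precaution, and app.py may wire the call differently.

import pandas as pd
from transformers import TapexTokenizer, BartForConditionalGeneration

sql_model_name = "microsoft/tapex-large-finetuned-wtq"
sql_tokenizer = TapexTokenizer.from_pretrained(sql_model_name)
sql_model = BartForConditionalGeneration.from_pretrained(sql_model_name)

# Same sample table that app.py now builds with pd.DataFrame.from_dict(data)
data = {
    "year": [1896, 1900, 1904, 2004, 2008, 2012],
    "city": ["athens", "paris", "st. louis", "athens", "beijing", "london"]
}
table = pd.DataFrame.from_dict(data).astype(str)  # cast cells to str as a precaution

# Illustrative question; the tokenizer linearizes the table together with the query
query = "in which year did beijing host the olympic games?"
encoding = sql_tokenizer(table=table, query=query, return_tensors="pt")
outputs = sql_model.generate(**encoding)
print(sql_tokenizer.batch_decode(outputs, skip_special_tokens=True))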