teaevo committed on
Commit
69beb29
·
1 Parent(s): c8d5ecf

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +17 -52
app.py CHANGED
@@ -2,7 +2,7 @@ import gradio as gr
2
  from transformers import AutoModelForCausalLM, AutoTokenizer
3
  from transformers import TapexTokenizer, BartForConditionalGeneration
4
  import pandas as pd
5
- import torch
6
  #import pkg_resources
7
 
8
  '''
@@ -15,7 +15,7 @@ for package, version in installed_packages.items():
15
  '''
16
 
17
  # Load the chatbot model
18
- chatbot_model_name = "microsoft/DialoGPT-medium"
19
  chatbot_tokenizer = AutoTokenizer.from_pretrained(chatbot_model_name)
20
  chatbot_model = AutoModelForCausalLM.from_pretrained(chatbot_model_name)
21
 
@@ -35,47 +35,24 @@ data = {
35
  }
36
  table = pd.DataFrame.from_dict(data)
37
 
38
- bot_input_ids = None
39
-
40
- tokenizer = AutoTokenizer.from_pretrained("microsoft/DialoGPT-medium")
41
- model = AutoModelForCausalLM.from_pretrained("microsoft/DialoGPT-medium")
42
-
43
-
44
  def chatbot_response(user_message):
45
- # Generate chatbot response using the chatbot model
46
- #inputs = chatbot_tokenizer.encode("User: " + user_message, return_tensors="pt")
47
- #outputs = chatbot_model.generate(inputs, max_length=100, num_return_sequences=1)
48
- #response = chatbot_tokenizer.decode(outputs[0], skip_special_tokens=True)
49
- response = None
50
-
51
- # Let's chat for 5 lines
52
- for step in range(1):
53
- # encode the new user input, add the eos_token and return a tensor in Pytorch
54
- new_user_input_ids = tokenizer.encode(input(">> User:") + tokenizer.eos_token, return_tensors='pt')
55
-
56
- # append the new user input tokens to the chat history
57
- bot_input_ids = torch.cat([chat_history_ids, new_user_input_ids], dim=-1) if step > 0 else new_user_input_ids
58
-
59
- # generated a response while limiting the total chat history to 1000 tokens,
60
- chat_history_ids = model.generate(bot_input_ids, max_length=1000, pad_token_id=tokenizer.eos_token_id)
61
-
62
- # pretty print last ouput tokens from bot
63
- #print("DialoGPT: {}".format(tokenizer.decode(chat_history_ids[:, bot_input_ids.shape[-1]:][0], skip_special_tokens=True)))
64
-
65
- response = tokenizer.decode(chat_history_ids[:, bot_input_ids.shape[-1]:][0], skip_special_tokens=True)
66
 
67
  return response
68
 
69
- def sql_response(user_query):
70
-
71
- #inputs = tokenizer.encode("User: " + user_query, return_tensors="pt")
72
- inputs = user_query
73
- encoding = sql_tokenizer(table=table, query=inputs, return_tensors="pt")
74
- outputs = sql_model.generate(**encoding)
75
- response = sql_tokenizer.batch_decode(outputs, skip_special_tokens=True)
76
-
77
- return response
78
-
79
  # Define the chatbot and SQL execution interfaces using Gradio
80
  chatbot_interface = gr.Interface(
81
  fn=chatbot_response,
@@ -87,20 +64,8 @@ chatbot_interface = gr.Interface(
87
  description="Type your message in the box above, and the chatbot will respond.",
88
  )
89
 
90
- # Define the chatbot interface using Gradio
91
- sql_interface = gr.Interface(
92
- fn=sql_response,
93
- inputs=gr.Textbox(prompt="Enter your SQL Qus:"),
94
- outputs=gr.Textbox(),
95
- live=True,
96
- capture_session=True,
97
- title="ST SQL Chatbot",
98
- description="Type your message in the box above, and the chatbot will respond.",
99
- )
100
-
101
  # Launch the Gradio interface
102
  if __name__ == "__main__":
103
  chatbot_interface.launch()
104
-
105
- sql_interface.launch()
106
 
 
2
  from transformers import AutoModelForCausalLM, AutoTokenizer
3
  from transformers import TapexTokenizer, BartForConditionalGeneration
4
  import pandas as pd
5
+ #import torch
6
  #import pkg_resources
7
 
8
  '''
 
15
  '''
16
 
17
  # Load the chatbot model
18
+ chatbot_model_name = "gpt2" #"microsoft/DialoGPT-medium"
19
  chatbot_tokenizer = AutoTokenizer.from_pretrained(chatbot_model_name)
20
  chatbot_model = AutoModelForCausalLM.from_pretrained(chatbot_model_name)
21
 
 
35
  }
36
  table = pd.DataFrame.from_dict(data)
37
 
 
 
 
 
 
 
38
  def chatbot_response(user_message):
39
+ # Check if the user input is a question
40
+ is_question = "?" in user_message
41
+
42
+ if is_question:
43
+ # If the user input is a question, use TAPEx for question-answering
44
+ inputs = user_query
45
+ encoding = sql_tokenizer(table=table, query=inputs, return_tensors="pt")
46
+ outputs = sql_model.generate(**encoding)
47
+ response = sql_tokenizer.batch_decode(outputs, skip_special_tokens=True)
48
+ else:
49
+ # Generate chatbot response using the chatbot model
50
+ inputs = chatbot_tokenizer.encode("User: " + user_message, return_tensors="pt")
51
+ outputs = chatbot_model.generate(inputs, max_length=100, num_return_sequences=1)
52
+ response = chatbot_tokenizer.decode(outputs[0], skip_special_tokens=True)
 
 
 
 
 
 
 
53
 
54
  return response
55
 
 
 
 
 
 
 
 
 
 
 
56
  # Define the chatbot and SQL execution interfaces using Gradio
57
  chatbot_interface = gr.Interface(
58
  fn=chatbot_response,
 
64
  description="Type your message in the box above, and the chatbot will respond.",
65
  )
66
 
 
 
 
 
 
 
 
 
 
 
 
67
  # Launch the Gradio interface
68
  if __name__ == "__main__":
69
  chatbot_interface.launch()
70
+
 
71