|
import gradio as gr |
|
from transformers import AutoModelForCausalLM, AutoTokenizer |
|
from transformers import TapexTokenizer, BartForConditionalGeneration |
|
import pandas as pd |
|
import torch |
|
|
|
import numpy as np |
|
import time |
|
import os |
|
|
|
|
|
# Dead diagnostic snippet kept from development.
# NOTE(review): it relies on `pkg_resources`, which is not imported in this
# file, so it would fail if re-enabled as-is.
'''
# Get a list of installed packages and their versions
installed_packages = {pkg.key: pkg.version for pkg in pkg_resources.working_set}

# Print the list of packages
for package, version in installed_packages.items():
    print(f"{package}=={version}")
'''


# Conversational model: DialoGPT handles free-form (non-question) chat turns.
# Weights are downloaded from the Hugging Face hub on first run.
chatbot_model_name = "microsoft/DialoGPT-medium"
chatbot_tokenizer = AutoTokenizer.from_pretrained(chatbot_model_name)
chatbot_model = AutoModelForCausalLM.from_pretrained(chatbot_model_name)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Table question answering: TAPEX (BART fine-tuned on WikiTableQuestions)
# maps a natural-language query plus a flattened table to an answer string.
model_name = "microsoft/tapex-large-finetuned-wtq"

sql_tokenizer = TapexTokenizer.from_pretrained(model_name)
sql_model = BartForConditionalGeneration.from_pretrained(model_name)

# Demo table the chatbot answers questions about (Olympic host cities).
data = {
    "year": [1896, 1900, 1904, 2004, 2008, 2012],
    "city": ["athens", "paris", "st. louis", "athens", "beijing", "london"]
}
table = pd.DataFrame.from_dict(data)
|
|
|
def chatbot_response(user_message, history=None):
    """Answer one chat turn, routing table questions to TAPEX.

    Messages containing a ``"?"`` are treated as questions about the
    module-level Olympics ``table`` and answered with the TAPEX model;
    all other messages get a free-form DialoGPT reply.

    Parameters
    ----------
    user_message : str
        The text the user just typed.
    history : list[int] | None
        Flat list of DialoGPT token ids for the conversation so far (the
        Gradio "state"). ``None`` or an empty list starts a fresh
        conversation — Gradio's initial state is ``None``, which the
        previous mutable-default signature crashed on. The argument is
        never mutated; an updated list is returned instead.

    Returns
    -------
    tuple[list[tuple[str, str]], list[int]]
        ``(pairs, history)``: the (user, bot) transcript pairs for the
        Gradio chatbot widget, and the updated token-id state.
    """
    history = list(history) if history else []

    if "?" in user_message:
        # Table question: let TAPEX answer from the dataframe. TAPEX is a
        # seq2seq (BART) model, so it takes only the encoded table+query.
        # The original code concatenated the raw BatchEncoding onto the
        # DialoGPT history, which raises a TypeError and would mix token
        # ids from two unrelated vocabularies.
        encoding = sql_tokenizer(table=table, query=user_message, return_tensors="pt")
        output_ids = sql_model.generate(**encoding, max_length=1000)
        answer = sql_tokenizer.batch_decode(output_ids, skip_special_tokens=True)[0].strip()

        # Record the turn in DialoGPT tokens so the transcript and state
        # stay in a single, consistent vocabulary.
        history += chatbot_tokenizer.encode(
            user_message + chatbot_tokenizer.eos_token + answer + chatbot_tokenizer.eos_token
        )
    else:
        # Free-form chat: append the new user turn to the running context
        # and let DialoGPT continue the conversation.
        new_user_input_ids = chatbot_tokenizer.encode(
            user_message + chatbot_tokenizer.eos_token, return_tensors="pt"
        )
        if history:
            bot_input_ids = torch.cat(
                [torch.LongTensor([history]), new_user_input_ids], dim=-1
            )
        else:
            # torch.cat with an empty LongTensor fails on current PyTorch
            # (dimension mismatch), so start from the new turn alone.
            bot_input_ids = new_user_input_ids

        history = chatbot_model.generate(
            bot_input_ids, max_length=1000, pad_token_id=chatbot_tokenizer.eos_token_id
        )[0].tolist()

    # Rebuild the displayed transcript: turns are separated by DialoGPT's
    # end-of-text token and alternate user / bot.
    turns = chatbot_tokenizer.decode(history).split("<|endoftext|>")
    response = [(turns[i], turns[i + 1]) for i in range(0, len(turns) - 1, 2)]

    return response, history
|
|
|
|
|
# Gradio UI: free-text input plus hidden conversation state in; chat
# transcript plus updated state out.
chatbot_interface = gr.Interface(
    fn=chatbot_response,
    inputs=["text", "state"],
    outputs=["chatbot", "state"],
    live=True,  # re-run on every input change rather than only on submit
    # NOTE(review): `capture_session` was a Gradio 1.x TensorFlow-session
    # flag that was deprecated and later removed — confirm the pinned
    # gradio version still accepts it.
    capture_session=True,
    title="ST Chatbot",
    description="Type your message in the box above, and the chatbot will respond.",
)


# Launch the web app only when executed as a script (not on import).
if __name__ == "__main__":
    chatbot_interface.launch()
|
|
|
|