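# Nexus TCM Chatbot: a Streamlit app that answers questions about traditional
# Chinese medicine using a LangChain ConversationChain backed by OpenAI's gpt-3.5-turbo.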
#from transformers import AutoModelForCausalLM, AutoTokenizer
from langchain.chat_models import ChatOpenAI
from langchain.chains import ConversationChain
from langchain.chains.conversation.memory import ConversationBufferWindowMemory
from langchain.prompts import PromptTemplate
import os
import streamlit as st
from streamlit_chat import message
from PIL import Image
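# Streamlit UI: page title, a single-line query input, and the logo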
st.title("Nexus TCM Chatbot")
query = st.text_input("Query: ", key="input")
# Load and display the logo image (st.image does not support a height argument;
# the image is scaled to the column width instead)
logo_image = Image.open("logo.jpg")
st.image(logo_image, caption="Nexus TCM Chatbot Logo", use_column_width=True)
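# Chat history lives in session state so it persists across Streamlit reruns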
if 'responses' not in st.session_state:
    st.session_state['responses'] = []
if 'requests' not in st.session_state:
    st.session_state['requests'] = []
#REPO_ID = "Xenova/gpt-3.5-turbo"
# Load the model and tokenizer from Hugging Face's model hub
#model = AutoModelForCausalLM.from_pretrained(REPO_ID)
#tokenizer = AutoTokenizer.from_pretrained(REPO_ID)
#llm = ChatOpenAI(model=model, tokenizer=tokenizer)
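# Chat model: gpt-3.5-turbo via LangChain's ChatOpenAI wrapper. The API key is
# assumed to be supplied through the OPENAI_API_KEY environment variable (or
# Streamlit secrets) rather than hard-coded in the source.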
llm = ChatOpenAI(model_name="gpt-3.5-turbo", openai_api_key=os.environ.get("OPENAI_API_KEY"))
# Initialize conversation memory in session state, windowed to the last 8 exchanges
if 'buffer_memory' not in st.session_state:
    st.session_state.buffer_memory = ConversationBufferWindowMemory(k=8)
    #st.session_state.buffer_memory = ConversationBufferWindowMemory(k=3)
conversation = ConversationChain(
    llm=llm,
    memory=st.session_state.buffer_memory,
    verbose=True
)
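# System context: behavioural instructions, NER tagging examples, clinic-location
# handling (Google Maps links), and product suggestions (Amazon search links)
# that steer every response.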
context = """
You act as a chatbot to interact with users on their questions about traditional Chinese medicine (TCM). \
Welcome the user in a friendly way.
You respond in a short and friendly style. \
For each text, mark NER tags. \
Tag categories: location, product. \
Text: I want to visit a clinic in [Ang Mo Kio](location). \
If a user says he/she wants to visit a traditional Chinese medicine (TCM) clinic, \
ask which location he/she wants to visit. \
After the user replies with a location, show in the chat window the Google Map from this link \
https://www.google.com/maps/search/tcm+clinics+at+location+name \
For example, if a user wants to visit a clinic in Ang Mo Kio, \
show in the chat window the Google Map from this link \
https://www.google.com/maps/search/tcm+clinics+at+ang+mo+kio \
Examples of location names:
North:
Sembawang
Woodlands
Yishun
North-East:
Ang Mo Kio
Hougang
Punggol
Sengkang
Serangoon
East:
Bedok
Pasir Ris
Tampines
West:
Bukit Batok
Bukit Panjang
Choa Chu Kang
Clementi
Jurong East
Jurong West
Tengah
Central:
Bishan
Bukit Merah
Bukit Timah
Central Area
Geylang
Kallang
Whampoa
Marine Parade
Queenstown
Toa Payoh
For each text, mark NER tags. \
Tag categories: location, product. \
Text: I want to buy/get [Po Chai Pills](product). \
If a user wants to buy/get a product, suggest that \
he/she can consider buying/getting from https://www.amazon.sg/s?k=product+name \
For example, if a user wants to buy Po Chai Pills, suggest \
he/she can consider buying/getting from https://www.amazon.sg/s?k=po+chai+pills \
Examples of product names:
Ointment/Hong You/Feng You/Fengyou
Liquorice/Gan cao/Gancao
Chrysanthemum/Ju hua/Juhua
Goji berry/wolfberry/Gou Qi Zi/Gouqizi
Red dates/Jujubes/Hong Zao/Hongzao
"""
prompt_template = PromptTemplate.from_template(
    '''system role: {context} \
user: {query}\
assistant:
''')
# Handle a new query: fill the prompt template, run the conversation chain,
# and store the exchange in session state
if query:
    formatted_query = prompt_template.format(context=context, query=query)
    response = conversation.run(formatted_query)
    st.session_state.requests.append(query)
    st.session_state.responses.append(response)
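# Render the conversation history, newest exchange first, as chat bubbles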
if st.session_state['responses']:
    for i in range(len(st.session_state['responses']) - 1, -1, -1):
        message(st.session_state['requests'][i], is_user=True, key=str(i) + '_user')
        message(st.session_state['responses'][i], key=str(i))
# gr.load("models/ksh-nyp/llama-2-7b-chat-TCMKB").launch()