"""Simple Streamlit chatbot backed by a PEFT-fine-tuned GPT-2 Medium model."""

import streamlit as st
from peft import PeftModel
from transformers import AutoTokenizer, AutoModelForCausalLM, GenerationConfig

# Load the tokenizer saved alongside the fine-tuned adapter, then attach the
# PEFT adapter weights to the GPT-2 Medium base model.
tokenizer = AutoTokenizer.from_pretrained("SSahas/openai_community_med_e3")
model = AutoModelForCausalLM.from_pretrained("openai-community/gpt2-medium")
model = PeftModel.from_pretrained(model, "SSahas/openai_community_med_e3")
model.eval()

# GPT-2 has no pad token by default; reuse the EOS token so padding works.
if tokenizer.pad_token is None:
    tokenizer.pad_token = tokenizer.eos_token


def response_generator(messages):
    """Format the chat history with the tokenizer's chat template and
    generate the assistant's next reply."""
    input_text = tokenizer.apply_chat_template(
        messages, tokenize=False, truncation=False, add_generation_prompt=True)
    inputs = tokenizer(input_text, padding=True, return_tensors="pt")
    output_ids = model.generate(
        input_ids=inputs["input_ids"],
        attention_mask=inputs["attention_mask"],
        generation_config=GenerationConfig(
            max_new_tokens=30, pad_token_id=tokenizer.eos_token_id))
    # Decode only the newly generated tokens, skipping the prompt portion.
    return tokenizer.decode(
        output_ids[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True)


st.title("Simple friendly chatbot for normal conversations")

# Initialize chat history
if "messages" not in st.session_state:
    st.session_state.messages = []

# Display chat messages from history on app rerun
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])

# Accept user input
if prompt := st.chat_input("What is up?"):
    # Add user message to chat history
    st.session_state.messages.append({"role": "user", "content": prompt})
    # Display user message in chat message container
    with st.chat_message("user"):
        st.markdown(prompt)

    # Display assistant response in chat message container, generating the
    # reply from the full conversation so far rather than the last turn only.
    with st.chat_message("assistant"):
        response = response_generator(st.session_state.messages)
        st.write(response)
    # Add assistant response to chat history
    st.session_state.messages.append(
        {"role": "assistant", "content": response})
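
# Usage note (a sketch, assuming this file is saved as app.py): launch the
# chat UI locally with
#
#   streamlit run app.py
#
# The first run downloads the GPT-2 Medium base weights and the PEFT adapter
# from the Hugging Face Hub, so expect a delay before the UI becomes responsive.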