# Import necessary libraries
import streamlit as st
import openai
import os

# Set the title for the Streamlit app
st.title("Simple Chatbot")

# Load the OpenAI API key from the environment (e.g., a Hugging Face Spaces secret)
openai_api_key = os.getenv("OPENAI_API_KEY")

# Check if the API key is loaded
if openai_api_key is None:
    st.error("API key not found. Please set the OpenAI API key in the environment.")
    st.stop()

# Set the API key for OpenAI
openai.api_key = openai_api_key

# Define the template for the chatbot's system prompt
prompt_template = """
You are a helpful Assistant who answers users' questions based on your general knowledge.
Keep your answers short and to the point.
"""

# Get the current prompt from the session state or set the default
if "prompt" not in st.session_state:
    st.session_state["prompt"] = [{"role": "system", "content": prompt_template}]
prompt = st.session_state["prompt"]

# Display previous chat messages (skip the system prompt)
for message in prompt:
    if message["role"] != "system":
        with st.chat_message(message["role"]):
            st.write(message["content"])

# Get the user's question using Streamlit's chat input
question = st.chat_input("Ask anything")

# Handle the user's question
if question:
    # Add the user's question to the prompt and display it
    prompt.append({"role": "user", "content": question})
    with st.chat_message("user"):
        st.write(question)

    # Display an empty assistant message while waiting for the response
    with st.chat_message("assistant"):
        botmsg = st.empty()

    # Define a function to interact with the OpenAI API
    def chat_gpt(messages):
        response = openai.ChatCompletion.create(
            model="gpt-3.5-turbo",
            messages=messages
        )
        return response.choices[0].message["content"].strip()

    # Call the chat_gpt function with the full conversation so far
    result = chat_gpt(prompt)

    # Display the assistant's response
    botmsg.write(result)

    # Add the assistant's response to the prompt
    prompt.append({"role": "assistant", "content": result})

    # Store the updated prompt in the session state
    st.session_state["prompt"] = prompt
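
# Note: openai.ChatCompletion.create is the pre-1.0 "openai" package interface;
# with openai>=1.0 the equivalent call is client.chat.completions.create.
# A minimal way to run the app, assuming this script is saved as app.py and
# OPENAI_API_KEY is set in the environment:
#
#   pip install streamlit "openai<1.0"
#   streamlit run app.py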