richardorama committed on
Commit 2f21070
1 Parent(s): f5cd439

Update app.py

Files changed (1)
  app.py  +57 -25
app.py CHANGED
@@ -127,41 +127,73 @@ else:

-# Load pre-trained GPT-2 model and tokenizer
-model_name = "gpt-3.5-turbo"  # "gpt2" # Use "gpt-3.5-turbo" or another model from Hugging Face if needed
-model = GPT2LMHeadModel.from_pretrained(model_name)
-tokenizer = GPT2Tokenizer.from_pretrained(model_name)

-# Initialize the text generation pipeline
-gpt_pipeline = pipeline("text-generation", model=model, tokenizer=tokenizer)

-# Streamlit UI
-st.title("Chat with GPT-2")

-if 'conversation' not in st.session_state:
-    st.session_state.conversation = ""

-def chat_with_gpt(user_input):
-    # Append user input to the conversation
-    st.session_state.conversation += f"User: {user_input}\n"

-    # Generate response
-    response = gpt_pipeline(user_input, max_length=100, num_return_sequences=1)[0]['generated_text']
-    response_text = response.replace(user_input, '')  # Strip the user input part from response

-    # Append GPT's response to the conversation
-    st.session_state.conversation += f"GPT: {response_text}\n"
-    return response_text

-# Text input for user query
-user_input = st.text_input("You:", "")

-if st.button("Send"):
     if user_input:
-        chat_with_gpt(user_input)

-# Display conversation history
-st.text_area("Conversation", value=st.session_state.conversation, height=400)

 # ################ END #################


+# # Load pre-trained GPT-2 model and tokenizer
+# model_name = "gpt-3.5-turbo"  # "gpt2" # Use "gpt-3.5-turbo" or another model from Hugging Face if needed
+# model = GPT2LMHeadModel.from_pretrained(model_name)
+# tokenizer = GPT2Tokenizer.from_pretrained(model_name)

+# # Initialize the text generation pipeline
+# gpt_pipeline = pipeline("text-generation", model=model, tokenizer=tokenizer)

+# # Streamlit UI
+# st.markdown("<h3 style='text-align: center; font-size: 20px;'>Chat with GPT</h3>", unsafe_allow_html=True)

+# if 'conversation' not in st.session_state:
+#     st.session_state.conversation = ""

+# def chat_with_gpt(user_input):
+#     # Append user input to the conversation
+#     st.session_state.conversation += f"User: {user_input}\n"

+#     # Generate response
+#     response = gpt_pipeline(user_input, max_length=100, num_return_sequences=1)[0]['generated_text']
+#     response_text = response.replace(user_input, '')  # Strip the user input part from response

+#     # Append GPT's response to the conversation
+#     st.session_state.conversation += f"GPT: {response_text}\n"
+#     return response_text

+# # Text input for user query
+# user_input = st.text_input("You:", "")

+# if st.button("Send"):
+#     if user_input:
+#         chat_with_gpt(user_input)
+
+# # Display conversation history
+# st.text_area("Conversation", value=st.session_state.conversation, height=400)
+
+
+from transformers import AutoModelForCausalLM, AutoTokenizer
+import torch
+
+# Load the model and tokenizer from Hugging Face (LLaMA or OpenAssistant)
+# Example: "OpenAssistant/oa-v1" (Open Assistant) or "huggyllama/llama-7b" (LLaMA)
+
+MODEL_NAME = "OpenAssistant/oa_v1"  # You can replace this with a LLaMA model like "huggyllama/llama-7b"
+tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
+model = AutoModelForCausalLM.from_pretrained(MODEL_NAME)
+
+# Streamlit UI for input
+st.title("Chat with OpenAssistant/LLaMA")
+
+# Input text area
+user_input = st.text_area("You:", "", height=150)
+
+if st.button('Generate Response'):
     if user_input:
+        # Tokenize the input and generate response
+        inputs = tokenizer(user_input, return_tensors="pt")
+        outputs = model.generate(**inputs, max_length=150)
+
+        # Decode the generated response
+        response = tokenizer.decode(outputs[0], skip_special_tokens=True)
+
+        # Display the model's response
+        st.write("Assistant: ", response)
+    else:
+        st.warning('Please enter some text to get a response!')

 # ################ END #################
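
For reference, below is a minimal standalone sketch of the generate-and-decode flow the new code relies on, with the Streamlit UI stripped out. It assumes the small public checkpoint "gpt2" purely so the snippet can be downloaded and run quickly; the commit's "OpenAssistant/oa_v1" id (or a LLaMA checkpoint) would go in the same MODEL_NAME slot if that model is available. It also uses max_new_tokens, which caps only the generated continuation, whereas the commit's max_length=150 counts the prompt as well.

# Minimal sketch of the AutoTokenizer/AutoModelForCausalLM generate-and-decode
# flow used by the new app.py code, without Streamlit. "gpt2" is a stand-in
# checkpoint chosen only so the example runs quickly.
from transformers import AutoModelForCausalLM, AutoTokenizer

MODEL_NAME = "gpt2"  # stand-in; the commit uses "OpenAssistant/oa_v1"

tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForCausalLM.from_pretrained(MODEL_NAME)


def generate_response(user_input: str, max_new_tokens: int = 100) -> str:
    # Tokenize the prompt into input_ids/attention_mask tensors.
    inputs = tokenizer(user_input, return_tensors="pt")
    # Generate a continuation; max_new_tokens caps only the new tokens,
    # unlike max_length, which also counts the prompt tokens.
    outputs = model.generate(**inputs, max_new_tokens=max_new_tokens)
    # Decode the full sequence (prompt + continuation) to plain text.
    return tokenizer.decode(outputs[0], skip_special_tokens=True)


if __name__ == "__main__":
    print(generate_response("Hello, how are you today?"))

Note that, as in the committed code, the decoded string still contains the prompt; the earlier GPT-2 version stripped it with response.replace(user_input, ''), and the same approach (or decoding only the tokens generated after the prompt) applies here.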