richardorama committed
Commit de19eb1
1 Parent(s): 7bbff5e

Update app.py

Files changed (1)
  1. app.py +26 -26
app.py CHANGED
@@ -166,40 +166,40 @@ else:
 
  #############
 
- # LLaMA 7B model from Hugging Face
- # MODEL_NAME = "huggyllama/llama-7b" # Example of a LLaMA model
+ # # LLaMA 7B model from Hugging Face
+ # # MODEL_NAME = "huggyllama/llama-7b" # Example of a LLaMA model
 
- # Try this OpenAssistant model available on Hugging Face
- MODEL_NAME = "OpenAssistant/oasst-sft-1-pythia-12b" # Example of an OpenAssistant model
+ # # Try this OpenAssistant model available on Hugging Face
+ # MODEL_NAME = "OpenAssistant/oasst-sft-1-pythia-12b" # Example of an OpenAssistant model
 
- import streamlit as st
- from transformers import AutoModelForCausalLM, AutoTokenizer
- import torch
+ # import streamlit as st
+ # from transformers import AutoModelForCausalLM, AutoTokenizer
+ # import torch
 
- # Load the model and tokenizer (OpenAssistant or LLaMA)
- MODEL_NAME = "OpenAssistant/oasst-sft-1-pythia-12b" # Replace with "huggyllama/llama-7b" for LLaMA
- tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
- model = AutoModelForCausalLM.from_pretrained(MODEL_NAME)
+ # # Load the model and tokenizer (OpenAssistant or LLaMA)
+ # MODEL_NAME = "OpenAssistant/oasst-sft-1-pythia-12b" # Replace with "huggyllama/llama-7b" for LLaMA
+ # tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
+ # model = AutoModelForCausalLM.from_pretrained(MODEL_NAME)
 
- # Streamlit UI for input
- st.markdown("<h3 style='text-align: center; font-size: 20px;'>Chat with OpenAssistant/LLaMA</h3>", unsafe_allow_html=True)
+ # # Streamlit UI for input
+ # st.markdown("<h3 style='text-align: center; font-size: 20px;'>Chat with OpenAssistant/LLaMA</h3>", unsafe_allow_html=True)
 
- # Input text area
- user_input = st.text_area("You:", "", height=150)
+ # # Input text area
+ # user_input = st.text_area("You:", "", height=150)
 
- if st.button('Generate Response'):
-     if user_input:
-         # Tokenize the input and generate response
-         inputs = tokenizer(user_input, return_tensors="pt")
-         outputs = model.generate(**inputs, max_length=150)
+ # if st.button('Generate Response'):
+ #     if user_input:
+ #         # Tokenize the input and generate response
+ #         inputs = tokenizer(user_input, return_tensors="pt")
+ #         outputs = model.generate(**inputs, max_length=150)
 
-         # Decode the generated response
-         response = tokenizer.decode(outputs[0], skip_special_tokens=True)
+ #         # Decode the generated response
+ #         response = tokenizer.decode(outputs[0], skip_special_tokens=True)
 
-         # Display the model's response
-         st.write("Assistant: ", response)
-     else:
-         st.warning('Please enter some text to get a response!')
+ #         # Display the model's response
+ #         st.write("Assistant: ", response)
+ #     else:
+ #         st.warning('Please enter some text to get a response!')
 
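This commit disables the whole chat block by commenting out all 26 lines; a 12B checkpoint will not load in a default CPU Space, which may be why it was switched off rather than deleted. For reference, here is a minimal sketch of the disabled block as it could look if re-enabled. The `st.cache_resource` wrapper, the switch from `max_length` to `max_new_tokens`, and the fp16 load are suggested changes and assumptions on top of the removed code, not part of this commit:

```python
import streamlit as st
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

MODEL_NAME = "OpenAssistant/oasst-sft-1-pythia-12b"  # pinned in the removed code

@st.cache_resource  # cache across Streamlit reruns; the removed code reloaded the model on every run
def load_model(name):
    tokenizer = AutoTokenizer.from_pretrained(name)
    # fp16 halves memory; an assumption here, the removed code loaded full precision
    model = AutoModelForCausalLM.from_pretrained(name, torch_dtype=torch.float16)
    return tokenizer, model

tokenizer, model = load_model(MODEL_NAME)

st.markdown(
    "<h3 style='text-align: center; font-size: 20px;'>Chat with OpenAssistant/LLaMA</h3>",
    unsafe_allow_html=True,
)
user_input = st.text_area("You:", "", height=150)

if st.button('Generate Response'):
    if user_input:
        inputs = tokenizer(user_input, return_tensors="pt")
        # max_new_tokens bounds the reply itself; max_length=150 also counted
        # the prompt, so a long prompt could leave no room for a response
        outputs = model.generate(**inputs, max_new_tokens=150)
        response = tokenizer.decode(outputs[0], skip_special_tokens=True)
        st.write("Assistant: ", response)
    else:
        st.warning('Please enter some text to get a response!')
```

Note also that the oasst-sft models are trained on the OpenAssistant chat template (`<|prompter|>{text}<|endoftext|><|assistant|>` per the model card), which the removed code never applied; raw prompts will still generate, but below what the fine-tune can do.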