richardorama
committed on
Commit
•
7bbff5e
1
Parent(s):
b0d69bc
Update app.py
Browse files
app.py
CHANGED
@@ -164,8 +164,13 @@ else:
|
|
164 |
# st.text_area("Conversation", value=st.session_state.conversation, height=400)
|
165 |
|
166 |
|
|
|
|
|
167 |
# LLaMA 7B model from Hugging Face
|
168 |
-
MODEL_NAME = "huggyllama/llama-7b" # Example of a LLaMA model
|
|
|
|
|
|
|
169 |
|
170 |
import streamlit as st
|
171 |
from transformers import AutoModelForCausalLM, AutoTokenizer
|
@@ -177,7 +182,7 @@ tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
|
|
177 |
model = AutoModelForCausalLM.from_pretrained(MODEL_NAME)
|
178 |
|
179 |
# Streamlit UI for input
|
180 |
-
st.
|
181 |
|
182 |
# Input text area
|
183 |
user_input = st.text_area("You:", "", height=150)
|
|
|
164 |
# st.text_area("Conversation", value=st.session_state.conversation, height=400)
|
165 |
|
166 |
|
167 |
+
#############
|
168 |
+
|
169 |
# LLaMA 7B model from Hugging Face
|
170 |
+
# MODEL_NAME = "huggyllama/llama-7b" # Example of a LLaMA model
|
171 |
+
|
172 |
+
# Try this OpenAssistant model available on Hugging Face
|
173 |
+
MODEL_NAME = "OpenAssistant/oasst-sft-1-pythia-12b" # Example of an OpenAssistant model
|
174 |
|
175 |
import streamlit as st
|
176 |
from transformers import AutoModelForCausalLM, AutoTokenizer
|
|
|
182 |
model = AutoModelForCausalLM.from_pretrained(MODEL_NAME)
|
183 |
|
184 |
# Streamlit UI for input
|
185 |
+
st.markdown("<h3 style='text-align: center; font-size: 20px;'>Chat with OpenAssistant/LLaMA</h3>", unsafe_allow_html=True)
|
186 |
|
187 |
# Input text area
|
188 |
user_input = st.text_area("You:", "", height=150)
|