richardorama
committed on
Commit
•
b0d69bc
1
Parent(s):
2f21070
Update app.py
Browse files
app.py
CHANGED
@@ -164,13 +164,15 @@ else:
|
|
164 |
# st.text_area("Conversation", value=st.session_state.conversation, height=400)
|
165 |
|
166 |
|
|
|
|
|
|
|
|
|
167 |
from transformers import AutoModelForCausalLM, AutoTokenizer
|
168 |
import torch
|
169 |
|
170 |
-
# Load the model and tokenizer
|
171 |
-
|
172 |
-
|
173 |
-
MODEL_NAME = "OpenAssistant/oa_v1" # You can replace this with a LLaMA model like "huggyllama/llama-7b"
|
174 |
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
|
175 |
model = AutoModelForCausalLM.from_pretrained(MODEL_NAME)
|
176 |
|
@@ -195,6 +197,7 @@ if st.button('Generate Response'):
|
|
195 |
st.warning('Please enter some text to get a response!')
|
196 |
|
197 |
|
|
|
198 |
# ################ END #################
|
199 |
|
200 |
|
|
|
164 |
# st.text_area("Conversation", value=st.session_state.conversation, height=400)
|
165 |
|
166 |
|
167 |
+
# LLaMA 7B model from Hugging Face
|
168 |
+
MODEL_NAME = "huggyllama/llama-7b" # Example of a LLaMA model
|
169 |
+
|
170 |
+
import streamlit as st
|
171 |
from transformers import AutoModelForCausalLM, AutoTokenizer
|
172 |
import torch
|
173 |
|
174 |
+
# Load the model and tokenizer (OpenAssistant or LLaMA)
|
175 |
+
MODEL_NAME = "OpenAssistant/oasst-sft-1-pythia-12b" # Replace with "huggyllama/llama-7b" for LLaMA
|
|
|
|
|
176 |
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
|
177 |
model = AutoModelForCausalLM.from_pretrained(MODEL_NAME)
|
178 |
|
|
|
197 |
st.warning('Please enter some text to get a response!')
|
198 |
|
199 |
|
200 |
+
|
201 |
# ################ END #################
|
202 |
|
203 |
|