import os
import streamlit as st
from langchain.llms import HuggingFaceHub
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from models import llms
class UserInterface():
    """Streamlit UI for a simple context+question chat against open HuggingFace LLMs.

    Builds the sidebar controls (API key, model choice, temperature, max token
    length) on construction, then `form_data()` renders the inputs and runs the
    LangChain pipeline.
    """

    def __init__(self):
        st.warning("Warning: Some models may not work and some models may require GPU to run")
        st.text("An Open Source Chat Application")
        st.header("Open LLMs")
        # HuggingFace Hub API token, entered by the user (masked).
        self.API_KEY = st.sidebar.text_input(
            'API Key',
            type='password',
            help="Type in your HuggingFace API key to use this app"
        )
        models_name = (
            "HuggingFaceH4/zephyr-7b-beta",
            "Open-Orca/Mistral-7B-OpenOrca",
        )
        # BUG FIX: form_data() reads self.model_name, but the original stored the
        # selection as self.models, which raised AttributeError at run time.
        self.model_name = st.sidebar.selectbox(
            label="Choose your models",
            options=models_name,
            help="Choose your model",
        )
        self.temperature = st.sidebar.slider(
            label='Temperature',
            min_value=0.1,
            max_value=1.0,
            step=0.1,
            value=0.5,
            help="Set the temperature to get accurate or random result"
        )
        self.max_token_length = st.sidebar.slider(
            label="Token Length",
            min_value=32,
            max_value=2048,
            step=16,
            value=64,
            help="Set max tokens to generate maximum amount of text output"
        )
        # Generation parameters forwarded to HuggingFaceHub.
        self.model_kwargs = {
            "temperature": self.temperature,
            "max_length": self.max_token_length
        }
        # HuggingFaceHub reads the token from this environment variable.
        os.environ['HUGGINGFACEHUB_API_TOKEN'] = self.API_KEY

    def form_data(self):
        """Render the context/question inputs and display the model's answer."""
        try:
            # Disable the inputs until a plausible HuggingFace key ("hf_...") is
            # entered. BUG FIX: the original assigned text_input_visibility only
            # inside the warning branch, so a *valid* key raised
            # UnboundLocalError (surfaced as st.error by the except below).
            text_input_visibility = not self.API_KEY.startswith('hf_')
            if text_input_visibility:
                st.warning('Please enter your API key!', icon='⚠')

            # BUG FIX: Streamlit allows only ONE st.chat_input per app, so the
            # original's second chat_input raised StreamlitAPIException.
            # st.text_input supports the same disabled= gating.
            st.subheader("Context")
            context = st.text_input(
                "Context",
                disabled=text_input_visibility,
                label_visibility="collapsed",
            )
            st.subheader("Question")
            question = st.text_input(
                "Question",
                disabled=text_input_visibility,
                label_visibility="collapsed",
            )
            # Nothing to do until both fields are filled in.
            if not (context and question):
                return

            template = """
            Answer the question based on the context, if you don't know then output "Out of Context"
            Context: {context}
            Question: {question}
            Answer:
            """
            prompt = PromptTemplate(
                template=template,
                input_variables=[
                    'question',
                    'context'
                ]
            )
            llm = HuggingFaceHub(
                repo_id=self.model_name,
                model_kwargs=self.model_kwargs
            )
            llm_chain = LLMChain(
                prompt=prompt,
                llm=llm,
            )
            result = llm_chain.run({
                "question": question,
                "context": context
            })
            st.markdown(result)
        except Exception as e:
            # Top-level UI boundary: surface any failure (bad key, model error,
            # network) to the user instead of crashing the app.
            st.error(e, icon="🚨")
# Script entry point. BUG FIX: removed the stray " |" extraction residue that
# made the last line a syntax error; guard so importing this module for reuse
# does not launch the UI as a side effect.
if __name__ == "__main__":
    model = UserInterface()
    model.form_data()