import streamlit as st
from langchain.llms import HuggingFaceHub
# Function to return the model's response for a user query
def generate_answer(query):
    # The model is selected via repo_id; HuggingFaceHub has no model_class parameter
    # (the GGUF filename belongs to a local llama.cpp workflow, not the Hub API).
    # Goliath-120B is a causal LM, so the Hub task is "text-generation".
    llm = HuggingFaceHub(
        repo_id="alpindale/goliath-120b",
        task="text-generation",
        model_kwargs={"temperature": 0.5, "max_length": 64, "max_new_tokens": 512},
    )
prompt = f""" | |
You are a helpful AI assistant. | |
USER: | |
{query} | |
ASSISTANT: | |
""" | |
result = llm.predict(prompt) | |
return result | |
# App UI starts here
st.set_page_config(page_title="LangChain Demo", page_icon=":robot:")
st.header("LangChain Demo")

# Gets the user input
def get_text():
    input_text = st.text_input("You: ", key="input")
    return input_text
user_input = get_text()
submit = st.button("Generate")

# Only call the model once the button is clicked and there is input
if submit and user_input:
    response = generate_answer(user_input)
    st.subheader("Answer:")
    st.write(response)
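
# A minimal local-setup sketch, assuming the script above is saved as app.py and
# you have a Hugging Face API token with Inference API access. HuggingFaceHub reads
# the token from the HUGGINGFACEHUB_API_TOKEN environment variable; alternatively,
# pass huggingfacehub_api_token=... when constructing the LLM.
import os

os.environ["HUGGINGFACEHUB_API_TOKEN"] = "hf_..."  # placeholder token, not a real value

# The app itself is then started from the shell with:
#   streamlit run app.py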