Update app.py
app.py CHANGED
```diff
@@ -1,24 +1,46 @@
 import streamlit as st
-from
 
-#
-
 
-
-
-
 
-#
-
 
-if st.button("Send"):
-    # Use TinyLlama chatbot pipeline to generate a response
-    messages = [{"role": "system", "content": "You are a friendly chatbot who always responds in the style of a pirate"},
-                {"role": "user", "content": user_message}]
-    prompt = pipe.tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
-
-    # Generate response using TinyLlama
-    response = pipe(prompt, max_length=256, temperature=0.7, top_k=50, top_p=0.95)[0]["generated_text"]
 
-
-    st.text_area("Model Response:", response, height=100)
```
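Only fragments of the removed setup survive above (a truncated `from` import and two bare comment markers), but the calls that do survive (`pipe.tokenizer.apply_chat_template(...)`, `pipe(prompt, ...)`, and the `user_message` variable) match the standard `transformers` pipeline pattern for a TinyLlama chat model. A minimal sketch of what that setup presumably looked like; the model id and widget labels are assumptions, not recovered from the diff:

```python
import streamlit as st
from transformers import pipeline

# Assumption: the usual TinyLlama chat checkpoint; the actual model id is truncated above
pipe = pipeline("text-generation", model="TinyLlama/TinyLlama-1.1B-Chat-v1.0")

st.title("TinyLlama Chatbot")          # hypothetical title
user_message = st.text_input("You: ")  # the variable the removed code sends to the model
```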
```diff
+from langchain.llms import HuggingFaceHub
+from langchain.chains import LLMChain
+from langchain.prompts import PromptTemplate
 
+# Function to return the response
+def generate_answer(query):
+    llm = HuggingFaceHub(
+        repo_id="google/flan-t5-xxl",
+        model_kwargs={"temperature": 0.7, "max_length": 64, "max_new_tokens": 512}
+    )
+
+    template = """Question: {query}
 
+    Answer: Let's think step by step.
+    """
+
+    prompt = PromptTemplate(template=template, input_variables=["query"])
+    llm_chain = LLMChain(prompt=prompt, llm=llm)
+    result = llm_chain.run(query)
+    return result
```
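For reference, `LLMChain.run(query)` simply fills the template with the query and sends the resulting string to the LLM. A standalone sketch of that substitution (illustration only, with a made-up query):

```python
from langchain.prompts import PromptTemplate

template = """Question: {query}

    Answer: Let's think step by step.
    """

prompt = PromptTemplate(template=template, input_variables=["query"])

# The chain sends this formatted string to the model:
print(prompt.format(query="What is 17 * 4?"))
# Question: What is 17 * 4?
#
#     Answer: Let's think step by step.
```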
```diff
+
 
+# App UI starts here
+st.set_page_config(page_title="LangChain Demo", page_icon=":robot:")
+st.header("LangChain Demo")
+
+
+# Gets user input
+def get_text():
+    input_text = st.text_input("You: ", key="input")
+    return input_text
+
+
+user_input = get_text()
+response = generate_answer(user_input)
+
+submit = st.button("Generate")
+
+# If the button is clicked
+if submit:
+    st.subheader("Answer: ")
+    st.write(response)
```
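One practical note: `HuggingFaceHub` calls the hosted Hugging Face Inference API, so the app needs a valid token at runtime. With this legacy LangChain class the token is read from the `HUGGINGFACEHUB_API_TOKEN` environment variable, or passed as `huggingfacehub_api_token=...`. A minimal sketch with a placeholder value:

```python
import os

# Placeholder token: supply a real one (e.g. via Space secrets) before running
os.environ["HUGGINGFACEHUB_API_TOKEN"] = "hf_..."
```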
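Also worth noting as a design choice: `generate_answer(user_input)` runs on every Streamlit rerun, even before anything has been typed. A hypothetical rework (not what this commit does) would defer the Inference API call until the button is pressed:

```python
# Hypothetical variant: query the model only once "Generate" is clicked
submit = st.button("Generate")

if submit and user_input:
    st.subheader("Answer: ")
    st.write(generate_answer(user_input))
```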