Spaces:
Sleeping
Sleeping
Upload 2 files
Browse files- app.py +124 -0
- chainlit.md +24 -0
app.py
ADDED
@@ -0,0 +1,124 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# import required dependencies
|
2 |
+
# https://docs.chainlit.io/integrations/langchain
|
3 |
+
import os
|
4 |
+
from typing import List
|
5 |
+
from langchain_groq import ChatGroq
|
6 |
+
from langchain.prompts import PromptTemplate
|
7 |
+
from langchain_community.vectorstores import Qdrant
|
8 |
+
from langchain_community.embeddings.fastembed import FastEmbedEmbeddings
|
9 |
+
from qdrant_client import QdrantClient
|
10 |
+
from langchain_community.chat_models import ChatOllama
|
11 |
+
|
12 |
+
|
13 |
+
import chainlit as cl
|
14 |
+
from langchain.chains import RetrievalQA
|
15 |
+
|
16 |
+
# bring in our GROQ_API_KEY
|
17 |
+
from dotenv import load_dotenv
|
18 |
+
load_dotenv()
|
19 |
+
|
20 |
+
groq_api_key = os.getenv("GROQ_API_KEY")
|
21 |
+
qdrant_url = os.getenv("QDRANT_URL")
|
22 |
+
qdrant_api_key = os.getenv("QDRANT_API_KEY")
|
23 |
+
|
24 |
+
custom_prompt_template = """Use the following pieces of information to answer the user's question.
|
25 |
+
If you don't know the answer, just say that you don't know, don't try to make up an answer.
|
26 |
+
|
27 |
+
Context: {context}
|
28 |
+
Question: {question}
|
29 |
+
|
30 |
+
Only return the helpful answer below and nothing else.
|
31 |
+
Helpful answer:
|
32 |
+
"""
|
33 |
+
|
34 |
+
def set_custom_prompt():
    """Build the PromptTemplate used for QA retrieval over the vectorstore.

    Returns:
        A PromptTemplate wrapping ``custom_prompt_template`` with the
        ``context`` and ``question`` input variables.
    """
    return PromptTemplate(
        template=custom_prompt_template,
        input_variables=["context", "question"],
    )
|
41 |
+
|
42 |
+
|
43 |
+
# Deterministic (temperature=0) Groq-hosted chat model used as the QA LLM.
chat_model = ChatGroq(temperature=0, model_name="mixtral-8x7b-32768")
# Alternative model choices kept for quick swapping during experiments:
#chat_model = ChatGroq(temperature=0, model_name="Llama2-70b-4096")
#chat_model = ChatOllama(model="llama2", request_timeout=30.0)

# Client for the remote Qdrant vector store; URL and API key come from .env.
client = QdrantClient(api_key=qdrant_api_key, url=qdrant_url,)
|
48 |
+
|
49 |
+
|
50 |
+
def retrieval_qa_chain(llm, prompt, vectorstore):
    """Wire an LLM, prompt, and vectorstore into a "stuff" RetrievalQA chain.

    The retriever returns the top-2 most similar chunks, and the chain's
    response includes the source documents it used.

    Args:
        llm: Chat model that generates the final answer.
        prompt: PromptTemplate with ``context`` and ``question`` variables.
        vectorstore: Vector store exposing ``as_retriever``.

    Returns:
        The configured RetrievalQA chain.
    """
    retriever = vectorstore.as_retriever(search_kwargs={"k": 2})
    return RetrievalQA.from_chain_type(
        llm=llm,
        chain_type="stuff",
        retriever=retriever,
        return_source_documents=True,
        chain_type_kwargs={"prompt": prompt},
    )
|
59 |
+
|
60 |
+
|
61 |
+
def qa_bot():
    """Assemble the retrieval QA bot over the Qdrant "rag" collection.

    Returns:
        A ready-to-use RetrievalQA chain backed by FastEmbed embeddings
        and the module-level Groq chat model.
    """
    store = Qdrant(
        client=client,
        embeddings=FastEmbedEmbeddings(),
        collection_name="rag",
    )
    return retrieval_qa_chain(chat_model, set_custom_prompt(), store)
|
68 |
+
|
69 |
+
|
70 |
+
@cl.on_chat_start
async def start():
    """Initialize the bot when a new chat starts.

    Builds the retrieval QA chain, stores it in the user's session under
    the key "chain", then shows a placeholder message that is updated to
    the welcome text once setup is done.
    """
    cl.user_session.set("chain", qa_bot())
    msg = cl.Message(content="Starting the bot...")
    await msg.send()
    msg.content = (
        "Hi, Welcome to Chat With Documents using Llamaparse, LangChain, Qdrant and models from Groq."
    )
    await msg.update()
|
86 |
+
|
87 |
+
|
88 |
+
@cl.on_message
async def main(message):
    """Process an incoming chat message.

    Fetches the QA chain stored at chat start, runs it on the message text
    with a streaming LangChain callback handler, attaches the retrieved
    source documents as text elements, and sends the answer (with a
    "Sources:" suffix when any were found) back to the user.
    """
    chain = cl.user_session.get("chain")
    callback = cl.AsyncLangchainCallbackHandler()
    callback.answer_reached = True
    res = await chain.acall(message.content, callbacks=[callback])
    answer = res["result"]
    sources = res["source_documents"]

    elements: List[cl.Text] = []

    if sources:
        # One text element per retrieved chunk, referenced by name in the answer.
        elements = [
            cl.Text(content=doc.page_content, name=f"source_{idx}")
            for idx, doc in enumerate(sources)
        ]
        names = [el.name for el in elements]

        if names:
            answer += f"\nSources: {', '.join(names)}"
        else:
            answer += "\nNo sources found"

    await cl.Message(content=answer, elements=elements).send()
|
chainlit.md
ADDED
@@ -0,0 +1,24 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# The Great Gatsby GPT 🎩🍸
|
2 |
+
|
3 |
+
Welcome to the glamorous world of The Great Gatsby GPT! ✨
|
4 |
+
|
5 |
+
## Introduction
|
6 |
+
Step into the dazzling era of the 1920s, where every inquiry is met with an opulent response. Our GPT model is steeped in the allure of the Jazz Age, ready to illuminate the intricacies of F. Scott Fitzgerald's masterpiece.
|
7 |
+
|
8 |
+
## Key Features
|
9 |
+
- **Gatsby's Insights:** Pose any question about The Great Gatsby, and our GPT will conjure up detailed and immersive responses, as if transported to the lavish parties of West Egg.
|
10 |
+
- **Gilded Guidance:** With a flick of your digital cigarette holder, explore the depths of Gatsby's world with our GPT as your dapper guide.
|
11 |
+
- **Mystery Unveiled:** Watch as the veil of ambiguity lifts with each query, revealing the hidden truths and complexities of the Roaring Twenties.
|
12 |
+
- **24/7 Availability:** Whether it's dawn or dusk, our Great Gatsby GPT is at your service, ready to whisk you away to an era of extravagance at any hour.
|
13 |
+
|
14 |
+
## Benefits
|
15 |
+
- **Unravel Intrigue:** Discover the secrets of Jay Gatsby's enigmatic persona, the allure of Daisy Buchanan, and the tragedy of the American Dream through captivating conversations with our GPT.
|
16 |
+
- **Immersive Prohibition Era:** Engage in discussions about wealth, love, and society in the 1920s, and witness the glamour and decadence of the era come to life through our GPT's responses.
|
17 |
+
- **Community of Flappers and Dappers:** Join fellow literature enthusiasts in discussions, share your favorite passages from the novel, and indulge in the shared fascination for The Great Gatsby.
|
18 |
+
- **Endless Exploration:** With our Great Gatsby GPT, the adventure never ceases. Delve into the depths of East Egg or stroll along the shores of Long Island Sound with just a question.
|
19 |
+
|
20 |
+
## Get in Touch
|
21 |
+
Do you have a burning question about The Great Gatsby, or are you eager to immerse yourself in the world of flappers and bootleggers? Reach out to us and let the allure begin! Click [gatsby_gpt@roaringtwenties.com](mailto:xyzash568@gmail.com) to send us an email.
|
22 |
+
|
23 |
+
## Let the Glamour Unfold!
|
24 |
+
With a swirl of champagne bubbles and the echo of a jazz tune, let The Great Gatsby GPT enchant and captivate you with its boundless knowledge and lavish charm.
|