Spaces:
Sleeping
Sleeping
harshitv804
committed on
Commit
•
9bff18f
1
Parent(s):
2ae7d1e
Update app.py
Browse files
app.py
CHANGED
@@ -3,12 +3,11 @@ from langchain_community.embeddings import HuggingFaceEmbeddings
|
|
3 |
from langchain.prompts import PromptTemplate
|
4 |
from langchain_together import Together
|
5 |
import os
|
6 |
-
from langchain.retrievers.document_compressors import EmbeddingsFilter
|
7 |
-
from langchain.retrievers import ContextualCompressionRetriever
|
8 |
from langchain.memory import ConversationBufferWindowMemory
|
9 |
from langchain.chains import ConversationalRetrievalChain
|
10 |
import streamlit as st
|
11 |
import time
|
|
|
12 |
st.set_page_config(page_title="MedChat", page_icon="favicon.png")
|
13 |
|
14 |
col1, col2, col3 = st.columns([1,4,1])
|
@@ -53,11 +52,11 @@ if "messages" not in st.session_state:
|
|
53 |
if "memory" not in st.session_state:
|
54 |
st.session_state.memory = ConversationBufferWindowMemory(k=2, memory_key="chat_history",return_messages=True)
|
55 |
|
56 |
-
embeddings = HuggingFaceEmbeddings(model_name="nomic-ai/nomic-embed-text-v1",model_kwargs={"trust_remote_code":True})
|
57 |
db = FAISS.load_local("medchat_db", embeddings)
|
58 |
db_retriever = db.as_retriever(search_type="similarity",search_kwargs={"k": 4})
|
59 |
|
60 |
-
|
61 |
|
62 |
CONTEXT: {context}
|
63 |
|
@@ -66,9 +65,10 @@ CHAT HISTORY: {chat_history}
|
|
66 |
QUESTION: {question}
|
67 |
|
68 |
ANSWER:
|
|
|
69 |
"""
|
70 |
|
71 |
-
prompt = PromptTemplate(template=
|
72 |
input_variables=['context', 'question', 'chat_history'])
|
73 |
|
74 |
TOGETHER_AI_API= os.environ['TOGETHER_AI']
|
@@ -79,8 +79,6 @@ llm = Together(
|
|
79 |
together_api_key=f"{TOGETHER_AI_API}"
|
80 |
)
|
81 |
|
82 |
-
embeddings_filter = EmbeddingsFilter(embeddings=embeddings, similarity_threshold=0.80)
|
83 |
-
|
84 |
qa = ConversationalRetrievalChain.from_llm(
|
85 |
llm=llm,
|
86 |
memory=st.session_state.memory,
|
|
|
3 |
from langchain.prompts import PromptTemplate
|
4 |
from langchain_together import Together
|
5 |
import os
|
|
|
|
|
6 |
from langchain.memory import ConversationBufferWindowMemory
|
7 |
from langchain.chains import ConversationalRetrievalChain
|
8 |
import streamlit as st
|
9 |
import time
|
10 |
+
|
11 |
st.set_page_config(page_title="MedChat", page_icon="favicon.png")
|
12 |
|
13 |
col1, col2, col3 = st.columns([1,4,1])
|
|
|
52 |
if "memory" not in st.session_state:
|
53 |
st.session_state.memory = ConversationBufferWindowMemory(k=2, memory_key="chat_history",return_messages=True)
|
54 |
|
55 |
+
embeddings = HuggingFaceEmbeddings(model_name="nomic-ai/nomic-embed-text-v1",model_kwargs={"trust_remote_code":True, "revision":"289f532e14dbbbd5a04753fa58739e9ba766f3c7"})
|
56 |
db = FAISS.load_local("medchat_db", embeddings)
|
57 |
db_retriever = db.as_retriever(search_type="similarity",search_kwargs={"k": 4})
|
58 |
|
59 |
+
prompt_template = """<s>[INST]Follow these instructions carefully: You are a medical practitioner chatbot providing accurate medical information, adopting a doctor's perspective in your responses. Utilize the provided context, chat history, and question, choosing only the necessary information based on the user's query. Do not reference chat history if irrelevant to the current question; only use it for similar-related queries. Prioritize the given context when searching for relevant information, emphasizing clarity and conciseness in your responses. If multiple medicines share the same name but have different strengths, ensure to mention them. Exclude any mention of medicine costs. Stick to context directly related to the user's question, and use your knowledge base to answer inquiries outside the given context. Abstract and concise responses are key; do not repeat the chat template in your answers. If you lack information, simply state that you don't know. Avoid creating your own questions and answers. Enhance readability with markdown, incorporating bullet points and bold text when necessary.
|
60 |
|
61 |
CONTEXT: {context}
|
62 |
|
|
|
65 |
QUESTION: {question}
|
66 |
|
67 |
ANSWER:
|
68 |
+
</s>[INST]
|
69 |
"""
|
70 |
|
71 |
+
prompt = PromptTemplate(template=prompt_template,
|
72 |
input_variables=['context', 'question', 'chat_history'])
|
73 |
|
74 |
TOGETHER_AI_API= os.environ['TOGETHER_AI']
|
|
|
79 |
together_api_key=f"{TOGETHER_AI_API}"
|
80 |
)
|
81 |
|
|
|
|
|
82 |
qa = ConversationalRetrievalChain.from_llm(
|
83 |
llm=llm,
|
84 |
memory=st.session_state.memory,
|