anishas19 committed
Commit a0219fe · verified · 1 Parent(s): 020301a

Create app.py

Files changed (1): app.py (+91 -0)
app.py ADDED
@@ -0,0 +1,91 @@
+ from google.colab import drive
+ # Colab-specific: mount Google Drive so the PDF corpus and GGUF weights are reachable.
+ drive.mount("/content/drive")
+ !pip install langchain sentence-transformers chromadb llama-cpp-python langchain_community pypdf
+ from langchain_community.document_loaders import PyPDFDirectoryLoader
+ from langchain_text_splitters import RecursiveCharacterTextSplitter
+ from langchain_community.embeddings import SentenceTransformerEmbeddings
+ from langchain_community.vectorstores import Chroma
+ from langchain_community.llms import LlamaCpp
+ loader = PyPDFDirectoryLoader("/content/drive/MyDrive/BioMistral/Data")
+ docs = loader.load()
+ text_splitter = RecursiveCharacterTextSplitter(chunk_size=300, chunk_overlap=50)
+ chunks = text_splitter.split_documents(docs)
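+ # 300-character chunks with a 50-character overlap keep each embedded passage
+ # small while preserving sentences that straddle a chunk boundary.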
+ import os
+ # Re-export the Hugging Face token (e.g. a notebook secret); default to "" so a
+ # missing variable does not raise a TypeError on assignment.
+ os.environ["HUGGINGFACEHUB_API_TOKEN"] = os.getenv("HUGGINGFACEHUB_API_TOKEN", "")
+
+ embeddings = SentenceTransformerEmbeddings(model_name="NeuML/pubmedbert-base-embeddings")
+ vectorstore = Chroma.from_documents(chunks, embeddings)
+ query = "What are the major risk factors of heart disease?"
+ search_results = vectorstore.similarity_search(query)
+ search_results
+ retriever = vectorstore.as_retriever(search_kwargs={"k": 5})
+ retriever.get_relevant_documents(query)
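+ # The retriever returns the 5 chunks nearest the query in embedding space;
+ # these are what the chain below injects into the prompt as context.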
+ llm = LlamaCpp(
+     model_path="/content/drive/MyDrive/BioMistral/BioMistral-7B.Q4_K_M.gguf",
+     temperature=0.2,
+     max_tokens=2048,
+     top_p=1,
+     n_ctx=2048,  # the llama.cpp default context (512) is too small for 2048-token outputs
+ )
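+ # Q4_K_M is a 4-bit quantised GGUF build of BioMistral-7B, compact enough to
+ # run through llama.cpp on a CPU-only Colab runtime.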
+ template = """
+ <|context|>
+ You are a medical assistant. Follow the instructions and generate an accurate response based on the query and the context provided.
+ Please be truthful and give direct answers.
+ {context}
+ </s>
+ <|user|>
+ {query}
+ </s>
+ <|assistant|>
+ """
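+ # {context} is filled with the retrieved chunks and {query} with the user's
+ # question; without a {context} slot the retriever's output would never
+ # reach the model.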
+ from langchain.schema.runnable import RunnablePassthrough
+ from langchain.schema.output_parser import StrOutputParser
+ from langchain.prompts import ChatPromptTemplate
+ prompt = ChatPromptTemplate.from_template(template)
+ rag_chain = (
+     {"context": retriever, "query": RunnablePassthrough()}
+     | prompt
+     | llm
+     | StrOutputParser()
+ )
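+ # The dict fans the input out: the retriever populates "context" with matching
+ # chunks while RunnablePassthrough() forwards the raw question as "query".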
+ response = rag_chain.invoke(query)  # pass the question itself, not the literal string "query"
+ response
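+ # Interactive console loop (Colab only): blocks until 'exit' is typed.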
+ import sys
+ while True:
+     user_input = input("Input query: ")
+     if user_input == 'exit':
+         print("Exiting...")
+         sys.exit()
+     if user_input == "":
+         continue
+     result = rag_chain.invoke(user_input)
+     print("Answer: ", result)
+ !pip install gradio
+ import gradio as gr
+
+ # Define a function to handle queries
+ def chatbot_ui(user_query):
+     if not user_query.strip():
+         return "Please enter a valid query."
+     try:
+         result = rag_chain.invoke(user_query)
+         return result
+     except Exception as e:
+         return f"Error: {str(e)}"
+
+ # Create the Gradio interface
+ interface = gr.Interface(
+     fn=chatbot_ui,  # Function to process the query
+     inputs=gr.Textbox(label="Enter your medical query:", placeholder="Ask a medical question here..."),
+     outputs=gr.Textbox(label="Chatbot Response"),
+     title="Medical Assistant Chatbot",
+     description="A chatbot made for heart patients.",
+     examples=[
+         ["What are the symptoms of diabetes?"],
+         ["Explain the risk factors of heart disease."],
+         ["How can I reduce cholesterol levels naturally?"],
+     ]
+ )
+
+ # Launch the Gradio interface; share=True prints a temporary public gradio.live link.
+ interface.launch(share=True)