# import libraries
import os
from dotenv import load_dotenv
import streamlit as st
from langchain_community.document_loaders import PyPDFDirectoryLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_pinecone import PineconeVectorStore
from langchain.prompts import PromptTemplate
from langchain.chains.question_answering import load_qa_chain
from langchain_community.llms import CTransformers
from langchain_community.embeddings import HuggingFaceBgeEmbeddings
load_dotenv()
# Embedding model (runs on CPU)
embeddings = HuggingFaceBgeEmbeddings(model_name='sentence-transformers/all-MiniLM-L6-v2',
                                      model_kwargs={'device': 'cpu'})
# The Pinecone API key must be supplied via the environment (e.g. a .env file
# picked up by load_dotenv() above); never hardcode secrets in source.
if not os.getenv('PINECONE_API_KEY'):
    raise RuntimeError('PINECONE_API_KEY is not set')
index_name = 'harisonvecot'
vectorstore = PineconeVectorStore(index_name=index_name, embedding=embeddings)
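# Note: all-MiniLM-L6-v2 produces 384-dimensional vectors, so the Pinecone index
# must have been created with dimension=384 (and a matching metric, typically cosine).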
# Add documents to the Pinecone vector index
def create_index(documents):
    vectorstore.add_documents(documents)
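# Sketch of the ingestion helpers referenced in the commented-out example near the
# bottom of this file (read_doc / chunk_data). Assumes the PDFs sit in a local
# folder; the chunk_size/chunk_overlap values are illustrative, not tuned.
def read_doc(directory):
    loader = PyPDFDirectoryLoader(directory)
    return loader.load()

def chunk_data(docs, chunk_size=500, chunk_overlap=50):
    splitter = RecursiveCharacterTextSplitter(chunk_size=chunk_size,
                                              chunk_overlap=chunk_overlap)
    return splitter.split_documents(docs)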
# Retrieve the k most similar chunks from Pinecone
def retrieve_query(query, k=2):
    matching_results = vectorstore.similarity_search(query, k=k)
    return matching_results
# Custom prompt template
custom_prompt_template = '''
Use the following pieces of information to answer the user's question.
If you don't know the answer, just say that you don't know; don't try to make up an answer.

Context: {context}
Question: {question}

Only return the helpful answer below and nothing else.
'''
def set_custom_prompt():
    prompt = PromptTemplate(template=custom_prompt_template, input_variables=['context', 'question'])
    return prompt
# Load LLM model
llm_model = CTransformers(model='TheBloke/Llama-2-7B-Chat-GGML',
                          model_type='llama',
                          config={'max_new_tokens': 512, 'temperature': 0.5})
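# On first use, CTransformers downloads the GGML weights from the Hugging Face Hub;
# a model_file argument can pin a specific quantized file if the default is not wanted.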
# Create retrieval QA chain
def retrieval_qa_chain():
    prompt = set_custom_prompt()
    chain = load_qa_chain(llm_model, chain_type='stuff', prompt=prompt)
    return chain
# Search answers from Vector DB
def retrieve_answer(query):
    doc_search = retrieve_query(query)
    chain = retrieval_qa_chain()
    response = chain.run(input_documents=doc_search, question=query)
    return response
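# Note: chain.run(...) is the legacy LangChain call; newer releases prefer
# chain.invoke({'input_documents': doc_search, 'question': query}).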
# Streamlit UI
queries = st.text_input('Write a medical question:')
submit = st.button('Submit')
# One-time ingestion (run once to populate the index):
# doc = read_doc('documents/')
# documents = chunk_data(docs=doc)
# create_index(documents)
if submit:
    if queries:
        # Query the index and display the answer, e.g. 'What is the cause of eczema?'
        answer = retrieve_answer(queries)
        st.write(answer)