bipin commited on
Commit
b69fb46
1 Parent(s): ecef7f6

update 2.1

Browse files
Files changed (3) hide show
  1. .gitignore +1 -0
  2. app.py +1 -1
  3. app2.py +68 -0
.gitignore CHANGED
@@ -13,6 +13,7 @@ develop-eggs/
13
  dist/
14
  downloads/
15
  etc/
 
16
  eggs/
17
  scripts/
18
  share/
 
13
  dist/
14
  downloads/
15
  etc/
16
+ include/
17
  eggs/
18
  scripts/
19
  share/
app.py CHANGED
@@ -127,7 +127,7 @@ else:
127
 
128
  elif q_input and option=="Code":
129
  image_file = "pro-vision-dummy.jpg"
130
- image = Image.open(image_file)
131
  with st.spinner("Processing..."):
132
  mod_prompt = code_prompt + pdf_text
133
  response = get_gemini_response(mod_prompt, q_input)
 
127
 
128
  elif q_input and option=="Code":
129
  image_file = "pro-vision-dummy.jpg"
130
+ #image = Image.open(image_file)
131
  with st.spinner("Processing..."):
132
  mod_prompt = code_prompt + pdf_text
133
  response = get_gemini_response(mod_prompt, q_input)
app2.py ADDED
@@ -0,0 +1,68 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
"""Streamlit chat app: answer questions about a local PDF via Gemini + LangChain RAG.

At startup the script extracts the text of ``Uploaded/paper.pdf``, splits it
into overlapping chunks, and prepares Google Generative AI embeddings.  Each
chat question builds a Chroma retriever over the chunks, pulls the top-5
relevant chunks, and answers with a "stuff" question-answering chain driven
by the ``gemini-pro`` chat model.
"""
import os
import warnings  # NOTE: was imported twice in the original; once is enough

import google.generativeai as genai
from langchain_google_genai import GoogleGenerativeAI, ChatGoogleGenerativeAI, GoogleGenerativeAIEmbeddings
from dotenv import load_dotenv
import urllib
from pathlib import Path as p
from pprint import pprint
from text_ext import extract_text_from_pdf
import pandas as pd
from langchain import PromptTemplate
from langchain.chains.question_answering import load_qa_chain
from langchain.document_loaders import PyPDFLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import Chroma
from langchain.chains import RetrievalQA
import streamlit as st

# Silence LangChain deprecation chatter in the Streamlit console.
warnings.filterwarnings("ignore", category=Warning)
warnings.filterwarnings("ignore", category=UserWarning)

# Pull GOOGLE_API_KEY from a local .env file into the environment.
load_dotenv()
GOOGLE_API_KEY = os.getenv("GOOGLE_API_KEY")

# Low temperature keeps answers grounded in the retrieved context.
chat_model = ChatGoogleGenerativeAI(model="gemini-pro", google_api_key=GOOGLE_API_KEY, temperature=0.2, convert_system_message_to_human=True)

st.set_page_config(page_title="Gemini ChatPDF Langchain Application", layout="wide")
question = st.chat_input(key="input", placeholder="Ask your question")

# BUG FIX: the original used "Uploaded\paper.pdf" — "\p" is an invalid escape
# sequence (DeprecationWarning now, a SyntaxError in future Python) and the
# literal backslash separator breaks on non-Windows hosts.  os.path.join is
# portable and yields the same path on Windows.
pdf_file_path = os.path.join("Uploaded", "paper.pdf")

# Large chunks (10k chars, 1k overlap) so each retrieved piece carries enough
# surrounding context for detailed answers.
text_splitter = RecursiveCharacterTextSplitter(chunk_size=10000, chunk_overlap=1000)
context = extract_text_from_pdf(pdf_file_path)
context = "\n\n" + context
texts = text_splitter.split_text(context)

embeddings = GoogleGenerativeAIEmbeddings(model="models/embedding-001", google_api_key=GOOGLE_API_KEY)

if question:
    # NOTE(review): the Chroma index is rebuilt (and the PDF re-embedded) on
    # every Streamlit rerun; consider wrapping index construction in
    # @st.cache_resource to avoid repeated embedding-API calls.
    vector_index = Chroma.from_texts(texts, embeddings).as_retriever(search_kwargs={"k": 5})
    related_docs = vector_index.get_relevant_documents(question)

    prompt_template = """
    Answer the question with full detailed as possible from the provided context, make sure to provide all the details, if the answer is not in
    provided context just say, try to answer it from your knowledge but don't provide the wrong answer\n\n
    Context:\n {context}?\n
    Question: \n{question}\n

    Response:
    """
    prompt = PromptTemplate(template=prompt_template, input_variables=["context", "question"])
    chain = load_qa_chain(chat_model, chain_type="stuff", prompt=prompt)
    # return_only_outputs=True -> response dict holds just "output_text".
    response = chain({"input_documents": related_docs, "question": question}, return_only_outputs=True)

    result = response
    st.write(result["output_text"])