bipin committed
Commit: ecef7f6
Parent(s): a09727c

update to chat

Files changed:
- .gitignore +3 -0
- app.py +51 -28
- chat_mode.py +26 -0
- pyvenv.cfg +5 -0
- random.txt +0 -1
.gitignore
CHANGED
@@ -12,7 +12,10 @@ build/
 develop-eggs/
 dist/
 downloads/
+etc/
 eggs/
+scripts/
+share/
 .eggs/
 lib/
 lib64/
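The three new entries (etc/, scripts/, share/) appear to match directories created by the virtual environment recorded in the new pyvenv.cfg below, which was created in the repository root; presumably that is why they are being ignored.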
app.py
CHANGED
@@ -4,6 +4,8 @@ import google.generativeai as genai
 from text_ext import extract_text_from_pdf
 import base64
 from dotenv import load_dotenv
+from chat_mode import chat_response
+from PIL import Image
 
 load_dotenv()
 
@@ -14,26 +16,29 @@ vision_model=genai.GenerativeModel("gemini-pro-vision")
 chat = text_model.start_chat(history=[])
 
 def get_gemini_response(input, pdf_content):
-    text_model = genai.GenerativeModel('gemini-pro')
     response = text_model.generate_content([input, pdf_content])
     return response.text
 
-def get_gemini_vision_response(input, pdf_content):
-
-    response = text_model.generate_content([input, pdf_content])
+def get_gemini_vision_response(input, image, pdf_content):
+    response = vision_model.generate_content([input, image, pdf_content])
     return response.text
 
 ##initialize our streamlit app
 st.set_page_config(page_title="Gemini ChatPDF Application", layout="wide")
-st.subheader("Chat with PDF")
+#st.subheader("Chat with PDF")
+# Add some space at the top to center the subheader
+#st.markdown("<h1 style='text-align: center;'> </h1>", unsafe_allow_html=True)
+st.markdown("<h2 style='text-align: center;'>chatPDF</h2>", unsafe_allow_html=True)
+
+
 
 with st.sidebar:
     st.title("Upload PDF:")
     research_field = st.text_input("Research Field: ",key="research_field", placeholder="Enter research fields with commas")
     uploaded_file = st.file_uploader("", type=["pdf"])
-    option = st.selectbox('Select Mode', ('Chat', 'Graph and Table', 'Code'))
-    print(option)
-    submit = st.button("Submit", type="primary")
+    option = st.selectbox('Select Mode', ('', 'Chat', 'Graph and Table', 'Code'))
+    #print(option)
+    #submit = st.button("Submit", type="primary")
     #submit1 = st.button("Resume Assesmet")
     #submit2 = st.button("Possible Improvements")
     #submit3 = st.button("Percentage Match")
@@ -46,23 +51,25 @@ else:
     file.write(uploaded_file.getvalue())
 
 
-initial_prompt = f"""
-Imagine you are a seasoned researcher specializing in the field of {research_field}.
-You are presented with a research paper within your domain. Evaluate its working methodology
-and discuss its research impact through concise bullet points. Conclude by summarizing the
-research paper and propose three questions for the user based on the paper's context. Finnaly
-remeber the research paper context for the next questions.
-
-Output will be as,
-Research Paper Title
-Research Summary
-Methodology
-Research Impact
-Suggested Questions"""
 
-q_input=st.text_input("Question: ",key="input", placeholder="Ask your question")
-ask=st.button("Ask", type="primary")
 
+q_input=st.chat_input(key="input", placeholder="Ask your question")
+#ask=st.button("Ask", type="primary")
+
+def input_image_setup(uploaded_file):
+    if uploaded_file is not None:
+        bytes_data = uploaded_file.getvalue()
+
+        image_parts = [
+            {
+                "mime_type": uploaded_file.type,
+                "data": bytes_data
+            }
+        ]
+        return image_parts
+
+    else:
+        raise FileNotFoundError("No file uploaded")
 
 
 pdf_file_path = "Uploaded/paper.pdf"
@@ -73,8 +80,21 @@ if uploaded_file:
 else:
     pdf_text = ""
 
+initial_prompt = f"""
+Imagine you are a seasoned researcher specializing in the field of {research_field}.
+You are presented with a research paper within your domain. Evaluate its working methodology
+and discuss its research impact through concise bullet points. Conclude by summarizing the
+research paper and propose three questions for the user based on the paper's context. Finnaly
+remeber the research paper context for the next questions.
 
-
+Output will be as,
+Research Paper Title \n
+Research Summary \n
+Methodology \n
+Research Impact \n
+Suggested Questions"""
+
+if option=='':
     with st.spinner("Processing..."):
         response = get_gemini_response(initial_prompt, pdf_text)
         st.write(response)
@@ -98,19 +118,22 @@ explain the code in by each and every steps. \n \n \n"""
 if q_input is None:
     st.stop()
 else:
-    if
+    if q_input and option=="Chat":
         with st.spinner("Processing..."):
             mod_prompt = question_prompt + pdf_text
             response = get_gemini_response(mod_prompt, q_input)
-
+            chat_response(q_input, response)
+            #st.write(response)
 
-    elif
+    elif q_input and option=="Code":
+        image_file = "pro-vision-dummy.jpg"
+        image = Image.open(image_file)
         with st.spinner("Processing..."):
            mod_prompt = code_prompt + pdf_text
            response = get_gemini_response(mod_prompt, q_input)
            st.write(response)
 
-    elif
+    elif q_input and option=="Graph and Table":
         with st.spinner("Processing..."):
             #mod_prompt = code_prompt + pdf_text
             #response = get_gemini_response(mod_prompt, q_input)
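The hunks above add get_gemini_vision_response and input_image_setup but never connect them: the Code branch opens a PIL image and still routes the request through the text model. A hedged sketch of how the two new helpers could be wired together, assuming the vision_model instance and the q_input/pdf_text variables already defined in app.py:

```python
# Hypothetical wiring of the two helpers this commit adds; the diff
# itself never calls them together. input_image_setup returns a list of
# {"mime_type": ..., "data": ...} dicts, which the google-generativeai
# SDK accepts directly as content parts.
image_parts = input_image_setup(uploaded_file)
response = get_gemini_vision_response(q_input, image_parts[0], pdf_text)
st.write(response)
```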
chat_mode.py
ADDED
@@ -0,0 +1,26 @@
+import streamlit as st
+import os
+
+def chat_response(user_prompt, assistant_response):
+    if "chat_history" not in st.session_state:
+        st.session_state.chat_history = []
+
+    for message in st.session_state.chat_history:
+        if message["role"] == "user":
+            with st.chat_message("user"):
+                st.write(f"**You**: {message['content']}")
+
+        elif message["role"] == "assistant":
+            with st.chat_message("assistant"):
+                st.write(f"**Assistant**: {message['content']}")
+
+    if user_prompt:
+        st.session_state.chat_history.append({"role": "user", "content": user_prompt})
+        with st.chat_message("user"):
+            st.write(f"**You**: {user_prompt}")
+
+        with st.chat_message("assistant"):
+            st.write(f"**Assistant**: {assistant_response}")
+
+        st.session_state.chat_history.append({"role": "assistant", "content": assistant_response})
+
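chat_response replays the saved history on every Streamlit rerun and then appends the new user/assistant turn, so a caller only passes the latest exchange. A minimal standalone usage sketch (hypothetical demo, not part of the commit):

```python
# demo.py — hypothetical echo demo for chat_mode.chat_response.
# Run with: streamlit run demo.py
import streamlit as st
from chat_mode import chat_response

prompt = st.chat_input("Say something")
if prompt:
    # In app.py the assistant text comes from Gemini; here we just echo.
    chat_response(prompt, f"You said: {prompt}")
```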
pyvenv.cfg
ADDED
@@ -0,0 +1,5 @@
+home = C:\Users\HP\anaconda3
+include-system-site-packages = false
+version = 3.11.5
+executable = C:\Users\HP\anaconda3\python.exe
+command = C:\Users\HP\anaconda3\python.exe -m venv C:\Users\HP\Desktop\chatPDF
random.txt
DELETED
@@ -1 +0,0 @@
-This is a random text file for test