tcy6 committed
Commit 5807a33
Parent: 4599dc2

Update app.py

Files changed (1): app.py (+65, -24)
app.py CHANGED
@@ -15,7 +15,7 @@ import os
 import numpy as np
 import json

-cache_dir = '/home/user/data'
+cache_dir = '/data/KB'
 os.makedirs(cache_dir, exist_ok=True)

 def get_image_md5(img: Image.Image):
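The new cache path points at `/data`, which is where Hugging Face Spaces mounts persistent storage, so the knowledge bases survive restarts; the old `/home/user/data` location lives on the ephemeral disk. For running the app outside Spaces, a hypothetical fallback (not part of the commit; the local path is illustrative) could look like:

```python
import os

# Assumption: prefer the Spaces persistent-storage mount when it exists,
# otherwise fall back to a local directory for development runs.
cache_dir = '/data/KB' if os.path.isdir('/data') else os.path.join(os.getcwd(), 'KB')
os.makedirs(cache_dir, exist_ok=True)
```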
@@ -33,7 +33,8 @@ def calculate_md5_from_binary(binary_data):
 @spaces.GPU(duration=100)
 def add_pdf_gradio(pdf_file_binary, progress=gr.Progress()):
     global model, tokenizer
-
+    model.eval()
+
     knowledge_base_name = calculate_md5_from_binary(pdf_file_binary)

     this_cache_dir = os.path.join(cache_dir, knowledge_base_name)
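The knowledge-base ID comes from `calculate_md5_from_binary`, whose body sits outside this diff; a sketch consistent with its name and call site:

```python
import hashlib

def calculate_md5_from_binary(binary_data: bytes) -> str:
    # Hash the raw PDF bytes, so re-uploading the same file yields the
    # same knowledge-base ID and reuses the existing cache directory.
    return hashlib.md5(binary_data).hexdigest()
```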
@@ -78,6 +79,8 @@ def add_pdf_gradio(pdf_file_binary, progress=gr.Progress()):
 def retrieve_gradio(knowledge_base: str, query: str, topk: int):
     global model, tokenizer

+    model.eval()
+
     target_cache_dir = os.path.join(cache_dir, knowledge_base)

     if not os.path.exists(target_cache_dir):
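Both GPU handlers now call `model.eval()` before embedding. Evaluation mode disables dropout and freezes normalization statistics, and it pairs with the `torch.no_grad()` block that already wraps the forward pass; a minimal sketch of the combined pattern (function and argument names are illustrative, the call signature mirrors the app's own usage):

```python
import torch

def encode_texts(texts, model, tokenizer):
    model.eval()  # deterministic inference: no dropout, frozen norm stats
    with torch.no_grad():  # no autograd graph: less memory, faster forward
        return model(text=texts, image=[None] * len(texts), tokenizer=tokenizer).reps.cpu()
```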
@@ -90,7 +93,7 @@ def retrieve_gradio(knowledge_base: str, query: str, topk: int):

     doc_reps = np.load(os.path.join(target_cache_dir, f"reps.npy"))

-    query_with_instruction = "Represent this query for retrieving relevant document: " + query
+    query_with_instruction = "Represent this query for retrieving relavant document: " + query
     with torch.no_grad():
         query_rep = model(text=[query_with_instruction], image=[None], tokenizer=tokenizer).reps.squeeze(0).cpu()
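The hunk shows the query being embedded against the cached `reps.npy`, but the scoring step that follows is elided from the diff; assuming the stored representations are L2-normalized, ranking reduces to a dot product and a top-k sort, roughly:

```python
import numpy as np

def rank_pages(doc_reps: np.ndarray, query_rep: np.ndarray, topk: int) -> np.ndarray:
    # Cosine similarity equals the dot product for unit-normalized vectors.
    scores = doc_reps @ query_rep
    # Indices of the topk best-scoring pages, highest score first.
    return np.argsort(-scores)[:topk]
```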
 
@@ -166,53 +169,91 @@ def downvote(knowledge_base, query):
     return


+
 device = 'cuda'
-model_path = 'RhapsodyAI/minicpm-visual-embedding-v0' # replace with your local model path
+
+print("emb model load begin...")
+model_path = 'openbmb/VisRAG-Ret' # replace with your local model path
 tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
 model = AutoModel.from_pretrained(model_path, trust_remote_code=True)
+model.eval()
 model.to(device)
+print("emb model load success!")
+
+print("gen model load begin...")
+gen_model_path = 'openbmb/MiniCPM-V-2_6'
+gen_tokenizer = AutoTokenizer.from_pretrained(gen_model_path, attn_implementation='sdpa', trust_remote_code=True)
+gen_model = AutoModel.from_pretrained(gen_model_path, trust_remote_code=True, torch_dtype=torch.bfloat16)
+gen_model.eval()
+gen_model.to(device)
+print("gen model load success!")
+
+
+@spaces.GPU(duration=50)
+def answer_question(images, question):
+    global gen_model, gen_tokenizer
+    # here each element of images is a tuple of (image_path, None)
+    images_ = [Image.open(image[0]).convert('RGB') for image in images]
+    msgs = [{'role': 'user', 'content': [question, *images_]}]
+    answer = gen_model.chat(
+        image=None,
+        msgs=msgs,
+        tokenizer=gen_tokenizer
+    )
+    print(answer)
+    return answer


 with gr.Blocks() as app:
-    gr.Markdown("# Memex: OCR-free Visual Document Embedding Model as Your Personal Librarian")
-    gr.Markdown("""The model only takes images as document-side inputs and produce vectors representing document pages. Memex is trained with over 200k query-visual document pairs, including textual document, visual document, arxiv figures, plots, charts, industry documents, textbooks, ebooks, and openly-available PDFs, etc. Its performance is on a par with our ablation text embedding model on text-oriented documents, and an advantages on visually-intensive documents.
-    Our model is capable of:
-    - Help you read a long visually-intensive or text-oriented PDF document and find the pages that answer your question.
-    - Help you build a personal library and retireve book pages from a large collection of books.
-    - It works like human: read and comprehend with vision and remember multimodal information in hippocampus.""")
-
-    gr.Markdown("- Our model is proudly based on MiniCPM-V series [MiniCPM-V-2.6](https://huggingface.co/openbmb/MiniCPM-V-2_6) [MiniCPM-V-2](https://huggingface.co/openbmb/MiniCPM-V-2).")
+    gr.Markdown("# MiniCPMV-RAG-PDFQA: Two Vision Language Models Enable End-to-End RAG")

-    gr.Markdown("- We open-sourced our model at [RhapsodyAI/minicpm-visual-embedding-v0](https://huggingface.co/RhapsodyAI/minicpm-visual-embedding-v0)")
+    gr.Markdown("""
+    - A Vision Language Model Dense Retriever ([minicpm-visual-embedding-v0](https://huggingface.co/RhapsodyAI/minicpm-visual-embedding-v0)) **directly reads** your PDFs **without the need for OCR**, produces **multimodal dense representations**, and builds your personal library.
+
+    - **Ask a question**: it retrieves the most relevant pages, then [MiniCPM-V-2.6](https://huggingface.co/spaces/openbmb/MiniCPM-V-2_6) answers your question based on the recalled pages, with strong multi-image understanding capability.
+
+    - It helps you read a long **visually-intensive** or **text-oriented** PDF document and find the pages that answer your question.
+
+    - It helps you build a personal library and retrieve book pages from a large collection of books.
+
+    - It works like a human: read, store, retrieve, and answer with full vision.
+    """)

-    gr.Markdown("- Currently we support PDF document with less than 50 pages, PDF over 50 pages will reach GPU time limit.")
+    gr.Markdown("- Currently the online demo supports PDF documents with fewer than 50 pages due to the GPU time limit. Deploy on your own machine for longer PDFs and books.")

     with gr.Row():
-        file_input = gr.File(type="binary", label="Upload PDF")
-        file_result = gr.Text(label="Knowledge Base ID (remember this!)")
+        file_input = gr.File(type="binary", label="Step 1: Upload PDF")
+        file_result = gr.Text(label="Knowledge Base ID (remember it, it is reusable!)")
         process_button = gr.Button("Process PDF (don't click until the PDF upload succeeds)")

     process_button.click(add_pdf_gradio, inputs=[file_input], outputs=file_result)

     with gr.Row():
-        kb_id_input = gr.Text(label="Your Knowledge Base ID (paste your Knowledge Base ID here:)")
+        kb_id_input = gr.Text(label="Your Knowledge Base ID (paste your Knowledge Base ID here, it is reusable)")
         query_input = gr.Text(label="Your Question")
         topk_input = gr.Number(value=5, minimum=1, maximum=10, step=1, label="Number of pages to retrieve")
-        retrieve_button = gr.Button("Step 1: Retrieve")
-
-    with gr.Row():
-        downvote_button = gr.Button("🤣Downvote")
-        upvote_button = gr.Button("🤗Upvote")
+        retrieve_button = gr.Button("Step 2: Retrieve Pages")

     with gr.Row():
-        images_output = gr.Gallery(label="Step 2: Retrieved Pages")
+        images_output = gr.Gallery(label="Retrieved Pages")

     retrieve_button.click(retrieve_gradio, inputs=[kb_id_input, query_input, topk_input], outputs=images_output)

+    with gr.Row():
+        button = gr.Button("Step 3: Answer Question with Retrieved Pages")
+
+    gen_model_response = gr.Textbox(label="MiniCPM-V-2.6's Answer")
+
+    button.click(fn=answer_question, inputs=[images_output, query_input], outputs=gen_model_response)
+
+    with gr.Row():
+        downvote_button = gr.Button("🤣Downvote")
+        upvote_button = gr.Button("🤗Upvote")
+
     upvote_button.click(upvote, inputs=[kb_id_input, query_input], outputs=None)
     downvote_button.click(downvote, inputs=[kb_id_input, query_input], outputs=None)

     gr.Markdown("By using this demo, you agree to share your usage data with us for research purposes, to help improve the user experience.")


-app.launch()
+app.launch()
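The new `answer_question` handler receives the Gallery value, which Gradio hands over as a list of `(image_path, caption)` tuples (as the inline comment notes), opens the pages, and forwards them to MiniCPM-V-2.6's `chat` interface. A hedged standalone call, with placeholder paths:

```python
# Placeholder paths; in the app these come from the retrieved-pages gallery.
retrieved_pages = [('page_03.png', None), ('page_07.png', None)]
print(answer_question(retrieved_pages, "What does the revenue table show?"))
```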
 
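Taken together, the commit turns the pure retrieval demo into a three-step RAG pipeline: index a PDF, retrieve pages, then answer from the retrieved pages. Stripped of the Gradio layer, the flow is roughly the following sketch (file name and query are placeholders, and it assumes `retrieve_gradio` returns gallery-style `(image_path, caption)` entries, as the `answer_question` wiring suggests):

```python
# Illustrative end-to-end run without the UI.
with open('my_document.pdf', 'rb') as f:
    kb_id = add_pdf_gradio(f.read())          # Step 1: embed pages, get knowledge-base ID
query = "What is the main finding?"
pages = retrieve_gradio(kb_id, query, 5)      # Step 2: recall the top-5 pages
print(answer_question(pages, query))          # Step 3: answer from the recalled pages
```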