# chatpaper/pdfextract_fun.py
import os
import re
import warnings
import cv2
import fitz # PyMuPDF
import numpy as np
import pytesseract
import torch
from PIL import Image
from tqdm import tqdm
from detectron2.config import get_cfg
from detectron2.data import MetadataCatalog
from detectron2.engine import DefaultPredictor
from detectron2.utils.visualizer import ColorMode, Visualizer
from unilm.dit.object_detection.ditod import add_vit_config

# Filter specific warnings
warnings.filterwarnings("ignore", message="None of the inputs have requires_grad=True. Gradients will be None")
warnings.filterwarnings("ignore", message="torch.meshgrid: in an upcoming release, it will be required to pass the indexing argument.")

# Configuration setup
def setup_config():
    """Build the Detectron2 config for the DiT layout-detection model."""
    cfg = get_cfg()
    add_vit_config(cfg)
    cfg.merge_from_file("cascade_dit_base.yml")
    cfg.MODEL.WEIGHTS = "publaynet_dit-b_cascade.pth"
    cfg.MODEL.DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
    return cfg

# Analyze image
def analyze_image(img, cfg):
    """Analyze an image and return the result image, output, and visualizer."""
    md = MetadataCatalog.get(cfg.DATASETS.TEST[0])
    thing_classes = ["table"] if cfg.DATASETS.TEST[0] == 'icdar2019_test' else ["text", "title", "list", "table", "figure"]
    md.set(thing_classes=thing_classes)
    output = DefaultPredictor(cfg)(img)["instances"]
    v = Visualizer(img[:, :, ::-1], metadata=md, scale=1.0, instance_mode=ColorMode.SEGMENTATION)
    result = v.draw_instance_predictions(output.to("cpu"))
    return result.get_image()[:, :, ::-1], output, v
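
# Minimal sketch of batch_analyze_images, which process_jpeg_images below expects but which is
# not defined in this file: it simply runs analyze_image on each page in turn. A real batched
# version would share one DefaultPredictor and stack the images into a single forward pass;
# treat this as a placeholder, not the original implementation.
def batch_analyze_images(images, cfg):
    """Return a list of (result_image, output, visualizer) tuples, one per input image."""
    return [analyze_image(img, cfg) for img in images]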

# PDF to JPEG conversion
def convert_pdf_to_jpg(pdf_path, output_folder, zoom_factor=2):
    """Convert each page of a PDF file to a JPEG image in the specified output folder."""
    os.makedirs(output_folder, exist_ok=True)  # create the folder if it does not exist yet
    doc = fitz.open(pdf_path)
    for page_num, page in enumerate(doc):
        mat = fitz.Matrix(zoom_factor, zoom_factor)
        pix = page.get_pixmap(matrix=mat)
        output_file = os.path.join(output_folder, f"page_{page_num}.jpg")
        pix.save(output_file)
    doc.close()

# Process JPEG images in a folder
def process_jpeg_images(output_folder, cfg, batch_size=10):
    """Run layout analysis on the page images in output_folder and save the detected regions."""
    image_paths = sorted(os.path.join(output_folder, f) for f in os.listdir(output_folder) if f.endswith('.jpg'))
    batches = [image_paths[i:i + batch_size] for i in range(0, len(image_paths), batch_size)]
    for batch in tqdm(batches, desc="Processing images in batches"):
        images = [cv2.imread(image_path) for image_path in batch]
        batch_results = batch_analyze_images(images, cfg)  # expects a list of (result_image, output, visualizer) tuples, one per image
        for i, (_, output, _) in enumerate(batch_results):
            # Recover the PDF page number from the file name written by convert_pdf_to_jpg,
            # so pages in later batches do not overwrite earlier ones
            page_num = int(re.search(r"page_(\d+)", os.path.basename(batch[i])).group(1))
            save_extracted_instances(images[i], output, page_num, output_folder)

# Save extracted instances
def save_extracted_instances(img, output, page_num, dest_folder, confidence_threshold=0.8):
    """Save instances extracted from an image to the destination folder."""
    class_names = {0: "text", 1: "title", 2: "list", 3: "table", 4: "figure"}
    instances = output.to("cpu")
    for i, (box, class_id, score) in enumerate(zip(instances.pred_boxes.tensor.numpy(),
                                                   instances.pred_classes.tolist(),
                                                   instances.scores.tolist())):
        if score >= confidence_threshold and class_names.get(class_id) in ["figure", "table", "text"]:
            x1, y1, x2, y2 = map(int, box)
            cropped_image = img[y1:y2, x1:x2]
            # Skip empty or uniform crops (degenerate boxes or blank regions)
            if cropped_image.size > 0 and np.std(cropped_image) > 0:
                save_path = os.path.join(dest_folder, f"page_{page_num}_{class_names[class_id]}_{i + 1}.jpg")
                cv2.imwrite(save_path, cropped_image)

# Additional functions like delete_files_in_folder, rename_files_sequentially, ocr_folder, and
# ocr_image can be included here as-is, assuming they were satisfactory before.
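
# The OCR helpers themselves are not shown in this file; below is a minimal, assumed sketch of
# ocr_image built on the pytesseract and PIL imports above. The original version may differ.
def ocr_image(image_path, lang="eng"):
    """Run Tesseract OCR on a single image file and return the extracted text."""
    return pytesseract.image_to_string(Image.open(image_path), lang=lang)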

# Example usage
if __name__ == "__main__":
    cfg = setup_config()
    convert_pdf_to_jpg("sample.pdf", "output_folder")
    process_jpeg_images("output_folder", cfg)