import os
import re
import time
import logging
import zipfile
import requests
import bibtexparser
from tqdm import tqdm
from urllib.parse import quote, urlencode
import gradio as gr
from bs4 import BeautifulSoup
import io
from docx import Document
from docx.shared import Inches
from docx.enum.text import WD_ALIGN_PARAGRAPH

# Configure logging
logging.basicConfig(level=logging.INFO,
                    format='%(asctime)s - %(levelname)s: %(message)s')
logger = logging.getLogger(__name__)


class PaperDownloader:
    def __init__(self, output_dir='papers'):
        self.output_dir = output_dir
        os.makedirs(output_dir, exist_ok=True)

        # Updated download sources
        self.download_sources = [
            'https://sci-hub.ee/',
            'https://sci-hub.st/',
            'https://sci-hub.ru/',
            'https://sci-hub.ren/',
            'https://sci-hub.mksa.top/',
            'https://sci-hub.se/',
            'https://libgen.rs/scimag/'
        ]

        # Request headers
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36',
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,*/*;q=0.8'
        }

    def clean_doi(self, doi):
        """Clean and encode DOI for URL"""
        if not isinstance(doi, str):
            return None
        return quote(doi.strip()) if doi else None

    def download_paper_scihub(self, doi):
        """Improved method to download a paper from Sci-Hub"""
        if not doi:
            logger.warning("DOI not provided")
            return None

        for base_url in self.download_sources:
            try:
                scihub_url = f"{base_url}{self.clean_doi(doi)}"

                # Request with more tolerance
                response = requests.get(scihub_url,
                                        headers=self.headers,
                                        allow_redirects=True,
                                        timeout=15)

                # Search for multiple PDF URL patterns
                pdf_patterns = [
                    r'(https?://[^\s<>"]+?\.pdf)',
                    r'(https?://[^\s<>"]+?download/[^\s<>"]+)',
                    r'(https?://[^\s<>"]+?/pdf/[^\s<>"]+)',
                ]

                pdf_urls = []
                for pattern in pdf_patterns:
                    pdf_urls.extend(re.findall(pattern, response.text))

                # Try downloading from found URLs
                for pdf_url in pdf_urls:
                    try:
                        pdf_response = requests.get(pdf_url,
                                                    headers=self.headers,
                                                    timeout=10)

                        # Verify if it's a PDF
                        if 'application/pdf' in pdf_response.headers.get('Content-Type', ''):
                            logger.debug(f"Found PDF from: {pdf_url}")
                            return pdf_response.content
                    except Exception as e:
                        logger.debug(f"Error downloading PDF from {pdf_url}: {e}")

            except Exception as e:
                logger.debug(f"Error trying to download {doi} from {base_url}: {e}")

        return None

    def download_paper_libgen(self, doi):
        """Download from Libgen, handles the query and the redirection"""
        if not doi:
            return None

        base_url = 'https://libgen.rs/scimag/'
        try:
            search_url = f"{base_url}?q={self.clean_doi(doi)}"
            response = requests.get(search_url, headers=self.headers, allow_redirects=True, timeout=10)
            response.raise_for_status()

            if "No results" in response.text:
                logger.debug(f"No results for DOI: {doi} on libgen")
                return None

            soup = BeautifulSoup(response.text, 'html.parser')

            # Find the link using a specific selector
            links = soup.select('table.c > tbody > tr:nth-child(2) > td:nth-child(1) > a')

            if links:
                link = links[0]
                pdf_url = link['href']
                pdf_response = requests.get(pdf_url, headers=self.headers, allow_redirects=True, timeout=10)
                if 'application/pdf' in pdf_response.headers.get('Content-Type', ''):
                    logger.debug(f"Found PDF from: {pdf_url}")
                    return pdf_response.content
        except Exception as e:
            logger.debug(f"Error trying to download {doi} from libgen: {e}")
        return None

    def download_paper_google_scholar(self, doi):
        """Search Google Scholar for an article with the given DOI and try to get the PDF"""
        if not doi:
            return None

        try:
            query = f'doi:"{doi}"'
            params = {'q': query}
            url = f'https://scholar.google.com/scholar?{urlencode(params)}'

            response = requests.get(url, headers=self.headers, timeout=10)
            response.raise_for_status()

            soup = BeautifulSoup(response.text, 'html.parser')

            # Find any links labelled [PDF]
            links = soup.find_all('a', string=re.compile(r'\[PDF\]', re.IGNORECASE))

            if links:
                pdf_url = links[0]['href']
                pdf_response = requests.get(pdf_url, headers=self.headers, timeout=10)
                if 'application/pdf' in pdf_response.headers.get('Content-Type', ''):
                    logger.debug(f"Found PDF from: {pdf_url}")
                    return pdf_response.content
        except Exception as e:
            logger.debug(f"Google Scholar error for {doi}: {e}")
        return None

    def download_paper_crossref(self, doi):
        """Alternative search method using Crossref"""
        if not doi:
            return None

        try:
            # Query the Crossref works endpoint for the article metadata
            url = f"https://api.crossref.org/works/{doi}"
            response = requests.get(url, headers=self.headers, timeout=10)

            if response.status_code == 200:
                data = response.json()
                work = data.get('message', {})

                # Search for open access links
                links = work.get('link', [])
                for link in links:
                    if link.get('content-type') == 'application/pdf':
                        pdf_url = link.get('URL')
                        if pdf_url:
                            pdf_response = requests.get(pdf_url, headers=self.headers, timeout=10)
                            if 'application/pdf' in pdf_response.headers.get('Content-Type', ''):
                                logger.debug(f"Found PDF from: {pdf_url}")
                                return pdf_response.content
        except Exception as e:
            logger.debug(f"Crossref error for {doi}: {e}")
        return None

    def download_with_retry(self, doi, max_retries=3, initial_delay=2):
        """Downloads a paper using multiple strategies with exponential backoff"""
        pdf_content = None
        retries = 0
        delay = initial_delay

        while retries < max_retries and not pdf_content:
            try:
                pdf_content = (
                    self.download_paper_scihub(doi) or
                    self.download_paper_libgen(doi) or
                    self.download_paper_google_scholar(doi) or
                    self.download_paper_crossref(doi)
                )
                if pdf_content:
                    return pdf_content
            except Exception as e:
                logger.error(f"Error in download attempt {retries + 1} for DOI {doi}: {e}")

            if not pdf_content:
                retries += 1
                logger.warning(f"Retry attempt {retries} for DOI: {doi} after {delay} seconds")
                time.sleep(delay)
                delay *= 2  # Exponential backoff

        return None

    def download_single_doi(self, doi):
        """Downloads a single paper using a DOI.

        Returns a tuple of (filepath, success_message, failure_message, None, None).
        """
        if not doi:
            return None, "", "Error: DOI not provided", None, None

        try:
            pdf_content = self.download_with_retry(doi)
            if pdf_content:
                filename = f"{str(doi).replace('/', '_').replace('.', '_')}.pdf"
                filepath = os.path.join(self.output_dir, filename)
                with open(filepath, 'wb') as f:
                    f.write(pdf_content)
                logger.info(f"Successfully downloaded: {filename}")
                return filepath, f'<a href="https://doi.org/{doi}">{doi}</a>', "", None, None
            else:
                logger.warning(f"Could not download: {doi}")
                return None, "", f'Could not download <a href="https://doi.org/{doi}">{doi}</a>', None, None
        except Exception as e:
            logger.error(f"Error processing {doi}: {e}")
            return None, "", f"Error processing {doi}: {e}", None, None


# Create a PaperDownloader instance
downloader = PaperDownloader()


def download_doi_callback(doi):
    filepath, success_message, fail_message, _, _ = downloader.download_single_doi(doi)
    return filepath, success_message, fail_message


# Gradio interface for downloading a single paper by DOI
iface = gr.Interface(
    fn=download_doi_callback,
    inputs=gr.Textbox(label="Enter DOI", placeholder="Enter DOI to download"),
    outputs=[
        gr.File(label="Download Paper"),
        gr.HTML(label="Success Message"),
        gr.HTML(label="Failure Message")
    ],
    live=True,
)

iface.launch()
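
# Minimal illustrative sketch: PaperDownloader can also be used programmatically,
# without the Gradio UI. The DOI string and output filename below are placeholders,
# not references to a real paper; the block is kept commented out so it does not
# run when the Space starts.
#
#   dl = PaperDownloader(output_dir='papers')
#   pdf_bytes = dl.download_with_retry('10.xxxx/placeholder')
#   if pdf_bytes:
#       with open(os.path.join(dl.output_dir, 'example.pdf'), 'wb') as f:
#           f.write(pdf_bytes)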