import json
import threading
import xml.etree.ElementTree as ET

import requests
from bs4 import BeautifulSoup

import utils
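
# `utils` is a project-local module not shown in this file. A minimal sketch of
# the helpers this script assumes it provides (signatures inferred from the
# call sites below; these are illustrative, not the project's actual code):
#
#   def fetch_page(url: str) -> str: ...            # GET `url`, return the body text
#   def download_datafile(name: str) -> bool: ...   # pull the dedup file from storage
#   def check_data_in_file(entry: str, name: str) -> bool: ...  # entry already recorded?
#   def write_data_to_file(entry: str, name: str) -> None: ...  # record a new entry
#   def upload_datafile(name: str) -> bool: ...     # push the dedup file back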


def fetch_links(category):
    """Collect article links from the ScienceDaily RSS feed for one category."""
    links = []
    xml_data = utils.fetch_page(f"https://www.sciencedaily.com/rss/top/{category.lower()}.xml")
    items = ET.fromstring(xml_data).findall('channel/item')
    for item in items:
        # Guard against items with a missing or empty <link> element.
        link = item.find('link')
        if link is not None and link.text:
            links.append(link.text)
    return links
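
# Illustrative example (made-up URL): fetch_links("Health") returns one entry
# per <item><link> in the feed, e.g.
#   ["https://www.sciencedaily.com/releases/2024/01/240101000000.htm", ...]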


def fetch_all_links():
    categories = ["Science", "Health", "Environment", "Technology", "Society"]
    sd_links_data = {}
    for category in categories:
        sd_links_data[category] = fetch_links(category)
    return json.dumps(sd_links_data, indent=4, ensure_ascii=False)


def fetch_dois():
    """Scrape each article page and pull DOIs from its journal references."""
    doi_data = {}
    data = json.loads(fetch_all_links())
    for topic, links in data.items():
        doi_list = []
        for link in links:
            page_content = utils.fetch_page(link)
            page_datas = BeautifulSoup(page_content, 'html.parser').find_all("div", id="journal_references")
            for page_data in page_datas:
                doi_link = page_data.find("a", href=True)
                if doi_link:
                    doi = doi_link.text.strip()
                    # DOIs always start with the "10." directory prefix.
                    if doi.startswith('10.'):
                        doi_list.append(doi)
        doi_data[topic] = doi_list
    return json.dumps(doi_data, indent=4, ensure_ascii=False)
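
# Illustrative result (made-up DOIs):
#   {"Science": ["10.1038/s41586-024-00000-0"], "Health": [], ...}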


def fetch_doi_data():
    result = []

    def fetch_and_store():
        result.append(fetch_dois())

    thread = threading.Thread(target=fetch_and_store)
    thread.start()
    thread.join()
    # Return a JSON string in the empty case too, so callers can always json.loads() it.
    return result[0] if result else "{}"
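
# Design note: the worker thread is joined immediately, so fetch_doi_data() is
# effectively synchronous; the thread only isolates the long-running network
# work rather than adding any concurrency.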


def doi_to_pmc():
    """Map collected DOIs to PMC IDs via the NCBI ID Converter API."""
    data = json.loads(fetch_doi_data())
    pmc_data = {}
    for topic, dois in data.items():
        if not dois:
            continue
        doi_list = ",".join(dois)
        try:
            url = f"https://www.ncbi.nlm.nih.gov/pmc/utils/idconv/v1.0/?email=raannakasturi@gmail.com&ids={doi_list}&format=json"
            doi_pmc_data = requests.get(url, timeout=30).json()
            if doi_pmc_data['status'] == 'ok':
                # Keep at most two PMC IDs per topic.
                pmc_list = [record['pmcid'] for record in doi_pmc_data['records']
                            if 'pmcid' in record and record.get('live', True)]
                pmc_data[topic] = pmc_list[:2]
        except Exception as e:
            print(f"Error: {e}")
    return json.dumps(pmc_data, indent=4, ensure_ascii=False)
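
# Illustrative ID Converter response (shape only; values are made up):
#   {"status": "ok",
#    "records": [{"pmcid": "PMC1234567", "pmid": "12345678", "doi": "10.1234/ok"},
#                {"doi": "10.1234/missing", "status": "error", "errmsg": "invalid article id"}]}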


def extract_pmc_data():
    """Keep up to two previously unseen PMC IDs per topic, tracked in pmc.txt."""
    if not utils.download_datafile('pmc.txt'):
        raise Exception("Failed to download datafile")
    pmc_data = {}
    pmcid_data = json.loads(doi_to_pmc())
    for topic, pmcids in pmcid_data.items():
        pmc_ids = []
        for pmcid in pmcids:
            if len(pmc_ids) >= 2:
                break
            # Only accept IDs not already recorded in pmc.txt, and record them.
            if not utils.check_data_in_file(pmcid, 'pmc.txt'):
                utils.write_data_to_file(pmcid, 'pmc.txt')
                pmc_ids.append(pmcid)
        pmc_data[topic] = {"ids": pmc_ids, "count": len(pmc_ids)}
    if not utils.upload_datafile('pmc.txt'):
        raise Exception("Failed to upload datafile")
    return json.dumps(pmc_data, indent=4, ensure_ascii=False)
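
# Illustrative final pmc_data.json content (made-up IDs):
#   {"Health": {"ids": ["PMC1234567", "PMC7654321"], "count": 2},
#    "Society": {"ids": [], "count": 0}}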


if __name__ == "__main__":
    data = extract_pmc_data()
    # ensure_ascii=False above means the JSON may contain non-ASCII characters,
    # so write with an explicit UTF-8 encoding.
    with open('pmc_data.json', 'w', encoding='utf-8') as f:
        f.write(data)