|
|
|
""" |
|
This scripts downloads WARC files from commoncrawl.org's news crawl and extracts articles from these files. You can |
|
define filter criteria that need to be met (see YOUR CONFIG section), otherwise an article is discarded. Currently, the |
|
script stores the extracted articles in JSON files, but this behaviour can be adapted to your needs in the method |
|
on_valid_article_extracted. To speed up the crawling and extraction process, the script supports multiprocessing. You can |
|
control the number of processes with the parameter my_number_of_extraction_processes. |
|
|
|
You can also crawl and extract articles programmatically, i.e., from within |
|
your own code, by using the class CommonCrawlCrawler or the function |
|
commoncrawl_crawler.crawl_from_commoncrawl(...) provided in |
|
newsplease.crawler.commoncrawl_crawler.py. In this case there is also the |
|
possibility of passing in a your own subclass of CommonCrawlExtractor as |
|
extractor_cls=... . One use case here is that your subclass can customise |
|
filtering by overriding `.filter_record(...)`. |
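
A minimal sketch of such a programmatic call (the keyword arguments simply mirror those passed in main() below,
and the import path for CommonCrawlExtractor is an assumption that may differ between news-please versions):

    import datetime
    from newsplease.crawler import commoncrawl_crawler
    from newsplease.crawler.commoncrawl_extractor import CommonCrawlExtractor

    class MyExtractor(CommonCrawlExtractor):
        def filter_record(self, warc_record, article=None):
            # accept every record; see the commented-out CustomExtractor example further below for real filtering
            return True, article

    def on_article(article, *args, **kwargs):
        # depending on the crawler version, additional arguments may be passed (cf. on_valid_article_extracted)
        print(article.url)

    commoncrawl_crawler.crawl_from_commoncrawl(
        on_article,
        warc_files_start_date=datetime.datetime(2022, 1, 1),
        number_of_extraction_processes=4,
        extractor_cls=MyExtractor)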
|
|
|
This script uses relative imports to ensure that the latest, local version of news-please is used, instead of the one
that might have been installed with pip. Hence, you must run this script using the following workflow:

git clone https://github.com/fhamborg/news-please.git
cd news-please
python3 -m newsplease.examples.commoncrawl
|
Note that by default the script does not extract main images since they are not contained in the
WARC files. You can enable extraction of main images by setting `my_fetch_images=True`.
|
""" |
|
import hashlib
import json
import logging
import os
import sys
import datetime
from datetime import date

# local module that provides crawl_from_commoncrawl(...)
import custom_commoncrawl_crawler
|
__author__ = "Felix Hamborg" |
|
__copyright__ = "Copyright 2017" |
|
__credits__ = ["Sebastian Nagel"] |
|
|
|
|
|
|
|
|
|
############ YOUR CONFIG ############
# download dir for WARC files
my_local_download_dir_warc = './cc_download_warc/'
# download dir for extracted articles
my_local_download_dir_article = './cc_download_articles/'
# hosts to accept (if the list is empty, articles from any host are accepted)
my_filter_valid_hosts = []
# earliest accepted publishing date of an article (None: no lower bound), as datetime
my_filter_start_date = None
# latest accepted publishing date of an article (None: no upper bound), as datetime
my_filter_end_date = None
|
# restrict the downloaded WARC files to this date range (None: no bound); this refers to the date of
# the WARC file in the news crawl, not to the publishing date of the individual articles
my_warc_files_start_date = datetime.datetime(2022, 1, 1)
my_warc_files_end_date = None

# if True, articles without a determinable publishing date are discarded whenever a date filter is active
my_filter_strict_date = True
|
# if True, previously downloaded WARC files are reused instead of being downloaded again
my_reuse_previously_downloaded_files = False
# if True, continue with the next WARC file if an error occurs
my_continue_after_error = True
# if True, show the download progress of the WARC files
my_show_download_progress = True
# log level of this script
my_log_level = logging.INFO
# 0: minified JSON per article, 1: pretty-printed JSON per article,
# 2: one JSON Lines file per WARC file (see on_valid_article_extracted)
my_json_export_style = 2
# number of parallel extraction processes
my_number_of_extraction_processes = 10
# if True, each WARC file is deleted after it has been processed
my_delete_warc_after_extraction = True
|
# passed to the crawler as continue_process (resume a previously interrupted run)
my_continue_process = True
# if True, also fetch each article's main image (images are not contained in the WARC files)
my_fetch_images = False
# passed through to custom_commoncrawl_crawler.crawl_from_commoncrawl as dry_run
my_dry_run = False
# passed through to custom_commoncrawl_crawler.crawl_from_commoncrawl as shuffle
shuffle = False
############ END YOUR CONFIG #########
|
logging.basicConfig(level=my_log_level) |
|
__logger = logging.getLogger(__name__) |
|
|
|
|
|
def __setup__():
    """
    Setup, i.e., create the download directory for extracted articles.
    :return:
    """
    os.makedirs(my_local_download_dir_article, exist_ok=True)
|
def __get_pretty_filepath(path, article):
    """
    Pretty might be a euphemism, but this function tries to avoid too long filenames while keeping some structure.
    :param path:
    :param article:
    :return:
    """
    short_filename = hashlib.sha256(article.filename.encode()).hexdigest()
    sub_dir = article.source_domain
    final_path = os.path.join(path, sub_dir)
    os.makedirs(final_path, exist_ok=True)
    return os.path.join(final_path, short_filename + '.json')
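
# For illustration: with the config above, __get_pretty_filepath() places an article from e.g. example.com
# at ./cc_download_articles/example.com/<sha256 of article.filename>.json (only relevant for
# my_json_export_style 0 and 1; style 2 writes JSON Lines files instead, see below).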
|
|
|
|
|
def on_valid_article_extracted(article, extractor, extra_data=None):
    """
    This function will be invoked for each article that was extracted successfully from the archived data and that
    satisfies the filter criteria.
    :param article: the extracted article
    :param extractor: the extractor instance that produced the article
    :param extra_data: optional dict with additional fields that are merged into the exported record
    :return:
    """
    data = article.__dict__
    if extra_data:
        data.update(extra_data)

    if my_json_export_style != 2:
        # styles 0 and 1: one JSON file per article
        with open(__get_pretty_filepath(my_local_download_dir_article, article), 'w', encoding='utf-8') as outfile:
            if my_json_export_style == 0:
                json.dump(data, outfile, default=str, separators=(',', ':'), ensure_ascii=False)
            elif my_json_export_style == 1:
                json.dump(data, outfile, default=str, indent=4, sort_keys=True, ensure_ascii=False)
    else:
        # style 2: append to one JSON Lines file per WARC file, grouped by year
        warc_filename = os.path.basename(extractor.warc_path).replace('.warc.gz', '')
        # CC-NEWS WARC file names contain the crawl date, e.g. CC-NEWS-20220101... -> year '2022'
        year = warc_filename[8:12]
        os.makedirs(os.path.join(my_local_download_dir_article, year), exist_ok=True)
        with open(os.path.join(my_local_download_dir_article, year, warc_filename + '.jsonl'), 'a',
                  encoding='utf-8') as outfile:
            outfile.write(json.dumps(data, default=str, separators=(',', ':'), ensure_ascii=False) + '\n')
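
# A sketch of an alternative callback (not part of the original script): instead of writing JSON files,
# a callback like the following could simply print a few fields of each extracted article. Pass such a
# function as the first argument to crawl_from_commoncrawl(...) if you need different behaviour; url and
# title are standard news-please article fields.
#
#     def print_article(article, extractor, extra_data=None):
#         print(article.source_domain, article.url, article.title)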
|
|
|
|
|
|
|
def callback_on_warc_completed(warc_path, counter_article_passed, counter_article_discarded,
                               counter_article_error, counter_article_total, counter_warc_processed):
    """
    This function will be invoked for each WARC file that was processed completely. The parameters represent total
    values, i.e., accumulated over all previously processed WARC files.
    :param warc_path:
    :param counter_article_passed:
    :param counter_article_discarded:
    :param counter_article_error:
    :param counter_article_total:
    :param counter_warc_processed:
    :return:
    """
    pass
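
    # A possible body (a sketch, not part of the original script): log the cumulative counters
    # after each completed WARC file, e.g.:
    #     __logger.info('%s completed: %d passed, %d discarded, %d errors, %d articles total; %d WARC files processed',
    #                   warc_path, counter_article_passed, counter_article_discarded,
    #                   counter_article_error, counter_article_total, counter_warc_processed)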
|
|
|
""" |
|
class CustomExtractor(CommonCrawlExtractor): |
|
|
|
def filter_record(self, warc_record, article=None): |
|
|
|
url = warc_record.rec_headers.get_header('WARC-Target-URI') |
|
|
|
# filter by host |
|
if self.__filter_valid_hosts: |
|
# very simple check, check if one of the required host names is contained in the url of the WARC transaction |
|
# better would be to extract the host name from the WARC transaction Target URI and then check for equality |
|
# because currently something like g.co?forward_url=facebook.com would yield a positive filter test for |
|
# facebook.com even though the actual host is g.co |
|
for valid_host in self.__filter_valid_hosts: |
|
if valid_host in url: |
|
break |
|
else: |
|
return False, article |
|
|
|
# filter by url suffix |
|
valid_suffixes = {'br', 'pt'} |
|
url_suffixes = tldextract.extract(url).suffix.split('.') |
|
valid_suffix = False |
|
for suffix in url_suffixes: |
|
if suffix in valid_suffixes: |
|
valid_suffix = True |
|
break |
|
|
|
if not valid_suffix: |
|
return False, article |
|
|
|
# filter by date |
|
if self.__filter_start_date or self.__filter_end_date: |
|
if not article: |
|
article = self._from_warc(warc_record) |
|
|
|
publishing_date = self.__get_publishing_date(warc_record, article) |
|
if not publishing_date: |
|
if self.__filter_strict_date: |
|
return False, article |
|
else: # here we for sure have a date |
|
# is article published too early? |
|
if self.__filter_start_date and publishing_date < self.__filter_start_date: |
|
return False, article |
|
if self.__filter_end_date and publishing_date > self.__filter_end_date: |
|
return False, article |
|
|
|
return True, article |
|
""" |
|
|
|
def main():
    global my_local_download_dir_warc
    global my_local_download_dir_article
    global my_delete_warc_after_extraction
    global my_number_of_extraction_processes

    if len(sys.argv) >= 2:
        my_local_download_dir_warc = sys.argv[1]
    if len(sys.argv) >= 3:
        my_local_download_dir_article = sys.argv[2]
    if len(sys.argv) >= 4:
        my_delete_warc_after_extraction = sys.argv[3] == "delete"
    if len(sys.argv) >= 5:
        my_number_of_extraction_processes = int(sys.argv[4])
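
    # Example invocation (all arguments are optional and parsed positionally above):
    #   python3 -m newsplease.examples.commoncrawl ./cc_download_warc/ ./cc_download_articles/ delete 4
    # Any third argument other than "delete" keeps the WARC files after extraction.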
|
|
|
print("my_local_download_dir_warc=" + my_local_download_dir_warc) |
|
print("my_local_download_dir_article=" + my_local_download_dir_article) |
|
print("my_delete_warc_after_extraction=" + str(my_delete_warc_after_extraction)) |
|
print("my_number_of_extraction_processes=" + str(my_number_of_extraction_processes)) |
|
|
|
__setup__() |
|
custom_commoncrawl_crawler.crawl_from_commoncrawl(on_valid_article_extracted, |
|
callback_on_warc_completed=callback_on_warc_completed, |
|
valid_hosts=my_filter_valid_hosts, |
|
start_date=my_filter_start_date, |
|
end_date=my_filter_end_date, |
|
warc_files_start_date=my_warc_files_start_date, |
|
warc_files_end_date=my_warc_files_end_date, |
|
strict_date=my_filter_strict_date, |
|
reuse_previously_downloaded_files=my_reuse_previously_downloaded_files, |
|
local_download_dir_warc=my_local_download_dir_warc, |
|
continue_after_error=my_continue_after_error, |
|
show_download_progress=my_show_download_progress, |
|
number_of_extraction_processes=my_number_of_extraction_processes, |
|
log_level=my_log_level, |
|
delete_warc_after_extraction=my_delete_warc_after_extraction, |
|
continue_process=True, |
|
fetch_images=my_fetch_images, |
|
dry_run=my_dry_run, |
|
shuffle=shuffle, |
|
local_download_dir_article=my_local_download_dir_article) |
|
|
|
|
|
if __name__ == "__main__": |
|
main() |