import datasets
import zipfile
import urllib.request
import os

logger = datasets.logging.get_logger(__name__)

_DESCRIPTION = """\
Taiga is an open corpus of Russian-language texts (news, prose, poetry and
other genres) distributed as zip archives of plain-text files.
This loader yields one example per .txt document found in the archives.
"""

_CITATION = """
@inproceedings{shavrina2017methodology,
  author = {Shavrina, Tatiana and Shapovalova, Olga},
  title = {To the methodology of corpus construction for machine learning: "Taiga" syntax tree corpus and parser},
  booktitle = {Proceedings of "CORPORA-2017"},
  year = {2017},
}
"""

# Base location of the Taiga archives. NOTE(review): these constants are not
# referenced by _split_generators below -- confirm whether the archives should
# be fetched from here or from the dataset repository (see _split_generators).
_URL = "https://linghub.ru/static/Taiga/news.zip"
_DATA_URL = "https://linghub.ru/static/Taiga/news.zip"


class TaigaConfig(datasets.BuilderConfig):
    """BuilderConfig for the Taiga corpus."""

    def __init__(self, *args, **kwargs):
        """BuilderConfig for Taiga.

        Args:
            **kwargs: keyword arguments forwarded to super.
        """
        # The config name is pinned to "taiga"; callers cannot override it.
        super().__init__(
            *args,
            name="taiga",
            **kwargs,
        )


class Taiga(datasets.GeneratorBasedBuilder):
    """Taiga, an open corpus of Russian-language texts."""

    BUILDER_CONFIGS = [TaigaConfig()]
    BUILDER_CONFIG_CLASS = TaigaConfig

    def _info(self):
        """Return dataset metadata: a single string feature ``text``."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            citation=_CITATION,  # fix: _CITATION was defined but never attached
            features=datasets.Features(
                {
                    "text": datasets.Value("string"),
                }
            ),
        )

    def _split_generators(self, dl_manager):
        """Download the corpus archives and declare a single TRAIN split."""
        # NOTE(review): these names are relative, so dl_manager resolves them
        # against the dataset repository root, not _DATA_URL. Confirm the zip
        # files are actually hosted alongside this script.
        downloaded_files = dl_manager.download(
            ["proza_ru2.zip", "stihi_ru.zip", "proza_ru1.zip"]
        )
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepaths": downloaded_files},
            ),
        ]

    def _generate_examples(self, filepaths):
        """Yield ``(id, {"text": ...})`` for every nested .txt member of each zip.

        Args:
            filepaths: list of local paths to downloaded zip archives.
        """
        id_ = 0
        for filepath in filepaths:
            logger.info("generating examples from = %s", filepath)
            with zipfile.ZipFile(filepath) as z:
                for info in z.infolist():
                    name = info.filename
                    # Bug fixes vs. the original:
                    #  * os.path.isdir(name) checked the *local* filesystem,
                    #    not the archive -- use ZipInfo.is_dir() instead.
                    #  * '.txt' in name.split('/') only matched a component
                    #    literally equal to ".txt" -- use endswith(".txt").
                    if info.is_dir():
                        continue
                    # Depth filter (> 2 path components) kept from the
                    # original to skip top-level stray files.
                    if name.endswith(".txt") and len(name.split("/")) > 2:
                        with z.open(info) as f:
                            txt = f.read().decode("utf-8")
                        yield id_, {"text": txt}
                        id_ += 1