import io
import json

import datasets
import jsonlines
import zstandard


def parse_json(x):
    """Parse one line of a .jsonl shard into a Python object."""
    return json.loads(x)


_DESCRIPTION = "DaruLM: domain-split text collections for LLM training."

_URLS = [
    "libru_accounting_0.jsonl.zst",
    "libru_antique_0.jsonl.zst",
    "libru_antique_1.jsonl.zst",
    "libru_aphorisms_0.jsonl.zst",
    "libru_art_0.jsonl.zst",
    "libru_biography_0.jsonl.zst",
    "libru_biography_1.jsonl.zst",
    "libru_biography_2.jsonl.zst",
    "libru_biography_3.jsonl.zst",
    "libru_biography_4.jsonl.zst",
    "libru_biology_0.jsonl.zst",
    "libru_business_0.jsonl.zst",
    "libru_cinema_0.jsonl.zst",
    "libru_computers_0.jsonl.zst",
    "libru_design_0.jsonl.zst",
    "libru_dramaturgy_0.jsonl.zst",
    "libru_economics_0.jsonl.zst",
    "libru_essay_0.jsonl.zst",
    "libru_essay_1.jsonl.zst",
    "libru_essay_2.jsonl.zst",
    "libru_fantasy_0.jsonl.zst",
    "libru_geography_0.jsonl.zst",
    "libru_guidebooks_0.jsonl.zst",
    "libru_guidebooks_1.jsonl.zst",
    "libru_history_0.jsonl.zst",
    "libru_history_1.jsonl.zst",
    "libru_history_2.jsonl.zst",
    "libru_history_3.jsonl.zst",
    "libru_history_4.jsonl.zst",
    "libru_history_5.jsonl.zst",
    "libru_humor_0.jsonl.zst",
    "libru_language_0.jsonl.zst",
    "libru_law_0.jsonl.zst",
    "libru_literature_0.jsonl.zst",
    "libru_medicine_0.jsonl.zst",
    "libru_military_0.jsonl.zst",
    "libru_music_0.jsonl.zst",
    "libru_philosophy_0.jsonl.zst",
    "libru_politic_0.jsonl.zst",
    "libru_prose_0.jsonl.zst",
    "libru_prose_1.jsonl.zst",
    "libru_prose_2.jsonl.zst",
    "libru_psychology_0.jsonl.zst",
    "libru_psychology_1.jsonl.zst",
    "libru_reference_0.jsonl.zst",
    "libru_religion_0.jsonl.zst",
    "libru_religion_1.jsonl.zst",
    "libru_religion_2.jsonl.zst",
    "libru_religion_3.jsonl.zst",
    "libru_science_0.jsonl.zst",
    "libru_science_1.jsonl.zst",
    "libru_science_2.jsonl.zst",
    "libru_sociology_0.jsonl.zst",
    "libru_textbook_0.jsonl.zst",
    "libru_UNDEFINED_0.jsonl.zst",
    "rulm_buriy_0.jsonl.zst",
    "rulm_buriy_1.jsonl.zst",
    "rulm_buriy_2.jsonl.zst",
    "rulm_gazeta_0.jsonl.zst",
    "rulm_habr_0.jsonl.zst",
    "rulm_habr_1.jsonl.zst",
    "rulm_lenta_0.jsonl.zst",
    "rulm_ods-tass_0.jsonl.zst",
    "rulm_ods-tass_1.jsonl.zst",
    "rulm_pikabu_0.jsonl.zst",
    "rulm_pikabu_1.jsonl.zst",
    "rulm_pikabu_2.jsonl.zst",
    "rulm_taiga-fontanka_0.jsonl.zst",
    "rulm_wiki_0.jsonl.zst",
    "rulm_wiki_1.jsonl.zst",
    "rulm_wiki_2.jsonl.zst",
    "wiki40_enwiki_0.jsonl.zst",
]

DOMAINS = sorted(set(url.split('_')[1] for url in _URLS))

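# Illustration (derived from _URLS above): a domain is the token between the
# first and second underscore of a shard name, e.g.
# "libru_history_3.jsonl.zst" -> "history", "rulm_ods-tass_0.jsonl.zst" ->
# "ods-tass", "wiki40_enwiki_0.jsonl.zst" -> "enwiki"; "UNDEFINED" stays a
# domain of its own.
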
_VERSION = datasets.Version("1.1.0")


class DarulmConfig(datasets.BuilderConfig):
    """BuilderConfig for DaruLM."""

    def __init__(self, domains="all", version=_VERSION, **kwargs):
        """BuilderConfig for DaruLM.

        Args:
            domains (str or list, default "all"): domain name or list of
                domain names; "all" selects every available domain.
            **kwargs: keyword arguments forwarded to the parent BuilderConfig.
        """
        if isinstance(domains, str):
            domains = [domains]
        # The config name joins the requested domains with "+".
        name = "+".join(domains)
        if name == "all":
            domains = DOMAINS

        missing_domains = set(domains) - set(DOMAINS)
        assert not missing_domains, f"{missing_domains} not found in domain list"

        if version != _VERSION:
            name = f"{name}-{version}"
        super().__init__(name=name, version=version, **kwargs)
        self.domains = domains


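# Example (a sketch, not executed when the script is imported): requesting two
# domains produces a combined config whose name joins them with "+".
#
#     cfg = DarulmConfig(domains=["history", "science"])
#     cfg.name     # "history+science"
#     cfg.domains  # ["history", "science"]

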
class DarulmDataset(datasets.GeneratorBasedBuilder):
    VERSION = _VERSION

    BUILDER_CONFIG_CLASS = DarulmConfig
    # One config per domain, plus an "all" config covering the full collection.
    BUILDER_CONFIGS = [
        DarulmConfig(
            domains="all",
            description="Full DaruLM collection.",
        ),
    ] + [
        DarulmConfig(
            domains=dn,
            description=f"DaruLM part with {dn} texts",
        )
        for dn in sorted(DOMAINS)
    ]
    DEFAULT_CONFIG_NAME = "all"

    def _info(self):
        features = datasets.Features(
            {
                "text": datasets.Value("string"),
                "domain": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
        )

    def _split_generators(self, dl_manager):
        # Keep only the shards whose file names contain a requested domain,
        # delimited by underscores.
        required_urls = [
            path for dn in self.config.domains for path in _URLS if f"_{dn}_" in path
        ]
        downloaded_files = dl_manager.download(required_urls)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"paths": downloaded_files},
            ),
        ]

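    # Illustration (not executed): with the "history" config the comprehension
    # above selects libru_history_0 ... libru_history_5; the surrounding
    # underscores in f"_{dn}_" keep the "wiki" domain from also matching the
    # "wiki40_enwiki_0" shard.
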
    def _generate_examples(self, paths):
        # Stream-decompress each zstd-compressed shard and yield its JSON
        # lines, numbering examples consecutively across shards.
        true_id = 0
        for path in paths:
            with open(path, "rb") as f:
                dctx = zstandard.ZstdDecompressor()
                reader_stream = io.BufferedReader(dctx.stream_reader(f))
                reader = jsonlines.Reader(reader_stream, loads=parse_json)
                for item in reader:
                    yield true_id, item
                    true_id += 1
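

# Minimal usage sketch (an assumption, not part of the loader: it presumes the
# script is saved as e.g. "darulm.py" next to the .jsonl.zst shards, and recent
# versions of `datasets` may additionally require trust_remote_code=True):
#
#     from datasets import load_dataset
#     ds = load_dataset("darulm.py", "history", split="train")
#     print(ds[0]["domain"], ds[0]["text"][:200])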