# DaruLM dataset loading script (HuggingFace `datasets` builder).
import io
import zstandard
import jsonlines
import datasets
import json
def parse_json(x):
    """Deserialize one JSON document *x* and return the resulting object."""
    return json.loads(x)
# Short human-readable summary used for the dataset card.
_DESCRIPTION = "DaruLM for LLM"

# zstd-compressed JSONL shards. Filenames follow the pattern
# "<source>_<domain>_<shard>.jsonl.zst"; the second "_"-separated
# component (the domain) is used below to build DOMAINS and to select
# shards per builder config.
_URLS = [
    "libru_accounting_0.jsonl.zst",
    "libru_antique_0.jsonl.zst",
    "libru_antique_1.jsonl.zst",
    "libru_aphorisms_0.jsonl.zst",
    "libru_art_0.jsonl.zst",
    "libru_biography_0.jsonl.zst",
    "libru_biography_1.jsonl.zst",
    "libru_biography_2.jsonl.zst",
    "libru_biography_3.jsonl.zst",
    "libru_biography_4.jsonl.zst",
    "libru_biology_0.jsonl.zst",
    "libru_business_0.jsonl.zst",
    "libru_cinema_0.jsonl.zst",
    "libru_computers_0.jsonl.zst",
    "libru_design_0.jsonl.zst",
    "libru_dramaturgy_0.jsonl.zst",
    "libru_economics_0.jsonl.zst",
    "libru_essay_0.jsonl.zst",
    "libru_essay_1.jsonl.zst",
    "libru_essay_2.jsonl.zst",
    "libru_fantasy_0.jsonl.zst",
    "libru_geography_0.jsonl.zst",
    "libru_guidebooks_0.jsonl.zst",
    "libru_guidebooks_1.jsonl.zst",
    "libru_history_0.jsonl.zst",
    "libru_history_1.jsonl.zst",
    "libru_history_2.jsonl.zst",
    "libru_history_3.jsonl.zst",
    "libru_history_4.jsonl.zst",
    "libru_history_5.jsonl.zst",
    "libru_humor_0.jsonl.zst",
    "libru_language_0.jsonl.zst",
    "libru_law_0.jsonl.zst",
    "libru_literature_0.jsonl.zst",
    "libru_medicine_0.jsonl.zst",
    "libru_military_0.jsonl.zst",
    "libru_music_0.jsonl.zst",
    "libru_philosophy_0.jsonl.zst",
    "libru_politic_0.jsonl.zst",
    "libru_prose_0.jsonl.zst",
    "libru_prose_1.jsonl.zst",
    "libru_prose_2.jsonl.zst",
    "libru_psychology_0.jsonl.zst",
    "libru_psychology_1.jsonl.zst",
    "libru_reference_0.jsonl.zst",
    "libru_religion_0.jsonl.zst",
    "libru_religion_1.jsonl.zst",
    "libru_religion_2.jsonl.zst",
    "libru_religion_3.jsonl.zst",
    "libru_science_0.jsonl.zst",
    "libru_science_1.jsonl.zst",
    "libru_science_2.jsonl.zst",
    "libru_sociology_0.jsonl.zst",
    "libru_textbook_0.jsonl.zst",
    "libru_UNDEFINED_0.jsonl.zst",
    "rulm_buriy_0.jsonl.zst",
    "rulm_buriy_1.jsonl.zst",
    "rulm_buriy_2.jsonl.zst",
    "rulm_gazeta_0.jsonl.zst",
    "rulm_habr_0.jsonl.zst",
    "rulm_habr_1.jsonl.zst",
    "rulm_lenta_0.jsonl.zst",
    "rulm_ods-tass_0.jsonl.zst",
    "rulm_ods-tass_1.jsonl.zst",
    "rulm_pikabu_0.jsonl.zst",
    "rulm_pikabu_1.jsonl.zst",
    "rulm_pikabu_2.jsonl.zst",
    "rulm_taiga-fontanka_0.jsonl.zst",
    "rulm_wiki_0.jsonl.zst",
    "rulm_wiki_1.jsonl.zst",
    "rulm_wiki_2.jsonl.zst",
    "wiki40_enwiki_0.jsonl.zst",
]

# Deduplicated, sorted domain names extracted from the shard filenames.
DOMAINS = sorted(set(url.split('_')[1] for url in _URLS))

# Current dataset version; configs built with another version get it
# appended to their name (see DarulmConfig).
_VERSION = datasets.Version("1.1.0")
class DarulmConfig(datasets.BuilderConfig):
    """BuilderConfig for DaruLM."""

    def __init__(self, domains="all", version=_VERSION, **kwargs):
        """BuilderConfig for DaruLM.

        Args:
            domains (str or list, default 'all'): domain names; the
                string 'all' expands to every domain in DOMAINS.
            version: dataset version; a non-default version is appended
                to the generated config name.
            **kwargs: keyword arguments forwarded to super.

        Raises:
            ValueError: if any requested domain is not in DOMAINS.
        """
        if isinstance(domains, str):
            domains = [domains]
        name = "+".join(domains)
        if name == "all":
            domains = DOMAINS
        # Validate with a real exception: `assert` is stripped when
        # Python runs with -O, which would silently skip this check.
        missing_domains = set(domains) - set(DOMAINS)
        if missing_domains:
            raise ValueError(f"{missing_domains} not found in domain list")
        if version != _VERSION:
            name = f"{name}-{version}"
        super().__init__(name=name, version=version, **kwargs)
        self.domains = domains
class DarulmDataset(datasets.GeneratorBasedBuilder):
    """GeneratorBasedBuilder that streams zstd-compressed JSONL shards."""

    VERSION = _VERSION
    BUILDER_CONFIG_CLASS = DarulmConfig
    # One config for the whole collection plus one per individual domain.
    BUILDER_CONFIGS = [
        DarulmConfig(
            domains="all",
            description="Full DaruLM collection.",
        ),
    ] + [
        DarulmConfig(
            domains=dn,
            description=f"DaruLM part with {dn} texts",
        )
        for dn in sorted(DOMAINS)
    ]
    DEFAULT_CONFIG_NAME = "all"

    def _info(self):
        """Return DatasetInfo: each example has `text` and `domain` strings."""
        features = datasets.Features(
            {
                "text": datasets.Value("string"),
                "domain": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
        )

    def _split_generators(self, dl_manager):
        """Download only the shards belonging to this config's domains."""
        # The domain is surrounded by underscores in every shard name,
        # e.g. "libru_history_3.jsonl.zst" contains "_history_".
        required_urls = [
            path for dn in self.config.domains for path in _URLS if f"_{dn}_" in path
        ]
        downloaded_files = dl_manager.download(required_urls)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"paths": downloaded_files},
            ),
        ]

    def _generate_examples(self, paths):
        """Yield (key, example) pairs from zstd-compressed JSONL shards.

        Keys (`true_id`) are globally unique across all shards, as the
        `datasets` library requires.
        """
        true_id = 0
        for path in paths:
            with open(path, "rb") as f:
                # ZstdDecompressor, so name it dctx (cctx suggests compression).
                dctx = zstandard.ZstdDecompressor()
                reader_stream = io.BufferedReader(dctx.stream_reader(f))
                reader = jsonlines.Reader(reader_stream, loads=parse_json)
                try:
                    for item in reader:
                        yield true_id, item
                        true_id += 1
                finally:
                    # Close the reader (and the decompression stream it
                    # wraps) even if iteration is abandoned early.
                    reader.close()