Datasets: wmt/yakut

Modalities: Text
Libraries: Datasets
albertvillanova committed
Commit 34be8c5
1 Parent(s): d0747ec

Add loading script (#1)


- Add original wmt_utils.py file (6f885f769e056ef167e4c52a9be535a9de03b968)
- Update wmt_utils with Yakut (e1ebc674689e37457140ada63ed7bc3f8ee91e19)
- Add loading script (f55f48f3bc380c72bfb9d7d15f9b50feabdb9b00)

Files changed (2)
  1. wmt_utils.py +1029 -0
  2. yakut.py +34 -0
wmt_utils.py ADDED
@@ -0,0 +1,1029 @@
1
+ """WMT: Translate dataset."""
2
+
3
+
4
+ import codecs
5
+ import functools
6
+ import glob
7
+ import gzip
8
+ import itertools
9
+ import os
10
+ import re
11
+ import xml.etree.cElementTree as ElementTree
12
+
13
+ import datasets
14
+
15
+
16
+ logger = datasets.logging.get_logger(__name__)
17
+
18
+
19
+ _DESCRIPTION = """\
20
+ Translation dataset based on the data from statmt.org.
21
+
22
+ Versions exist for different years using a combination of data
23
+ sources. The base `wmt` allows you to create a custom dataset by choosing
24
+ your own data/language pair. This can be done as follows:
25
+
26
+ ```python
27
+ from datasets import inspect_dataset, load_dataset_builder
28
+
29
+ inspect_dataset("wmt19", "path/to/scripts")
30
+ builder = load_dataset_builder(
31
+ "path/to/scripts/wmt_utils.py",
32
+ language_pair=("fr", "de"),
33
+ subsets={
34
+ datasets.Split.TRAIN: ["commoncrawl_frde"],
35
+ datasets.Split.VALIDATION: ["euelections_dev2019"],
36
+ },
37
+ )
38
+
39
+ # Standard version
40
+ builder.download_and_prepare()
41
+ ds = builder.as_dataset()
42
+
43
+ # Streamable version
44
+ ds = builder.as_streaming_dataset()
45
+ ```
46
+
47
+ """
48
+
49
+
50
+ CWMT_SUBSET_NAMES = ["casia2015", "casict2011", "casict2015", "datum2015", "datum2017", "neu2017"]
51
+
52
+
53
+ class SubDataset:
54
+ """Class to keep track of information on a sub-dataset of WMT."""
55
+
56
+ def __init__(self, name, target, sources, url, path, manual_dl_files=None):
57
+ """Sub-dataset of WMT.
58
+
59
+ Args:
60
+ name: `string`, a unique dataset identifier.
61
+ target: `string`, the target language code.
62
+ sources: `set<string>`, the set of source language codes.
63
+ url: `string` or `(string, string)`, URL(s) or URL template(s) specifying
64
+ where to download the raw data from. If two strings are provided, the
65
+ first is used for the source language and the second for the target.
66
+ Template strings can either contain '{src}' placeholders that will be
67
+ filled in with the source language code, '{0}' and '{1}' placeholders
68
+ that will be filled in with the source and target language codes in
69
+ alphabetical order, or all 3.
70
+ path: `string` or `(string, string)`, path(s) or path template(s)
71
+ specifying the path to the raw data relative to the root of the
72
+ downloaded archive. If two strings are provided, the dataset is assumed
73
+ to be made up of parallel text files, the first being the source and the
74
+ second the target. If one string is provided, both languages are assumed
75
+ to be stored within the same file and the extension is used to determine
76
+ how to parse it. Template strings should be formatted the same as in
77
+ `url`.
78
+ manual_dl_files: `<list>(string)` (optional), the list of files that must
79
+ be manually downloaded to the data directory.
80
+ """
81
+ self._paths = (path,) if isinstance(path, str) else path
82
+ self._urls = (url,) if isinstance(url, str) else url
83
+ self._manual_dl_files = manual_dl_files if manual_dl_files else []
84
+ self.name = name
85
+ self.target = target
86
+ self.sources = set(sources)
87
+
88
+ def _inject_language(self, src, strings):
89
+ """Injects languages into (potentially) template strings."""
90
+ if src not in self.sources:
91
+ raise ValueError(f"Invalid source for '{self.name}': {src}")
92
+
93
+ def _format_string(s):
94
+ if "{0}" in s and "{1}" and "{src}" in s:
95
+ return s.format(*sorted([src, self.target]), src=src)
96
+ elif "{0}" in s and "{1}" in s:
97
+ return s.format(*sorted([src, self.target]))
98
+ elif "{src}" in s:
99
+ return s.format(src=src)
100
+ else:
101
+ return s
102
+
103
+ return [_format_string(s) for s in strings]
104
+
105
+ def get_url(self, src):
106
+ return self._inject_language(src, self._urls)
107
+
108
+ def get_manual_dl_files(self, src):
109
+ return self._inject_language(src, self._manual_dl_files)
110
+
111
+ def get_path(self, src):
112
+ return self._inject_language(src, self._paths)
113
+
114
+
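For readers skimming the diff, here is a minimal sketch of how the `{src}`, `{0}` and `{1}` placeholders described in the docstring above get resolved. The `example_corpus` entry and its URL are hypothetical, used only for illustration, and the snippet assumes the `SubDataset` class defined above is in scope:

```python
# Hypothetical sub-dataset, not one of the real WMT entries defined below.
example = SubDataset(
    name="example_corpus",
    target="en",
    sources={"de", "fr"},
    url="https://example.org/corpora/{0}-{1}/example.{src}-en.tsv.gz",
    path="example.{src}-en.tsv",
)

# "{0}"/"{1}" are the language codes in alphabetical order, "{src}" is the source
# code; results always come back as lists (one entry per configured URL/path).
assert example.get_url("de") == ["https://example.org/corpora/de-en/example.de-en.tsv.gz"]
assert example.get_path("fr") == ["example.fr-en.tsv"]
```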
115
+ # Subsets used in the training sets for various years of WMT.
116
+ _TRAIN_SUBSETS = [
117
+ # pylint:disable=line-too-long
118
+ SubDataset(
119
+ name="commoncrawl",
120
+ target="en", # fr-de pair in commoncrawl_frde
121
+ sources={"cs", "de", "es", "fr", "ru"},
122
+ url="https://huggingface.co/datasets/wmt/wmt13/resolve/main-zip/training-parallel-commoncrawl.zip",
123
+ path=("commoncrawl.{src}-en.{src}", "commoncrawl.{src}-en.en"),
124
+ ),
125
+ SubDataset(
126
+ name="commoncrawl_frde",
127
+ target="de",
128
+ sources={"fr"},
129
+ url=(
130
+ "https://huggingface.co/datasets/wmt/wmt19/resolve/main/translation-task/fr-de/bitexts/commoncrawl.fr.gz",
131
+ "https://huggingface.co/datasets/wmt/wmt19/resolve/main/translation-task/fr-de/bitexts/commoncrawl.de.gz",
132
+ ),
133
+ path=("", ""),
134
+ ),
135
+ SubDataset(
136
+ name="czeng_10",
137
+ target="en",
138
+ sources={"cs"},
139
+ url="http://ufal.mff.cuni.cz/czeng/czeng10",
140
+ manual_dl_files=["data-plaintext-format.%d.tar" % i for i in range(10)],
141
+ # Each tar contains multiple files, which we process specially in
142
+ # _parse_czeng.
143
+ path=("data.plaintext-format/??train.gz",) * 10,
144
+ ),
145
+ SubDataset(
146
+ name="czeng_16pre",
147
+ target="en",
148
+ sources={"cs"},
149
+ url="http://ufal.mff.cuni.cz/czeng/czeng16pre",
150
+ manual_dl_files=["czeng16pre.deduped-ignoring-sections.txt.gz"],
151
+ path="",
152
+ ),
153
+ SubDataset(
154
+ name="czeng_16",
155
+ target="en",
156
+ sources={"cs"},
157
+ url="http://ufal.mff.cuni.cz/czeng",
158
+ manual_dl_files=["data-plaintext-format.%d.tar" % i for i in range(10)],
159
+ # Each tar contains multiple files, which we process specially in
160
+ # _parse_czeng.
161
+ path=("data.plaintext-format/??train.gz",) * 10,
162
+ ),
163
+ SubDataset(
164
+ # This dataset differs from the above in the filtering that is applied
165
+ # during parsing.
166
+ name="czeng_17",
167
+ target="en",
168
+ sources={"cs"},
169
+ url="http://ufal.mff.cuni.cz/czeng",
170
+ manual_dl_files=["data-plaintext-format.%d.tar" % i for i in range(10)],
171
+ # Each tar contains multiple files, which we process specially in
172
+ # _parse_czeng.
173
+ path=("data.plaintext-format/??train.gz",) * 10,
174
+ ),
175
+ SubDataset(
176
+ name="dcep_v1",
177
+ target="en",
178
+ sources={"lv"},
179
+ url="https://huggingface.co/datasets/wmt/wmt17/resolve/main-zip/translation-task/dcep.lv-en.v1.zip",
180
+ path=("dcep.en-lv/dcep.lv", "dcep.en-lv/dcep.en"),
181
+ ),
182
+ SubDataset(
183
+ name="europarl_v7",
184
+ target="en",
185
+ sources={"cs", "de", "es", "fr"},
186
+ url="https://huggingface.co/datasets/wmt/wmt13/resolve/main-zip/training-parallel-europarl-v7.zip",
187
+ path=("training/europarl-v7.{src}-en.{src}", "training/europarl-v7.{src}-en.en"),
188
+ ),
189
+ SubDataset(
190
+ name="europarl_v7_frde",
191
+ target="de",
192
+ sources={"fr"},
193
+ url=(
194
+ "https://huggingface.co/datasets/wmt/wmt19/resolve/main/translation-task/fr-de/bitexts/europarl-v7.fr.gz",
195
+ "https://huggingface.co/datasets/wmt/wmt19/resolve/main/translation-task/fr-de/bitexts/europarl-v7.de.gz",
196
+ ),
197
+ path=("", ""),
198
+ ),
199
+ SubDataset(
200
+ name="europarl_v8_18",
201
+ target="en",
202
+ sources={"et", "fi"},
203
+ url="https://huggingface.co/datasets/wmt/wmt18/resolve/main-zip/translation-task/training-parallel-ep-v8.zip",
204
+ path=("training/europarl-v8.{src}-en.{src}", "training/europarl-v8.{src}-en.en"),
205
+ ),
206
+ SubDataset(
207
+ name="europarl_v8_16",
208
+ target="en",
209
+ sources={"fi", "ro"},
210
+ url="https://huggingface.co/datasets/wmt/wmt16/resolve/main-zip/translation-task/training-parallel-ep-v8.zip",
211
+ path=("training-parallel-ep-v8/europarl-v8.{src}-en.{src}", "training-parallel-ep-v8/europarl-v8.{src}-en.en"),
212
+ ),
213
+ SubDataset(
214
+ name="europarl_v9",
215
+ target="en",
216
+ sources={"cs", "de", "fi", "lt"},
217
+ url="https://huggingface.co/datasets/wmt/europarl/resolve/main/v9/training/europarl-v9.{src}-en.tsv.gz",
218
+ path="",
219
+ ),
220
+ SubDataset(
221
+ name="gigafren",
222
+ target="en",
223
+ sources={"fr"},
224
+ url="https://huggingface.co/datasets/wmt/wmt10/resolve/main-zip/training-giga-fren.zip",
225
+ path=("giga-fren.release2.fixed.fr.gz", "giga-fren.release2.fixed.en.gz"),
226
+ ),
227
+ SubDataset(
228
+ name="hindencorp_01",
229
+ target="en",
230
+ sources={"hi"},
231
+ url="http://ufallab.ms.mff.cuni.cz/~bojar/hindencorp",
232
+ manual_dl_files=["hindencorp0.1.gz"],
233
+ path="",
234
+ ),
235
+ SubDataset(
236
+ name="leta_v1",
237
+ target="en",
238
+ sources={"lv"},
239
+ url="https://huggingface.co/datasets/wmt/wmt17/resolve/main-zip/translation-task/leta.v1.zip",
240
+ path=("LETA-lv-en/leta.lv", "LETA-lv-en/leta.en"),
241
+ ),
242
+ SubDataset(
243
+ name="multiun",
244
+ target="en",
245
+ sources={"es", "fr"},
246
+ url="https://huggingface.co/datasets/wmt/wmt13/resolve/main-zip/training-parallel-un.zip",
247
+ path=("un/undoc.2000.{src}-en.{src}", "un/undoc.2000.{src}-en.en"),
248
+ ),
249
+ SubDataset(
250
+ name="newscommentary_v9",
251
+ target="en",
252
+ sources={"cs", "de", "fr", "ru"},
253
+ url="https://huggingface.co/datasets/wmt/wmt14/resolve/main-zip/training-parallel-nc-v9.zip",
254
+ path=("training/news-commentary-v9.{src}-en.{src}", "training/news-commentary-v9.{src}-en.en"),
255
+ ),
256
+ SubDataset(
257
+ name="newscommentary_v10",
258
+ target="en",
259
+ sources={"cs", "de", "fr", "ru"},
260
+ url="https://huggingface.co/datasets/wmt/wmt15/resolve/main-zip/training-parallel-nc-v10.zip",
261
+ path=("news-commentary-v10.{src}-en.{src}", "news-commentary-v10.{src}-en.en"),
262
+ ),
263
+ SubDataset(
264
+ name="newscommentary_v11",
265
+ target="en",
266
+ sources={"cs", "de", "ru"},
267
+ url="https://huggingface.co/datasets/wmt/wmt16/resolve/main-zip/translation-task/training-parallel-nc-v11.zip",
268
+ path=(
269
+ "training-parallel-nc-v11/news-commentary-v11.{src}-en.{src}",
270
+ "training-parallel-nc-v11/news-commentary-v11.{src}-en.en",
271
+ ),
272
+ ),
273
+ SubDataset(
274
+ name="newscommentary_v12",
275
+ target="en",
276
+ sources={"cs", "de", "ru", "zh"},
277
+ url="https://huggingface.co/datasets/wmt/wmt17/resolve/main-zip/translation-task/training-parallel-nc-v12.zip",
278
+ path=("training/news-commentary-v12.{src}-en.{src}", "training/news-commentary-v12.{src}-en.en"),
279
+ ),
280
+ SubDataset(
281
+ name="newscommentary_v13",
282
+ target="en",
283
+ sources={"cs", "de", "ru", "zh"},
284
+ url="https://huggingface.co/datasets/wmt/wmt18/resolve/main-zip/translation-task/training-parallel-nc-v13.zip",
285
+ path=(
286
+ "training-parallel-nc-v13/news-commentary-v13.{src}-en.{src}",
287
+ "training-parallel-nc-v13/news-commentary-v13.{src}-en.en",
288
+ ),
289
+ ),
290
+ SubDataset(
291
+ name="newscommentary_v14",
292
+ target="en", # fr-de pair in newscommentary_v14_frde
293
+ sources={"cs", "de", "kk", "ru", "zh"},
294
+ url="http://data.statmt.org/news-commentary/v14/training/news-commentary-v14.{0}-{1}.tsv.gz",
295
+ path="",
296
+ ),
297
+ SubDataset(
298
+ name="newscommentary_v14_frde",
299
+ target="de",
300
+ sources={"fr"},
301
+ url="http://data.statmt.org/news-commentary/v14/training/news-commentary-v14.de-fr.tsv.gz",
302
+ path="",
303
+ ),
304
+ SubDataset(
305
+ name="onlinebooks_v1",
306
+ target="en",
307
+ sources={"lv"},
308
+ url="https://huggingface.co/datasets/wmt/wmt17/resolve/main-zip/translation-task/books.lv-en.v1.zip",
309
+ path=("farewell/farewell.lv", "farewell/farewell.en"),
310
+ ),
311
+ SubDataset(
312
+ name="paracrawl_v1",
313
+ target="en",
314
+ sources={"cs", "de", "et", "fi", "ru"},
315
+ url="https://s3.amazonaws.com/web-language-models/paracrawl/release1/paracrawl-release1.en-{src}.zipporah0-dedup-clean.tgz", # TODO(QL): use gzip for streaming
316
+ path=(
317
+ "paracrawl-release1.en-{src}.zipporah0-dedup-clean.{src}",
318
+ "paracrawl-release1.en-{src}.zipporah0-dedup-clean.en",
319
+ ),
320
+ ),
321
+ SubDataset(
322
+ name="paracrawl_v1_ru",
323
+ target="en",
324
+ sources={"ru"},
325
+ url="https://s3.amazonaws.com/web-language-models/paracrawl/release1/paracrawl-release1.en-ru.zipporah0-dedup-clean.tgz", # TODO(QL): use gzip for streaming
326
+ path=(
327
+ "paracrawl-release1.en-ru.zipporah0-dedup-clean.ru",
328
+ "paracrawl-release1.en-ru.zipporah0-dedup-clean.en",
329
+ ),
330
+ ),
331
+ SubDataset(
332
+ name="paracrawl_v3",
333
+ target="en", # fr-de pair in paracrawl_v3_frde
334
+ sources={"cs", "de", "fi", "lt"},
335
+ url="https://s3.amazonaws.com/web-language-models/paracrawl/release3/en-{src}.bicleaner07.tmx.gz",
336
+ path="",
337
+ ),
338
+ SubDataset(
339
+ name="paracrawl_v3_frde",
340
+ target="de",
341
+ sources={"fr"},
342
+ url=(
343
+ "https://huggingface.co/datasets/wmt/wmt19/resolve/main/translation-task/fr-de/bitexts/de-fr.bicleaner07.de.gz",
344
+ "https://huggingface.co/datasets/wmt/wmt19/resolve/main/translation-task/fr-de/bitexts/de-fr.bicleaner07.fr.gz",
345
+ ),
346
+ path=("", ""),
347
+ ),
348
+ SubDataset(
349
+ name="rapid_2016",
350
+ target="en",
351
+ sources={"de", "et", "fi"},
352
+ url="https://huggingface.co/datasets/wmt/wmt18/resolve/main-zip/translation-task/rapid2016.zip",
353
+ path=("rapid2016.{0}-{1}.{src}", "rapid2016.{0}-{1}.en"),
354
+ ),
355
+ SubDataset(
356
+ name="rapid_2016_ltfi",
357
+ target="en",
358
+ sources={"fi", "lt"},
359
+ url="https://tilde-model.s3-eu-west-1.amazonaws.com/rapid2016.en-{src}.tmx.zip",
360
+ path="rapid2016.en-{src}.tmx",
361
+ ),
362
+ SubDataset(
363
+ name="rapid_2019",
364
+ target="en",
365
+ sources={"de"},
366
+ url="https://s3-eu-west-1.amazonaws.com/tilde-model/rapid2019.de-en.zip",
367
+ path=("rapid2019.de-en.de", "rapid2019.de-en.en"),
368
+ ),
369
+ SubDataset(
370
+ name="setimes_2",
371
+ target="en",
372
+ sources={"ro", "tr"},
373
+ url="https://opus.nlpl.eu/download.php?f=SETIMES/v2/tmx/en-{src}.tmx.gz",
374
+ path="",
375
+ ),
376
+ SubDataset(
377
+ name="uncorpus_v1",
378
+ target="en",
379
+ sources={"ru", "zh"},
380
+ url="https://huggingface.co/datasets/wmt/uncorpus/resolve/main-zip/UNv1.0.en-{src}.zip",
381
+ path=("en-{src}/UNv1.0.en-{src}.{src}", "en-{src}/UNv1.0.en-{src}.en"),
382
+ ),
383
+ SubDataset(
384
+ name="wikiheadlines_fi",
385
+ target="en",
386
+ sources={"fi"},
387
+ url="https://huggingface.co/datasets/wmt/wmt15/resolve/main-zip/wiki-titles.zip",
388
+ path="wiki/fi-en/titles.fi-en",
389
+ ),
390
+ SubDataset(
391
+ name="wikiheadlines_hi",
392
+ target="en",
393
+ sources={"hi"},
394
+ url="https://huggingface.co/datasets/wmt/wmt14/resolve/main-zip/wiki-titles.zip",
395
+ path="wiki/hi-en/wiki-titles.hi-en",
396
+ ),
397
+ SubDataset(
398
+ # Verified that wmt14 and wmt15 files are identical.
399
+ name="wikiheadlines_ru",
400
+ target="en",
401
+ sources={"ru"},
402
+ url="https://huggingface.co/datasets/wmt/wmt15/resolve/main-zip/wiki-titles.zip",
403
+ path="wiki/ru-en/wiki.ru-en",
404
+ ),
405
+ SubDataset(
406
+ name="wikititles_v1",
407
+ target="en",
408
+ sources={"cs", "de", "fi", "gu", "kk", "lt", "ru", "zh"},
409
+ url="https://huggingface.co/datasets/wmt/wikititles/resolve/main/v1/wikititles-v1.{src}-en.tsv.gz",
410
+ path="",
411
+ ),
412
+ SubDataset(
413
+ name="yakut",
414
+ target="ru",
415
+ sources={"sah"},
416
+ url="https://huggingface.co/datasets/wmt/yakut/resolve/main/data/yakut.zip",
417
+ path="yakut/sah-ru.parallel.uniq.tsv",
418
+ ),
419
+ SubDataset(
420
+ name="yandexcorpus",
421
+ target="en",
422
+ sources={"ru"},
423
+ url="https://translate.yandex.ru/corpus?lang=en",
424
+ manual_dl_files=["1mcorpus.zip"],
425
+ path=("corpus.en_ru.1m.ru", "corpus.en_ru.1m.en"),
426
+ ),
427
+ # pylint:enable=line-too-long
428
+ ] + [
429
+ SubDataset( # pylint:disable=g-complex-comprehension
430
+ name=ss,
431
+ target="en",
432
+ sources={"zh"},
433
+ url="https://huggingface.co/datasets/wmt/wmt18/resolve/main/cwmt-wmt/%s.zip" % ss,
434
+ path=("%s/*_c[hn].txt" % ss, "%s/*_en.txt" % ss),
435
+ )
436
+ for ss in CWMT_SUBSET_NAMES
437
+ ]
438
+
439
+ _DEV_SUBSETS = [
440
+ SubDataset(
441
+ name="euelections_dev2019",
442
+ target="de",
443
+ sources={"fr"},
444
+ url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip",
445
+ path=("dev/euelections_dev2019.fr-de.src.fr", "dev/euelections_dev2019.fr-de.tgt.de"),
446
+ ),
447
+ SubDataset(
448
+ name="newsdev2014",
449
+ target="en",
450
+ sources={"hi"},
451
+ url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip",
452
+ path=("dev/newsdev2014.hi", "dev/newsdev2014.en"),
453
+ ),
454
+ SubDataset(
455
+ name="newsdev2015",
456
+ target="en",
457
+ sources={"fi"},
458
+ url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip",
459
+ path=("dev/newsdev2015-fien-src.{src}.sgm", "dev/newsdev2015-fien-ref.en.sgm"),
460
+ ),
461
+ SubDataset(
462
+ name="newsdiscussdev2015",
463
+ target="en",
464
+ sources={"ro", "tr"},
465
+ url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip",
466
+ path=("dev/newsdiscussdev2015-{src}en-src.{src}.sgm", "dev/newsdiscussdev2015-{src}en-ref.en.sgm"),
467
+ ),
468
+ SubDataset(
469
+ name="newsdev2016",
470
+ target="en",
471
+ sources={"ro", "tr"},
472
+ url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip",
473
+ path=("dev/newsdev2016-{src}en-src.{src}.sgm", "dev/newsdev2016-{src}en-ref.en.sgm"),
474
+ ),
475
+ SubDataset(
476
+ name="newsdev2017",
477
+ target="en",
478
+ sources={"lv", "zh"},
479
+ url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip",
480
+ path=("dev/newsdev2017-{src}en-src.{src}.sgm", "dev/newsdev2017-{src}en-ref.en.sgm"),
481
+ ),
482
+ SubDataset(
483
+ name="newsdev2018",
484
+ target="en",
485
+ sources={"et"},
486
+ url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip",
487
+ path=("dev/newsdev2018-{src}en-src.{src}.sgm", "dev/newsdev2018-{src}en-ref.en.sgm"),
488
+ ),
489
+ SubDataset(
490
+ name="newsdev2019",
491
+ target="en",
492
+ sources={"gu", "kk", "lt"},
493
+ url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip",
494
+ path=("dev/newsdev2019-{src}en-src.{src}.sgm", "dev/newsdev2019-{src}en-ref.en.sgm"),
495
+ ),
496
+ SubDataset(
497
+ name="newsdiscussdev2015",
498
+ target="en",
499
+ sources={"fr"},
500
+ url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip",
501
+ path=("dev/newsdiscussdev2015-{src}en-src.{src}.sgm", "dev/newsdiscussdev2015-{src}en-ref.en.sgm"),
502
+ ),
503
+ SubDataset(
504
+ name="newsdiscusstest2015",
505
+ target="en",
506
+ sources={"fr"},
507
+ url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip",
508
+ path=("dev/newsdiscusstest2015-{src}en-src.{src}.sgm", "dev/newsdiscusstest2015-{src}en-ref.en.sgm"),
509
+ ),
510
+ SubDataset(
511
+ name="newssyscomb2009",
512
+ target="en",
513
+ sources={"cs", "de", "es", "fr"},
514
+ url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip",
515
+ path=("dev/newssyscomb2009.{src}", "dev/newssyscomb2009.en"),
516
+ ),
517
+ SubDataset(
518
+ name="newstest2008",
519
+ target="en",
520
+ sources={"cs", "de", "es", "fr", "hu"},
521
+ url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip",
522
+ path=("dev/news-test2008.{src}", "dev/news-test2008.en"),
523
+ ),
524
+ SubDataset(
525
+ name="newstest2009",
526
+ target="en",
527
+ sources={"cs", "de", "es", "fr"},
528
+ url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip",
529
+ path=("dev/newstest2009.{src}", "dev/newstest2009.en"),
530
+ ),
531
+ SubDataset(
532
+ name="newstest2010",
533
+ target="en",
534
+ sources={"cs", "de", "es", "fr"},
535
+ url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip",
536
+ path=("dev/newstest2010.{src}", "dev/newstest2010.en"),
537
+ ),
538
+ SubDataset(
539
+ name="newstest2011",
540
+ target="en",
541
+ sources={"cs", "de", "es", "fr"},
542
+ url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip",
543
+ path=("dev/newstest2011.{src}", "dev/newstest2011.en"),
544
+ ),
545
+ SubDataset(
546
+ name="newstest2012",
547
+ target="en",
548
+ sources={"cs", "de", "es", "fr", "ru"},
549
+ url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip",
550
+ path=("dev/newstest2012.{src}", "dev/newstest2012.en"),
551
+ ),
552
+ SubDataset(
553
+ name="newstest2013",
554
+ target="en",
555
+ sources={"cs", "de", "es", "fr", "ru"},
556
+ url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip",
557
+ path=("dev/newstest2013.{src}", "dev/newstest2013.en"),
558
+ ),
559
+ SubDataset(
560
+ name="newstest2014",
561
+ target="en",
562
+ sources={"cs", "de", "es", "fr", "hi", "ru"},
563
+ url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip",
564
+ path=("dev/newstest2014-{src}en-src.{src}.sgm", "dev/newstest2014-{src}en-ref.en.sgm"),
565
+ ),
566
+ SubDataset(
567
+ name="newstest2015",
568
+ target="en",
569
+ sources={"cs", "de", "fi", "ru"},
570
+ url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip",
571
+ path=("dev/newstest2015-{src}en-src.{src}.sgm", "dev/newstest2015-{src}en-ref.en.sgm"),
572
+ ),
573
+ SubDataset(
574
+ name="newsdiscusstest2015",
575
+ target="en",
576
+ sources={"fr"},
577
+ url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip",
578
+ path=("dev/newsdiscusstest2015-{src}en-src.{src}.sgm", "dev/newsdiscusstest2015-{src}en-ref.en.sgm"),
579
+ ),
580
+ SubDataset(
581
+ name="newstest2016",
582
+ target="en",
583
+ sources={"cs", "de", "fi", "ro", "ru", "tr"},
584
+ url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip",
585
+ path=("dev/newstest2016-{src}en-src.{src}.sgm", "dev/newstest2016-{src}en-ref.en.sgm"),
586
+ ),
587
+ SubDataset(
588
+ name="newstestB2016",
589
+ target="en",
590
+ sources={"fi"},
591
+ url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip",
592
+ path=("dev/newstestB2016-enfi-ref.{src}.sgm", "dev/newstestB2016-enfi-src.en.sgm"),
593
+ ),
594
+ SubDataset(
595
+ name="newstest2017",
596
+ target="en",
597
+ sources={"cs", "de", "fi", "lv", "ru", "tr", "zh"},
598
+ url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip",
599
+ path=("dev/newstest2017-{src}en-src.{src}.sgm", "dev/newstest2017-{src}en-ref.en.sgm"),
600
+ ),
601
+ SubDataset(
602
+ name="newstestB2017",
603
+ target="en",
604
+ sources={"fi"},
605
+ url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip",
606
+ path=("dev/newstestB2017-fien-src.fi.sgm", "dev/newstestB2017-fien-ref.en.sgm"),
607
+ ),
608
+ SubDataset(
609
+ name="newstest2018",
610
+ target="en",
611
+ sources={"cs", "de", "et", "fi", "ru", "tr", "zh"},
612
+ url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip",
613
+ path=("dev/newstest2018-{src}en-src.{src}.sgm", "dev/newstest2018-{src}en-ref.en.sgm"),
614
+ ),
615
+ ]
616
+
617
+ DATASET_MAP = {dataset.name: dataset for dataset in _TRAIN_SUBSETS + _DEV_SUBSETS}
618
+
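As a quick, illustrative sanity check (assuming the definitions above are in scope), the new `yakut` entry can be looked up through `DATASET_MAP` like any other sub-dataset; since its URL and path contain no placeholders, they are returned unchanged:

```python
yakut = DATASET_MAP["yakut"]

assert yakut.target == "ru" and yakut.sources == {"sah"}
assert yakut.get_url("sah") == ["https://huggingface.co/datasets/wmt/yakut/resolve/main/data/yakut.zip"]
assert yakut.get_path("sah") == ["yakut/sah-ru.parallel.uniq.tsv"]
```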
619
+ _CZENG17_FILTER = SubDataset(
620
+ name="czeng17_filter",
621
+ target="en",
622
+ sources={"cs"},
623
+ url="http://ufal.mff.cuni.cz/czeng/download.php?f=convert_czeng16_to_17.pl.zip",
624
+ path="convert_czeng16_to_17.pl",
625
+ )
626
+
627
+
628
+ class WmtConfig(datasets.BuilderConfig):
629
+ """BuilderConfig for WMT."""
630
+
631
+ def __init__(self, url=None, citation=None, description=None, language_pair=(None, None), subsets=None, **kwargs):
632
+ """BuilderConfig for WMT.
633
+
634
+ Args:
635
+ url: The reference URL for the dataset.
636
+ citation: The paper citation for the dataset.
637
+ description: The description of the dataset.
638
+ language_pair: pair of languages that will be used for translation. Should
639
+ contain 2 letter coded strings. For example: ("en", "de").
640
+ configuration for the `datasets.features.text.TextEncoder` used for the
641
+ `datasets.features.text.Translation` features.
642
+ subsets: Dict[split, list[str]]. The list of subsets to use for each
643
+ split. Note that WMT subclasses overwrite this parameter.
644
+ **kwargs: keyword arguments forwarded to super.
645
+ """
646
+ name = "%s-%s" % (language_pair[0], language_pair[1])
647
+ if "name" in kwargs: # Add name suffix for custom configs
648
+ name += "." + kwargs.pop("name")
649
+
650
+ super(WmtConfig, self).__init__(name=name, description=description, **kwargs)
651
+
652
+ self.url = url or "http://www.statmt.org"
653
+ self.citation = citation
654
+ self.language_pair = language_pair
655
+ self.subsets = subsets
656
+
657
+ # TODO(PVP): remove when manual dir works
658
+ # +++++++++++++++++++++
659
+ if language_pair[1] in ["cs", "hi", "ru"]:
660
+ assert NotImplementedError(f"The dataset for {language_pair[1]}-en is currently not fully supported.")
661
+ # +++++++++++++++++++++
662
+
663
+
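A small sketch of how the config name is derived from `language_pair` (it assumes `WmtConfig` and the `datasets` import above are in scope); this is the name later used to select the configuration, e.g. `sah-ru` for the pair added by `yakut.py`:

```python
config = WmtConfig(
    description="Yakut sah-ru translation dataset",
    language_pair=("sah", "ru"),
    version=datasets.Version("1.0.0"),
)

assert config.name == "sah-ru"                 # "%s-%s" % language_pair
assert config.url == "http://www.statmt.org"   # default homepage when no url is given
```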
664
+ class Wmt(datasets.GeneratorBasedBuilder):
665
+ """WMT translation dataset."""
666
+
667
+ BUILDER_CONFIG_CLASS = WmtConfig
668
+
669
+ def __init__(self, *args, **kwargs):
670
+ super(Wmt, self).__init__(*args, **kwargs)
671
+
672
+ @property
673
+ def _subsets(self):
674
+ """Subsets that make up each split of the dataset."""
675
+ raise NotImplementedError("This is an abstract method")
676
+
677
+ @property
678
+ def subsets(self):
679
+ """Subsets that make up each split of the dataset for the language pair."""
680
+ source, target = self.config.language_pair
681
+ filtered_subsets = {}
682
+ subsets = self._subsets if self.config.subsets is None else self.config.subsets
683
+ for split, ss_names in subsets.items():
684
+ filtered_subsets[split] = []
685
+ for ss_name in ss_names:
686
+ dataset = DATASET_MAP[ss_name]
687
+ if dataset.target != target or source not in dataset.sources:
688
+ logger.info("Skipping sub-dataset that does not include language pair: %s", ss_name)
689
+ else:
690
+ filtered_subsets[split].append(ss_name)
691
+ logger.info("Using sub-datasets: %s", filtered_subsets)
692
+ return filtered_subsets
693
+
694
+ def _info(self):
695
+ src, target = self.config.language_pair
696
+ return datasets.DatasetInfo(
697
+ description=_DESCRIPTION,
698
+ features=datasets.Features(
699
+ {"translation": datasets.features.Translation(languages=self.config.language_pair)}
700
+ ),
701
+ supervised_keys=(src, target),
702
+ homepage=self.config.url,
703
+ citation=self.config.citation,
704
+ )
705
+
706
+ def _vocab_text_gen(self, split_subsets, extraction_map, language):
707
+ for _, ex in self._generate_examples(split_subsets, extraction_map, with_translation=False):
708
+ yield ex[language]
709
+
710
+ def _split_generators(self, dl_manager):
711
+ source, _ = self.config.language_pair
712
+ manual_paths_dict = {}
713
+ urls_to_download = {}
714
+ for ss_name in itertools.chain.from_iterable(self.subsets.values()):
715
+ if ss_name == "czeng_17":
716
+ # CzEng1.7 is CzEng1.6 with some blocks filtered out. We must download
717
+ # the filtering script so we can parse out which blocks need to be
718
+ # removed.
719
+ urls_to_download[_CZENG17_FILTER.name] = _CZENG17_FILTER.get_url(source)
720
+
721
+ # get dataset
722
+ dataset = DATASET_MAP[ss_name]
723
+ if dataset.get_manual_dl_files(source):
724
+ # TODO(PVP): following two lines skip configs that are incomplete for now
725
+ # +++++++++++++++++++++
726
+ logger.info("Skipping {dataset.name} for now. Incomplete dataset for {self.config.name}")
727
+ continue
728
+ # +++++++++++++++++++++
729
+
730
+ manual_dl_files = dataset.get_manual_dl_files(source)
731
+ manual_paths = [
732
+ os.path.join(os.path.abspath(os.path.expanduser(dl_manager.manual_dir)), fname)
733
+ for fname in manual_dl_files
734
+ ]
735
+ assert all(
736
+ os.path.exists(path) for path in manual_paths
737
+ ), f"For {dataset.name}, you must manually download the following file(s) from {dataset.get_url(source)} and place them in {dl_manager.manual_dir}: {', '.join(manual_dl_files)}"
738
+
739
+ # set manual path for correct subset
740
+ manual_paths_dict[ss_name] = manual_paths
741
+ else:
742
+ urls_to_download[ss_name] = dataset.get_url(source)
743
+
744
+ # Download and extract files from URLs.
745
+ downloaded_files = dl_manager.download_and_extract(urls_to_download)
746
+ # Extract manually downloaded files.
747
+ manual_files = dl_manager.extract(manual_paths_dict)
748
+ extraction_map = dict(downloaded_files, **manual_files)
749
+
750
+ for language in self.config.language_pair:
751
+ self._vocab_text_gen(self.subsets[datasets.Split.TRAIN], extraction_map, language)
752
+
753
+ return [
754
+ datasets.SplitGenerator( # pylint:disable=g-complex-comprehension
755
+ name=split, gen_kwargs={"split_subsets": split_subsets, "extraction_map": extraction_map}
756
+ )
757
+ for split, split_subsets in self.subsets.items()
758
+ ]
759
+
760
+ def _generate_examples(self, split_subsets, extraction_map, with_translation=True):
761
+ """Returns the examples in the raw (text) form."""
762
+ source, _ = self.config.language_pair
763
+
764
+ def _get_local_paths(dataset, extract_dirs):
765
+ rel_paths = dataset.get_path(source)
766
+ if len(extract_dirs) == 1:
767
+ extract_dirs = extract_dirs * len(rel_paths)
768
+ return [
769
+ os.path.join(ex_dir, rel_path) if rel_path else ex_dir
770
+ for ex_dir, rel_path in zip(extract_dirs, rel_paths)
771
+ ]
772
+
773
+ def _get_filenames(dataset):
774
+ rel_paths = dataset.get_path(source)
775
+ urls = dataset.get_url(source)
776
+ if len(urls) == 1:
777
+ urls = urls * len(rel_paths)
778
+ return [rel_path if rel_path else os.path.basename(url) for url, rel_path in zip(urls, rel_paths)]
779
+
780
+ for ss_name in split_subsets:
781
+ # TODO(PVP) remove following five lines when manual data works
782
+ # +++++++++++++++++++++
783
+ dataset = DATASET_MAP[ss_name]
784
+ source, _ = self.config.language_pair
785
+ if dataset.get_manual_dl_files(source):
786
+ logger.info(f"Skipping {dataset.name} for now. Incomplete dataset for {self.config.name}")
787
+ continue
788
+ # +++++++++++++++++++++
789
+
790
+ logger.info("Generating examples from: %s", ss_name)
791
+ dataset = DATASET_MAP[ss_name]
792
+ extract_dirs = extraction_map[ss_name]
793
+ files = _get_local_paths(dataset, extract_dirs)
794
+ filenames = _get_filenames(dataset)
795
+
796
+ sub_generator_args = tuple(files)
797
+
798
+ if ss_name.startswith("czeng"):
799
+ if ss_name.endswith("16pre"):
800
+ sub_generator = functools.partial(_parse_tsv, language_pair=("en", "cs"))
801
+ sub_generator_args += tuple(filenames)
802
+ elif ss_name.endswith("17"):
803
+ filter_path = _get_local_paths(_CZENG17_FILTER, extraction_map[_CZENG17_FILTER.name])[0]
804
+ sub_generator = functools.partial(_parse_czeng, filter_path=filter_path)
805
+ else:
806
+ sub_generator = _parse_czeng
807
+ elif ss_name == "hindencorp_01":
808
+ sub_generator = _parse_hindencorp
809
+ elif ss_name == "yakut":
810
+ sub_generator, sub_generator_args = YakutParser.create_generator(
811
+ sub_generator_args=sub_generator_args, config=self.config
812
+ )
813
+ elif len(files) == 2:
814
+ if ss_name.endswith("_frde"):
815
+ sub_generator = _parse_frde_bitext
816
+ else:
817
+ sub_generator = _parse_parallel_sentences
818
+ sub_generator_args += tuple(filenames)
819
+ elif len(files) == 1:
820
+ fname = filenames[0]
821
+ # Note: Due to formatting used by `download_manager`, the file
822
+ # extension may not be at the end of the file path.
823
+ if ".tsv" in fname:
824
+ sub_generator = _parse_tsv
825
+ sub_generator_args += tuple(filenames)
826
+ elif (
827
+ ss_name.startswith("newscommentary_v14")
828
+ or ss_name.startswith("europarl_v9")
829
+ or ss_name.startswith("wikititles_v1")
830
+ ):
831
+ sub_generator = functools.partial(_parse_tsv, language_pair=self.config.language_pair)
832
+ sub_generator_args += tuple(filenames)
833
+ elif "tmx" in fname or ss_name.startswith("paracrawl_v3"):
834
+ sub_generator = _parse_tmx
835
+ elif ss_name.startswith("wikiheadlines"):
836
+ sub_generator = _parse_wikiheadlines
837
+ else:
838
+ raise ValueError("Unsupported file format: %s" % fname)
839
+ else:
840
+ raise ValueError("Invalid number of files: %d" % len(files))
841
+
842
+ for sub_key, ex in sub_generator(*sub_generator_args):
843
+ if not all(ex.values()):
844
+ continue
845
+ # TODO(adarob): Add subset feature.
846
+ # ex["subset"] = subset
847
+ key = f"{ss_name}/{sub_key}"
848
+ if with_translation is True:
849
+ ex = {"translation": ex}
850
+ yield key, ex
851
+
852
+
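To illustrate what the `subsets` property above computes for the new language pair, here is a short sketch that mirrors its filtering logic directly on `DATASET_MAP` (illustrative only; it avoids instantiating the builder):

```python
source, target = "sah", "ru"
requested = {datasets.Split.TRAIN: ["yakut"]}  # what yakut.py's `_subsets` returns

filtered = {
    split: [
        name
        for name in names
        if DATASET_MAP[name].target == target and source in DATASET_MAP[name].sources
    ]
    for split, names in requested.items()
}

assert filtered == {datasets.Split.TRAIN: ["yakut"]}
```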
853
+ def _parse_parallel_sentences(f1, f2, filename1, filename2):
854
+ """Returns examples from parallel SGML or text files, which may be gzipped."""
855
+
856
+ def _parse_text(path, original_filename):
857
+ """Returns the sentences from a single text file, which may be gzipped."""
858
+ split_path = original_filename.split(".")
859
+
860
+ if split_path[-1] == "gz":
861
+ lang = split_path[-2]
862
+
863
+ def gen():
864
+ with open(path, "rb") as f, gzip.GzipFile(fileobj=f) as g:
865
+ for line in g:
866
+ yield line.decode("utf-8").rstrip()
867
+
868
+ return gen(), lang
869
+
870
+ if split_path[-1] == "txt":
871
+ # CWMT
872
+ lang = split_path[-2].split("_")[-1]
873
+ lang = "zh" if lang in ("ch", "cn", "c[hn]") else lang
874
+ else:
875
+ lang = split_path[-1]
876
+
877
+ def gen():
878
+ with open(path, "rb") as f:
879
+ for line in f:
880
+ yield line.decode("utf-8").rstrip()
881
+
882
+ return gen(), lang
883
+
884
+ def _parse_sgm(path, original_filename):
885
+ """Returns sentences from a single SGML file."""
886
+ lang = original_filename.split(".")[-2]
887
+ # Note: We can't use the XML parser since some of the files are badly
888
+ # formatted.
889
+ seg_re = re.compile(r"<seg id=\"\d+\">(.*)</seg>")
890
+
891
+ def gen():
892
+ with open(path, encoding="utf-8") as f:
893
+ for line in f:
894
+ seg_match = re.match(seg_re, line)
895
+ if seg_match:
896
+ assert len(seg_match.groups()) == 1
897
+ yield seg_match.groups()[0]
898
+
899
+ return gen(), lang
900
+
901
+ parse_file = _parse_sgm if os.path.basename(f1).endswith(".sgm") else _parse_text
902
+
903
+ # Some datasets (e.g., CWMT) contain multiple parallel files specified with
904
+ # a wildcard. We sort both sets to align them and parse them one by one.
905
+ f1_files = sorted(glob.glob(f1))
906
+ f2_files = sorted(glob.glob(f2))
907
+
908
+ assert f1_files and f2_files, "No matching files found: %s, %s." % (f1, f2)
909
+ assert len(f1_files) == len(f2_files), "Number of files does not match: %d vs %d for %s vs %s." % (
910
+ len(f1_files),
911
+ len(f2_files),
912
+ f1,
913
+ f2,
914
+ )
915
+
916
+ for f_id, (f1_i, f2_i) in enumerate(zip(sorted(f1_files), sorted(f2_files))):
917
+ l1_sentences, l1 = parse_file(f1_i, filename1)
918
+ l2_sentences, l2 = parse_file(f2_i, filename2)
919
+
920
+ for line_id, (s1, s2) in enumerate(zip(l1_sentences, l2_sentences)):
921
+ key = f"{f_id}/{line_id}"
922
+ yield key, {l1: s1, l2: s2}
923
+
924
+
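Since the SGML files are parsed with a regular expression rather than an XML parser (see the note in `_parse_sgm` above), here is a tiny, self-contained illustration with a made-up `<seg>` line:

```python
import re

seg_re = re.compile(r"<seg id=\"\d+\">(.*)</seg>")
line = '<seg id="3">A sample source sentence.</seg>'  # made-up example line

match = re.match(seg_re, line)
assert match is not None and match.groups()[0] == "A sample source sentence."
```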
925
+ def _parse_frde_bitext(fr_path, de_path):
926
+ with open(fr_path, encoding="utf-8") as fr_f:
927
+ with open(de_path, encoding="utf-8") as de_f:
928
+ for line_id, (s1, s2) in enumerate(zip(fr_f, de_f)):
929
+ yield line_id, {"fr": s1.rstrip(), "de": s2.rstrip()}
930
+
931
+
932
+ def _parse_tmx(path):
933
+ """Generates examples from TMX file."""
934
+
935
+ def _get_tuv_lang(tuv):
936
+ for k, v in tuv.items():
937
+ if k.endswith("}lang"):
938
+ return v
939
+ raise AssertionError("Language not found in `tuv` attributes.")
940
+
941
+ def _get_tuv_seg(tuv):
942
+ segs = tuv.findall("seg")
943
+ assert len(segs) == 1, "Invalid number of segments: %d" % len(segs)
944
+ return segs[0].text
945
+
946
+ with open(path, "rb") as f:
947
+ # Workaround due to: https://github.com/tensorflow/tensorflow/issues/33563
948
+ utf_f = codecs.getreader("utf-8")(f)
949
+ for line_id, (_, elem) in enumerate(ElementTree.iterparse(utf_f)):
950
+ if elem.tag == "tu":
951
+ yield line_id, {_get_tuv_lang(tuv): _get_tuv_seg(tuv) for tuv in elem.iterfind("tuv")}
952
+ elem.clear()
953
+
954
+
955
+ def _parse_tsv(path, filename=None, language_pair=None, skiprows=None):
956
+ """Generates examples from TSV file."""
957
+ if language_pair is None:
958
+ lang_match = re.match(r".*\.([a-z][a-z])-([a-z][a-z])\.tsv", filename)
959
+ assert lang_match is not None, "Invalid TSV filename: %s" % filename
960
+ l1, l2 = lang_match.groups()
961
+ else:
962
+ l1, l2 = language_pair
963
+ with open(path, encoding="utf-8") as f:
964
+ for key, line in enumerate(f):
965
+ if skiprows and key < skiprows:
966
+ continue
967
+ cols = line.split("\t")
968
+ if len(cols) != 2:
969
+ logger.warning("Skipping line %d in TSV (%s) with %d != 2 columns.", j, path, len(cols))
970
+ continue
971
+ s1, s2 = cols
972
+ yield key, {l1: s1.strip(), l2: s2.strip()}
973
+
974
+
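Below is a minimal, self-contained sketch of what `_parse_tsv` yields for a two-column file with a header row, which is the layout that `yakut.py`'s `YakutParser` assumes (`skiprows=1`). The sample sentences and the temporary file are purely illustrative:

```python
import tempfile

with tempfile.NamedTemporaryFile("w", suffix=".tsv", delete=False, encoding="utf-8") as tmp:
    tmp.write("sah\tru\n")                    # header row, skipped via skiprows=1
    tmp.write("саха тыла\tякутский язык\n")   # one illustrative sentence pair
    path = tmp.name

examples = list(_parse_tsv(path, language_pair=("sah", "ru"), skiprows=1))
assert examples == [(1, {"sah": "саха тыла", "ru": "якутский язык"})]
```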
975
+ def _parse_wikiheadlines(path):
976
+ """Generates examples from Wikiheadlines dataset file."""
977
+ lang_match = re.match(r".*\.([a-z][a-z])-([a-z][a-z])$", path)
978
+ assert lang_match is not None, "Invalid Wikiheadlines filename: %s" % path
979
+ l1, l2 = lang_match.groups()
980
+ with open(path, encoding="utf-8") as f:
981
+ for line_id, line in enumerate(f):
982
+ s1, s2 = line.split("|||")
983
+ yield line_id, {l1: s1.strip(), l2: s2.strip()}
984
+
985
+
986
+ def _parse_czeng(*paths, **kwargs):
987
+ """Generates examples from CzEng v1.6, with optional filtering for v1.7."""
988
+ filter_path = kwargs.get("filter_path", None)
989
+ if filter_path:
990
+ re_block = re.compile(r"^[^-]+-b(\d+)-\d\d[tde]")
991
+ with open(filter_path, encoding="utf-8") as f:
992
+ bad_blocks = {blk for blk in re.search(r"qw{([\s\d]*)}", f.read()).groups()[0].split()}
993
+ logger.info("Loaded %d bad blocks to filter from CzEng v1.6 to make v1.7.", len(bad_blocks))
994
+
995
+ for path in paths:
996
+ for gz_path in sorted(glob.glob(path)):
997
+ with open(gz_path, "rb") as g, gzip.GzipFile(fileobj=g) as f:
998
+ filename = os.path.basename(gz_path)
999
+ for line_id, line in enumerate(f):
1000
+ line = line.decode("utf-8") # required for py3
1001
+ if not line.strip():
1002
+ continue
1003
+ id_, unused_score, cs, en = line.split("\t")
1004
+ if filter_path:
1005
+ block_match = re.match(re_block, id_)
1006
+ if block_match and block_match.groups()[0] in bad_blocks:
1007
+ continue
1008
+ sub_key = f"{filename}/{line_id}"
1009
+ yield sub_key, {
1010
+ "cs": cs.strip(),
1011
+ "en": en.strip(),
1012
+ }
1013
+
1014
+
1015
+ def _parse_hindencorp(path):
1016
+ with open(path, encoding="utf-8") as f:
1017
+ for line_id, line in enumerate(f):
1018
+ split_line = line.split("\t")
1019
+ if len(split_line) != 5:
1020
+ logger.warning("Skipping invalid HindEnCorp line: %s", line)
1021
+ continue
1022
+ yield line_id, {"translation": {"en": split_line[3].strip(), "hi": split_line[4].strip()}}
1023
+
1024
+
1025
+ class YakutParser:
1026
+ @staticmethod
1027
+ def create_generator(sub_generator_args=None, config=None):
1028
+ sub_generator = functools.partial(_parse_tsv, language_pair=config.language_pair, skiprows=1)
1029
+ return sub_generator, sub_generator_args
yakut.py ADDED
@@ -0,0 +1,34 @@
import datasets

from wmt_utils import Wmt, WmtConfig


_URL = "http://www.statmt.org/wmt22/translation-task.html"
# TODO: Update with citation of overview paper once it is published.
_CITATION = """
@ONLINE {wmt22translate,
    author = {Wikimedia Foundation},
    title = {EMNLP 2022 Seventh Conference on Machine Translation (WMT22), Shared Task: General Machine Translation},
    url = {http://www.statmt.org/wmt22/translation-task.html}
}
"""

_LANGUAGE_PAIRS = [("sah", "ru")]


class Yakut(Wmt):

    BUILDER_CONFIGS = [
        WmtConfig(
            description=f"Yakut {l1}-{l2} translation dataset",
            url=_URL,
            citation=_CITATION,
            language_pair=(l1, l2),
            version=datasets.Version("1.0.0"),
        )
        for l1, l2 in _LANGUAGE_PAIRS
    ]

    @property
    def _subsets(self):
        return {datasets.Split.TRAIN: ["yakut"]}
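For reference, a hedged sketch of how the new loading script might be used once both files are in place. The local path is a placeholder, the `sah-ru` config name follows from `WmtConfig` above, and recent versions of `datasets` may additionally require `trust_remote_code=True` for script-based datasets:

```python
from datasets import load_dataset

# "sah-ru" is the single configuration defined by yakut.py; it only has a TRAIN
# split, built from the "yakut" sub-dataset declared in wmt_utils.py.
ds = load_dataset("path/to/yakut.py", "sah-ru")
print(ds["train"][0])
# {'translation': {'sah': '...', 'ru': '...'}}
```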