mespla committed on
Commit
ea3a10e
1 Parent(s): 613c851

Adding corpus management script

Browse files
Files changed (1) hide show
  1. macocu.py +112 -0
macocu.py ADDED
@@ -0,0 +1,112 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """macocu_parallel"""
16
+
17
+
18
+ import os
19
+ import csv
20
+ import datasets
21
+
22
+
23
+ _CITATION = """\
24
+ @inproceedings{banon2022macocu,
25
+ title={MaCoCu: Massive collection and curation of monolingual and bilingual data: focus on under-resourced languages},
26
+ author={Ban{\'o}n, Marta and Espla-Gomis, Miquel and Forcada, Mikel L and Garc{\'\i}a-Romero, Cristian and Kuzman, Taja and Ljube{\v{s}}i{\'c}, Nikola and van Noord, Rik and Sempere, Leopoldo Pla and Ram{\'\i}rez-S{\'a}nchez, Gema and Rupnik, Peter and others},
27
+ booktitle={23rd Annual Conference of the European Association for Machine Translation, EAMT 2022},
28
+ pages={303--304},
29
+ year={2022},
30
+ organization={European Association for Machine Translation}
31
+ }
32
+ """
33
+
34
+ _DESCRIPTION = """\
35
+ The MaCoCu parallel dataset is an English-centric collection of 11
36
+ parallel corpora including the following languages: Albanian,
37
+ Bulgarian, Bosnian, Croatian, Icelandic, Macedonian, Maltese,
38
+ Montenegrin, Serbian, Slovenian, and Turkish. These corpora have
39
+ been automatically crawled from national and generic top-level
40
+ domains (for example, ".hr" for croatian, or ".is" for icelandic);
41
+ then, a parallel curation pipeline has been applied to produce
42
+ the final data (see https://github.com/bitextor/bitextor).
43
+ """
44
+
45
+ _URL = {
46
+ "evaluation": "https://object.pouta.csc.fi/Tatoeba-Challenge-devtest/test.tar",
47
+ "development": "https://object.pouta.csc.fi/Tatoeba-Challenge-devtest/dev.tar",
48
+ }
49
+
50
+ # _RELEASE = "v2021-08-07"
51
+ # _RELEASES = [ "v2020-07-28", "v2021-03-30", "v2021-08-07", ]
52
+ _LanguagePairs = [ "en-bs", "en-bg", "en-is", "en-hr", "en-sq", "en-mt", "en-mk", "en-cnr", "en-sr", "en-sl", "en-tr" ]
53
+
54
+ _LICENSE = "cc0"
55
+ _HOMEPAGE = "https://macocu.eu"
56
+
57
class macocuConfig(datasets.BuilderConfig):
    """BuilderConfig for macocu_parallel."""

    def __init__(self, language_pair, **kwargs):
        """Create a config for one language pair.

        Args:
            language_pair: language pair to be loaded (e.g. "en-hr").
            **kwargs: keyword arguments forwarded to super.
        """
        # BUG FIX: the original placed this docstring AFTER the call to
        # super().__init__(), where it was a dead string expression rather
        # than the method docstring; it is now in the proper position.
        super().__init__(**kwargs)
        self.language_pair = language_pair
69
+
70
+
71
class MaCoCu_parallel(datasets.GeneratorBasedBuilder):
    """English-centric parallel corpora crawled by the MaCoCu project.

    One builder config per language pair in ``_LanguagePairs``; each
    example is a single source/target sentence pair read from a TSV file.
    """

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIG_CLASS = macocuConfig
    BUILDER_CONFIGS = [
        macocuConfig(name=pair, description=_DESCRIPTION, language_pair=pair)
        for pair in _LanguagePairs
    ]

    def _info(self):
        """Return dataset metadata: two string features per example."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features({
                "sourceString": datasets.Value("string"),
                "targetString": datasets.Value("string")
            }),
            homepage=_HOMEPAGE,
            citation=_CITATION,
            license=_LICENSE
        )

    def _split_generators(self, dl_manager):
        """Resolve the per-pair TSV path; all data goes into TRAIN."""
        lang_pair = self.config.language_pair

        path = os.path.join("data", f"{lang_pair}.macocuv2.tsv")

        # BUG FIX: gen_kwargs keys must match _generate_examples'
        # parameter names.  The original downloaded under the key
        # "data_file", so generation raised
        # TypeError: unexpected keyword argument 'data_file'.
        data_files = dl_manager.download_and_extract({"filepath": path})
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN, gen_kwargs=data_files
            )
        ]

    def _generate_examples(self, filepath):
        """Yield ``(id, {sourceString, targetString})`` examples.

        NOTE(review): the first TSV row is skipped — presumably a header
        line; confirm against the released corpus files.
        """
        with open(filepath, encoding="utf-8") as f:
            reader = csv.reader(f, delimiter="\t", quotechar='"')
            for id_, row in enumerate(reader):
                if id_ == 0:
                    continue
                yield id_, {
                    "sourceString": row[0],
                    "targetString": row[1]
                }