Update files from the datasets library (from 1.5.0)
Release notes: https://github.com/huggingface/datasets/releases/tag/1.5.0
alt.py
CHANGED
@@ -71,8 +71,8 @@ class AltParallelConfig(datasets.BuilderConfig):
         available_langs = set(
             ["bg", "en", "en_tok", "fil", "hi", "id", "ja", "khm", "lo", "ms", "my", "th", "vi", "zh"]
         )
-        for
-        assert
+        for language in languages:
+            assert language in available_langs

         self.languages = languages

@@ -253,7 +253,7 @@ class Alt(datasets.GeneratorBasedBuilder):
         elif self.config.name == "alt-en":
             data = {}
             for fname in ["English-ALT-Draft.txt", "English-ALT-Reviewed.txt"]:
-                file_path = os.path.join(basepath,
+                file_path = os.path.join(basepath, "English-ALT-20170107", fname)
                 fin = open(file_path, encoding="utf-8")
                 for line in fin:
                     line = line.strip()
@@ -282,7 +282,7 @@ class Alt(datasets.GeneratorBasedBuilder):
         elif self.config.name == "alt-jp":
             data = {}
             for fname in ["Japanese-ALT-Draft.txt", "Japanese-ALT-Reviewed.txt"]:
-                file_path = os.path.join(basepath,
+                file_path = os.path.join(basepath, "Japanese-ALT-20170330", fname)
                 fin = open(file_path, encoding="utf-8")
                 for line in fin:
                     line = line.strip()
@@ -316,7 +316,7 @@ class Alt(datasets.GeneratorBasedBuilder):
                 "jp_tokenized": "word-alignment/data_ja.ja-tok",
             }
             for k in keys:
-                file_path = os.path.join(basepath,
+                file_path = os.path.join(basepath, "Japanese-ALT-20170330", keys[k])
                 fin = open(file_path, encoding="utf-8")
                 for line in fin:
                     line = line.strip()
@@ -338,7 +338,7 @@ class Alt(datasets.GeneratorBasedBuilder):
         elif self.config.name == "alt-my":
             data = {}
             for fname in ["data"]:
-                file_path = os.path.join(basepath,
+                file_path = os.path.join(basepath, "my-alt-190530", fname)
                 fin = open(file_path, encoding="utf-8")
                 for line in fin:
                     line = line.strip()
@@ -358,7 +358,7 @@ class Alt(datasets.GeneratorBasedBuilder):
         elif self.config.name == "alt-km":
             data = {}
             for fname in ["data_km.km-tag.nova", "data_km.km-tok.nova"]:
-                file_path = os.path.join(basepath,
+                file_path = os.path.join(basepath, "km-nova-181101", fname)
                 fin = open(file_path, encoding="utf-8")
                 for line in fin:
                     line = line.strip()
@@ -382,7 +382,7 @@ class Alt(datasets.GeneratorBasedBuilder):
                 fin.close()

         elif self.config.name == "alt-my-transliteration":
-            file_path = os.path.join(basepath,
+            file_path = os.path.join(basepath, "my-en-transliteration", "data.txt")
             # Need to set errors='ignore' because of the unknown error
             # UnicodeDecodeError: 'utf-8' codec can't decode byte 0xff in position 0: invalid start byte
             # It might due to some issues related to Myanmar alphabets
@@ -403,7 +403,7 @@ class Alt(datasets.GeneratorBasedBuilder):
                 _id += 1
             fin.close()
         elif self.config.name == "alt-my-west-transliteration":
-            file_path = os.path.join(basepath,
+            file_path = os.path.join(basepath, "western-myanmar-transliteration", "321.txt")
             # Need to set errors='ignore' because of the unknown error
             # UnicodeDecodeError: 'utf-8' codec can't decode byte 0xff in position 0: invalid start byte
             # It might due to some issues related to Myanmar alphabets
@@ -414,7 +414,7 @@ class Alt(datasets.GeneratorBasedBuilder):
                 line = line.replace("\x00", "")
                 sp = line.split("|||")

-                data[_id] = {"en": sp[0].strip(), "my": [
+                data[_id] = {"en": sp[0].strip(), "my": [k.strip() for k in sp[1].split("|")]}
                 _id += 1
             fin.close()

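The first hunk validates the requested languages in `AltParallelConfig` with an explicit loop and an assert. A standalone sketch of that check; the language codes are copied from the diff, while the helper name and the example calls are made up:

```python
# Sketch of the language check added at lines 74-75 of alt.py.
available_langs = set(
    ["bg", "en", "en_tok", "fil", "hi", "id", "ja", "khm", "lo", "ms", "my", "th", "vi", "zh"]
)

def validate_languages(languages):
    # Fail fast on any code the ALT parallel corpus does not provide.
    for language in languages:
        assert language in available_langs
    return languages

validate_languages(["en", "ja", "khm"])  # passes
# validate_languages(["en", "de"])       # would raise AssertionError ("de" is not listed)
```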
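Most of the remaining hunks make the same kind of change: the subdirectory of the extracted archive is now joined into the file path together with the file name. A small illustration of what the new `os.path.join` call in the `alt-en` branch produces; `basepath` here is a made-up extraction directory, the other names come from the diff:

```python
import os

basepath = "/tmp/alt_data"  # placeholder for the real download/extraction directory
for fname in ["English-ALT-Draft.txt", "English-ALT-Reviewed.txt"]:
    # Directory and file names are taken from the alt-en hunk above.
    print(os.path.join(basepath, "English-ALT-20170107", fname))
# /tmp/alt_data/English-ALT-20170107/English-ALT-Draft.txt
# /tmp/alt_data/English-ALT-20170107/English-ALT-Reviewed.txt
```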
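The comments kept as context in the two transliteration hunks explain why those files are read with `errors='ignore'`. A self-contained illustration of that decoding behaviour (this is not code from alt.py):

```python
# A stray 0xff byte is not valid UTF-8, which is exactly the error quoted in
# the comment; errors="ignore" drops undecodable bytes instead of raising.
raw = b"\xffhello"          # made-up bytes starting with an invalid start byte
# raw.decode("utf-8")       # raises UnicodeDecodeError: invalid start byte
print(raw.decode("utf-8", errors="ignore"))  # prints "hello"
```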
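The last hunk builds each transliteration record by splitting a line on `|||` and then splitting the Myanmar side on `|`. A sketch of that parsing on a made-up line; only the split and strip logic comes from the diff:

```python
line = "example sentence ||| variant one | variant two"
line = line.replace("\x00", "")
sp = line.split("|||")
record = {"en": sp[0].strip(), "my": [k.strip() for k in sp[1].split("|")]}
print(record)
# {'en': 'example sentence', 'my': ['variant one', 'variant two']}
```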
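For completeness, a minimal loading sketch, assuming this script is served through the datasets library under the dataset name `alt`; `alt-en` is one of the configuration names that appear in the diff:

```python
from datasets import load_dataset

# Assumes datasets>=1.5.0 and that "alt" / "alt-en" are the dataset and config names.
ds = load_dataset("alt", "alt-en")
print(ds)
```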