"""桃園多國語系翻譯競賽: Translate dataset.""" |
|
|
|
|
|
import csv |
|
import json |
|
import os |
|
import datasets |
|
|
|
logger = datasets.logging.get_logger(__name__) |

_CITATION = """\
@InProceedings{huggingface:dataset,
  title = {A great new dataset},
  author = {huggingface, Inc.},
  year = {2020}
}
"""

_DESCRIPTION = """\
Multilingual translation dataset for the Taoyuan Multilingual Translation
Competition, pairing Traditional Chinese (zh_tw) with English, Japanese,
Korean, Indonesian, Vietnamese, and Thai. Each language pair is a separate
config with train/test splits read from CSV files in which every row holds
a JSON-encoded translation pair.
"""

_LANGUAGE_PAIRS = [(lang, "zh_tw") for lang in ["en", "ja", "ko", "id", "vi", "th"]]
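
# Each pair above becomes one builder config named by joining the codes with
# a hyphen: "en-zh_tw", "ja-zh_tw", "ko-zh_tw", "id-zh_tw", "vi-zh_tw",
# "th-zh_tw" (see Ted2020TWMTConfig.__init__ below).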

_HOMEPAGE = "https://huggingface.co/Heng666"

_LICENSE = "cc-by-2.0"

_URL = "http://www.statmt.org/wmt16/translation-task.html"


class Ted2020TWMTConfig(datasets.BuilderConfig):
    """BuilderConfig for ted2020TW-mt."""

    def __init__(self, url=None, citation=None, description=None, language_pair=(None, None), **kwargs):
        """
        Args:
            url: The reference URL for the dataset.
            citation: The paper citation for the dataset.
            description: The description of the dataset.
            language_pair: pair of languages that will be used for translation.
                Should contain language-code strings, e.g. ("en", "zh_tw").
            **kwargs: keyword arguments forwarded to super.
        """
        name = "%s-%s" % (language_pair[0], language_pair[1])
        if "name" in kwargs:
            name += "." + kwargs.pop("name")

        super().__init__(name=name, description=description, **kwargs)

        self.url = url or "http://www.statmt.org"
        self.citation = citation
        self.language_pair = language_pair


class Ted2020TWMTDataset(datasets.GeneratorBasedBuilder):
    """Translation dataset pairing Traditional Chinese (zh_tw) with en, ja, ko, id, vi, and th."""

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIG_CLASS = Ted2020TWMTConfig

    BUILDER_CONFIGS = [
        Ted2020TWMTConfig(
            description="Taoyuan Metro %s-%s translation task dataset." % (l1, l2),
            url=_URL,
            citation=_CITATION,
            language_pair=(l1, l2),
            version=datasets.Version("1.0.0"),
        )
        for l1, l2 in _LANGUAGE_PAIRS
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {"translation": datasets.features.Translation(languages=self.config.language_pair)}
            ),
            homepage=_HOMEPAGE,
            citation=_CITATION,
            license=_LICENSE,
        )
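
    # Each yielded example is a single "translation" dict keyed by this
    # config's two language codes, e.g. {"en": "Hello", "zh_tw": "你好"}
    # (illustrative values).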

    def _split_generators(self, dl_manager):
        lang_pair = self.config.language_pair

        # Data files are named with the pair reversed: "zh_tw-<src>".
        lang_pair_str = f"{lang_pair[1]}-{lang_pair[0]}"

        files = {}
        files["train"] = os.path.join("train", f"{lang_pair_str}_translations_train.csv")
        files["test"] = os.path.join("test", f"{lang_pair_str}_translations_test.csv")

        try:
            data_dir = dl_manager.download_and_extract(files)
        except Exception:
            # Some configs may ship without a test file; fall back to train only.
            logger.warning("Could not fetch the test split for %s; loading train only.", lang_pair_str)
            files.pop("test")
            data_dir = dl_manager.download_and_extract(files)
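
        # When given a dict, download_and_extract returns a dict of local
        # paths under the same keys, e.g.
        # {"train": "/cache/.../zh_tw-en_translations_train.csv"}.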

        output = []
        if "train" in files:
            output.append(
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN,
                    gen_kwargs={"filepath": data_dir["train"]},
                )
            )
        if "test" in files:
            output.append(
                datasets.SplitGenerator(
                    name=datasets.Split.TEST,
                    gen_kwargs={"filepath": data_dir["test"]},
                )
            )
        return output

    def _generate_examples(self, filepath):
        """Yields examples from a CSV file whose first column holds a JSON-encoded translation dict."""
        with open(filepath, encoding="utf-8") as f:
            reader = csv.reader(f, delimiter=",", quotechar='"')
            for id_, row in enumerate(reader):
                # Skip the header row.
                if id_ == 0:
                    continue
                # Each data row stores the pair as a JSON object,
                # e.g. {"zh_tw": "...", "en": "..."}.
                translation_data = json.loads(row[0])
                yield id_, {"translation": translation_data}
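
# Illustrative input CSV shape (hypothetical header and values) matching the
# parsing above, where column 0 holds JSON wrapped in CSV quotes:
#
#     translation
#     "{""zh_tw"": ""你好"", ""en"": ""Hello""}"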