# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Taoyuan Multilingual Translation Competition: translation dataset."""

import csv
import json
import os

import datasets


logger = datasets.logging.get_logger(__name__)


# Placeholder citation from the dataset-script template; replace it once an
# official citation for this dataset exists.
_CITATION = """\
@InProceedings{huggingface:dataset,
title = {A great new dataset},
author={huggingface, Inc.
},
year={2020}
}
"""

_DESCRIPTION = """\
Multilingual translation dataset pairing Traditional Chinese (zh_tw) with
English, Japanese, Korean, Indonesian, Vietnamese and Thai. Each builder
configuration covers one language pair and is named "<lang>-zh_tw"
(e.g. "en-zh_tw").
"""

_LANGUAGE_PAIRS = [(lang, "zh_tw") for lang in ["en", "ja", "ko", "id", "vi", "th"]]

_HOMEPAGE = "https://huggingface.co/Heng666"

_LICENSE = "cc-by-2.0"

# The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
# This reference URL is a leftover from the template; the actual data files are
# resolved as relative paths in `_split_generators`.
_URL = "http://www.statmt.org/wmt16/translation-task.html"


class Ted2020TWMTConfig(datasets.BuilderConfig):
    """BuilderConfig for ted2020TW-mt."""

    def __init__(self, url=None, citation=None, description=None, language_pair=(None, None), **kwargs):
        """
        Args:
          url: The reference URL for the dataset.
          citation: The paper citation for the dataset.
          description: The description of the dataset.
          language_pair: pair of languages that will be used for translation.
            Should contain two language codes, e.g. ("en", "zh_tw").
          **kwargs: keyword arguments forwarded to super.
        """
        name = "%s-%s" % (language_pair[0], language_pair[1])
        if "name" in kwargs:  # Add name suffix for custom configs
            name += "." + kwargs.pop("name")

        super().__init__(name=name, description=description, **kwargs)
        self.url = url or "http://www.statmt.org"
        self.citation = citation
        self.language_pair = language_pair


class Ted2020TWMTDataset(datasets.GeneratorBasedBuilder):
    """Translation dataset pairing zh_tw with en, ja, ko, id, vi and th."""

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIG_CLASS = Ted2020TWMTConfig
    BUILDER_CONFIGS = [
        Ted2020TWMTConfig(  # pylint:disable=g-complex-comprehension
            description="Taoyuan Metro %s-%s translation task dataset."
            % (l1, l2),
            url=_URL,
            citation=_CITATION,
            language_pair=(l1, l2),
            version=datasets.Version("1.0.0"),
        )
        for l1, l2 in _LANGUAGE_PAIRS
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {"translation": datasets.features.Translation(languages=self.config.language_pair)}
            ),
            homepage=_HOMEPAGE,
            citation=_CITATION,
            license=_LICENSE,
        )

    def _split_generators(self, dl_manager):
        lang_pair = self.config.language_pair
        # Build the file-name prefix used by the data files,
        # e.g. ("en", "zh_tw") -> "zh_tw-en".
        lang_pair_str = f"{lang_pair[1]}-{lang_pair[0]}"

        # Relative paths of the data files inside the dataset repository.
        files = {
            "train": os.path.join("train", f"{lang_pair_str}_translations_train.csv"),
            "test": os.path.join("test", f"{lang_pair_str}_translations_test.csv"),
        }

        try:
            data_dir = dl_manager.download_and_extract(files)
        except Exception:
            # Some language pairs ship without a test file; fall back to the
            # train split only.
            files.pop("test")
            data_dir = dl_manager.download_and_extract(files)

        output = []
        if "train" in files:
            output.append(
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN,
                    gen_kwargs={"filepath": data_dir["train"]},
                )
            )
        if "test" in files:
            output.append(
                datasets.SplitGenerator(
                    name=datasets.Split.TEST,
                    gen_kwargs={"filepath": data_dir["test"]},
                )
            )
        return output

    def _generate_examples(self, filepath):
        """Yields examples from a CSV file whose rows hold JSON-encoded translations."""
        with open(filepath, encoding="utf-8") as f:
            reader = csv.reader(f, delimiter=",", quotechar='"')
            for id_, row in enumerate(reader):
                if id_ == 0:
                    # The first row is assumed to be a header row; skip it.
                    continue
                # Each remaining row is assumed to hold a single JSON string
                # mapping language codes to text,
                # e.g. '{"en": "...", "zh_tw": "..."}'.
                translation_data = json.loads(row[0])
                yield id_, {
                    "translation": translation_data,
                }
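
# Minimal usage sketch, assuming this script sits in a Hugging Face dataset
# repo next to its train/*.csv and test/*.csv data files (the local script
# path below is hypothetical):
#
#     from datasets import load_dataset
#
#     ds = load_dataset("path/to/ted2020TW-mt.py", "en-zh_tw")
#     print(ds["train"][0]["translation"])  # {'en': '...', 'zh_tw': '...'}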