Datasets:

ArXiv:
License:
holylovenia committed on
Commit
195f1e9
1 Parent(s): 87d2c0a

Upload aya_collection_templated.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. aya_collection_templated.py +167 -0
aya_collection_templated.py ADDED
@@ -0,0 +1,167 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from pathlib import Path
2
+ from typing import Dict, List, Tuple
3
+
4
+ import datasets
5
+ import pandas as pd
6
+
7
+ from seacrowd.utils import schemas
8
+ from seacrowd.utils.configs import SEACrowdConfig
9
+ from seacrowd.utils.constants import Licenses, Tasks
10
+
11
+ _CITATION = """
12
+ @misc{singh2024aya,
13
+ title={Aya Dataset: An Open-Access Collection for Multilingual Instruction Tuning},
14
+ author={Shivalika Singh and Freddie Vargus and Daniel Dsouza and Börje F. Karlsson and
15
+ Abinaya Mahendiran and Wei-Yin Ko and Herumb Shandilya and Jay Patel and Deividas
16
+ Mataciunas and Laura OMahony and Mike Zhang and Ramith Hettiarachchi and Joseph
17
+ Wilson and Marina Machado and Luisa Souza Moura and Dominik Krzemiński and Hakimeh
18
+ Fadaei and Irem Ergün and Ifeoma Okoh and Aisha Alaagib and Oshan Mudannayake and
19
+ Zaid Alyafeai and Vu Minh Chien and Sebastian Ruder and Surya Guthikonda and Emad A.
20
+ Alghamdi and Sebastian Gehrmann and Niklas Muennighoff and Max Bartolo and Julia Kreutzer
21
+ and Ahmet Üstün and Marzieh Fadaee and Sara Hooker},
22
+ year={2024},
23
+ eprint={2402.06619},
24
+ archivePrefix={arXiv},
25
+ primaryClass={cs.CL}
26
+ }
27
+ """
28
+
29
+ _DATASETNAME = "aya_collection_templated"
30
+
31
+ _DESCRIPTION = """
32
+ The Aya Collection is a massive multilingual collection consisting of 513 million instances of prompts and
33
+ completions covering a wide range of tasks. This dataset covers the templated prompts of the Aya Collection.
34
+ """
35
+
36
+ _HOMEPAGE = "https://huggingface.co/datasets/CohereForAI/aya_collection"
37
+
38
+ _LANGUAGES = ["ind", "jav", "sun", "ace", "ban", "bbc", "bjn", "min", "nij", "tha", "vie"]
39
+
40
+ _LICENSE = Licenses.APACHE_2_0.value
41
+
42
+ _LOCAL = False
43
+
44
+ _URLS = {
45
+ "ind": "https://huggingface.co/datasets/CohereForAI/aya_collection/resolve/main/templated_indo_stories/train-00000-of-00001.parquet",
46
+ "jav": "https://huggingface.co/datasets/CohereForAI/aya_collection/resolve/main/templated_indo_stories/train-00000-of-00001.parquet",
47
+ "sun": "https://huggingface.co/datasets/CohereForAI/aya_collection/resolve/main/templated_indo_stories/train-00000-of-00001.parquet",
48
+ "ace": "https://huggingface.co/datasets/CohereForAI/aya_collection/resolve/main/templated_nusax_senti/train-00000-of-00001.parquet",
49
+ "ban": "https://huggingface.co/datasets/CohereForAI/aya_collection/resolve/main/templated_nusax_senti/train-00000-of-00001.parquet",
50
+ "bbc": "https://huggingface.co/datasets/CohereForAI/aya_collection/resolve/main/templated_nusax_senti/train-00000-of-00001.parquet",
51
+ "bjn": "https://huggingface.co/datasets/CohereForAI/aya_collection/resolve/main/templated_nusax_senti/train-00000-of-00001.parquet",
52
+ "min": "https://huggingface.co/datasets/CohereForAI/aya_collection/resolve/main/templated_nusax_senti/train-00000-of-00001.parquet",
53
+ "nij": "https://huggingface.co/datasets/CohereForAI/aya_collection/resolve/main/templated_nusax_senti/train-00000-of-00001.parquet",
54
+ "tha": "https://huggingface.co/datasets/CohereForAI/aya_collection/resolve/main/templated_thai_wikitionary/train-00000-of-00001.parquet",
55
+ "vie": "https://huggingface.co/datasets/CohereForAI/aya_collection/resolve/main/templated_xcsqa/validation-00000-of-00001.parquet",
56
+ }
57
+
58
+ _SUPPORTED_TASKS = [Tasks.INSTRUCTION_TUNING]
59
+
60
+ _SOURCE_VERSION = "1.0.0"
61
+
62
+ _SEACROWD_VERSION = "2024.06.20"
63
+
64
+
class AyaCollectionTemplatedDataset(datasets.GeneratorBasedBuilder):
    """Loader for the templated subset of the Aya Collection.

    The Aya Collection is a massive multilingual collection consisting of 513 million instances of prompts and
    completions covering a wide range of tasks. This dataset covers the templated prompts of the Aya Collection.
    """

    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
    SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)

    # One "source" and one "seacrowd_t2t" config per language in _LANGUAGES.
    BUILDER_CONFIGS = [
        SEACrowdConfig(
            name=f"{_DATASETNAME}_{LANG}_source",
            version=datasets.Version(_SOURCE_VERSION),
            description=f"{_DATASETNAME} {LANG} source schema",
            schema="source",
            subset_id=f"{_DATASETNAME}_{LANG}",
        )
        for LANG in _LANGUAGES
    ] + [
        SEACrowdConfig(
            name=f"{_DATASETNAME}_{LANG}_seacrowd_t2t",
            version=datasets.Version(_SEACROWD_VERSION),
            description=f"{_DATASETNAME} {LANG} SEACrowd schema",
            schema="seacrowd_t2t",
            subset_id=f"{_DATASETNAME}_{LANG}",
        )
        for LANG in _LANGUAGES
    ]

    DEFAULT_CONFIG_NAME = f"{_DATASETNAME}_ind_source"

    @staticmethod
    def _config_language(config_name: str) -> str:
        """Return the language code embedded in a builder-config name.

        Config names look like ``aya_collection_templated_<lang>_<schema>``,
        so the language code is the fourth underscore-separated token.
        """
        return config_name.split("_")[3]

    def _info(self) -> datasets.DatasetInfo:
        """Build the :class:`datasets.DatasetInfo` for the active config.

        Raises:
            ValueError: if the config schema is neither "source" nor
                "seacrowd_t2t" (previously this fell through and crashed
                with an ``UnboundLocalError``).
        """
        if self.config.schema == "source":
            # Mirrors the column layout of the upstream parquet shards.
            features = datasets.Features(
                {
                    "id": datasets.Value("int64"),
                    "inputs": datasets.Value("string"),
                    "targets": datasets.Value("string"),
                    "dataset_name": datasets.Value("string"),
                    "sub_dataset_name": datasets.Value("string"),
                    "task_type": datasets.Value("string"),
                    "template_id": datasets.Value("int64"),
                    "language": datasets.Value("string"),
                    "split": datasets.Value("string"),
                    "script": datasets.Value("string"),
                }
            )
        elif self.config.schema == "seacrowd_t2t":
            features = schemas.text2text_features
        else:
            raise ValueError(f"Invalid config schema: {self.config.schema}")

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        """Download this config's parquet shard and return the TRAIN split.

        Raises:
            KeyError: if the config name carries a language with no entry in
                ``_URLS``. (The previous fallback built a *list* of paths,
                which ``pd.read_parquet`` in ``_generate_examples`` cannot
                consume; every generated config has a valid language, so the
                branch was dead — it now fails loudly instead.)
        """
        language = self._config_language(self.config.name)
        if language not in _URLS:
            raise KeyError(f"No download URL registered for language '{language}'")

        data_path = Path(dl_manager.download_and_extract(_URLS[language]))

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": data_path,
                    "split": "train",
                },
            )
        ]

    def _generate_examples(self, filepath: Path, split: str) -> Tuple[int, Dict]:
        """Yield (key, example) tuples for the active config's schema.

        Args:
            filepath: Local path of the downloaded parquet shard.
            split: Unused; kept to match the ``gen_kwargs`` contract.

        Raises:
            ValueError: if the config schema is unrecognized (previously an
                ``UnboundLocalError`` on ``example``).
        """
        language = self._config_language(self.config.name)

        # One parquet shard can contain several languages; keep only the rows
        # belonging to this config's language.
        df = pd.read_parquet(filepath)
        df = df[df["language"] == language]

        for index, row in df.iterrows():
            if self.config.schema == "source":
                example = row.to_dict()
            elif self.config.schema == "seacrowd_t2t":
                example = {
                    "id": str(index),
                    "text_1": row["inputs"],
                    "text_2": row["targets"],
                    "text_1_name": "inputs",
                    "text_2_name": "targets",
                }
            else:
                raise ValueError(f"Invalid config schema: {self.config.schema}")

            yield index, example