holylovenia committed 58942cf (1 parent: 872924f)

Upload basaha_corpus.py with huggingface_hub

Files changed (1): basaha_corpus.py (new file, +186 lines)
from pathlib import Path
from typing import Dict, List, Tuple

import datasets
import pandas as pd

from seacrowd.utils import schemas
from seacrowd.utils.configs import SEACrowdConfig
from seacrowd.utils.constants import Licenses, Tasks

_CITATION = """\
@inproceedings{imperial-kochmar-2023-basahacorpus,
    title = "{B}asaha{C}orpus: An Expanded Linguistic Resource for Readability Assessment in {C}entral {P}hilippine Languages",
    author = "Imperial, Joseph Marvin and
      Kochmar, Ekaterina",
    editor = "Bouamor, Houda and
      Pino, Juan and
      Bali, Kalika",
    booktitle = "Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing",
    month = dec,
    year = "2023",
    address = "Singapore",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2023.emnlp-main.388",
    doi = "10.18653/v1/2023.emnlp-main.388",
    pages = "6302--6309",
}
"""

_DATASETNAME = "basaha_corpus"

_DESCRIPTION = """
BasahaCorpus contains short stories in four Central Philippine languages \
(Minasbate, Rinconada, Kinaray-a, and Hiligaynon) for low-resource \
readability assessment. Each dataset per language contains stories \
distributed over the first three grade levels (L1, L2, and L3) in \
the Philippine education context. The grade levels of the dataset \
have been provided by an expert from Let's Read Asia.
"""

_HOMEPAGE = "https://github.com/imperialite/BasahaCorpus-HierarchicalCrosslingualARA"

_LANGUAGES = [
    "msb",
    "rin",
    "kar",
    "hil",
]  # We follow the ISO 639-3 language codes (https://iso639-3.sil.org/code_tables/639/data)

_LICENSE = Licenses.CC_BY_NC_SA_4_0.value

_LOCAL = False

_URLS = {
    # Minasbate, Rinconada, Kinaray-a, and Hiligaynon (from the _DESCRIPTION)
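    # note that the "msb" entry points to min_features.csv; the upstream file
    # prefixes do not always match the language keys used here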
    "msb": "https://raw.githubusercontent.com/imperialite/BasahaCorpus-HierarchicalCrosslingualARA/main/data/features/min_features.csv",
    "rin": "https://raw.githubusercontent.com/imperialite/BasahaCorpus-HierarchicalCrosslingualARA/main/data/features/rin_features.csv",
    "kar": "https://raw.githubusercontent.com/imperialite/BasahaCorpus-HierarchicalCrosslingualARA/main/data/features/kar_features.csv",
    "hil": "https://raw.githubusercontent.com/imperialite/BasahaCorpus-HierarchicalCrosslingualARA/main/data/features/hil_features.csv",
}

_SUPPORTED_TASKS = [Tasks.READABILITY_ASSESSMENT]

_SOURCE_VERSION = "1.0.0"

_SEACROWD_VERSION = "2024.06.20"


class BasahaCorpusDataset(datasets.GeneratorBasedBuilder):
    """
    BasahaCorpus comprises short stories in four Central Philippine
    languages (Minasbate, Rinconada, Kinaray-a, and Hiligaynon)
    for low-resource readability assessment. Each language dataset
    includes stories from the first three grade levels (L1, L2, and L3)
    in the Philippine education context, as classified by an expert
    from Let's Read Asia.
    """

    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
    SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)

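    # one "source" config and one "seacrowd_text" config per language,
    # e.g. basaha_corpus_msb_source, basaha_corpus_msb_seacrowd_text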
    BUILDER_CONFIGS = [
        SEACrowdConfig(
            name=f"{_DATASETNAME}_{lang}_source",
            version=datasets.Version(_SOURCE_VERSION),
            description=f"{_DATASETNAME} source schema",
            schema="source",
            subset_id=f"{_DATASETNAME}_{lang}",
        )
        for lang in _LANGUAGES
    ] + [
        SEACrowdConfig(
            name=f"{_DATASETNAME}_{lang}_seacrowd_text",
            version=datasets.Version(_SEACROWD_VERSION),
            description=f"{_DATASETNAME} SEACrowd schema",
            schema="seacrowd_text",
            subset_id=f"{_DATASETNAME}_{lang}",
        )
        for lang in _LANGUAGES
    ]

    DEFAULT_CONFIG_NAME = f"{_DATASETNAME}_msb_source"

    def _info(self) -> datasets.DatasetInfo:

        if self.config.schema == "source":
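            # feature names mirror the column headers of the upstream *_features.csv files
            # (including the "trigam" spelling in several trigram-similarity columns)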
            features = datasets.Features(
                {
                    "book_title": datasets.Value("string"),
                    "word_count": datasets.Value("int64"),
                    "sentence_count": datasets.Value("int64"),
                    "phrase_count_per_sentence": datasets.Value("float64"),
                    "average_word_len": datasets.Value("float64"),
                    "average_sentence_len": datasets.Value("float64"),
                    "average_syllable_count": datasets.Value("float64"),
                    "polysyll_count": datasets.Value("int64"),
                    "consonant_cluster_density": datasets.Value("float64"),
                    "v_density": datasets.Value("float64"),
                    "cv_density": datasets.Value("float64"),
                    "vc_density": datasets.Value("float64"),
                    "cvc_density": datasets.Value("float64"),
                    "vcc_density": datasets.Value("float64"),
                    "cvcc_density": datasets.Value("float64"),
                    "ccvc_density": datasets.Value("float64"),
                    "ccv_density": datasets.Value("float64"),
                    "ccvcc_density": datasets.Value("float64"),
                    "ccvccc_density": datasets.Value("float64"),
                    "tag_bigram_sim": datasets.Value("float64"),
                    "bik_bigram_sim": datasets.Value("float64"),
                    "ceb_bigram_sim": datasets.Value("float64"),
                    "hil_bigram_sim": datasets.Value("float64"),
                    "rin_bigram_sim": datasets.Value("float64"),
                    "min_bigram_sim": datasets.Value("float64"),
                    "kar_bigram_sim": datasets.Value("float64"),
                    "tag_trigram_sim": datasets.Value("float64"),
                    "bik_trigram_sim": datasets.Value("float64"),
                    "ceb_trigam_sim": datasets.Value("float64"),
                    "hil_trigam_sim": datasets.Value("float64"),
                    "rin_trigam_sim": datasets.Value("float64"),
                    "min_trigam_sim": datasets.Value("float64"),
                    "kar_trigam_sim": datasets.Value("float64"),
                    "grade_level": datasets.Value("string"),
                }
            )

        elif self.config.schema == "seacrowd_text":
            features = schemas.text_features(["1", "2", "3"])

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        """Returns SplitGenerators."""

        lang = self.config.name.split("_")[2]

        if lang in _LANGUAGES:
            data_path = Path(dl_manager.download_and_extract(_URLS[lang]))
        else:
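            # fallback when no single language is recognized in the config name;
            # _generate_examples expects a single CSV path, so this branch is
            # effectively unused by the configs defined above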
            data_path = [Path(dl_manager.download_and_extract(_URLS[lang])) for lang in _LANGUAGES]

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": data_path,
                    "split": "train",
                },
            )
        ]

    def _generate_examples(self, filepath: Path, split: str) -> Tuple[int, Dict]:
        """Yields examples as (key, example) tuples."""

        df = pd.read_csv(filepath, index_col=None)

        for index, row in df.iterrows():

            if self.config.schema == "source":
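                # pass the CSV row through unchanged; its keys must match the
                # source-schema features declared in _info above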
                example = row.to_dict()

            elif self.config.schema == "seacrowd_text":

                example = {
                    "id": str(index),
                    "text": str(row["book_title"]),
                    "label": str(row["grade_level"]),
                }

            yield index, example
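
For reference, a minimal loading sketch (an illustration only, assuming this script is saved locally as basaha_corpus.py and the seacrowd package is importable; config names follow BUILDER_CONFIGS above, and depending on your datasets version trust_remote_code=True may be required):

    import datasets

    # source schema: one row of readability features per story
    source = datasets.load_dataset("basaha_corpus.py", name="basaha_corpus_hil_source", split="train", trust_remote_code=True)

    # seacrowd_text schema: id / text (book title) / label (grade level "1"-"3")
    text = datasets.load_dataset("basaha_corpus.py", name="basaha_corpus_hil_seacrowd_text", split="train", trust_remote_code=True)

    print(text[0])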