Datasets:

Languages:
Thai
ArXiv:
License:
holylovenia committed on
Commit
2e3a93e
1 Parent(s): d366fbf

Upload thai_elderly_speech.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. thai_elderly_speech.py +184 -0
thai_elderly_speech.py ADDED
@@ -0,0 +1,184 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ import json
17
+ from pathlib import Path
18
+ from typing import Dict, List, Tuple
19
+
20
+ import datasets
21
+
22
+ from seacrowd.utils.configs import SEACrowdConfig
23
+ from seacrowd.utils.constants import (SCHEMA_TO_FEATURES, TASK_TO_SCHEMA,
24
+ Licenses, Tasks)
25
+
26
# Module-level metadata constants consumed by the SEACrowd dataloader framework.

_CITATION = ""  # no dataset/paper citation found

_DATASETNAME = "thai_elderly_speech"

_DESCRIPTION = """\
The Thai Elderly Speech dataset by Data Wow and VISAI Version 1 dataset aims at
advancing Automatic Speech Recognition (ASR) technology specifically for the
elderly population. Researchers can use this dataset to advance ASR technology
for healthcare and smart home applications. The dataset consists of 19,200 audio
files, totaling 17 hours and 11 minutes of recorded speech. The files are
divided into 2 categories: Healthcare (relating to medical issues and services
in 30 medical categories) and Smart Home (relating to smart home devices in 7
household contexts). The dataset contains 5,156 unique sentences spoken by 32
seniors (10 males and 22 females), aged 57-60 years old (average age of 63
years).
"""

_HOMEPAGE = "https://github.com/VISAI-DATAWOW/Thai-Elderly-Speech-dataset/releases/tag/v1.0.0"

_LANGUAGES = ["tha"]  # Thai
# Subset ids; title-cased they name the top-level folders inside the archive
# (see _split_generators: Healthcare/, Smarthome/).
_SUBSETS = ["healthcare", "smarthome"]

_LICENSE = Licenses.CC_BY_SA_4_0.value

_LOCAL = False  # publicly downloadable, no local copy required

# The GitHub release splits one zip archive into three parts; they are
# byte-concatenated back into a single archive before extraction.
_URLS = [
    "https://github.com/VISAI-DATAWOW/Thai-Elderly-Speech-dataset/releases/download/v1.0.0/Dataset.zip.001",
    "https://github.com/VISAI-DATAWOW/Thai-Elderly-Speech-dataset/releases/download/v1.0.0/Dataset.zip.002",
    "https://github.com/VISAI-DATAWOW/Thai-Elderly-Speech-dataset/releases/download/v1.0.0/Dataset.zip.003",
]

# NOTE(review): this is an ASR corpus; confirm SPEECH_TO_TEXT_TRANSLATION is the
# intended task constant rather than a plain speech-recognition task — the
# resulting schema used throughout this loader is sptext.
_SUPPORTED_TASKS = [Tasks.SPEECH_TO_TEXT_TRANSLATION]
_SEACROWD_SCHEMA = f"seacrowd_{TASK_TO_SCHEMA[_SUPPORTED_TASKS[0]].lower()}"  # sptext

_SOURCE_VERSION = "1.0.0"

_SEACROWD_VERSION = "2024.06.20"
64
+
65
+
66
class ThaiElderlySpeechDataset(datasets.GeneratorBasedBuilder):
    """A speech dataset from elderly Thai speakers.

    Exposes two subsets (``healthcare``, ``smarthome``), each in two schemas:
    the ``source`` schema (audio + transcription + speaker demographics) and
    the SEACrowd sptext schema.
    """

    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
    SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)

    # One source config and one SEACrowd config per subset.
    BUILDER_CONFIGS = []
    for subset in _SUBSETS:
        BUILDER_CONFIGS += [
            SEACrowdConfig(
                name=f"{_DATASETNAME}_{subset}_source",
                version=SOURCE_VERSION,
                description=f"{_DATASETNAME} {subset} source schema",
                schema="source",
                subset_id=subset,
            ),
            SEACrowdConfig(
                name=f"{_DATASETNAME}_{subset}_{_SEACROWD_SCHEMA}",
                version=SEACROWD_VERSION,
                description=f"{_DATASETNAME} {subset} SEACrowd schema",
                schema=_SEACROWD_SCHEMA,
                subset_id=subset,
            ),
        ]

    DEFAULT_CONFIG_NAME = f"{_DATASETNAME}_healthcare_source"

    def _info(self) -> datasets.DatasetInfo:
        """Return dataset metadata; features depend on the configured schema."""
        if self.config.schema == "source":
            features = datasets.Features(
                {
                    "audio": datasets.Audio(sampling_rate=16_000),
                    "filename": datasets.Value("string"),
                    "transcription": datasets.Value("string"),
                    "speaker": {
                        "id": datasets.Value("string"),
                        "age": datasets.Value("int32"),
                        "gender": datasets.Value("string"),
                    },
                }
            )
        elif self.config.schema == _SEACROWD_SCHEMA:
            # fixed comment: this is the sptext feature set (was mislabeled
            # "ssp_features"); see _SEACROWD_SCHEMA above.
            features = SCHEMA_TO_FEATURES[TASK_TO_SCHEMA[_SUPPORTED_TASKS[0]]]

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        """Returns SplitGenerators.

        The release ships a single zip split into three parts
        (Dataset.zip.001..003); the parts are byte-concatenated back into one
        archive before extraction. Only a train split is provided.
        """
        zip_parts = [Path(p) for p in dl_manager.download(_URLS)]
        zip_combined = zip_parts[0].parent / "thai_elderly_speech.zip"

        # Concatenate in fixed-size chunks instead of reading each multi-part
        # file fully into memory (the parts are large audio archives).
        with open(zip_combined, "wb") as out_file:
            for zip_part in zip_parts:
                with open(zip_part, "rb") as in_file:
                    for chunk in iter(lambda: in_file.read(1 << 20), b""):
                        out_file.write(chunk)

        data_dir = Path(dl_manager.extract(zip_combined)) / "Dataset"
        # Subset ids are lowercase; the archive folders are title-cased
        # (e.g. "healthcare" -> "Healthcare").
        subset_dir = data_dir / self.config.subset_id.title()

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "speaker_file": data_dir / "speaker_demography.json",
                    "audio_dir": subset_dir / "Record",
                    "transcript_file": subset_dir / "transcription.json",
                },
            ),
        ]

    def _generate_examples(self, speaker_file: Path, audio_dir: Path, transcript_file: Path) -> Tuple[int, Dict]:
        """Yields examples as (key, example) tuples.

        Args:
            speaker_file: JSON list of speaker records with ``speaker_id``,
                ``age`` and ``gender`` keys.
            audio_dir: Directory holding the ``<filename>.wav`` recordings.
            transcript_file: JSON list of records with ``filename``,
                ``transcript`` and ``speaker_id`` keys.
        """
        # Build speaker_id -> demographics lookup. (Renamed from the original
        # "speaker_info", which was shadowed inside the loop below.)
        with open(speaker_file, "r", encoding="utf-8") as f:
            demographics = json.load(f)
        speaker_dict = {entry["speaker_id"]: {"age": entry["age"], "gender": entry["gender"]} for entry in demographics}

        # read transcript information
        with open(transcript_file, "r", encoding="utf-8") as f:
            annotations = json.load(f)

        for idx, instance in enumerate(annotations):
            transcript = instance["transcript"]

            speaker_id = instance["speaker_id"]
            # int() cast kept from the original: the demographics file
            # presumably stores integer ids while the transcript stores
            # strings — TODO confirm against the released JSON.
            speaker = speaker_dict[int(speaker_id)]

            filename = instance["filename"]
            audio_file = str(audio_dir / f"{filename}.wav")

            if self.config.schema == "source":
                yield idx, {
                    "audio": audio_file,
                    "filename": filename,
                    "transcription": transcript,
                    "speaker": {
                        "id": speaker_id,
                        "age": speaker["age"],
                        "gender": speaker["gender"],
                    },
                }
            elif self.config.schema == _SEACROWD_SCHEMA:
                yield idx, {
                    "id": idx,
                    "path": audio_file,
                    "audio": audio_file,
                    "text": transcript,
                    "speaker_id": speaker_id,
                    "metadata": {
                        "speaker_age": speaker["age"],
                        "speaker_gender": speaker["gender"],
                    },
                }