holylovenia committed on
Commit b9d7059
1 parent: 2069d69

Upload asr_smaldusc.py with huggingface_hub

Files changed (1)
  1. asr_smaldusc.py +182 -0
asr_smaldusc.py ADDED
@@ -0,0 +1,182 @@
+ # coding=utf-8
+ # Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+
+ import os
+ from pathlib import Path
+ from typing import Dict, List, Tuple
+
+ import datasets
+
+ from seacrowd.utils import schemas
+ from seacrowd.utils.configs import SEACrowdConfig
+ from seacrowd.utils.constants import Licenses, Tasks
+
+ # no bibtex citation
+ _CITATION = ""
+ _DATASETNAME = "asr_smaldusc"
+ _DESCRIPTION = """\
+ This open-source dataset consists of 4.8 hours of transcribed Malay scripted
+ speech focusing on daily-use sentences, comprising 2,839 utterances
+ contributed by ten speakers.
+ """
+
+ _HOMEPAGE = "https://magichub.com/datasets/malay-scripted-speech-corpus-daily-use-sentence/"
+ _LANGUAGES = ["zlm"]
+ _LICENSE = Licenses.CC_BY_NC_ND_4_0.value
+ _LOCAL = False
+ _URLS = {
+     _DATASETNAME: "https://magichub.com/df/df.php?file_name=Malay_Scripted_Speech_Corpus_Daily_Use_Sentence.zip",
+ }
+ _SUPPORTED_TASKS = [Tasks.TEXT_TO_SPEECH, Tasks.SPEECH_RECOGNITION]
+
+ _SOURCE_VERSION = "1.0.0"
+ _SEACROWD_VERSION = "2024.06.20"
+
+
+ class ASRSmaldusc(datasets.GeneratorBasedBuilder):
+     """ASR-Smaldusc consists of transcribed Malay scripted speech focusing on daily-use sentences."""
+
+     SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
+     SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)
+
+     SEACROWD_SCHEMA_NAME = "sptext"
+
+     BUILDER_CONFIGS = [
+         SEACrowdConfig(
+             name=f"{_DATASETNAME}_source",
+             version=SOURCE_VERSION,
+             description=f"{_DATASETNAME} source schema",
+             schema="source",
+             subset_id=_DATASETNAME,
+         ),
+         SEACrowdConfig(
+             name=f"{_DATASETNAME}_seacrowd_{SEACROWD_SCHEMA_NAME}",
+             version=SEACROWD_VERSION,
+             description=f"{_DATASETNAME} SEACrowd schema",
+             schema=f"seacrowd_{SEACROWD_SCHEMA_NAME}",
+             subset_id=_DATASETNAME,
+         ),
+     ]
+
+     DEFAULT_CONFIG_NAME = f"{_DATASETNAME}_source"
+
+     def _info(self) -> datasets.DatasetInfo:
+
+         if self.config.schema == "source":
+             features = datasets.Features(
+                 {
+                     "id": datasets.Value("string"),
+                     "channel": datasets.Value("string"),
+                     "uttrans_id": datasets.Value("string"),
+                     "speaker_id": datasets.Value("string"),
+                     "prompt": datasets.Value("string"),
+                     "transcription": datasets.Value("string"),
+                     "path": datasets.Value("string"),
+                     "audio": datasets.Audio(sampling_rate=16_000),
+                     "speaker_gender": datasets.Value("string"),
+                     "speaker_age": datasets.Value("int64"),
+                     "speaker_region": datasets.Value("string"),
+                     "speaker_device": datasets.Value("string"),
+                 }
+             )
+
+         elif self.config.schema == f"seacrowd_{self.SEACROWD_SCHEMA_NAME}":
+             features = schemas.speech_text_features
+         else:
+             raise ValueError(f"Invalid config schema: {self.config.schema}")
+
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
+         """Returns SplitGenerators."""
+
+         data_paths = {
+             _DATASETNAME: Path(dl_manager.download_and_extract(_URLS[_DATASETNAME])),
+         }
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={
+                     "filepath": data_paths[_DATASETNAME],
+                     "split": "train",
+                 },
+             )
+         ]
+
+     def _generate_examples(self, filepath: Path, split: str) -> Tuple[int, Dict]:
+         """Yields examples as (key, example) tuples."""
+
+         # read UTTRANSINFO file
+         # columns: channel, uttrans_id, speaker_id, prompt, transcription
+         uttransinfo_filepath = os.path.join(filepath, "UTTRANSINFO.txt")
+         with open(uttransinfo_filepath, "r", encoding="utf-8") as uttransinfo_file:
+             uttransinfo_data = uttransinfo_file.readlines()
+         uttransinfo_data = uttransinfo_data[1:]  # remove header
+         uttransinfo_data = [s.strip("\n").split("\t") for s in uttransinfo_data]
+
+         # read SPKINFO file
+         # columns: channel, speaker_id, gender, age, region, device
+         spkinfo_filepath = os.path.join(filepath, "SPKINFO.txt")
+         with open(spkinfo_filepath, "r", encoding="utf-8") as spkinfo_file:
+             spkinfo_data = spkinfo_file.readlines()
+         spkinfo_data = spkinfo_data[1:]  # remove header
+         spkinfo_data = [s.strip("\n").split("\t") for s in spkinfo_data]
+         # normalize gender codes to full words
+         for s in spkinfo_data:
+             if s[2] == "M":
+                 s[2] = "male"
+             elif s[2] == "F":
+                 s[2] = "female"
+             else:
+                 s[2] = None
+         # dictionary of metadata of each speaker
+         spkinfo_dict = {s[1]: {"speaker_gender": s[2], "speaker_age": int(s[3]), "speaker_region": s[4], "speaker_device": s[5]} for s in spkinfo_data}
+
+         num_sample = len(uttransinfo_data)
+
+         for i in range(num_sample):
+             # audio files are stored under WAV/<speaker_id>/<uttrans_id>
+             wav_path = os.path.join(filepath, "WAV", uttransinfo_data[i][2], uttransinfo_data[i][1])
+
+             if self.config.schema == "source":
+                 example = {
+                     "id": str(i),
+                     "channel": uttransinfo_data[i][0],
+                     "uttrans_id": uttransinfo_data[i][1],
+                     "speaker_id": uttransinfo_data[i][2],
+                     "prompt": uttransinfo_data[i][3],
+                     "transcription": uttransinfo_data[i][4],
+                     "path": wav_path,
+                     "audio": wav_path,
+                     "speaker_gender": spkinfo_dict[uttransinfo_data[i][2]]["speaker_gender"],
+                     "speaker_age": spkinfo_dict[uttransinfo_data[i][2]]["speaker_age"],
+                     "speaker_region": spkinfo_dict[uttransinfo_data[i][2]]["speaker_region"],
+                     "speaker_device": spkinfo_dict[uttransinfo_data[i][2]]["speaker_device"],
+                 }
+             elif self.config.schema == f"seacrowd_{self.SEACROWD_SCHEMA_NAME}":
+                 example = {
+                     "id": str(i),
+                     "speaker_id": uttransinfo_data[i][2],
+                     "path": wav_path,
+                     "audio": wav_path,
+                     "text": uttransinfo_data[i][4],
+                     "metadata": {"speaker_age": spkinfo_dict[uttransinfo_data[i][2]]["speaker_age"], "speaker_gender": spkinfo_dict[uttransinfo_data[i][2]]["speaker_gender"]},
+                 }
+             else:
+                 raise ValueError(f"Invalid config schema: {self.config.schema}")
+
+             yield i, example
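
A minimal usage sketch (not part of the commit): loading this builder through the `datasets` library. The local script path and the `trust_remote_code` flag are assumptions about your checkout and your `datasets` version; the config names come from BUILDER_CONFIGS above.

import datasets

dataset = datasets.load_dataset(
    "asr_smaldusc.py",  # hypothetical local path to this script
    name="asr_smaldusc_source",  # or "asr_smaldusc_seacrowd_sptext"
    trust_remote_code=True,  # script-based loaders may require this on recent `datasets` releases
)

# Inspect one example; the Audio feature decodes lazily on access.
sample = dataset["train"][0]
print(sample["transcription"])
print(sample["audio"]["sampling_rate"])  # 16000, per the feature definition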