spktsagar committed on
Commit
5c9c480
1 Parent(s): 8911067

add data loader script

Browse files
Files changed (1) hide show
  1. openslr-nepali-asr-cleaned.py +127 -0
openslr-nepali-asr-cleaned.py ADDED
@@ -0,0 +1,127 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ """Leading and Trailing Silences Removed Large Nepali ASR Dataset"""
15
+
16
+
17
+ import csv
18
+ import json
19
+ import os
20
+
21
+ import datasets
22
+
23
+
24
+ _CITATION = """\
25
+ @inproceedings{kjartansson-etal-sltu2018,
26
+ title = {{Crowd-Sourced Speech Corpora for Javanese, Sundanese, Sinhala, Nepali, and Bangladeshi Bengali}},
27
+ author = {Oddur Kjartansson and Supheakmungkol Sarin and Knot Pipatsrisawat and Martin Jansche and Linne Ha},
28
+ booktitle = {Proc. The 6th Intl. Workshop on Spoken Language Technologies for Under-Resourced Languages (SLTU)},
29
+ year = {2018},
30
+ address = {Gurugram, India},
31
+ month = aug,
32
+ pages = {52--55},
33
+ URL = {http://dx.doi.org/10.21437/SLTU.2018-11}
34
+ }
35
+ """
36
+
37
+ _DESCRIPTION = """\
38
+ This data set contains transcribed audio data for Nepali. The data set consists of flac files, and a TSV file. The file utt_spk_text.tsv contains a FileID, anonymized UserID and the transcription of audio in the file.
39
+ The data set has been manually quality checked, but there might still be errors.
40
+
41
+ The audio files are sampled at rate of 16KHz, and leading and trailing silences are trimmed using torchaudio's voice activity detection.
42
+ """
43
+
44
+ # Official homepage for the dataset
45
+ _HOMEPAGE = "https://www.openslr.org/54/"
46
+
47
+ # The licence for the dataset
48
+ _LICENSE = "license:cc-by-sa-4.0"
49
+
50
+ # TODO: Add link to the official dataset URLs here
51
+ # The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
52
+ # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
53
+ _URLS = {
54
+ "index_file": "https://huggingface.co/datasets/spktsagar/openslr-nepali-asr-cleaned/resolve/main/data/utt_spk_text.tsv",
55
+ "zipfiles": [
56
+ f"https://huggingface.co/datasets/spktsagar/openslr-nepali-asr-cleaned/resolve/main/data/asr_nepali_{k}.zip"
57
+ for k in [*range(10), *'abcdef']
58
+ ],
59
+ }
60
+
61
+
62
class OpenslrNepaliAsrCleaned(datasets.GeneratorBasedBuilder):
    """Loader for the cleaned (silence-trimmed) OpenSLR Large Nepali ASR dataset.

    Each example joins a row of the utt_spk_text.tsv index (utterance id,
    anonymized speaker id, transcription) with the path of the matching
    16 kHz flac file extracted from one of the 16 zip shards.
    """

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="all",
            version=VERSION,
            description=(
                "All cleaned utterances, speaker id and transcription from "
                "Openslr Large Nepali ASR Dataset"
            ),
        ),
    ]

    # Only one configuration exists, so it is also the default.
    DEFAULT_CONFIG_NAME = "all"

    def _info(self):
        """Return dataset metadata: feature schema, homepage, license, citation,
        and the ASR task template mapping audio to transcription columns."""
        features = datasets.Features(
            {
                "utterance_id": datasets.Value("string"),
                "speaker_id": datasets.Value("string"),
                # Audio feature decodes the flac path yielded by
                # _generate_examples; corpus is sampled at 16 kHz.
                "utterance": datasets.Audio(sampling_rate=16000),
                "transcription": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
            task_templates=[
                datasets.tasks.AutomaticSpeechRecognition(
                    audio_column="utterance", transcription_column="transcription"
                )
            ],
        )

    def _split_generators(self, dl_manager):
        """Download the TSV index and all 16 audio shards.

        Returns a single TRAIN split: the upstream corpus ships no
        predefined train/dev/test partition.
        """
        index_file = dl_manager.download(_URLS["index_file"])
        # Each shard URL ends in "asr_nepali_<k>.zip"; the character at
        # position -5 is <k>, the hex digit that keys the shard. Map that
        # digit to the shard's extraction directory so _generate_examples
        # can locate any utterance by the first character of its id.
        shard_keys = [url[-5] for url in _URLS["zipfiles"]]
        extracted_dirs = dl_manager.download_and_extract(_URLS["zipfiles"])
        audio_paths = dict(zip(shard_keys, extracted_dirs))
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "index_file": index_file,
                    "audio_paths": audio_paths,
                },
            ),
        ]

    def _generate_examples(self, index_file, audio_paths):
        """Yield (key, example) pairs by joining the index with audio paths.

        Args:
            index_file: local path to the tab-separated index whose header
                row provides the columns Utterance, Speaker and Text.
            audio_paths: mapping from the first character of an utterance id
                to the extraction directory of the zip shard containing it.

        Yields:
            Tuples of (running integer key, example dict) where "utterance"
            is the flac file path for the Audio feature to decode.
        """
        with open(index_file, encoding="utf-8") as tsv:
            reader = csv.DictReader(tsv, delimiter="\t")
            for key, row in enumerate(reader):
                utt = row["Utterance"]
                # Inside each extracted shard, files live under
                # cleaned/asr_nepali/data/<first two chars of id>/<id>.flac
                path = (
                    f"{audio_paths[utt[0]]}/cleaned/asr_nepali/data/"
                    f"{utt[:2]}/{utt}.flac"
                )
                yield key, {
                    "utterance_id": utt,
                    "speaker_id": row["Speaker"],
                    "utterance": path,
                    "transcription": row["Text"],
                }