pere committed on
Commit c48ac53 · verified · 1 Parent(s): 1ad0af4

Create distil_raw_ncc_speech_v7_compact1.py

Files changed (1):
  1. distil_raw_ncc_speech_v7_compact1.py +197 -0
distil_raw_ncc_speech_v7_compact1.py ADDED
@@ -0,0 +1,197 @@
import json

import datasets
from huggingface_hub import hf_hub_download

_CITATION = """\
# Citation details
"""

_DESCRIPTION = """\
This dataset was created from NB deposit recordings.
"""

_HOMEPAGE = "https://ai.nb.no"

# The repository ID for the dataset
_REPO_ID = "NbAiLab/distil_raw_ncc_speech_v7_compact1"

_SHARDS = {
    "no": {
        datasets.Split.TRAIN: 1,
        datasets.Split.VALIDATION: 1,
        datasets.Split.TEST: 1,
    },
}

_SOURCES = ["audio_books_nn", "audio_books_no", "clean_audio_books_no",
            "clean_stortinget_no", "norwegian_fleurs", "nrk_no", "nst",
            "stortinget_no"]

# Register one single-shard validation and test split per source.
_SHARDS["no"].update({f"validation_{source}": 1 for source in _SOURCES})
_SHARDS["no"].update({f"test_{source}": 1 for source in _SOURCES})


class distil_raw_ncc_speech_v7_compact1Config(datasets.BuilderConfig):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)


class distil_raw_ncc_speech_v7_compact1(datasets.GeneratorBasedBuilder):
    DEFAULT_WRITER_BATCH_SIZE = 1000
    BUILDER_CONFIGS = [
        distil_raw_ncc_speech_v7_compact1Config(
            name="no",
            version=datasets.Version("1.0.1"),
            description="ncc_speech Norwegian Compact 1",
        ),
    ]

    def __init__(self, *args, post_processors=None, **kwargs):
        # Normalize post_processors to a list; the default None becomes
        # [None], which makes _generate_examples yield examples unchanged.
        if not isinstance(post_processors, (tuple, list)):
            post_processors = [post_processors]
        self.post_processors = post_processors
        super().__init__(*args, **kwargs)

    def _info(self):
        sampling_rate = 16000
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features({
                "id": datasets.Value("string"),
                "group_id": datasets.Value("string"),
                "source": datasets.Value("string"),
                "audio_language": datasets.Value("string"),
                "audio": datasets.features.Audio(sampling_rate=sampling_rate),
                "audio_duration": datasets.Value("int32"),
                "previous_text": datasets.Value("string"),
                "text_en": datasets.Value("string"),
                "text_language": datasets.Value("string"),
                "text": datasets.Value("string"),
                "timestamped_text_en": datasets.Value("string"),
                "timestamped_text": datasets.Value("string"),
                "wav2vec_wer": datasets.Value("float32"),
                "whisper_wer": datasets.Value("float32"),
                "verbosity_level": datasets.Value("int32"),
                "file": datasets.Value("string"),
                "channels": datasets.Value("int32"),
                "frequency": datasets.Value("int32"),
                "language": datasets.Value("string"),
                "task": datasets.Value("string"),
                "_post_processor": datasets.Value("string"),
            }),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        data_urls = {}
        splits = _SHARDS[self.config.name].keys()
        for split in splits:
            data_urls[split] = []
            shard_total = _SHARDS[self.config.name][split]
            for shard_idx in range(1, shard_total + 1):
                # Shards are stored as paired JSON-lines metadata files and
                # tar.gz audio archives under data/<split>/.
                folder = f"data/{split}"
                basename = f"ncc_speech_v7-no-{shard_idx:04d}-{shard_total:04d}"
                data_urls[split].append((f"{folder}/{basename}.json", f"{folder}/{basename}.tar.gz"))

        return [
            datasets.SplitGenerator(
                name=split, gen_kwargs={
                    "filepaths": data_urls[split],
                    "dl_manager": dl_manager,
                }
            ) for split in splits
        ]
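
    # Note (illustrative, derived from the path construction above): with the
    # single-shard splits declared in _SHARDS, the repository is expected to
    # contain pairs of files such as
    #
    #   data/train/ncc_speech_v7-no-0001-0001.json     <- JSON-lines metadata
    #   data/train/ncc_speech_v7-no-0001-0001.tar.gz   <- audio files named <id>.mp3
    #   data/validation/ncc_speech_v7-no-0001-0001.json
    #   data/validation/ncc_speech_v7-no-0001-0001.tar.gz
    #   ...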

    def _generate_examples(self, filepaths, dl_manager):
        data_fields = list(self._info().features.keys())
        data_fields.remove("audio")

        for metadata_path, archive_path in filepaths:
            metadata_content = {}

            # Download the JSON-lines metadata file via the Hugging Face Hub API.
            metadata_file_path = hf_hub_download(
                repo_id=_REPO_ID,
                filename=metadata_path,
                repo_type="dataset",
                token=True,
            )

            with open(metadata_file_path, "r", encoding="utf-8") as metadata_file:
                for record in metadata_file:
                    record = record.strip()
                    if not record:
                        continue  # Skip empty lines
                    metadata_object = json.loads(record)
                    metadata_content[metadata_object["id"]] = metadata_object

            # Download the audio archive, then stream its members with iter_archive.
            archive_file_path = hf_hub_download(
                repo_id=_REPO_ID,
                filename=archive_path,
                repo_type="dataset",
                token=True,
            )

            for filename, file_handle in dl_manager.iter_archive(archive_file_path):
                if not filename.endswith(".mp3"):
                    continue
                metadata_key = filename.replace(".mp3", "")
                if metadata_key not in metadata_content:
                    continue

                # Extract the relevant metadata fields for this entry.
                fields = {key: metadata_content[metadata_key].get(key, "") for key in data_fields}
                fields["file"] = fields["id"] + ".mp3"
                fields["channels"] = 1
                fields["frequency"] = 16000
                fields["task"] = "transcribe"
                fields["language"] = fields["text_language"]
                fields["_post_processor"] = None

                audio_bytes = file_handle.read()
                metadata_dict = {
                    "id": metadata_key,
                    "audio": {"bytes": audio_bytes, "path": filename},
                    **fields,
                }

                for func in self.post_processors:
                    if func is None:
                        yield metadata_key, metadata_dict
                    else:
                        # Lambdas all share __name__ == "<lambda>", so fall back
                        # to an id-based name to keep example keys unique.
                        func_name = func.__name__ if func.__name__ != "<lambda>" else hex(id(func)).replace("0x", "lambda-")
                        result = func(metadata_dict)
                        if result:
                            result["_post_processor"] = func_name
                            yield f"{metadata_key}_{func_name}", result
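
For reference, a minimal sketch of how this loading script could be consumed once the commit is live. The "no" config name and the post_processors hook come from the script above; drop_empty_text is a hypothetical post-processor, and this assumes a recent datasets release where load_dataset accepts trust_remote_code and forwards extra keyword arguments to the builder:

import datasets

def drop_empty_text(example):
    # Hypothetical post-processor: return None to drop an example, or a
    # (possibly modified) dict to emit it as an additional row.
    return example if example["text"] else None

# Streaming avoids materializing the audio archives locally. You must be
# logged in (huggingface-cli login), since the script downloads with token=True.
ds = datasets.load_dataset(
    "NbAiLab/distil_raw_ncc_speech_v7_compact1",
    "no",
    split="train",
    streaming=True,
    trust_remote_code=True,
    post_processors=[None, drop_empty_text],  # yields original + processed rows
)

for example in ds:
    print(example["id"], example["_post_processor"])
    break

Passing [None, drop_empty_text] illustrates the script's convention: a None entry re-emits each example unchanged, while each callable yields an extra keyed row tagged with its name in _post_processor.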