RodrigoLimaRFL committed
Commit 96fd80c · verified · 1 Parent(s): 7556b48

Update NURC-SP_ENTOA_TTS.py

Files changed (1)
  1. NURC-SP_ENTOA_TTS.py +159 -162
NURC-SP_ENTOA_TTS.py CHANGED
@@ -1,162 +1,159 @@
-import csv
-import datasets
-from datasets import BuilderConfig, GeneratorBasedBuilder, DatasetInfo, SplitGenerator, Split
-
-_PROSODIC_PROMPTS_URLS = {
-    "validation": "prosodic/audios_dev_metadata.csv",
-    "train": "prosodic/audios_train_metadata.csv",
-}
-
-_AUTOMATIC_PROMPTS_URLS = {
-    "validation": "automatic/audios_dev_metadata.csv",
-    "train": "automatic/audios_train_metadata.csv",
-}
-
-_ARCHIVES = {
-    "validation_prosodic": "prosodic/audios_dev.zip",
-    "train_prosodic": "prosodic/audios_train.zip",
-    "validation_automatic": "automatic/audios_dev.zip",
-    "train_automatic": "automatic/audios_train.zip",
-}
-
-_PATH_TO_CLIPS = {
-    "validation_prosodic": "prosodic/audios_dev",
-    "train_prosodic": "prosodic/audios_train",
-    "validation_automatic": "automatic/audios_dev",
-    "train_automatic": "automatic/audios_train",
-}
-
-class EntoaConfig(BuilderConfig):
-    def __init__(self, prompts_type="prosodic", **kwargs):
-        super().__init__(**kwargs)
-        self.prompts_type = prompts_type
-
-class EntoaDataset(GeneratorBasedBuilder):
-    BUILDER_CONFIGS = [
-        EntoaConfig(name="prosodic", description="Prosodic audio prompts", prompts_type="prosodic"),
-        EntoaConfig(name="automatic", description="Automatic audio prompts", prompts_type="automatic"),
-    ]
-
-    def _info(self):
-        if self.config.name == "prosodic":
-            features = datasets.Features(
-                {
-                    "path": datasets.Value("string"),
-                    "name": datasets.Value("string"),
-                    "speaker": datasets.Value("string"),
-                    "start_time": datasets.Value("string"),
-                    "end_time": datasets.Value("string"),
-                    "normalized_text": datasets.Value("string"),
-                    "text": datasets.Value("string"),
-                    "duration": datasets.Value("string"),
-                    "type": datasets.Value("string"),
-                    "year": datasets.Value("string"),
-                    "gender": datasets.Value("string"),
-                    "age_range": datasets.Value("string"),
-                    "total_duration": datasets.Value("string"),
-                    "quality": datasets.Value("string"),
-                    "theme": datasets.Value("string"),
-                    "audio": datasets.Audio(sampling_rate=16_000),
-                }
-            )
-        else: # automatic
-            features = datasets.Features(
-                {
-                    "audio_name": datasets.Value("string"),
-                    "file_path": datasets.Value("string"),
-                    "text": datasets.Value("string"),
-                    "start_time": datasets.Value("string"),
-                    "end_time": datasets.Value("string"),
-                    "duration": datasets.Value("string"),
-                    "quality": datasets.Value("string"),
-                    "speech_genre": datasets.Value("string"),
-                    "speech_style": datasets.Value("string"),
-                    "variety": datasets.Value("string"),
-                    "accent": datasets.Value("string"),
-                    "sex": datasets.Value("string"),
-                    "age_range": datasets.Value("string"),
-                    "num_speakers": datasets.Value("string"),
-                    "speaker_id": datasets.Value("string"),
-                    "audio": datasets.Audio(sampling_rate=16_000),
-                }
-            )
-        return DatasetInfo(features=features)
-
-    def _split_generators(self, dl_manager):
-        prompts_urls = _PROSODIC_PROMPTS_URLS if self.config.name == "prosodic" else _AUTOMATIC_PROMPTS_URLS
-        path_to_clips = _PATH_TO_CLIPS
-        archive = dl_manager.download(_ARCHIVES)
-        prompts_path = dl_manager.download(prompts_urls)
-
-        return [
-            SplitGenerator(
-                name=Split.VALIDATION,
-                gen_kwargs={
-                    "prompts_path": prompts_path["validation"],
-                    "path_to_clips": path_to_clips[f"validation_{self.config.name}"],
-                    "audio_files": dl_manager.iter_archive(archive[f"validation_{self.config.name}"]),
-                },
-            ),
-            SplitGenerator(
-                name=Split.TRAIN,
-                gen_kwargs={
-                    "prompts_path": prompts_path["train"],
-                    "path_to_clips": path_to_clips[f"train_{self.config.name}"],
-                    "audio_files": dl_manager.iter_archive(archive[f"train_{self.config.name}"]),
-                },
-            ),
-        ]
-
-    def _generate_examples(self, prompts_path, path_to_clips, audio_files):
-        examples = {}
-        with open(prompts_path, "r") as f:
-            csv_reader = csv.DictReader(f)
-            for row in csv_reader:
-                if self.config.name == "prosodic":
-                    examples[row['path']] = {
-                        "path": row['path'],
-                        "name": row['name'],
-                        "speaker": row['speaker'],
-                        "start_time": row['start_time'],
-                        "end_time": row['end_time'],
-                        "normalized_text": row['normalized_text'],
-                        "text": row['text'],
-                        "duration": row['duration'],
-                        "type": row['type'],
-                        "year": row['year'],
-                        "gender": row['gender'],
-                        "age_range": row['age_range'],
-                        "total_duration": row['total_duration'],
-                        "quality": row['quality'],
-                        "theme": row['theme'],
-                    }
-                else: # automatic
-                    examples[row['file_path']] = {
-                        "audio_name": row['audio_name'],
-                        "file_path": row['file_path'],
-                        "text": row['text'],
-                        "start_time": row['start_time'],
-                        "end_time": row['end_time'],
-                        "duration": row['duration'],
-                        "quality": row['quality'],
-                        "speech_genre": row['speech_genre'],
-                        "speech_style": row['speech_style'],
-                        "variety": row['variety'],
-                        "accent": row['accent'],
-                        "sex": row['sex'],
-                        "age_range": row['age_range'],
-                        "num_speakers": row['num_speakers'],
-                        "speaker_id": row['speaker_id'],
-                    }
-
-        id_ = 0
-        inside_clips_dir = False
-        for path, f in audio_files:
-            if path.startswith(path_to_clips):
-                inside_clips_dir = True
-                if path in examples:
-                    audio = {"path": path, "bytes": f.read()}
-                    yield id_, {**examples[path], "audio": audio}
-                    id_ += 1
-            elif inside_clips_dir:
-                break
+import csv
+import datasets
+from datasets import BuilderConfig, GeneratorBasedBuilder, DatasetInfo, SplitGenerator, Split
+
+_PROSODIC_PROMPTS_URLS = {
+    "validation": "prosodic/audios_dev_metadata.csv",
+    "train": "prosodic/audios_train_metadata.csv",
+}
+
+_AUTOMATIC_PROMPTS_URLS = {
+    "validation": "automatic/audios_dev_metadata.csv",
+    "train": "automatic/audios_train_metadata.csv",
+}
+
+_ARCHIVES = {
+    "prosodic": "prosodic/audios.tar.gz",
+    "automatic": "automatic/audios.tar.gz",
+}
+
+_PATH_TO_CLIPS = {
+    "validation_prosodic": "prosodic/audios_dev",
+    "train_prosodic": "prosodic/audios_train",
+    "validation_automatic": "automatic/audios_dev",
+    "train_automatic": "automatic/audios_train",
+}
+
+class EntoaConfig(BuilderConfig):
+    def __init__(self, prompts_type="prosodic", **kwargs):
+        super().__init__(**kwargs)
+        self.prompts_type = prompts_type
+
+class EntoaDataset(GeneratorBasedBuilder):
+    BUILDER_CONFIGS = [
+        EntoaConfig(name="prosodic", description="Prosodic audio prompts", prompts_type="prosodic"),
+        EntoaConfig(name="automatic", description="Automatic audio prompts", prompts_type="automatic"),
+    ]
+
+    def _info(self):
+        if self.config.name == "prosodic":
+            features = datasets.Features(
+                {
+                    "path": datasets.Value("string"),
+                    "name": datasets.Value("string"),
+                    "speaker": datasets.Value("string"),
+                    "start_time": datasets.Value("string"),
+                    "end_time": datasets.Value("string"),
+                    "normalized_text": datasets.Value("string"),
+                    "text": datasets.Value("string"),
+                    "duration": datasets.Value("string"),
+                    "type": datasets.Value("string"),
+                    "year": datasets.Value("string"),
+                    "gender": datasets.Value("string"),
+                    "age_range": datasets.Value("string"),
+                    "total_duration": datasets.Value("string"),
+                    "quality": datasets.Value("string"),
+                    "theme": datasets.Value("string"),
+                    "audio": datasets.Audio(sampling_rate=16_000),
+                }
+            )
+        else: # automatic
+            features = datasets.Features(
+                {
+                    "audio_name": datasets.Value("string"),
+                    "file_path": datasets.Value("string"),
+                    "text": datasets.Value("string"),
+                    "start_time": datasets.Value("string"),
+                    "end_time": datasets.Value("string"),
+                    "duration": datasets.Value("string"),
+                    "quality": datasets.Value("string"),
+                    "speech_genre": datasets.Value("string"),
+                    "speech_style": datasets.Value("string"),
+                    "variety": datasets.Value("string"),
+                    "accent": datasets.Value("string"),
+                    "sex": datasets.Value("string"),
+                    "age_range": datasets.Value("string"),
+                    "num_speakers": datasets.Value("string"),
+                    "speaker_id": datasets.Value("string"),
+                    "audio": datasets.Audio(sampling_rate=16_000),
+                }
+            )
+        return DatasetInfo(features=features)
+
+    def _split_generators(self, dl_manager):
+        prompts_urls = _PROSODIC_PROMPTS_URLS if self.config.name == "prosodic" else _AUTOMATIC_PROMPTS_URLS
+        archive = dl_manager.download(_ARCHIVES[self.config.name])
+        prompts_path = dl_manager.download(prompts_urls)
+
+        return [
+            SplitGenerator(
+                name=Split.VALIDATION,
+                gen_kwargs={
+                    "prompts_path": prompts_path["validation"],
+                    "path_to_clips": _PATH_TO_CLIPS[f"validation_{self.config.name}"],
+                    "audio_files": dl_manager.iter_archive(archive),
+                },
+            ),
+            SplitGenerator(
+                name=Split.TRAIN,
+                gen_kwargs={
+                    "prompts_path": prompts_path["train"],
+                    "path_to_clips": _PATH_TO_CLIPS[f"train_{self.config.name}"],
+                    "audio_files": dl_manager.iter_archive(archive),
+                },
+            ),
+        ]
+
+    def _generate_examples(self, prompts_path, path_to_clips, audio_files):
+        examples = {}
+        with open(prompts_path, "r") as f:
+            csv_reader = csv.DictReader(f)
+            for row in csv_reader:
+                if self.config.name == "prosodic":
+                    examples[row['path']] = {
+                        "path": row['path'],
+                        "name": row['name'],
+                        "speaker": row['speaker'],
+                        "start_time": row['start_time'],
+                        "end_time": row['end_time'],
+                        "normalized_text": row['normalized_text'],
+                        "text": row['text'],
+                        "duration": row['duration'],
+                        "type": row['type'],
+                        "year": row['year'],
+                        "gender": row['gender'],
+                        "age_range": row['age_range'],
+                        "total_duration": row['total_duration'],
+                        "quality": row['quality'],
+                        "theme": row['theme'],
+                    }
+                else: # automatic
+                    examples[row['file_path']] = {
+                        "audio_name": row['audio_name'],
+                        "file_path": row['file_path'],
+                        "text": row['text'],
+                        "start_time": row['start_time'],
+                        "end_time": row['end_time'],
+                        "duration": row['duration'],
+                        "quality": row['quality'],
+                        "speech_genre": row['speech_genre'],
+                        "speech_style": row['speech_style'],
+                        "variety": row['variety'],
+                        "accent": row['accent'],
+                        "sex": row['sex'],
+                        "age_range": row['age_range'],
+                        "num_speakers": row['num_speakers'],
+                        "speaker_id": row['speaker_id'],
+                    }
+
+        id_ = 0
+        inside_clips_dir = False
+        for path, f in audio_files:
+            if path.startswith(path_to_clips):
+                inside_clips_dir = True
+                if path in examples:
+                    audio = {"path": path, "bytes": f.read()}
+                    yield id_, {**examples[path], "audio": audio}
+                    id_ += 1
+            elif inside_clips_dir:
+                break
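With this version of the script, each configuration ("prosodic" or "automatic") downloads a single tar.gz archive and both splits stream their clips from it via iter_archive, filtering by the split's clip directory. A minimal usage sketch is shown below; the repository id "RodrigoLimaRFL/NURC-SP_ENTOA_TTS" is an assumption (substitute the actual Hub path), and trust_remote_code=True is only needed on datasets versions that require it for script-based datasets.

from datasets import load_dataset

# Load the "prosodic" configuration defined in BUILDER_CONFIGS ("automatic" is the other option).
# NOTE: "RodrigoLimaRFL/NURC-SP_ENTOA_TTS" is an assumed repository id; replace with the real Hub path.
ds = load_dataset(
    "RodrigoLimaRFL/NURC-SP_ENTOA_TTS",
    "prosodic",
    trust_remote_code=True,  # required for datasets that ship a loading script
)

sample = ds["train"][0]
print(sample["text"])                    # transcription taken from the metadata CSV
print(sample["audio"]["sampling_rate"])  # 16000, as declared in _info()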