import csv
import datasets
from datasets import BuilderConfig, GeneratorBasedBuilder, DatasetInfo, SplitGenerator, Split

# Metadata CSVs for the original (unfiltered) prompts, one per split.
_PROMPTS_URLS = {
    "dev": "original/audios_dev_metadata.csv",
    "test": "original/audios_test_metadata.csv",
    "train": "original/audios_train_metadata.csv",
}

# Metadata CSVs for the filtered prompts.
_PROMPTS_FILTERED_URLS = {
    "dev": "filtered/audios_dev_metadata.csv",
    "test": "filtered/audios_test_metadata.csv",
    "train": "filtered/audios_train_metadata.csv",
}

# Audio archives, one per split.
_ARCHIVES = {
    "dev": "dev.tar.gz",
    "test": "test.tar.gz",
    "train": "train.tar.gz",
}

# Directory inside each archive that holds that split's clips.
_PATH_TO_CLIPS = {
    "dev": "dev",
    "test": "test",
    "train": "train",
}
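
# Note (assumption from the code below, not an external spec): each metadata CSV is
# expected to provide one row per utterance with the columns read in _generate_examples:
# audio_name, file_path, text, start_time, end_time, duration, quality, speech_genre,
# speech_style, variety, accent, sex, age_range, num_speakers and speaker_id, where
# "file_path" matches the member path of the clip inside the corresponding archive.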

class NurcSPConfig(BuilderConfig):
    """BuilderConfig selecting between the original and the filtered prompt metadata."""

    def __init__(self, prompts_type="original", **kwargs):
        super().__init__(**kwargs)
        self.prompts_type = prompts_type


class NurcSPDataset(GeneratorBasedBuilder):
    BUILDER_CONFIGS = [
        NurcSPConfig(name="original", description="Original audio prompts", prompts_type="original"),
        NurcSPConfig(name="filtered", description="Filtered audio prompts", prompts_type="filtered"),
    ]

    def _info(self):
        return DatasetInfo(
            features=datasets.Features(
                {
                    "audio_name": datasets.Value("string"),
                    "file_path": datasets.Value("string"),
                    "text": datasets.Value("string"),
                    "start_time": datasets.Value("string"),
                    "end_time": datasets.Value("string"),
                    "duration": datasets.Value("string"),
                    "quality": datasets.Value("string"),
                    "speech_genre": datasets.Value("string"),
                    "speech_style": datasets.Value("string"),
                    "variety": datasets.Value("string"),
                    "accent": datasets.Value("string"),
                    "sex": datasets.Value("string"),
                    "age_range": datasets.Value("string"),
                    "num_speakers": datasets.Value("string"),
                    "speaker_id": datasets.Value("string"),
                    # Audio is decoded from the archived bytes at 16 kHz.
                    "audio": datasets.Audio(sampling_rate=16_000),
                }
            )
        )

    def _split_generators(self, dl_manager):
        # Default to the original prompts; switch to the filtered metadata when requested.
        prompts_urls = _PROMPTS_URLS
        if self.config.prompts_type == "filtered":
            prompts_urls = _PROMPTS_FILTERED_URLS
        prompts_path = dl_manager.download(prompts_urls)
        archive = dl_manager.download(_ARCHIVES)
        return [
            SplitGenerator(
                name=Split.VALIDATION,
                gen_kwargs={
                    "prompts_path": prompts_path["dev"],
                    "path_to_clips": _PATH_TO_CLIPS["dev"],
                    "audio_files": dl_manager.iter_archive(archive["dev"]),
                },
            ),
            SplitGenerator(
                name=Split.TEST,
                gen_kwargs={
                    "prompts_path": prompts_path["test"],
                    "path_to_clips": _PATH_TO_CLIPS["test"],
                    "audio_files": dl_manager.iter_archive(archive["test"]),
                },
            ),
            SplitGenerator(
                name=Split.TRAIN,
                gen_kwargs={
                    "prompts_path": prompts_path["train"],
                    "path_to_clips": _PATH_TO_CLIPS["train"],
                    "audio_files": dl_manager.iter_archive(archive["train"]),
                },
            ),
        ]

    def _generate_examples(self, prompts_path, path_to_clips, audio_files):
        # First pass: index the CSV metadata by the clip's path inside the archive.
        examples = {}
        with open(prompts_path, "r") as f:
            csv_reader = csv.DictReader(f)
            for row in csv_reader:
                examples[row["file_path"]] = {
                    "audio_name": row["audio_name"],
                    "file_path": row["file_path"],
                    "text": row["text"],
                    "start_time": row["start_time"],
                    "end_time": row["end_time"],
                    "duration": row["duration"],
                    "quality": row["quality"],
                    "speech_genre": row["speech_genre"],
                    "speech_style": row["speech_style"],
                    "variety": row["variety"],
                    "accent": row["accent"],
                    "sex": row["sex"],
                    "age_range": row["age_range"],
                    "num_speakers": row["num_speakers"],
                    "speaker_id": row["speaker_id"],
                }
        # Second pass: stream the split's tar archive and attach the raw audio bytes to
        # the matching metadata entry. Members outside the clips directory are skipped;
        # once that directory has been fully consumed, iteration stops early.
        inside_clips_dir = False
        id_ = 0
        for path, f in audio_files:
            if path.startswith(path_to_clips):
                inside_clips_dir = True
                if path in examples:
                    audio = {"path": path, "bytes": f.read()}
                    yield id_, {**examples[path], "audio": audio}
                    id_ += 1
            elif inside_clips_dir:
                break
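

# A minimal usage sketch, assuming this script is saved locally as "nurc_sp.py" next to
# the metadata CSVs and split archives listed above (the script filename and paths are
# illustrative assumptions, not part of this file's contract).
if __name__ == "__main__":
    from datasets import load_dataset

    # name="original" selects the unfiltered prompts; name="filtered" the filtered ones.
    nurc_sp = load_dataset("nurc_sp.py", name="filtered", trust_remote_code=True)
    sample = nurc_sp["train"][0]
    print(sample["audio_name"], sample["text"])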