import csv

import datasets
from datasets import BuilderConfig, GeneratorBasedBuilder, DatasetInfo, SplitGenerator, Split

_PROSODIC_PROMPTS_URLS = {
    "validation": "prosodic/validation.csv",
    "train": "prosodic/train.csv",
}

_AUTOMATIC_PROMPTS_URLS = {
    "validation": "automatic/validation.csv",
    "train": "automatic/train.csv",
}

_ARCHIVES = {
    "prosodic": "prosodic/audios.tar.gz",
    "automatic": "automatic/audios.tar.gz",
}

_PATH_TO_CLIPS = {
    "validation_prosodic": "audios",
    "train_prosodic": "audios",
    "validation_automatic": "audios/validation",
    "train_automatic": "audios/train",
}
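
# Layout note (inferred from the mappings above, not verified against the archives):
# the prosodic archive keeps clips for both splits directly under "audios/", while the
# automatic archive separates them into "audios/validation/" and "audios/train/".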


def debug_path_matching(csv_path, archive_files):
    """
    Debug utility to compare paths between the CSV metadata and the audio archive.

    Returns the set of paths found in the CSV, the set of paths found in the
    archive, and a mapping from each archive path to the CSV paths it matches.
    """
    from collections import defaultdict

    # Collect every path referenced by the CSV, both as written and as a bare filename.
    csv_paths = set()
    with open(csv_path, "r") as f:
        reader = csv.DictReader(f)
        for row in reader:
            path = row.get("path") or row.get("file_path")
            if not path:
                continue
            csv_paths.add(path)
            csv_paths.add(path.split("/")[-1])

    # Collect every path found in the archive, both as written and as a bare filename,
    # and record which CSV paths each archive path matches (suffix match in either direction).
    archive_paths = set()
    matches = defaultdict(list)
    for path, _ in archive_files:
        archive_paths.add(path)
        archive_paths.add(path.split("/")[-1])
        for candidate in csv_paths:
            if path.endswith(candidate) or candidate.endswith(path):
                matches[path].append(candidate)

    print("=== Debug Report ===")
    print(f"CSV Paths: {len(csv_paths)}")
    print(f"Archive Paths: {len(archive_paths)}")
    print(f"Matched Paths: {len(matches)}")
    print("\nSample CSV paths:")
    for path in list(csv_paths)[:5]:
        print(f"  {path}")
    print("\nSample Archive paths:")
    for path in list(archive_paths)[:5]:
        print(f"  {path}")
    print("\nSample Matches:")
    for archive_path, matched in list(matches.items())[:5]:
        print(f"  Archive: {archive_path}")
        print(f"  CSV: {matched}")
        print()

    return csv_paths, archive_paths, matches
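
# Standalone usage sketch (assumes local copies of a prompts CSV and the matching
# audios.tar.gz; the paths below mirror the relative URLs above and are placeholders):
#
#   import tarfile
#
#   def iter_tar(tar_path):
#       with tarfile.open(tar_path) as tar:
#           for member in tar:
#               if member.isfile():
#                   yield member.name, tar.extractfile(member)
#
#   debug_path_matching("prosodic/validation.csv", iter_tar("prosodic/audios.tar.gz"))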


class EntoaConfig(BuilderConfig):
    def __init__(self, prompts_type="prosodic", **kwargs):
        super().__init__(**kwargs)
        self.prompts_type = prompts_type


class EntoaDataset(GeneratorBasedBuilder):
    BUILDER_CONFIGS = [
        EntoaConfig(name="prosodic", description="Prosodic audio prompts", prompts_type="prosodic"),
        EntoaConfig(name="automatic", description="Automatic audio prompts", prompts_type="automatic"),
    ]
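
    # The selected config name ("prosodic" or "automatic") determines which prompt
    # CSVs and audio archive are downloaded and which feature schema _info() returns.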
    def _info(self):
        if self.config.name == "prosodic":
            features = datasets.Features(
                {
                    "path": datasets.Value("string"),
                    "name": datasets.Value("string"),
                    "speaker": datasets.Value("string"),
                    "start_time": datasets.Value("string"),
                    "end_time": datasets.Value("string"),
                    "normalized_text": datasets.Value("string"),
                    "text": datasets.Value("string"),
                    "duration": datasets.Value("string"),
                    "type": datasets.Value("string"),
                    "year": datasets.Value("string"),
                    "gender": datasets.Value("string"),
                    "age_range": datasets.Value("string"),
                    "total_duration": datasets.Value("string"),
                    "quality": datasets.Value("string"),
                    "theme": datasets.Value("string"),
                    "audio": datasets.Audio(sampling_rate=16_000),
                }
            )
        else:
            features = datasets.Features(
                {
                    "audio_name": datasets.Value("string"),
                    "file_path": datasets.Value("string"),
                    "text": datasets.Value("string"),
                    "start_time": datasets.Value("string"),
                    "end_time": datasets.Value("string"),
                    "duration": datasets.Value("string"),
                    "quality": datasets.Value("string"),
                    "speech_genre": datasets.Value("string"),
                    "speech_style": datasets.Value("string"),
                    "variety": datasets.Value("string"),
                    "accent": datasets.Value("string"),
                    "sex": datasets.Value("string"),
                    "age_range": datasets.Value("string"),
                    "num_speakers": datasets.Value("string"),
                    "speaker_id": datasets.Value("string"),
                    "audio": datasets.Audio(sampling_rate=16_000),
                }
            )
        return DatasetInfo(features=features)

    def _split_generators(self, dl_manager):
        prompts_urls = _PROSODIC_PROMPTS_URLS if self.config.name == "prosodic" else _AUTOMATIC_PROMPTS_URLS
        archive = dl_manager.download(_ARCHIVES[self.config.name])
        prompts_path = dl_manager.download(prompts_urls)

        print(f"Downloaded prompts: {prompts_path}")
        print(f"Downloaded archive: {archive}")

        # Both splits read from the same downloaded archive; each split generator gets
        # its own prompts CSV plus its own iterator over the archive members.
        return [
            SplitGenerator(
                name=Split.VALIDATION,
                gen_kwargs={
                    "prompts_path": prompts_path["validation"],
                    "path_to_clips": _PATH_TO_CLIPS[f"validation_{self.config.name}"],
                    "audio_files": dl_manager.iter_archive(archive),
                },
            ),
            SplitGenerator(
                name=Split.TRAIN,
                gen_kwargs={
                    "prompts_path": prompts_path["train"],
                    "path_to_clips": _PATH_TO_CLIPS[f"train_{self.config.name}"],
                    "audio_files": dl_manager.iter_archive(archive),
                },
            ),
        ]

    def _generate_examples(self, prompts_path, path_to_clips, audio_files):
        # Print a path-matching report up front. This assumes the object returned by
        # dl_manager.iter_archive() can be iterated more than once; if it were a
        # one-shot generator, this call would exhaust it and nothing would be yielded below.
        debug_path_matching(prompts_path, audio_files)

        # Index the CSV metadata by its relative audio path so each archive member
        # can be joined with its metadata row.
        examples = {}
        with open(prompts_path, "r") as f:
            csv_reader = csv.DictReader(f)
            for row in csv_reader:
                if self.config.name == "prosodic":
                    examples[row["path"]] = {
                        "path": row["path"],
                        "name": row["name"],
                        "speaker": row["speaker"],
                        "start_time": row["start_time"],
                        "end_time": row["end_time"],
                        "normalized_text": row["normalized_text"],
                        "text": row["text"],
                        "duration": row["duration"],
                        "type": row["type"],
                        "year": row["year"],
                        "gender": row["gender"],
                        "age_range": row["age_range"],
                        "total_duration": row["total_duration"],
                        "quality": row["quality"],
                        "theme": row["theme"],
                    }
                else:
                    examples[row["file_path"]] = {
                        "audio_name": row["audio_name"],
                        "file_path": row["file_path"],
                        "text": row["text"],
                        "start_time": row["start_time"],
                        "end_time": row["end_time"],
                        "duration": row["duration"],
                        "quality": row["quality"],
                        "speech_genre": row["speech_genre"],
                        "speech_style": row["speech_style"],
                        "variety": row["variety"],
                        "accent": row["accent"],
                        "sex": row["sex"],
                        "age_range": row["age_range"],
                        "num_speakers": row["num_speakers"],
                        "speaker_id": row["speaker_id"],
                    }

        # Walk the archive and yield an example for every clip that has a metadata row.
        id_ = 0
        inside_clips_dir = False
        for path, f in audio_files:
            if path.startswith(path_to_clips):
                inside_clips_dir = True
                if path in examples:
                    print(f"Match found for: {path}")
                    audio = {"path": path, "bytes": f.read()}
                    yield id_, {**examples[path], "audio": audio}
                    id_ += 1
                else:
                    print(f"No match for: {path}")
            elif inside_clips_dir:
                # Archive members are assumed to be grouped by directory, so once the
                # clips directory has been passed there is nothing left to yield.
                break

        print(f"Completed generating examples. Total examples: {id_}")