"""HuggingFace `datasets` loading script for the NURC-SP corpus.

Two configurations are exposed, "automatic" and "prosodic"; each pairs a CSV of
transcription metadata with a tar.gz archive of audio clips decoded at 16 kHz.
"""

import csv
from pathlib import Path

import datasets
from datasets import BuilderConfig, DatasetInfo, GeneratorBasedBuilder, Split, SplitGenerator
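
# The CSV and archive locations below are relative paths, presumably resolved by
# `dl_manager` against the dataset repository (or the directory of this script).
# Both splits of each configuration point at the same `audios.tar.gz` archive,
# and the `_PATH_TO_CLIPS` prefixes are empty strings, so the prefix check in
# `_generate_examples` currently matches every member of the archive.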
_PROMPTS_PROSODIC_URLS = {
    "dev": "prosodic/validation.csv",
    "train": "prosodic/train.csv",
}

_PROMPTS_AUTOMATIC_URLS = {
    "dev": "automatic/validation.csv",
    "train": "automatic/train.csv",
}

_ARCHIVES_PROSODIC = {
    "dev": "prosodic/audios.tar.gz",
    "train": "prosodic/audios.tar.gz",
}

_ARCHIVES_AUTOMATIC = {
    "dev": "automatic/audios.tar.gz",
    "train": "automatic/audios.tar.gz",
}

_PATH_TO_CLIPS = {
    "dev": "",
    "train": "",
}


class NurcSPConfig(BuilderConfig):
    """BuilderConfig carrying which transcription variant ("automatic" or "prosodic") to load."""

    def __init__(self, prompts_type, **kwargs):
        super().__init__(**kwargs)
        self.prompts_type = prompts_type


class NurcSPDataset(GeneratorBasedBuilder):
    """Builder that joins the transcription CSVs with the audio archive and yields one example per clip."""

    BUILDER_CONFIGS = [
        NurcSPConfig(name="automatic", description="Automatic audio prompts", prompts_type="automatic"),
        NurcSPConfig(name="prosodic", description="Prosodic audio prompts", prompts_type="prosodic"),
    ]

    def _info(self):
        if self.config.name == "prosodic":
            return DatasetInfo(
                features=datasets.Features(
                    {
                        "path": datasets.Value("string"),
                        "name": datasets.Value("string"),
                        "speaker": datasets.Value("string"),
                        "start_time": datasets.Value("string"),
                        "end_time": datasets.Value("string"),
                        "normalized_text": datasets.Value("string"),
                        "text": datasets.Value("string"),
                        "duration": datasets.Value("string"),
                        "type": datasets.Value("string"),
                        "year": datasets.Value("string"),
                        "gender": datasets.Value("string"),
                        "age_range": datasets.Value("string"),
                        "total_duration": datasets.Value("string"),
                        "quality": datasets.Value("string"),
                        "theme": datasets.Value("string"),
                        "audio": datasets.Audio(sampling_rate=16_000),
                    }
                )
            )
        elif self.config.name == "automatic":
            return DatasetInfo(
                features=datasets.Features(
                    {
                        "audio_name": datasets.Value("string"),
                        "file_path": datasets.Value("string"),
                        "text": datasets.Value("string"),
                        "start_time": datasets.Value("string"),
                        "end_time": datasets.Value("string"),
                        "duration": datasets.Value("string"),
                        "quality": datasets.Value("string"),
                        "speech_genre": datasets.Value("string"),
                        "speech_style": datasets.Value("string"),
                        "variety": datasets.Value("string"),
                        "accent": datasets.Value("string"),
                        "sex": datasets.Value("string"),
                        "age_range": datasets.Value("string"),
                        "num_speakers": datasets.Value("string"),
                        "speaker_id": datasets.Value("string"),
                        "audio": datasets.Audio(sampling_rate=16_000),
                    }
                )
            )

    def _split_generators(self, dl_manager):
        print("\n=== Configuration ===")
        print(f"Using prompts_type: {self.config.prompts_type}")

        if self.config.prompts_type == "prosodic":
            prompts_urls = _PROMPTS_PROSODIC_URLS
            archive_link = _ARCHIVES_PROSODIC
        elif self.config.prompts_type == "automatic":
            prompts_urls = _PROMPTS_AUTOMATIC_URLS
            archive_link = _ARCHIVES_AUTOMATIC
        else:
            raise ValueError(f"Invalid prompts_type: {self.config.prompts_type!r}")

        print(f"Downloading prompts from: {prompts_urls}")
        prompts_path = dl_manager.download(prompts_urls)
        print(f"Downloaded prompts to: {prompts_path}")

        print(f"Downloading archives from: {archive_link}")
        archive = dl_manager.download(archive_link)
        print(f"Downloaded archives to: {archive}")
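
        # Both split keys map to the same archive file, so `dl_manager.download`
        # should fetch it only once and reuse the cached copy. `iter_archive`
        # yields (path, file-object) pairs lazily, which lets `_generate_examples`
        # read the audio bytes without unpacking the whole tarball to disk.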
        return [
            SplitGenerator(
                name=Split.VALIDATION,
                gen_kwargs={
                    "prompts_path": prompts_path["dev"],
                    "path_to_clips": _PATH_TO_CLIPS["dev"],
                    "audio_files": dl_manager.iter_archive(archive["dev"]),
                    "split_name": "validation",
                },
            ),
            SplitGenerator(
                name=Split.TRAIN,
                gen_kwargs={
                    "prompts_path": prompts_path["train"],
                    "path_to_clips": _PATH_TO_CLIPS["train"],
                    "audio_files": dl_manager.iter_archive(archive["train"]),
                    "split_name": "train",
                },
            ),
        ]

    def _generate_examples(self, prompts_path, path_to_clips, audio_files, split_name):
        print(f"\n{'='*50}")
        print(f"Processing {split_name} split")
        print(f"{'='*50}")
        print(f"\nCSV Path: {prompts_path}")
        print(f"Expected clips directory: {path_to_clips}")

        examples = {}
        csv_paths = []

        print("\n=== Reading CSV ===")
        with open(prompts_path, "r", encoding="utf-8") as f:
            csv_reader = csv.DictReader(f)
            if self.config.prompts_type == "prosodic":
                for row in csv_reader:
                    file_path = Path(row["path"]).as_posix()
                    examples[file_path] = {
                        "path": row["path"],
                        "name": row["name"],
                        "speaker": row["speaker"],
                        "start_time": row["start_time"],
                        "end_time": row["end_time"],
                        "normalized_text": row["normalized_text"],
                        "text": row["text"],
                        "duration": row["duration"],
                        "type": row["type"],
                        "year": row["year"],
                        "gender": row["gender"],
                        "age_range": row["age_range"],
                        "total_duration": row["total_duration"],
                        "quality": row["quality"],
                        "theme": row["theme"],
                    }
                    csv_paths.append(file_path)
            elif self.config.prompts_type == "automatic":
                for row in csv_reader:
                    file_path = Path(row["file_path"]).as_posix()
                    examples[file_path] = {
                        "audio_name": row["audio_name"],
                        "file_path": row["file_path"],
                        "text": row["text"],
                        "start_time": row["start_time"],
                        "end_time": row["end_time"],
                        "duration": row["duration"],
                        "quality": row["quality"],
                        "speech_genre": row["speech_genre"],
                        "speech_style": row["speech_style"],
                        "variety": row["variety"],
                        "accent": row["accent"],
                        "sex": row["sex"],
                        "age_range": row["age_range"],
                        "num_speakers": row["num_speakers"],
                        "speaker_id": row["speaker_id"],
                    }
                    csv_paths.append(file_path)

        print(f"\nFound {len(csv_paths)} entries in CSV")
        print("\nFirst 3 CSV paths:")
        for path in csv_paths[:3]:
            print(f"  CSV path: {path}")
print("\n=== Processing Archive ===") |
|
inside_clips_dir = False |
|
id_ = 0 |
|
matched_files = 0 |
|
archive_paths = [] |
|
|
|
for path, f in audio_files: |
|
path = Path(path).as_posix() |
|
archive_paths.append(path) |
|
|
|
if path.startswith(path_to_clips): |
|
inside_clips_dir = True |
|
if path in examples: |
|
audio = {"path": path, "bytes": f.read()} |
|
matched_files += 1 |
|
yield id_, {**examples[path], "audio": audio} |
|
id_ += 1 |
|
|
|
print("\n=== Path Analysis ===") |
|
print("\nFirst 3 archive paths:") |
|
for path in archive_paths[:3]: |
|
print(f" Archive path: {path}") |
|
|
|
|
|
print("\nPotential matches in CSV:") |
|
for csv_path in csv_paths[:3]: |
|
print(f"\nComparing:") |
|
print(f" Archive: {path}") |
|
print(f" CSV: {csv_path}") |
|
print(f" Archive parts: {path.split('/')}") |
|
print(f" CSV parts: {csv_path.split('/')}") |
|
|
|
print(f"\n=== Summary for {split_name} split ===") |
|
print(f"Total paths in CSV: {len(csv_paths)}") |
|
print(f"Total paths found in archive: {len(archive_paths)}") |
|
print(f"Successfully matched files: {matched_files}") |
|
|
|
if matched_files == 0: |
|
print("\n!!! MATCHING FAILED !!!") |
|
print("No files were matched between CSV and archive") |
|
print("\nTroubleshooting:") |
|
print("1. Check if CSV paths start with the clip directory name") |
|
print("2. Check for case sensitivity issues") |
|
print("3. Check for extra/missing directory levels") |
|
print("4. Check path separator consistency") |