Modalities: Audio, Text
Formats: parquet
Size: < 1K
Libraries: Datasets, pandas
cifkao committed
Commit: 1a16f09
Parent: 51168da

Add dataset loader script

Files changed (1):
  1. jam-alt.py +124 -0
jam-alt.py ADDED
@@ -0,0 +1,124 @@
+ """HuggingFace loading script for the JamALT dataset."""
+
+
+ import csv
+ from dataclasses import dataclass
+ import json
+ import os
+ from pathlib import Path
+ from typing import Optional
+
+ import datasets
+
+
+ # TODO: Add BibTeX citation
+ _CITATION = """\
+ """
+
+ # TODO: Add description of the dataset here
+ _DESCRIPTION = """\
+ """
+
+ # TODO: Add a link to an official homepage for the dataset here
+ _HOMEPAGE = ""
+
+ # TODO: Add the licence for the dataset here
+ _LICENSE = ""
+
+ _METADATA_FILENAME = "metadata.csv"
+
+
+ _LANGUAGE_NAME_TO_CODE = {
+     "English": "en",
+     "French": "fr",
+     "German": "de",
+     "Spanish": "es",
+ }
+
+
+ @dataclass
+ class JamAltBuilderConfig(datasets.BuilderConfig):
+     language: Optional[str] = None
+     with_audio: bool = False
+     decode_audio: bool = True
+     sampling_rate: Optional[int] = None
+     mono: bool = True
+
+
+ # TODO: Name of the dataset usually matches the script name with CamelCase instead of snake_case
+ class JamAltDataset(datasets.GeneratorBasedBuilder):
+     """TODO: Short description of my dataset."""
+
+     VERSION = datasets.Version("0.0.0")
+     BUILDER_CONFIG_CLASS = JamAltBuilderConfig
+     BUILDER_CONFIGS = [JamAltBuilderConfig("default")]
+     DEFAULT_CONFIG_NAME = "default"
+
+     def _info(self):
+         feat_dict = {
+             "name": datasets.Value("string"),
+             "text": datasets.Value("string"),
+             "language": datasets.Value("string"),
+         }
+         if self.config.with_audio:
+             feat_dict["audio"] = datasets.Audio(
+                 decode=self.config.decode_audio,
+                 sampling_rate=self.config.sampling_rate,
+                 mono=self.config.mono,
+             )
+
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features(feat_dict),
+             supervised_keys=("audio", "text") if "audio" in feat_dict else None,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         metadata_path = dl_manager.download(_METADATA_FILENAME)
+
+         audio_paths, text_paths, metadata = [], [], []
+         with open(metadata_path, encoding="utf-8") as f:
+             for row in csv.DictReader(f):
+                 if (
+                     self.config.language is None
+                     or _LANGUAGE_NAME_TO_CODE[row["Language"]] == self.config.language
+                 ):
+                     audio_paths.append("audio/" + row["Filepath"])
+                     text_paths.append(
+                         "lyrics/" + os.path.splitext(row["Filepath"])[0] + ".txt"
+                     )
+                     metadata.append(row)
+
+         text_paths = dl_manager.download(text_paths)
+         audio_paths = (
+             dl_manager.download(audio_paths) if self.config.with_audio else None
+         )
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 gen_kwargs=dict(
+                     text_paths=text_paths,
+                     audio_paths=audio_paths,
+                     metadata=metadata,
+                 ),
+             ),
+         ]
+
+     def _generate_examples(self, text_paths, audio_paths, metadata):
+         if audio_paths is None:
+             audio_paths = [None] * len(text_paths)
+         for text_path, audio_path, meta in zip(text_paths, audio_paths, metadata):
+             name = os.path.splitext(os.path.basename(text_path))[0]
+             with open(text_path, encoding="utf-8") as text_f:
+                 record = {
+                     "name": name,
+                     "text": text_f.read(),
+                     "language": _LANGUAGE_NAME_TO_CODE[meta["Language"]],
+                 }
+                 if audio_path is not None:
+                     record["audio"] = audio_path
+                 yield name, record
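
For context, a minimal usage sketch of this loader (not part of the commit). The repository path "cifkao/jam-alt" is an assumption for illustration only; the keyword arguments are forwarded to JamAltBuilderConfig above, so language, with_audio, sampling_rate and mono control language filtering and audio decoding. The script expects the repository to contain a metadata.csv with at least Filepath and Language columns, audio files under audio/, and lyric transcripts under lyrics/. On recent versions of the datasets library, running a loading script may additionally require trust_remote_code=True.

# Usage sketch; the repo path below is assumed, not confirmed by this commit.
from datasets import load_dataset

# Text-only examples with the fields: name, text, language.
ds = load_dataset("cifkao/jam-alt", split="test")

# English subset with audio decoded on the fly as 16 kHz mono.
# The extra keyword arguments are forwarded to JamAltBuilderConfig.
ds_audio = load_dataset(
    "cifkao/jam-alt",
    split="test",
    language="en",
    with_audio=True,
    sampling_rate=16_000,
    mono=True,
)

print(ds[0]["name"], ds[0]["language"])
print(ds_audio[0]["audio"]["sampling_rate"])  # 16000 after on-the-fly resampling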