import csv
import os
import random
from glob import glob

import datasets
import numpy as np
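
# _NAMES maps each playing-technique (IPT) label used in the annotation CSVs to
# a class index; huazhi, guazou, lianmo and liantuo share one class, so the ten
# names collapse to the seven classes listed in _NAME.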
_NAMES = {
    "chanyin": 0,
    "boxian": 1,
    "shanghua": 2,
    "xiahua": 3,
    "huazhi": 4,
    "guazou": 4,
    "lianmo": 4,
    "liantuo": 4,
    "yaozhi": 5,
    "dianyin": 6,
}

_NAME = [
    "chanyin",
    "boxian",
    "shanghua",
    "xiahua",
    "huazhi/guazou/lianmo/liantuo",
    "yaozhi",
    "dianyin",
]

_HOMEPAGE = f"https://www.modelscope.cn/datasets/ccmusic-database/{os.path.basename(__file__)[:-3]}"

_DOMAIN = f"{_HOMEPAGE}/resolve/master/data"

_URLS = {
    "audio": f"{_DOMAIN}/audio.zip",
    "mel": f"{_DOMAIN}/mel.zip",
    "label": f"{_DOMAIN}/label.zip",
}

_TIME_LENGTH = 3
_SAMPLE_RATE = 44100
_HOP_LENGTH = 512


class Guzheng_Tech99(datasets.GeneratorBasedBuilder):
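    """Builder for the ccmusic-database/Guzheng_Tech99 dataset: Guzheng
    recordings with onset/offset-annotated playing techniques (IPTs)."""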
    def _info(self):
        return datasets.DatasetInfo(
            features=(
                datasets.Features(
                    {
                        "audio": datasets.Audio(sampling_rate=_SAMPLE_RATE),
                        "mel": datasets.Image(),
                        "label": datasets.Sequence(
                            feature={
                                "onset_time": datasets.Value("float32"),
                                "offset_time": datasets.Value("float32"),
                                "IPT": datasets.ClassLabel(num_classes=7, names=_NAME),
                                "note": datasets.Value("int8"),
                            }
                        ),
                    }
                )
                if self.config.name == "default"
                else datasets.Features(
                    {
                        "data": datasets.features.Array3D(
                            dtype="float32", shape=(88, 258, 1)
                        ),
                        "label": datasets.features.Array2D(
                            dtype="float32", shape=(7, 258)
                        ),
                    }
                )
            ),
            homepage=_HOMEPAGE,
            license="CC-BY-NC-ND",
            version="1.2.0",
        )

    def _RoW_norm(self, data):
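        """Return per-CQT-bin mean and standard deviation computed over all
        non-silent frames of `data`, for zero-mean/unit-variance scaling."""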
        common_sum = 0
        square_sum = 0
        tfle = 0
        for i in range(len(data)):
            tfle += (data[i].sum(-1).sum(0) != 0).astype("int").sum()
            common_sum += data[i].sum(-1).sum(-1)
            square_sum += (data[i] ** 2).sum(-1).sum(-1)

        common_avg = common_sum / tfle
        square_avg = square_sum / tfle
        std = np.sqrt(square_avg - common_avg**2)
        return common_avg, std

    def _norm(self, avg, std, data, size):
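        """Standardise `data` of shape `size` by broadcasting `avg` and `std`
        over the batch, time and channel axes."""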
        avg = np.tile(avg.reshape((1, -1, 1, 1)), (size[0], 1, size[2], size[3]))
        std = np.tile(std.reshape((1, -1, 1, 1)), (size[0], 1, size[2], size[3]))
        data = (data - avg) / std
        return data

    def _load(self, wav_dir, csv_dir, groups, avg=None, std=None):
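        """Load the audio/CSV pairs of the requested split `groups`, turn each
        recording into a log-CQT with a frame-level technique matrix, chunk
        both, and return normalised (data, label) lists for the non-default,
        pre-computed feature config."""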
        if std is None:
            std = np.array([None])
        if avg is None:
            avg = np.array([None])

        def files(wav_dir, csv_dir, group):
            flacs = sorted(glob(os.path.join(wav_dir, group, "*.flac")))
            if len(flacs) == 0:
                flacs = sorted(glob(os.path.join(wav_dir, group, "*.wav")))

            csvs = sorted(glob(os.path.join(csv_dir, group, "*.csv")))
            files = list(zip(flacs, csvs))
            if len(files) == 0:
                raise RuntimeError(f"Group {group} is empty")

            return files

        def logCQT(file):
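            # Log-scaled constant-Q transform: 88 bins starting at A0 (27.5 Hz),
            # 12 bins per octave, rescaled to roughly the [0, 1] range.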
            import librosa

            sr = _SAMPLE_RATE
            y, sr = librosa.load(file, sr=sr)
            cqt = librosa.cqt(
                y,
                sr=sr,
                hop_length=_HOP_LENGTH,
                fmin=27.5,
                n_bins=88,
                bins_per_octave=12,
            )
            return (
                (1.0 / 80.0) * librosa.core.amplitude_to_db(np.abs(cqt), ref=np.max)
            ) + 1.0

        def chunk_data(f):
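            # Split a (bins, frames) matrix into fixed-length chunks covering
            # _TIME_LENGTH seconds each, zero-padding the final chunk.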
            s = int(_SAMPLE_RATE * _TIME_LENGTH / _HOP_LENGTH)
            xdata = np.transpose(f)
            x = []
            length = int(np.ceil((int(len(xdata) / s) + 1) * s))
            app = np.zeros((length - xdata.shape[0], xdata.shape[1]))
            xdata = np.concatenate((xdata, app), 0)
            for i in range(int(length / s)):
                data = xdata[int(i * s) : int(i * s + s)]
                x.append(np.transpose(data[:s, :]))

            return np.array(x)

        def load_all(audio_path, csv_path):
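            # Compute the log-CQT of one recording and rasterise its CSV
            # annotations into a binary (7, n_frames) technique activation matrix.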
            cqt = logCQT(audio_path)
            hop = _HOP_LENGTH
            n_steps = cqt.shape[1]
            n_IPTs = 7
            technique = _NAMES
            IPT_label = np.zeros([n_IPTs, n_steps], dtype=int)
            with open(csv_path, "r") as f:
                reader = csv.DictReader(f, delimiter=",")
                for label in reader:
                    onset = float(label["onset_time"])
                    offset = float(label["offset_time"])
                    IPT = int(technique[label["IPT"]])
                    left = int(round(onset * _SAMPLE_RATE / hop))
                    frame_right = int(round(offset * _SAMPLE_RATE / hop))
                    frame_right = min(n_steps, frame_right)
                    IPT_label[IPT, left:frame_right] = 1

            return dict(
                audio_path=audio_path, csv_path=csv_path, cqt=cqt, IPT_label=IPT_label
            )

        data = []
        for group in groups:
            for input_files in files(wav_dir, csv_dir, group):
                data.append(load_all(*input_files))

        i = 0
        for dic in data:
            x = dic["cqt"]
            x = chunk_data(x)
            y_i = dic["IPT_label"]
            y_i = chunk_data(y_i)
            if i == 0:
                Xtr = x
                Ytr_i = y_i
                i += 1
            else:
                Xtr = np.concatenate([Xtr, x], axis=0)
                Ytr_i = np.concatenate([Ytr_i, y_i], axis=0)

        Xtr = np.expand_dims(Xtr, axis=3)
        # Compute normalisation statistics only when none were passed in.
        if avg[0] is None and std[0] is None:
            avg, std = self._RoW_norm(Xtr)

        Xtr = self._norm(avg, std, Xtr, Xtr.shape)
        return list(Xtr), list(Ytr_i)

    def _parse_csv_label(self, csv_file):
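        """Parse one annotation CSV into a list of event dicts holding the
        onset/offset times, the technique class name and the note value."""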
        label = []
        with open(csv_file, mode="r", encoding="utf-8") as file:
            for row in csv.DictReader(file):
                label.append(
                    {
                        "onset_time": float(row["onset_time"]),
                        "offset_time": float(row["offset_time"]),
                        "IPT": _NAME[_NAMES[row["IPT"]]],
                        "note": int(row["note"]),
                    }
                )

        return label

    def _split_generators(self, dl_manager):
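        """Download and extract the archives, then build shuffled train,
        validation and test item lists: audio + mel + label events for the
        default config, or chunked CQT features and label matrices otherwise."""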
        audio_files = dl_manager.download_and_extract(_URLS["audio"])
        csv_files = dl_manager.download_and_extract(_URLS["label"])
        trainset, validset, testset = [], [], []
        if self.config.name == "default":
            files = {}
            mel_files = dl_manager.download_and_extract(_URLS["mel"])
            for path in dl_manager.iter_files([audio_files]):
                fname: str = os.path.basename(path)
                if fname.endswith(".flac"):
                    item_id = fname.split(".")[0]
                    files[item_id] = {"audio": path}

            for path in dl_manager.iter_files([mel_files]):
                fname = os.path.basename(path)
                if fname.endswith(".jpg"):
                    item_id = fname.split(".")[0]
                    files[item_id]["mel"] = path

            for path in dl_manager.iter_files([csv_files]):
                fname = os.path.basename(path)
                if fname.endswith(".csv"):
                    item_id = fname.split(".")[0]
                    files[item_id]["label"] = self._parse_csv_label(path)

            for item in files.values():
                if "train" in item["audio"]:
                    trainset.append(item)
                elif "validation" in item["audio"]:
                    validset.append(item)
                elif "test" in item["audio"]:
                    testset.append(item)

        else:
            # Build platform-independent paths to the extracted audio and label folders.
            audio_dir = os.path.join(audio_files, "audio")
            csv_dir = os.path.join(csv_files, "label")
            X_train, Y_train = self._load(audio_dir, csv_dir, ["train"])
            X_valid, Y_valid = self._load(audio_dir, csv_dir, ["validation"])
            X_test, Y_test = self._load(audio_dir, csv_dir, ["test"])
            for i in range(len(X_train)):
                trainset.append({"data": X_train[i], "label": Y_train[i]})

            for i in range(len(X_valid)):
                validset.append({"data": X_valid[i], "label": Y_valid[i]})

            for i in range(len(X_test)):
                testset.append({"data": X_test[i], "label": Y_test[i]})

        random.shuffle(trainset)
        random.shuffle(validset)
        random.shuffle(testset)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN, gen_kwargs={"files": trainset}
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION, gen_kwargs={"files": validset}
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST, gen_kwargs={"files": testset}
            ),
        ]

    def _generate_examples(self, files):
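        """Yield (key, example) pairs; each item is already a complete feature dict."""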
        for i, item in enumerate(files):
            yield i, item