|
import os |
|
import numpy as np |
|
from pydub import AudioSegment |
|
from scipy.ndimage import maximum_filter1d |
|
import json |
|
import hashlib |
|
import tqdm |
|
import os |
|
|
|
from scipy.interpolate import interp1d |
|
from scipy.signal import argrelmax |
|
|
|
|
|
|
|
def infer_tempo(beats, fps, hist_smooth=4, no_tempo=-1):
    """Estimate the dominant tempo (BPM) from a sequence of beat times.

    Builds a histogram of inter-beat intervals expressed in frames,
    optionally smooths it, interpolates it onto a fine grid and returns
    the tempo corresponding to the strongest histogram peak.

    Args:
        beats: 1-D sequence of beat timestamps in seconds.
        fps: frames per second used to convert intervals to frame counts.
        hist_smooth: width of the smoothing kernel applied to the interval
            histogram; values <= 0 disable smoothing (and the madmom
            dependency).
        no_tempo: sentinel returned when no tempo can be inferred.

    Returns:
        Estimated tempo in beats per minute, or ``no_tempo`` when there
        are no inter-beat intervals or no histogram peaks.
    """
    # Inter-beat intervals, converted from seconds to frames.
    ibis = np.diff(beats) * fps
    bins = np.bincount(np.round(ibis).astype(int))
    # Fewer than two beats -> empty histogram -> nothing to infer.
    if not bins.any():
        return no_tempo
    if hist_smooth > 0:
        # madmom is only needed for smoothing, so import it lazily here
        # instead of unconditionally at the top of the function.
        import madmom
        bins = madmom.audio.signal.smooth(bins, hist_smooth)
    intervals = np.arange(len(bins))
    interpolation_fn = interp1d(intervals, bins, 'quadratic')
    intervals = np.arange(intervals[0], intervals[-1], 0.001)
    # Interval 0 maps to an infinite tempo; silence the divide-by-zero
    # warning — index 0 is never a meaningful histogram peak.
    with np.errstate(divide='ignore'):
        tempi = 60.0 * fps / intervals
    bins = interpolation_fn(intervals)
    peaks = argrelmax(bins, mode='wrap')[0]
    if len(peaks) == 0:
        return no_tempo
    else:
        # Rank peaks by histogram height and return the strongest one.
        sorted_peaks = peaks[np.argsort(bins[peaks])[::-1]]
        return tempi[sorted_peaks][0]
|
|
|
|
|
def quantise(beats):
    """Snap each beat time to the nearest 1/25 s (40 ms) grid point."""
    snapped = []
    for beat in beats:
        snapped.append(int(round(beat * 25)) / 25)
    return snapped
|
|
|
|
|
def get_sample(excerpt_path, beats, existed_uuid_list, split="train", key="gtzan", type="beat", base_path=None):
    """Build one instruction-tuning sample for beat or downbeat tracking.

    Args:
        excerpt_path: absolute path to the audio file/excerpt.
        beats: iterable of (down)beat timestamps in seconds.
        existed_uuid_list: set of uuids seen so far; mutated in place so
            every emitted sample gets a unique id.
        split: dataset split name stored in the sample ("train"/"test").
        key: source-dataset name.
        type: "beat" for beat tracking, "downbeat" for downbeat tracking.
            (Name shadows the builtin but is kept for caller compatibility.)
        base_path: root directory stripped from ``excerpt_path`` to form
            the relative audio id; defaults to the module-level PATH.

    Returns:
        A dict with instruction, input/output strings, uuid and metadata.
    """
    if base_path is None:
        base_path = PATH
    # Audio id is the path relative to the dataset root (drop root + '/').
    audio_id = excerpt_path[len(base_path) + 1:]

    data_sample = {
        # Fixed the format example: was "1.0ss" (typo) in the original.
        "instruction": "Identify and list the timestamps of all beats in this audio track. Use the format of `0.0s,0.54s,1.0s, ...`",
        "input": f"<|SOA|>{audio_id}<|EOA|>",
        "output": ",".join([f"{b}s" for b in beats]),
        "uuid": "",
        "audioid": audio_id,
        "split": [split],
        "task_type": {"major": ["global_MIR"], "minor": ["beat_tracking"]},
        "domain": "music",
        "source": key,
        "other": {}
    }
    if type == "downbeat":
        data_sample["instruction"] = "Identify and list the timestamps of all downbeats in this audio track. Use the format of `0.0s,1.54s,3.0s, ...`"
        data_sample["task_type"]["minor"] = ["downbeat_tracking"]

    # Derive a 16-hex-char id from the sample content; on collision,
    # re-hash with a sha1 salt so ids stay unique within this run.
    uuid_string = f"{data_sample['instruction']}#{data_sample['input']}#{data_sample['output']}"
    unique_id = hashlib.md5(uuid_string.encode()).hexdigest()[:16]
    if unique_id in existed_uuid_list:
        sha1_hash = hashlib.sha1(uuid_string.encode()).hexdigest()[:16]
        unique_id = hashlib.md5((unique_id + sha1_hash).encode()).hexdigest()[:16]
    existed_uuid_list.add(unique_id)
    data_sample["uuid"] = f"{unique_id}"
    return data_sample
|
|
|
|
|
# Excerpt length and minimum usable tail length, in milliseconds
# (pydub's AudioSegment is sliced in milliseconds).
EXCERPT_LENGTH = 30 * 1000

MIN_LENGTH = 5 * 1000


# Root of the Beat-Transformer data layout; audio lists and the combined
# beat-annotation archive live under this directory.
PATH = '/work/fast_data_yinghao/Beat-Transformer/data'

# NpzFile keyed by dataset name (e.g. "ballroom"); each entry holds one
# annotation array per track (allow_pickle needed for object arrays).
load_annotation = np.load(f'{PATH}/full_beat_annotation.npz', allow_pickle=True)
|
|
|
# Build instruction-tuning samples for each dataset key and dump them to
# one JSON file per (key, split).
for key in ["ballroom"]:

    annotation = load_annotation[key]

    # One audio path per annotation row; the list stores absolute paths
    # from the original machine, so re-root them under PATH.
    with open(f'{PATH}/audio_lists/{key}.txt', 'r') as f:
        audio_root = f.readlines()
    audio_root = [item.replace('\n', '') for item in audio_root]
    # NOTE(review): the 37-character prefix strip assumes the original
    # absolute-path layout of the audio list — confirm if that changes.
    audio_root = [f'{PATH}/{item[37:]}' for item in audio_root]
    assert(len(annotation) == len(audio_root))

    existed_uuid_list = set()
    data_samples = []
    for idx, ann in tqdm.tqdm(enumerate(annotation), total=len(annotation)):

        audio_path = audio_root[idx]
        if len(ann.shape) == 1:
            # 1-D annotation: beat times only, no downbeat information.
            beats = quantise(ann)
            downbeats = None
        elif key != "rwc":
            # 2-D annotation: column 0 = beat time, column 1 = position
            # in bar (1 marks a downbeat).
            beats = quantise(ann[:, 0])
            downbeats = quantise(ann[ann[:, 1] == 1, 0])
        else:
            # BUG FIX: was a bare `NotImplementedError` expression, which
            # is a no-op and silently fell through with stale `beats`.
            raise NotImplementedError

        if key == "ballroom":
            # Ballroom tracks are short enough to be used whole.
            sample = get_sample(audio_path, beats, existed_uuid_list, key=key)
            data_samples.append(sample)
            # Guard added: 1-D annotations leave downbeats as None.
            if downbeats:
                sample = get_sample(audio_path, downbeats, existed_uuid_list, key=key, type="downbeat")
                data_samples.append(sample)
        elif key == "gtzan":
            # gtzan is the held-out test split; jazz.00054 is skipped
            # (known unusable file in this pipeline).
            if "jazz.00054" in audio_path:
                continue
            sample = get_sample(audio_path, beats, existed_uuid_list, split="test", key=key)
            data_samples.append(sample)
            if downbeats:
                sample = get_sample(audio_path, downbeats, existed_uuid_list, split="test", key=key, type="downbeat")
                data_samples.append(sample)
        else:
            # Long recordings: cut into 30 s excerpts and shift the
            # annotations into each excerpt's local time frame.
            audio = AudioSegment.from_file(audio_path)
            for i in range(0, len(audio), EXCERPT_LENGTH):
                end = i + EXCERPT_LENGTH
                if end < len(audio):
                    excerpt = audio[i:end]
                else:
                    excerpt = audio[i:]
                    # Drop tails too short to be a useful sample.
                    if len(excerpt) < MIN_LENGTH:
                        break
                    end = len(audio)

                # Export each excerpt once; reuse it on later runs.
                seg_idx = i // EXCERPT_LENGTH
                excerpt_path = f"{audio_path[:-4]}_{seg_idx}.wav"
                if not os.path.exists(excerpt_path):
                    excerpt.export(excerpt_path, format="wav")

                # BUG FIX: the window test used `i * 30`, but `i` is in
                # milliseconds (0, 30000, ...), so every excerpt after
                # the first matched no beats. Use the excerpt index so
                # the window is [30*seg_idx, 30*(seg_idx+1)] seconds.
                excerpt_beats = [b % 30 for b in beats if 30 * seg_idx <= b <= 30 * (seg_idx + 1)]
                if downbeats:
                    excerpt_downbeats = [db % 30 for db in downbeats if 30 * seg_idx <= db <= 30 * (seg_idx + 1)]
                else:
                    excerpt_downbeats = None

                sample = get_sample(excerpt_path, excerpt_beats, existed_uuid_list, key=key)
                data_samples.append(sample)
                if downbeats:
                    sample = get_sample(excerpt_path, excerpt_downbeats, existed_uuid_list, key=key, type="downbeat")
                    data_samples.append(sample)

    # gtzan is reserved for evaluation; everything else is training data.
    split = "test" if key == "gtzan" else "train"
    output_file_path = f'{PATH}/../{key}_{split}.jsonl'
    # NOTE(review): despite the .jsonl extension this writes a single JSON
    # array, not one object per line. Left as-is because downstream
    # readers may depend on the current format — confirm before changing.
    with open(output_file_path, 'w') as outfile:
        json.dump(data_samples, outfile)
|
|