import jams
import tqdm
import glob
import numpy as np
import mir_eval


def quantise(t):
    # Snap a time in seconds to the nearest 1/25 s (40 ms) grid point.
    return int(round(t * 25)) / 25

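# Quick self-check of the 40 ms grid (illustrative addition, not part of the
# original pipeline): e.g. 3.17 s snaps down to 3.16 s and 12.3456 s up to 12.36 s.
assert quantise(3.17) == 3.16
assert quantise(12.3456) == 12.36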

# First pass: collect every chord quality (the part after ':') that appears in
# the reference annotations, so it can be simplified to the sevenths_inv vocabulary.
new_key = []
for tmp in tqdm.tqdm(glob.glob("/work/fast_data_yinghao/jjy_chord/references_v2/*.jams")):
    file = jams.load(tmp)
    for data in file["annotations"][0]["data"]:
        # key.add(data.value.split(":")[-1])
        if ":" in data.value:
            new_key.append(data.value.split(":")[-1])
        # else:
        #     new_key.append(data.value)
new_key = list(set(new_key))  # unique qualities only

quality_map = {}  # observed quality -> simplified quality (avoid shadowing the builtin `map`)
# partition = [[new_key[0]]]
target = ['maj', 'maj/3', 'maj/5',
          'min', 'min/b3', 'min/5',
          '7', '7/3', '7/5', '7/b7',
          'maj7', 'maj7/3', 'maj7/7',
          'min7', 'min7/b3', 'min7/5', 'min7/b7']
for quality in tqdm.tqdm(new_key):
    for candidate in target:
        # sevenths_inv returns one score per chord pair; 1 means the two chords
        # are equivalent under the MIREX sevenths + inversions rule.
        if mir_eval.chord.sevenths_inv(["C:" + quality], ["C:" + candidate])[0] == 1:
            quality_map[quality] = candidate
            break
    else:
        # no equivalent in the target vocabulary: keep the quality unchanged
        quality_map[quality] = quality
#     # for j in new_key[idx+1:]:
#     for j in partition:
#         if mir_eval.chord.sevenths_inv([i],[j[0]])==1:
#             j.append(i)
#             break
#     else:
#         partition.append([i])
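# Optional sanity check (illustrative addition, not part of the original pipeline):
# print a few learned mappings to see which observed qualities collapse onto the
# sevenths_inv target vocabulary and which are kept unchanged.
for quality in sorted(quality_map)[:10]:
    print(f"{quality:>12} -> {quality_map[quality]}")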


# key = set()
# ['C#:min11', 'C#:5', 'C#:5(b13)', 'C#:(1,b3)/b3', 'C#:min7', 'C#:min(*b3,9)', 'C#:5(b7)', 'C#:maj6', 'C#:maj7(*b5)', 'C#:dim', 'C#:min(*b3,*5)', 'C#:min11/b3', 'C#:min7(4)/4', 'C#:min7/4',
#  'C#:7(b9)/3', 'C#:sus4(b7)', 'C#:maj6(4)', 'C#:sus4', 'C#:min7(9)', 'C#/3', 'C#:maj', 'C#:7(b9)', 'C#:7/4', 'C#', 'C#:maj/3', 'C#:min9/b7', 'C#:min(9)', 'C#:maj(*3)', 'C#:1/1', 'C#:13', 'C#:7(#9)',
#  'C#:dim/b3', 'C#:sus2(4)', 'C#:maj/b4', 'C#:min(*b3)', 'C#:7/3', 'C#:maj7/3', 'C#:min/b7', 'C#:min7/b7', 'C#:7', 'C#:min(b13)', 'C#:7(b9,#9)', 'C#:(1,b3,b5,6)', 'C#:min9', 'C#:min7/5', 'C#:sus4(b7,9)',
#  'C#:min7/b3', 'C#:maj/5', 'C#:min/4', 'C#:dim7', 'C#:min/5', 'C#:hdim7/4', 'C#:min/3', 'C#:maj6/3', 'C#:min(11)', 'C#:hdim7', 'C#:dim7/b3', 'C#:9', 'C#:sus2(b7)', 'C#:aug', 'C#:(b3,b7,11,9)', 
# 'C#:(1,b3,b5,6)/b3', 'C#:maj6(4)/3', 'C#:7(b2)', 'C#:sus2', 'C#:min', 'C#:(1,b3)']
# count = 0
for tmp in tqdm.tqdm(glob.glob("/work/fast_data_yinghao/jjy_chord/references_v2/*.jams")):
    file = jams.load(tmp)
    ref_intervals = np.zeros((len(file["annotations"][0]["data"]),2))
    ref_labels = []
    # all annotations start at 0s
    # count += (file["annotations"][0]["data"][0].time==0)
    annotation = ""
    for idx, data in enumerate(file["annotations"][0]["data"]):
        # key.add(data.value.split(":")[-1])
        # key.add(data.value)
        ref_intervals[idx] = [data.time, data.time + data.duration]
        ref_labels.append(data.value)
        if ":" in data.value:
            chord = data.value.split(":")[0] + ":" + quality_map[data.value.split(":")[-1]]
        else:
            # no-chord ("N"/"X") or bare root/bass labels have no quality to simplify
            chord = data.value
        # one segment per chord, e.g. "C#:min7 12.4s,"
        annotation += f"{chord} {quantise(data.time + data.duration)}s,"

    # break


# (est_intervals, est_labels) = (ref_intervals, ref_labels) 
# est_intervals, est_labels = mir_eval.util.adjust_intervals(
#     est_intervals, est_labels, ref_intervals.min(),
#     ref_intervals.max(), mir_eval.chord.NO_CHORD,
#     mir_eval.chord.NO_CHORD)
# (intervals, ref_labels, est_labels) = mir_eval.util.merge_labeled_intervals(
#     ref_intervals, ref_labels, est_intervals, est_labels)
# durations = mir_eval.util.intervals_to_durations(intervals)
# comparisons = mir_eval.chord.sevenths_inv(ref_labels, est_labels)
# score = mir_eval.chord.weighted_accuracy(comparisons, durations)
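# Toy, self-contained illustration (hand-made labels, purely illustrative) of the
# metric the commented-out block above would compute: sevenths_inv counts a chord
# as correct only if root, seventh-level quality, and bass all match, and
# weighted_accuracy averages the per-segment scores weighted by duration.
_ref = ["C:maj7/3", "G:7", "A:min7"]
_est = ["C:maj7/3", "G:min7", "A:min7"]
_scores = mir_eval.chord.sevenths_inv(_ref, _est)             # [1., 0., 1.]
_durations = np.array([2.0, 1.0, 1.0])
print(mir_eval.chord.weighted_accuracy(_scores, _durations))  # 0.75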