import note_seq
import numpy as np
from PIL import Image

# Timing constants at a fixed tempo of 120 BPM.
NOTE_LENGTH_16TH_120BPM = 0.25 * 60 / 120  # duration of a sixteenth note in seconds
BAR_LENGTH_120BPM = 4.0 * 60 / 120  # duration of a 4/4 bar in seconds
SAMPLE_RATE = 44100


def token_sequence_to_audio(token_sequence):
    """Synthesize a token sequence to audio and return (sample_rate, int16 samples)."""
    note_sequence = token_sequence_to_note_sequence(token_sequence)
    synth = note_seq.midi_synth.fluidsynth
    array_of_floats = synth(note_sequence, sample_rate=SAMPLE_RATE)
    note_plot = note_seq.plot_sequence(note_sequence, False)  # plot object is created but not used here
    # Scale the float waveform into the 16-bit integer range.
    array_of_floats /= 1.414
    array_of_floats *= 32767
    int16_data = array_of_floats.astype(np.int16)
    return SAMPLE_RATE, int16_data


def token_sequence_to_image(token_sequence):
    """Render a token sequence as a piano-roll image (one pixel row per pitch)."""
    note_sequence = token_sequence_to_note_sequence(token_sequence)

    # Find the pitch range so the image only spans the pitches that are actually used.
    min_pitch = 128
    max_pitch = 0
    for note in note_sequence.notes:
        if note.pitch < min_pitch:
            min_pitch = note.pitch
        if note.pitch > max_pitch:
            max_pitch = note.pitch

    image_height = max_pitch - min_pitch + 1
    image_width = int(16 * 4)  # fixed width in pixels

    # Dark background.
    color = (12, 12, 12)
    image = Image.new("RGB", (image_width, image_height), color)

    # One color per instrument.
    colors = [(248, 249, 250), (233, 236, 239), (173, 181, 189), (52, 58, 64)]
    instrument_to_color_index = {}

    # Draw each note as a horizontal bar, positioned by start time and pitch.
    for note in note_sequence.notes:
        x = int(note.start_time / note_sequence.total_time * image_width)
        y = note.pitch - min_pitch
        width = int((note.end_time - note.start_time) / note_sequence.total_time * image_width)
        height = 1

        if note.instrument not in instrument_to_color_index:
            instrument_to_color_index[note.instrument] = len(instrument_to_color_index)
        color_index = instrument_to_color_index[note.instrument]
        color = colors[color_index % len(colors)]  # wrap around if there are more instruments than colors

        image.paste(color, (x, y, x + width, y + height))

    # Scale up and flip vertically so that higher pitches appear at the top.
    factor = 8
    image = image.resize((image_width * factor, image_height * factor), Image.NEAREST)
    image = image.transpose(Image.FLIP_TOP_BOTTOM)

    return image


def token_sequence_to_note_sequence(token_sequence, use_program=True, use_drums=True, instrument_mapper=None, only_piano=False):
    """Parse a token sequence (string or list of tokens) into a note_seq NoteSequence."""
    if isinstance(token_sequence, str):
        token_sequence = token_sequence.split()

    note_sequence = empty_note_sequence()

    # Current parsing state.
    current_program = 1
    current_is_drum = False
    current_instrument = 0
    track_count = 0

    for token_index, token in enumerate(token_sequence):
        if token == "PIECE_START":
            pass
        elif token == "PIECE_END":
            print("The end.")
            break
        elif token == "TRACK_START":
            current_bar_index = 0
            track_count += 1
        elif token == "TRACK_END":
            pass
        elif token.startswith("INST"):
            instrument = token.split("=")[-1]
            if instrument != "DRUMS" and use_program:
                if instrument_mapper is not None:
                    if instrument in instrument_mapper:
                        instrument = instrument_mapper[instrument]
                current_program = int(instrument)
                current_instrument = track_count
                current_is_drum = False
            if instrument == "DRUMS" and use_drums:
                current_instrument = 0
                current_program = 0
                current_is_drum = True
        elif token == "BAR_START":
            current_time = current_bar_index * BAR_LENGTH_120BPM
            current_notes = {}
        elif token == "BAR_END":
            current_bar_index += 1
        elif token.startswith("NOTE_ON"):
            pitch = int(token.split("=")[-1])
            note = note_sequence.notes.add()
            note.start_time = current_time
            # Default to a quarter-note duration; a later NOTE_OFF adjusts the end time.
            note.end_time = current_time + 4 * NOTE_LENGTH_16TH_120BPM
            note.pitch = pitch
            note.instrument = current_instrument
            note.program = current_program
            note.velocity = 80
            note.is_drum = current_is_drum
            current_notes[pitch] = note
        elif token.startswith("NOTE_OFF"):
            pitch = int(token.split("=")[-1])
            if pitch in current_notes:
                note = current_notes[pitch]
                note.end_time = current_time
        elif token.startswith("TIME_DELTA"):
            delta = float(token.split("=")[-1]) * NOTE_LENGTH_16TH_120BPM
            current_time += delta
        elif token.startswith("DENSITY="):
            pass
        elif token == "[PAD]":
            pass
        else:
            pass

    # Reassign instrument indices so each (program, is_drum) pair gets its own instrument.
    instruments_drums = []
    for note in note_sequence.notes:
        pair = [note.program, note.is_drum]
        if pair not in instruments_drums:
            instruments_drums += [pair]
        note.instrument = instruments_drums.index(pair)

    # Optionally collapse all pitched tracks onto a single piano track.
    if only_piano:
        for note in note_sequence.notes:
            if not note.is_drum:
                note.instrument = 0
                note.program = 0

    note_sequence.total_time = current_time
    return note_sequence


def empty_note_sequence(qpm=120.0, total_time=0.0):
    """Create an empty NoteSequence with a single tempo entry and standard PPQ."""
    note_sequence = note_seq.protobuf.music_pb2.NoteSequence()
    note_sequence.tempos.add().qpm = qpm
    note_sequence.ticks_per_quarter = note_seq.constants.STANDARD_PPQ
    note_sequence.total_time = total_time
    return note_sequence
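

# A minimal usage sketch, not part of the original module: the token string below is a
# hypothetical example built only from tokens this parser understands. Audio rendering
# additionally assumes that fluidsynth and a soundfont are available to note_seq.
if __name__ == "__main__":
    example_tokens = (
        "PIECE_START TRACK_START INST=0 BAR_START "
        "NOTE_ON=60 TIME_DELTA=4 NOTE_OFF=60 NOTE_ON=64 TIME_DELTA=4 NOTE_OFF=64 "
        "BAR_END TRACK_END"
    )
    sequence = token_sequence_to_note_sequence(example_tokens)
    print(f"Parsed {len(sequence.notes)} notes, total time {sequence.total_time:.2f}s.")
    token_sequence_to_image(example_tokens).save("example.png")
    sample_rate, samples = token_sequence_to_audio(example_tokens)
    print(f"Synthesized {len(samples)} samples at {sample_rate} Hz.")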