import argparse
import glob
import os
import torch
import torch.nn.functional as F
import gradio as gr
import numpy as np
import onnxruntime as rt
import tqdm
import json
from midi_synthesizer import synthesis
import TMIDIX
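
# Detect whether the app is running inside a Hugging Face Space (the platform sets SYSTEM=spaces)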
in_space = os.getenv("SYSTEM") == "spaces"
#=================================================================================================
def generate(start_tokens,
             seq_len,
             max_seq_len=2048,
             temperature=0.9,
             verbose=True,
             return_prime=False
             ):

    out = torch.LongTensor([start_tokens])

    st = len(start_tokens)

    if verbose:
        print("Generating sequence of max length:", seq_len)

    for s in range(seq_len):

        # Trim the running sequence to the model's maximum context length
        x = out[:, -max_seq_len:]

        torch_in = x.tolist()[0]

        # Run the ONNX model and keep only the logits for the last position
        logits = torch.FloatTensor(session.run(None, {'input': [torch_in]})[0])[:, -1]

        filtered_logits = logits

        # Temperature-scaled softmax, then multinomial sampling of the next token
        probs = F.softmax(filtered_logits / temperature, dim=-1)

        sample = torch.multinomial(probs, 1)

        out = torch.cat((out, sample), dim=-1)

        if verbose:
            if s % 32 == 0:
                print(s, '/', seq_len)

    if return_prime:
        return out[:, :]

    else:
        return out[:, st:]
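
# The pass-through assignment `filtered_logits = logits` above leaves room for a
# logits filter before sampling. A minimal nucleus (top-p) filtering sketch is
# shown below; the helper name and its use are assumptions for illustration and
# are not part of the original app, which samples from the unfiltered logits.
def top_p_filter(logits, top_p=0.95):
    # Sort logits, accumulate probability mass, and mask the low-probability tail
    sorted_logits, sorted_indices = torch.sort(logits, descending=True, dim=-1)
    cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
    sorted_remove = cumulative_probs > top_p
    # Shift right so the first token that crosses the threshold is still kept
    sorted_remove[..., 1:] = sorted_remove[..., :-1].clone()
    sorted_remove[..., 0] = False
    remove = sorted_remove.scatter(-1, sorted_indices, sorted_remove)
    return logits.masked_fill(remove, float('-inf'))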
#=================================================================================================
def GenerateMIDI(params):

    # Seed the model with the composition start tokens and generate 512 new tokens
    melody_chords_f = generate([3087, 3073+1, 3075+1], 512)

    melody_chords_f = melody_chords_f.tolist()[0]

    print('=' * 70)
    print('Sample INTs', melody_chords_f[:12])
    print('=' * 70)

    if len(melody_chords_f) != 0:

        song = melody_chords_f
        song_f = []

        time = 0
        dur = 0
        vel = 0
        pitch = 0
        channel = 0

        for ss in song:

            # Tokens 1-255: delta start-time (scaled by 8)
            if ss > 0 and ss < 256:
                time += ss * 8

            # Tokens 256-1279: duration (scaled by 32) combined with 8-level (octo) velocity
            if ss >= 256 and ss < 1280:
                dur = ((ss - 256) // 8) * 32
                vel = (((ss - 256) % 8) + 1) * 15

            # Tokens 1280-2815: MIDI channel combined with pitch; a note is emitted here
            if ss >= 1280 and ss < 2816:
                channel = (ss - 1280) // 128
                pitch = (ss - 1280) % 128

                song_f.append(['note', time, dur, channel, pitch, vel])

        detailed_stats = TMIDIX.Tegridy_SONG_to_MIDI_Converter(song_f,
                                                               output_signature='Allegro Music Transformer',
                                                               output_file_name='Allegro-Music-Transformer-Music-Composition',
                                                               track_name='Project Los Angeles',
                                                               list_of_MIDI_patches=[0, 24, 32, 40, 42, 46, 56, 71, 73, 0, 53, 19, 0, 0, 0, 0],
                                                               number_of_ticks_per_quarter=500)

    print('=' * 70)
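
# Worked example of the token decoding above (token values are hypothetical):
#   token 100  -> time += 100 * 8 = 800
#   token 339  -> dur = ((339-256)//8)*32 = 320, vel = ((339-256)%8 + 1)*15 = 60
#   token 1340 -> channel = (1340-1280)//128 = 0, pitch = (1340-1280)%128 = 60 -> note appended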
#=================================================================================================
def load_javascript(dir="javascript"):

    # Collect every JS file from the given directory into one <script> blob
    scripts_list = glob.glob(f"{dir}/*.js")

    javascript = ""

    for path in scripts_list:
        with open(path, "r", encoding="utf8") as jsfile:
            javascript += f"\n<!-- {path} --><script>{jsfile.read()}</script>"

    # Monkey-patch Gradio's template response so the collected scripts are
    # injected into the <head> of every rendered page
    template_response_ori = gr.routes.templates.TemplateResponse

    def template_response(*args, **kwargs):
        res = template_response_ori(*args, **kwargs)
        res.body = res.body.replace(
            b'</head>', f'{javascript}</head>'.encode("utf8"))
        res.init_headers()
        return res

    gr.routes.templates.TemplateResponse = template_response


class JSMsgReceiver(gr.HTML):

    def __init__(self, **kwargs):
        super().__init__(elem_id="msg_receiver", visible=False, **kwargs)

    def postprocess(self, y):
        if y:
            y = f"<p>{json.dumps(y)}</p>"
        return super().postprocess(y)

    def get_block_name(self) -> str:
        return "html"
#=================================================================================================
if __name__ == "__main__":

    parser = argparse.ArgumentParser()
    parser.add_argument("--share", action="store_true", default=False, help="share gradio app")
    parser.add_argument("--port", type=int, default=7860, help="gradio server port")
    opt = parser.parse_args()

    # Load the exported Allegro Music Transformer model with onnxruntime (CUDA execution provider requested)
    session = rt.InferenceSession('Allegro_Music_Transformer_Small_Trained_Model_56000_steps_0.9399_loss_0.7374_acc.onnx', providers=['CUDAExecutionProvider'])

    app = gr.Blocks()

    with app:
        gr.Markdown("<h1 style='text-align: center; margin-bottom: 1rem'>Allegro Music Transformer</h1>")
        gr.Markdown("![Visitors](https://api.visitorbadge.io/api/visitors?path=asigalov61.Allegro-Music-Transformer&style=flat)\n\n"
                    "Full-attention multi-instrumental music transformer featuring asymmetrical encoding with octo-velocity and chords counters tokens, optimized for speed and performance\n\n"
                    "Check out [Allegro Music Transformer](https://github.com/asigalov61/Allegro-Music-Transformer) on GitHub!\n\n"
                    "[Open In Colab]"
                    "(https://colab.research.google.com/github/asigalov61/Allegro-Music-Transformer/blob/main/Allegro_Music_Transformer_Composer.ipynb)"
                    " for faster execution and endless generation"
                    )

        js_msg = JSMsgReceiver()

        tab_select = gr.Variable(value=0)

    app.queue(2).launch(server_port=opt.port, share=opt.share, inbrowser=True)
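
# Example invocations (flags as defined by the argument parser above):
#   python app.py
#   python app.py --port 7861
#   python app.py --share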