Spaces: Running on Zero
asigalov61 committed on
Update app.py
app.py CHANGED
@@ -29,6 +29,8 @@ torch.backends.cuda.enable_math_sdp(True)
 torch.backends.cuda.enable_flash_sdp(True)
 torch.backends.cuda.enable_cudnn_sdp(True)
 
+from huggingface_hub import hf_hub_download
+
 import TMIDIX
 
 from midi_to_colab_audio import midi_to_colab_audio
@@ -54,11 +56,11 @@ print('=' * 70)
 
 #==================================================================================
 
-MODEL_CHECKPOINT = '
+MODEL_CHECKPOINT = 'Monster_Piano_Transformer_Velocity_Trained_Model_59896_steps_0.9055_loss_0.735_acc.pth'
 
 SOUDFONT_PATH = 'SGM-v2.01-YamahaGrand-Guit-Bass-v2.7.sf2'
 
-NUM_OUT_BATCHES =
+NUM_OUT_BATCHES = 16
 
 PREVIEW_LENGTH = 120 # in tokens
 
@@ -73,14 +75,14 @@ dtype = 'bfloat16'
 ptdtype = {'bfloat16': torch.bfloat16, 'float16': torch.float16}[dtype]
 ctx = torch.amp.autocast(device_type=device_type, dtype=ptdtype)
 
-SEQ_LEN =
-PAD_IDX =
+SEQ_LEN = 2048
+PAD_IDX = 512
 
 model = TransformerWrapper(
         num_tokens = PAD_IDX+1,
         max_seq_len = SEQ_LEN,
         attn_layers = Decoder(dim = 2048,
-                              depth =
+                              depth = 4,
                               heads = 32,
                               rotary_pos_emb = True,
                               attn_flash = True
@@ -92,7 +94,9 @@ model = AutoregressiveWrapper(model, ignore_index=PAD_IDX, pad_value=PAD_IDX)
 print('=' * 70)
 print('Loading model checkpoint...')
 
-
+model_checkpoint = hf_hub_download(repo_id='asigalov61/Monster-Piano-Transformer', filename=MODEL_CHECKPOINT_VEL)
+
+model.load_state_dict(torch.load(model_checkpoint, map_location='cpu'))
 
 model = torch.compile(model, mode='max-autotune')
 
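For readers following along outside the Space, the snippet below is a minimal, self-contained sketch of what this commit sets up: it rebuilds the same x_transformers decoder configured in the diff, pulls the checkpoint from the asigalov61/Monster-Piano-Transformer model repo with hf_hub_download, and loads the weights on CPU. Note that the added download line references MODEL_CHECKPOINT_VEL while the constant defined above it is MODEL_CHECKPOINT; the sketch assumes both refer to the same file. The final eval() call is an assumption for inference and is not part of the diff, which only shows the torch.compile step.

# Minimal sketch of the checkpoint setup this commit introduces -- not the full app.py.
# Assumes TransformerWrapper / Decoder / AutoregressiveWrapper come from the
# x_transformers package, and that MODEL_CHECKPOINT_VEL in the diff refers to the
# same file as the MODEL_CHECKPOINT constant defined above it.

import torch
from huggingface_hub import hf_hub_download
from x_transformers import TransformerWrapper, Decoder, AutoregressiveWrapper

MODEL_CHECKPOINT = 'Monster_Piano_Transformer_Velocity_Trained_Model_59896_steps_0.9055_loss_0.735_acc.pth'

SEQ_LEN = 2048   # max sequence length used by the Space
PAD_IDX = 512    # padding token id; vocabulary size is PAD_IDX + 1

# Same architecture as in the diff: 2048-dim, 4-layer, 32-head decoder
# with rotary positional embeddings and flash attention.
model = TransformerWrapper(num_tokens=PAD_IDX + 1,
                           max_seq_len=SEQ_LEN,
                           attn_layers=Decoder(dim=2048,
                                               depth=4,
                                               heads=32,
                                               rotary_pos_emb=True,
                                               attn_flash=True))

model = AutoregressiveWrapper(model, ignore_index=PAD_IDX, pad_value=PAD_IDX)

# Download the checkpoint from the model repo and load the weights on CPU,
# mirroring the lines added in this commit.
model_checkpoint = hf_hub_download(repo_id='asigalov61/Monster-Piano-Transformer',
                                   filename=MODEL_CHECKPOINT)
model.load_state_dict(torch.load(model_checkpoint, map_location='cpu'))

model.eval()  # inference mode; not shown in the diff, added here for completeness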