Spaces:
Runtime error
Runtime error
Simon Stolarczyk
committed on
Commit
•
e49932c
1
Parent(s):
2769b59
fresh start
Browse files- .ipynb_checkpoints/app-checkpoint.py +40 -34
- app.py +40 -34
.ipynb_checkpoints/app-checkpoint.py
CHANGED
@@ -1,56 +1,62 @@
|
|
1 |
import gradio as gr
|
2 |
|
3 |
-
# from musicautobot.numpy_encode import file2stream
|
4 |
from musicautobot.utils.setup_musescore import play_wav
|
5 |
from music21.midi.translate import midiFileToStream
|
6 |
from pathlib import Path
|
7 |
from midi2audio import FluidSynth
|
8 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
9 |
import subprocess
|
10 |
import os
|
11 |
|
12 |
print(os.getcwd())
|
13 |
-
# print(os.listdir())
|
14 |
-
# print(os.listdir('..')) # ['.bash_logout', '.bashrc', '.profile', 'app', '.cache', '.local']
|
15 |
-
# print(os.listdir('../.local')) # ['bin', 'lib', 'etc', 'share']
|
16 |
-
# print(os.listdir('../..')) # ['user']
|
17 |
-
# print(os.listdir('../../..')) # ['bin', 'boot', 'dev', 'etc', 'home', 'lib', 'lib64', 'media', 'mnt', 'opt', 'proc', 'root', 'run', 'sbin', 'srv', 'sys', 'tmp', 'usr', 'var', '.dockerenv']
|
18 |
-
# print(os.listdir('../../../bin'))
|
19 |
-
|
20 |
-
# result = subprocess.run(['echo $PATH'], capture_output=True, shell=True)
|
21 |
-
# stdout = result.stdout
|
22 |
-
# paths = stdout.decode('utf-8').strip().split(':')
|
23 |
|
24 |
-
#
|
|
|
|
|
25 |
|
26 |
-
#
|
27 |
-
|
28 |
-
|
29 |
-
# print(os.listdir(path))
|
30 |
-
# print('-'*10)
|
31 |
|
32 |
-
#
|
33 |
-
|
34 |
-
|
35 |
-
|
|
|
|
|
36 |
|
37 |
|
38 |
-
# subprocess.run(['apt-get install fluidsynth'], shell=True) # do not have the right perms
|
39 |
-
# results = subprocess.run(['find / | grep "fluidsynth"'], capture_output=True, shell=True)
|
40 |
-
results = subprocess.run(['find / | grep "FluidR"'], capture_output=True, shell=True) # look for sound fonts
|
41 |
-
print(results.stdout)
|
42 |
|
43 |
-
# subprocess.run(['PATH=$PATH:~/opt/bin'])
|
44 |
-
# subprocess.run(['PATH=$PATH:~/opt/bin/fluidsynth'])
|
45 |
-
# subprocess.run(['fluidsynth'], shell=True)
|
46 |
-
|
47 |
def process_midi(midi_file):
|
48 |
name = Path(midi_file.name)
|
49 |
-
|
50 |
-
#
|
51 |
-
|
52 |
-
|
53 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
54 |
|
55 |
iface = gr.Interface(
|
56 |
fn=process_midi,
|
|
|
1 |
import gradio as gr
|
2 |
|
|
|
3 |
from musicautobot.utils.setup_musescore import play_wav
|
4 |
from music21.midi.translate import midiFileToStream
|
5 |
from pathlib import Path
|
6 |
from midi2audio import FluidSynth
|
7 |
|
8 |
+
from musicautobot.numpy_encode import *
|
9 |
+
from musicautobot.config import *
|
10 |
+
from musicautobot.music_transformer import *
|
11 |
+
from musicautobot.utils.midifile import *
|
12 |
+
from musicautobot.utils.file_processing import process_all
|
13 |
+
|
14 |
+
import pickle
|
15 |
+
|
16 |
import subprocess
|
17 |
import os
|
18 |
|
19 |
print(os.getcwd())
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
20 |
|
21 |
+
# Reload the serialized dataset bundle; the vocab it carries is required to
# encode incoming MIDI for the model.
data_dir = Path('data/')
data = load_data(data_dir, 'data.pkl')

# Start from the stock config, then enable positional encoding.
config = default_config()
config['encode_position'] = True

# Restore the fine-tuned transformer weights from disk.
learner = music_model_learner(data, config=config.copy(), pretrained_path='model.pth')
|
35 |
|
36 |
|
|
|
|
|
|
|
|
|
37 |
|
|
|
|
|
|
|
|
|
38 |
def process_midi(midi_file):
    """Generate a model continuation of an uploaded MIDI file and render it to WAV.

    Parameters
    ----------
    midi_file : gradio file object
        Uploaded file; only its ``.name`` (a filesystem path) is used.

    Returns
    -------
    str
        Path to ``'result.wav'``, the synthesized audio of input + prediction.
    """
    # The module-level import only brings in midiFileToStream; pull in the
    # converter we actually need here (the original referenced
    # music21.midi.translate.streamToMidiFile, but `music21` itself is not
    # visibly imported at module level).
    from music21.midi.translate import streamToMidiFile

    name = Path(midi_file.name)

    # create the model input object from the uploaded MIDI
    item = MusicItem.from_file(name, data.vocab)

    # full is the prediction appended to the input.
    # BUG FIX: the original called `learn.predict`, but the model is bound as
    # `learner` at module level — that NameError crashed the Space.
    pred, full = learner.predict(item, n_words=100)

    # convert to stream and then to a MIDI file object
    stream = full.to_stream()
    out = streamToMidiFile(stream)

    # save MIDI file
    out.open('result.midi', 'wb')
    out.write()
    out.close()

    # use fluidsynth to convert MIDI to WAV so the user can hear the output
    sound_font = "/usr/share/sounds/sf2/FluidR3_GM.sf2"
    FluidSynth(sound_font).midi_to_audio('result.midi', 'result.wav')
    return 'result.wav'
|
60 |
|
61 |
iface = gr.Interface(
|
62 |
fn=process_midi,
|
app.py
CHANGED
@@ -1,56 +1,62 @@
|
|
1 |
import gradio as gr
|
2 |
|
3 |
-
# from musicautobot.numpy_encode import file2stream
|
4 |
from musicautobot.utils.setup_musescore import play_wav
|
5 |
from music21.midi.translate import midiFileToStream
|
6 |
from pathlib import Path
|
7 |
from midi2audio import FluidSynth
|
8 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
9 |
import subprocess
|
10 |
import os
|
11 |
|
12 |
print(os.getcwd())
|
13 |
-
# print(os.listdir())
|
14 |
-
# print(os.listdir('..')) # ['.bash_logout', '.bashrc', '.profile', 'app', '.cache', '.local']
|
15 |
-
# print(os.listdir('../.local')) # ['bin', 'lib', 'etc', 'share']
|
16 |
-
# print(os.listdir('../..')) # ['user']
|
17 |
-
# print(os.listdir('../../..')) # ['bin', 'boot', 'dev', 'etc', 'home', 'lib', 'lib64', 'media', 'mnt', 'opt', 'proc', 'root', 'run', 'sbin', 'srv', 'sys', 'tmp', 'usr', 'var', '.dockerenv']
|
18 |
-
# print(os.listdir('../../../bin'))
|
19 |
-
|
20 |
-
# result = subprocess.run(['echo $PATH'], capture_output=True, shell=True)
|
21 |
-
# stdout = result.stdout
|
22 |
-
# paths = stdout.decode('utf-8').strip().split(':')
|
23 |
|
24 |
-
#
|
|
|
|
|
25 |
|
26 |
-
#
|
27 |
-
|
28 |
-
|
29 |
-
# print(os.listdir(path))
|
30 |
-
# print('-'*10)
|
31 |
|
32 |
-
#
|
33 |
-
|
34 |
-
|
35 |
-
|
|
|
|
|
36 |
|
37 |
|
38 |
-
# subprocess.run(['apt-get install fluidsynth'], shell=True) # do not have the right perms
|
39 |
-
# results = subprocess.run(['find / | grep "fluidsynth"'], capture_output=True, shell=True)
|
40 |
-
results = subprocess.run(['find / | grep "FluidR"'], capture_output=True, shell=True) # look for sound fonts
|
41 |
-
print(results.stdout)
|
42 |
|
43 |
-
# subprocess.run(['PATH=$PATH:~/opt/bin'])
|
44 |
-
# subprocess.run(['PATH=$PATH:~/opt/bin/fluidsynth'])
|
45 |
-
# subprocess.run(['fluidsynth'], shell=True)
|
46 |
-
|
47 |
def process_midi(midi_file):
|
48 |
name = Path(midi_file.name)
|
49 |
-
|
50 |
-
#
|
51 |
-
|
52 |
-
|
53 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
54 |
|
55 |
iface = gr.Interface(
|
56 |
fn=process_midi,
|
|
|
1 |
import gradio as gr
|
2 |
|
|
|
3 |
from musicautobot.utils.setup_musescore import play_wav
|
4 |
from music21.midi.translate import midiFileToStream
|
5 |
from pathlib import Path
|
6 |
from midi2audio import FluidSynth
|
7 |
|
8 |
+
from musicautobot.numpy_encode import *
|
9 |
+
from musicautobot.config import *
|
10 |
+
from musicautobot.music_transformer import *
|
11 |
+
from musicautobot.utils.midifile import *
|
12 |
+
from musicautobot.utils.file_processing import process_all
|
13 |
+
|
14 |
+
import pickle
|
15 |
+
|
16 |
import subprocess
|
17 |
import os
|
18 |
|
19 |
print(os.getcwd())
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
20 |
|
21 |
+
# Reload the serialized dataset bundle; the vocab it carries is required to
# encode incoming MIDI for the model.
data_dir = Path('data/')
data = load_data(data_dir, 'data.pkl')

# Start from the stock config, then enable positional encoding.
config = default_config()
config['encode_position'] = True

# Restore the fine-tuned transformer weights from disk.
learner = music_model_learner(data, config=config.copy(), pretrained_path='model.pth')
|
35 |
|
36 |
|
|
|
|
|
|
|
|
|
37 |
|
|
|
|
|
|
|
|
|
38 |
def process_midi(midi_file):
    """Generate a model continuation of an uploaded MIDI file and render it to WAV.

    Parameters
    ----------
    midi_file : gradio file object
        Uploaded file; only its ``.name`` (a filesystem path) is used.

    Returns
    -------
    str
        Path to ``'result.wav'``, the synthesized audio of input + prediction.
    """
    # The module-level import only brings in midiFileToStream; pull in the
    # converter we actually need here (the original referenced
    # music21.midi.translate.streamToMidiFile, but `music21` itself is not
    # visibly imported at module level).
    from music21.midi.translate import streamToMidiFile

    name = Path(midi_file.name)

    # create the model input object from the uploaded MIDI
    item = MusicItem.from_file(name, data.vocab)

    # full is the prediction appended to the input.
    # BUG FIX: the original called `learn.predict`, but the model is bound as
    # `learner` at module level — that NameError crashed the Space.
    pred, full = learner.predict(item, n_words=100)

    # convert to stream and then to a MIDI file object
    stream = full.to_stream()
    out = streamToMidiFile(stream)

    # save MIDI file
    out.open('result.midi', 'wb')
    out.write()
    out.close()

    # use fluidsynth to convert MIDI to WAV so the user can hear the output
    sound_font = "/usr/share/sounds/sf2/FluidR3_GM.sf2"
    FluidSynth(sound_font).midi_to_audio('result.midi', 'result.wav')
    return 'result.wav'
|
60 |
|
61 |
iface = gr.Interface(
|
62 |
fn=process_midi,
|