Spaces: Running on Zero
jadechoghari committed
Commit 9eb21f5 • Parent: ca1a401

update
README.md CHANGED
@@ -1,6 +1,6 @@
 ---
-title:
-emoji:
+title: OpenMusic
+emoji: 🎶
 colorFrom: indigo
 colorTo: yellow
 sdk: gradio
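For context: this YAML front matter is the Hugging Face Space card configuration. `title` and `emoji` set the Space's display name and icon, `colorFrom`/`colorTo` its card gradient, and `sdk` the runtime. A minimal sketch of reading these fields programmatically, assuming PyYAML is available; `read_space_card` is a hypothetical helper, not part of this repo:

```python
# Hypothetical helper: parse the Space card front matter from README.md.
# Assumes PyYAML; only the fields visible in the diff above are relied on.
import yaml

def read_space_card(path="README.md"):
    text = open(path, encoding="utf-8").read()
    # the front matter sits between the first two '---' fences
    _, front_matter, _ = text.split("---", 2)
    return yaml.safe_load(front_matter)

card = read_space_card()
print(card["title"], card["emoji"])  # OpenMusic 🎶
print(card["sdk"])                   # gradio
```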
app.py CHANGED
@@ -6,11 +6,11 @@ import sys
 
 # we will clone the repo and install the dependencies
 # NOTE: Still fixing bugs, not release, do not try :) !
-os.system('pip install -r qa_mdt/requirements.txt')
-os.system('pip install xformers==0.0.26.post1')
-os.system('pip install torchlibrosa==0.0.9 librosa==0.9.2')
-os.system('pip install -q pytorch_lightning==2.1.3 torchlibrosa==0.0.9 librosa==0.9.2 ftfy==6.1.1 braceexpand')
-os.system('pip install torch==2.3.0+cu121 torchvision==0.18.0+cu121 torchaudio==2.3.0 --index-url https://download.pytorch.org/whl/cu121')
+# os.system('pip install -r qa_mdt/requirements.txt')
+# os.system('pip install xformers==0.0.26.post1')
+# os.system('pip install torchlibrosa==0.0.9 librosa==0.9.2')
+# os.system('pip install -q pytorch_lightning==2.1.3 torchlibrosa==0.0.9 librosa==0.9.2 ftfy==6.1.1 braceexpand')
+# os.system('pip install torch==2.3.0+cu121 torchvision==0.18.0+cu121 torchaudio==2.3.0 --index-url https://download.pytorch.org/whl/cu121')
 
 # only then import the necessary modules from qa_mdt
 from qa_mdt.pipeline import MOSDiffusionPipeline
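This hunk disables the runtime installs rather than removing them. If runtime installation is ever re-enabled, a more defensive pattern than unconditional `os.system` calls is to check for the package before shelling out to pip. A minimal sketch; the `ensure` helper is hypothetical, and the version pins are copied from the diff above:

```python
# Hypothetical 'ensure' helper: install a pinned requirement only when the
# package is not already importable, instead of reinstalling on every start.
import importlib.util
import subprocess
import sys

def ensure(module_name, requirement):
    if importlib.util.find_spec(module_name) is None:
        subprocess.check_call([sys.executable, "-m", "pip", "install", requirement])

ensure("pytorch_lightning", "pytorch_lightning==2.1.3")  # pins from the diff above
ensure("torchlibrosa", "torchlibrosa==0.0.9")
ensure("librosa", "librosa==0.9.2")
```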
@@ -21,7 +21,8 @@ pipe = MOSDiffusionPipeline()
 # this runs the pipeline with user input and saves the output as 'awesome.wav'
 @spaces.GPU(duration=120)
 def generate_waveform(description):
-
+    high_quality_description = "high quality " + description
+    pipe(high_quality_description)
 
     generated_file_path = "./awesome.wav"
 
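The new body prepends "high quality " to the user's prompt before invoking the pipeline, a light prompt-engineering nudge; `pipe(...)` is called for its side effect of writing `./awesome.wav`, and `@spaces.GPU(duration=120)` reserves ZeroGPU hardware for up to 120 seconds per call. A sketch of how such a function is typically wired into a Gradio interface; the `gr.Interface` wiring below is an assumption, since the diff shows only the function body:

```python
# Sketch of the surrounding Gradio wiring (an assumption; not shown in the diff).
import gradio as gr

def generate_waveform(description):
    # nudge the model toward better output, as in the diff above
    high_quality_description = "high quality " + description
    pipe(high_quality_description)  # writes ./awesome.wav as a side effect
    return "./awesome.wav"

demo = gr.Interface(
    fn=generate_waveform,
    inputs=gr.Textbox(label="Describe the music you want"),
    outputs=gr.Audio(label="Generated audio"),
)
demo.launch()
```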
@@ -34,19 +35,15 @@ def generate_waveform(description):
 intro = """
 # 🎶 OpenMusic: AI-Powered Music Diffusion 🎶
 
-![OpenMusic Banner](./banner.png)
-
 Welcome to **OpenMusic**, a next-gen diffusion model designed to generate high-quality audio from text descriptions!
 
 Simply enter a description of the music you'd like to hear, and our AI will generate it for you.
 
----
-
 ### Powered by:
 
-- [GitHub
--
--
+- [GitHub](https://github.com/ivcylc/qa-mdt) [@changli](https://github.com/ivcylc).
+- [Paper](https://arxiv.org/pdf/2405.15863)
+- [HuggingFace](https://huggingface.co/jadechoghari/qa_mdt) [@jadechoghari](https://github.com/jadechoghari) 🤗.
 
 ---
 
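The reworked `intro` string drops the broken banner image and the truncated link list, replacing them with working GitHub, paper, and Hugging Face links. A minimal sketch of how such an intro string is usually rendered in a Gradio app; the `gr.Blocks` layout is an assumption, as the diff does not show this part:

```python
# Assumption: the intro markdown is rendered at the top of a Blocks layout.
import gradio as gr

with gr.Blocks() as demo:
    gr.Markdown(intro)  # the intro string edited in the hunk above
    # input textbox, generate button, and audio output would follow here

demo.launch()
```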