Spaces · Runtime error
Jordan Legg committed · Commit d2cb214 · 1 Parent(s): 2965592
update to schell fp8
Files changed:
- app.py (+46 -77)
- requirements.txt (+4 -3)
app.py
CHANGED
@@ -1,46 +1,46 @@
 import gradio as gr
 import numpy as np
 import random
-from diffusers import DiffusionPipeline
+import spaces
 import torch
+from diffusers import DiffusionPipeline
+
+# Enable TF32 for A100 (this is a form of FP8 computation)
+torch.backends.cuda.matmul.allow_tf32 = True
+torch.backends.cudnn.allow_tf32 = True
 
 device = "cuda" if torch.cuda.is_available() else "cpu"
+dtype = torch.float16 # Use float16 for loading
 
-if torch.cuda.is_available():
-    torch.cuda.max_memory_allocated(device=device)
-    pipe = DiffusionPipeline.from_pretrained("stabilityai/sdxl-turbo", torch_dtype=torch.float16, variant="fp16", use_safetensors=True)
-    pipe.enable_xformers_memory_efficient_attention()
-    pipe = pipe.to(device)
-else:
-    pipe = DiffusionPipeline.from_pretrained("stabilityai/sdxl-turbo", use_safetensors=True)
-    pipe = pipe.to(device)
+pipe = DiffusionPipeline.from_pretrained(
+    "Kijai/flux-fp8",
+    torch_dtype=dtype,
+    revision="main",
+    filename="flux1-schnell-fp8.safetensors"
+).to(device)
 
 MAX_SEED = np.iinfo(np.int32).max
-MAX_IMAGE_SIZE = 1024
+MAX_IMAGE_SIZE = 2048
 
-def infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps):
-
+@spaces.GPU()
+def infer(prompt, seed=42, randomize_seed=False, width=1024, height=1024, num_inference_steps=4, progress=gr.Progress(track_tqdm=True)):
     if randomize_seed:
         seed = random.randint(0, MAX_SEED)
-
     generator = torch.Generator().manual_seed(seed)
-
     image = pipe(
-        prompt = prompt,
-        negative_prompt = negative_prompt,
-        guidance_scale = guidance_scale,
-        num_inference_steps = num_inference_steps,
-        width = width,
-        height = height,
-        generator = generator
-    ).images[0]
-
-    return image
+        prompt=prompt,
+        width=width,
+        height=height,
+        num_inference_steps=num_inference_steps,
+        generator=generator,
+        guidance_scale=0.0
+    ).images[0]
+    return image, seed
 
 examples = [
-    "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k",
-    "An astronaut riding a green horse",
-    "A delicious ceviche cheesecake slice",
+    "a tiny astronaut hatching from an egg on the moon",
+    "a cat holding a sign that says hello world",
+    "an anime illustration of a wiener schnitzel",
 ]
 
 css="""
@@ -50,21 +50,13 @@ css="""
 }
 """
 
-if torch.cuda.is_available():
-    power_device = "GPU"
-else:
-    power_device = "CPU"
-
 with gr.Blocks(css=css) as demo:
-    
     with gr.Column(elem_id="col-container"):
-        gr.Markdown(f"""
-        # Text-to-Image Gradio Template
-        Currently running on {power_device}
+        gr.Markdown(f"""# FLUX.1 [schnell] FP8
+12B param rectified flow transformer distilled from [FLUX.1 [pro]](https://blackforestlabs.ai/) for 4 step generation
+[[blog](https://blackforestlabs.ai/announcing-black-forest-labs/)] [[model](https://huggingface.co/Kijai/flux-fp8)]
         """)
-        
         with gr.Row():
-            
             prompt = gr.Text(
                 label="Prompt",
                 show_label=False,
@@ -72,20 +64,9 @@ with gr.Blocks(css=css) as demo:
                 placeholder="Enter your prompt",
                 container=False,
             )
-            
             run_button = gr.Button("Run", scale=0)
-        
         result = gr.Image(label="Result", show_label=False)
-        
         with gr.Accordion("Advanced Settings", open=False):
-            
-            negative_prompt = gr.Text(
-                label="Negative prompt",
-                max_lines=1,
-                placeholder="Enter a negative prompt",
-                visible=False,
-            )
-            
             seed = gr.Slider(
                 label="Seed",
                 minimum=0,
@@ -93,54 +74,42 @@ with gr.Blocks(css=css) as demo:
                 step=1,
                 value=0,
             )
-            
             randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
-            
             with gr.Row():
-                
                 width = gr.Slider(
                     label="Width",
                     minimum=256,
                     maximum=MAX_IMAGE_SIZE,
                     step=32,
-                    value=512,
+                    value=1024,
                 )
-                
                 height = gr.Slider(
                     label="Height",
                     minimum=256,
                     maximum=MAX_IMAGE_SIZE,
                     step=32,
-                    value=512,
+                    value=1024,
                 )
-            
             with gr.Row():
-                
-                guidance_scale = gr.Slider(
-                    label="Guidance scale",
-                    minimum=0.0,
-                    maximum=10.0,
-                    step=0.1,
-                    value=0.0,
-                )
-                
                 num_inference_steps = gr.Slider(
                     label="Number of inference steps",
                     minimum=1,
-                    maximum=12,
+                    maximum=50,
                     step=1,
-                    value=2,
+                    value=4,
                 )
-            
         gr.Examples(
-            examples = examples,
-            inputs = [prompt]
+            examples=examples,
+            fn=infer,
+            inputs=[prompt],
+            outputs=[result, seed],
+            cache_examples="lazy"
         )
 
-    run_button.click(
-        fn = infer,
-        inputs = [prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps],
-        outputs = [result]
+    gr.on(
+        triggers=[run_button.click, prompt.submit],
+        fn=infer,
+        inputs=[prompt, seed, randomize_seed, width, height, num_inference_steps],
+        outputs=[result, seed]
     )
-    
-demo.queue().launch()
+
+demo.launch()
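Note on the new loading code: DiffusionPipeline.from_pretrained has no filename argument, and Kijai/flux-fp8 hosts standalone .safetensors checkpoints rather than a full diffusers folder layout (no model_index.json), so this call is a plausible cause of the Space's "Runtime error" status. TF32 is also NVIDIA's 19-bit TensorFloat matmul mode, not a form of FP8; it speeds up float32 matmuls independently of the checkpoint's storage dtype. A minimal sketch of an alternative load path, assuming a diffusers release that ships FluxPipeline and FluxTransformer2DModel.from_single_file (the flux-pipeline branch pinned in requirements.txt below may predate both):

import torch
from diffusers import FluxPipeline, FluxTransformer2DModel

# Load only the transformer from the single-file FP8 checkpoint,
# upcasting the weights to float16 for computation.
transformer = FluxTransformer2DModel.from_single_file(
    "https://huggingface.co/Kijai/flux-fp8/blob/main/flux1-schnell-fp8.safetensors",
    torch_dtype=torch.float16,
)

# Text encoders, VAE, and scheduler come from the base schnell repo.
pipe = FluxPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-schnell",
    transformer=transformer,
    torch_dtype=torch.float16,
).to("cuda")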
requirements.txt
CHANGED
@@ -1,6 +1,7 @@
 accelerate
-diffusers
+git+https://github.com/huggingface/diffusers.git@flux-pipeline
 invisible_watermark
 torch
-transformers
-xformers
+transformers==4.42.4
+xformers
+sentencepiece
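Note on the pinned dependencies: flux-pipeline is a moving branch, so the build is not reproducible and will break if the branch is deleted once the corresponding Flux PR merges. Pinning an exact commit is safer; a sketch, where the SHA is a hypothetical placeholder rather than the real branch head:

# requirements.txt (sketch)
git+https://github.com/huggingface/diffusers.git@0123abcd  # hypothetical commit SHA; substitute the real one

Once the Flux pipeline ships in a tagged release, a plain versioned pin (e.g. diffusers>=0.30.0, the first release to include FluxPipeline) can replace the git dependency.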