Spaces: Running on Zero
[FireFlow] Add Examples
Commit a11dece by shuanholmes (parent: e09baa5)
Files changed:
- app.py (+32 -6)
- example_images/art.jpg (+0 -0)
- example_images/cartoon.jpg (+0 -0)
- example_images/dog.jpg (+0 -0)
- example_images/gold.jpg (+0 -0)
app.py CHANGED

@@ -53,10 +53,6 @@ ae = load_ae(name, device="cpu" if offload else torch_device)
 t5 = load_t5(device, max_length=256 if name == "flux-schnell" else 512)
 clip = load_clip(device)
 model = load_flow_model(name, device="cpu" if offload else torch_device)
-if offload:
-    model.cpu()
-    torch.cuda.empty_cache()
-    ae.encoder.to(torch_device)
 is_schnell = False
 output_dir = 'result'
 add_sampling_metadata = True
@@ -68,7 +64,7 @@ def edit(init_image, source_prompt, target_prompt, editing_strategy, num_steps,
     device = "cuda" if torch.cuda.is_available() else "cpu"
     torch.cuda.empty_cache()
     seed = None
-
+
     shape = init_image.shape

     new_h = shape[0] if shape[0] % 16 == 0 else shape[0] - shape[0] % 16
@@ -81,6 +77,11 @@ def edit(init_image, source_prompt, target_prompt, editing_strategy, num_steps,
     init_image = torch.from_numpy(init_image).permute(2, 0, 1).float() / 127.5 - 1
     init_image = init_image.unsqueeze(0)
     init_image = init_image.to(device)
+    if offload:
+        model.cpu()
+        torch.cuda.empty_cache()
+        ae.encoder.to(device)
+
     with torch.no_grad():
         init_image = ae.encode(init_image.to()).to(torch.bfloat16)

@@ -113,7 +114,7 @@ def edit(init_image, source_prompt, target_prompt, editing_strategy, num_steps,
     info['inject_step'] = min(inject_step, num_steps)
     info['reuse_v']= False
     info['editing_strategy']= " ".join(editing_strategy)
-    info['start_layer_index'] =
+    info['start_layer_index'] = 0
     info['end_layer_index'] = 37
     qkv_ratio = '1.0,1.0,1.0'
     info['qkv_ratio'] = list(map(float, qkv_ratio.split(',')))
@@ -193,6 +194,7 @@ def create_demo(model_name: str, device: str = "cuda:0" if torch.cuda.is_availab
     """
     description = r"""
     <b>Official 🤗 Gradio Demo</b> for <a href='https://github.com/HolmesShuan/FireFlow-Fast-Inversion-of-Rectified-Flow-for-Image-Semantic-Editing' target='_blank'><b>🔥FireFlow: Fast Inversion of Rectified Flow for Image Semantic Editing</b></a>.<br>
+    <b>Tips</b> 🔔: If the results are not satisfactory, consider slightly increasing the total number of timesteps 📈. Each editing technique produces distinct effects, so feel free to experiment and explore their possibilities!
     """
     article = r"""
     If you find our work helpful, we would greatly appreciate it if you could ⭐ our <a href='https://github.com/HolmesShuan/FireFlow-Fast-Inversion-of-Rectified-Flow-for-Image-Semantic-Editing' target='_blank'>GitHub repository</a>. Thank you for your support!
@@ -200,6 +202,15 @@ def create_demo(model_name: str, device: str = "cuda:0" if torch.cuda.is_availab
     css = '''
     .gradio-container {width: 85% !important}
     '''
+
+    # Pre-defined examples
+    examples = [
+        ["example_images/dog.jpg", "Photograph of a dog on the grass", "Photograph of a cat on the grass", ['replace_v'], 8, 1, 2.0],
+        ["example_images/gold.jpg", "3d melting gold render", "a cat in the style of 3d melting gold render", ['replace_v'], 8, 1, 2.0],
+        ["example_images/gold.jpg", "3d melting gold render", "a cat in the style of 3d melting gold render", ['replace_v'], 10, 1, 2.0],
+        ["example_images/art.jpg", "", "a vivid depiction of the Batman, featuring rich, dynamic colors, and a blend of realistic and abstract elements with dynamic splatter art.", ['add_q'], 8, 1, 2.0],
+    ]
+
     with gr.Blocks(css=css) as demo:
         # Add a title, description, and additional information
         gr.HTML(title)
@@ -266,7 +277,22 @@ def create_demo(model_name: str, device: str = "cuda:0" if torch.cuda.is_availab
             outputs=[output_image]
         )

+        # Add examples
+        gr.Examples(
+            examples=examples,
+            inputs=[
+                init_image,
+                source_prompt,
+                target_prompt,
+                editing_strategy,
+                num_steps,
+                inject_step,
+                guidance
+            ]
+        )
+
     return demo

+
 demo = create_demo("flux-dev", "cuda")
 demo.launch()
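For readers skimming the diff: the third hunk relocates the `if offload:` block from module import time into `edit()`, so the flow model is parked on the CPU and only the autoencoder's encoder is moved onto the GPU right before the input image is encoded. Below is a minimal sketch of that offload pattern, with `ae`, `model`, and `image` as placeholders for the already-loaded autoencoder, flow model, and preprocessed input rather than the Space's actual objects:

import torch

def encode_with_offload(ae, model, image, device="cuda", offload=True):
    # Sketch only: `ae` and `model` stand in for the loaded autoencoder
    # and flow model; `image` is the preprocessed input tensor.
    if offload:
        model.cpu()               # release GPU memory held by the flow model
        torch.cuda.empty_cache()  # hand cached blocks back to the allocator
        ae.encoder.to(device)     # only the encoder is needed for this step
    with torch.no_grad():
        latent = ae.encode(image.to(device)).to(torch.bfloat16)
    return latent

Deferring this to `edit()` also means nothing touches CUDA at import time, which is a plausible motivation on a ZeroGPU Space, where the GPU is only attached while a request is being served.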
example_images/art.jpg ADDED
example_images/cartoon.jpg ADDED
example_images/dog.jpg ADDED
example_images/gold.jpg ADDED
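These four images back the new `examples` rows in app.py (cartoon.jpg is added here but not yet referenced in the list). As a rough, self-contained illustration of the wiring, not the Space's actual layout, the sketch below shows how `gr.Examples` fills the listed inputs positionally when a row is clicked; the component types, labels, and slider ranges are assumptions, and it assumes example_images/dog.jpg from this commit exists on disk:

import gradio as gr

with gr.Blocks() as demo:
    # Hypothetical stand-ins for the Space's input components.
    init_image = gr.Image(label="Input image")
    source_prompt = gr.Textbox(label="Source prompt")
    target_prompt = gr.Textbox(label="Target prompt")
    editing_strategy = gr.CheckboxGroup(["replace_v", "add_q"], label="Editing strategy")
    num_steps = gr.Slider(1, 30, value=8, step=1, label="Total timesteps")
    inject_step = gr.Slider(0, 10, value=1, step=1, label="Inject step")
    guidance = gr.Slider(1.0, 10.0, value=2.0, label="Guidance")

    # One row per example; clicking a row assigns its values to `inputs` in order.
    gr.Examples(
        examples=[["example_images/dog.jpg",
                   "Photograph of a dog on the grass",
                   "Photograph of a cat on the grass",
                   ["replace_v"], 8, 1, 2.0]],
        inputs=[init_image, source_prompt, target_prompt,
                editing_strategy, num_steps, inject_step, guidance],
    )

demo.launch()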