Update app.py
app.py CHANGED
@@ -35,7 +35,7 @@ controlnet_model = 'InstantX/FLUX.1-dev-Controlnet-Canny-alpha'
 def generate(slider_x, slider_y, prompt, seed, iterations, steps, guidance_scale,
              x_concept_1, x_concept_2, y_concept_1, y_concept_2,
              avg_diff_x_1, avg_diff_x_2,
-             avg_diff_y_1, avg_diff_y_2,
+             avg_diff_y_1, avg_diff_y_2,correlation,
              img2img_type = None, img = None,
              controlnet_scale= None, ip_adapter_scale=None,
 
@@ -61,11 +61,11 @@ def generate(slider_x, slider_y, prompt, seed, iterations, steps, guidance_scale
 
     if img2img_type=="controlnet canny" and img is not None:
         control_img = process_controlnet_img(img)
-        image = t5_slider_controlnet.generate(prompt, guidance_scale=guidance_scale, image=control_img, controlnet_conditioning_scale =controlnet_scale, scale=0, scale_2nd=0, seed=seed, num_inference_steps=steps, avg_diff=avg_diff, avg_diff_2nd=avg_diff_2nd)
+        image = t5_slider_controlnet.generate(prompt, correlation_weight_factor=correlation, guidance_scale=guidance_scale, image=control_img, controlnet_conditioning_scale =controlnet_scale, scale=0, scale_2nd=0, seed=seed, num_inference_steps=steps, avg_diff=avg_diff, avg_diff_2nd=avg_diff_2nd)
     elif img2img_type=="ip adapter" and img is not None:
-        image = t5_slider.generate(prompt, guidance_scale=guidance_scale, ip_adapter_image=img, scale=0, scale_2nd=0, seed=seed, num_inference_steps=steps, avg_diff=avg_diff, avg_diff_2nd=avg_diff_2nd)
+        image = t5_slider.generate(prompt, guidance_scale=guidance_scale, correlation_weight_factor=correlation, ip_adapter_image=img, scale=0, scale_2nd=0, seed=seed, num_inference_steps=steps, avg_diff=avg_diff, avg_diff_2nd=avg_diff_2nd)
     else: # text to image
-        image = t5_slider.generate(prompt, guidance_scale=guidance_scale, scale=0, scale_2nd=0, seed=seed, num_inference_steps=steps, avg_diff=avg_diff, avg_diff_2nd=avg_diff_2nd)
+        image = t5_slider.generate(prompt, guidance_scale=guidance_scale, correlation_weight_factor=correlation, scale=0, scale_2nd=0, seed=seed, num_inference_steps=steps, avg_diff=avg_diff, avg_diff_2nd=avg_diff_2nd)
 
     end_time = time.time()
     print(f"generation time: {end_time - start_time:.2f} ms")
@@ -177,6 +177,13 @@ with gr.Blocks(css=css) as demo:
                 step=0.1,
                 value=5,
             )
+            correlation = gr.Slider(
+                label="correlation",
+                minimum=0.1,
+                maximum=1.0,
+                step=0.05,
+                value=0.6,
+            )
             seed = gr.Slider(minimum=0, maximum=np.iinfo(np.int32).max, label="Seed", interactive=True, randomize=True)
 
 
@@ -225,13 +232,13 @@ with gr.Blocks(css=css) as demo:
             seed_a = gr.Slider(minimum=0, maximum=np.iinfo(np.int32).max, label="Seed", interactive=True, randomize=True)
 
     submit.click(fn=generate,
-                 inputs=[slider_x, slider_y, prompt, seed, iterations, steps, guidance_scale, x_concept_1, x_concept_2, y_concept_1, y_concept_2, avg_diff_x, avg_diff_y,],
+                 inputs=[slider_x, slider_y, prompt, seed, iterations, steps, guidance_scale, x_concept_1, x_concept_2, y_concept_1, y_concept_2, avg_diff_x, avg_diff_y,correlation],
                  outputs=[x, y, x_concept_1, x_concept_2, y_concept_1, y_concept_2, avg_diff_x, avg_diff_y, output_image])
 
     generate_butt.click(fn=update_scales, inputs=[x,y, prompt, seed, steps, guidance_scale, avg_diff_x, avg_diff_y], outputs=[output_image])
     generate_butt_a.click(fn=update_scales, inputs=[x_a,y_a, prompt_a, seed_a, steps_a, guidance_scale_a, avg_diff_x, avg_diff_y, img2img_type, image, controlnet_conditioning_scale, ip_adapter_scale], outputs=[output_image_a])
     submit_a.click(fn=generate,
-                   inputs=[slider_x_a, slider_y_a, prompt_a, seed_a, iterations_a, steps_a, guidance_scale_a, x_concept_1, x_concept_2, y_concept_1, y_concept_2, avg_diff_x, avg_diff_y, img2img_type, image, controlnet_conditioning_scale, ip_adapter_scale],
+                   inputs=[slider_x_a, slider_y_a, prompt_a, seed_a, iterations_a, steps_a, guidance_scale_a, x_concept_1, x_concept_2, y_concept_1, y_concept_2, avg_diff_x, avg_diff_y, correlation, img2img_type, image, controlnet_conditioning_scale, ip_adapter_scale],
                    outputs=[x_a, y_a, x_concept_1, x_concept_2, y_concept_1, y_concept_2, avg_diff_x, avg_diff_y, output_image_a])
 
 
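
For reference, below is a minimal, self-contained sketch of the wiring this commit adds: a Gradio slider whose value travels through the click() inputs list into generate(), which forwards it to the pipeline as correlation_weight_factor. The fake_generate stub is a hypothetical stand-in for the Space's t5_slider pipeline (built elsewhere in app.py and not shown in this diff); everything else mirrors the pattern above.

import gradio as gr

# Hypothetical stand-in for t5_slider.generate; the real call also takes
# seed, num_inference_steps, avg_diff, etc. (see the diff above).
def fake_generate(prompt, correlation_weight_factor, guidance_scale):
    return f"{prompt} | correlation_weight_factor={correlation_weight_factor} | cfg={guidance_scale}"

def generate(prompt, guidance_scale, correlation):
    # Component values arrive positionally, in the order listed in inputs=[...];
    # the correlation slider value is passed on as correlation_weight_factor.
    return fake_generate(prompt,
                         correlation_weight_factor=correlation,
                         guidance_scale=guidance_scale)

with gr.Blocks() as demo:
    prompt = gr.Textbox(label="Prompt")
    guidance_scale = gr.Slider(label="Guidance scale", minimum=0.0, maximum=10.0, step=0.1, value=5)
    correlation = gr.Slider(label="correlation", minimum=0.1, maximum=1.0, step=0.05, value=0.6)
    output = gr.Textbox(label="Result")
    submit = gr.Button("Submit")
    submit.click(fn=generate, inputs=[prompt, guidance_scale, correlation], outputs=[output])

if __name__ == "__main__":
    demo.launch()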