Spaces: Running on Zero
Update app.py
app.py CHANGED
@@ -33,20 +33,6 @@ pipe = pipe.to(device)
 
 
 
-dtype = torch.float16
-pipe2 = FluxWithCFGPipeline.from_pretrained(
-    "black-forest-labs/FLUX.1-schnell", torch_dtype=dtype
-)
-pipe2.vae = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=dtype)
-pipe2.to("cuda")
-pipe2.load_lora_weights('hugovntr/flux-schnell-realism', weight_name='schnell-realism_v2.3.safetensors', adapter_name="better")
-pipe2.set_adapters(["better"], adapter_weights=[1.0])
-pipe2.fuse_lora(adapter_name=["better"], lora_scale=1.0)
-pipe2.unload_lora_weights()
-
-torch.cuda.empty_cache()
-
-
 
 
 def adjust_to_nearest_multiple(value, divisor=8):
@@ -110,8 +96,8 @@ def infer(
 
     generator = torch.Generator().manual_seed(seed)
 
-
-
+
+    image = pipe(
         prompt=prompt,
         negative_prompt=negative_prompt,
         guidance_scale=guidance_scale,
@@ -120,14 +106,7 @@ def infer(
         height=height,
         generator=generator,
     ).images[0]
-
-    img = pipe2.generate_images(
-        prompt=prompt,
-        width=width,
-        height=height,
-        num_inference_steps=num_inference_steps,
-        generator=generator
-    )
+
 
     return image, seed
 
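For reference, the block removed by this commit follows the standard diffusers LoRA workflow: load an adapter, activate it, fuse it into the base weights, then drop the standalone adapter tensors. A minimal sketch of that pattern, assuming the stock FluxPipeline stands in for the Space's custom FluxWithCFGPipeline (defined elsewhere in this repo); note that diffusers' fuse_lora expects adapter_names (plural), so the sketch uses that spelling.

# Sketch only: stock diffusers FluxPipeline stands in for the custom
# FluxWithCFGPipeline used in app.py; repos are the ones named in the diff.
import torch
from diffusers import AutoencoderTiny, FluxPipeline

dtype = torch.float16
pipe2 = FluxPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-schnell", torch_dtype=dtype
)
# Tiny VAE (taef1) for faster decoding, as in the removed code.
pipe2.vae = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=dtype)
pipe2.to("cuda")

# Load the realism LoRA under a named adapter, give it full weight,
# fuse it into the base weights, then discard the separate adapter state.
pipe2.load_lora_weights(
    "hugovntr/flux-schnell-realism",
    weight_name="schnell-realism_v2.3.safetensors",
    adapter_name="better",
)
pipe2.set_adapters(["better"], adapter_weights=[1.0])
pipe2.fuse_lora(adapter_names=["better"], lora_scale=1.0)
pipe2.unload_lora_weights()
torch.cuda.empty_cache()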
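After the change, infer() renders with the primary pipe alone. A self-contained sketch of that remaining call pattern; the checkpoint, device handling, and the infer() signature below are illustrative guesses, since only the call site is visible in this diff.

# Sketch only: `pipe` construction and the infer() signature are assumptions;
# the diff shows just the pipe(...) call and the return value.
import torch
from diffusers import DiffusionPipeline

device = "cuda" if torch.cuda.is_available() else "cpu"
pipe = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",  # illustrative checkpoint
    torch_dtype=torch.float16 if device == "cuda" else torch.float32,
)
pipe = pipe.to(device)

def infer(prompt, negative_prompt="", seed=0, width=1024, height=1024,
          guidance_scale=7.0, num_inference_steps=25):
    # A seeded generator makes the result reproducible for a given seed.
    generator = torch.Generator().manual_seed(seed)
    image = pipe(
        prompt=prompt,
        negative_prompt=negative_prompt,
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
        width=width,
        height=height,
        generator=generator,
    ).images[0]
    return image, seed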