better default values
pipelines/controlnet.py (+18 -12)
@@ -69,18 +69,18 @@ class Pipeline:
             2159232, min=0, title="Seed", field="seed", hide=True, id="seed"
         )
         steps: int = Field(
-
+            2, min=1, max=6, title="Steps", field="range", hide=True, id="steps"
         )
         width: int = Field(
-
+            512, min=2, max=15, title="Width", disabled=True, hide=True, id="width"
         )
         height: int = Field(
-
+            512, min=2, max=15, title="Height", disabled=True, hide=True, id="height"
         )
         guidance_scale: float = Field(
-            0.
+            0.0,
             min=0,
-            max=
+            max=2,
             step=0.001,
             title="Guidance Scale",
             field="range",
@@ -196,16 +196,21 @@ class Pipeline:
             image=[Image.new("RGB", (768, 768))],
             control_image=[Image.new("RGB", (768, 768))],
         )
-
-
-
-
-
-
+        if args.compel:
+            self.compel_proc = Compel(
+                tokenizer=self.pipe.tokenizer,
+                text_encoder=self.pipe.text_encoder,
+                truncate_long_prompts=False,
+            )

     def predict(self, params: "Pipeline.InputParams") -> Image.Image:
         generator = torch.manual_seed(params.seed)
-        prompt_embeds =
+        prompt_embeds = None
+        control_image = None
+        prompt = params.prompt
+        if hasattr(self, "compel_proc"):
+            prompt_embeds = self.compel_proc(params.prompt)
+
         control_image = self.canny_torch(
             params.image, params.canny_low_threshold, params.canny_high_threshold
         )
@@ -218,6 +223,7 @@ class Pipeline:
             image=params.image,
             control_image=control_image,
             prompt_embeds=prompt_embeds,
+            prompt=prompt,
             generator=generator,
             strength=strength,
             num_inference_steps=steps,
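For context on the first hunk: the InputParams fields carry extra keyword arguments (min, max, step, field, hide, id) alongside the new defaults, and that metadata is what a front end can use to render sliders and hidden controls. Below is a minimal sketch of reading the metadata back, assuming pydantic v1 semantics where unknown Field() kwargs are collected in FieldInfo.extra; the trimmed InputParams and the ui_schema helper are illustrative, not code from this repo.

```python
from pydantic import BaseModel, Field


class InputParams(BaseModel):
    # Trimmed stand-in for Pipeline.InputParams, using the new defaults.
    steps: int = Field(
        2, min=1, max=6, title="Steps", field="range", hide=True, id="steps"
    )
    guidance_scale: float = Field(
        0.0, min=0, max=2, step=0.001, title="Guidance Scale", field="range"
    )


def ui_schema(model):
    # Collect each field's default, title, and the custom UI hints that
    # pydantic v1 stores in FieldInfo.extra (min, max, step, field, hide, id).
    schema = {}
    for name, model_field in model.__fields__.items():
        info = model_field.field_info
        schema[name] = {
            "default": model_field.default,
            "title": info.title,
            **info.extra,
        }
    return schema


print(ui_schema(InputParams))
# {'steps': {'default': 2, 'title': 'Steps', 'min': 1, 'max': 6,
#            'field': 'range', 'hide': True, 'id': 'steps'}, ...}
```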
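For the second set of hunks: the commit makes Compel optional, building self.compel_proc only when args.compel is set and otherwise falling back to the plain prompt string. A standalone sketch of that pattern with a diffusers ControlNet img2img pipeline follows; the model IDs, the use_compel switch, and the example prompt are illustrative assumptions rather than what this Space actually loads (note that diffusers expects either prompt or prompt_embeds, not both).

```python
import torch
from compel import Compel
from diffusers import ControlNetModel, StableDiffusionControlNetImg2ImgPipeline
from PIL import Image

# Illustrative model choices, not necessarily the ones used by this Space.
controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")
pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", controlnet=controlnet
)

use_compel = True  # stands in for args.compel in the diff
if use_compel:
    compel_proc = Compel(
        tokenizer=pipe.tokenizer,
        text_encoder=pipe.text_encoder,
        truncate_long_prompts=False,  # don't cut prompts at the 77-token limit
    )

prompt = "a portrait photo, sharp focus++"  # Compel weighting syntax
prompt_embeds = None
if use_compel:
    prompt_embeds = compel_proc(prompt)  # weighted prompt -> embedding tensor
    prompt = None  # the pipeline accepts either prompt or prompt_embeds

generator = torch.manual_seed(2159232)
result = pipe(
    prompt=prompt,
    prompt_embeds=prompt_embeds,
    image=Image.new("RGB", (512, 512)),
    control_image=Image.new("RGB", (512, 512)),
    generator=generator,
    strength=0.8,
    num_inference_steps=2,
    guidance_scale=0.0,
).images[0]
```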