Update pipeline.py
pipeline.py  CHANGED  (+2 -2)
@@ -182,11 +182,11 @@ class FluxWithCFGPipeline(DiffusionPipeline, FluxLoraLoaderMixin, FromSingleFile
         )
         prompt_embeds = self.text_encoder(text_input_ids.to(device), output_hidden_states=False)
 
-        _, seq_len, _ = prompt_embeds.shape
-
         # Use pooled output of CLIPTextModel
         prompt_embeds = prompt_embeds.pooler_output
         prompt_embeds = prompt_embeds.to(dtype=self.text_encoder.dtype, device=device)
+
+        _, seq_len, _ = prompt_embeds.shape
 
         # duplicate text embeddings for each generation per prompt, using mps friendly method
         prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt)
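
For context, a minimal standalone sketch of the duplication pattern referenced in the last two lines of the hunk. It is not part of this commit; the shapes, values, and the trailing reshape step are illustrative assumptions based on how the standard diffusers pipelines handle pooled CLIP embeddings.

import torch

# Stand-in for text_encoder(...).pooler_output: pooled CLIP embeddings,
# shape (batch_size, hidden_size). Dummy values, assumed sizes.
batch_size, hidden_size = 2, 768
num_images_per_prompt = 4
prompt_embeds = torch.randn(batch_size, hidden_size)

# Duplicate text embeddings for each generation per prompt (mps friendly method):
# repeat along the feature dimension, then reshape so each prompt row appears
# num_images_per_prompt times in the batch dimension. The view step is assumed
# to follow later in the method; only the repeat appears in this hunk.
prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt)
prompt_embeds = prompt_embeds.view(batch_size * num_images_per_prompt, -1)

print(prompt_embeds.shape)  # torch.Size([8, 768])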