Ashoka74 committed on
Commit
2958beb
·
verified ·
1 Parent(s): 43e5b66

Update app_merged.py

Browse files
Files changed (1) hide show
  1. app_merged.py +24 -4
app_merged.py CHANGED
@@ -175,13 +175,33 @@ unet = UNet2DConditionModel.from_pretrained(sd15_name, subfolder="unet")
175
  # torch_dtype=torch.bfloat16
176
  # ).to("cuda")
177
 
178
- from diffusers import FluxTransformer2DModel, FluxFillPipeline
179
  from transformers import T5EncoderModel
180
  import torch
181
 
182
- transformer = FluxTransformer2DModel.from_pretrained("AlekseyCalvin/FluxFillDev_fp8_Diffusers", subfolder="transformer", torch_dtype=torch.bfloat16).to("cuda")
183
- text_encoder_2 = T5EncoderModel.from_pretrained("AlekseyCalvin/FluxFillDev_fp8_Diffusers", subfolder="text_encoder_2", torch_dtype=torch.bfloat16).to("cuda")
184
- fill_pipe = FluxFillPipeline.from_pretrained("black-forest-labs/FLUX.1-Fill-dev", transformer=transformer, text_encoder_2=text_encoder_2, torch_dtype=torch.bfloat16).to("cuda")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
185
 
186
 
187
  try:
 
175
  # torch_dtype=torch.bfloat16
176
  # ).to("cuda")
177
 
178
from diffusers import FluxTransformer2DModel, FluxFillPipeline, GGUFQuantizationConfig
from transformers import T5EncoderModel
import torch

# Previous fp8 Diffusers-format loading, kept for reference:
# transformer = FluxTransformer2DModel.from_pretrained("AlekseyCalvin/FluxFillDev_fp8_Diffusers", subfolder="transformer", torch_dtype=torch.bfloat16).to("cuda")
# text_encoder_2 = T5EncoderModel.from_pretrained("AlekseyCalvin/FluxFillDev_fp8_Diffusers", subfolder="text_encoder_2", torch_dtype=torch.bfloat16).to("cuda")
# fill_pipe = FluxFillPipeline.from_pretrained("black-forest-labs/FLUX.1-Fill-dev", transformer=transformer, text_encoder_2=text_encoder_2, torch_dtype=torch.bfloat16).to("cuda")

# GGUF-quantized FLUX.1-Fill transformer. A direct Hub file download needs the
# /resolve/main/ path segment; the bare repo URL would 404.
ckpt_path = (
    "https://huggingface.co/SporkySporkness/FLUX.1-Fill-dev-GGUF/resolve/main/flux1-fill-dev-fp16-Q5_0-GGUF.gguf"
)

# Dequantize GGUF weights on the fly to bfloat16 at compute time.
transformer = FluxTransformer2DModel.from_single_file(
    ckpt_path,
    quantization_config=GGUFQuantizationConfig(compute_dtype=torch.bfloat16),
    torch_dtype=torch.bfloat16,
)

# FIX: the original called undefined `FluxPipeline` (only FluxFillPipeline is
# imported) against the non-Fill repo "black-forest-labs/FLUX.1-dev". A Fill
# transformer belongs in FluxFillPipeline loaded from FLUX.1-Fill-dev.
# FIX: dropped `generator=torch.manual_seed(0)` — `generator` is an argument
# to the pipeline's __call__ (per-inference seeding), not to from_pretrained.
fill_pipe = FluxFillPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-Fill-dev",
    transformer=transformer,
    torch_dtype=torch.bfloat16,
)

# Offload submodules to CPU between forward passes to fit in limited VRAM.
fill_pipe.enable_model_cpu_offload()
205
 
206
 
207
  try: