AlekseyCalvin committed on
Commit a71c7f5
1 Parent(s): 78e58b7

Update app.py

Files changed (1)
  1. app.py +1 -2
app.py CHANGED
@@ -38,7 +38,7 @@ with open('loras.json', 'r') as f:
 
 dtype = torch.bfloat16
 
-pipe = FluxWithCFGPipeline.from_pretrained("ostris/OpenFLUX.1", torch_dtype=dtype).to("cuda")
+pipe = FluxWithCFGPipeline.from_pretrained("ostris/OpenFLUX.1", torch_dtype=dtype, num_single_layers=0, chunk_size=0).to("cuda")
 
 pipe.vae = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=dtype).to("cuda")
 
@@ -54,7 +54,6 @@ if clipmodel == "norm":
 maxtokens = 512
 clip_model = CLIPModel.from_pretrained(model_id, torch_dtype=torch.bfloat16, config=config, ignore_mismatched_sizes=False).to("cuda")
 clip_processor = CLIPProcessor.from_pretrained(model_id, padding="max_length", max_length=maxtokens, ignore_mismatched_sizes=False, return_tensors="pt", truncation=True)
-pipe.transformer = FluxTransformer2DModel.from_pretrained("ostris/OpenFLUX.1", subfolder="transformer", num_single_layers=0, chunk_size=0)
 pipe.tokenizer = clip_processor.tokenizer
 pipe.text_encoder = clip_model.text_model
 pipe.tokenizer_max_length = maxtokens
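
Note on the change: the commit drops the separate FluxTransformer2DModel reload and instead passes num_single_layers=0 and chunk_size=0 directly to the pipeline's from_pretrained call. A more explicit way to achieve a customized transformer at load time in diffusers is to preload the component and hand it to the pipeline loader as an override. The sketch below is only an illustration, not the commit's code: it assumes FluxWithCFGPipeline (the Space's own pipeline class, import path hypothetical) accepts component kwargs the same way DiffusionPipeline.from_pretrained does.

# Sketch only, not part of the commit: preload the customized transformer and
# pass it to the pipeline loader as a component override.
import torch
from diffusers import AutoencoderTiny, FluxTransformer2DModel
from pipeline_flux_with_cfg import FluxWithCFGPipeline  # hypothetical local module name

dtype = torch.bfloat16

# Same transformer arguments the pre-commit app.py used for its separate load.
transformer = FluxTransformer2DModel.from_pretrained(
    "ostris/OpenFLUX.1",
    subfolder="transformer",
    num_single_layers=0,
    chunk_size=0,
    torch_dtype=dtype,
)

pipe = FluxWithCFGPipeline.from_pretrained(
    "ostris/OpenFLUX.1",
    transformer=transformer,
    torch_dtype=dtype,
).to("cuda")
pipe.vae = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=dtype).to("cuda")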