AlekseyCalvin committed on
Commit
78e58b7
·
verified ·
1 Parent(s): 9d0fe92

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +4 -2
app.py CHANGED
@@ -42,7 +42,6 @@ pipe = FluxWithCFGPipeline.from_pretrained("ostris/OpenFLUX.1", torch_dtype=dtyp
42
 
43
  pipe.vae = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=dtype).to("cuda")
44
 
45
-
46
  pipe.to("cuda")
47
  clipmodel = 'norm'
48
  if clipmodel == "long":
@@ -55,13 +54,16 @@ if clipmodel == "norm":
55
  maxtokens = 512
56
  clip_model = CLIPModel.from_pretrained(model_id, torch_dtype=torch.bfloat16, config=config, ignore_mismatched_sizes=False).to("cuda")
57
  clip_processor = CLIPProcessor.from_pretrained(model_id, padding="max_length", max_length=maxtokens, ignore_mismatched_sizes=False, return_tensors="pt", truncation=True)
58
- pipe.transformer = FluxTransformer2DModel.from_pretrained("ostris/OpenFLUX.1", num_single_layers=0, chunk_size=0)
59
  pipe.tokenizer = clip_processor.tokenizer
60
  pipe.text_encoder = clip_model.text_model
61
  pipe.tokenizer_max_length = maxtokens
62
  pipe.text_encoder.dtype = torch.bfloat16
63
  torch.cuda.empty_cache()
64
 
 
 
 
65
  MAX_SEED = 2**32-1
66
 
67
  class calculateDuration:
 
42
 
43
  pipe.vae = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=dtype).to("cuda")
44
 
 
45
  pipe.to("cuda")
46
  clipmodel = 'norm'
47
  if clipmodel == "long":
 
54
  maxtokens = 512
55
  clip_model = CLIPModel.from_pretrained(model_id, torch_dtype=torch.bfloat16, config=config, ignore_mismatched_sizes=False).to("cuda")
56
  clip_processor = CLIPProcessor.from_pretrained(model_id, padding="max_length", max_length=maxtokens, ignore_mismatched_sizes=False, return_tensors="pt", truncation=True)
57
+ pipe.transformer = FluxTransformer2DModel.from_pretrained("ostris/OpenFLUX.1", subfolder="transformer", num_single_layers=0, chunk_size=0)
58
  pipe.tokenizer = clip_processor.tokenizer
59
  pipe.text_encoder = clip_model.text_model
60
  pipe.tokenizer_max_length = maxtokens
61
  pipe.text_encoder.dtype = torch.bfloat16
62
  torch.cuda.empty_cache()
63
 
64
+ num_single_layers=0
65
+ chunk_size=0
66
+
67
  MAX_SEED = 2**32-1
68
 
69
  class calculateDuration: