AlekseyCalvin committed
Commit 90b0ce2 (verified) · Parent: eaab94f

Update app.py

Files changed (1): app.py (+11, -8)
app.py CHANGED
@@ -13,13 +13,15 @@ import random
 import time
 from typing import Any, Dict, List, Optional, Union
 from huggingface_hub import hf_hub_download
-from diffusers import DiffusionPipeline, AutoencoderTiny, AutoPipelineForImage2Image
+from diffusers import DiffusionPipeline, AutoencoderTiny, AutoPipelineForImage2Image, ConfigMixin
 import safetensors.torch
 from safetensors.torch import load_file
 from pipeline import FluxWithCFGPipeline
 from transformers import CLIPModel, CLIPProcessor, CLIPConfig
 import gc
 import warnings
+import safetensors.torch
+
 
 cache_path = path.join(path.dirname(path.abspath(__file__)), "models")
 os.environ["TRANSFORMERS_CACHE"] = cache_path
@@ -35,10 +37,12 @@ with open('loras.json', 'r') as f:
     loras = json.load(f)
 
 dtype = torch.bfloat16
-pipe = FluxWithCFGPipeline.from_pretrained("ostris/OpenFLUX.1", torch_dtype=dtype, text_encoder_3=None, tokenizer_3=None
-).to("cuda")
+
+pipe = FluxWithCFGPipeline.from_pretrained("ostris/OpenFLUX.1", torch_dtype=dtype).to("cuda")
+
 pipe.vae = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=dtype).to("cuda")
 
+
 pipe.to("cuda")
 clipmodel = 'norm'
 if clipmodel == "long":
@@ -48,11 +52,10 @@ if clipmodel == "long":
 if clipmodel == "norm":
     model_id = "zer0int/CLIP-GmP-ViT-L-14"
     config = CLIPConfig.from_pretrained(model_id)
-    maxtokens = 77
-clip_model = CLIPModel.from_pretrained(model_id, torch_dtype=torch.bfloat16, config=config, ignore_mismatched_sizes=True).to("cuda")
-clip_processor = CLIPProcessor.from_pretrained(model_id, padding="max_length", max_length=maxtokens, ignore_mismatched_sizes=True, return_tensors="pt", truncation=True)
-config.text_config.max_position_embeddings = 77
-
+    maxtokens = 512
+clip_model = CLIPModel.from_pretrained(model_id, torch_dtype=torch.bfloat16, config=config, ignore_mismatched_sizes=False).to("cuda")
+clip_processor = CLIPProcessor.from_pretrained(model_id, padding="max_length", max_length=maxtokens, ignore_mismatched_sizes=False, return_tensors="pt", truncation=True)
+pipe.transformer=(FluxTransformer2DModel.from_pretrained("ostris/OpenFLUX.1", num_single_layers=0, chunk_size=0)
 pipe.tokenizer = clip_processor.tokenizer
 pipe.text_encoder = clip_model.text_model
 pipe.tokenizer_max_length = maxtokens
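Review notes on this diff:

As committed, the new transformer line will stop app.py from parsing at all: the parenthesis opened in `pipe.transformer=(FluxTransformer2DModel...` is never closed, and FluxTransformer2DModel is not imported anywhere (it is absent from the diffusers import line above). The hunk also re-imports safetensors.torch, which is already imported a few lines up; redundant but harmless. Below is a minimal, hedged sketch of what the line presumably intends. The subfolder="transformer" argument is an assumption about the standard diffusers repo layout, and the num_single_layers=0 / chunk_size=0 overrides are dropped here: zeroing num_single_layers on a pretrained checkpoint would discard the single-block weights, and chunk_size is not a loading argument I can verify for FluxTransformer2DModel.

# Hedged sketch, not the committed code: reload OpenFLUX.1's transformer explicitly,
# assuming the repo follows the standard diffusers pipeline layout and `pipe` is the
# FluxWithCFGPipeline constructed earlier in app.py.
import torch
from diffusers import FluxTransformer2DModel

transformer = FluxTransformer2DModel.from_pretrained(
    "ostris/OpenFLUX.1",
    subfolder="transformer",  # assumption: standard pipeline repo layout
    torch_dtype=torch.bfloat16,
)
pipe.transformer = transformer.to("cuda")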
 
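The same hunk raises maxtokens from 77 to 512 while flipping ignore_mismatched_sizes to False. A stock CLIP ViT-L/14 text encoder ships with 77 position embeddings, and with ignore_mismatched_sizes=False the checkpoint loads exactly as configured, so anything past the encoder's budget gets truncated (or fails at encode time) no matter what the processor pads to. A quick sanity check, reading the budget off the config instead of hard-coding it:

# Sanity-check sketch (not in the commit): query the text encoder's real token budget
# and keep the processor's max_length in step with it.
from transformers import CLIPConfig, CLIPProcessor

model_id = "zer0int/CLIP-GmP-ViT-L-14"
config = CLIPConfig.from_pretrained(model_id)
budget = config.text_config.max_position_embeddings
print(budget)  # a stock ViT-L/14 text encoder reports 77 here

processor = CLIPProcessor.from_pretrained(model_id)
batch = processor(text=["a test prompt"], padding="max_length",
                  max_length=budget, truncation=True, return_tensors="pt")
print(batch["input_ids"].shape)  # -> (1, budget)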
 
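For completeness, a usage sketch of the assembled pipeline. FluxWithCFGPipeline lives in the Space's local pipeline.py, so its call signature is assumed here to mirror the standard diffusers Flux pipeline; the prompt and step count are purely illustrative.

# Illustrative only: generate one image with the patched pipeline, assuming pipe's
# __call__ mirrors diffusers' FluxPipeline (prompt, num_inference_steps, guidance_scale).
image = pipe(
    "a watercolor fox in a snowy forest",
    num_inference_steps=4,
    guidance_scale=1.0,
).images[0]
image.save("out.png")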
 