Spaces:
Running
on
Zero
Running
on
Zero
AlekseyCalvin
committed on
Commit
•
0f50c05
1
Parent(s):
68e5fe4
Update app.py
Browse files
app.py
CHANGED
@@ -58,23 +58,19 @@ pipe = DiffusionPipeline.from_pretrained(base_model, torch_dtype=dtype).to("cuda
|
|
58 |
#pipe.vae = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=torch.float16).to("cuda")
|
59 |
torch.cuda.empty_cache()
|
60 |
|
61 |
-
|
62 |
-
|
63 |
-
|
64 |
-
|
65 |
-
|
66 |
-
if clipmodel == "norm":
|
67 |
-
model_id = "zer0int/CLIP-GmP-ViT-L-14"
|
68 |
-
config = CLIPConfig.from_pretrained(model_id)
|
69 |
-
maxtokens = 77
|
70 |
-
clip_model = CLIPModel.from_pretrained(model_id, torch_dtype=torch.bfloat16, config=config, ignore_mismatched_sizes=True).to("cuda")
|
71 |
-
clip_processor = CLIPProcessor.from_pretrained(model_id, padding="max_length", max_length=maxtokens, ignore_mismatched_sizes=True, return_tensors="pt", truncation=True)
|
72 |
-
|
73 |
pipe.tokenizer = clip_processor.tokenizer
|
74 |
pipe.text_encoder = clip_model.text_model
|
75 |
-
pipe.tokenizer_max_length =
|
76 |
pipe.text_encoder.dtype = torch.bfloat16
|
77 |
-
pipe.
|
|
|
|
|
|
|
78 |
|
79 |
# Load LoRAs from JSON file
|
80 |
with open('loras.json', 'r') as f:
|
|
|
58 |
#pipe.vae = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=torch.float16).to("cuda")
|
59 |
torch.cuda.empty_cache()
|
60 |
|
61 |
+
model_id = ("zer0int/LongCLIP-GmP-ViT-L-14")
|
62 |
+
config = CLIPConfig.from_pretrained(model_id)
|
63 |
+
config.text_config.max_position_embeddings = 248
|
64 |
+
clip_model = CLIPModel.from_pretrained(model_id, torch_dtype=torch.bfloat16, config=config, ignore_mismatched_sizes=True)
|
65 |
+
clip_processor = CLIPProcessor.from_pretrained(model_id, padding="max_length", max_length=248)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
66 |
pipe.tokenizer = clip_processor.tokenizer
|
67 |
pipe.text_encoder = clip_model.text_model
|
68 |
+
pipe.tokenizer_max_length = 248
|
69 |
pipe.text_encoder.dtype = torch.bfloat16
|
70 |
+
#pipe.text_encoder_2 = t5.text_model
|
71 |
+
|
72 |
+
pipe.vae = AutoencoderKL.from_pretrained("black-forest-labs/FLUX.1-dev", subfolder="vae", torch_dtype=dtype).to(device)
|
73 |
+
|
74 |
|
75 |
# Load LoRAs from JSON file
|
76 |
with open('loras.json', 'r') as f:
|