Spaces: Running on Zero
Update app.py
app.py CHANGED
@@ -20,7 +20,7 @@ from diffusers import AutoencoderKL
 from torch import Tensor, nn
 from transformers import CLIPTextModel, CLIPTokenizer
 from transformers import T5EncoderModel, T5Tokenizer
-from optimum.quanto import freeze, qfloat8, quantize
+# from optimum.quanto import freeze, qfloat8, quantize
 
 
 # ---------------- Encoders ----------------
@@ -65,8 +65,8 @@ device = "cuda"
 t5 = HFEmbedder("google/t5-v1_1-xxl", max_length=512, torch_dtype=torch.bfloat16).to(device)
 clip = HFEmbedder("openai/clip-vit-large-patch14", max_length=77, torch_dtype=torch.bfloat16).to(device)
 ae = AutoencoderKL.from_pretrained("black-forest-labs/FLUX.1-dev", subfolder="vae", torch_dtype=torch.bfloat16).to(device)
-quantize(t5, weights=qfloat8)
-freeze(t5)
+# quantize(t5, weights=qfloat8)
+# freeze(t5)
 
 
 # ---------------- NF4 ----------------
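For context: the calls commented out above come from optimum-quanto's post-load quantization API. A minimal, self-contained sketch of that float8 path (not the Space's full app.py, which wraps the encoder in HFEmbedder) might look like this:

import torch
from transformers import T5EncoderModel
from optimum.quanto import freeze, qfloat8, quantize

# Load the T5 text encoder in bf16, then quantize its weights to float8 in place.
t5 = T5EncoderModel.from_pretrained("google/t5-v1_1-xxl", torch_dtype=torch.bfloat16)
quantize(t5, weights=qfloat8)  # replace Linear weights with float8 quantized tensors
freeze(t5)                     # materialize the quantized weights and drop the bf16 originals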
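The surviving "# ---------------- NF4 ----------------" marker suggests the Space now relies on an NF4 path instead; that code is not part of this diff, so the bitsandbytes-based sketch below is only an assumption about what such a path could look like, with illustrative parameters not taken from app.py:

import torch
from transformers import T5EncoderModel, BitsAndBytesConfig

# Hypothetical NF4 (4-bit) load via bitsandbytes; settings are assumptions, not from the Space.
nf4_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,
)
t5 = T5EncoderModel.from_pretrained("google/t5-v1_1-xxl", quantization_config=nf4_config)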