lixiang46 committed
Commit e9c3996 • 1 parent: e3f5833
update
app.py
CHANGED
@@ -17,12 +17,12 @@ ckpt_IPA_dir = '/home/lixiang46/Kolors/weights/Kolors-IP-Adapter-Plus'
 # ckpt_dir = snapshot_download(repo_id="Kwai-Kolors/Kolors")
 # ckpt_IPA_dir = snapshot_download(repo_id="Kwai-Kolors/Kolors-IP-Adapter-Plus")
 
-text_encoder = ChatGLMModel.from_pretrained(f'{ckpt_dir}/text_encoder', torch_dtype=torch.float16).half()
+text_encoder = ChatGLMModel.from_pretrained(f'{ckpt_dir}/text_encoder', torch_dtype=torch.float16).half()
 tokenizer = ChatGLMTokenizer.from_pretrained(f'{ckpt_dir}/text_encoder')
-vae = AutoencoderKL.from_pretrained(f"{ckpt_dir}/vae", revision=None).half()
+vae = AutoencoderKL.from_pretrained(f"{ckpt_dir}/vae", revision=None).half()
 scheduler = EulerDiscreteScheduler.from_pretrained(f"{ckpt_dir}/scheduler")
-unet_t2i = UNet2DConditionModel.from_pretrained(f"{ckpt_dir}/unet", revision=None).half()
-unet_i2i = unet_2d_condition.UNet2DConditionModel.from_pretrained(f"{ckpt_dir}/unet", revision=None).half()
+unet_t2i = UNet2DConditionModel.from_pretrained(f"{ckpt_dir}/unet", revision=None).half()
+unet_i2i = unet_2d_condition.UNet2DConditionModel.from_pretrained(f"{ckpt_dir}/unet", revision=None).half()
 image_encoder = CLIPVisionModelWithProjection.from_pretrained(f'{ckpt_IPA_dir}/image_encoder',ignore_mismatched_sizes=True).to(dtype=torch.float16, device=device)
 ip_img_size = 336
 clip_image_processor = CLIPImageProcessor(size=ip_img_size, crop_size=ip_img_size)
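For readers reproducing this setup: the two commented-out lines at the top of the hunk are the Hub-download alternative to the hard-coded /home/lixiang46/Kolors/weights paths, and the objects loaded here are the usual constructor inputs for the Kolors pipelines. Below is a minimal sketch of both steps; the kolors import path and pipeline keyword arguments follow the public Kwai-Kolors/Kolors examples and are assumptions, not part of this commit.

from huggingface_hub import snapshot_download

# Pull the checkpoints from the Hub instead of the hard-coded local paths
# (mirrors the two commented-out lines in the hunk above).
ckpt_dir = snapshot_download(repo_id="Kwai-Kolors/Kolors")
ckpt_IPA_dir = snapshot_download(repo_id="Kwai-Kolors/Kolors-IP-Adapter-Plus")

# Assumed wiring of the components into the text-to-image pipeline.
# text_encoder, tokenizer, vae, scheduler, unet_t2i and device are the
# objects created in app.py above; the class path and kwargs follow the
# Kwai-Kolors/Kolors examples and may differ from this Space's actual code.
from kolors.pipelines.pipeline_stable_diffusion_xl_chatglm_256 import StableDiffusionXLPipeline

pipe_t2i = StableDiffusionXLPipeline(
    vae=vae,
    text_encoder=text_encoder,
    tokenizer=tokenizer,
    unet=unet_t2i,
    scheduler=scheduler,
    force_zeros_for_empty_prompt=False,
).to(device)

image = pipe_t2i(prompt="a portrait photo", num_inference_steps=25, guidance_scale=5.0).images[0]

Note that snapshot_download caches under the Hugging Face cache directory (~/.cache/huggingface by default), so swapping between the local-path and Hub variants needs no other code changes.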