comfyanonymous committed
Commit 4f1e20c · verified · 1 Parent(s): 835382f

Upload z_image_convert_original_to_comfy.py

z_image_convert_original_to_comfy.py ADDED
@@ -0,0 +1,52 @@
+ import safetensors.torch
+ import torch
+ import sys
+
+ # Usage: python z_image_convert_original_to_comfy.py output.safetensors diffusion_model*.safetensors
+
+ cast_to = None
+ if "fp8_e4m3fn" in sys.argv[1]:
+     cast_to = torch.float8_e4m3fn
+ elif "fp16" in sys.argv[1]:
+     cast_to = torch.float16
+ elif "bf16" in sys.argv[1]:
+     cast_to = torch.bfloat16
+
+ replace_keys = {"all_final_layer.2-1.": "final_layer.",
+                 "all_x_embedder.2-1.": "x_embedder.",
+                 ".attention.to_out.0.bias": ".attention.out.bias",
+                 ".attention.norm_k.weight": ".attention.k_norm.weight",
+                 ".attention.norm_q.weight": ".attention.q_norm.weight",
+                 ".attention.to_out.0.weight": ".attention.out.weight"
+                 }
+
+ out_sd = {}
+ for f in sys.argv[2:]:
+     sd = safetensors.torch.load_file(f)
+     cc = None
+     for k in sd:
+         w = sd[k]
+
+         if cast_to is not None:
+             w = w.to(cast_to)
+         k_out = k
+         if k_out.endswith(".attention.to_out.0.bias"):
+             continue
+         if k_out.endswith(".attention.to_k.weight"):
+             cc = [w]
+             continue
+         if k_out.endswith(".attention.to_q.weight"):
+             cc = [w] + cc
+             continue
+         if k_out.endswith(".attention.to_v.weight"):
+             cc = cc + [w]
+             w = torch.cat(cc, dim=0)
+             k_out = k_out.replace(".attention.to_v.weight", ".attention.qkv.weight")
+
+         for r, rr in replace_keys.items():
+             k_out = k_out.replace(r, rr)
+         out_sd[k_out] = w
+
+
+
+ safetensors.torch.save_file(out_sd, sys.argv[1])
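A minimal sanity-check sketch (not part of the commit) follows. It assumes the converter above has already been run and that the output was named z_image_bf16.safetensors, a hypothetical file name chosen here for illustration. The sketch only checks the two things the script changes about attention weights: the per-block to_q/to_k/to_v projections should have been fused into single attention.qkv.weight tensors, and none of the original split keys should remain.

# Sanity-check sketch: verifies the qkv fusion in a converted file.
# Assumption: "z_image_bf16.safetensors" is a hypothetical output name produced
# by running the converter with a bf16 output path.
import safetensors.torch

sd = safetensors.torch.load_file("z_image_bf16.safetensors")

# Fused attention projections the converter writes out.
fused = [k for k in sd if k.endswith(".attention.qkv.weight")]
# Split projections from the original checkpoint; none should survive conversion.
leftover = [k for k in sd if k.endswith((".attention.to_q.weight",
                                         ".attention.to_k.weight",
                                         ".attention.to_v.weight"))]

print("fused qkv tensors:", len(fused))
print("leftover split q/k/v tensors (expected 0):", len(leftover))

Because the converter picks its cast dtype from the output file name, an output path containing bf16 yields bfloat16 tensors; fp16 and fp8_e4m3fn names behave analogously, and any other name leaves the original dtype untouched.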