from diffusers import Transformer2DModel

# Diffusion transformer config: 12 blocks, 16 heads x 64 dims (inner dim 1024),
# operating on 128x128 latents with 4 input channels split into 2x2 patches.
model = Transformer2DModel(
    activation_fn="gelu-approximate",
    attention_bias=True,
    attention_head_dim=64,
    attention_type="default",
    caption_channels=1024,        # width of the text-encoder embeddings fed to the caption projection
    cross_attention_dim=768,      # key/value width for the cross-attention layers
    double_self_attention=False,
    dropout=0.0,
    in_channels=4,                # latent channels in
    norm_elementwise_affine=False,
    norm_eps=1e-06,
    norm_num_groups=32,
    norm_type="ada_norm_single",  # single adaLN conditioning (PixArt-alpha style)
    num_attention_heads=16,
    num_embeds_ada_norm=1000,
    num_layers=12,
    only_cross_attention=False,
    out_channels=8,               # latent channels out
    patch_size=2,
    sample_size=128,
    upcast_attention=False,
    use_linear_projection=False,
)

save_folder = r'G:\tiny-pixel-art\transformer'
print(model.num_parameters())       # total parameter count
model.save_pretrained(save_folder)  # writes the weights plus config.json
model.save_config(save_folder)      # redundant: save_pretrained already writes config.json
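As a quick sanity check on the export, the saved folder can be loaded back with `from_pretrained` and the parameter count compared against the number printed above. This is a minimal sketch that assumes the same `G:\tiny-pixel-art\transformer` path used for `save_folder`.

```python
from diffusers import Transformer2DModel

# Reload the transformer from the folder written by save_pretrained above
# (path is the same save_folder used in the snippet above).
reloaded = Transformer2DModel.from_pretrained(r'G:\tiny-pixel-art\transformer')

# Should match the count printed before saving.
print(reloaded.num_parameters())

# The saved config.json is restored as well.
print(reloaded.config.num_layers, reloaded.config.patch_size)
```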