from pathlib import Path

from diffusers import AutoencoderTiny
from optimum.exporters.onnx import export
from optimum.exporters.onnx.model_configs import VaeDecoderOnnxConfig, VaeEncoderOnnxConfig

taesd = AutoencoderTiny.from_pretrained("madebyollin/taesd")

# TAESD decoder: patch forward() so the exporter traces decode(), and reuse the
# SD VAE decoder ONNX config (task="semantic-segmentation" keeps image-shaped I/O).
taesd.forward = lambda latent_sample: taesd.decode(x=latent_sample)
export(model=taesd, config=VaeDecoderOnnxConfig(config=taesd.config, task="semantic-segmentation"), output=Path("./vae_decoder/model.onnx"))

# TAESD encoder: patch forward() so the exporter traces encode() and returns the
# latents under the output name the SD VAE encoder config expects.
taesd.forward = lambda sample: {"latent_sample": taesd.encode(x=sample)["latents"]}
export(model=taesd, config=VaeEncoderOnnxConfig(config=taesd.config, task="semantic-segmentation"), output=Path("./vae_encoder/model.onnx"))
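
# Optional smoke test of the exported graphs: a minimal sketch, assuming onnxruntime
# and numpy are installed and the two model.onnx files above were written. Input names
# are read from the sessions rather than hard-coded, since they depend on the optimum
# version; TAESD uses SD-style latents (4 channels, 8x downscale), so a 512x512 image
# should round-trip through a (1, 4, 64, 64) latent.
# import numpy as np
# import onnxruntime as ort
#
# enc = ort.InferenceSession("./vae_encoder/model.onnx")
# dec = ort.InferenceSession("./vae_decoder/model.onnx")
#
# image = np.random.rand(1, 3, 512, 512).astype(np.float32)
# latents = enc.run(None, {enc.get_inputs()[0].name: image})[0]
# decoded = dec.run(None, {dec.get_inputs()[0].name: latents})[0]
# print(latents.shape, decoded.shape)  # expect (1, 4, 64, 64) and (1, 3, 512, 512)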