import os
from typing import TypeAlias

import torch
from PIL.Image import Image
from diffusers import FluxPipeline, FluxTransformer2DModel, AutoencoderTiny
from huggingface_hub.constants import HF_HUB_CACHE
from pipelines.models import TextToImageRequest
from torchao.quantization import quantize_, int8_weight_only

Pipeline: TypeAlias = FluxPipeline
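
# Autotuning flags: cuDNN benchmarking and Inductor coordinate-descent tuning
# spend extra time during warm-up picking kernels, in exchange for faster
# steady-state inference on fixed shapes.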
torch.backends.cudnn.benchmark = True
torch._inductor.config.conv_1x1_as_mm = True
torch._inductor.config.coordinate_descent_tuning = True
torch._inductor.config.epilogue_fusion = False
torch._inductor.config.coordinate_descent_check_all_directions = True
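
# Expandable segments let the CUDA caching allocator grow existing memory
# segments on demand, reducing fragmentation.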
os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"
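
# Pinned revisions: a quantized Flux.1-schnell build (per the repo name) and
# madebyollin's Tiny AutoEncoder for FLUX.1 (TAEF1) as a lightweight VAE.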
CHECKPOINT = "manbeast3b/Flux.1.schnell-quant2"
REVISION = "44eb293715147878512da10bf3bc47cd14ec8c55"

TinyVAE = "madebyollin/taef1"
TinyVAE_REV = "2d552378e58c9c94201075708d7de4e1163b2689"


def load_pipeline() -> Pipeline:
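    # Resolve the transformer subfolder of the pinned snapshot inside the local
    # HF cache; local_files_only=True assumes the weights were fetched beforehand.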
    path = os.path.join(
        HF_HUB_CACHE,
        "models--manbeast3b--Flux.1.schnell-quant2",
        "snapshots", REVISION, "transformer",
    )
    transformer = FluxTransformer2DModel.from_pretrained(
        path,
        use_safetensors=False,
        local_files_only=True,
        torch_dtype=torch.bfloat16,
    )
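
    # Overlay local .pth weights onto the tiny VAE's encoder and decoder;
    # strict=False ignores any keys the checkpoints do not cover.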
    vae = AutoencoderTiny.from_pretrained(
        TinyVAE,
        revision=TinyVAE_REV,
        local_files_only=True,
        torch_dtype=torch.bfloat16,
    )
    vae.encoder.load_state_dict(torch.load("encoder.pth"), strict=False)
    vae.decoder.load_state_dict(torch.load("decoder.pth"), strict=False)
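
    # Assemble the pipeline around the swapped-in transformer and tiny VAE.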
    pipeline = FluxPipeline.from_pretrained(
        CHECKPOINT,
        revision=REVISION,
        transformer=transformer,
        vae=vae,
        local_files_only=True,
        torch_dtype=torch.bfloat16,
    ).to("cuda")
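
    # channels_last favors convolution throughput; the VAE also gets torchao
    # int8 weight-only quantization and a max-autotune torch.compile pass.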
    pipeline.to(memory_format=torch.channels_last)
    quantize_(pipeline.vae, int8_weight_only())
    pipeline.vae = torch.compile(pipeline.vae, mode="max-autotune", fullgraph=True)
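
    # Two throwaway generations trigger cuDNN benchmarking and torch.compile
    # autotuning now, so the first real request runs at full speed.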
    with torch.inference_mode():
        for _ in range(2):
            pipeline("cat", num_inference_steps=4)

    return pipeline


@torch.inference_mode()
def infer(request: TextToImageRequest, pipeline: Pipeline, generator: torch.Generator) -> Image:
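    # Flux.1-schnell is timestep-distilled and trained without classifier-free
    # guidance, hence guidance_scale=0.0 and num_inference_steps=4.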
    return pipeline(
        request.prompt,
        generator=generator,
        guidance_scale=0.0,
        num_inference_steps=4,
        max_sequence_length=256,
        height=request.height,
        width=request.width,
    ).images[0]