from dataclasses import dataclass
from typing import Union, Optional, List, Any, Dict

import gradio as gr
import numpy as np
import random
import spaces
import torch
from huggingface_hub import hf_hub_download
from safetensors.torch import load_file as load_sft  # used by load_flow_model2 below

from diffusers import DiffusionPipeline, FlowMatchEulerDiscreteScheduler, AutoencoderTiny, AutoencoderKL, FluxPipeline
from transformers import CLIPTextModel, CLIPTokenizer, T5EncoderModel, T5TokenizerFast

from model import Flux, FluxParams
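

# Resolution-dependent timestep shift ("dynamic shifting") for the flow-matching
# scheduler: mu is interpolated linearly between base_shift and max_shift as the
# image token sequence length grows from base_seq_len to max_seq_len.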
def calculate_shift(
    image_seq_len,
    base_seq_len: int = 256,
    max_seq_len: int = 4096,
    base_shift: float = 0.5,
    max_shift: float = 1.16,
):
    m = (max_shift - base_shift) / (max_seq_len - base_seq_len)
    b = base_shift - m * base_seq_len
    mu = image_seq_len * m + b
    return mu
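

# Helper adapted from diffusers' pipeline utilities: configures the scheduler from
# either explicit timesteps, explicit sigmas, or a plain step count, and returns the
# resulting timesteps together with the effective number of inference steps.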
def retrieve_timesteps(
    scheduler,
    num_inference_steps: Optional[int] = None,
    device: Optional[Union[str, torch.device]] = None,
    timesteps: Optional[List[int]] = None,
    sigmas: Optional[List[float]] = None,
    **kwargs,
):
    if timesteps is not None and sigmas is not None:
        raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values")
    if timesteps is not None:
        scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs)
        timesteps = scheduler.timesteps
        num_inference_steps = len(timesteps)
    elif sigmas is not None:
        scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs)
        timesteps = scheduler.timesteps
        num_inference_steps = len(timesteps)
    else:
        scheduler.set_timesteps(num_inference_steps, device=device, **kwargs)
        timesteps = scheduler.timesteps
    return timesteps, num_inference_steps
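

# Variant of FluxPipeline.__call__ that decodes and yields an intermediate preview
# image after every denoising step, then yields a final image decoded with the
# higher-quality `good_vae`.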
@torch.inference_mode()
def flux_pipe_call_that_returns_an_iterable_of_images(
    self,
    prompt: Union[str, List[str]] = None,
    prompt_2: Optional[Union[str, List[str]]] = None,
    height: Optional[int] = None,
    width: Optional[int] = None,
    num_inference_steps: int = 28,
    timesteps: List[int] = None,
    guidance_scale: float = 3.5,
    num_images_per_prompt: Optional[int] = 1,
    generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
    latents: Optional[torch.FloatTensor] = None,
    prompt_embeds: Optional[torch.FloatTensor] = None,
    pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
    output_type: Optional[str] = "pil",
    return_dict: bool = True,
    joint_attention_kwargs: Optional[Dict[str, Any]] = None,
    max_sequence_length: int = 512,
    good_vae: Optional[Any] = None,
):
    height = height or self.default_sample_size * self.vae_scale_factor
    width = width or self.default_sample_size * self.vae_scale_factor

    self.check_inputs(
        prompt,
        prompt_2,
        height,
        width,
        prompt_embeds=prompt_embeds,
        pooled_prompt_embeds=pooled_prompt_embeds,
        max_sequence_length=max_sequence_length,
    )

    self._guidance_scale = guidance_scale
    self._joint_attention_kwargs = joint_attention_kwargs
    self._interrupt = False

    batch_size = 1 if isinstance(prompt, str) else len(prompt)
    device = self._execution_device

    lora_scale = joint_attention_kwargs.get("scale", None) if joint_attention_kwargs is not None else None
    prompt_embeds, pooled_prompt_embeds, text_ids = self.encode_prompt(
        prompt=prompt,
        prompt_2=prompt_2,
        prompt_embeds=prompt_embeds,
        pooled_prompt_embeds=pooled_prompt_embeds,
        device=device,
        num_images_per_prompt=num_images_per_prompt,
        max_sequence_length=max_sequence_length,
        lora_scale=lora_scale,
    )

    num_channels_latents = self.transformer.config.in_channels // 4
    latents, latent_image_ids = self.prepare_latents(
        batch_size * num_images_per_prompt,
        num_channels_latents,
        height,
        width,
        prompt_embeds.dtype,
        device,
        generator,
        latents,
    )
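
    # Build the sigma schedule and the resolution-dependent shift (mu) used for the
    # scheduler's dynamic shifting, then derive the actual timesteps.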
    sigmas = np.linspace(1.0, 1 / num_inference_steps, num_inference_steps)
    image_seq_len = latents.shape[1]
    mu = calculate_shift(
        image_seq_len,
        self.scheduler.config.base_image_seq_len,
        self.scheduler.config.max_image_seq_len,
        self.scheduler.config.base_shift,
        self.scheduler.config.max_shift,
    )
    timesteps, num_inference_steps = retrieve_timesteps(
        self.scheduler,
        num_inference_steps,
        device,
        timesteps,
        sigmas,
        mu=mu,
    )
    self._num_timesteps = len(timesteps)
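
    # Distilled-guidance embedding (FLUX dev style), then the denoising loop: each
    # iteration predicts the flow, decodes the current latents for a live preview,
    # and only then advances the scheduler.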
    guidance = torch.full([1], guidance_scale, device=device, dtype=torch.float32).expand(latents.shape[0]) if self.transformer.config.guidance_embeds else None

    for i, t in enumerate(timesteps):
        if self.interrupt:
            continue

        timestep = t.expand(latents.shape[0]).to(latents.dtype)

        noise_pred = self.transformer(
            hidden_states=latents,
            timestep=timestep / 1000,
            guidance=guidance,
            pooled_projections=pooled_prompt_embeds,
            encoder_hidden_states=prompt_embeds,
            txt_ids=text_ids,
            img_ids=latent_image_ids,
            joint_attention_kwargs=self.joint_attention_kwargs,
            return_dict=False,
        )[0]

        latents_for_image = self._unpack_latents(latents, height, width, self.vae_scale_factor)
        latents_for_image = (latents_for_image / self.vae.config.scaling_factor) + self.vae.config.shift_factor
        image = self.vae.decode(latents_for_image, return_dict=False)[0]
        yield self.image_processor.postprocess(image, output_type=output_type)[0]

        latents = self.scheduler.step(noise_pred, t, latents, return_dict=False)[0]
        torch.cuda.empty_cache()
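
    # Final pass: decode the finished latents with the full-quality VAE and yield the result.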
    latents = self._unpack_latents(latents, height, width, self.vae_scale_factor)
    latents = (latents / good_vae.config.scaling_factor) + good_vae.config.shift_factor
    image = good_vae.decode(latents, return_dict=False)[0]
    self.maybe_free_model_hooks()
    torch.cuda.empty_cache()
    yield self.image_processor.postprocess(image, output_type=output_type)[0]
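

# FLUX-Mini model specification: transformer hyper-parameters plus the Hub repos that
# hold the distilled transformer weights and the FLUX.1-dev autoencoder.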
@dataclass
class ModelSpec:
    params: FluxParams
    repo_id: str
    repo_flow: str
    repo_ae: str
    repo_id_ae: str


config = ModelSpec(
    repo_id="TencentARC/flux-mini",
    repo_flow="flux-mini.safetensors",
    repo_id_ae="black-forest-labs/FLUX.1-dev",
    repo_ae="ae.safetensors",
    params=FluxParams(
        in_channels=64,
        vec_in_dim=768,
        context_in_dim=4096,
        hidden_size=3072,
        mlp_ratio=4.0,
        num_heads=24,
        depth=5,
        depth_single_blocks=10,
        axes_dim=[16, 56, 56],
        theta=10_000,
        qkv_bias=True,
        guidance_embed=True,
    ),
)
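

# Download the FLUX-Mini transformer checkpoint from the Hub (when enabled) and load
# it into the custom Flux module defined in model.py.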
def load_flow_model2(config, device: str = "cuda", hf_download: bool = True):
    ckpt_path = None  # stays None if downloading is disabled; the model is then returned uninitialized
    if (config.repo_id is not None
        and config.repo_flow is not None
        and hf_download
    ):
        ckpt_path = hf_hub_download(config.repo_id, config.repo_flow.replace("sft", "safetensors"))

    model = Flux(config.params)
    if ckpt_path is not None:
        sd = load_sft(ckpt_path, device=str(device))
        model.load_state_dict(sd, strict=True)
    return model
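

# Load the FLUX.1-dev scheduler, VAE, text encoders and tokenizers plus the FLUX-Mini
# transformer, keeping the compute-heavy components in bfloat16 on the available device.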
dtype = torch.bfloat16
device = "cuda" if torch.cuda.is_available() else "cpu"

scheduler = FlowMatchEulerDiscreteScheduler.from_pretrained("black-forest-labs/FLUX.1-dev", subfolder="scheduler")
vae = AutoencoderKL.from_pretrained("black-forest-labs/FLUX.1-dev", subfolder="vae", torch_dtype=dtype).to(device)
good_vae = vae  # no separate preview VAE is loaded, so the same autoencoder also produces the final image
text_encoder = CLIPTextModel.from_pretrained("black-forest-labs/FLUX.1-dev", subfolder="text_encoder", torch_dtype=dtype).to(device)
tokenizer = CLIPTokenizer.from_pretrained("black-forest-labs/FLUX.1-dev", subfolder="tokenizer")
text_encoder_2 = T5EncoderModel.from_pretrained("black-forest-labs/FLUX.1-dev", subfolder="text_encoder_2", torch_dtype=dtype).to(device)
tokenizer_2 = T5TokenizerFast.from_pretrained("black-forest-labs/FLUX.1-dev", subfolder="tokenizer_2")
transformer = load_flow_model2(config, device).to(device=device, dtype=dtype)  # keep the transformer in the working dtype on the target device
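

# Assemble a standard FluxPipeline around the custom transformer and attach the
# preview-yielding call defined above as a bound method.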
pipe = FluxPipeline(
    scheduler,
    vae,
    text_encoder,
    tokenizer,
    text_encoder_2,
    tokenizer_2,
    transformer,
)
torch.cuda.empty_cache()

MAX_SEED = np.iinfo(np.int32).max
MAX_IMAGE_SIZE = 2048

pipe.flux_pipe_call_that_returns_an_iterable_of_images = flux_pipe_call_that_returns_an_iterable_of_images.__get__(pipe)
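

# Gradio handler: runs on ZeroGPU (up to 75 s per call) and streams each intermediate
# preview image to the UI as it is produced.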
@spaces.GPU(duration=75)
def infer(prompt, seed=42, randomize_seed=False, width=1024, height=1024, guidance_scale=3.5, num_inference_steps=28, progress=gr.Progress(track_tqdm=True)):
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    generator = torch.Generator().manual_seed(seed)

    for img in pipe.flux_pipe_call_that_returns_an_iterable_of_images(
        prompt=prompt,
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
        width=width,
        height=height,
        generator=generator,
        output_type="pil",
        good_vae=good_vae,
    ):
        yield img, seed
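

# Example prompts shown in the UI and the page styling.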
examples = [
    "thousands of luminous oysters on a shore reflecting and refracting the sunset",
    "profile of sad Socrates, full body, high detail, dramatic scene, Epic dynamic action, wide angle, cinematic, hyper realistic, concept art, warm muted tones as painted by Bernie Wrightson, Frank Frazetta",
    "ghosts, astronauts, robots, cats, superhero costumes, line drawings, naive, simple, exploring a strange planet, coloured pencil crayons, black canvas background, drawn by 5 year old child",
]

css = """
#col-container {
    margin: 0 auto;
    max-width: 520px;
}
"""
with gr.Blocks(css=css) as demo:

    with gr.Column(elem_id="col-container"):
        gr.Markdown("""# FLUX-Mini
A 3.2B param rectified flow transformer distilled from [FLUX.1 [dev]](https://blackforestlabs.ai/)
[[non-commercial license](https://huggingface.co/black-forest-labs/FLUX.1-dev/blob/main/LICENSE.md)]
""")

        with gr.Row():
            prompt = gr.Text(
                label="Prompt",
                show_label=False,
                max_lines=1,
                placeholder="Enter your prompt",
                container=False,
            )

            run_button = gr.Button("Run", scale=0)

        result = gr.Image(label="Result", show_label=False)

        with gr.Accordion("Advanced Settings", open=False):

            seed = gr.Slider(
                label="Seed",
                minimum=0,
                maximum=MAX_SEED,
                step=1,
                value=0,
            )

            randomize_seed = gr.Checkbox(label="Randomize seed", value=True)

            with gr.Row():
                width = gr.Slider(
                    label="Width",
                    minimum=256,
                    maximum=MAX_IMAGE_SIZE,
                    step=32,
                    value=1024,
                )

                height = gr.Slider(
                    label="Height",
                    minimum=256,
                    maximum=MAX_IMAGE_SIZE,
                    step=32,
                    value=1024,
                )

            with gr.Row():
                guidance_scale = gr.Slider(
                    label="Guidance Scale",
                    minimum=1,
                    maximum=15,
                    step=0.1,
                    value=3.5,
                )

                num_inference_steps = gr.Slider(
                    label="Number of inference steps",
                    minimum=1,
                    maximum=50,
                    step=1,
                    value=28,
                )

        gr.Examples(
            examples=examples,
            fn=infer,
            inputs=[prompt],
            outputs=[result, seed],
            cache_examples="lazy",
        )

        gr.on(
            triggers=[run_button.click, prompt.submit],
            fn=infer,
            inputs=[prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps],
            outputs=[result, seed],
        )

demo.launch()