# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Optional, Tuple, Union

import torch
import torch.nn as nn

from diffusers import AutoencoderKL
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.loaders.single_file_model import FromOriginalModelMixin
from diffusers.models.attention_processor import (
    ADDED_KV_ATTENTION_PROCESSORS, CROSS_ATTENTION_PROCESSORS, Attention,
    AttentionProcessor, AttnAddedKVProcessor, AttnProcessor)
from diffusers.models.autoencoders.vae import (DecoderOutput,
                                               DiagonalGaussianDistribution)
from diffusers.models.modeling_outputs import AutoencoderKLOutput
from diffusers.models.modeling_utils import ModelMixin
from diffusers.utils import logging
from diffusers.utils.accelerate_utils import apply_forward_hook

try:
    from diffusers.loaders import FromOriginalVAEMixin
except ImportError:
    from diffusers.loaders import FromOriginalModelMixin as FromOriginalVAEMixin

from ..vae.ldm.models.cogvideox_enc_dec import (CogVideoXCausalConv3d,
                                                CogVideoXDecoder3D,
                                                CogVideoXEncoder3D,
                                                CogVideoXSafeConv3d)
from ..vae.ldm.models.omnigen_enc_dec import CausalConv3d
from ..vae.ldm.models.omnigen_enc_dec import Decoder as omnigen_Mag_Decoder
from ..vae.ldm.models.omnigen_enc_dec import Encoder as omnigen_Mag_Encoder

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def str_eval(item):
    # Block-type lists may arrive from configs as their string representation;
    # evaluate those back into Python objects and pass everything else through.
    if isinstance(item, str):
        return eval(item)
    return item
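
# Example of the helper above (illustrative, not from the original file):
#   str_eval("[1, 2, 4, 4]")  ->  [1, 2, 4, 4]
#   str_eval([1, 2, 4, 4])    ->  [1, 2, 4, 4]  (non-strings pass through unchanged)
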
class AutoencoderKLMagvit(ModelMixin, ConfigMixin, FromOriginalVAEMixin):
    r"""
    A VAE model with KL loss for encoding images into latents and decoding latent representations into images.

    This model inherits from [`ModelMixin`]. Check the superclass documentation for its generic methods implemented
    for all models (such as downloading or saving).

    Parameters:
        in_channels (int, *optional*, defaults to 3): Number of channels in the input image.
        out_channels (int, *optional*, defaults to 3): Number of channels in the output.
        down_block_types (`Tuple[str]`, *optional*, defaults to `("DownEncoderBlock2D",)`):
            Tuple of downsample block types.
        up_block_types (`Tuple[str]`, *optional*, defaults to `("UpDecoderBlock2D",)`):
            Tuple of upsample block types.
        block_out_channels (`Tuple[int]`, *optional*, defaults to `(64,)`):
            Tuple of block output channels.
        act_fn (`str`, *optional*, defaults to `"silu"`): The activation function to use.
        latent_channels (`int`, *optional*, defaults to 4): Number of channels in the latent space.
        sample_size (`int`, *optional*, defaults to `32`): Sample input size.
        scaling_factor (`float`, *optional*, defaults to 0.18215):
            The component-wise standard deviation of the trained latent space computed using the first batch of the
            training set. This is used to scale the latent space to have unit variance when training the diffusion
            model. The latents are scaled with the formula `z = z * scaling_factor` before being passed to the
            diffusion model. When decoding, the latents are scaled back to the original scale with the formula: `z = 1
            / scaling_factor * z`. For more details, refer to sections 4.3.2 and D.1 of the [High-Resolution Image
            Synthesis with Latent Diffusion Models](https://arxiv.org/abs/2112.10752) paper.
        force_upcast (`bool`, *optional*, defaults to `True`):
            If enabled, it will force the VAE to run in float32 for high-resolution image pipelines, such as SD-XL.
            The VAE can be fine-tuned / trained to a lower range without losing too much precision, in which case
            `force_upcast` can be set to `False` - see: https://huggingface.co/madebyollin/sdxl-vae-fp16-fix
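
    Example:
        An illustrative usage sketch (not part of the original file). The checkpoint path is a placeholder, and the
        `(batch, channels, frames, height, width)` input layout and shape below are assumptions chosen only for
        demonstration:

        >>> import torch
        >>> vae = AutoencoderKLMagvit.from_pretrained("path/to/checkpoint", subfolder="vae")
        >>> video = torch.randn(1, 3, 9, 256, 256)  # dummy pixel-space video
        >>> latents = vae.encode(video).latent_dist.sample()
        >>> reconstruction = vae.decode(latents).sample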
""" | |
_supports_gradient_checkpointing = True | |
def __init__( | |
self, | |
in_channels: int = 3, | |
out_channels: int = 3, | |
ch = 128, | |
ch_mult = [ 1,2,4,4 ], | |
block_out_channels = [128, 256, 512, 512], | |
use_gc_blocks = None, | |
down_block_types: tuple = None, | |
up_block_types: tuple = None, | |
mid_block_type: str = "MidBlock3D", | |
mid_block_use_attention: bool = True, | |
mid_block_attention_type: str = "3d", | |
mid_block_num_attention_heads: int = 1, | |
layers_per_block: int = 2, | |
act_fn: str = "silu", | |
num_attention_heads: int = 1, | |
latent_channels: int = 4, | |
norm_num_groups: int = 32, | |
scaling_factor: float = 0.1825, | |
force_upcast: float = True, | |
slice_mag_vae=True, | |
slice_compression_vae=False, | |
cache_compression_vae=False, | |
cache_mag_vae=False, | |
use_tiling=False, | |
use_tiling_encoder=False, | |
use_tiling_decoder=False, | |
mini_batch_encoder=9, | |
mini_batch_decoder=3, | |
upcast_vae=False, | |
spatial_group_norm=False, | |
tile_sample_min_size=384, | |
tile_overlap_factor=0.25, | |
): | |
        super().__init__()
        down_block_types = str_eval(down_block_types)
        up_block_types = str_eval(up_block_types)

        self.encoder = omnigen_Mag_Encoder(
            in_channels=in_channels,
            out_channels=latent_channels,
            down_block_types=down_block_types,
            ch=ch,
            ch_mult=ch_mult,
            block_out_channels=block_out_channels,
            use_gc_blocks=use_gc_blocks,
            mid_block_type=mid_block_type,
            mid_block_use_attention=mid_block_use_attention,
            mid_block_attention_type=mid_block_attention_type,
            mid_block_num_attention_heads=mid_block_num_attention_heads,
            layers_per_block=layers_per_block,
            norm_num_groups=norm_num_groups,
            act_fn=act_fn,
            num_attention_heads=num_attention_heads,
            double_z=True,
            slice_mag_vae=slice_mag_vae,
            slice_compression_vae=slice_compression_vae,
            cache_compression_vae=cache_compression_vae,
            cache_mag_vae=cache_mag_vae,
            mini_batch_encoder=mini_batch_encoder,
            spatial_group_norm=spatial_group_norm,
        )

        self.decoder = omnigen_Mag_Decoder(
            in_channels=latent_channels,
            out_channels=out_channels,
            up_block_types=up_block_types,
            ch=ch,
            ch_mult=ch_mult,
            block_out_channels=block_out_channels,
            use_gc_blocks=use_gc_blocks,
            mid_block_type=mid_block_type,
            mid_block_use_attention=mid_block_use_attention,
            mid_block_attention_type=mid_block_attention_type,
            mid_block_num_attention_heads=mid_block_num_attention_heads,
            layers_per_block=layers_per_block,
            norm_num_groups=norm_num_groups,
            act_fn=act_fn,
            num_attention_heads=num_attention_heads,
            slice_mag_vae=slice_mag_vae,
            slice_compression_vae=slice_compression_vae,
            cache_compression_vae=cache_compression_vae,
            cache_mag_vae=cache_mag_vae,
            mini_batch_decoder=mini_batch_decoder,
            spatial_group_norm=spatial_group_norm,
        )

        self.quant_conv = nn.Conv3d(2 * latent_channels, 2 * latent_channels, kernel_size=1)
        self.post_quant_conv = nn.Conv3d(latent_channels, latent_channels, kernel_size=1)

        self.slice_mag_vae = slice_mag_vae
        self.slice_compression_vae = slice_compression_vae
        self.cache_compression_vae = cache_compression_vae
        self.cache_mag_vae = cache_mag_vae
        self.mini_batch_encoder = mini_batch_encoder
        self.mini_batch_decoder = mini_batch_decoder
        self.use_slicing = False
        self.use_tiling = use_tiling
        self.use_tiling_encoder = use_tiling_encoder
        self.use_tiling_decoder = use_tiling_decoder
        self.upcast_vae = upcast_vae
        self.tile_sample_min_size = tile_sample_min_size
        self.tile_overlap_factor = tile_overlap_factor
        self.tile_latent_min_size = int(self.tile_sample_min_size / (2 ** (len(ch_mult) - 1)))
        self.scaling_factor = scaling_factor

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, (omnigen_Mag_Encoder, omnigen_Mag_Decoder)):
            module.gradient_checkpointing = value

    def _clear_conv_cache(self):
        # Clear the feature cache kept by every CausalConv3d module between calls.
        for name, module in self.named_modules():
            if isinstance(module, CausalConv3d):
                module._clear_conv_cache()

    @apply_forward_hook
    def encode(
        self, x: torch.FloatTensor, return_dict: bool = True
    ) -> Union[AutoencoderKLOutput, Tuple[DiagonalGaussianDistribution]]:
        """
        Encode a batch of images into latents.

        Args:
            x (`torch.FloatTensor`): Input batch of images.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether to return a [`~models.autoencoder_kl.AutoencoderKLOutput`] instead of a plain tuple.

        Returns:
            The latent representations of the encoded images. If `return_dict` is True, a
            [`~models.autoencoder_kl.AutoencoderKLOutput`] is returned, otherwise a plain `tuple` is returned.
        """
        if self.upcast_vae:
            x = x.float()
            self.encoder = self.encoder.float()
            self.quant_conv = self.quant_conv.float()
        if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
            return self.tiled_encode(x, return_dict=return_dict)
        if self.use_tiling_encoder and (
            x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size
        ):
            return self.tiled_encode(x, return_dict=return_dict)

        if self.use_slicing and x.shape[0] > 1:
            encoded_slices = [self.encoder(x_slice) for x_slice in x.split(1)]
            h = torch.cat(encoded_slices)
        else:
            h = self.encoder(x)

        moments = self.quant_conv(h)
        posterior = DiagonalGaussianDistribution(moments)
        self._clear_conv_cache()

        if not return_dict:
            return (posterior,)
        return AutoencoderKLOutput(latent_dist=posterior)

    def _decode(self, z: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        if self.upcast_vae:
            z = z.float()
            self.decoder = self.decoder.float()
            self.post_quant_conv = self.post_quant_conv.float()
        if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
            return self.tiled_decode(z, return_dict=return_dict)
        if self.use_tiling_decoder and (
            z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size
        ):
            return self.tiled_decode(z, return_dict=return_dict)

        z = self.post_quant_conv(z)
        dec = self.decoder(z)

        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)

    @apply_forward_hook
    def decode(
        self, z: torch.FloatTensor, return_dict: bool = True, generator=None
    ) -> Union[DecoderOutput, torch.FloatTensor]:
        """
        Decode a batch of images.

        Args:
            z (`torch.FloatTensor`): Input batch of latent vectors.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether to return a [`~models.vae.DecoderOutput`] instead of a plain tuple.

        Returns:
            [`~models.vae.DecoderOutput`] or `tuple`:
                If return_dict is True, a [`~models.vae.DecoderOutput`] is returned, otherwise a plain `tuple` is
                returned.
        """
        if self.use_slicing and z.shape[0] > 1:
            decoded_slices = [self._decode(z_slice).sample for z_slice in z.split(1)]
            decoded = torch.cat(decoded_slices)
        else:
            decoded = self._decode(z).sample
        self._clear_conv_cache()

        if not return_dict:
            return (decoded,)
        return DecoderOutput(sample=decoded)

    def blend_v(self, a: torch.Tensor, b: torch.Tensor, blend_extent: int) -> torch.Tensor:
        blend_extent = min(a.shape[3], b.shape[3], blend_extent)
        for y in range(blend_extent):
            b[:, :, :, y, :] = a[:, :, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, :, y, :] * (
                y / blend_extent
            )
        return b

    def blend_h(self, a: torch.Tensor, b: torch.Tensor, blend_extent: int) -> torch.Tensor:
        blend_extent = min(a.shape[4], b.shape[4], blend_extent)
        for x in range(blend_extent):
            b[:, :, :, :, x] = a[:, :, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, :, x] * (
                x / blend_extent
            )
        return b
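
    # Note on the two helpers above (added commentary, not from the original file): both perform a
    # linear cross-fade over the overlapping rows/columns of two adjacent tiles. For
    # blend_extent = 4, the incoming tile `b` is weighted 0/4, 1/4, 2/4, 3/4 across the overlap
    # while the previous tile `a` is weighted 4/4, 3/4, 2/4, 1/4, so the seam between tiles fades
    # smoothly instead of producing a visible edge.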

    def tiled_encode(self, x: torch.FloatTensor, return_dict: bool = True) -> AutoencoderKLOutput:
        overlap_size = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor))
        blend_extent = int(self.tile_latent_min_size * self.tile_overlap_factor)
        row_limit = self.tile_latent_min_size - blend_extent

        # Split the image into overlapping tiles of size `tile_sample_min_size` and encode them separately.
        rows = []
        for i in range(0, x.shape[3], overlap_size):
            row = []
            for j in range(0, x.shape[4], overlap_size):
                tile = x[
                    :,
                    :,
                    :,
                    i : i + self.tile_sample_min_size,
                    j : j + self.tile_sample_min_size,
                ]
                tile = self.encoder(tile)
                tile = self.quant_conv(tile)
                row.append(tile)
            rows.append(row)

        result_rows = []
        for i, row in enumerate(rows):
            result_row = []
            for j, tile in enumerate(row):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j], tile, blend_extent)
                if j > 0:
                    tile = self.blend_h(row[j - 1], tile, blend_extent)
                result_row.append(tile[:, :, :, :row_limit, :row_limit])
            result_rows.append(torch.cat(result_row, dim=4))

        moments = torch.cat(result_rows, dim=3)
        posterior = DiagonalGaussianDistribution(moments)

        if not return_dict:
            return (posterior,)
        return AutoencoderKLOutput(latent_dist=posterior)

    def tiled_decode(self, z: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        overlap_size = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor))
        blend_extent = int(self.tile_sample_min_size * self.tile_overlap_factor)
        row_limit = self.tile_sample_min_size - blend_extent

        # Split z into overlapping tiles of size `tile_latent_min_size` and decode them separately.
        # The tiles have an overlap to avoid seams between tiles.
        rows = []
        for i in range(0, z.shape[3], overlap_size):
            row = []
            for j in range(0, z.shape[4], overlap_size):
                tile = z[
                    :,
                    :,
                    :,
                    i : i + self.tile_latent_min_size,
                    j : j + self.tile_latent_min_size,
                ]
                tile = self.post_quant_conv(tile)
                decoded = self.decoder(tile)
                row.append(decoded)
            rows.append(row)

        result_rows = []
        for i, row in enumerate(rows):
            result_row = []
            for j, tile in enumerate(row):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j], tile, blend_extent)
                if j > 0:
                    tile = self.blend_h(row[j - 1], tile, blend_extent)
                result_row.append(tile[:, :, :, :row_limit, :row_limit])
            result_rows.append(torch.cat(result_row, dim=4))

        dec = torch.cat(result_rows, dim=3)

        # Handle the lower right corner tile separately: decode it once more from the original latents.
        lower_right_original = z[
            :,
            :,
            :,
            -self.tile_latent_min_size:,
            -self.tile_latent_min_size:,
        ]
        quantized_lower_right = self.decoder(self.post_quant_conv(lower_right_original))

        # Combine using a ramp that is 0 along the top/left edges of the corner patch and 1 at the
        # bottom-right corner, so the separately decoded patch dominates only near the corner.
        H, W = quantized_lower_right.size(-2), quantized_lower_right.size(-1)
        x_weights = torch.linspace(0, 1, W).unsqueeze(0).repeat(H, 1)
        y_weights = torch.linspace(0, 1, H).unsqueeze(1).repeat(1, W)
        weights = torch.min(x_weights, y_weights)

        if len(dec.size()) == 4:
            weights = weights.unsqueeze(0).unsqueeze(0)
        elif len(dec.size()) == 5:
            weights = weights.unsqueeze(0).unsqueeze(0).unsqueeze(0)

        weights = weights.to(dec.device)
        quantized_area = dec[:, :, :, -H:, -W:]
        combined = weights * quantized_lower_right + (1 - weights) * quantized_area
        dec[:, :, :, -H:, -W:] = combined

        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)

    def forward(
        self,
        sample: torch.FloatTensor,
        sample_posterior: bool = False,
        return_dict: bool = True,
        generator: Optional[torch.Generator] = None,
    ) -> Union[DecoderOutput, torch.FloatTensor]:
        r"""
        Args:
            sample (`torch.FloatTensor`): Input sample.
            sample_posterior (`bool`, *optional*, defaults to `False`):
                Whether to sample from the posterior.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`DecoderOutput`] instead of a plain tuple.
        """
        x = sample
        posterior = self.encode(x).latent_dist
        if sample_posterior:
            z = posterior.sample(generator=generator)
        else:
            z = posterior.mode()
        dec = self.decode(z).sample

        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)

    @classmethod
    def from_pretrained(cls, pretrained_model_path, subfolder=None, **vae_additional_kwargs):
        import json
        import os

        if subfolder is not None:
            pretrained_model_path = os.path.join(pretrained_model_path, subfolder)

        config_file = os.path.join(pretrained_model_path, "config.json")
        if not os.path.isfile(config_file):
            raise RuntimeError(f"{config_file} does not exist")
        with open(config_file, "r") as f:
            config = json.load(f)

        model = cls.from_config(config, **vae_additional_kwargs)

        from diffusers.utils import WEIGHTS_NAME

        model_file = os.path.join(pretrained_model_path, WEIGHTS_NAME)
        model_file_safetensors = model_file.replace(".bin", ".safetensors")
        if os.path.exists(model_file_safetensors):
            from safetensors.torch import load_file

            state_dict = load_file(model_file_safetensors)
        else:
            if not os.path.isfile(model_file):
                raise RuntimeError(f"{model_file} does not exist")
            state_dict = torch.load(model_file, map_location="cpu")

        m, u = model.load_state_dict(state_dict, strict=False)
        print(f"### missing keys: {len(m)}; \n### unexpected keys: {len(u)};")
        print(m, u)
        return model


# Modified from https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/autoencoders/autoencoder_kl_cogvideox.py
# Copyright 2024 The CogVideoX team, Tsinghua University & ZhipuAI and The HuggingFace Team.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class AutoencoderKLCogVideoX(ModelMixin, ConfigMixin, FromOriginalModelMixin):
    r"""
    A VAE model with KL loss for encoding images into latents and decoding latent representations into images. Used in
    [CogVideoX](https://github.com/THUDM/CogVideo).

    This model inherits from [`ModelMixin`]. Check the superclass documentation for its generic methods implemented
    for all models (such as downloading or saving).

    Parameters:
        in_channels (int, *optional*, defaults to 3): Number of channels in the input image.
        out_channels (int, *optional*, defaults to 3): Number of channels in the output.
        down_block_types (`Tuple[str]`, *optional*, defaults to `("DownEncoderBlock2D",)`):
            Tuple of downsample block types.
        up_block_types (`Tuple[str]`, *optional*, defaults to `("UpDecoderBlock2D",)`):
            Tuple of upsample block types.
        block_out_channels (`Tuple[int]`, *optional*, defaults to `(64,)`):
            Tuple of block output channels.
        act_fn (`str`, *optional*, defaults to `"silu"`): The activation function to use.
        sample_size (`int`, *optional*, defaults to `32`): Sample input size.
        scaling_factor (`float`, *optional*, defaults to `1.15258426`):
            The component-wise standard deviation of the trained latent space computed using the first batch of the
            training set. This is used to scale the latent space to have unit variance when training the diffusion
            model. The latents are scaled with the formula `z = z * scaling_factor` before being passed to the
            diffusion model. When decoding, the latents are scaled back to the original scale with the formula: `z = 1
            / scaling_factor * z`. For more details, refer to sections 4.3.2 and D.1 of the [High-Resolution Image
            Synthesis with Latent Diffusion Models](https://arxiv.org/abs/2112.10752) paper.
        force_upcast (`bool`, *optional*, defaults to `True`):
            If enabled, it will force the VAE to run in float32 for high-resolution image pipelines, such as SD-XL.
            The VAE can be fine-tuned / trained to a lower range without losing too much precision, in which case
            `force_upcast` can be set to `False` - see: https://huggingface.co/madebyollin/sdxl-vae-fp16-fix
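
    Example:
        An illustrative usage sketch (not part of the original file). The model path and tensor shapes below are
        placeholders, and the `(batch, channels, frames, height, width)` layout is an assumption chosen only for
        demonstration:

        >>> import torch
        >>> vae = AutoencoderKLCogVideoX.from_pretrained("path/to/cogvideox-vae")
        >>> video = torch.randn(1, 3, 5, 480, 720)  # dummy pixel-space video
        >>> latents = vae.encode(video).latent_dist.sample()
        >>> reconstruction = vae.decode(latents).sample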
""" | |
_supports_gradient_checkpointing = True | |
_no_split_modules = ["CogVideoXResnetBlock3D"] | |
def __init__( | |
self, | |
in_channels: int = 3, | |
out_channels: int = 3, | |
down_block_types: Tuple[str] = ( | |
"CogVideoXDownBlock3D", | |
"CogVideoXDownBlock3D", | |
"CogVideoXDownBlock3D", | |
"CogVideoXDownBlock3D", | |
), | |
up_block_types: Tuple[str] = ( | |
"CogVideoXUpBlock3D", | |
"CogVideoXUpBlock3D", | |
"CogVideoXUpBlock3D", | |
"CogVideoXUpBlock3D", | |
), | |
block_out_channels: Tuple[int] = (128, 256, 256, 512), | |
latent_channels: int = 16, | |
layers_per_block: int = 3, | |
act_fn: str = "silu", | |
norm_eps: float = 1e-6, | |
norm_num_groups: int = 32, | |
temporal_compression_ratio: float = 4, | |
sample_height: int = 480, | |
sample_width: int = 720, | |
scaling_factor: float = 1.15258426, | |
shift_factor: Optional[float] = None, | |
latents_mean: Optional[Tuple[float]] = None, | |
latents_std: Optional[Tuple[float]] = None, | |
force_upcast: float = True, | |
use_quant_conv: bool = False, | |
use_post_quant_conv: bool = False, | |
slice_mag_vae=False, | |
slice_compression_vae=False, | |
cache_compression_vae=False, | |
cache_mag_vae=True, | |
use_tiling=False, | |
mini_batch_encoder=4, | |
mini_batch_decoder=1, | |
): | |
        super().__init__()

        self.encoder = CogVideoXEncoder3D(
            in_channels=in_channels,
            out_channels=latent_channels,
            down_block_types=down_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_eps=norm_eps,
            norm_num_groups=norm_num_groups,
            temporal_compression_ratio=temporal_compression_ratio,
        )
        self.decoder = CogVideoXDecoder3D(
            in_channels=latent_channels,
            out_channels=out_channels,
            up_block_types=up_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_eps=norm_eps,
            norm_num_groups=norm_num_groups,
            temporal_compression_ratio=temporal_compression_ratio,
        )
        self.quant_conv = CogVideoXSafeConv3d(2 * out_channels, 2 * out_channels, 1) if use_quant_conv else None
        self.post_quant_conv = CogVideoXSafeConv3d(out_channels, out_channels, 1) if use_post_quant_conv else None

        self.use_slicing = False
        self.use_tiling = use_tiling

        # Can be increased to decode more latent frames at once, but comes at a reasonable memory cost and it is not
        # recommended because the temporal parts of the VAE, here, are tricky to understand.
        # If you decode X latent frames together, the number of output frames is:
        #     (X + (2 conv cache) + (2 time upscale_1) + (4 time upscale_2) - (2 causal conv downscale)) => X + 6 frames
        #
        # Example with num_latent_frames_batch_size = 2:
        #     - 12 latent frames: (0, 1), (2, 3), (4, 5), (6, 7), (8, 9), (10, 11) are processed together
        #       => (12 // 2 frame slices) * ((2 num_latent_frames_batch_size) + (2 conv cache) + (2 time upscale_1) + (4 time upscale_2) - (2 causal conv downscale))
        #       => 6 * 8 = 48 frames
        #     - 13 latent frames: (0, 1, 2) (special case), (3, 4), (5, 6), (7, 8), (9, 10), (11, 12) are processed together
        #       => (1 frame slice) * ((3 num_latent_frames_batch_size) + (2 conv cache) + (2 time upscale_1) + (4 time upscale_2) - (2 causal conv downscale)) +
        #          ((13 - 3) // 2) * ((2 num_latent_frames_batch_size) + (2 conv cache) + (2 time upscale_1) + (4 time upscale_2) - (2 causal conv downscale))
        #       => 1 * 9 + 5 * 8 = 49 frames
        #
        # It has been implemented this way so as to not have "magic values" in the code base that would be hard to
        # explain. Note that setting it to anything other than 2 would give poor results because the VAE hasn't been
        # trained to be adaptive with different number of temporal frames.
        self.num_latent_frames_batch_size = 2

        # We make the minimum tiling height and width of a sample half of the generally supported resolution.
        self.tile_sample_min_height = sample_height // 2
        self.tile_sample_min_width = sample_width // 2
        self.tile_latent_min_height = int(
            self.tile_sample_min_height / (2 ** (len(self.config.block_out_channels) - 1))
        )
        self.tile_latent_min_width = int(self.tile_sample_min_width / (2 ** (len(self.config.block_out_channels) - 1)))

        # These are experimental overlap factors that were chosen based on experimentation and seem to work best for
        # 720x480 (WxH) resolution. The above resolution is the strongly recommended generation resolution in CogVideoX
        # and so the tiling implementation has only been tested on those specific resolutions.
        self.tile_overlap_factor_height = 1 / 6
        self.tile_overlap_factor_width = 1 / 5

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, (CogVideoXEncoder3D, CogVideoXDecoder3D)):
            module.gradient_checkpointing = value

    def _clear_fake_context_parallel_cache(self):
        for name, module in self.named_modules():
            if isinstance(module, CogVideoXCausalConv3d):
                logger.debug(f"Clearing fake Context Parallel cache for layer: {name}")
                module._clear_fake_context_parallel_cache()

    def enable_tiling(
        self,
        tile_sample_min_height: Optional[int] = None,
        tile_sample_min_width: Optional[int] = None,
        tile_overlap_factor_height: Optional[float] = None,
        tile_overlap_factor_width: Optional[float] = None,
    ) -> None:
        r"""
        Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to
        compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to
        allow processing larger images.

        Args:
            tile_sample_min_height (`int`, *optional*):
                The minimum height required for a sample to be separated into tiles across the height dimension.
            tile_sample_min_width (`int`, *optional*):
                The minimum width required for a sample to be separated into tiles across the width dimension.
            tile_overlap_factor_height (`float`, *optional*):
                The minimum amount of overlap between two consecutive vertical tiles. This is to ensure that there are
                no tiling artifacts produced across the height dimension. Must be between 0 and 1. Setting a higher
                value might cause more tiles to be processed, slowing down the decoding process.
            tile_overlap_factor_width (`float`, *optional*):
                The minimum amount of overlap between two consecutive horizontal tiles. This is to ensure that there
                are no tiling artifacts produced across the width dimension. Must be between 0 and 1. Setting a higher
                value might cause more tiles to be processed, slowing down the decoding process.
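
        Example:
            A brief illustrative call (not part of the original file); the values shown simply restate the defaults
            derived in `__init__` and are assumptions for demonstration:

            >>> # assuming `vae` is an AutoencoderKLCogVideoX instance
            >>> vae.enable_tiling(tile_sample_min_height=240, tile_sample_min_width=360)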
""" | |
self.use_tiling = True | |
self.tile_sample_min_height = tile_sample_min_height or self.tile_sample_min_height | |
self.tile_sample_min_width = tile_sample_min_width or self.tile_sample_min_width | |
self.tile_latent_min_height = int( | |
self.tile_sample_min_height / (2 ** (len(self.config.block_out_channels) - 1)) | |
) | |
self.tile_latent_min_width = int(self.tile_sample_min_width / (2 ** (len(self.config.block_out_channels) - 1))) | |
self.tile_overlap_factor_height = tile_overlap_factor_height or self.tile_overlap_factor_height | |
self.tile_overlap_factor_width = tile_overlap_factor_width or self.tile_overlap_factor_width | |
def disable_tiling(self) -> None: | |
r""" | |
Disable tiled VAE decoding. If `enable_tiling` was previously enabled, this method will go back to computing | |
decoding in one step. | |
""" | |
self.use_tiling = False | |
def enable_slicing(self) -> None: | |
r""" | |
Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to | |
compute decoding in several steps. This is useful to save some memory and allow larger batch sizes. | |
""" | |
self.use_slicing = True | |
def disable_slicing(self) -> None: | |
r""" | |
Disable sliced VAE decoding. If `enable_slicing` was previously enabled, this method will go back to computing | |
decoding in one step. | |
""" | |
self.use_slicing = False | |

    @apply_forward_hook
    def encode(
        self, x: torch.Tensor, return_dict: bool = True
    ) -> Union[AutoencoderKLOutput, Tuple[DiagonalGaussianDistribution]]:
        """
        Encode a batch of images into latents.

        Args:
            x (`torch.Tensor`): Input batch of images.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether to return a [`~models.autoencoder_kl.AutoencoderKLOutput`] instead of a plain tuple.

        Returns:
            The latent representations of the encoded images. If `return_dict` is True, a
            [`~models.autoencoder_kl.AutoencoderKLOutput`] is returned, otherwise a plain `tuple` is returned.
        """
        batch_size, num_channels, num_frames, height, width = x.shape

        if num_frames == 1:
            h = self.encoder(x)
            if self.quant_conv is not None:
                h = self.quant_conv(h)
            posterior = DiagonalGaussianDistribution(h)
        else:
            frame_batch_size = 4
            h = []
            for i in range(num_frames // frame_batch_size):
                remaining_frames = num_frames % frame_batch_size
                start_frame = frame_batch_size * i + (0 if i == 0 else remaining_frames)
                end_frame = frame_batch_size * (i + 1) + remaining_frames
                z_intermediate = x[:, :, start_frame:end_frame]
                z_intermediate = self.encoder(z_intermediate)
                if self.quant_conv is not None:
                    z_intermediate = self.quant_conv(z_intermediate)
                h.append(z_intermediate)
            self._clear_fake_context_parallel_cache()
            h = torch.cat(h, dim=2)
            posterior = DiagonalGaussianDistribution(h)
        self._clear_fake_context_parallel_cache()

        if not return_dict:
            return (posterior,)
        return AutoencoderKLOutput(latent_dist=posterior)
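
    # Worked example for the frame batching used in `encode` above and `_decode` below (added
    # commentary, not from the original file). With num_frames = 13 and frame_batch_size = 4,
    # remaining_frames = 13 % 4 = 1 and the loop runs 13 // 4 = 3 times, producing the slices
    # [0:5], [5:9], and [9:13]: the first slice absorbs the remainder, so every frame is processed
    # exactly once.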

    def _decode(self, z: torch.Tensor, return_dict: bool = True) -> Union[DecoderOutput, torch.Tensor]:
        batch_size, num_channels, num_frames, height, width = z.shape

        if self.use_tiling and (width > self.tile_latent_min_width or height > self.tile_latent_min_height):
            return self.tiled_decode(z, return_dict=return_dict)

        if num_frames == 1:
            dec = []
            z_intermediate = z
            if self.post_quant_conv is not None:
                z_intermediate = self.post_quant_conv(z_intermediate)
            z_intermediate = self.decoder(z_intermediate)
            dec.append(z_intermediate)
        else:
            frame_batch_size = self.num_latent_frames_batch_size
            dec = []
            for i in range(num_frames // frame_batch_size):
                remaining_frames = num_frames % frame_batch_size
                start_frame = frame_batch_size * i + (0 if i == 0 else remaining_frames)
                end_frame = frame_batch_size * (i + 1) + remaining_frames
                z_intermediate = z[:, :, start_frame:end_frame]
                if self.post_quant_conv is not None:
                    z_intermediate = self.post_quant_conv(z_intermediate)
                z_intermediate = self.decoder(z_intermediate)
                dec.append(z_intermediate)

        self._clear_fake_context_parallel_cache()
        dec = torch.cat(dec, dim=2)

        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)

    @apply_forward_hook
    def decode(self, z: torch.Tensor, return_dict: bool = True) -> Union[DecoderOutput, torch.Tensor]:
        """
        Decode a batch of images.

        Args:
            z (`torch.Tensor`): Input batch of latent vectors.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether to return a [`~models.vae.DecoderOutput`] instead of a plain tuple.

        Returns:
            [`~models.vae.DecoderOutput`] or `tuple`:
                If return_dict is True, a [`~models.vae.DecoderOutput`] is returned, otherwise a plain `tuple` is
                returned.
        """
        if self.use_slicing and z.shape[0] > 1:
            decoded_slices = [self._decode(z_slice).sample for z_slice in z.split(1)]
            decoded = torch.cat(decoded_slices)
        else:
            decoded = self._decode(z).sample

        if not return_dict:
            return (decoded,)
        return DecoderOutput(sample=decoded)

    def blend_v(self, a: torch.Tensor, b: torch.Tensor, blend_extent: int) -> torch.Tensor:
        blend_extent = min(a.shape[3], b.shape[3], blend_extent)
        for y in range(blend_extent):
            b[:, :, :, y, :] = a[:, :, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, :, y, :] * (
                y / blend_extent
            )
        return b

    def blend_h(self, a: torch.Tensor, b: torch.Tensor, blend_extent: int) -> torch.Tensor:
        blend_extent = min(a.shape[4], b.shape[4], blend_extent)
        for x in range(blend_extent):
            b[:, :, :, :, x] = a[:, :, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, :, x] * (
                x / blend_extent
            )
        return b

    def tiled_decode(self, z: torch.Tensor, return_dict: bool = True) -> Union[DecoderOutput, torch.Tensor]:
        r"""
        Decode a batch of images using a tiled decoder.

        Args:
            z (`torch.Tensor`): Input batch of latent vectors.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~models.vae.DecoderOutput`] instead of a plain tuple.

        Returns:
            [`~models.vae.DecoderOutput`] or `tuple`:
                If return_dict is True, a [`~models.vae.DecoderOutput`] is returned, otherwise a plain `tuple` is
                returned.
        """
        # Rough memory assessment:
        #   - In CogVideoX-2B, there are a total of 24 CausalConv3d layers.
        #   - The biggest intermediate dimensions are: [1, 128, 9, 480, 720].
        #   - Assume fp16 (2 bytes per value).
        # Memory required: 1 * 128 * 9 * 480 * 720 * 24 * 2 / 1024**3 = 17.8 GB
        #
        # Memory assessment when using tiling:
        #   - Assume everything as above but now HxW is 240x360 by tiling in half
        # Memory required: 1 * 128 * 9 * 240 * 360 * 24 * 2 / 1024**3 = 4.5 GB
        batch_size, num_channels, num_frames, height, width = z.shape

        overlap_height = int(self.tile_latent_min_height * (1 - self.tile_overlap_factor_height))
        overlap_width = int(self.tile_latent_min_width * (1 - self.tile_overlap_factor_width))
        blend_extent_height = int(self.tile_sample_min_height * self.tile_overlap_factor_height)
        blend_extent_width = int(self.tile_sample_min_width * self.tile_overlap_factor_width)
        row_limit_height = self.tile_sample_min_height - blend_extent_height
        row_limit_width = self.tile_sample_min_width - blend_extent_width
        frame_batch_size = self.num_latent_frames_batch_size

        # Split z into overlapping tiles and decode them separately.
        # The tiles have an overlap to avoid seams between tiles.
        rows = []
        for i in range(0, height, overlap_height):
            row = []
            for j in range(0, width, overlap_width):
                time = []
                for k in range(num_frames // frame_batch_size):
                    remaining_frames = num_frames % frame_batch_size
                    start_frame = frame_batch_size * k + (0 if k == 0 else remaining_frames)
                    end_frame = frame_batch_size * (k + 1) + remaining_frames
                    tile = z[
                        :,
                        :,
                        start_frame:end_frame,
                        i : i + self.tile_latent_min_height,
                        j : j + self.tile_latent_min_width,
                    ]
                    if self.post_quant_conv is not None:
                        tile = self.post_quant_conv(tile)
                    tile = self.decoder(tile)
                    time.append(tile)
                self._clear_fake_context_parallel_cache()
                row.append(torch.cat(time, dim=2))
            rows.append(row)

        result_rows = []
        for i, row in enumerate(rows):
            result_row = []
            for j, tile in enumerate(row):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j], tile, blend_extent_height)
                if j > 0:
                    tile = self.blend_h(row[j - 1], tile, blend_extent_width)
                result_row.append(tile[:, :, :, :row_limit_height, :row_limit_width])
            result_rows.append(torch.cat(result_row, dim=4))

        dec = torch.cat(result_rows, dim=3)

        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)

    def forward(
        self,
        sample: torch.Tensor,
        sample_posterior: bool = False,
        return_dict: bool = True,
        generator: Optional[torch.Generator] = None,
    ) -> Union[DecoderOutput, Tuple[DecoderOutput]]:
        x = sample
        posterior = self.encode(x).latent_dist
        if sample_posterior:
            z = posterior.sample(generator=generator)
        else:
            z = posterior.mode()
        dec = self.decode(z)
        if not return_dict:
            return (dec,)
        return dec
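

# Usage sketch (added commentary, not part of the original file). The checkpoint path, device, and
# tensor shapes are placeholders; a memory-constrained decode with this class might look roughly
# like the following:
#
#     import torch
#
#     vae = AutoencoderKLCogVideoX.from_pretrained("path/to/cogvideox-vae", torch_dtype=torch.float16).to("cuda")
#     vae.enable_tiling()   # spatial tiling for large frames
#     vae.enable_slicing()  # decode one sample of the batch at a time
#
#     latents = torch.randn(1, 16, 2, 60, 90, dtype=torch.float16, device="cuda")
#     frames = vae.decode(latents).sample  # pixel-space video tensor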