import math

import numpy as np

import paddle
from paddle import nn


def get_timestep_embedding(
    timesteps: paddle.Tensor,
    embedding_dim: int,
    flip_sin_to_cos: bool = False,
    downscale_freq_shift: float = 1,
    scale: float = 1,
    max_period: int = 10000,
):
    """
    Create sinusoidal timestep embeddings. This matches the implementation in
    Denoising Diffusion Probabilistic Models.

    :param timesteps: a 1-D Tensor of N indices, one per batch element. These may be fractional.
    :param embedding_dim: the dimension of the output.
    :param flip_sin_to_cos: if True, place the cosine half before the sine half.
    :param downscale_freq_shift: shift applied to the frequency denominator.
    :param scale: scaling factor applied to the embeddings before sin/cos.
    :param max_period: controls the minimum frequency of the embeddings.
    :return: an [N x embedding_dim] Tensor of positional embeddings.
    """
    assert len(timesteps.shape) == 1, "Timesteps should be a 1d-array"

    half_dim = embedding_dim // 2
    exponent = -math.log(max_period) * paddle.arange(start=0, end=half_dim, dtype="float32")
    exponent = exponent / (half_dim - downscale_freq_shift)

    emb = paddle.exp(exponent)
    emb = timesteps[:, None].cast("float32") * emb[None, :]

    # scale embeddings
    emb = scale * emb

    # concat sine and cosine embeddings
    emb = paddle.concat([paddle.sin(emb), paddle.cos(emb)], axis=-1)

    # flip sine and cosine embeddings
    if flip_sin_to_cos:
        emb = paddle.concat([emb[:, half_dim:], emb[:, :half_dim]], axis=-1)

    # zero-pad the last column when embedding_dim is odd
    if embedding_dim % 2 == 1:
        emb = paddle.concat([emb, paddle.zeros([emb.shape[0], 1])], axis=-1)
    return emb
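
# A quick usage sketch (illustrative only, not part of the original module):
# embedding four timesteps into 32-dimensional sinusoidal vectors. The shapes
# follow from the function above; the timestep values are made up.
#
#     t = paddle.to_tensor([0.0, 10.0, 100.0, 999.0])  # shape [4]
#     emb = get_timestep_embedding(t, embedding_dim=32)
#     assert list(emb.shape) == [4, 32]  # [N x embedding_dim]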


class TimestepEmbedding(nn.Layer):
    """Projects a timestep embedding through a two-layer MLP with an optional activation."""

    def __init__(self, in_channels: int, time_embed_dim: int, act_fn: str = "silu", out_dim: int = None):
        super().__init__()

        self.linear_1 = nn.Linear(in_channels, time_embed_dim)
        self.act = None
        if act_fn == "silu":
            self.act = nn.Silu()
        elif act_fn == "mish":
            self.act = nn.Mish()

        if out_dim is not None:
            time_embed_dim_out = out_dim
        else:
            time_embed_dim_out = time_embed_dim
        self.linear_2 = nn.Linear(time_embed_dim, time_embed_dim_out)

    def forward(self, sample):
        sample = self.linear_1(sample)

        if self.act is not None:
            sample = self.act(sample)

        sample = self.linear_2(sample)
        return sample
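
# Usage sketch (illustrative, not from the original source): projecting a
# 32-dim sinusoidal embedding to a 128-dim conditioning vector. The dimensions
# here are arbitrary example choices.
#
#     temb = TimestepEmbedding(in_channels=32, time_embed_dim=128)
#     out = temb(paddle.randn([4, 32]))  # -> shape [4, 128]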


class Timesteps(nn.Layer):
    """A layer wrapper around `get_timestep_embedding` with a fixed configuration."""

    def __init__(self, num_channels: int, flip_sin_to_cos: bool, downscale_freq_shift: float):
        super().__init__()
        self.num_channels = num_channels
        self.flip_sin_to_cos = flip_sin_to_cos
        self.downscale_freq_shift = downscale_freq_shift

    def forward(self, timesteps):
        t_emb = get_timestep_embedding(
            timesteps,
            self.num_channels,
            flip_sin_to_cos=self.flip_sin_to_cos,
            downscale_freq_shift=self.downscale_freq_shift,
        )
        return t_emb
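
# Usage sketch (illustrative): the layer form is convenient inside a model's
# __init__; the values below are example choices, not defaults from the source.
#
#     time_proj = Timesteps(num_channels=32, flip_sin_to_cos=True, downscale_freq_shift=0.0)
#     t_emb = time_proj(paddle.to_tensor([1.0, 2.0]))  # -> shape [2, 32]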


class GaussianFourierProjection(nn.Layer):
    """Gaussian Fourier embeddings for noise levels."""

    def __init__(
        self, embedding_size: int = 256, scale: float = 1.0, set_W_to_weight=True, log=True, flip_sin_to_cos=False
    ):
        super().__init__()
        self.register_buffer("weight", paddle.randn((embedding_size,)) * scale)
        self.log = log
        self.flip_sin_to_cos = flip_sin_to_cos

        if set_W_to_weight:
            # register a second buffer `W` and alias `weight` to it
            self.register_buffer("W", paddle.randn((embedding_size,)) * scale)
            self.weight = self.W

    def forward(self, x):
        if self.log:
            x = paddle.log(x.cast(self.weight.dtype))

        # random Fourier features: project onto fixed Gaussian frequencies
        x_proj = x[:, None] * self.weight[None, :] * 2 * np.pi

        if self.flip_sin_to_cos:
            out = paddle.concat([paddle.cos(x_proj), paddle.sin(x_proj)], axis=-1)
        else:
            out = paddle.concat([paddle.sin(x_proj), paddle.cos(x_proj)], axis=-1)
        return out
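
# Usage sketch (illustrative): embedding continuous noise levels into a
# 2 * embedding_size feature vector; the sigma values are arbitrary examples.
#
#     fourier = GaussianFourierProjection(embedding_size=16)
#     sigmas = paddle.to_tensor([0.1, 1.0, 10.0])
#     feats = fourier(sigmas)  # log is taken internally; shape [3, 32]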


class ImagePositionalEmbeddings(nn.Layer):
    """
    Converts latent image classes into vector embeddings. Sums the vector embeddings with positional embeddings for
    the height and width of the latent space.

    For more details, see figure 10 of the DALL-E paper: https://arxiv.org/abs/2102.12092

    For VQ-diffusion:

    Output vector embeddings are used as input for the transformer.

    Note that the vector embeddings for the transformer are different from the vector embeddings from the VQVAE.

    Args:
        num_embed (`int`):
            Number of embeddings for the latent pixels embeddings.
        height (`int`):
            Height of the latent image, i.e. the number of height embeddings.
        width (`int`):
            Width of the latent image, i.e. the number of width embeddings.
        embed_dim (`int`):
            Dimension of the produced vector embeddings. Used for the latent pixel, height, and width embeddings.
    """

    def __init__(
        self,
        num_embed: int,
        height: int,
        width: int,
        embed_dim: int,
    ):
        super().__init__()

        self.height = height
        self.width = width
        self.num_embed = num_embed
        self.embed_dim = embed_dim

        self.emb = nn.Embedding(self.num_embed, embed_dim)
        self.height_emb = nn.Embedding(self.height, embed_dim)
        self.width_emb = nn.Embedding(self.width, embed_dim)

    def forward(self, index):
        emb = self.emb(index)

        height_emb = self.height_emb(paddle.arange(self.height).reshape([1, self.height]))

        # 1 x H x D -> 1 x H x 1 x D
        height_emb = height_emb.unsqueeze(2)

        width_emb = self.width_emb(paddle.arange(self.width).reshape([1, self.width]))

        # 1 x W x D -> 1 x 1 x W x D
        width_emb = width_emb.unsqueeze(1)

        pos_emb = height_emb + width_emb

        # 1 x H x W x D -> 1 x (H*W) x D
        pos_emb = pos_emb.reshape([1, self.height * self.width, -1])

        # truncate the positional embeddings to the input sequence length
        emb = emb + pos_emb[:, : emb.shape[1], :]

        return emb
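
# Usage sketch (illustrative): embedding a batch of latent-pixel indices for a
# 4 x 4 latent grid; the num_embed and embed_dim values are example choices.
#
#     pos = ImagePositionalEmbeddings(num_embed=512, height=4, width=4, embed_dim=64)
#     index = paddle.randint(0, 512, shape=[2, 16])  # [batch, height * width]
#     tokens = pos(index)  # -> shape [2, 16, 64]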