diff --git a/styletts2/Configs/config.yml b/styletts2/Configs/config.yml
new file mode 100644
index 0000000000000000000000000000000000000000..b74b8ee8a72f28f33edfaa3ea992342467e801cb
--- /dev/null
+++ b/styletts2/Configs/config.yml
@@ -0,0 +1,116 @@
+log_dir: "Models/LJSpeech"
+first_stage_path: "first_stage.pth"
+save_freq: 2
+log_interval: 10
+device: "cuda"
+epochs_1st: 200 # number of epochs for first stage training (pre-training)
+epochs_2nd: 100 # number of epochs for second stage training (joint training)
+batch_size: 16
+max_len: 400 # maximum number of frames
+pretrained_model: ""
+second_stage_load_pretrained: true # set to true if the pre-trained model is for 2nd stage
+load_only_params: false # set to true if do not want to load epoch numbers and optimizer parameters
+
+F0_path: "Utils/JDC/bst.t7"
+ASR_config: "Utils/ASR/config.yml"
+ASR_path: "Utils/ASR/epoch_00080.pth"
+PLBERT_dir: 'Utils/PLBERT/'
+
+data_params:
+  train_data: "Data/train_list.txt"
+  val_data: "Data/val_list.txt"
+  root_path: "/local/LJSpeech-1.1/wavs"
+  OOD_data: "Data/OOD_texts.txt"
+  min_length: 50 # sample until texts with this size are obtained for OOD texts
+
+preprocess_params:
+  sr: 24000
+  spect_params:
+    n_fft: 2048
+    win_length: 1200
+    hop_length: 300
+
+model_params:
+  multispeaker: false
+
+  dim_in: 64
+  hidden_dim: 512
+  max_conv_dim: 512
+  n_layer: 3
+  n_mels: 80
+
+  n_token: 178 # number of phoneme tokens
+  max_dur: 50 # maximum duration of a single phoneme
+  style_dim: 128 # style vector size
+
+  dropout: 0.2
+
+  # config for decoder
+  decoder:
+    type: 'istftnet' # either hifigan or istftnet
+    resblock_kernel_sizes: [3,7,11]
+    upsample_rates : [10, 6]
+    upsample_initial_channel: 512
+    resblock_dilation_sizes: [[1,3,5], [1,3,5], [1,3,5]]
+    upsample_kernel_sizes: [20, 12]
+    gen_istft_n_fft: 20
+    gen_istft_hop_size: 5
+
+  # speech language model config
+  slm:
+    model: 'microsoft/wavlm-base-plus'
+    sr: 16000 # sampling rate of SLM
+    hidden: 768 # hidden size of SLM
+    nlayers: 13 # number of layers of SLM
+    initial_channel: 64 # initial channels of SLM discriminator head
+
+  # style diffusion model config
+  diffusion:
+    embedding_mask_proba: 0.1
+    # transformer config
+    transformer:
+      num_layers: 3
+      num_heads: 8
+      head_features: 64
+      multiplier: 2
+
+    # diffusion distribution config
+    dist:
+      sigma_data: 0.2 # placeholder for estimate_sigma_data set to false
+      estimate_sigma_data: true # estimate sigma_data from the current batch if set to true
+      mean: -3.0
+      std: 1.0
+
+loss_params:
+  lambda_mel: 5. # mel reconstruction loss
+  lambda_gen: 1. # generator loss
+  lambda_slm: 1. # slm feature matching loss
+
+  lambda_mono: 1. # monotonic alignment loss (1st stage, TMA)
+  lambda_s2s: 1. # sequence-to-sequence loss (1st stage, TMA)
+  TMA_epoch: 50 # TMA starting epoch (1st stage)
+
+  lambda_F0: 1. # F0 reconstruction loss (2nd stage)
+  lambda_norm: 1. # norm reconstruction loss (2nd stage)
+  lambda_dur: 1. # duration loss (2nd stage)
+  lambda_ce: 20. # duration predictor probability output CE loss (2nd stage)
+  lambda_sty: 1. # style reconstruction loss (2nd stage)
+  lambda_diff: 1. # score matching loss (2nd stage)
+
+  diff_epoch: 20 # style diffusion starting epoch (2nd stage)
+  joint_epoch: 50 # joint training starting epoch (2nd stage)
+
+optimizer_params:
+  lr: 0.0001 # general learning rate
+  bert_lr: 0.00001 # learning rate for PLBERT
+  ft_lr: 0.00001 # learning rate for acoustic modules
+
+slmadv_params:
+  min_len: 400 # minimum length of samples
+  max_len: 500 # maximum length of samples
+  batch_percentage: 0.5 # to prevent out of memory, only use half of the original batch size
+  iter: 10 # update the discriminator every this iterations of generator update
+  thresh: 5 # gradient norm above which the gradient is scaled
+  scale: 0.01 # gradient scaling factor for predictors from SLM discriminators
+  sig: 1.5 # sigma for differentiable duration modeling
+
\ No newline at end of file
diff --git a/styletts2/Configs/config_ft.yml b/styletts2/Configs/config_ft.yml
new file mode 100644
index 0000000000000000000000000000000000000000..00ae95fc734934a4639681ec9981222fc462c270
--- /dev/null
+++ b/styletts2/Configs/config_ft.yml
@@ -0,0 +1,111 @@
+log_dir: "Models/LJSpeech"
+save_freq: 5
+log_interval: 10
+device: "cuda"
+epochs: 50 # number of finetuning epochs (1 hour of data)
+batch_size: 8
+max_len: 400 # maximum number of frames
+pretrained_model: "Models/LibriTTS/epochs_2nd_00020.pth"
+second_stage_load_pretrained: true # set to true if the pre-trained model is for 2nd stage
+load_only_params: true # set to true if do not want to load epoch numbers and optimizer parameters
+
+F0_path: "Utils/JDC/bst.t7"
+ASR_config: "Utils/ASR/config.yml"
+ASR_path: "Utils/ASR/epoch_00080.pth"
+PLBERT_dir: 'Utils/PLBERT/'
+
+data_params:
+  train_data: "Data/train_list.txt"
+  val_data: "Data/val_list.txt"
+  root_path: "/local/LJSpeech-1.1/wavs"
+  OOD_data: "Data/OOD_texts.txt"
+  min_length: 50 # sample until texts with this size are obtained for OOD texts
+
+preprocess_params:
+  sr: 24000
+  spect_params:
+    n_fft: 2048
+    win_length: 1200
+    hop_length: 300
+
+model_params:
+  multispeaker: true
+
+  dim_in: 64
+  hidden_dim: 512
+  max_conv_dim: 512
+  n_layer: 3
+  n_mels: 80
+
+  n_token: 178 # number of phoneme tokens
+  max_dur: 50 # maximum duration of a single phoneme
+  style_dim: 128 # style vector size
+
+  dropout: 0.2
+
+  # config for decoder
+  decoder:
+    type: 'hifigan' # either hifigan or istftnet
+    resblock_kernel_sizes: [3,7,11]
+    upsample_rates : [10,5,3,2]
+    upsample_initial_channel: 512
+    resblock_dilation_sizes: [[1,3,5], [1,3,5], [1,3,5]]
+    upsample_kernel_sizes: [20,10,6,4]
+
+  # speech language model config
+  slm:
+    model: 'microsoft/wavlm-base-plus'
+    sr: 16000 # sampling rate of SLM
+    hidden: 768 # hidden size of SLM
+    nlayers: 13 # number of layers of SLM
+    initial_channel: 64 # initial channels of SLM discriminator head
+
+  # style diffusion model config
+  diffusion:
+    embedding_mask_proba: 0.1
+    # transformer config
+    transformer:
+      num_layers: 3
+      num_heads: 8
+      head_features: 64
+      multiplier: 2
+
+    # diffusion distribution config
+    dist:
+      sigma_data: 0.2 # placeholder for estimate_sigma_data set to false
+      estimate_sigma_data: true # estimate sigma_data from the current batch if set to true
+      mean: -3.0
+      std: 1.0
+
+loss_params:
+  lambda_mel: 5. # mel reconstruction loss
+  lambda_gen: 1. # generator loss
+  lambda_slm: 1. # slm feature matching loss
+
+  lambda_mono: 1. # monotonic alignment loss (TMA)
+  lambda_s2s: 1. # sequence-to-sequence loss (TMA)
+
+  lambda_F0: 1. # F0 reconstruction loss
+  lambda_norm: 1. # norm reconstruction loss
+  lambda_dur: 1. # duration loss
+  lambda_ce: 20. # duration predictor probability output CE loss
+  lambda_sty: 1. # style reconstruction loss
+  lambda_diff: 1. # score matching loss
+
+  diff_epoch: 10 # style diffusion starting epoch
+  joint_epoch: 30 # joint training starting epoch
+
+optimizer_params:
+  lr: 0.0001 # general learning rate
+  bert_lr: 0.00001 # learning rate for PLBERT
+  ft_lr: 0.0001 # learning rate for acoustic modules
+
+slmadv_params:
+  min_len: 400 # minimum length of samples
+  max_len: 500 # maximum length of samples
+  batch_percentage: 0.5 # to prevent out of memory, only use half of the original batch size
+  iter: 10 # update the discriminator every this iterations of generator update
+  thresh: 5 # gradient norm above which the gradient is scaled
+  scale: 0.01 # gradient scaling factor for predictors from SLM discriminators
+  sig: 1.5 # sigma for differentiable duration modeling
+
diff --git a/styletts2/Configs/config_libritts.yml b/styletts2/Configs/config_libritts.yml
new file mode 100644
index 0000000000000000000000000000000000000000..135d87260aa53cfb18b665333d44744ce5b4152a
--- /dev/null
+++ b/styletts2/Configs/config_libritts.yml
@@ -0,0 +1,113 @@
+log_dir: "Models/LibriTTS"
+first_stage_path: "first_stage.pth"
+save_freq: 1
+log_interval: 10
+device: "cuda"
+epochs_1st: 50 # number of epochs for first stage training (pre-training)
+epochs_2nd: 30 # number of epochs for second stage training (joint training)
+batch_size: 16
+max_len: 300 # maximum number of frames
+pretrained_model: ""
+second_stage_load_pretrained: true # set to true if the pre-trained model is for 2nd stage
+load_only_params: false # set to true if do not want to load epoch numbers and optimizer parameters
+
+F0_path: "Utils/JDC/bst.t7"
+ASR_config: "Utils/ASR/config.yml"
+ASR_path: "Utils/ASR/epoch_00080.pth"
+PLBERT_dir: 'Utils/PLBERT/'
+
+data_params:
+  train_data: "Data/train_list.txt"
+  val_data: "Data/val_list.txt"
+  root_path: ""
+  OOD_data: "Data/OOD_texts.txt"
+  min_length: 50 # sample until texts with this size are obtained for OOD texts
+
+preprocess_params:
+  sr: 24000
+  spect_params:
+    n_fft: 2048
+    win_length: 1200
+    hop_length: 300
+
+model_params:
+  multispeaker: true
+
+  dim_in: 64
+  hidden_dim: 512
+  max_conv_dim: 512
+  n_layer: 3
+  n_mels: 80
+
+  n_token: 178 # number of phoneme tokens
+  max_dur: 50 # maximum duration of a single phoneme
+  style_dim: 128 # style vector size
+
+  dropout: 0.2
+
+  # config for decoder
+  decoder:
+    type: 'hifigan' # either hifigan or istftnet
+    resblock_kernel_sizes: [3,7,11]
+    upsample_rates : [10,5,3,2]
+    upsample_initial_channel: 512
+    resblock_dilation_sizes: [[1,3,5], [1,3,5], [1,3,5]]
+    upsample_kernel_sizes: [20,10,6,4]
+
+  # speech language model config
+  slm:
+    model: 'microsoft/wavlm-base-plus'
+    sr: 16000 # sampling rate of SLM
+    hidden: 768 # hidden size of SLM
+    nlayers: 13 # number of layers of SLM
+    initial_channel: 64 # initial channels of SLM discriminator head
+
+  # style diffusion model config
+  diffusion:
+    embedding_mask_proba: 0.1
+    # transformer config
+    transformer:
+      num_layers: 3
+      num_heads: 8
+      head_features: 64
+      multiplier: 2
+
+    # diffusion distribution config
+    dist:
+      sigma_data: 0.2 # placeholder for estimate_sigma_data set to false
+      estimate_sigma_data: true # estimate sigma_data from the current batch if set to true
+      mean: -3.0
+      std: 1.0
+
+loss_params:
+  lambda_mel: 5. # mel reconstruction loss
+  lambda_gen: 1. # generator loss
+  lambda_slm: 1. # slm feature matching loss
+
+  lambda_mono: 1. # monotonic alignment loss (1st stage, TMA)
+  lambda_s2s: 1. # sequence-to-sequence loss (1st stage, TMA)
+  TMA_epoch: 5 # TMA starting epoch (1st stage)
+
+  lambda_F0: 1. # F0 reconstruction loss (2nd stage)
+  lambda_norm: 1. # norm reconstruction loss (2nd stage)
+  lambda_dur: 1. # duration loss (2nd stage)
+  lambda_ce: 20. # duration predictor probability output CE loss (2nd stage)
+  lambda_sty: 1. # style reconstruction loss (2nd stage)
+  lambda_diff: 1. # score matching loss (2nd stage)
+
+  diff_epoch: 10 # style diffusion starting epoch (2nd stage)
+  joint_epoch: 15 # joint training starting epoch (2nd stage)
+
+optimizer_params:
+  lr: 0.0001 # general learning rate
+  bert_lr: 0.00001 # learning rate for PLBERT
+  ft_lr: 0.00001 # learning rate for acoustic modules
+
+slmadv_params:
+  min_len: 400 # minimum length of samples
+  max_len: 500 # maximum length of samples
+  batch_percentage: 0.5 # to prevent out of memory, only use half of the original batch size
+  iter: 20 # update the discriminator every this iterations of generator update
+  thresh: 5 # gradient norm above which the gradient is scaled
+  scale: 0.01 # gradient scaling factor for predictors from SLM discriminators
+  sig: 1.5 # sigma for differentiable duration modeling
diff --git a/styletts2/LICENSE b/styletts2/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..0c7bc2e109e023323ea3146c306b392bfa3ce614
--- /dev/null
+++ b/styletts2/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2023 Aaron (Yinghao) Li
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
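Note on the three configs above: they share one schema; config_ft.yml replaces the epochs_1st/epochs_2nd pair with a single epochs value and points pretrained_model at the released LibriTTS second-stage checkpoint. A minimal sketch of reading such a file follows (assumes PyYAML is installed; the load_config helper and the printed fields are illustrative only and not part of this patch):

    import yaml

    def load_config(path: str = "Configs/config.yml") -> dict:
        # Parse the YAML training config into nested Python dicts/lists.
        with open(path, "r", encoding="utf-8") as f:
            return yaml.safe_load(f)

    if __name__ == "__main__":
        cfg = load_config()
        # Nested sections come back as plain dicts, e.g.:
        print(cfg["epochs_2nd"], cfg["batch_size"])       # 100 16
        print(cfg["model_params"]["decoder"]["type"])     # istftnet
        print(cfg["loss_params"]["lambda_mel"])           # 5.0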
diff --git a/styletts2/Modules/__init__.py b/styletts2/Modules/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..8b137891791fe96927ad78e64b0aad7bded08bdc --- /dev/null +++ b/styletts2/Modules/__init__.py @@ -0,0 +1 @@ + diff --git a/styletts2/Modules/__pycache__/__init__.cpython-310.pyc b/styletts2/Modules/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b2dcbb0bd13ec59ca9ac2250c69d7be077e238ef Binary files /dev/null and b/styletts2/Modules/__pycache__/__init__.cpython-310.pyc differ diff --git a/styletts2/Modules/__pycache__/discriminators.cpython-310.pyc b/styletts2/Modules/__pycache__/discriminators.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8d8c9de95d95054ff1c675919c1f93a93a272f14 Binary files /dev/null and b/styletts2/Modules/__pycache__/discriminators.cpython-310.pyc differ diff --git a/styletts2/Modules/__pycache__/hifigan.cpython-310.pyc b/styletts2/Modules/__pycache__/hifigan.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c6632b620a328fc94e590adc260a759336138b61 Binary files /dev/null and b/styletts2/Modules/__pycache__/hifigan.cpython-310.pyc differ diff --git a/styletts2/Modules/__pycache__/utils.cpython-310.pyc b/styletts2/Modules/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..31fae87d7afbb63d3d143fcd0d793bdc331d96e2 Binary files /dev/null and b/styletts2/Modules/__pycache__/utils.cpython-310.pyc differ diff --git a/styletts2/Modules/diffusion/__init__.py b/styletts2/Modules/diffusion/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..8b137891791fe96927ad78e64b0aad7bded08bdc --- /dev/null +++ b/styletts2/Modules/diffusion/__init__.py @@ -0,0 +1 @@ + diff --git a/styletts2/Modules/diffusion/__pycache__/__init__.cpython-310.pyc b/styletts2/Modules/diffusion/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6826bbcfbec11993617bfc7783e4b6e2ac921a97 Binary files /dev/null and b/styletts2/Modules/diffusion/__pycache__/__init__.cpython-310.pyc differ diff --git a/styletts2/Modules/diffusion/__pycache__/diffusion.cpython-310.pyc b/styletts2/Modules/diffusion/__pycache__/diffusion.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c57cefde73e93b3bb66d8799f8010bd6c2b784a6 Binary files /dev/null and b/styletts2/Modules/diffusion/__pycache__/diffusion.cpython-310.pyc differ diff --git a/styletts2/Modules/diffusion/__pycache__/modules.cpython-310.pyc b/styletts2/Modules/diffusion/__pycache__/modules.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..414ed562574351ec014f8a1f6abdafc59dda5479 Binary files /dev/null and b/styletts2/Modules/diffusion/__pycache__/modules.cpython-310.pyc differ diff --git a/styletts2/Modules/diffusion/__pycache__/sampler.cpython-310.pyc b/styletts2/Modules/diffusion/__pycache__/sampler.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dc720aeff10224e3ec5de0e55155932693832c13 Binary files /dev/null and b/styletts2/Modules/diffusion/__pycache__/sampler.cpython-310.pyc differ diff --git a/styletts2/Modules/diffusion/__pycache__/utils.cpython-310.pyc b/styletts2/Modules/diffusion/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e238626d215928b4af8e6b6f6e5bbcd5f1da5da2 Binary files /dev/null and 
b/styletts2/Modules/diffusion/__pycache__/utils.cpython-310.pyc differ diff --git a/styletts2/Modules/diffusion/diffusion.py b/styletts2/Modules/diffusion/diffusion.py new file mode 100644 index 0000000000000000000000000000000000000000..8f8d40041e4493f1d037cdbd33bd25091ffbdfe4 --- /dev/null +++ b/styletts2/Modules/diffusion/diffusion.py @@ -0,0 +1,94 @@ +from math import pi +from random import randint +from typing import Any, Optional, Sequence, Tuple, Union + +import torch +from einops import rearrange +from torch import Tensor, nn +from tqdm import tqdm + +from .utils import * +from .sampler import * + +""" +Diffusion Classes (generic for 1d data) +""" + + +class Model1d(nn.Module): + def __init__(self, unet_type: str = "base", **kwargs): + super().__init__() + diffusion_kwargs, kwargs = groupby("diffusion_", kwargs) + self.unet = None + self.diffusion = None + + def forward(self, x: Tensor, **kwargs) -> Tensor: + return self.diffusion(x, **kwargs) + + def sample(self, *args, **kwargs) -> Tensor: + return self.diffusion.sample(*args, **kwargs) + + +""" +Audio Diffusion Classes (specific for 1d audio data) +""" + + +def get_default_model_kwargs(): + return dict( + channels=128, + patch_size=16, + multipliers=[1, 2, 4, 4, 4, 4, 4], + factors=[4, 4, 4, 2, 2, 2], + num_blocks=[2, 2, 2, 2, 2, 2], + attentions=[0, 0, 0, 1, 1, 1, 1], + attention_heads=8, + attention_features=64, + attention_multiplier=2, + attention_use_rel_pos=False, + diffusion_type="v", + diffusion_sigma_distribution=UniformDistribution(), + ) + + +def get_default_sampling_kwargs(): + return dict(sigma_schedule=LinearSchedule(), sampler=VSampler(), clamp=True) + + +class AudioDiffusionModel(Model1d): + def __init__(self, **kwargs): + super().__init__(**{**get_default_model_kwargs(), **kwargs}) + + def sample(self, *args, **kwargs): + return super().sample(*args, **{**get_default_sampling_kwargs(), **kwargs}) + + +class AudioDiffusionConditional(Model1d): + def __init__( + self, + embedding_features: int, + embedding_max_length: int, + embedding_mask_proba: float = 0.1, + **kwargs, + ): + self.embedding_mask_proba = embedding_mask_proba + default_kwargs = dict( + **get_default_model_kwargs(), + unet_type="cfg", + context_embedding_features=embedding_features, + context_embedding_max_length=embedding_max_length, + ) + super().__init__(**{**default_kwargs, **kwargs}) + + def forward(self, *args, **kwargs): + default_kwargs = dict(embedding_mask_proba=self.embedding_mask_proba) + return super().forward(*args, **{**default_kwargs, **kwargs}) + + def sample(self, *args, **kwargs): + default_kwargs = dict( + **get_default_sampling_kwargs(), + embedding_scale=5.0, + ) + return super().sample(*args, **{**default_kwargs, **kwargs}) + + diff --git a/styletts2/Modules/diffusion/modules.py b/styletts2/Modules/diffusion/modules.py new file mode 100644 index 0000000000000000000000000000000000000000..9df85b993c3839ab01df6b6168334bdbf3cb1f30 --- /dev/null +++ b/styletts2/Modules/diffusion/modules.py @@ -0,0 +1,693 @@ +from math import floor, log, pi +from typing import Any, List, Optional, Sequence, Tuple, Union + +from .utils import * + +import torch +import torch.nn as nn +from einops import rearrange, reduce, repeat +from einops.layers.torch import Rearrange +from einops_exts import rearrange_many +from torch import Tensor, einsum + + +""" +Utils +""" + +class AdaLayerNorm(nn.Module): + def __init__(self, style_dim, channels, eps=1e-5): + super().__init__() + self.channels = channels + self.eps = eps + + self.fc = nn.Linear(style_dim, 
channels*2) + + def forward(self, x, s): + x = x.transpose(-1, -2) + x = x.transpose(1, -1) + + h = self.fc(s) + h = h.view(h.size(0), h.size(1), 1) + gamma, beta = torch.chunk(h, chunks=2, dim=1) + gamma, beta = gamma.transpose(1, -1), beta.transpose(1, -1) + + + x = F.layer_norm(x, (self.channels,), eps=self.eps) + x = (1 + gamma) * x + beta + return x.transpose(1, -1).transpose(-1, -2) + +class StyleTransformer1d(nn.Module): + def __init__( + self, + num_layers: int, + channels: int, + num_heads: int, + head_features: int, + multiplier: int, + use_context_time: bool = True, + use_rel_pos: bool = False, + context_features_multiplier: int = 1, + rel_pos_num_buckets: Optional[int] = None, + rel_pos_max_distance: Optional[int] = None, + context_features: Optional[int] = None, + context_embedding_features: Optional[int] = None, + embedding_max_length: int = 512, + ): + super().__init__() + + self.blocks = nn.ModuleList( + [ + StyleTransformerBlock( + features=channels + context_embedding_features, + head_features=head_features, + num_heads=num_heads, + multiplier=multiplier, + style_dim=context_features, + use_rel_pos=use_rel_pos, + rel_pos_num_buckets=rel_pos_num_buckets, + rel_pos_max_distance=rel_pos_max_distance, + ) + for i in range(num_layers) + ] + ) + + self.to_out = nn.Sequential( + Rearrange("b t c -> b c t"), + nn.Conv1d( + in_channels=channels + context_embedding_features, + out_channels=channels, + kernel_size=1, + ), + ) + + use_context_features = exists(context_features) + self.use_context_features = use_context_features + self.use_context_time = use_context_time + + if use_context_time or use_context_features: + context_mapping_features = channels + context_embedding_features + + self.to_mapping = nn.Sequential( + nn.Linear(context_mapping_features, context_mapping_features), + nn.GELU(), + nn.Linear(context_mapping_features, context_mapping_features), + nn.GELU(), + ) + + if use_context_time: + assert exists(context_mapping_features) + self.to_time = nn.Sequential( + TimePositionalEmbedding( + dim=channels, out_features=context_mapping_features + ), + nn.GELU(), + ) + + if use_context_features: + assert exists(context_features) and exists(context_mapping_features) + self.to_features = nn.Sequential( + nn.Linear( + in_features=context_features, out_features=context_mapping_features + ), + nn.GELU(), + ) + + self.fixed_embedding = FixedEmbedding( + max_length=embedding_max_length, features=context_embedding_features + ) + + + def get_mapping( + self, time: Optional[Tensor] = None, features: Optional[Tensor] = None + ) -> Optional[Tensor]: + """Combines context time features and features into mapping""" + items, mapping = [], None + # Compute time features + if self.use_context_time: + assert_message = "use_context_time=True but no time features provided" + assert exists(time), assert_message + items += [self.to_time(time)] + # Compute features + if self.use_context_features: + assert_message = "context_features exists but no features provided" + assert exists(features), assert_message + items += [self.to_features(features)] + + # Compute joint mapping + if self.use_context_time or self.use_context_features: + mapping = reduce(torch.stack(items), "n b m -> b m", "sum") + mapping = self.to_mapping(mapping) + + return mapping + + def run(self, x, time, embedding, features): + + mapping = self.get_mapping(time, features) + x = torch.cat([x.expand(-1, embedding.size(1), -1), embedding], axis=-1) + mapping = mapping.unsqueeze(1).expand(-1, embedding.size(1), -1) + + for block in 
self.blocks: + x = x + mapping + x = block(x, features) + + x = x.mean(axis=1).unsqueeze(1) + x = self.to_out(x) + x = x.transpose(-1, -2) + + return x + + def forward(self, x: Tensor, + time: Tensor, + embedding_mask_proba: float = 0.0, + embedding: Optional[Tensor] = None, + features: Optional[Tensor] = None, + embedding_scale: float = 1.0) -> Tensor: + + b, device = embedding.shape[0], embedding.device + fixed_embedding = self.fixed_embedding(embedding) + if embedding_mask_proba > 0.0: + # Randomly mask embedding + batch_mask = rand_bool( + shape=(b, 1, 1), proba=embedding_mask_proba, device=device + ) + embedding = torch.where(batch_mask, fixed_embedding, embedding) + + if embedding_scale != 1.0: + # Compute both normal and fixed embedding outputs + out = self.run(x, time, embedding=embedding, features=features) + out_masked = self.run(x, time, embedding=fixed_embedding, features=features) + # Scale conditional output using classifier-free guidance + return out_masked + (out - out_masked) * embedding_scale + else: + return self.run(x, time, embedding=embedding, features=features) + + return x + + +class StyleTransformerBlock(nn.Module): + def __init__( + self, + features: int, + num_heads: int, + head_features: int, + style_dim: int, + multiplier: int, + use_rel_pos: bool, + rel_pos_num_buckets: Optional[int] = None, + rel_pos_max_distance: Optional[int] = None, + context_features: Optional[int] = None, + ): + super().__init__() + + self.use_cross_attention = exists(context_features) and context_features > 0 + + self.attention = StyleAttention( + features=features, + style_dim=style_dim, + num_heads=num_heads, + head_features=head_features, + use_rel_pos=use_rel_pos, + rel_pos_num_buckets=rel_pos_num_buckets, + rel_pos_max_distance=rel_pos_max_distance, + ) + + if self.use_cross_attention: + self.cross_attention = StyleAttention( + features=features, + style_dim=style_dim, + num_heads=num_heads, + head_features=head_features, + context_features=context_features, + use_rel_pos=use_rel_pos, + rel_pos_num_buckets=rel_pos_num_buckets, + rel_pos_max_distance=rel_pos_max_distance, + ) + + self.feed_forward = FeedForward(features=features, multiplier=multiplier) + + def forward(self, x: Tensor, s: Tensor, *, context: Optional[Tensor] = None) -> Tensor: + x = self.attention(x, s) + x + if self.use_cross_attention: + x = self.cross_attention(x, s, context=context) + x + x = self.feed_forward(x) + x + return x + +class StyleAttention(nn.Module): + def __init__( + self, + features: int, + *, + style_dim: int, + head_features: int, + num_heads: int, + context_features: Optional[int] = None, + use_rel_pos: bool, + rel_pos_num_buckets: Optional[int] = None, + rel_pos_max_distance: Optional[int] = None, + ): + super().__init__() + self.context_features = context_features + mid_features = head_features * num_heads + context_features = default(context_features, features) + + self.norm = AdaLayerNorm(style_dim, features) + self.norm_context = AdaLayerNorm(style_dim, context_features) + self.to_q = nn.Linear( + in_features=features, out_features=mid_features, bias=False + ) + self.to_kv = nn.Linear( + in_features=context_features, out_features=mid_features * 2, bias=False + ) + self.attention = AttentionBase( + features, + num_heads=num_heads, + head_features=head_features, + use_rel_pos=use_rel_pos, + rel_pos_num_buckets=rel_pos_num_buckets, + rel_pos_max_distance=rel_pos_max_distance, + ) + + def forward(self, x: Tensor, s: Tensor, *, context: Optional[Tensor] = None) -> Tensor: + assert_message = "You 
must provide a context when using context_features" + assert not self.context_features or exists(context), assert_message + # Use context if provided + context = default(context, x) + # Normalize then compute q from input and k,v from context + x, context = self.norm(x, s), self.norm_context(context, s) + + q, k, v = (self.to_q(x), *torch.chunk(self.to_kv(context), chunks=2, dim=-1)) + # Compute and return attention + return self.attention(q, k, v) + +class Transformer1d(nn.Module): + def __init__( + self, + num_layers: int, + channels: int, + num_heads: int, + head_features: int, + multiplier: int, + use_context_time: bool = True, + use_rel_pos: bool = False, + context_features_multiplier: int = 1, + rel_pos_num_buckets: Optional[int] = None, + rel_pos_max_distance: Optional[int] = None, + context_features: Optional[int] = None, + context_embedding_features: Optional[int] = None, + embedding_max_length: int = 512, + ): + super().__init__() + + self.blocks = nn.ModuleList( + [ + TransformerBlock( + features=channels + context_embedding_features, + head_features=head_features, + num_heads=num_heads, + multiplier=multiplier, + use_rel_pos=use_rel_pos, + rel_pos_num_buckets=rel_pos_num_buckets, + rel_pos_max_distance=rel_pos_max_distance, + ) + for i in range(num_layers) + ] + ) + + self.to_out = nn.Sequential( + Rearrange("b t c -> b c t"), + nn.Conv1d( + in_channels=channels + context_embedding_features, + out_channels=channels, + kernel_size=1, + ), + ) + + use_context_features = exists(context_features) + self.use_context_features = use_context_features + self.use_context_time = use_context_time + + if use_context_time or use_context_features: + context_mapping_features = channels + context_embedding_features + + self.to_mapping = nn.Sequential( + nn.Linear(context_mapping_features, context_mapping_features), + nn.GELU(), + nn.Linear(context_mapping_features, context_mapping_features), + nn.GELU(), + ) + + if use_context_time: + assert exists(context_mapping_features) + self.to_time = nn.Sequential( + TimePositionalEmbedding( + dim=channels, out_features=context_mapping_features + ), + nn.GELU(), + ) + + if use_context_features: + assert exists(context_features) and exists(context_mapping_features) + self.to_features = nn.Sequential( + nn.Linear( + in_features=context_features, out_features=context_mapping_features + ), + nn.GELU(), + ) + + self.fixed_embedding = FixedEmbedding( + max_length=embedding_max_length, features=context_embedding_features + ) + + + def get_mapping( + self, time: Optional[Tensor] = None, features: Optional[Tensor] = None + ) -> Optional[Tensor]: + """Combines context time features and features into mapping""" + items, mapping = [], None + # Compute time features + if self.use_context_time: + assert_message = "use_context_time=True but no time features provided" + assert exists(time), assert_message + items += [self.to_time(time)] + # Compute features + if self.use_context_features: + assert_message = "context_features exists but no features provided" + assert exists(features), assert_message + items += [self.to_features(features)] + + # Compute joint mapping + if self.use_context_time or self.use_context_features: + mapping = reduce(torch.stack(items), "n b m -> b m", "sum") + mapping = self.to_mapping(mapping) + + return mapping + + def run(self, x, time, embedding, features): + + mapping = self.get_mapping(time, features) + x = torch.cat([x.expand(-1, embedding.size(1), -1), embedding], axis=-1) + mapping = mapping.unsqueeze(1).expand(-1, embedding.size(1), -1) 
+ + for block in self.blocks: + x = x + mapping + x = block(x) + + x = x.mean(axis=1).unsqueeze(1) + x = self.to_out(x) + x = x.transpose(-1, -2) + + return x + + def forward(self, x: Tensor, + time: Tensor, + embedding_mask_proba: float = 0.0, + embedding: Optional[Tensor] = None, + features: Optional[Tensor] = None, + embedding_scale: float = 1.0) -> Tensor: + + b, device = embedding.shape[0], embedding.device + fixed_embedding = self.fixed_embedding(embedding) + if embedding_mask_proba > 0.0: + # Randomly mask embedding + batch_mask = rand_bool( + shape=(b, 1, 1), proba=embedding_mask_proba, device=device + ) + embedding = torch.where(batch_mask, fixed_embedding, embedding) + + if embedding_scale != 1.0: + # Compute both normal and fixed embedding outputs + out = self.run(x, time, embedding=embedding, features=features) + out_masked = self.run(x, time, embedding=fixed_embedding, features=features) + # Scale conditional output using classifier-free guidance + return out_masked + (out - out_masked) * embedding_scale + else: + return self.run(x, time, embedding=embedding, features=features) + + return x + + +""" +Attention Components +""" + + +class RelativePositionBias(nn.Module): + def __init__(self, num_buckets: int, max_distance: int, num_heads: int): + super().__init__() + self.num_buckets = num_buckets + self.max_distance = max_distance + self.num_heads = num_heads + self.relative_attention_bias = nn.Embedding(num_buckets, num_heads) + + @staticmethod + def _relative_position_bucket( + relative_position: Tensor, num_buckets: int, max_distance: int + ): + num_buckets //= 2 + ret = (relative_position >= 0).to(torch.long) * num_buckets + n = torch.abs(relative_position) + + max_exact = num_buckets // 2 + is_small = n < max_exact + + val_if_large = ( + max_exact + + ( + torch.log(n.float() / max_exact) + / log(max_distance / max_exact) + * (num_buckets - max_exact) + ).long() + ) + val_if_large = torch.min( + val_if_large, torch.full_like(val_if_large, num_buckets - 1) + ) + + ret += torch.where(is_small, n, val_if_large) + return ret + + def forward(self, num_queries: int, num_keys: int) -> Tensor: + i, j, device = num_queries, num_keys, self.relative_attention_bias.weight.device + q_pos = torch.arange(j - i, j, dtype=torch.long, device=device) + k_pos = torch.arange(j, dtype=torch.long, device=device) + rel_pos = rearrange(k_pos, "j -> 1 j") - rearrange(q_pos, "i -> i 1") + + relative_position_bucket = self._relative_position_bucket( + rel_pos, num_buckets=self.num_buckets, max_distance=self.max_distance + ) + + bias = self.relative_attention_bias(relative_position_bucket) + bias = rearrange(bias, "m n h -> 1 h m n") + return bias + + +def FeedForward(features: int, multiplier: int) -> nn.Module: + mid_features = features * multiplier + return nn.Sequential( + nn.Linear(in_features=features, out_features=mid_features), + nn.GELU(), + nn.Linear(in_features=mid_features, out_features=features), + ) + + +class AttentionBase(nn.Module): + def __init__( + self, + features: int, + *, + head_features: int, + num_heads: int, + use_rel_pos: bool, + out_features: Optional[int] = None, + rel_pos_num_buckets: Optional[int] = None, + rel_pos_max_distance: Optional[int] = None, + ): + super().__init__() + self.scale = head_features ** -0.5 + self.num_heads = num_heads + self.use_rel_pos = use_rel_pos + mid_features = head_features * num_heads + + if use_rel_pos: + assert exists(rel_pos_num_buckets) and exists(rel_pos_max_distance) + self.rel_pos = RelativePositionBias( + 
num_buckets=rel_pos_num_buckets, + max_distance=rel_pos_max_distance, + num_heads=num_heads, + ) + if out_features is None: + out_features = features + + self.to_out = nn.Linear(in_features=mid_features, out_features=out_features) + + def forward(self, q: Tensor, k: Tensor, v: Tensor) -> Tensor: + # Split heads + q, k, v = rearrange_many((q, k, v), "b n (h d) -> b h n d", h=self.num_heads) + # Compute similarity matrix + sim = einsum("... n d, ... m d -> ... n m", q, k) + sim = (sim + self.rel_pos(*sim.shape[-2:])) if self.use_rel_pos else sim + sim = sim * self.scale + # Get attention matrix with softmax + attn = sim.softmax(dim=-1) + # Compute values + out = einsum("... n m, ... m d -> ... n d", attn, v) + out = rearrange(out, "b h n d -> b n (h d)") + return self.to_out(out) + + +class Attention(nn.Module): + def __init__( + self, + features: int, + *, + head_features: int, + num_heads: int, + out_features: Optional[int] = None, + context_features: Optional[int] = None, + use_rel_pos: bool, + rel_pos_num_buckets: Optional[int] = None, + rel_pos_max_distance: Optional[int] = None, + ): + super().__init__() + self.context_features = context_features + mid_features = head_features * num_heads + context_features = default(context_features, features) + + self.norm = nn.LayerNorm(features) + self.norm_context = nn.LayerNorm(context_features) + self.to_q = nn.Linear( + in_features=features, out_features=mid_features, bias=False + ) + self.to_kv = nn.Linear( + in_features=context_features, out_features=mid_features * 2, bias=False + ) + + self.attention = AttentionBase( + features, + out_features=out_features, + num_heads=num_heads, + head_features=head_features, + use_rel_pos=use_rel_pos, + rel_pos_num_buckets=rel_pos_num_buckets, + rel_pos_max_distance=rel_pos_max_distance, + ) + + def forward(self, x: Tensor, *, context: Optional[Tensor] = None) -> Tensor: + assert_message = "You must provide a context when using context_features" + assert not self.context_features or exists(context), assert_message + # Use context if provided + context = default(context, x) + # Normalize then compute q from input and k,v from context + x, context = self.norm(x), self.norm_context(context) + q, k, v = (self.to_q(x), *torch.chunk(self.to_kv(context), chunks=2, dim=-1)) + # Compute and return attention + return self.attention(q, k, v) + + +""" +Transformer Blocks +""" + + +class TransformerBlock(nn.Module): + def __init__( + self, + features: int, + num_heads: int, + head_features: int, + multiplier: int, + use_rel_pos: bool, + rel_pos_num_buckets: Optional[int] = None, + rel_pos_max_distance: Optional[int] = None, + context_features: Optional[int] = None, + ): + super().__init__() + + self.use_cross_attention = exists(context_features) and context_features > 0 + + self.attention = Attention( + features=features, + num_heads=num_heads, + head_features=head_features, + use_rel_pos=use_rel_pos, + rel_pos_num_buckets=rel_pos_num_buckets, + rel_pos_max_distance=rel_pos_max_distance, + ) + + if self.use_cross_attention: + self.cross_attention = Attention( + features=features, + num_heads=num_heads, + head_features=head_features, + context_features=context_features, + use_rel_pos=use_rel_pos, + rel_pos_num_buckets=rel_pos_num_buckets, + rel_pos_max_distance=rel_pos_max_distance, + ) + + self.feed_forward = FeedForward(features=features, multiplier=multiplier) + + def forward(self, x: Tensor, *, context: Optional[Tensor] = None) -> Tensor: + x = self.attention(x) + x + if self.use_cross_attention: + x = 
self.cross_attention(x, context=context) + x + x = self.feed_forward(x) + x + return x + + + +""" +Time Embeddings +""" + + +class SinusoidalEmbedding(nn.Module): + def __init__(self, dim: int): + super().__init__() + self.dim = dim + + def forward(self, x: Tensor) -> Tensor: + device, half_dim = x.device, self.dim // 2 + emb = torch.tensor(log(10000) / (half_dim - 1), device=device) + emb = torch.exp(torch.arange(half_dim, device=device) * -emb) + emb = rearrange(x, "i -> i 1") * rearrange(emb, "j -> 1 j") + return torch.cat((emb.sin(), emb.cos()), dim=-1) + + +class LearnedPositionalEmbedding(nn.Module): + """Used for continuous time""" + + def __init__(self, dim: int): + super().__init__() + assert (dim % 2) == 0 + half_dim = dim // 2 + self.weights = nn.Parameter(torch.randn(half_dim)) + + def forward(self, x: Tensor) -> Tensor: + x = rearrange(x, "b -> b 1") + freqs = x * rearrange(self.weights, "d -> 1 d") * 2 * pi + fouriered = torch.cat((freqs.sin(), freqs.cos()), dim=-1) + fouriered = torch.cat((x, fouriered), dim=-1) + return fouriered + + +def TimePositionalEmbedding(dim: int, out_features: int) -> nn.Module: + return nn.Sequential( + LearnedPositionalEmbedding(dim), + nn.Linear(in_features=dim + 1, out_features=out_features), + ) + +class FixedEmbedding(nn.Module): + def __init__(self, max_length: int, features: int): + super().__init__() + self.max_length = max_length + self.embedding = nn.Embedding(max_length, features) + + def forward(self, x: Tensor) -> Tensor: + batch_size, length, device = *x.shape[0:2], x.device + assert_message = "Input sequence length must be <= max_length" + assert length <= self.max_length, assert_message + position = torch.arange(length, device=device) + fixed_embedding = self.embedding(position) + fixed_embedding = repeat(fixed_embedding, "n d -> b n d", b=batch_size) + return fixed_embedding diff --git a/styletts2/Modules/diffusion/sampler.py b/styletts2/Modules/diffusion/sampler.py new file mode 100644 index 0000000000000000000000000000000000000000..467bd0d16093564e3ddcfe9033d2097986d54a27 --- /dev/null +++ b/styletts2/Modules/diffusion/sampler.py @@ -0,0 +1,691 @@ +from math import atan, cos, pi, sin, sqrt +from typing import Any, Callable, List, Optional, Tuple, Type + +import torch +import torch.nn as nn +import torch.nn.functional as F +from einops import rearrange, reduce +from torch import Tensor + +from .utils import * + +""" +Diffusion Training +""" + +""" Distributions """ + + +class Distribution: + def __call__(self, num_samples: int, device: torch.device): + raise NotImplementedError() + + +class LogNormalDistribution(Distribution): + def __init__(self, mean: float, std: float): + self.mean = mean + self.std = std + + def __call__( + self, num_samples: int, device: torch.device = torch.device("cpu") + ) -> Tensor: + normal = self.mean + self.std * torch.randn((num_samples,), device=device) + return normal.exp() + + +class UniformDistribution(Distribution): + def __call__(self, num_samples: int, device: torch.device = torch.device("cpu")): + return torch.rand(num_samples, device=device) + + +class VKDistribution(Distribution): + def __init__( + self, + min_value: float = 0.0, + max_value: float = float("inf"), + sigma_data: float = 1.0, + ): + self.min_value = min_value + self.max_value = max_value + self.sigma_data = sigma_data + + def __call__( + self, num_samples: int, device: torch.device = torch.device("cpu") + ) -> Tensor: + sigma_data = self.sigma_data + min_cdf = atan(self.min_value / sigma_data) * 2 / pi + max_cdf = 
atan(self.max_value / sigma_data) * 2 / pi + u = (max_cdf - min_cdf) * torch.randn((num_samples,), device=device) + min_cdf + return torch.tan(u * pi / 2) * sigma_data + + +""" Diffusion Classes """ + + +def pad_dims(x: Tensor, ndim: int) -> Tensor: + # Pads additional ndims to the right of the tensor + return x.view(*x.shape, *((1,) * ndim)) + + +def clip(x: Tensor, dynamic_threshold: float = 0.0): + if dynamic_threshold == 0.0: + return x.clamp(-1.0, 1.0) + else: + # Dynamic thresholding + # Find dynamic threshold quantile for each batch + x_flat = rearrange(x, "b ... -> b (...)") + scale = torch.quantile(x_flat.abs(), dynamic_threshold, dim=-1) + # Clamp to a min of 1.0 + scale.clamp_(min=1.0) + # Clamp all values and scale + scale = pad_dims(scale, ndim=x.ndim - scale.ndim) + x = x.clamp(-scale, scale) / scale + return x + + +def to_batch( + batch_size: int, + device: torch.device, + x: Optional[float] = None, + xs: Optional[Tensor] = None, +) -> Tensor: + assert exists(x) ^ exists(xs), "Either x or xs must be provided" + # If x provided use the same for all batch items + if exists(x): + xs = torch.full(size=(batch_size,), fill_value=x).to(device) + assert exists(xs) + return xs + + +class Diffusion(nn.Module): + + alias: str = "" + + """Base diffusion class""" + + def denoise_fn( + self, + x_noisy: Tensor, + sigmas: Optional[Tensor] = None, + sigma: Optional[float] = None, + **kwargs, + ) -> Tensor: + raise NotImplementedError("Diffusion class missing denoise_fn") + + def forward(self, x: Tensor, noise: Tensor = None, **kwargs) -> Tensor: + raise NotImplementedError("Diffusion class missing forward function") + + +class VDiffusion(Diffusion): + + alias = "v" + + def __init__(self, net: nn.Module, *, sigma_distribution: Distribution): + super().__init__() + self.net = net + self.sigma_distribution = sigma_distribution + + def get_alpha_beta(self, sigmas: Tensor) -> Tuple[Tensor, Tensor]: + angle = sigmas * pi / 2 + alpha = torch.cos(angle) + beta = torch.sin(angle) + return alpha, beta + + def denoise_fn( + self, + x_noisy: Tensor, + sigmas: Optional[Tensor] = None, + sigma: Optional[float] = None, + **kwargs, + ) -> Tensor: + batch_size, device = x_noisy.shape[0], x_noisy.device + sigmas = to_batch(x=sigma, xs=sigmas, batch_size=batch_size, device=device) + return self.net(x_noisy, sigmas, **kwargs) + + def forward(self, x: Tensor, noise: Tensor = None, **kwargs) -> Tensor: + batch_size, device = x.shape[0], x.device + + # Sample amount of noise to add for each batch element + sigmas = self.sigma_distribution(num_samples=batch_size, device=device) + sigmas_padded = rearrange(sigmas, "b -> b 1 1") + + # Get noise + noise = default(noise, lambda: torch.randn_like(x)) + + # Combine input and noise weighted by half-circle + alpha, beta = self.get_alpha_beta(sigmas_padded) + x_noisy = x * alpha + noise * beta + x_target = noise * alpha - x * beta + + # Denoise and return loss + x_denoised = self.denoise_fn(x_noisy, sigmas, **kwargs) + return F.mse_loss(x_denoised, x_target) + + +class KDiffusion(Diffusion): + """Elucidated Diffusion (Karras et al. 
2022): https://arxiv.org/abs/2206.00364""" + + alias = "k" + + def __init__( + self, + net: nn.Module, + *, + sigma_distribution: Distribution, + sigma_data: float, # data distribution standard deviation + dynamic_threshold: float = 0.0, + ): + super().__init__() + self.net = net + self.sigma_data = sigma_data + self.sigma_distribution = sigma_distribution + self.dynamic_threshold = dynamic_threshold + + def get_scale_weights(self, sigmas: Tensor) -> Tuple[Tensor, ...]: + sigma_data = self.sigma_data + c_noise = torch.log(sigmas) * 0.25 + sigmas = rearrange(sigmas, "b -> b 1 1") + c_skip = (sigma_data ** 2) / (sigmas ** 2 + sigma_data ** 2) + c_out = sigmas * sigma_data * (sigma_data ** 2 + sigmas ** 2) ** -0.5 + c_in = (sigmas ** 2 + sigma_data ** 2) ** -0.5 + return c_skip, c_out, c_in, c_noise + + def denoise_fn( + self, + x_noisy: Tensor, + sigmas: Optional[Tensor] = None, + sigma: Optional[float] = None, + **kwargs, + ) -> Tensor: + batch_size, device = x_noisy.shape[0], x_noisy.device + sigmas = to_batch(x=sigma, xs=sigmas, batch_size=batch_size, device=device) + + # Predict network output and add skip connection + c_skip, c_out, c_in, c_noise = self.get_scale_weights(sigmas) + x_pred = self.net(c_in * x_noisy, c_noise, **kwargs) + x_denoised = c_skip * x_noisy + c_out * x_pred + + return x_denoised + + def loss_weight(self, sigmas: Tensor) -> Tensor: + # Computes weight depending on data distribution + return (sigmas ** 2 + self.sigma_data ** 2) * (sigmas * self.sigma_data) ** -2 + + def forward(self, x: Tensor, noise: Tensor = None, **kwargs) -> Tensor: + batch_size, device = x.shape[0], x.device + from einops import rearrange, reduce + + # Sample amount of noise to add for each batch element + sigmas = self.sigma_distribution(num_samples=batch_size, device=device) + sigmas_padded = rearrange(sigmas, "b -> b 1 1") + + # Add noise to input + noise = default(noise, lambda: torch.randn_like(x)) + x_noisy = x + sigmas_padded * noise + + # Compute denoised values + x_denoised = self.denoise_fn(x_noisy, sigmas=sigmas, **kwargs) + + # Compute weighted loss + losses = F.mse_loss(x_denoised, x, reduction="none") + losses = reduce(losses, "b ... 
-> b", "mean") + losses = losses * self.loss_weight(sigmas) + loss = losses.mean() + return loss + + +class VKDiffusion(Diffusion): + + alias = "vk" + + def __init__(self, net: nn.Module, *, sigma_distribution: Distribution): + super().__init__() + self.net = net + self.sigma_distribution = sigma_distribution + + def get_scale_weights(self, sigmas: Tensor) -> Tuple[Tensor, ...]: + sigma_data = 1.0 + sigmas = rearrange(sigmas, "b -> b 1 1") + c_skip = (sigma_data ** 2) / (sigmas ** 2 + sigma_data ** 2) + c_out = -sigmas * sigma_data * (sigma_data ** 2 + sigmas ** 2) ** -0.5 + c_in = (sigmas ** 2 + sigma_data ** 2) ** -0.5 + return c_skip, c_out, c_in + + def sigma_to_t(self, sigmas: Tensor) -> Tensor: + return sigmas.atan() / pi * 2 + + def t_to_sigma(self, t: Tensor) -> Tensor: + return (t * pi / 2).tan() + + def denoise_fn( + self, + x_noisy: Tensor, + sigmas: Optional[Tensor] = None, + sigma: Optional[float] = None, + **kwargs, + ) -> Tensor: + batch_size, device = x_noisy.shape[0], x_noisy.device + sigmas = to_batch(x=sigma, xs=sigmas, batch_size=batch_size, device=device) + + # Predict network output and add skip connection + c_skip, c_out, c_in = self.get_scale_weights(sigmas) + x_pred = self.net(c_in * x_noisy, self.sigma_to_t(sigmas), **kwargs) + x_denoised = c_skip * x_noisy + c_out * x_pred + return x_denoised + + def forward(self, x: Tensor, noise: Tensor = None, **kwargs) -> Tensor: + batch_size, device = x.shape[0], x.device + + # Sample amount of noise to add for each batch element + sigmas = self.sigma_distribution(num_samples=batch_size, device=device) + sigmas_padded = rearrange(sigmas, "b -> b 1 1") + + # Add noise to input + noise = default(noise, lambda: torch.randn_like(x)) + x_noisy = x + sigmas_padded * noise + + # Compute model output + c_skip, c_out, c_in = self.get_scale_weights(sigmas) + x_pred = self.net(c_in * x_noisy, self.sigma_to_t(sigmas), **kwargs) + + # Compute v-objective target + v_target = (x - c_skip * x_noisy) / (c_out + 1e-7) + + # Compute loss + loss = F.mse_loss(x_pred, v_target) + return loss + + +""" +Diffusion Sampling +""" + +""" Schedules """ + + +class Schedule(nn.Module): + """Interface used by different sampling schedules""" + + def forward(self, num_steps: int, device: torch.device) -> Tensor: + raise NotImplementedError() + + +class LinearSchedule(Schedule): + def forward(self, num_steps: int, device: Any) -> Tensor: + sigmas = torch.linspace(1, 0, num_steps + 1)[:-1] + return sigmas + + +class KarrasSchedule(Schedule): + """https://arxiv.org/abs/2206.00364 equation 5""" + + def __init__(self, sigma_min: float, sigma_max: float, rho: float = 7.0): + super().__init__() + self.sigma_min = sigma_min + self.sigma_max = sigma_max + self.rho = rho + + def forward(self, num_steps: int, device: Any) -> Tensor: + rho_inv = 1.0 / self.rho + steps = torch.arange(num_steps, device=device, dtype=torch.float32) + sigmas = ( + self.sigma_max ** rho_inv + + (steps / (num_steps - 1)) + * (self.sigma_min ** rho_inv - self.sigma_max ** rho_inv) + ) ** self.rho + sigmas = F.pad(sigmas, pad=(0, 1), value=0.0) + return sigmas + + +""" Samplers """ + + +class Sampler(nn.Module): + + diffusion_types: List[Type[Diffusion]] = [] + + def forward( + self, noise: Tensor, fn: Callable, sigmas: Tensor, num_steps: int + ) -> Tensor: + raise NotImplementedError() + + def inpaint( + self, + source: Tensor, + mask: Tensor, + fn: Callable, + sigmas: Tensor, + num_steps: int, + num_resamples: int, + ) -> Tensor: + raise NotImplementedError("Inpainting not available with 
current sampler") + + +class VSampler(Sampler): + + diffusion_types = [VDiffusion] + + def get_alpha_beta(self, sigma: float) -> Tuple[float, float]: + angle = sigma * pi / 2 + alpha = cos(angle) + beta = sin(angle) + return alpha, beta + + def forward( + self, noise: Tensor, fn: Callable, sigmas: Tensor, num_steps: int + ) -> Tensor: + x = sigmas[0] * noise + alpha, beta = self.get_alpha_beta(sigmas[0].item()) + + for i in range(num_steps - 1): + is_last = i == num_steps - 1 + + x_denoised = fn(x, sigma=sigmas[i]) + x_pred = x * alpha - x_denoised * beta + x_eps = x * beta + x_denoised * alpha + + if not is_last: + alpha, beta = self.get_alpha_beta(sigmas[i + 1].item()) + x = x_pred * alpha + x_eps * beta + + return x_pred + + +class KarrasSampler(Sampler): + """https://arxiv.org/abs/2206.00364 algorithm 1""" + + diffusion_types = [KDiffusion, VKDiffusion] + + def __init__( + self, + s_tmin: float = 0, + s_tmax: float = float("inf"), + s_churn: float = 0.0, + s_noise: float = 1.0, + ): + super().__init__() + self.s_tmin = s_tmin + self.s_tmax = s_tmax + self.s_noise = s_noise + self.s_churn = s_churn + + def step( + self, x: Tensor, fn: Callable, sigma: float, sigma_next: float, gamma: float + ) -> Tensor: + """Algorithm 2 (step)""" + # Select temporarily increased noise level + sigma_hat = sigma + gamma * sigma + # Add noise to move from sigma to sigma_hat + epsilon = self.s_noise * torch.randn_like(x) + x_hat = x + sqrt(sigma_hat ** 2 - sigma ** 2) * epsilon + # Evaluate ∂x/∂sigma at sigma_hat + d = (x_hat - fn(x_hat, sigma=sigma_hat)) / sigma_hat + # Take euler step from sigma_hat to sigma_next + x_next = x_hat + (sigma_next - sigma_hat) * d + # Second order correction + if sigma_next != 0: + model_out_next = fn(x_next, sigma=sigma_next) + d_prime = (x_next - model_out_next) / sigma_next + x_next = x_hat + 0.5 * (sigma - sigma_hat) * (d + d_prime) + return x_next + + def forward( + self, noise: Tensor, fn: Callable, sigmas: Tensor, num_steps: int + ) -> Tensor: + x = sigmas[0] * noise + # Compute gammas + gammas = torch.where( + (sigmas >= self.s_tmin) & (sigmas <= self.s_tmax), + min(self.s_churn / num_steps, sqrt(2) - 1), + 0.0, + ) + # Denoise to sample + for i in range(num_steps - 1): + x = self.step( + x, fn=fn, sigma=sigmas[i], sigma_next=sigmas[i + 1], gamma=gammas[i] # type: ignore # noqa + ) + + return x + + +class AEulerSampler(Sampler): + + diffusion_types = [KDiffusion, VKDiffusion] + + def get_sigmas(self, sigma: float, sigma_next: float) -> Tuple[float, float]: + sigma_up = sqrt(sigma_next ** 2 * (sigma ** 2 - sigma_next ** 2) / sigma ** 2) + sigma_down = sqrt(sigma_next ** 2 - sigma_up ** 2) + return sigma_up, sigma_down + + def step(self, x: Tensor, fn: Callable, sigma: float, sigma_next: float) -> Tensor: + # Sigma steps + sigma_up, sigma_down = self.get_sigmas(sigma, sigma_next) + # Derivative at sigma (∂x/∂sigma) + d = (x - fn(x, sigma=sigma)) / sigma + # Euler method + x_next = x + d * (sigma_down - sigma) + # Add randomness + x_next = x_next + torch.randn_like(x) * sigma_up + return x_next + + def forward( + self, noise: Tensor, fn: Callable, sigmas: Tensor, num_steps: int + ) -> Tensor: + x = sigmas[0] * noise + # Denoise to sample + for i in range(num_steps - 1): + x = self.step(x, fn=fn, sigma=sigmas[i], sigma_next=sigmas[i + 1]) # type: ignore # noqa + return x + + +class ADPM2Sampler(Sampler): + """https://www.desmos.com/calculator/jbxjlqd9mb""" + + diffusion_types = [KDiffusion, VKDiffusion] + + def __init__(self, rho: float = 1.0): + super().__init__() + 
self.rho = rho + + def get_sigmas(self, sigma: float, sigma_next: float) -> Tuple[float, float, float]: + r = self.rho + sigma_up = sqrt(sigma_next ** 2 * (sigma ** 2 - sigma_next ** 2) / sigma ** 2) + sigma_down = sqrt(sigma_next ** 2 - sigma_up ** 2) + sigma_mid = ((sigma ** (1 / r) + sigma_down ** (1 / r)) / 2) ** r + return sigma_up, sigma_down, sigma_mid + + def step(self, x: Tensor, fn: Callable, sigma: float, sigma_next: float) -> Tensor: + # Sigma steps + sigma_up, sigma_down, sigma_mid = self.get_sigmas(sigma, sigma_next) + # Derivative at sigma (∂x/∂sigma) + d = (x - fn(x, sigma=sigma)) / sigma + # Denoise to midpoint + x_mid = x + d * (sigma_mid - sigma) + # Derivative at sigma_mid (∂x_mid/∂sigma_mid) + d_mid = (x_mid - fn(x_mid, sigma=sigma_mid)) / sigma_mid + # Denoise to next + x = x + d_mid * (sigma_down - sigma) + # Add randomness + x_next = x + torch.randn_like(x) * sigma_up + return x_next + + def forward( + self, noise: Tensor, fn: Callable, sigmas: Tensor, num_steps: int + ) -> Tensor: + x = sigmas[0] * noise + # Denoise to sample + for i in range(num_steps - 1): + x = self.step(x, fn=fn, sigma=sigmas[i], sigma_next=sigmas[i + 1]) # type: ignore # noqa + return x + + def inpaint( + self, + source: Tensor, + mask: Tensor, + fn: Callable, + sigmas: Tensor, + num_steps: int, + num_resamples: int, + ) -> Tensor: + x = sigmas[0] * torch.randn_like(source) + + for i in range(num_steps - 1): + # Noise source to current noise level + source_noisy = source + sigmas[i] * torch.randn_like(source) + for r in range(num_resamples): + # Merge noisy source and current then denoise + x = source_noisy * mask + x * ~mask + x = self.step(x, fn=fn, sigma=sigmas[i], sigma_next=sigmas[i + 1]) # type: ignore # noqa + # Renoise if not last resample step + if r < num_resamples - 1: + sigma = sqrt(sigmas[i] ** 2 - sigmas[i + 1] ** 2) + x = x + sigma * torch.randn_like(x) + + return source * mask + x * ~mask + + +""" Main Classes """ + + +class DiffusionSampler(nn.Module): + def __init__( + self, + diffusion: Diffusion, + *, + sampler: Sampler, + sigma_schedule: Schedule, + num_steps: Optional[int] = None, + clamp: bool = True, + ): + super().__init__() + self.denoise_fn = diffusion.denoise_fn + self.sampler = sampler + self.sigma_schedule = sigma_schedule + self.num_steps = num_steps + self.clamp = clamp + + # Check sampler is compatible with diffusion type + sampler_class = sampler.__class__.__name__ + diffusion_class = diffusion.__class__.__name__ + message = f"{sampler_class} incompatible with {diffusion_class}" + assert diffusion.alias in [t.alias for t in sampler.diffusion_types], message + + def forward( + self, noise: Tensor, num_steps: Optional[int] = None, **kwargs + ) -> Tensor: + device = noise.device + num_steps = default(num_steps, self.num_steps) # type: ignore + assert exists(num_steps), "Parameter `num_steps` must be provided" + # Compute sigmas using schedule + sigmas = self.sigma_schedule(num_steps, device) + # Append additional kwargs to denoise function (used e.g. 
for conditional unet) + fn = lambda *a, **ka: self.denoise_fn(*a, **{**ka, **kwargs}) # noqa + # Sample using sampler + x = self.sampler(noise, fn=fn, sigmas=sigmas, num_steps=num_steps) + x = x.clamp(-1.0, 1.0) if self.clamp else x + return x + + +class DiffusionInpainter(nn.Module): + def __init__( + self, + diffusion: Diffusion, + *, + num_steps: int, + num_resamples: int, + sampler: Sampler, + sigma_schedule: Schedule, + ): + super().__init__() + self.denoise_fn = diffusion.denoise_fn + self.num_steps = num_steps + self.num_resamples = num_resamples + self.inpaint_fn = sampler.inpaint + self.sigma_schedule = sigma_schedule + + @torch.no_grad() + def forward(self, inpaint: Tensor, inpaint_mask: Tensor) -> Tensor: + x = self.inpaint_fn( + source=inpaint, + mask=inpaint_mask, + fn=self.denoise_fn, + sigmas=self.sigma_schedule(self.num_steps, inpaint.device), + num_steps=self.num_steps, + num_resamples=self.num_resamples, + ) + return x + + +def sequential_mask(like: Tensor, start: int) -> Tensor: + length, device = like.shape[2], like.device + mask = torch.ones_like(like, dtype=torch.bool) + mask[:, :, start:] = torch.zeros((length - start,), device=device) + return mask + + +class SpanBySpanComposer(nn.Module): + def __init__( + self, + inpainter: DiffusionInpainter, + *, + num_spans: int, + ): + super().__init__() + self.inpainter = inpainter + self.num_spans = num_spans + + def forward(self, start: Tensor, keep_start: bool = False) -> Tensor: + half_length = start.shape[2] // 2 + + spans = list(start.chunk(chunks=2, dim=-1)) if keep_start else [] + # Inpaint second half from first half + inpaint = torch.zeros_like(start) + inpaint[:, :, :half_length] = start[:, :, half_length:] + inpaint_mask = sequential_mask(like=start, start=half_length) + + for i in range(self.num_spans): + # Inpaint second half + span = self.inpainter(inpaint=inpaint, inpaint_mask=inpaint_mask) + # Replace first half with generated second half + second_half = span[:, :, half_length:] + inpaint[:, :, :half_length] = second_half + # Save generated span + spans.append(second_half) + + return torch.cat(spans, dim=2) + + +class XDiffusion(nn.Module): + def __init__(self, type: str, net: nn.Module, **kwargs): + super().__init__() + + diffusion_classes = [VDiffusion, KDiffusion, VKDiffusion] + aliases = [t.alias for t in diffusion_classes] # type: ignore + message = f"type='{type}' must be one of {*aliases,}" + assert type in aliases, message + self.net = net + + for XDiffusion in diffusion_classes: + if XDiffusion.alias == type: # type: ignore + self.diffusion = XDiffusion(net=net, **kwargs) + + def forward(self, *args, **kwargs) -> Tensor: + return self.diffusion(*args, **kwargs) + + def sample( + self, + noise: Tensor, + num_steps: int, + sigma_schedule: Schedule, + sampler: Sampler, + clamp: bool, + **kwargs, + ) -> Tensor: + diffusion_sampler = DiffusionSampler( + diffusion=self.diffusion, + sampler=sampler, + sigma_schedule=sigma_schedule, + num_steps=num_steps, + clamp=clamp, + ) + return diffusion_sampler(noise, **kwargs) diff --git a/styletts2/Modules/diffusion/utils.py b/styletts2/Modules/diffusion/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..f330b98dddf65f6ae8473303a7d5abd31563f362 --- /dev/null +++ b/styletts2/Modules/diffusion/utils.py @@ -0,0 +1,82 @@ +from functools import reduce +from inspect import isfunction +from math import ceil, floor, log2, pi +from typing import Callable, Dict, List, Optional, Sequence, Tuple, TypeVar, Union + +import torch +import 
torch.nn.functional as F +from einops import rearrange +from torch import Generator, Tensor +from typing_extensions import TypeGuard + +T = TypeVar("T") + + +def exists(val: Optional[T]) -> TypeGuard[T]: + return val is not None + + +def iff(condition: bool, value: T) -> Optional[T]: + return value if condition else None + + +def is_sequence(obj: T) -> TypeGuard[Union[list, tuple]]: + return isinstance(obj, list) or isinstance(obj, tuple) + + +def default(val: Optional[T], d: Union[Callable[..., T], T]) -> T: + if exists(val): + return val + return d() if isfunction(d) else d + + +def to_list(val: Union[T, Sequence[T]]) -> List[T]: + if isinstance(val, tuple): + return list(val) + if isinstance(val, list): + return val + return [val] # type: ignore + + +def prod(vals: Sequence[int]) -> int: + return reduce(lambda x, y: x * y, vals) + + +def closest_power_2(x: float) -> int: + exponent = log2(x) + distance_fn = lambda z: abs(x - 2 ** z) # noqa + exponent_closest = min((floor(exponent), ceil(exponent)), key=distance_fn) + return 2 ** int(exponent_closest) + +def rand_bool(shape, proba, device = None): + if proba == 1: + return torch.ones(shape, device=device, dtype=torch.bool) + elif proba == 0: + return torch.zeros(shape, device=device, dtype=torch.bool) + else: + return torch.bernoulli(torch.full(shape, proba, device=device)).to(torch.bool) + + +""" +Kwargs Utils +""" + + +def group_dict_by_prefix(prefix: str, d: Dict) -> Tuple[Dict, Dict]: + return_dicts: Tuple[Dict, Dict] = ({}, {}) + for key in d.keys(): + no_prefix = int(not key.startswith(prefix)) + return_dicts[no_prefix][key] = d[key] + return return_dicts + + +def groupby(prefix: str, d: Dict, keep_prefix: bool = False) -> Tuple[Dict, Dict]: + kwargs_with_prefix, kwargs = group_dict_by_prefix(prefix, d) + if keep_prefix: + return kwargs_with_prefix, kwargs + kwargs_no_prefix = {k[len(prefix) :]: v for k, v in kwargs_with_prefix.items()} + return kwargs_no_prefix, kwargs + + +def prefix_dict(prefix: str, d: Dict) -> Dict: + return {prefix + str(k): v for k, v in d.items()} diff --git a/styletts2/Modules/discriminators.py b/styletts2/Modules/discriminators.py new file mode 100644 index 0000000000000000000000000000000000000000..31a187acc41d04d6afb29a05dbbf5d951889598c --- /dev/null +++ b/styletts2/Modules/discriminators.py @@ -0,0 +1,188 @@ +import torch +import torch.nn.functional as F +import torch.nn as nn +from torch.nn import Conv1d, AvgPool1d, Conv2d +from torch.nn.utils import weight_norm, spectral_norm + +from .utils import get_padding + +LRELU_SLOPE = 0.1 + +def stft(x, fft_size, hop_size, win_length, window): + """Perform STFT and convert to magnitude spectrogram. + Args: + x (Tensor): Input signal tensor (B, T). + fft_size (int): FFT size. + hop_size (int): Hop size. + win_length (int): Window length. + window (str): Window function type. + Returns: + Tensor: Magnitude spectrogram (B, #frames, fft_size // 2 + 1). 
+ """ + x_stft = torch.stft(x, fft_size, hop_size, win_length, window, + return_complex=True) + real = x_stft[..., 0] + imag = x_stft[..., 1] + + return torch.abs(x_stft).transpose(2, 1) + +class SpecDiscriminator(nn.Module): + """docstring for Discriminator.""" + + def __init__(self, fft_size=1024, shift_size=120, win_length=600, window="hann_window", use_spectral_norm=False): + super(SpecDiscriminator, self).__init__() + norm_f = weight_norm if use_spectral_norm == False else spectral_norm + self.fft_size = fft_size + self.shift_size = shift_size + self.win_length = win_length + self.window = getattr(torch, window)(win_length) + self.discriminators = nn.ModuleList([ + norm_f(nn.Conv2d(1, 32, kernel_size=(3, 9), padding=(1, 4))), + norm_f(nn.Conv2d(32, 32, kernel_size=(3, 9), stride=(1,2), padding=(1, 4))), + norm_f(nn.Conv2d(32, 32, kernel_size=(3, 9), stride=(1,2), padding=(1, 4))), + norm_f(nn.Conv2d(32, 32, kernel_size=(3, 9), stride=(1,2), padding=(1, 4))), + norm_f(nn.Conv2d(32, 32, kernel_size=(3, 3), stride=(1,1), padding=(1, 1))), + ]) + + self.out = norm_f(nn.Conv2d(32, 1, 3, 1, 1)) + + def forward(self, y): + + fmap = [] + y = y.squeeze(1) + y = stft(y, self.fft_size, self.shift_size, self.win_length, self.window.to(y.get_device())) + y = y.unsqueeze(1) + for i, d in enumerate(self.discriminators): + y = d(y) + y = F.leaky_relu(y, LRELU_SLOPE) + fmap.append(y) + + y = self.out(y) + fmap.append(y) + + return torch.flatten(y, 1, -1), fmap + +class MultiResSpecDiscriminator(torch.nn.Module): + + def __init__(self, + fft_sizes=[1024, 2048, 512], + hop_sizes=[120, 240, 50], + win_lengths=[600, 1200, 240], + window="hann_window"): + + super(MultiResSpecDiscriminator, self).__init__() + self.discriminators = nn.ModuleList([ + SpecDiscriminator(fft_sizes[0], hop_sizes[0], win_lengths[0], window), + SpecDiscriminator(fft_sizes[1], hop_sizes[1], win_lengths[1], window), + SpecDiscriminator(fft_sizes[2], hop_sizes[2], win_lengths[2], window) + ]) + + def forward(self, y, y_hat): + y_d_rs = [] + y_d_gs = [] + fmap_rs = [] + fmap_gs = [] + for i, d in enumerate(self.discriminators): + y_d_r, fmap_r = d(y) + y_d_g, fmap_g = d(y_hat) + y_d_rs.append(y_d_r) + fmap_rs.append(fmap_r) + y_d_gs.append(y_d_g) + fmap_gs.append(fmap_g) + + return y_d_rs, y_d_gs, fmap_rs, fmap_gs + + +class DiscriminatorP(torch.nn.Module): + def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False): + super(DiscriminatorP, self).__init__() + self.period = period + norm_f = weight_norm if use_spectral_norm == False else spectral_norm + self.convs = nn.ModuleList([ + norm_f(Conv2d(1, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))), + norm_f(Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))), + norm_f(Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))), + norm_f(Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))), + norm_f(Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(2, 0))), + ]) + self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0))) + + def forward(self, x): + fmap = [] + + # 1d to 2d + b, c, t = x.shape + if t % self.period != 0: # pad first + n_pad = self.period - (t % self.period) + x = F.pad(x, (0, n_pad), "reflect") + t = t + n_pad + x = x.view(b, c, t // self.period, self.period) + + for l in self.convs: + x = l(x) + x = F.leaky_relu(x, LRELU_SLOPE) + fmap.append(x) + x = self.conv_post(x) + fmap.append(x) + x = torch.flatten(x, 1, -1) + + return x, fmap + + +class 
MultiPeriodDiscriminator(torch.nn.Module): + def __init__(self): + super(MultiPeriodDiscriminator, self).__init__() + self.discriminators = nn.ModuleList([ + DiscriminatorP(2), + DiscriminatorP(3), + DiscriminatorP(5), + DiscriminatorP(7), + DiscriminatorP(11), + ]) + + def forward(self, y, y_hat): + y_d_rs = [] + y_d_gs = [] + fmap_rs = [] + fmap_gs = [] + for i, d in enumerate(self.discriminators): + y_d_r, fmap_r = d(y) + y_d_g, fmap_g = d(y_hat) + y_d_rs.append(y_d_r) + fmap_rs.append(fmap_r) + y_d_gs.append(y_d_g) + fmap_gs.append(fmap_g) + + return y_d_rs, y_d_gs, fmap_rs, fmap_gs + +class WavLMDiscriminator(nn.Module): + """docstring for Discriminator.""" + + def __init__(self, slm_hidden=768, + slm_layers=13, + initial_channel=64, + use_spectral_norm=False): + super(WavLMDiscriminator, self).__init__() + norm_f = weight_norm if use_spectral_norm == False else spectral_norm + self.pre = norm_f(Conv1d(slm_hidden * slm_layers, initial_channel, 1, 1, padding=0)) + + self.convs = nn.ModuleList([ + norm_f(nn.Conv1d(initial_channel, initial_channel * 2, kernel_size=5, padding=2)), + norm_f(nn.Conv1d(initial_channel * 2, initial_channel * 4, kernel_size=5, padding=2)), + norm_f(nn.Conv1d(initial_channel * 4, initial_channel * 4, 5, 1, padding=2)), + ]) + + self.conv_post = norm_f(Conv1d(initial_channel * 4, 1, 3, 1, padding=1)) + + def forward(self, x): + x = self.pre(x) + + fmap = [] + for l in self.convs: + x = l(x) + x = F.leaky_relu(x, LRELU_SLOPE) + fmap.append(x) + x = self.conv_post(x) + x = torch.flatten(x, 1, -1) + + return x \ No newline at end of file diff --git a/styletts2/Modules/hifigan.py b/styletts2/Modules/hifigan.py new file mode 100644 index 0000000000000000000000000000000000000000..e36f95a212216eaa8a5175aa9146848fce54fb03 --- /dev/null +++ b/styletts2/Modules/hifigan.py @@ -0,0 +1,477 @@ +import torch +import torch.nn.functional as F +import torch.nn as nn +from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d +from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm +from .utils import init_weights, get_padding + +import math +import random +import numpy as np + +LRELU_SLOPE = 0.1 + +class AdaIN1d(nn.Module): + def __init__(self, style_dim, num_features): + super().__init__() + self.norm = nn.InstanceNorm1d(num_features, affine=False) + self.fc = nn.Linear(style_dim, num_features*2) + + def forward(self, x, s): + h = self.fc(s) + h = h.view(h.size(0), h.size(1), 1) + gamma, beta = torch.chunk(h, chunks=2, dim=1) + return (1 + gamma) * self.norm(x) + beta + +class AdaINResBlock1(torch.nn.Module): + def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5), style_dim=64): + super(AdaINResBlock1, self).__init__() + self.convs1 = nn.ModuleList([ + weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0], + padding=get_padding(kernel_size, dilation[0]))), + weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1], + padding=get_padding(kernel_size, dilation[1]))), + weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[2], + padding=get_padding(kernel_size, dilation[2]))) + ]) + self.convs1.apply(init_weights) + + self.convs2 = nn.ModuleList([ + weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, + padding=get_padding(kernel_size, 1))), + weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, + padding=get_padding(kernel_size, 1))), + weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, + padding=get_padding(kernel_size, 1))) + ]) + 
self.convs2.apply(init_weights) + + self.adain1 = nn.ModuleList([ + AdaIN1d(style_dim, channels), + AdaIN1d(style_dim, channels), + AdaIN1d(style_dim, channels), + ]) + + self.adain2 = nn.ModuleList([ + AdaIN1d(style_dim, channels), + AdaIN1d(style_dim, channels), + AdaIN1d(style_dim, channels), + ]) + + self.alpha1 = nn.ParameterList([nn.Parameter(torch.ones(1, channels, 1)) for i in range(len(self.convs1))]) + self.alpha2 = nn.ParameterList([nn.Parameter(torch.ones(1, channels, 1)) for i in range(len(self.convs2))]) + + + def forward(self, x, s): + for c1, c2, n1, n2, a1, a2 in zip(self.convs1, self.convs2, self.adain1, self.adain2, self.alpha1, self.alpha2): + xt = n1(x, s) + xt = xt + (1 / a1) * (torch.sin(a1 * xt) ** 2) # Snake1D + xt = c1(xt) + xt = n2(xt, s) + xt = xt + (1 / a2) * (torch.sin(a2 * xt) ** 2) # Snake1D + xt = c2(xt) + x = xt + x + return x + + def remove_weight_norm(self): + for l in self.convs1: + remove_weight_norm(l) + for l in self.convs2: + remove_weight_norm(l) + +class SineGen(torch.nn.Module): + """ Definition of sine generator + SineGen(samp_rate, harmonic_num = 0, + sine_amp = 0.1, noise_std = 0.003, + voiced_threshold = 0, + flag_for_pulse=False) + samp_rate: sampling rate in Hz + harmonic_num: number of harmonic overtones (default 0) + sine_amp: amplitude of sine-wavefrom (default 0.1) + noise_std: std of Gaussian noise (default 0.003) + voiced_thoreshold: F0 threshold for U/V classification (default 0) + flag_for_pulse: this SinGen is used inside PulseGen (default False) + Note: when flag_for_pulse is True, the first time step of a voiced + segment is always sin(np.pi) or cos(0) + """ + + def __init__(self, samp_rate, upsample_scale, harmonic_num=0, + sine_amp=0.1, noise_std=0.003, + voiced_threshold=0, + flag_for_pulse=False): + super(SineGen, self).__init__() + self.sine_amp = sine_amp + self.noise_std = noise_std + self.harmonic_num = harmonic_num + self.dim = self.harmonic_num + 1 + self.sampling_rate = samp_rate + self.voiced_threshold = voiced_threshold + self.flag_for_pulse = flag_for_pulse + self.upsample_scale = upsample_scale + + def _f02uv(self, f0): + # generate uv signal + uv = (f0 > self.voiced_threshold).type(torch.float32) + return uv + + def _f02sine(self, f0_values): + """ f0_values: (batchsize, length, dim) + where dim indicates fundamental tone and overtones + """ + # convert to F0 in rad. The interger part n can be ignored + # because 2 * np.pi * n doesn't affect phase + rad_values = (f0_values / self.sampling_rate) % 1 + + # initial phase noise (no noise for fundamental component) + rand_ini = torch.rand(f0_values.shape[0], f0_values.shape[2], \ + device=f0_values.device) + rand_ini[:, 0] = 0 + rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini + + # instantanouse phase sine[t] = sin(2*pi \sum_i=1 ^{t} rad) + if not self.flag_for_pulse: +# # for normal case + +# # To prevent torch.cumsum numerical overflow, +# # it is necessary to add -1 whenever \sum_k=1^n rad_value_k > 1. +# # Buffer tmp_over_one_idx indicates the time step to add -1. 
+# # This will not change F0 of sine because (x-1) * 2*pi = x * 2*pi +# tmp_over_one = torch.cumsum(rad_values, 1) % 1 +# tmp_over_one_idx = (padDiff(tmp_over_one)) < 0 +# cumsum_shift = torch.zeros_like(rad_values) +# cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0 + +# phase = torch.cumsum(rad_values, dim=1) * 2 * np.pi + rad_values = torch.nn.functional.interpolate(rad_values.transpose(1, 2), + scale_factor=1/self.upsample_scale, + mode="linear").transpose(1, 2) + +# tmp_over_one = torch.cumsum(rad_values, 1) % 1 +# tmp_over_one_idx = (padDiff(tmp_over_one)) < 0 +# cumsum_shift = torch.zeros_like(rad_values) +# cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0 + + phase = torch.cumsum(rad_values, dim=1) * 2 * np.pi + phase = torch.nn.functional.interpolate(phase.transpose(1, 2) * self.upsample_scale, + scale_factor=self.upsample_scale, mode="linear").transpose(1, 2) + sines = torch.sin(phase) + + else: + # If necessary, make sure that the first time step of every + # voiced segments is sin(pi) or cos(0) + # This is used for pulse-train generation + + # identify the last time step in unvoiced segments + uv = self._f02uv(f0_values) + uv_1 = torch.roll(uv, shifts=-1, dims=1) + uv_1[:, -1, :] = 1 + u_loc = (uv < 1) * (uv_1 > 0) + + # get the instantanouse phase + tmp_cumsum = torch.cumsum(rad_values, dim=1) + # different batch needs to be processed differently + for idx in range(f0_values.shape[0]): + temp_sum = tmp_cumsum[idx, u_loc[idx, :, 0], :] + temp_sum[1:, :] = temp_sum[1:, :] - temp_sum[0:-1, :] + # stores the accumulation of i.phase within + # each voiced segments + tmp_cumsum[idx, :, :] = 0 + tmp_cumsum[idx, u_loc[idx, :, 0], :] = temp_sum + + # rad_values - tmp_cumsum: remove the accumulation of i.phase + # within the previous voiced segment. + i_phase = torch.cumsum(rad_values - tmp_cumsum, dim=1) + + # get the sines + sines = torch.cos(i_phase * 2 * np.pi) + return sines + + def forward(self, f0): + """ sine_tensor, uv = forward(f0) + input F0: tensor(batchsize=1, length, dim=1) + f0 for unvoiced steps should be 0 + output sine_tensor: tensor(batchsize=1, length, dim) + output uv: tensor(batchsize=1, length, 1) + """ + f0_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim, + device=f0.device) + # fundamental component + fn = torch.multiply(f0, torch.FloatTensor([[range(1, self.harmonic_num + 2)]]).to(f0.device)) + + # generate sine waveforms + sine_waves = self._f02sine(fn) * self.sine_amp + + # generate uv signal + # uv = torch.ones(f0.shape) + # uv = uv * (f0 > self.voiced_threshold) + uv = self._f02uv(f0) + + # noise: for unvoiced should be similar to sine_amp + # std = self.sine_amp/3 -> max value ~ self.sine_amp + # . 
for voiced regions is self.noise_std + noise_amp = uv * self.noise_std + (1 - uv) * self.sine_amp / 3 + noise = noise_amp * torch.randn_like(sine_waves) + + # first: set the unvoiced part to 0 by uv + # then: additive noise + sine_waves = sine_waves * uv + noise + return sine_waves, uv, noise + + +class SourceModuleHnNSF(torch.nn.Module): + """ SourceModule for hn-nsf + SourceModule(sampling_rate, harmonic_num=0, sine_amp=0.1, + add_noise_std=0.003, voiced_threshod=0) + sampling_rate: sampling_rate in Hz + harmonic_num: number of harmonic above F0 (default: 0) + sine_amp: amplitude of sine source signal (default: 0.1) + add_noise_std: std of additive Gaussian noise (default: 0.003) + note that amplitude of noise in unvoiced is decided + by sine_amp + voiced_threshold: threhold to set U/V given F0 (default: 0) + Sine_source, noise_source = SourceModuleHnNSF(F0_sampled) + F0_sampled (batchsize, length, 1) + Sine_source (batchsize, length, 1) + noise_source (batchsize, length 1) + uv (batchsize, length, 1) + """ + + def __init__(self, sampling_rate, upsample_scale, harmonic_num=0, sine_amp=0.1, + add_noise_std=0.003, voiced_threshod=0): + super(SourceModuleHnNSF, self).__init__() + + self.sine_amp = sine_amp + self.noise_std = add_noise_std + + # to produce sine waveforms + self.l_sin_gen = SineGen(sampling_rate, upsample_scale, harmonic_num, + sine_amp, add_noise_std, voiced_threshod) + + # to merge source harmonics into a single excitation + self.l_linear = torch.nn.Linear(harmonic_num + 1, 1) + self.l_tanh = torch.nn.Tanh() + + def forward(self, x): + """ + Sine_source, noise_source = SourceModuleHnNSF(F0_sampled) + F0_sampled (batchsize, length, 1) + Sine_source (batchsize, length, 1) + noise_source (batchsize, length 1) + """ + # source for harmonic branch + with torch.no_grad(): + sine_wavs, uv, _ = self.l_sin_gen(x) + sine_merge = self.l_tanh(self.l_linear(sine_wavs)) + + # source for noise branch, in the same shape as uv + noise = torch.randn_like(uv) * self.sine_amp / 3 + return sine_merge, noise, uv +def padDiff(x): + return F.pad(F.pad(x, (0,0,-1,1), 'constant', 0) - x, (0,0,0,-1), 'constant', 0) + +class Generator(torch.nn.Module): + def __init__(self, style_dim, resblock_kernel_sizes, upsample_rates, upsample_initial_channel, resblock_dilation_sizes, upsample_kernel_sizes): + super(Generator, self).__init__() + self.num_kernels = len(resblock_kernel_sizes) + self.num_upsamples = len(upsample_rates) + resblock = AdaINResBlock1 + + self.m_source = SourceModuleHnNSF( + sampling_rate=24000, + upsample_scale=np.prod(upsample_rates), + harmonic_num=8, voiced_threshod=10) + + self.f0_upsamp = torch.nn.Upsample(scale_factor=np.prod(upsample_rates)) + self.noise_convs = nn.ModuleList() + self.ups = nn.ModuleList() + self.noise_res = nn.ModuleList() + + for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): + c_cur = upsample_initial_channel // (2 ** (i + 1)) + + self.ups.append(weight_norm(ConvTranspose1d(upsample_initial_channel//(2**i), + upsample_initial_channel//(2**(i+1)), + k, u, padding=(u//2 + u%2), output_padding=u%2))) + + if i + 1 < len(upsample_rates): # + stride_f0 = np.prod(upsample_rates[i + 1:]) + self.noise_convs.append(Conv1d( + 1, c_cur, kernel_size=stride_f0 * 2, stride=stride_f0, padding=(stride_f0+1) // 2)) + self.noise_res.append(resblock(c_cur, 7, [1,3,5], style_dim)) + else: + self.noise_convs.append(Conv1d(1, c_cur, kernel_size=1)) + self.noise_res.append(resblock(c_cur, 11, [1,3,5], style_dim)) + + self.resblocks = nn.ModuleList() + + 
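# Learnable Snake activation scales (alpha): one for the pre-upsampling channels plus one per upsampling stage, applied in forward() as x + (1/alpha) * sin(alpha * x)**2.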
self.alphas = nn.ParameterList() + self.alphas.append(nn.Parameter(torch.ones(1, upsample_initial_channel, 1))) + + for i in range(len(self.ups)): + ch = upsample_initial_channel//(2**(i+1)) + self.alphas.append(nn.Parameter(torch.ones(1, ch, 1))) + + for j, (k, d) in enumerate(zip(resblock_kernel_sizes, resblock_dilation_sizes)): + self.resblocks.append(resblock(ch, k, d, style_dim)) + + self.conv_post = weight_norm(Conv1d(ch, 1, 7, 1, padding=3)) + self.ups.apply(init_weights) + self.conv_post.apply(init_weights) + + def forward(self, x, s, f0): + + f0 = self.f0_upsamp(f0[:, None]).transpose(1, 2) # bs,n,t + + har_source, noi_source, uv = self.m_source(f0) + har_source = har_source.transpose(1, 2) + + for i in range(self.num_upsamples): + x = x + (1 / self.alphas[i]) * (torch.sin(self.alphas[i] * x) ** 2) + x_source = self.noise_convs[i](har_source) + x_source = self.noise_res[i](x_source, s) + + x = self.ups[i](x) + x = x + x_source + + xs = None + for j in range(self.num_kernels): + if xs is None: + xs = self.resblocks[i*self.num_kernels+j](x, s) + else: + xs += self.resblocks[i*self.num_kernels+j](x, s) + x = xs / self.num_kernels + x = x + (1 / self.alphas[i+1]) * (torch.sin(self.alphas[i+1] * x) ** 2) + x = self.conv_post(x) + x = torch.tanh(x) + + return x + + def remove_weight_norm(self): + print('Removing weight norm...') + for l in self.ups: + remove_weight_norm(l) + for l in self.resblocks: + l.remove_weight_norm() + remove_weight_norm(self.conv_pre) + remove_weight_norm(self.conv_post) + + +class AdainResBlk1d(nn.Module): + def __init__(self, dim_in, dim_out, style_dim=64, actv=nn.LeakyReLU(0.2), + upsample='none', dropout_p=0.0): + super().__init__() + self.actv = actv + self.upsample_type = upsample + self.upsample = UpSample1d(upsample) + self.learned_sc = dim_in != dim_out + self._build_weights(dim_in, dim_out, style_dim) + self.dropout = nn.Dropout(dropout_p) + + if upsample == 'none': + self.pool = nn.Identity() + else: + self.pool = weight_norm(nn.ConvTranspose1d(dim_in, dim_in, kernel_size=3, stride=2, groups=dim_in, padding=1, output_padding=1)) + + + def _build_weights(self, dim_in, dim_out, style_dim): + self.conv1 = weight_norm(nn.Conv1d(dim_in, dim_out, 3, 1, 1)) + self.conv2 = weight_norm(nn.Conv1d(dim_out, dim_out, 3, 1, 1)) + self.norm1 = AdaIN1d(style_dim, dim_in) + self.norm2 = AdaIN1d(style_dim, dim_out) + if self.learned_sc: + self.conv1x1 = weight_norm(nn.Conv1d(dim_in, dim_out, 1, 1, 0, bias=False)) + + def _shortcut(self, x): + x = self.upsample(x) + if self.learned_sc: + x = self.conv1x1(x) + return x + + def _residual(self, x, s): + x = self.norm1(x, s) + x = self.actv(x) + x = self.pool(x) + x = self.conv1(self.dropout(x)) + x = self.norm2(x, s) + x = self.actv(x) + x = self.conv2(self.dropout(x)) + return x + + def forward(self, x, s): + out = self._residual(x, s) + out = (out + self._shortcut(x)) / math.sqrt(2) + return out + +class UpSample1d(nn.Module): + def __init__(self, layer_type): + super().__init__() + self.layer_type = layer_type + + def forward(self, x): + if self.layer_type == 'none': + return x + else: + return F.interpolate(x, scale_factor=2, mode='nearest') + +class Decoder(nn.Module): + def __init__(self, dim_in=512, F0_channel=512, style_dim=64, dim_out=80, + resblock_kernel_sizes = [3,7,11], + upsample_rates = [10,5,3,2], + upsample_initial_channel=512, + resblock_dilation_sizes=[[1,3,5], [1,3,5], [1,3,5]], + upsample_kernel_sizes=[20,10,6,4]): + super().__init__() + + self.decode = nn.ModuleList() + + self.encode = 
AdainResBlk1d(dim_in + 2, 1024, style_dim) + + self.decode.append(AdainResBlk1d(1024 + 2 + 64, 1024, style_dim)) + self.decode.append(AdainResBlk1d(1024 + 2 + 64, 1024, style_dim)) + self.decode.append(AdainResBlk1d(1024 + 2 + 64, 1024, style_dim)) + self.decode.append(AdainResBlk1d(1024 + 2 + 64, 512, style_dim, upsample=True)) + + self.F0_conv = weight_norm(nn.Conv1d(1, 1, kernel_size=3, stride=2, groups=1, padding=1)) + + self.N_conv = weight_norm(nn.Conv1d(1, 1, kernel_size=3, stride=2, groups=1, padding=1)) + + self.asr_res = nn.Sequential( + weight_norm(nn.Conv1d(512, 64, kernel_size=1)), + ) + + + self.generator = Generator(style_dim, resblock_kernel_sizes, upsample_rates, upsample_initial_channel, resblock_dilation_sizes, upsample_kernel_sizes) + + + def forward(self, asr, F0_curve, N, s): + if self.training: + downlist = [0, 3, 7] + F0_down = downlist[random.randint(0, 2)] + downlist = [0, 3, 7, 15] + N_down = downlist[random.randint(0, 3)] + if F0_down: + F0_curve = nn.functional.conv1d(F0_curve.unsqueeze(1), torch.ones(1, 1, F0_down).to('cuda'), padding=F0_down//2).squeeze(1) / F0_down + if N_down: + N = nn.functional.conv1d(N.unsqueeze(1), torch.ones(1, 1, N_down).to('cuda'), padding=N_down//2).squeeze(1) / N_down + + + F0 = self.F0_conv(F0_curve.unsqueeze(1)) + N = self.N_conv(N.unsqueeze(1)) + + x = torch.cat([asr, F0, N], axis=1) + x = self.encode(x, s) + + asr_res = self.asr_res(asr) + + res = True + for block in self.decode: + if res: + x = torch.cat([x, asr_res, F0, N], axis=1) + x = block(x, s) + if block.upsample_type != "none": + res = False + + x = self.generator(x, s, F0_curve) + return x + + \ No newline at end of file diff --git a/styletts2/Modules/istftnet.py b/styletts2/Modules/istftnet.py new file mode 100644 index 0000000000000000000000000000000000000000..7a8c5093f833aa2b211a7a380b1b2220424012eb --- /dev/null +++ b/styletts2/Modules/istftnet.py @@ -0,0 +1,530 @@ +import torch +import torch.nn.functional as F +import torch.nn as nn +from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d +from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm +from .utils import init_weights, get_padding + +import math +import random +import numpy as np +from scipy.signal import get_window + +LRELU_SLOPE = 0.1 + +class AdaIN1d(nn.Module): + def __init__(self, style_dim, num_features): + super().__init__() + self.norm = nn.InstanceNorm1d(num_features, affine=False) + self.fc = nn.Linear(style_dim, num_features*2) + + def forward(self, x, s): + h = self.fc(s) + h = h.view(h.size(0), h.size(1), 1) + gamma, beta = torch.chunk(h, chunks=2, dim=1) + return (1 + gamma) * self.norm(x) + beta + +class AdaINResBlock1(torch.nn.Module): + def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5), style_dim=64): + super(AdaINResBlock1, self).__init__() + self.convs1 = nn.ModuleList([ + weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0], + padding=get_padding(kernel_size, dilation[0]))), + weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1], + padding=get_padding(kernel_size, dilation[1]))), + weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[2], + padding=get_padding(kernel_size, dilation[2]))) + ]) + self.convs1.apply(init_weights) + + self.convs2 = nn.ModuleList([ + weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, + padding=get_padding(kernel_size, 1))), + weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, + padding=get_padding(kernel_size, 1))), + 
weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, + padding=get_padding(kernel_size, 1))) + ]) + self.convs2.apply(init_weights) + + self.adain1 = nn.ModuleList([ + AdaIN1d(style_dim, channels), + AdaIN1d(style_dim, channels), + AdaIN1d(style_dim, channels), + ]) + + self.adain2 = nn.ModuleList([ + AdaIN1d(style_dim, channels), + AdaIN1d(style_dim, channels), + AdaIN1d(style_dim, channels), + ]) + + self.alpha1 = nn.ParameterList([nn.Parameter(torch.ones(1, channels, 1)) for i in range(len(self.convs1))]) + self.alpha2 = nn.ParameterList([nn.Parameter(torch.ones(1, channels, 1)) for i in range(len(self.convs2))]) + + + def forward(self, x, s): + for c1, c2, n1, n2, a1, a2 in zip(self.convs1, self.convs2, self.adain1, self.adain2, self.alpha1, self.alpha2): + xt = n1(x, s) + xt = xt + (1 / a1) * (torch.sin(a1 * xt) ** 2) # Snake1D + xt = c1(xt) + xt = n2(xt, s) + xt = xt + (1 / a2) * (torch.sin(a2 * xt) ** 2) # Snake1D + xt = c2(xt) + x = xt + x + return x + + def remove_weight_norm(self): + for l in self.convs1: + remove_weight_norm(l) + for l in self.convs2: + remove_weight_norm(l) + +class TorchSTFT(torch.nn.Module): + def __init__(self, filter_length=800, hop_length=200, win_length=800, window='hann'): + super().__init__() + self.filter_length = filter_length + self.hop_length = hop_length + self.win_length = win_length + self.window = torch.from_numpy(get_window(window, win_length, fftbins=True).astype(np.float32)) + + def transform(self, input_data): + forward_transform = torch.stft( + input_data, + self.filter_length, self.hop_length, self.win_length, window=self.window.to(input_data.device), + return_complex=True) + + return torch.abs(forward_transform), torch.angle(forward_transform) + + def inverse(self, magnitude, phase): + inverse_transform = torch.istft( + magnitude * torch.exp(phase * 1j), + self.filter_length, self.hop_length, self.win_length, window=self.window.to(magnitude.device)) + + return inverse_transform.unsqueeze(-2) # unsqueeze to stay consistent with conv_transpose1d implementation + + def forward(self, input_data): + self.magnitude, self.phase = self.transform(input_data) + reconstruction = self.inverse(self.magnitude, self.phase) + return reconstruction + +class SineGen(torch.nn.Module): + """ Definition of sine generator + SineGen(samp_rate, harmonic_num = 0, + sine_amp = 0.1, noise_std = 0.003, + voiced_threshold = 0, + flag_for_pulse=False) + samp_rate: sampling rate in Hz + harmonic_num: number of harmonic overtones (default 0) + sine_amp: amplitude of sine-wavefrom (default 0.1) + noise_std: std of Gaussian noise (default 0.003) + voiced_thoreshold: F0 threshold for U/V classification (default 0) + flag_for_pulse: this SinGen is used inside PulseGen (default False) + Note: when flag_for_pulse is True, the first time step of a voiced + segment is always sin(np.pi) or cos(0) + """ + + def __init__(self, samp_rate, upsample_scale, harmonic_num=0, + sine_amp=0.1, noise_std=0.003, + voiced_threshold=0, + flag_for_pulse=False): + super(SineGen, self).__init__() + self.sine_amp = sine_amp + self.noise_std = noise_std + self.harmonic_num = harmonic_num + self.dim = self.harmonic_num + 1 + self.sampling_rate = samp_rate + self.voiced_threshold = voiced_threshold + self.flag_for_pulse = flag_for_pulse + self.upsample_scale = upsample_scale + + def _f02uv(self, f0): + # generate uv signal + uv = (f0 > self.voiced_threshold).type(torch.float32) + return uv + + def _f02sine(self, f0_values): + """ f0_values: (batchsize, length, dim) + where dim indicates 
fundamental tone and overtones + """ + # convert to F0 in rad. The interger part n can be ignored + # because 2 * np.pi * n doesn't affect phase + rad_values = (f0_values / self.sampling_rate) % 1 + + # initial phase noise (no noise for fundamental component) + rand_ini = torch.rand(f0_values.shape[0], f0_values.shape[2], \ + device=f0_values.device) + rand_ini[:, 0] = 0 + rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini + + # instantanouse phase sine[t] = sin(2*pi \sum_i=1 ^{t} rad) + if not self.flag_for_pulse: +# # for normal case + +# # To prevent torch.cumsum numerical overflow, +# # it is necessary to add -1 whenever \sum_k=1^n rad_value_k > 1. +# # Buffer tmp_over_one_idx indicates the time step to add -1. +# # This will not change F0 of sine because (x-1) * 2*pi = x * 2*pi +# tmp_over_one = torch.cumsum(rad_values, 1) % 1 +# tmp_over_one_idx = (padDiff(tmp_over_one)) < 0 +# cumsum_shift = torch.zeros_like(rad_values) +# cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0 + +# phase = torch.cumsum(rad_values, dim=1) * 2 * np.pi + rad_values = torch.nn.functional.interpolate(rad_values.transpose(1, 2), + scale_factor=1/self.upsample_scale, + mode="linear").transpose(1, 2) + +# tmp_over_one = torch.cumsum(rad_values, 1) % 1 +# tmp_over_one_idx = (padDiff(tmp_over_one)) < 0 +# cumsum_shift = torch.zeros_like(rad_values) +# cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0 + + phase = torch.cumsum(rad_values, dim=1) * 2 * np.pi + phase = torch.nn.functional.interpolate(phase.transpose(1, 2) * self.upsample_scale, + scale_factor=self.upsample_scale, mode="linear").transpose(1, 2) + sines = torch.sin(phase) + + else: + # If necessary, make sure that the first time step of every + # voiced segments is sin(pi) or cos(0) + # This is used for pulse-train generation + + # identify the last time step in unvoiced segments + uv = self._f02uv(f0_values) + uv_1 = torch.roll(uv, shifts=-1, dims=1) + uv_1[:, -1, :] = 1 + u_loc = (uv < 1) * (uv_1 > 0) + + # get the instantanouse phase + tmp_cumsum = torch.cumsum(rad_values, dim=1) + # different batch needs to be processed differently + for idx in range(f0_values.shape[0]): + temp_sum = tmp_cumsum[idx, u_loc[idx, :, 0], :] + temp_sum[1:, :] = temp_sum[1:, :] - temp_sum[0:-1, :] + # stores the accumulation of i.phase within + # each voiced segments + tmp_cumsum[idx, :, :] = 0 + tmp_cumsum[idx, u_loc[idx, :, 0], :] = temp_sum + + # rad_values - tmp_cumsum: remove the accumulation of i.phase + # within the previous voiced segment. + i_phase = torch.cumsum(rad_values - tmp_cumsum, dim=1) + + # get the sines + sines = torch.cos(i_phase * 2 * np.pi) + return sines + + def forward(self, f0): + """ sine_tensor, uv = forward(f0) + input F0: tensor(batchsize=1, length, dim=1) + f0 for unvoiced steps should be 0 + output sine_tensor: tensor(batchsize=1, length, dim) + output uv: tensor(batchsize=1, length, 1) + """ + f0_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim, + device=f0.device) + # fundamental component + fn = torch.multiply(f0, torch.FloatTensor([[range(1, self.harmonic_num + 2)]]).to(f0.device)) + + # generate sine waveforms + sine_waves = self._f02sine(fn) * self.sine_amp + + # generate uv signal + # uv = torch.ones(f0.shape) + # uv = uv * (f0 > self.voiced_threshold) + uv = self._f02uv(f0) + + # noise: for unvoiced should be similar to sine_amp + # std = self.sine_amp/3 -> max value ~ self.sine_amp + # . 
for voiced regions is self.noise_std + noise_amp = uv * self.noise_std + (1 - uv) * self.sine_amp / 3 + noise = noise_amp * torch.randn_like(sine_waves) + + # first: set the unvoiced part to 0 by uv + # then: additive noise + sine_waves = sine_waves * uv + noise + return sine_waves, uv, noise + + +class SourceModuleHnNSF(torch.nn.Module): + """ SourceModule for hn-nsf + SourceModule(sampling_rate, harmonic_num=0, sine_amp=0.1, + add_noise_std=0.003, voiced_threshod=0) + sampling_rate: sampling_rate in Hz + harmonic_num: number of harmonic above F0 (default: 0) + sine_amp: amplitude of sine source signal (default: 0.1) + add_noise_std: std of additive Gaussian noise (default: 0.003) + note that amplitude of noise in unvoiced is decided + by sine_amp + voiced_threshold: threhold to set U/V given F0 (default: 0) + Sine_source, noise_source = SourceModuleHnNSF(F0_sampled) + F0_sampled (batchsize, length, 1) + Sine_source (batchsize, length, 1) + noise_source (batchsize, length 1) + uv (batchsize, length, 1) + """ + + def __init__(self, sampling_rate, upsample_scale, harmonic_num=0, sine_amp=0.1, + add_noise_std=0.003, voiced_threshod=0): + super(SourceModuleHnNSF, self).__init__() + + self.sine_amp = sine_amp + self.noise_std = add_noise_std + + # to produce sine waveforms + self.l_sin_gen = SineGen(sampling_rate, upsample_scale, harmonic_num, + sine_amp, add_noise_std, voiced_threshod) + + # to merge source harmonics into a single excitation + self.l_linear = torch.nn.Linear(harmonic_num + 1, 1) + self.l_tanh = torch.nn.Tanh() + + def forward(self, x): + """ + Sine_source, noise_source = SourceModuleHnNSF(F0_sampled) + F0_sampled (batchsize, length, 1) + Sine_source (batchsize, length, 1) + noise_source (batchsize, length 1) + """ + # source for harmonic branch + with torch.no_grad(): + sine_wavs, uv, _ = self.l_sin_gen(x) + sine_merge = self.l_tanh(self.l_linear(sine_wavs)) + + # source for noise branch, in the same shape as uv + noise = torch.randn_like(uv) * self.sine_amp / 3 + return sine_merge, noise, uv +def padDiff(x): + return F.pad(F.pad(x, (0,0,-1,1), 'constant', 0) - x, (0,0,0,-1), 'constant', 0) + + +class Generator(torch.nn.Module): + def __init__(self, style_dim, resblock_kernel_sizes, upsample_rates, upsample_initial_channel, resblock_dilation_sizes, upsample_kernel_sizes, gen_istft_n_fft, gen_istft_hop_size): + super(Generator, self).__init__() + + self.num_kernels = len(resblock_kernel_sizes) + self.num_upsamples = len(upsample_rates) + resblock = AdaINResBlock1 + + self.m_source = SourceModuleHnNSF( + sampling_rate=24000, + upsample_scale=np.prod(upsample_rates) * gen_istft_hop_size, + harmonic_num=8, voiced_threshod=10) + self.f0_upsamp = torch.nn.Upsample(scale_factor=np.prod(upsample_rates) * gen_istft_hop_size) + self.noise_convs = nn.ModuleList() + self.noise_res = nn.ModuleList() + + self.ups = nn.ModuleList() + for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): + self.ups.append(weight_norm( + ConvTranspose1d(upsample_initial_channel//(2**i), upsample_initial_channel//(2**(i+1)), + k, u, padding=(k-u)//2))) + + self.resblocks = nn.ModuleList() + for i in range(len(self.ups)): + ch = upsample_initial_channel//(2**(i+1)) + for j, (k, d) in enumerate(zip(resblock_kernel_sizes,resblock_dilation_sizes)): + self.resblocks.append(resblock(ch, k, d, style_dim)) + + c_cur = upsample_initial_channel // (2 ** (i + 1)) + + if i + 1 < len(upsample_rates): # + stride_f0 = np.prod(upsample_rates[i + 1:]) + self.noise_convs.append(Conv1d( + gen_istft_n_fft + 2, 
c_cur, kernel_size=stride_f0 * 2, stride=stride_f0, padding=(stride_f0+1) // 2)) + self.noise_res.append(resblock(c_cur, 7, [1,3,5], style_dim)) + else: + self.noise_convs.append(Conv1d(gen_istft_n_fft + 2, c_cur, kernel_size=1)) + self.noise_res.append(resblock(c_cur, 11, [1,3,5], style_dim)) + + + self.post_n_fft = gen_istft_n_fft + self.conv_post = weight_norm(Conv1d(ch, self.post_n_fft + 2, 7, 1, padding=3)) + self.ups.apply(init_weights) + self.conv_post.apply(init_weights) + self.reflection_pad = torch.nn.ReflectionPad1d((1, 0)) + self.stft = TorchSTFT(filter_length=gen_istft_n_fft, hop_length=gen_istft_hop_size, win_length=gen_istft_n_fft) + + + def forward(self, x, s, f0): + with torch.no_grad(): + f0 = self.f0_upsamp(f0[:, None]).transpose(1, 2) # bs,n,t + + har_source, noi_source, uv = self.m_source(f0) + har_source = har_source.transpose(1, 2).squeeze(1) + har_spec, har_phase = self.stft.transform(har_source) + har = torch.cat([har_spec, har_phase], dim=1) + + for i in range(self.num_upsamples): + x = F.leaky_relu(x, LRELU_SLOPE) + x_source = self.noise_convs[i](har) + x_source = self.noise_res[i](x_source, s) + + x = self.ups[i](x) + if i == self.num_upsamples - 1: + x = self.reflection_pad(x) + + x = x + x_source + xs = None + for j in range(self.num_kernels): + if xs is None: + xs = self.resblocks[i*self.num_kernels+j](x, s) + else: + xs += self.resblocks[i*self.num_kernels+j](x, s) + x = xs / self.num_kernels + x = F.leaky_relu(x) + x = self.conv_post(x) + spec = torch.exp(x[:,:self.post_n_fft // 2 + 1, :]) + phase = torch.sin(x[:, self.post_n_fft // 2 + 1:, :]) + return self.stft.inverse(spec, phase) + + def fw_phase(self, x, s): + for i in range(self.num_upsamples): + x = F.leaky_relu(x, LRELU_SLOPE) + x = self.ups[i](x) + xs = None + for j in range(self.num_kernels): + if xs is None: + xs = self.resblocks[i*self.num_kernels+j](x, s) + else: + xs += self.resblocks[i*self.num_kernels+j](x, s) + x = xs / self.num_kernels + x = F.leaky_relu(x) + x = self.reflection_pad(x) + x = self.conv_post(x) + spec = torch.exp(x[:,:self.post_n_fft // 2 + 1, :]) + phase = torch.sin(x[:, self.post_n_fft // 2 + 1:, :]) + return spec, phase + + def remove_weight_norm(self): + print('Removing weight norm...') + for l in self.ups: + remove_weight_norm(l) + for l in self.resblocks: + l.remove_weight_norm() + remove_weight_norm(self.conv_pre) + remove_weight_norm(self.conv_post) + + +class AdainResBlk1d(nn.Module): + def __init__(self, dim_in, dim_out, style_dim=64, actv=nn.LeakyReLU(0.2), + upsample='none', dropout_p=0.0): + super().__init__() + self.actv = actv + self.upsample_type = upsample + self.upsample = UpSample1d(upsample) + self.learned_sc = dim_in != dim_out + self._build_weights(dim_in, dim_out, style_dim) + self.dropout = nn.Dropout(dropout_p) + + if upsample == 'none': + self.pool = nn.Identity() + else: + self.pool = weight_norm(nn.ConvTranspose1d(dim_in, dim_in, kernel_size=3, stride=2, groups=dim_in, padding=1, output_padding=1)) + + + def _build_weights(self, dim_in, dim_out, style_dim): + self.conv1 = weight_norm(nn.Conv1d(dim_in, dim_out, 3, 1, 1)) + self.conv2 = weight_norm(nn.Conv1d(dim_out, dim_out, 3, 1, 1)) + self.norm1 = AdaIN1d(style_dim, dim_in) + self.norm2 = AdaIN1d(style_dim, dim_out) + if self.learned_sc: + self.conv1x1 = weight_norm(nn.Conv1d(dim_in, dim_out, 1, 1, 0, bias=False)) + + def _shortcut(self, x): + x = self.upsample(x) + if self.learned_sc: + x = self.conv1x1(x) + return x + + def _residual(self, x, s): + x = self.norm1(x, s) + x = self.actv(x) + x = 
self.pool(x) + x = self.conv1(self.dropout(x)) + x = self.norm2(x, s) + x = self.actv(x) + x = self.conv2(self.dropout(x)) + return x + + def forward(self, x, s): + out = self._residual(x, s) + out = (out + self._shortcut(x)) / math.sqrt(2) + return out + +class UpSample1d(nn.Module): + def __init__(self, layer_type): + super().__init__() + self.layer_type = layer_type + + def forward(self, x): + if self.layer_type == 'none': + return x + else: + return F.interpolate(x, scale_factor=2, mode='nearest') + +class Decoder(nn.Module): + def __init__(self, dim_in=512, F0_channel=512, style_dim=64, dim_out=80, + resblock_kernel_sizes = [3,7,11], + upsample_rates = [10, 6], + upsample_initial_channel=512, + resblock_dilation_sizes=[[1,3,5], [1,3,5], [1,3,5]], + upsample_kernel_sizes=[20, 12], + gen_istft_n_fft=20, gen_istft_hop_size=5): + super().__init__() + + self.decode = nn.ModuleList() + + self.encode = AdainResBlk1d(dim_in + 2, 1024, style_dim) + + self.decode.append(AdainResBlk1d(1024 + 2 + 64, 1024, style_dim)) + self.decode.append(AdainResBlk1d(1024 + 2 + 64, 1024, style_dim)) + self.decode.append(AdainResBlk1d(1024 + 2 + 64, 1024, style_dim)) + self.decode.append(AdainResBlk1d(1024 + 2 + 64, 512, style_dim, upsample=True)) + + self.F0_conv = weight_norm(nn.Conv1d(1, 1, kernel_size=3, stride=2, groups=1, padding=1)) + + self.N_conv = weight_norm(nn.Conv1d(1, 1, kernel_size=3, stride=2, groups=1, padding=1)) + + self.asr_res = nn.Sequential( + weight_norm(nn.Conv1d(512, 64, kernel_size=1)), + ) + + + self.generator = Generator(style_dim, resblock_kernel_sizes, upsample_rates, + upsample_initial_channel, resblock_dilation_sizes, + upsample_kernel_sizes, gen_istft_n_fft, gen_istft_hop_size) + + def forward(self, asr, F0_curve, N, s): + if self.training: + downlist = [0, 3, 7] + F0_down = downlist[random.randint(0, 2)] + downlist = [0, 3, 7, 15] + N_down = downlist[random.randint(0, 3)] + if F0_down: + F0_curve = nn.functional.conv1d(F0_curve.unsqueeze(1), torch.ones(1, 1, F0_down).to('cuda'), padding=F0_down//2).squeeze(1) / F0_down + if N_down: + N = nn.functional.conv1d(N.unsqueeze(1), torch.ones(1, 1, N_down).to('cuda'), padding=N_down//2).squeeze(1) / N_down + + + F0 = self.F0_conv(F0_curve.unsqueeze(1)) + N = self.N_conv(N.unsqueeze(1)) + + x = torch.cat([asr, F0, N], axis=1) + x = self.encode(x, s) + + asr_res = self.asr_res(asr) + + res = True + for block in self.decode: + if res: + x = torch.cat([x, asr_res, F0, N], axis=1) + x = block(x, s) + if block.upsample_type != "none": + res = False + + x = self.generator(x, s, F0_curve) + return x + + \ No newline at end of file diff --git a/styletts2/Modules/slmadv.py b/styletts2/Modules/slmadv.py new file mode 100644 index 0000000000000000000000000000000000000000..11acb9149598993be23793925f34b35f659f8d6e --- /dev/null +++ b/styletts2/Modules/slmadv.py @@ -0,0 +1,195 @@ +import torch +import numpy as np +import torch.nn.functional as F + +class SLMAdversarialLoss(torch.nn.Module): + + def __init__(self, model, wl, sampler, min_len, max_len, batch_percentage=0.5, skip_update=10, sig=1.5): + super(SLMAdversarialLoss, self).__init__() + self.model = model + self.wl = wl + self.sampler = sampler + + self.min_len = min_len + self.max_len = max_len + self.batch_percentage = batch_percentage + + self.sig = sig + self.skip_update = skip_update + + def forward(self, iters, y_rec_gt, y_rec_gt_pred, waves, mel_input_length, ref_text, ref_lengths, use_ind, s_trg, ref_s=None): + text_mask = length_to_mask(ref_lengths).to(ref_text.device) + bert_dur = 
self.model.bert(ref_text, attention_mask=(~text_mask).int()) + d_en = self.model.bert_encoder(bert_dur).transpose(-1, -2) + + if use_ind and np.random.rand() < 0.5: + s_preds = s_trg + else: + num_steps = np.random.randint(3, 5) + if ref_s is not None: + s_preds = self.sampler(noise = torch.randn_like(s_trg).unsqueeze(1).to(ref_text.device), + embedding=bert_dur, + embedding_scale=1, + features=ref_s, # reference from the same speaker as the embedding + embedding_mask_proba=0.1, + num_steps=num_steps).squeeze(1) + else: + s_preds = self.sampler(noise = torch.randn_like(s_trg).unsqueeze(1).to(ref_text.device), + embedding=bert_dur, + embedding_scale=1, + embedding_mask_proba=0.1, + num_steps=num_steps).squeeze(1) + + s_dur = s_preds[:, 128:] + s = s_preds[:, :128] + + d, _ = self.model.predictor(d_en, s_dur, + ref_lengths, + torch.randn(ref_lengths.shape[0], ref_lengths.max(), 2).to(ref_text.device), + text_mask) + + bib = 0 + + output_lengths = [] + attn_preds = [] + + # differentiable duration modeling + for _s2s_pred, _text_length in zip(d, ref_lengths): + + _s2s_pred_org = _s2s_pred[:_text_length, :] + + _s2s_pred = torch.sigmoid(_s2s_pred_org) + _dur_pred = _s2s_pred.sum(axis=-1) + + l = int(torch.round(_s2s_pred.sum()).item()) + t = torch.arange(0, l).expand(l) + + t = torch.arange(0, l).unsqueeze(0).expand((len(_s2s_pred), l)).to(ref_text.device) + loc = torch.cumsum(_dur_pred, dim=0) - _dur_pred / 2 + + h = torch.exp(-0.5 * torch.square(t - (l - loc.unsqueeze(-1))) / (self.sig)**2) + + out = torch.nn.functional.conv1d(_s2s_pred_org.unsqueeze(0), + h.unsqueeze(1), + padding=h.shape[-1] - 1, groups=int(_text_length))[..., :l] + attn_preds.append(F.softmax(out.squeeze(), dim=0)) + + output_lengths.append(l) + + max_len = max(output_lengths) + + with torch.no_grad(): + t_en = self.model.text_encoder(ref_text, ref_lengths, text_mask) + + s2s_attn = torch.zeros(len(ref_lengths), int(ref_lengths.max()), max_len).to(ref_text.device) + for bib in range(len(output_lengths)): + s2s_attn[bib, :ref_lengths[bib], :output_lengths[bib]] = attn_preds[bib] + + asr_pred = t_en @ s2s_attn + + _, p_pred = self.model.predictor(d_en, s_dur, + ref_lengths, + s2s_attn, + text_mask) + + mel_len = max(int(min(output_lengths) / 2 - 1), self.min_len // 2) + mel_len = min(mel_len, self.max_len // 2) + + # get clips + + en = [] + p_en = [] + sp = [] + + F0_fakes = [] + N_fakes = [] + + wav = [] + + for bib in range(len(output_lengths)): + mel_length_pred = output_lengths[bib] + mel_length_gt = int(mel_input_length[bib].item() / 2) + if mel_length_gt <= mel_len or mel_length_pred <= mel_len: + continue + + sp.append(s_preds[bib]) + + random_start = np.random.randint(0, mel_length_pred - mel_len) + en.append(asr_pred[bib, :, random_start:random_start+mel_len]) + p_en.append(p_pred[bib, :, random_start:random_start+mel_len]) + + # get ground truth clips + random_start = np.random.randint(0, mel_length_gt - mel_len) + y = waves[bib][(random_start * 2) * 300:((random_start+mel_len) * 2) * 300] + wav.append(torch.from_numpy(y).to(ref_text.device)) + + if len(wav) >= self.batch_percentage * len(waves): # prevent OOM due to longer lengths + break + + if len(sp) <= 1: + return None + + sp = torch.stack(sp) + wav = torch.stack(wav).float() + en = torch.stack(en) + p_en = torch.stack(p_en) + + F0_fake, N_fake = self.model.predictor.F0Ntrain(p_en, sp[:, 128:]) + y_pred = self.model.decoder(en, F0_fake, N_fake, sp[:, :128]) + + # discriminator loss + if (iters + 1) % self.skip_update == 0: + if np.random.randint(0, 2) == 0: + 
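# With probability 0.5, substitute the reconstructed ground truth (shorter clips) as the discriminator's real examples; the length-invariance regularization below accounts for the resulting crop.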
wav = y_rec_gt_pred + use_rec = True + else: + use_rec = False + + crop_size = min(wav.size(-1), y_pred.size(-1)) + if use_rec: # use reconstructed (shorter lengths), do length invariant regularization + if wav.size(-1) > y_pred.size(-1): + real_GP = wav[:, : , :crop_size] + out_crop = self.wl.discriminator_forward(real_GP.detach().squeeze()) + out_org = self.wl.discriminator_forward(wav.detach().squeeze()) + loss_reg = F.l1_loss(out_crop, out_org[..., :out_crop.size(-1)]) + + if np.random.randint(0, 2) == 0: + d_loss = self.wl.discriminator(real_GP.detach().squeeze(), y_pred.detach().squeeze()).mean() + else: + d_loss = self.wl.discriminator(wav.detach().squeeze(), y_pred.detach().squeeze()).mean() + else: + real_GP = y_pred[:, : , :crop_size] + out_crop = self.wl.discriminator_forward(real_GP.detach().squeeze()) + out_org = self.wl.discriminator_forward(y_pred.detach().squeeze()) + loss_reg = F.l1_loss(out_crop, out_org[..., :out_crop.size(-1)]) + + if np.random.randint(0, 2) == 0: + d_loss = self.wl.discriminator(wav.detach().squeeze(), real_GP.detach().squeeze()).mean() + else: + d_loss = self.wl.discriminator(wav.detach().squeeze(), y_pred.detach().squeeze()).mean() + + # regularization (ignore length variation) + d_loss += loss_reg + + out_gt = self.wl.discriminator_forward(y_rec_gt.detach().squeeze()) + out_rec = self.wl.discriminator_forward(y_rec_gt_pred.detach().squeeze()) + + # regularization (ignore reconstruction artifacts) + d_loss += F.l1_loss(out_gt, out_rec) + + else: + d_loss = self.wl.discriminator(wav.detach().squeeze(), y_pred.detach().squeeze()).mean() + else: + d_loss = 0 + + # generator loss + gen_loss = self.wl.generator(y_pred.squeeze()) + + gen_loss = gen_loss.mean() + + return d_loss, gen_loss, y_pred.detach().cpu().numpy() + +def length_to_mask(lengths): + mask = torch.arange(lengths.max()).unsqueeze(0).expand(lengths.shape[0], -1).type_as(lengths) + mask = torch.gt(mask+1, lengths.unsqueeze(1)) + return mask diff --git a/styletts2/Modules/utils.py b/styletts2/Modules/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..2bf5fd72d7e70c4e5b443b43566b6f69e1954ee8 --- /dev/null +++ b/styletts2/Modules/utils.py @@ -0,0 +1,14 @@ +def init_weights(m, mean=0.0, std=0.01): + classname = m.__class__.__name__ + if classname.find("Conv") != -1: + m.weight.data.normal_(mean, std) + + +def apply_weight_norm(m): + classname = m.__class__.__name__ + if classname.find("Conv") != -1: + weight_norm(m) + + +def get_padding(kernel_size, dilation=1): + return int((kernel_size*dilation - dilation)/2) \ No newline at end of file diff --git a/styletts2/README.md b/styletts2/README.md new file mode 100644 index 0000000000000000000000000000000000000000..cb5f1cfff8328c97cea279ba7df0bbe4085f6d95 --- /dev/null +++ b/styletts2/README.md @@ -0,0 +1,119 @@ +# StyleTTS 2: Towards Human-Level Text-to-Speech through Style Diffusion and Adversarial Training with Large Speech Language Models + +### Yinghao Aaron Li, Cong Han, Vinay S. Raghavan, Gavin Mischler, Nima Mesgarani + +> In this paper, we present StyleTTS 2, a text-to-speech (TTS) model that leverages style diffusion and adversarial training with large speech language models (SLMs) to achieve human-level TTS synthesis. 
StyleTTS 2 differs from its predecessor by modeling styles as a latent random variable through diffusion models to generate the most suitable style for the text without requiring reference speech, achieving efficient latent diffusion while benefiting from the diverse speech synthesis offered by diffusion models. Furthermore, we employ large pre-trained SLMs, such as WavLM, as discriminators with our novel differentiable duration modeling for end-to-end training, resulting in improved speech naturalness. StyleTTS 2 surpasses human recordings on the single-speaker LJSpeech dataset and matches it on the multispeaker VCTK dataset as judged by native English speakers. Moreover, when trained on the LibriTTS dataset, our model outperforms previous publicly available models for zero-shot speaker adaptation. This work achieves the first human-level TTS synthesis on both single and multispeaker datasets, showcasing the potential of style diffusion and adversarial training with large SLMs. + +Paper: [https://arxiv.org/abs/2306.07691](https://arxiv.org/abs/2306.07691) + +Audio samples: [https://styletts2.github.io/](https://styletts2.github.io/) + +Online demo: [Hugging Face](https://huggingface.co/spaces/styletts2/styletts2) (thank [@fakerybakery](https://github.com/fakerybakery) for the wonderful online demo) + +[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/yl4579/StyleTTS2/blob/main/) [![Slack](https://img.shields.io/badge/Join%20Our%20Community-Slack-blue)](https://join.slack.com/t/styletts2/shared_invite/zt-2805io6cg-0ROMhjfW9Gd_ix_FJqjGmQ) + +## TODO +- [x] Training and inference demo code for single-speaker models (LJSpeech) +- [x] Test training code for multi-speaker models (VCTK and LibriTTS) +- [x] Finish demo code for multispeaker model and upload pre-trained models +- [x] Add a finetuning script for new speakers with base pre-trained multispeaker models +- [ ] Fix DDP (accelerator) for `train_second.py` **(I have tried everything I could to fix this but had no success, so if you are willing to help, please see [#7](https://github.com/yl4579/StyleTTS2/issues/7))** + +## Pre-requisites +1. Python >= 3.7 +2. Clone this repository: +```bash +git clone https://github.com/yl4579/StyleTTS2.git +cd StyleTTS2 +``` +3. Install python requirements: +```bash +pip install -r requirements.txt +``` +On Windows add: +```bash +pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu118 -U +``` +Also install phonemizer and espeak if you want to run the demo: +```bash +pip install phonemizer +sudo apt-get install espeak-ng +``` +4. Download and extract the [LJSpeech dataset](https://keithito.com/LJ-Speech-Dataset/), unzip to the data folder and upsample the data to 24 kHz. The text aligner and pitch extractor are pre-trained on 24 kHz data, but you can easily change the preprocessing and re-train them using your own preprocessing. +For LibriTTS, you will need to combine train-clean-360 with train-clean-100 and rename the folder train-clean-460 (see [val_list_libritts.txt](https://github.com/yl4579/StyleTTS/blob/main/Data/val_list_libritts.txt) as an example). 
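Step 4 above asks for the audio to be resampled to 24 kHz (LJSpeech ships at 22.05 kHz). A minimal sketch of one way to do this with `torchaudio` is shown below; the source and output directories are assumptions, so point them at wherever you extracted the dataset.
```python
# Hedged sketch: resample LJSpeech wavs from 22.05 kHz to 24 kHz with torchaudio.
# The directories are assumptions; adjust them to your layout.
from pathlib import Path

import torchaudio
import torchaudio.functional as AF

src_dir = Path("Data/LJSpeech-1.1/wavs")   # extracted LJSpeech wavs (assumed path)
dst_dir = Path("Data/wavs_24k")            # resampled output (assumed path)
dst_dir.mkdir(parents=True, exist_ok=True)

for wav_path in sorted(src_dir.glob("*.wav")):
    waveform, sr = torchaudio.load(str(wav_path))
    if sr != 24000:
        waveform = AF.resample(waveform, orig_freq=sr, new_freq=24000)
    torchaudio.save(str(dst_dir / wav_path.name), waveform, 24000)
```
Whatever tool you use for resampling, make sure your data lists and config point at the 24 kHz copies.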
+ +## Training +First stage training: +```bash +accelerate launch train_first.py --config_path ./Configs/config.yml +``` +Second stage training **(DDP version not working, so the current version uses DP, again see [#7](https://github.com/yl4579/StyleTTS2/issues/7) if you want to help)**: +```bash +python train_second.py --config_path ./Configs/config.yml +``` +You can run both consecutively and it will train both the first and second stages. The model will be saved in the format "epoch_1st_%05d.pth" and "epoch_2nd_%05d.pth". Checkpoints and Tensorboard logs will be saved at `log_dir`. + +The data list format needs to be `filename.wav|transcription|speaker`, see [val_list.txt](https://github.com/yl4579/StyleTTS2/blob/main/Data/val_list.txt) as an example. The speaker labels are needed for multi-speaker models because we need to sample reference audio for style diffusion model training. + +### Important Configurations +In [config.yml](https://github.com/yl4579/StyleTTS2/blob/main/Configs/config.yml), there are a few important configurations to take care of: +- `OOD_data`: The path for out-of-distribution texts for SLM adversarial training. The format should be `text|anything`. +- `min_length`: Minimum length of OOD texts for training. This is to make sure the synthesized speech has a minimum length. +- `max_len`: Maximum length of audio for training. The unit is frame. Since the default hop size is 300, one frame is approximately `300 / 24000` (0.0125) second. Lowering this if you encounter the out-of-memory issue. +- `multispeaker`: Set to true if you want to train a multispeaker model. This is needed because the architecture of the denoiser is different for single and multispeaker models. +- `batch_percentage`: This is to make sure during SLM adversarial training there are no out-of-memory (OOM) issues. If you encounter OOM problem, please set a lower number for this. + +### Pre-trained modules +In [Utils](https://github.com/yl4579/StyleTTS2/tree/main/Utils) folder, there are three pre-trained models: +- **[ASR](https://github.com/yl4579/StyleTTS2/tree/main/Utils/ASR) folder**: It contains the pre-trained text aligner, which was pre-trained on English (LibriTTS), Japanese (JVS), and Chinese (AiShell) corpus. It works well for most other languages without fine-tuning, but you can always train your own text aligner with the code here: [yl4579/AuxiliaryASR](https://github.com/yl4579/AuxiliaryASR). +- **[JDC](https://github.com/yl4579/StyleTTS2/tree/main/Utils/JDC) folder**: It contains the pre-trained pitch extractor, which was pre-trained on English (LibriTTS) corpus only. However, it works well for other languages too because F0 is independent of language. If you want to train on singing corpus, it is recommended to train a new pitch extractor with the code here: [yl4579/PitchExtractor](https://github.com/yl4579/PitchExtractor). +- **[PLBERT](https://github.com/yl4579/StyleTTS2/tree/main/Utils/PLBERT) folder**: It contains the pre-trained [PL-BERT](https://arxiv.org/abs/2301.08810) model, which was pre-trained on English (Wikipedia) corpus only. It probably does not work very well on other languages, so you will need to train a different PL-BERT for different languages using the repo here: [yl4579/PL-BERT](https://github.com/yl4579/PL-BERT). You can also replace this module with other phoneme BERT models like [XPhoneBERT](https://arxiv.org/abs/2305.19709) which is pre-trained on more than 100 languages. 
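To make the `filename.wav|transcription|speaker` list format above concrete, here is a hedged sketch that builds train/val lists from LJSpeech's `metadata.csv` (each row is `id|raw text|normalized text`). The output paths, the 99/1 split, the single-speaker label `0`, and the choice to phonemize the transcription with `phonemizer` (installed in the pre-requisites) are all assumptions; compare the result with the linked `val_list.txt` before training on it.
```python
# Hedged sketch: write train/val lists in the `filename.wav|transcription|speaker` format.
# Split ratio, paths, speaker label and the phonemize step are assumptions.
import random
from pathlib import Path

from phonemizer import phonemize

rows = Path("Data/LJSpeech-1.1/metadata.csv").read_text(encoding="utf-8").splitlines()
lines = []
for row in rows:
    file_id, _, text = row.split("|", 2)        # LJSpeech rows: id|raw|normalized
    ipa = phonemize(text, language="en-us", backend="espeak",
                    preserve_punctuation=True, with_stress=True)
    lines.append(f"{file_id}.wav|{ipa.strip()}|0")  # single speaker -> label 0

random.seed(0)
random.shuffle(lines)
n_val = max(1, len(lines) // 100)
Path("Data/val_list.txt").write_text("\n".join(lines[:n_val]) + "\n", encoding="utf-8")
Path("Data/train_list.txt").write_text("\n".join(lines[n_val:]) + "\n", encoding="utf-8")
```
For a multi-speaker set, replace the trailing `0` with the actual speaker label; batching the `phonemize` calls will also be much faster on a full corpus.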
+
+### Common Issues
+- **Loss becomes NaN**: If it is the first stage, please make sure you do not use mixed precision, as it can cause the loss to become NaN for some particular datasets when the batch size is not set properly (it needs to be more than 16 to work well). For the second stage, please also experiment with different batch sizes; higher batch sizes are more likely to cause NaN loss values. We recommend a batch size of 16. You can refer to issues [#10](https://github.com/yl4579/StyleTTS2/issues/10) and [#11](https://github.com/yl4579/StyleTTS2/issues/11) for more details.
+- **Out of memory**: Please use a lower `batch_size` or `max_len`. You may refer to issue [#10](https://github.com/yl4579/StyleTTS2/issues/10) for more information.
+- **Non-English dataset**: You can train on any language you want, but the current bottleneck is PL-BERT. The pre-trained English PL-BERT still works for other languages, but not as well as for English (you may refer to [yl4579/StyleTTS#10](https://github.com/yl4579/StyleTTS/issues/10) and [#70](https://github.com/yl4579/StyleTTS2/issues/70) for examples of training on Chinese datasets). We are currently planning to train multi-lingual PL-BERT models for the best performance. You can go to [#41](https://github.com/yl4579/StyleTTS2/issues/41) if you would like to help.
+
+## Finetuning
+The script is modified from `train_second.py`, which uses DP, as DDP does not work for `train_second.py`. Please see the bold section above if you are willing to help with this problem.
+```bash
+python train_finetune.py --config_path ./Configs/config_ft.yml
+```
+Please make sure you have the LibriTTS checkpoint downloaded and unzipped under the folder specified in `config_ft.yml`. The default configuration `config_ft.yml` finetunes on LJSpeech with 1 hour of speech data (around 1k samples) for 50 epochs. This took about 4 hours to finish on four NVIDIA A100 GPUs. The quality is slightly worse (similar to NaturalSpeech on LJSpeech) than that of the LJSpeech model trained from scratch with 24 hours of speech data, which took around 2.5 days to finish on four A100 GPUs. The samples can be found at [#65 (comment)](https://github.com/yl4579/StyleTTS2/discussions/65#discussioncomment-7668393).
+
+If you are using a **single GPU** (because the script does not work with DDP) and want to reduce VRAM usage and speed up training, you can run (thanks to [@korakoe](https://github.com/korakoe) for making the script at [#100](https://github.com/yl4579/StyleTTS2/pull/100)):
+```bash
+accelerate launch --mixed_precision=fp16 --num_processes=1 train_finetune_accelerate.py --config_path ./Configs/config_ft.yml
+```
+[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/yl4579/StyleTTS2/blob/main/Colab/StyleTTS2_Finetune_Demo.ipynb)
+
+### Common Issues
+[@Kreevoz](https://github.com/Kreevoz) has made detailed notes on common issues in finetuning, with suggestions for maximizing audio quality: [#81](https://github.com/yl4579/StyleTTS2/discussions/81). Some of these also apply to training from scratch. [@IIEleven11](https://github.com/IIEleven11) has also made a guideline for fine-tuning: [#128](https://github.com/yl4579/StyleTTS2/discussions/128).
+
+- **Out of memory after `joint_epoch`**: This is likely because your GPU RAM is not big enough for the SLM adversarial training run. You may skip it, but the quality could be worse. Setting `joint_epoch` to a number larger than `epochs` skips SLM adversarial training entirely.
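+
+If you are finetuning on your own recordings, the train and validation lists follow the same `filename.wav|transcription|speaker` format described in the Training section. Below is a minimal sketch for generating one; the clip names, transcripts, and output path are placeholders.
+```python
+# Hypothetical example: write a data list in `filename.wav|transcription|speaker` format.
+# `metadata` stands in for however you store your transcripts.
+metadata = [
+    ("clip_0001.wav", "Hello there, this is a finetuning sample."),
+    ("clip_0002.wav", "Another short utterance from the new speaker."),
+]
+speaker_id = 0  # integer speaker label, kept consistent across the list
+
+with open("Data/train_list.txt", "w", encoding="utf-8") as f:
+    for wav_name, text in metadata:
+        f.write(f"{wav_name}|{text}|{speaker_id}\n")
+```
+The wav paths are resolved relative to the `root_path` setting in the config, so either keep them relative or adjust `root_path` accordingly.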
+
+## Inference
+Please refer to [Inference_LJSpeech.ipynb](https://github.com/yl4579/StyleTTS2/blob/main/Demo/Inference_LJSpeech.ipynb) (single-speaker) and [Inference_LibriTTS.ipynb](https://github.com/yl4579/StyleTTS2/blob/main/Demo/Inference_LibriTTS.ipynb) (multi-speaker) for details. For LibriTTS, you will also need to download [reference_audio.zip](https://huggingface.co/yl4579/StyleTTS2-LibriTTS/resolve/main/reference_audio.zip) and unzip it under the `Demo` folder before running the demo.
+
+- The StyleTTS 2 model pre-trained on the LJSpeech corpus (24 kHz) can be downloaded at [https://huggingface.co/yl4579/StyleTTS2-LJSpeech/tree/main](https://huggingface.co/yl4579/StyleTTS2-LJSpeech/tree/main).
+
+  [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/yl4579/StyleTTS2/blob/main/Colab/StyleTTS2_Demo_LJSpeech.ipynb)
+
+- The StyleTTS 2 model pre-trained on LibriTTS can be downloaded at [https://huggingface.co/yl4579/StyleTTS2-LibriTTS/tree/main](https://huggingface.co/yl4579/StyleTTS2-LibriTTS/tree/main).
+
+  [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/yl4579/StyleTTS2/blob/main/Colab/StyleTTS2_Demo_LibriTTS.ipynb)
+
+You can import StyleTTS 2 and run it in your own code. However, inference depends on a GPL-licensed package, so it is not included directly in this repository. A [GPL-licensed fork](https://github.com/NeuralVox/StyleTTS2) provides an importable inference script as well as an experimental streaming API.
+
+***Before using these pre-trained models, you agree to inform listeners that the speech samples are synthesized by the pre-trained models, unless you have permission to use the voice you synthesize. That is, before making synthesized voices public, you agree to use only voices whose speakers have granted permission to have their voice cloned, either directly or by license; otherwise, you must publicly announce that these voices are synthesized.***
+
+### Common Issues
+- **High-pitched background noise**: This is caused by floating-point precision differences on older GPUs. For more details, please refer to issue [#13](https://github.com/yl4579/StyleTTS2/issues/13). Basically, you will need to use a more recent GPU or run inference on the CPU.
+- **Pre-trained model license**: You only need to abide by the above rules if you use **the pre-trained models** and the voices are **NOT** in the training set, i.e., your reference speakers are not from any open-access dataset. For more details on the rules for using the pre-trained models, please see [#37](https://github.com/yl4579/StyleTTS2/issues/37).
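+
+If you prefer to fetch the pre-trained checkpoints from a script rather than the browser, the sketch below uses the `huggingface_hub` client (install it with `pip install huggingface_hub` if it is not already present); the target folders are just examples.
+```python
+# Optional: download the released model repositories with huggingface_hub.
+# The local_dir values are example paths; use whatever layout you prefer.
+from huggingface_hub import snapshot_download
+
+snapshot_download(repo_id="yl4579/StyleTTS2-LJSpeech", local_dir="Models/LJSpeech")
+snapshot_download(repo_id="yl4579/StyleTTS2-LibriTTS", local_dir="Models/LibriTTS")
+```
+The LibriTTS repository also contains `reference_audio.zip`, which you still need to unzip under the `Demo` folder as described above.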
+ +## References +- [archinetai/audio-diffusion-pytorch](https://github.com/archinetai/audio-diffusion-pytorch) +- [jik876/hifi-gan](https://github.com/jik876/hifi-gan) +- [rishikksh20/iSTFTNet-pytorch](https://github.com/rishikksh20/iSTFTNet-pytorch) +- [nii-yamagishilab/project-NN-Pytorch-scripts/project/01-nsf](https://github.com/nii-yamagishilab/project-NN-Pytorch-scripts/tree/master/project/01-nsf) diff --git a/styletts2/Utils/ASR/__init__.py b/styletts2/Utils/ASR/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..8b137891791fe96927ad78e64b0aad7bded08bdc --- /dev/null +++ b/styletts2/Utils/ASR/__init__.py @@ -0,0 +1 @@ + diff --git a/styletts2/Utils/ASR/__pycache__/__init__.cpython-310.pyc b/styletts2/Utils/ASR/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a32a34239e617375de650c2cf838e64b72bf51ed Binary files /dev/null and b/styletts2/Utils/ASR/__pycache__/__init__.cpython-310.pyc differ diff --git a/styletts2/Utils/ASR/__pycache__/layers.cpython-310.pyc b/styletts2/Utils/ASR/__pycache__/layers.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..50ee3964c2ce7c99cf3a416cf504cacf23c1c55a Binary files /dev/null and b/styletts2/Utils/ASR/__pycache__/layers.cpython-310.pyc differ diff --git a/styletts2/Utils/ASR/__pycache__/models.cpython-310.pyc b/styletts2/Utils/ASR/__pycache__/models.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1ac8a0dac5cb6aea94284645b787153aa9fd6368 Binary files /dev/null and b/styletts2/Utils/ASR/__pycache__/models.cpython-310.pyc differ diff --git a/styletts2/Utils/ASR/config.yml b/styletts2/Utils/ASR/config.yml new file mode 100644 index 0000000000000000000000000000000000000000..ca334a10a12dd3ef12497eb12651fa5f508106dc --- /dev/null +++ b/styletts2/Utils/ASR/config.yml @@ -0,0 +1,29 @@ +log_dir: "logs/20201006" +save_freq: 5 +device: "cuda" +epochs: 180 +batch_size: 64 +pretrained_model: "" +train_data: "ASRDataset/train_list.txt" +val_data: "ASRDataset/val_list.txt" + +dataset_params: + data_augmentation: false + +preprocess_parasm: + sr: 24000 + spect_params: + n_fft: 2048 + win_length: 1200 + hop_length: 300 + mel_params: + n_mels: 80 + +model_params: + input_dim: 80 + hidden_dim: 256 + n_token: 178 + token_embedding_dim: 512 + +optimizer_params: + lr: 0.0005 \ No newline at end of file diff --git a/styletts2/Utils/ASR/layers.py b/styletts2/Utils/ASR/layers.py new file mode 100644 index 0000000000000000000000000000000000000000..bc6567b47ef60e9a64f85b7c088b7fac1683fdb5 --- /dev/null +++ b/styletts2/Utils/ASR/layers.py @@ -0,0 +1,354 @@ +import math +import torch +from torch import nn +from typing import Optional, Any +from torch import Tensor +import torch.nn.functional as F +import torchaudio +import torchaudio.functional as audio_F + +import random +random.seed(0) + + +def _get_activation_fn(activ): + if activ == 'relu': + return nn.ReLU() + elif activ == 'lrelu': + return nn.LeakyReLU(0.2) + elif activ == 'swish': + return lambda x: x*torch.sigmoid(x) + else: + raise RuntimeError('Unexpected activ type %s, expected [relu, lrelu, swish]' % activ) + +class LinearNorm(torch.nn.Module): + def __init__(self, in_dim, out_dim, bias=True, w_init_gain='linear'): + super(LinearNorm, self).__init__() + self.linear_layer = torch.nn.Linear(in_dim, out_dim, bias=bias) + + torch.nn.init.xavier_uniform_( + self.linear_layer.weight, + gain=torch.nn.init.calculate_gain(w_init_gain)) + + def forward(self, x): + return 
self.linear_layer(x) + + +class ConvNorm(torch.nn.Module): + def __init__(self, in_channels, out_channels, kernel_size=1, stride=1, + padding=None, dilation=1, bias=True, w_init_gain='linear', param=None): + super(ConvNorm, self).__init__() + if padding is None: + assert(kernel_size % 2 == 1) + padding = int(dilation * (kernel_size - 1) / 2) + + self.conv = torch.nn.Conv1d(in_channels, out_channels, + kernel_size=kernel_size, stride=stride, + padding=padding, dilation=dilation, + bias=bias) + + torch.nn.init.xavier_uniform_( + self.conv.weight, gain=torch.nn.init.calculate_gain(w_init_gain, param=param)) + + def forward(self, signal): + conv_signal = self.conv(signal) + return conv_signal + +class CausualConv(nn.Module): + def __init__(self, in_channels, out_channels, kernel_size=1, stride=1, padding=1, dilation=1, bias=True, w_init_gain='linear', param=None): + super(CausualConv, self).__init__() + if padding is None: + assert(kernel_size % 2 == 1) + padding = int(dilation * (kernel_size - 1) / 2) * 2 + else: + self.padding = padding * 2 + self.conv = nn.Conv1d(in_channels, out_channels, + kernel_size=kernel_size, stride=stride, + padding=self.padding, + dilation=dilation, + bias=bias) + + torch.nn.init.xavier_uniform_( + self.conv.weight, gain=torch.nn.init.calculate_gain(w_init_gain, param=param)) + + def forward(self, x): + x = self.conv(x) + x = x[:, :, :-self.padding] + return x + +class CausualBlock(nn.Module): + def __init__(self, hidden_dim, n_conv=3, dropout_p=0.2, activ='lrelu'): + super(CausualBlock, self).__init__() + self.blocks = nn.ModuleList([ + self._get_conv(hidden_dim, dilation=3**i, activ=activ, dropout_p=dropout_p) + for i in range(n_conv)]) + + def forward(self, x): + for block in self.blocks: + res = x + x = block(x) + x += res + return x + + def _get_conv(self, hidden_dim, dilation, activ='lrelu', dropout_p=0.2): + layers = [ + CausualConv(hidden_dim, hidden_dim, kernel_size=3, padding=dilation, dilation=dilation), + _get_activation_fn(activ), + nn.BatchNorm1d(hidden_dim), + nn.Dropout(p=dropout_p), + CausualConv(hidden_dim, hidden_dim, kernel_size=3, padding=1, dilation=1), + _get_activation_fn(activ), + nn.Dropout(p=dropout_p) + ] + return nn.Sequential(*layers) + +class ConvBlock(nn.Module): + def __init__(self, hidden_dim, n_conv=3, dropout_p=0.2, activ='relu'): + super().__init__() + self._n_groups = 8 + self.blocks = nn.ModuleList([ + self._get_conv(hidden_dim, dilation=3**i, activ=activ, dropout_p=dropout_p) + for i in range(n_conv)]) + + + def forward(self, x): + for block in self.blocks: + res = x + x = block(x) + x += res + return x + + def _get_conv(self, hidden_dim, dilation, activ='relu', dropout_p=0.2): + layers = [ + ConvNorm(hidden_dim, hidden_dim, kernel_size=3, padding=dilation, dilation=dilation), + _get_activation_fn(activ), + nn.GroupNorm(num_groups=self._n_groups, num_channels=hidden_dim), + nn.Dropout(p=dropout_p), + ConvNorm(hidden_dim, hidden_dim, kernel_size=3, padding=1, dilation=1), + _get_activation_fn(activ), + nn.Dropout(p=dropout_p) + ] + return nn.Sequential(*layers) + +class LocationLayer(nn.Module): + def __init__(self, attention_n_filters, attention_kernel_size, + attention_dim): + super(LocationLayer, self).__init__() + padding = int((attention_kernel_size - 1) / 2) + self.location_conv = ConvNorm(2, attention_n_filters, + kernel_size=attention_kernel_size, + padding=padding, bias=False, stride=1, + dilation=1) + self.location_dense = LinearNorm(attention_n_filters, attention_dim, + bias=False, w_init_gain='tanh') + + def 
forward(self, attention_weights_cat): + processed_attention = self.location_conv(attention_weights_cat) + processed_attention = processed_attention.transpose(1, 2) + processed_attention = self.location_dense(processed_attention) + return processed_attention + + +class Attention(nn.Module): + def __init__(self, attention_rnn_dim, embedding_dim, attention_dim, + attention_location_n_filters, attention_location_kernel_size): + super(Attention, self).__init__() + self.query_layer = LinearNorm(attention_rnn_dim, attention_dim, + bias=False, w_init_gain='tanh') + self.memory_layer = LinearNorm(embedding_dim, attention_dim, bias=False, + w_init_gain='tanh') + self.v = LinearNorm(attention_dim, 1, bias=False) + self.location_layer = LocationLayer(attention_location_n_filters, + attention_location_kernel_size, + attention_dim) + self.score_mask_value = -float("inf") + + def get_alignment_energies(self, query, processed_memory, + attention_weights_cat): + """ + PARAMS + ------ + query: decoder output (batch, n_mel_channels * n_frames_per_step) + processed_memory: processed encoder outputs (B, T_in, attention_dim) + attention_weights_cat: cumulative and prev. att weights (B, 2, max_time) + RETURNS + ------- + alignment (batch, max_time) + """ + + processed_query = self.query_layer(query.unsqueeze(1)) + processed_attention_weights = self.location_layer(attention_weights_cat) + energies = self.v(torch.tanh( + processed_query + processed_attention_weights + processed_memory)) + + energies = energies.squeeze(-1) + return energies + + def forward(self, attention_hidden_state, memory, processed_memory, + attention_weights_cat, mask): + """ + PARAMS + ------ + attention_hidden_state: attention rnn last output + memory: encoder outputs + processed_memory: processed encoder outputs + attention_weights_cat: previous and cummulative attention weights + mask: binary mask for padded data + """ + alignment = self.get_alignment_energies( + attention_hidden_state, processed_memory, attention_weights_cat) + + if mask is not None: + alignment.data.masked_fill_(mask, self.score_mask_value) + + attention_weights = F.softmax(alignment, dim=1) + attention_context = torch.bmm(attention_weights.unsqueeze(1), memory) + attention_context = attention_context.squeeze(1) + + return attention_context, attention_weights + + +class ForwardAttentionV2(nn.Module): + def __init__(self, attention_rnn_dim, embedding_dim, attention_dim, + attention_location_n_filters, attention_location_kernel_size): + super(ForwardAttentionV2, self).__init__() + self.query_layer = LinearNorm(attention_rnn_dim, attention_dim, + bias=False, w_init_gain='tanh') + self.memory_layer = LinearNorm(embedding_dim, attention_dim, bias=False, + w_init_gain='tanh') + self.v = LinearNorm(attention_dim, 1, bias=False) + self.location_layer = LocationLayer(attention_location_n_filters, + attention_location_kernel_size, + attention_dim) + self.score_mask_value = -float(1e20) + + def get_alignment_energies(self, query, processed_memory, + attention_weights_cat): + """ + PARAMS + ------ + query: decoder output (batch, n_mel_channels * n_frames_per_step) + processed_memory: processed encoder outputs (B, T_in, attention_dim) + attention_weights_cat: prev. 
and cumulative att weights (B, 2, max_time) + RETURNS + ------- + alignment (batch, max_time) + """ + + processed_query = self.query_layer(query.unsqueeze(1)) + processed_attention_weights = self.location_layer(attention_weights_cat) + energies = self.v(torch.tanh( + processed_query + processed_attention_weights + processed_memory)) + + energies = energies.squeeze(-1) + return energies + + def forward(self, attention_hidden_state, memory, processed_memory, + attention_weights_cat, mask, log_alpha): + """ + PARAMS + ------ + attention_hidden_state: attention rnn last output + memory: encoder outputs + processed_memory: processed encoder outputs + attention_weights_cat: previous and cummulative attention weights + mask: binary mask for padded data + """ + log_energy = self.get_alignment_energies( + attention_hidden_state, processed_memory, attention_weights_cat) + + #log_energy = + + if mask is not None: + log_energy.data.masked_fill_(mask, self.score_mask_value) + + #attention_weights = F.softmax(alignment, dim=1) + + #content_score = log_energy.unsqueeze(1) #[B, MAX_TIME] -> [B, 1, MAX_TIME] + #log_alpha = log_alpha.unsqueeze(2) #[B, MAX_TIME] -> [B, MAX_TIME, 1] + + #log_total_score = log_alpha + content_score + + #previous_attention_weights = attention_weights_cat[:,0,:] + + log_alpha_shift_padded = [] + max_time = log_energy.size(1) + for sft in range(2): + shifted = log_alpha[:,:max_time-sft] + shift_padded = F.pad(shifted, (sft,0), 'constant', self.score_mask_value) + log_alpha_shift_padded.append(shift_padded.unsqueeze(2)) + + biased = torch.logsumexp(torch.cat(log_alpha_shift_padded,2), 2) + + log_alpha_new = biased + log_energy + + attention_weights = F.softmax(log_alpha_new, dim=1) + + attention_context = torch.bmm(attention_weights.unsqueeze(1), memory) + attention_context = attention_context.squeeze(1) + + return attention_context, attention_weights, log_alpha_new + + +class PhaseShuffle2d(nn.Module): + def __init__(self, n=2): + super(PhaseShuffle2d, self).__init__() + self.n = n + self.random = random.Random(1) + + def forward(self, x, move=None): + # x.size = (B, C, M, L) + if move is None: + move = self.random.randint(-self.n, self.n) + + if move == 0: + return x + else: + left = x[:, :, :, :move] + right = x[:, :, :, move:] + shuffled = torch.cat([right, left], dim=3) + return shuffled + +class PhaseShuffle1d(nn.Module): + def __init__(self, n=2): + super(PhaseShuffle1d, self).__init__() + self.n = n + self.random = random.Random(1) + + def forward(self, x, move=None): + # x.size = (B, C, M, L) + if move is None: + move = self.random.randint(-self.n, self.n) + + if move == 0: + return x + else: + left = x[:, :, :move] + right = x[:, :, move:] + shuffled = torch.cat([right, left], dim=2) + + return shuffled + +class MFCC(nn.Module): + def __init__(self, n_mfcc=40, n_mels=80): + super(MFCC, self).__init__() + self.n_mfcc = n_mfcc + self.n_mels = n_mels + self.norm = 'ortho' + dct_mat = audio_F.create_dct(self.n_mfcc, self.n_mels, self.norm) + self.register_buffer('dct_mat', dct_mat) + + def forward(self, mel_specgram): + if len(mel_specgram.shape) == 2: + mel_specgram = mel_specgram.unsqueeze(0) + unsqueezed = True + else: + unsqueezed = False + # (channel, n_mels, time).tranpose(...) dot (n_mels, n_mfcc) + # -> (channel, time, n_mfcc).tranpose(...) 
+ mfcc = torch.matmul(mel_specgram.transpose(1, 2), self.dct_mat).transpose(1, 2) + + # unpack batch + if unsqueezed: + mfcc = mfcc.squeeze(0) + return mfcc diff --git a/styletts2/Utils/ASR/models.py b/styletts2/Utils/ASR/models.py new file mode 100644 index 0000000000000000000000000000000000000000..278c8d06a1cafe77afcac208617b8716357d38d3 --- /dev/null +++ b/styletts2/Utils/ASR/models.py @@ -0,0 +1,186 @@ +import math +import torch +from torch import nn +from torch.nn import TransformerEncoder +import torch.nn.functional as F +from .layers import MFCC, Attention, LinearNorm, ConvNorm, ConvBlock + +class ASRCNN(nn.Module): + def __init__(self, + input_dim=80, + hidden_dim=256, + n_token=35, + n_layers=6, + token_embedding_dim=256, + + ): + super().__init__() + self.n_token = n_token + self.n_down = 1 + self.to_mfcc = MFCC() + self.init_cnn = ConvNorm(input_dim//2, hidden_dim, kernel_size=7, padding=3, stride=2) + self.cnns = nn.Sequential( + *[nn.Sequential( + ConvBlock(hidden_dim), + nn.GroupNorm(num_groups=1, num_channels=hidden_dim) + ) for n in range(n_layers)]) + self.projection = ConvNorm(hidden_dim, hidden_dim // 2) + self.ctc_linear = nn.Sequential( + LinearNorm(hidden_dim//2, hidden_dim), + nn.ReLU(), + LinearNorm(hidden_dim, n_token)) + self.asr_s2s = ASRS2S( + embedding_dim=token_embedding_dim, + hidden_dim=hidden_dim//2, + n_token=n_token) + + def forward(self, x, src_key_padding_mask=None, text_input=None): + x = self.to_mfcc(x) + x = self.init_cnn(x) + x = self.cnns(x) + x = self.projection(x) + x = x.transpose(1, 2) + ctc_logit = self.ctc_linear(x) + if text_input is not None: + _, s2s_logit, s2s_attn = self.asr_s2s(x, src_key_padding_mask, text_input) + return ctc_logit, s2s_logit, s2s_attn + else: + return ctc_logit + + def get_feature(self, x): + x = self.to_mfcc(x.squeeze(1)) + x = self.init_cnn(x) + x = self.cnns(x) + x = self.projection(x) + return x + + def length_to_mask(self, lengths): + mask = torch.arange(lengths.max()).unsqueeze(0).expand(lengths.shape[0], -1).type_as(lengths) + mask = torch.gt(mask+1, lengths.unsqueeze(1)).to(lengths.device) + return mask + + def get_future_mask(self, out_length, unmask_future_steps=0): + """ + Args: + out_length (int): returned mask shape is (out_length, out_length). + unmask_futre_steps (int): unmasking future step size. 
+ Return: + mask (torch.BoolTensor): mask future timesteps mask[i, j] = True if i > j + unmask_future_steps else False + """ + index_tensor = torch.arange(out_length).unsqueeze(0).expand(out_length, -1) + mask = torch.gt(index_tensor, index_tensor.T + unmask_future_steps) + return mask + +class ASRS2S(nn.Module): + def __init__(self, + embedding_dim=256, + hidden_dim=512, + n_location_filters=32, + location_kernel_size=63, + n_token=40): + super(ASRS2S, self).__init__() + self.embedding = nn.Embedding(n_token, embedding_dim) + val_range = math.sqrt(6 / hidden_dim) + self.embedding.weight.data.uniform_(-val_range, val_range) + + self.decoder_rnn_dim = hidden_dim + self.project_to_n_symbols = nn.Linear(self.decoder_rnn_dim, n_token) + self.attention_layer = Attention( + self.decoder_rnn_dim, + hidden_dim, + hidden_dim, + n_location_filters, + location_kernel_size + ) + self.decoder_rnn = nn.LSTMCell(self.decoder_rnn_dim + embedding_dim, self.decoder_rnn_dim) + self.project_to_hidden = nn.Sequential( + LinearNorm(self.decoder_rnn_dim * 2, hidden_dim), + nn.Tanh()) + self.sos = 1 + self.eos = 2 + + def initialize_decoder_states(self, memory, mask): + """ + moemory.shape = (B, L, H) = (Batchsize, Maxtimestep, Hiddendim) + """ + B, L, H = memory.shape + self.decoder_hidden = torch.zeros((B, self.decoder_rnn_dim)).type_as(memory) + self.decoder_cell = torch.zeros((B, self.decoder_rnn_dim)).type_as(memory) + self.attention_weights = torch.zeros((B, L)).type_as(memory) + self.attention_weights_cum = torch.zeros((B, L)).type_as(memory) + self.attention_context = torch.zeros((B, H)).type_as(memory) + self.memory = memory + self.processed_memory = self.attention_layer.memory_layer(memory) + self.mask = mask + self.unk_index = 3 + self.random_mask = 0.1 + + def forward(self, memory, memory_mask, text_input): + """ + moemory.shape = (B, L, H) = (Batchsize, Maxtimestep, Hiddendim) + moemory_mask.shape = (B, L, ) + texts_input.shape = (B, T) + """ + self.initialize_decoder_states(memory, memory_mask) + # text random mask + random_mask = (torch.rand(text_input.shape) < self.random_mask).to(text_input.device) + _text_input = text_input.clone() + _text_input.masked_fill_(random_mask, self.unk_index) + decoder_inputs = self.embedding(_text_input).transpose(0, 1) # -> [T, B, channel] + start_embedding = self.embedding( + torch.LongTensor([self.sos]*decoder_inputs.size(1)).to(decoder_inputs.device)) + decoder_inputs = torch.cat((start_embedding.unsqueeze(0), decoder_inputs), dim=0) + + hidden_outputs, logit_outputs, alignments = [], [], [] + while len(hidden_outputs) < decoder_inputs.size(0): + + decoder_input = decoder_inputs[len(hidden_outputs)] + hidden, logit, attention_weights = self.decode(decoder_input) + hidden_outputs += [hidden] + logit_outputs += [logit] + alignments += [attention_weights] + + hidden_outputs, logit_outputs, alignments = \ + self.parse_decoder_outputs( + hidden_outputs, logit_outputs, alignments) + + return hidden_outputs, logit_outputs, alignments + + + def decode(self, decoder_input): + + cell_input = torch.cat((decoder_input, self.attention_context), -1) + self.decoder_hidden, self.decoder_cell = self.decoder_rnn( + cell_input, + (self.decoder_hidden, self.decoder_cell)) + + attention_weights_cat = torch.cat( + (self.attention_weights.unsqueeze(1), + self.attention_weights_cum.unsqueeze(1)),dim=1) + + self.attention_context, self.attention_weights = self.attention_layer( + self.decoder_hidden, + self.memory, + self.processed_memory, + attention_weights_cat, + self.mask) + + 
self.attention_weights_cum += self.attention_weights + + hidden_and_context = torch.cat((self.decoder_hidden, self.attention_context), -1) + hidden = self.project_to_hidden(hidden_and_context) + + # dropout to increasing g + logit = self.project_to_n_symbols(F.dropout(hidden, 0.5, self.training)) + + return hidden, logit, self.attention_weights + + def parse_decoder_outputs(self, hidden, logit, alignments): + + # -> [B, T_out + 1, max_time] + alignments = torch.stack(alignments).transpose(0,1) + # [T_out + 1, B, n_symbols] -> [B, T_out + 1, n_symbols] + logit = torch.stack(logit).transpose(0, 1).contiguous() + hidden = torch.stack(hidden).transpose(0, 1).contiguous() + + return hidden, logit, alignments diff --git a/styletts2/Utils/JDC/__init__.py b/styletts2/Utils/JDC/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..8b137891791fe96927ad78e64b0aad7bded08bdc --- /dev/null +++ b/styletts2/Utils/JDC/__init__.py @@ -0,0 +1 @@ + diff --git a/styletts2/Utils/JDC/__pycache__/__init__.cpython-310.pyc b/styletts2/Utils/JDC/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2459476f150644746ccadb4b1502c02c083cc736 Binary files /dev/null and b/styletts2/Utils/JDC/__pycache__/__init__.cpython-310.pyc differ diff --git a/styletts2/Utils/JDC/__pycache__/model.cpython-310.pyc b/styletts2/Utils/JDC/__pycache__/model.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..230514273d180dd870151d92d8181f2f12c215ae Binary files /dev/null and b/styletts2/Utils/JDC/__pycache__/model.cpython-310.pyc differ diff --git a/styletts2/Utils/JDC/model.py b/styletts2/Utils/JDC/model.py new file mode 100644 index 0000000000000000000000000000000000000000..83cd266d1cd6f054d0684e8e1a60496044048605 --- /dev/null +++ b/styletts2/Utils/JDC/model.py @@ -0,0 +1,190 @@ +""" +Implementation of model from: +Kum et al. - "Joint Detection and Classification of Singing Voice Melody Using +Convolutional Recurrent Neural Networks" (2019) +Link: https://www.semanticscholar.org/paper/Joint-Detection-and-Classification-of-Singing-Voice-Kum-Nam/60a2ad4c7db43bace75805054603747fcd062c0d +""" +import torch +from torch import nn + +class JDCNet(nn.Module): + """ + Joint Detection and Classification Network model for singing voice melody. 
+ """ + def __init__(self, num_class=722, seq_len=31, leaky_relu_slope=0.01): + super().__init__() + self.num_class = num_class + + # input = (b, 1, 31, 513), b = batch size + self.conv_block = nn.Sequential( + nn.Conv2d(in_channels=1, out_channels=64, kernel_size=3, padding=1, bias=False), # out: (b, 64, 31, 513) + nn.BatchNorm2d(num_features=64), + nn.LeakyReLU(leaky_relu_slope, inplace=True), + nn.Conv2d(64, 64, 3, padding=1, bias=False), # (b, 64, 31, 513) + ) + + # res blocks + self.res_block1 = ResBlock(in_channels=64, out_channels=128) # (b, 128, 31, 128) + self.res_block2 = ResBlock(in_channels=128, out_channels=192) # (b, 192, 31, 32) + self.res_block3 = ResBlock(in_channels=192, out_channels=256) # (b, 256, 31, 8) + + # pool block + self.pool_block = nn.Sequential( + nn.BatchNorm2d(num_features=256), + nn.LeakyReLU(leaky_relu_slope, inplace=True), + nn.MaxPool2d(kernel_size=(1, 4)), # (b, 256, 31, 2) + nn.Dropout(p=0.2), + ) + + # maxpool layers (for auxiliary network inputs) + # in = (b, 128, 31, 513) from conv_block, out = (b, 128, 31, 2) + self.maxpool1 = nn.MaxPool2d(kernel_size=(1, 40)) + # in = (b, 128, 31, 128) from res_block1, out = (b, 128, 31, 2) + self.maxpool2 = nn.MaxPool2d(kernel_size=(1, 20)) + # in = (b, 128, 31, 32) from res_block2, out = (b, 128, 31, 2) + self.maxpool3 = nn.MaxPool2d(kernel_size=(1, 10)) + + # in = (b, 640, 31, 2), out = (b, 256, 31, 2) + self.detector_conv = nn.Sequential( + nn.Conv2d(640, 256, 1, bias=False), + nn.BatchNorm2d(256), + nn.LeakyReLU(leaky_relu_slope, inplace=True), + nn.Dropout(p=0.2), + ) + + # input: (b, 31, 512) - resized from (b, 256, 31, 2) + self.bilstm_classifier = nn.LSTM( + input_size=512, hidden_size=256, + batch_first=True, bidirectional=True) # (b, 31, 512) + + # input: (b, 31, 512) - resized from (b, 256, 31, 2) + self.bilstm_detector = nn.LSTM( + input_size=512, hidden_size=256, + batch_first=True, bidirectional=True) # (b, 31, 512) + + # input: (b * 31, 512) + self.classifier = nn.Linear(in_features=512, out_features=self.num_class) # (b * 31, num_class) + + # input: (b * 31, 512) + self.detector = nn.Linear(in_features=512, out_features=2) # (b * 31, 2) - binary classifier + + # initialize weights + self.apply(self.init_weights) + + def get_feature_GAN(self, x): + seq_len = x.shape[-2] + x = x.float().transpose(-1, -2) + + convblock_out = self.conv_block(x) + + resblock1_out = self.res_block1(convblock_out) + resblock2_out = self.res_block2(resblock1_out) + resblock3_out = self.res_block3(resblock2_out) + poolblock_out = self.pool_block[0](resblock3_out) + poolblock_out = self.pool_block[1](poolblock_out) + + return poolblock_out.transpose(-1, -2) + + def get_feature(self, x): + seq_len = x.shape[-2] + x = x.float().transpose(-1, -2) + + convblock_out = self.conv_block(x) + + resblock1_out = self.res_block1(convblock_out) + resblock2_out = self.res_block2(resblock1_out) + resblock3_out = self.res_block3(resblock2_out) + poolblock_out = self.pool_block[0](resblock3_out) + poolblock_out = self.pool_block[1](poolblock_out) + + return self.pool_block[2](poolblock_out) + + def forward(self, x): + """ + Returns: + classification_prediction, detection_prediction + sizes: (b, 31, 722), (b, 31, 2) + """ + ############################### + # forward pass for classifier # + ############################### + seq_len = x.shape[-1] + x = x.float().transpose(-1, -2) + + convblock_out = self.conv_block(x) + + resblock1_out = self.res_block1(convblock_out) + resblock2_out = self.res_block2(resblock1_out) + resblock3_out = 
self.res_block3(resblock2_out) + + + poolblock_out = self.pool_block[0](resblock3_out) + poolblock_out = self.pool_block[1](poolblock_out) + GAN_feature = poolblock_out.transpose(-1, -2) + poolblock_out = self.pool_block[2](poolblock_out) + + # (b, 256, 31, 2) => (b, 31, 256, 2) => (b, 31, 512) + classifier_out = poolblock_out.permute(0, 2, 1, 3).contiguous().view((-1, seq_len, 512)) + classifier_out, _ = self.bilstm_classifier(classifier_out) # ignore the hidden states + + classifier_out = classifier_out.contiguous().view((-1, 512)) # (b * 31, 512) + classifier_out = self.classifier(classifier_out) + classifier_out = classifier_out.view((-1, seq_len, self.num_class)) # (b, 31, num_class) + + # sizes: (b, 31, 722), (b, 31, 2) + # classifier output consists of predicted pitch classes per frame + # detector output consists of: (isvoice, notvoice) estimates per frame + return torch.abs(classifier_out.squeeze()), GAN_feature, poolblock_out + + @staticmethod + def init_weights(m): + if isinstance(m, nn.Linear): + nn.init.kaiming_uniform_(m.weight) + if m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.Conv2d): + nn.init.xavier_normal_(m.weight) + elif isinstance(m, nn.LSTM) or isinstance(m, nn.LSTMCell): + for p in m.parameters(): + if p.data is None: + continue + + if len(p.shape) >= 2: + nn.init.orthogonal_(p.data) + else: + nn.init.normal_(p.data) + + +class ResBlock(nn.Module): + def __init__(self, in_channels: int, out_channels: int, leaky_relu_slope=0.01): + super().__init__() + self.downsample = in_channels != out_channels + + # BN / LReLU / MaxPool layer before the conv layer - see Figure 1b in the paper + self.pre_conv = nn.Sequential( + nn.BatchNorm2d(num_features=in_channels), + nn.LeakyReLU(leaky_relu_slope, inplace=True), + nn.MaxPool2d(kernel_size=(1, 2)), # apply downsampling on the y axis only + ) + + # conv layers + self.conv = nn.Sequential( + nn.Conv2d(in_channels=in_channels, out_channels=out_channels, + kernel_size=3, padding=1, bias=False), + nn.BatchNorm2d(out_channels), + nn.LeakyReLU(leaky_relu_slope, inplace=True), + nn.Conv2d(out_channels, out_channels, 3, padding=1, bias=False), + ) + + # 1 x 1 convolution layer to match the feature dimensions + self.conv1by1 = None + if self.downsample: + self.conv1by1 = nn.Conv2d(in_channels, out_channels, 1, bias=False) + + def forward(self, x): + x = self.pre_conv(x) + if self.downsample: + x = self.conv(x) + self.conv1by1(x) + else: + x = self.conv(x) + x + return x \ No newline at end of file diff --git a/styletts2/Utils/PLBERT/__pycache__/util.cpython-310.pyc b/styletts2/Utils/PLBERT/__pycache__/util.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..afdc703ea232fafadcd5cc9bd121f9765c496254 Binary files /dev/null and b/styletts2/Utils/PLBERT/__pycache__/util.cpython-310.pyc differ diff --git a/styletts2/Utils/PLBERT/config.yml b/styletts2/Utils/PLBERT/config.yml new file mode 100644 index 0000000000000000000000000000000000000000..75f60d1eccfee7e253649a1ae02105b5245e2473 --- /dev/null +++ b/styletts2/Utils/PLBERT/config.yml @@ -0,0 +1,30 @@ +log_dir: "Checkpoint" +mixed_precision: "fp16" +data_folder: "wikipedia_20220301.en.processed" +batch_size: 192 +save_interval: 5000 +log_interval: 10 +num_process: 1 # number of GPUs +num_steps: 1000000 + +dataset_params: + tokenizer: "transfo-xl-wt103" + token_separator: " " # token used for phoneme separator (space) + token_mask: "M" # token used for phoneme mask (M) + word_separator: 3039 # token used for word separator () + 
token_maps: "token_maps.pkl" # token map path + + max_mel_length: 512 # max phoneme length + + word_mask_prob: 0.15 # probability to mask the entire word + phoneme_mask_prob: 0.1 # probability to mask each phoneme + replace_prob: 0.2 # probablity to replace phonemes + +model_params: + vocab_size: 178 + hidden_size: 768 + num_attention_heads: 12 + intermediate_size: 2048 + max_position_embeddings: 512 + num_hidden_layers: 12 + dropout: 0.1 \ No newline at end of file diff --git a/styletts2/Utils/PLBERT/util.py b/styletts2/Utils/PLBERT/util.py new file mode 100644 index 0000000000000000000000000000000000000000..7a6d9bd163dac1078617658de811ee74f016b37c --- /dev/null +++ b/styletts2/Utils/PLBERT/util.py @@ -0,0 +1,52 @@ +import os +import yaml +import torch +from transformers import AlbertConfig, AlbertModel + +class CustomAlbert(AlbertModel): + def forward(self, *args, **kwargs): + # Call the original forward method + outputs = super().forward(*args, **kwargs) + + # Only return the last_hidden_state + return outputs.last_hidden_state + + +def load_plbert(log_dir, config_path=None, checkpoint_path=None): + """ + + :param log_dir: + :param config_path: + :param checkpoint_path: + :return: + """ + if not config_path: + config_path = os.path.join(log_dir, "config.yml") + plbert_config = yaml.safe_load(open(config_path)) + + albert_base_configuration = AlbertConfig(**plbert_config['model_params']) + bert = CustomAlbert(albert_base_configuration) + + if not checkpoint_path: + files = os.listdir(log_dir) + ckpts = [] + for f in os.listdir(log_dir): + if f.startswith("step_"): ckpts.append(f) + + iters = [int(f.split('_')[-1].split('.')[0]) for f in ckpts if os.path.isfile(os.path.join(log_dir, f))] + iters = sorted(iters)[-1] + checkpoint_path = log_dir / f"step_{iters}.t7" + + checkpoint = torch.load(checkpoint_path, map_location='cpu') + state_dict = checkpoint['net'] + from collections import OrderedDict + new_state_dict = OrderedDict() + for k, v in state_dict.items(): + name = k[7:] # remove `module.` + if name.startswith('encoder.'): + name = name[8:] # remove `encoder.` + new_state_dict[name] = v + del new_state_dict["embeddings.position_ids"] + bert.load_state_dict(new_state_dict, strict=False) + + return bert diff --git a/styletts2/Utils/__init__.py b/styletts2/Utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..8b137891791fe96927ad78e64b0aad7bded08bdc --- /dev/null +++ b/styletts2/Utils/__init__.py @@ -0,0 +1 @@ + diff --git a/styletts2/Utils/__pycache__/__init__.cpython-310.pyc b/styletts2/Utils/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e41c8d2ed393ff8b16a1c7594c200726403daa3e Binary files /dev/null and b/styletts2/Utils/__pycache__/__init__.cpython-310.pyc differ diff --git a/styletts2/__init__.py b/styletts2/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/styletts2/losses.py b/styletts2/losses.py new file mode 100644 index 0000000000000000000000000000000000000000..1766acd1b7d526857ebb8b5052df335c30660762 --- /dev/null +++ b/styletts2/losses.py @@ -0,0 +1,253 @@ +import torch +from torch import nn +import torch.nn.functional as F +import torchaudio +from transformers import AutoModel + +class SpectralConvergengeLoss(torch.nn.Module): + """Spectral convergence loss module.""" + + def __init__(self): + """Initilize spectral convergence loss module.""" + super(SpectralConvergengeLoss, self).__init__() + + 
def forward(self, x_mag, y_mag): + """Calculate forward propagation. + Args: + x_mag (Tensor): Magnitude spectrogram of predicted signal (B, #frames, #freq_bins). + y_mag (Tensor): Magnitude spectrogram of groundtruth signal (B, #frames, #freq_bins). + Returns: + Tensor: Spectral convergence loss value. + """ + return torch.norm(y_mag - x_mag, p=1) / torch.norm(y_mag, p=1) + +class STFTLoss(torch.nn.Module): + """STFT loss module.""" + + def __init__(self, fft_size=1024, shift_size=120, win_length=600, window=torch.hann_window): + """Initialize STFT loss module.""" + super(STFTLoss, self).__init__() + self.fft_size = fft_size + self.shift_size = shift_size + self.win_length = win_length + self.to_mel = torchaudio.transforms.MelSpectrogram(sample_rate=24000, n_fft=fft_size, win_length=win_length, hop_length=shift_size, window_fn=window) + + self.spectral_convergenge_loss = SpectralConvergengeLoss() + + def forward(self, x, y): + """Calculate forward propagation. + Args: + x (Tensor): Predicted signal (B, T). + y (Tensor): Groundtruth signal (B, T). + Returns: + Tensor: Spectral convergence loss value. + Tensor: Log STFT magnitude loss value. + """ + x_mag = self.to_mel(x) + mean, std = -4, 4 + x_mag = (torch.log(1e-5 + x_mag) - mean) / std + + y_mag = self.to_mel(y) + mean, std = -4, 4 + y_mag = (torch.log(1e-5 + y_mag) - mean) / std + + sc_loss = self.spectral_convergenge_loss(x_mag, y_mag) + return sc_loss + + +class MultiResolutionSTFTLoss(torch.nn.Module): + """Multi resolution STFT loss module.""" + + def __init__(self, + fft_sizes=[1024, 2048, 512], + hop_sizes=[120, 240, 50], + win_lengths=[600, 1200, 240], + window=torch.hann_window): + """Initialize Multi resolution STFT loss module. + Args: + fft_sizes (list): List of FFT sizes. + hop_sizes (list): List of hop sizes. + win_lengths (list): List of window lengths. + window (str): Window function type. + """ + super(MultiResolutionSTFTLoss, self).__init__() + assert len(fft_sizes) == len(hop_sizes) == len(win_lengths) + self.stft_losses = torch.nn.ModuleList() + for fs, ss, wl in zip(fft_sizes, hop_sizes, win_lengths): + self.stft_losses += [STFTLoss(fs, ss, wl, window)] + + def forward(self, x, y): + """Calculate forward propagation. + Args: + x (Tensor): Predicted signal (B, T). + y (Tensor): Groundtruth signal (B, T). + Returns: + Tensor: Multi resolution spectral convergence loss value. + Tensor: Multi resolution log STFT magnitude loss value. 
+ """ + sc_loss = 0.0 + for f in self.stft_losses: + sc_l = f(x, y) + sc_loss += sc_l + sc_loss /= len(self.stft_losses) + + return sc_loss + + +def feature_loss(fmap_r, fmap_g): + loss = 0 + for dr, dg in zip(fmap_r, fmap_g): + for rl, gl in zip(dr, dg): + loss += torch.mean(torch.abs(rl - gl)) + + return loss*2 + + +def discriminator_loss(disc_real_outputs, disc_generated_outputs): + loss = 0 + r_losses = [] + g_losses = [] + for dr, dg in zip(disc_real_outputs, disc_generated_outputs): + r_loss = torch.mean((1-dr)**2) + g_loss = torch.mean(dg**2) + loss += (r_loss + g_loss) + r_losses.append(r_loss.item()) + g_losses.append(g_loss.item()) + + return loss, r_losses, g_losses + + +def generator_loss(disc_outputs): + loss = 0 + gen_losses = [] + for dg in disc_outputs: + l = torch.mean((1-dg)**2) + gen_losses.append(l) + loss += l + + return loss, gen_losses + +""" https://dl.acm.org/doi/abs/10.1145/3573834.3574506 """ +def discriminator_TPRLS_loss(disc_real_outputs, disc_generated_outputs): + loss = 0 + for dr, dg in zip(disc_real_outputs, disc_generated_outputs): + tau = 0.04 + m_DG = torch.median((dr-dg)) + L_rel = torch.mean((((dr - dg) - m_DG)**2)[dr < dg + m_DG]) + loss += tau - F.relu(tau - L_rel) + return loss + +def generator_TPRLS_loss(disc_real_outputs, disc_generated_outputs): + loss = 0 + for dg, dr in zip(disc_real_outputs, disc_generated_outputs): + tau = 0.04 + m_DG = torch.median((dr-dg)) + L_rel = torch.mean((((dr - dg) - m_DG)**2)[dr < dg + m_DG]) + loss += tau - F.relu(tau - L_rel) + return loss + +class GeneratorLoss(torch.nn.Module): + + def __init__(self, mpd, msd): + super(GeneratorLoss, self).__init__() + self.mpd = mpd + self.msd = msd + + def forward(self, y, y_hat): + y_df_hat_r, y_df_hat_g, fmap_f_r, fmap_f_g = self.mpd(y, y_hat) + y_ds_hat_r, y_ds_hat_g, fmap_s_r, fmap_s_g = self.msd(y, y_hat) + loss_fm_f = feature_loss(fmap_f_r, fmap_f_g) + loss_fm_s = feature_loss(fmap_s_r, fmap_s_g) + loss_gen_f, losses_gen_f = generator_loss(y_df_hat_g) + loss_gen_s, losses_gen_s = generator_loss(y_ds_hat_g) + + loss_rel = generator_TPRLS_loss(y_df_hat_r, y_df_hat_g) + generator_TPRLS_loss(y_ds_hat_r, y_ds_hat_g) + + loss_gen_all = loss_gen_s + loss_gen_f + loss_fm_s + loss_fm_f + loss_rel + + return loss_gen_all.mean() + +class DiscriminatorLoss(torch.nn.Module): + + def __init__(self, mpd, msd): + super(DiscriminatorLoss, self).__init__() + self.mpd = mpd + self.msd = msd + + def forward(self, y, y_hat): + # MPD + y_df_hat_r, y_df_hat_g, _, _ = self.mpd(y, y_hat) + loss_disc_f, losses_disc_f_r, losses_disc_f_g = discriminator_loss(y_df_hat_r, y_df_hat_g) + # MSD + y_ds_hat_r, y_ds_hat_g, _, _ = self.msd(y, y_hat) + loss_disc_s, losses_disc_s_r, losses_disc_s_g = discriminator_loss(y_ds_hat_r, y_ds_hat_g) + + loss_rel = discriminator_TPRLS_loss(y_df_hat_r, y_df_hat_g) + discriminator_TPRLS_loss(y_ds_hat_r, y_ds_hat_g) + + + d_loss = loss_disc_s + loss_disc_f + loss_rel + + return d_loss.mean() + + +class WavLMLoss(torch.nn.Module): + + def __init__(self, model, wd, model_sr, slm_sr=16000): + super(WavLMLoss, self).__init__() + self.wavlm = AutoModel.from_pretrained(model) + self.wd = wd + self.resample = torchaudio.transforms.Resample(model_sr, slm_sr) + + def forward(self, wav, y_rec): + with torch.no_grad(): + wav_16 = self.resample(wav) + wav_embeddings = self.wavlm(input_values=wav_16, output_hidden_states=True).hidden_states + y_rec_16 = self.resample(y_rec) + y_rec_embeddings = self.wavlm(input_values=y_rec_16.squeeze(), output_hidden_states=True).hidden_states + + 
floss = 0 + for er, eg in zip(wav_embeddings, y_rec_embeddings): + floss += torch.mean(torch.abs(er - eg)) + + return floss.mean() + + def generator(self, y_rec): + y_rec_16 = self.resample(y_rec) + y_rec_embeddings = self.wavlm(input_values=y_rec_16, output_hidden_states=True).hidden_states + y_rec_embeddings = torch.stack(y_rec_embeddings, dim=1).transpose(-1, -2).flatten(start_dim=1, end_dim=2) + y_df_hat_g = self.wd(y_rec_embeddings) + loss_gen = torch.mean((1-y_df_hat_g)**2) + + return loss_gen + + def discriminator(self, wav, y_rec): + with torch.no_grad(): + wav_16 = self.resample(wav) + wav_embeddings = self.wavlm(input_values=wav_16, output_hidden_states=True).hidden_states + y_rec_16 = self.resample(y_rec) + y_rec_embeddings = self.wavlm(input_values=y_rec_16, output_hidden_states=True).hidden_states + + y_embeddings = torch.stack(wav_embeddings, dim=1).transpose(-1, -2).flatten(start_dim=1, end_dim=2) + y_rec_embeddings = torch.stack(y_rec_embeddings, dim=1).transpose(-1, -2).flatten(start_dim=1, end_dim=2) + + y_d_rs = self.wd(y_embeddings) + y_d_gs = self.wd(y_rec_embeddings) + + y_df_hat_r, y_df_hat_g = y_d_rs, y_d_gs + + r_loss = torch.mean((1-y_df_hat_r)**2) + g_loss = torch.mean((y_df_hat_g)**2) + + loss_disc_f = r_loss + g_loss + + return loss_disc_f.mean() + + def discriminator_forward(self, wav): + with torch.no_grad(): + wav_16 = self.resample(wav) + wav_embeddings = self.wavlm(input_values=wav_16, output_hidden_states=True).hidden_states + y_embeddings = torch.stack(wav_embeddings, dim=1).transpose(-1, -2).flatten(start_dim=1, end_dim=2) + + y_d_rs = self.wd(y_embeddings) + + return y_d_rs \ No newline at end of file diff --git a/styletts2/meldataset.py b/styletts2/meldataset.py new file mode 100644 index 0000000000000000000000000000000000000000..afc5acc76123ce0eb34afb3dc5007e4b08731e1d --- /dev/null +++ b/styletts2/meldataset.py @@ -0,0 +1,255 @@ +#coding: utf-8 +import os +import os.path as osp +import time +import random +import numpy as np +import random +import soundfile as sf +import librosa + +import torch +from torch import nn +import torch.nn.functional as F +import torchaudio +from torch.utils.data import DataLoader + +import logging +logger = logging.getLogger(__name__) +logger.setLevel(logging.DEBUG) + +import pandas as pd + +_pad = "$" +_punctuation = ';:,.!?¡¿—…"«»“” ' +_letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz' +_letters_ipa = "ɑɐɒæɓʙβɔɕçɗɖðʤəɘɚɛɜɝɞɟʄɡɠɢʛɦɧħɥʜɨɪʝɭɬɫɮʟɱɯɰŋɳɲɴøɵɸθœɶʘɹɺɾɻʀʁɽʂʃʈʧʉʊʋⱱʌɣɤʍχʎʏʑʐʒʔʡʕʢǀǁǂǃˈˌːˑʼʴʰʱʲʷˠˤ˞↓↑→↗↘'̩'ᵻ" + +# Export all symbols: +symbols = [_pad] + list(_punctuation) + list(_letters) + list(_letters_ipa) + +dicts = {} +for i in range(len((symbols))): + dicts[symbols[i]] = i + +class TextCleaner: + def __init__(self, dummy=None): + self.word_index_dictionary = dicts + def __call__(self, text): + indexes = [] + for char in text: + try: + indexes.append(self.word_index_dictionary[char]) + except KeyError: + print(text) + return indexes + +np.random.seed(1) +random.seed(1) +SPECT_PARAMS = { + "n_fft": 2048, + "win_length": 1200, + "hop_length": 300 +} +MEL_PARAMS = { + "n_mels": 80, +} + +to_mel = torchaudio.transforms.MelSpectrogram( + n_mels=80, n_fft=2048, win_length=1200, hop_length=300) +mean, std = -4, 4 + +def preprocess(wave): + wave_tensor = torch.from_numpy(wave).float() + mel_tensor = to_mel(wave_tensor) + mel_tensor = (torch.log(1e-5 + mel_tensor.unsqueeze(0)) - mean) / std + return mel_tensor + +class FilePathDataset(torch.utils.data.Dataset): + def __init__(self, + data_list, + root_path, 
+ sr=24000, + data_augmentation=False, + validation=False, + OOD_data="Data/OOD_texts.txt", + min_length=50, + ): + + spect_params = SPECT_PARAMS + mel_params = MEL_PARAMS + + _data_list = [l.strip().split('|') for l in data_list] + self.data_list = [data if len(data) == 3 else (*data, 0) for data in _data_list] + self.text_cleaner = TextCleaner() + self.sr = sr + + self.df = pd.DataFrame(self.data_list) + + self.to_melspec = torchaudio.transforms.MelSpectrogram(**MEL_PARAMS) + + self.mean, self.std = -4, 4 + self.data_augmentation = data_augmentation and (not validation) + self.max_mel_length = 192 + + self.min_length = min_length + with open(OOD_data, 'r', encoding='utf-8') as f: + tl = f.readlines() + idx = 1 if '.wav' in tl[0].split('|')[0] else 0 + self.ptexts = [t.split('|')[idx] for t in tl] + + self.root_path = root_path + + def __len__(self): + return len(self.data_list) + + def __getitem__(self, idx): + data = self.data_list[idx] + path = data[0] + + wave, text_tensor, speaker_id = self._load_tensor(data) + + mel_tensor = preprocess(wave).squeeze() + + acoustic_feature = mel_tensor.squeeze() + length_feature = acoustic_feature.size(1) + acoustic_feature = acoustic_feature[:, :(length_feature - length_feature % 2)] + + # get reference sample + ref_data = (self.df[self.df[2] == str(speaker_id)]).sample(n=1).iloc[0].tolist() + ref_mel_tensor, ref_label = self._load_data(ref_data[:3]) + + # get OOD text + + ps = "" + + while len(ps) < self.min_length: + rand_idx = np.random.randint(0, len(self.ptexts) - 1) + ps = self.ptexts[rand_idx] + + text = self.text_cleaner(ps) + text.insert(0, 0) + text.append(0) + + ref_text = torch.LongTensor(text) + + return speaker_id, acoustic_feature, text_tensor, ref_text, ref_mel_tensor, ref_label, path, wave + + def _load_tensor(self, data): + wave_path, text, speaker_id = data + speaker_id = int(speaker_id) + wave, sr = sf.read(osp.join(self.root_path, wave_path)) + if wave.shape[-1] == 2: + wave = wave[:, 0].squeeze() + if sr != 24000: + wave = librosa.resample(wave, orig_sr=sr, target_sr=24000) + print(wave_path, sr) + + wave = np.concatenate([np.zeros([5000]), wave, np.zeros([5000])], axis=0) + + text = self.text_cleaner(text) + + text.insert(0, 0) + text.append(0) + + text = torch.LongTensor(text) + + return wave, text, speaker_id + + def _load_data(self, data): + wave, text_tensor, speaker_id = self._load_tensor(data) + mel_tensor = preprocess(wave).squeeze() + + mel_length = mel_tensor.size(1) + if mel_length > self.max_mel_length: + random_start = np.random.randint(0, mel_length - self.max_mel_length) + mel_tensor = mel_tensor[:, random_start:random_start + self.max_mel_length] + + return mel_tensor, speaker_id + + +class Collater(object): + """ + Args: + adaptive_batch_size (bool): if true, decrease batch size when long data comes. 
+ """ + + def __init__(self, return_wave=False): + self.text_pad_index = 0 + self.min_mel_length = 192 + self.max_mel_length = 192 + self.return_wave = return_wave + + + def __call__(self, batch): + # batch[0] = wave, mel, text, f0, speakerid + batch_size = len(batch) + + # sort by mel length + lengths = [b[1].shape[1] for b in batch] + batch_indexes = np.argsort(lengths)[::-1] + batch = [batch[bid] for bid in batch_indexes] + + nmels = batch[0][1].size(0) + max_mel_length = max([b[1].shape[1] for b in batch]) + max_text_length = max([b[2].shape[0] for b in batch]) + max_rtext_length = max([b[3].shape[0] for b in batch]) + + labels = torch.zeros((batch_size)).long() + mels = torch.zeros((batch_size, nmels, max_mel_length)).float() + texts = torch.zeros((batch_size, max_text_length)).long() + ref_texts = torch.zeros((batch_size, max_rtext_length)).long() + + input_lengths = torch.zeros(batch_size).long() + ref_lengths = torch.zeros(batch_size).long() + output_lengths = torch.zeros(batch_size).long() + ref_mels = torch.zeros((batch_size, nmels, self.max_mel_length)).float() + ref_labels = torch.zeros((batch_size)).long() + paths = ['' for _ in range(batch_size)] + waves = [None for _ in range(batch_size)] + + for bid, (label, mel, text, ref_text, ref_mel, ref_label, path, wave) in enumerate(batch): + mel_size = mel.size(1) + text_size = text.size(0) + rtext_size = ref_text.size(0) + labels[bid] = label + mels[bid, :, :mel_size] = mel + texts[bid, :text_size] = text + ref_texts[bid, :rtext_size] = ref_text + input_lengths[bid] = text_size + ref_lengths[bid] = rtext_size + output_lengths[bid] = mel_size + paths[bid] = path + ref_mel_size = ref_mel.size(1) + ref_mels[bid, :, :ref_mel_size] = ref_mel + + ref_labels[bid] = ref_label + waves[bid] = wave + + return waves, texts, input_lengths, ref_texts, ref_lengths, mels, output_lengths, ref_mels + + + +def build_dataloader(path_list, + root_path, + validation=False, + OOD_data="Data/OOD_texts.txt", + min_length=50, + batch_size=4, + num_workers=1, + device='cpu', + collate_config={}, + dataset_config={}): + + dataset = FilePathDataset(path_list, root_path, OOD_data=OOD_data, min_length=min_length, validation=validation, **dataset_config) + collate_fn = Collater(**collate_config) + data_loader = DataLoader(dataset, + batch_size=batch_size, + shuffle=(not validation), + num_workers=num_workers, + drop_last=(not validation), + collate_fn=collate_fn, + pin_memory=(device != 'cpu')) + + return data_loader + diff --git a/styletts2/models.py b/styletts2/models.py new file mode 100644 index 0000000000000000000000000000000000000000..4f90068eac8061f498d0f525cfd430ad2512bf44 --- /dev/null +++ b/styletts2/models.py @@ -0,0 +1,713 @@ +#coding:utf-8 + +import os +import os.path as osp + +import copy +import math + +import numpy as np +import torch +import torch.nn as nn +import torch.nn.functional as F +from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm + +from .Utils.ASR.models import ASRCNN +from .Utils.JDC.model import JDCNet + +from .Modules.diffusion.sampler import KDiffusion, LogNormalDistribution +from .Modules.diffusion.modules import Transformer1d, StyleTransformer1d +from .Modules.diffusion.diffusion import AudioDiffusionConditional + +from .Modules.discriminators import MultiPeriodDiscriminator, MultiResSpecDiscriminator, WavLMDiscriminator + +from munch import Munch +import yaml + +class LearnedDownSample(nn.Module): + def __init__(self, layer_type, dim_in): + super().__init__() + self.layer_type = layer_type + + if 
self.layer_type == 'none': + self.conv = nn.Identity() + elif self.layer_type == 'timepreserve': + self.conv = spectral_norm(nn.Conv2d(dim_in, dim_in, kernel_size=(3, 1), stride=(2, 1), groups=dim_in, padding=(1, 0))) + elif self.layer_type == 'half': + self.conv = spectral_norm(nn.Conv2d(dim_in, dim_in, kernel_size=(3, 3), stride=(2, 2), groups=dim_in, padding=1)) + else: + raise RuntimeError('Got unexpected donwsampletype %s, expected is [none, timepreserve, half]' % self.layer_type) + + def forward(self, x): + return self.conv(x) + +class LearnedUpSample(nn.Module): + def __init__(self, layer_type, dim_in): + super().__init__() + self.layer_type = layer_type + + if self.layer_type == 'none': + self.conv = nn.Identity() + elif self.layer_type == 'timepreserve': + self.conv = nn.ConvTranspose2d(dim_in, dim_in, kernel_size=(3, 1), stride=(2, 1), groups=dim_in, output_padding=(1, 0), padding=(1, 0)) + elif self.layer_type == 'half': + self.conv = nn.ConvTranspose2d(dim_in, dim_in, kernel_size=(3, 3), stride=(2, 2), groups=dim_in, output_padding=1, padding=1) + else: + raise RuntimeError('Got unexpected upsampletype %s, expected is [none, timepreserve, half]' % self.layer_type) + + + def forward(self, x): + return self.conv(x) + +class DownSample(nn.Module): + def __init__(self, layer_type): + super().__init__() + self.layer_type = layer_type + + def forward(self, x): + if self.layer_type == 'none': + return x + elif self.layer_type == 'timepreserve': + return F.avg_pool2d(x, (2, 1)) + elif self.layer_type == 'half': + if x.shape[-1] % 2 != 0: + x = torch.cat([x, x[..., -1].unsqueeze(-1)], dim=-1) + return F.avg_pool2d(x, 2) + else: + raise RuntimeError('Got unexpected donwsampletype %s, expected is [none, timepreserve, half]' % self.layer_type) + + +class UpSample(nn.Module): + def __init__(self, layer_type): + super().__init__() + self.layer_type = layer_type + + def forward(self, x): + if self.layer_type == 'none': + return x + elif self.layer_type == 'timepreserve': + return F.interpolate(x, scale_factor=(2, 1), mode='nearest') + elif self.layer_type == 'half': + return F.interpolate(x, scale_factor=2, mode='nearest') + else: + raise RuntimeError('Got unexpected upsampletype %s, expected is [none, timepreserve, half]' % self.layer_type) + + +class ResBlk(nn.Module): + def __init__(self, dim_in, dim_out, actv=nn.LeakyReLU(0.2), + normalize=False, downsample='none'): + super().__init__() + self.actv = actv + self.normalize = normalize + self.downsample = DownSample(downsample) + self.downsample_res = LearnedDownSample(downsample, dim_in) + self.learned_sc = dim_in != dim_out + self._build_weights(dim_in, dim_out) + + def _build_weights(self, dim_in, dim_out): + self.conv1 = spectral_norm(nn.Conv2d(dim_in, dim_in, 3, 1, 1)) + self.conv2 = spectral_norm(nn.Conv2d(dim_in, dim_out, 3, 1, 1)) + if self.normalize: + self.norm1 = nn.InstanceNorm2d(dim_in, affine=True) + self.norm2 = nn.InstanceNorm2d(dim_in, affine=True) + if self.learned_sc: + self.conv1x1 = spectral_norm(nn.Conv2d(dim_in, dim_out, 1, 1, 0, bias=False)) + + def _shortcut(self, x): + if self.learned_sc: + x = self.conv1x1(x) + if self.downsample: + x = self.downsample(x) + return x + + def _residual(self, x): + if self.normalize: + x = self.norm1(x) + x = self.actv(x) + x = self.conv1(x) + x = self.downsample_res(x) + if self.normalize: + x = self.norm2(x) + x = self.actv(x) + x = self.conv2(x) + return x + + def forward(self, x): + x = self._shortcut(x) + self._residual(x) + return x / math.sqrt(2) # unit variance + +class 
StyleEncoder(nn.Module): + def __init__(self, dim_in=48, style_dim=48, max_conv_dim=384): + super().__init__() + blocks = [] + blocks += [spectral_norm(nn.Conv2d(1, dim_in, 3, 1, 1))] + + repeat_num = 4 + for _ in range(repeat_num): + dim_out = min(dim_in*2, max_conv_dim) + blocks += [ResBlk(dim_in, dim_out, downsample='half')] + dim_in = dim_out + + blocks += [nn.LeakyReLU(0.2)] + blocks += [spectral_norm(nn.Conv2d(dim_out, dim_out, 5, 1, 0))] + blocks += [nn.AdaptiveAvgPool2d(1)] + blocks += [nn.LeakyReLU(0.2)] + self.shared = nn.Sequential(*blocks) + + self.unshared = nn.Linear(dim_out, style_dim) + + def forward(self, x): + h = self.shared(x) + h = h.view(h.size(0), -1) + s = self.unshared(h) + + return s + +class LinearNorm(torch.nn.Module): + def __init__(self, in_dim, out_dim, bias=True, w_init_gain='linear'): + super(LinearNorm, self).__init__() + self.linear_layer = torch.nn.Linear(in_dim, out_dim, bias=bias) + + torch.nn.init.xavier_uniform_( + self.linear_layer.weight, + gain=torch.nn.init.calculate_gain(w_init_gain)) + + def forward(self, x): + return self.linear_layer(x) + +class Discriminator2d(nn.Module): + def __init__(self, dim_in=48, num_domains=1, max_conv_dim=384, repeat_num=4): + super().__init__() + blocks = [] + blocks += [spectral_norm(nn.Conv2d(1, dim_in, 3, 1, 1))] + + for lid in range(repeat_num): + dim_out = min(dim_in*2, max_conv_dim) + blocks += [ResBlk(dim_in, dim_out, downsample='half')] + dim_in = dim_out + + blocks += [nn.LeakyReLU(0.2)] + blocks += [spectral_norm(nn.Conv2d(dim_out, dim_out, 5, 1, 0))] + blocks += [nn.LeakyReLU(0.2)] + blocks += [nn.AdaptiveAvgPool2d(1)] + blocks += [spectral_norm(nn.Conv2d(dim_out, num_domains, 1, 1, 0))] + self.main = nn.Sequential(*blocks) + + def get_feature(self, x): + features = [] + for l in self.main: + x = l(x) + features.append(x) + out = features[-1] + out = out.view(out.size(0), -1) # (batch, num_domains) + return out, features + + def forward(self, x): + out, features = self.get_feature(x) + out = out.squeeze() # (batch) + return out, features + +class ResBlk1d(nn.Module): + def __init__(self, dim_in, dim_out, actv=nn.LeakyReLU(0.2), + normalize=False, downsample='none', dropout_p=0.2): + super().__init__() + self.actv = actv + self.normalize = normalize + self.downsample_type = downsample + self.learned_sc = dim_in != dim_out + self._build_weights(dim_in, dim_out) + self.dropout_p = dropout_p + + if self.downsample_type == 'none': + self.pool = nn.Identity() + else: + self.pool = weight_norm(nn.Conv1d(dim_in, dim_in, kernel_size=3, stride=2, groups=dim_in, padding=1)) + + def _build_weights(self, dim_in, dim_out): + self.conv1 = weight_norm(nn.Conv1d(dim_in, dim_in, 3, 1, 1)) + self.conv2 = weight_norm(nn.Conv1d(dim_in, dim_out, 3, 1, 1)) + if self.normalize: + self.norm1 = nn.InstanceNorm1d(dim_in, affine=True) + self.norm2 = nn.InstanceNorm1d(dim_in, affine=True) + if self.learned_sc: + self.conv1x1 = weight_norm(nn.Conv1d(dim_in, dim_out, 1, 1, 0, bias=False)) + + def downsample(self, x): + if self.downsample_type == 'none': + return x + else: + if x.shape[-1] % 2 != 0: + x = torch.cat([x, x[..., -1].unsqueeze(-1)], dim=-1) + return F.avg_pool1d(x, 2) + + def _shortcut(self, x): + if self.learned_sc: + x = self.conv1x1(x) + x = self.downsample(x) + return x + + def _residual(self, x): + if self.normalize: + x = self.norm1(x) + x = self.actv(x) + x = F.dropout(x, p=self.dropout_p, training=self.training) + + x = self.conv1(x) + x = self.pool(x) + if self.normalize: + x = self.norm2(x) + + x = self.actv(x) + 
x = F.dropout(x, p=self.dropout_p, training=self.training) + + x = self.conv2(x) + return x + + def forward(self, x): + x = self._shortcut(x) + self._residual(x) + return x / math.sqrt(2) # unit variance + +class LayerNorm(nn.Module): + def __init__(self, channels, eps=1e-5): + super().__init__() + self.channels = channels + self.eps = eps + + self.gamma = nn.Parameter(torch.ones(channels)) + self.beta = nn.Parameter(torch.zeros(channels)) + + def forward(self, x): + x = x.transpose(1, -1) + x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps) + return x.transpose(1, -1) + +class TextEncoder(nn.Module): + def __init__(self, channels, kernel_size, depth, n_symbols, actv=nn.LeakyReLU(0.2)): + super().__init__() + self.embedding = nn.Embedding(n_symbols, channels) + + padding = (kernel_size - 1) // 2 + self.cnn = nn.ModuleList() + for _ in range(depth): + self.cnn.append(nn.Sequential( + weight_norm(nn.Conv1d(channels, channels, kernel_size=kernel_size, padding=padding)), + LayerNorm(channels), + actv, + nn.Dropout(0.2), + )) + # self.cnn = nn.Sequential(*self.cnn) + + self.lstm = nn.LSTM(channels, channels//2, 1, batch_first=True, bidirectional=True) + + def forward(self, x, input_lengths, m): + x = self.embedding(x) # [B, T, emb] + x = x.transpose(1, 2) # [B, emb, T] + m = m.to(input_lengths.device).unsqueeze(1) + x.masked_fill_(m, 0.0) + + for c in self.cnn: + x = c(x) + x.masked_fill_(m, 0.0) + + x = x.transpose(1, 2) # [B, T, chn] + + input_lengths = input_lengths.cpu().numpy() + x = nn.utils.rnn.pack_padded_sequence( + x, input_lengths, batch_first=True, enforce_sorted=False) + + self.lstm.flatten_parameters() + x, _ = self.lstm(x) + x, _ = nn.utils.rnn.pad_packed_sequence( + x, batch_first=True) + + x = x.transpose(-1, -2) + x_pad = torch.zeros([x.shape[0], x.shape[1], m.shape[-1]]) + + x_pad[:, :, :x.shape[-1]] = x + x = x_pad.to(x.device) + + x.masked_fill_(m, 0.0) + + return x + + def inference(self, x): + x = self.embedding(x) + x = x.transpose(1, 2) + x = self.cnn(x) + x = x.transpose(1, 2) + self.lstm.flatten_parameters() + x, _ = self.lstm(x) + return x + + def length_to_mask(self, lengths): + mask = torch.arange(lengths.max()).unsqueeze(0).expand(lengths.shape[0], -1).type_as(lengths) + mask = torch.gt(mask+1, lengths.unsqueeze(1)) + return mask + + + +class AdaIN1d(nn.Module): + def __init__(self, style_dim, num_features): + super().__init__() + self.norm = nn.InstanceNorm1d(num_features, affine=False) + self.fc = nn.Linear(style_dim, num_features*2) + + def forward(self, x, s): + h = self.fc(s) + h = h.view(h.size(0), h.size(1), 1) + gamma, beta = torch.chunk(h, chunks=2, dim=1) + return (1 + gamma) * self.norm(x) + beta + +class UpSample1d(nn.Module): + def __init__(self, layer_type): + super().__init__() + self.layer_type = layer_type + + def forward(self, x): + if self.layer_type == 'none': + return x + else: + return F.interpolate(x, scale_factor=2, mode='nearest') + +class AdainResBlk1d(nn.Module): + def __init__(self, dim_in, dim_out, style_dim=64, actv=nn.LeakyReLU(0.2), + upsample='none', dropout_p=0.0): + super().__init__() + self.actv = actv + self.upsample_type = upsample + self.upsample = UpSample1d(upsample) + self.learned_sc = dim_in != dim_out + self._build_weights(dim_in, dim_out, style_dim) + self.dropout = nn.Dropout(dropout_p) + + if upsample == 'none': + self.pool = nn.Identity() + else: + self.pool = weight_norm(nn.ConvTranspose1d(dim_in, dim_in, kernel_size=3, stride=2, groups=dim_in, padding=1, output_padding=1)) + + + def 
_build_weights(self, dim_in, dim_out, style_dim): + self.conv1 = weight_norm(nn.Conv1d(dim_in, dim_out, 3, 1, 1)) + self.conv2 = weight_norm(nn.Conv1d(dim_out, dim_out, 3, 1, 1)) + self.norm1 = AdaIN1d(style_dim, dim_in) + self.norm2 = AdaIN1d(style_dim, dim_out) + if self.learned_sc: + self.conv1x1 = weight_norm(nn.Conv1d(dim_in, dim_out, 1, 1, 0, bias=False)) + + def _shortcut(self, x): + x = self.upsample(x) + if self.learned_sc: + x = self.conv1x1(x) + return x + + def _residual(self, x, s): + x = self.norm1(x, s) + x = self.actv(x) + x = self.pool(x) + x = self.conv1(self.dropout(x)) + x = self.norm2(x, s) + x = self.actv(x) + x = self.conv2(self.dropout(x)) + return x + + def forward(self, x, s): + out = self._residual(x, s) + out = (out + self._shortcut(x)) / math.sqrt(2) + return out + +class AdaLayerNorm(nn.Module): + def __init__(self, style_dim, channels, eps=1e-5): + super().__init__() + self.channels = channels + self.eps = eps + + self.fc = nn.Linear(style_dim, channels*2) + + def forward(self, x, s): + x = x.transpose(-1, -2) + x = x.transpose(1, -1) + + h = self.fc(s) + h = h.view(h.size(0), h.size(1), 1) + gamma, beta = torch.chunk(h, chunks=2, dim=1) + gamma, beta = gamma.transpose(1, -1), beta.transpose(1, -1) + + + x = F.layer_norm(x, (self.channels,), eps=self.eps) + x = (1 + gamma) * x + beta + return x.transpose(1, -1).transpose(-1, -2) + +class ProsodyPredictor(nn.Module): + + def __init__(self, style_dim, d_hid, nlayers, max_dur=50, dropout=0.1): + super().__init__() + + self.text_encoder = DurationEncoder(sty_dim=style_dim, + d_model=d_hid, + nlayers=nlayers, + dropout=dropout) + + self.lstm = nn.LSTM(d_hid + style_dim, d_hid // 2, 1, batch_first=True, bidirectional=True) + self.duration_proj = LinearNorm(d_hid, max_dur) + + self.shared = nn.LSTM(d_hid + style_dim, d_hid // 2, 1, batch_first=True, bidirectional=True) + self.F0 = nn.ModuleList() + self.F0.append(AdainResBlk1d(d_hid, d_hid, style_dim, dropout_p=dropout)) + self.F0.append(AdainResBlk1d(d_hid, d_hid // 2, style_dim, upsample=True, dropout_p=dropout)) + self.F0.append(AdainResBlk1d(d_hid // 2, d_hid // 2, style_dim, dropout_p=dropout)) + + self.N = nn.ModuleList() + self.N.append(AdainResBlk1d(d_hid, d_hid, style_dim, dropout_p=dropout)) + self.N.append(AdainResBlk1d(d_hid, d_hid // 2, style_dim, upsample=True, dropout_p=dropout)) + self.N.append(AdainResBlk1d(d_hid // 2, d_hid // 2, style_dim, dropout_p=dropout)) + + self.F0_proj = nn.Conv1d(d_hid // 2, 1, 1, 1, 0) + self.N_proj = nn.Conv1d(d_hid // 2, 1, 1, 1, 0) + + + def forward(self, texts, style, text_lengths, alignment, m): + d = self.text_encoder(texts, style, text_lengths, m) + + batch_size = d.shape[0] + text_size = d.shape[1] + + # predict duration + input_lengths = text_lengths.cpu().numpy() + x = nn.utils.rnn.pack_padded_sequence( + d, input_lengths, batch_first=True, enforce_sorted=False) + + m = m.to(text_lengths.device).unsqueeze(1) + + self.lstm.flatten_parameters() + x, _ = self.lstm(x) + x, _ = nn.utils.rnn.pad_packed_sequence( + x, batch_first=True) + + x_pad = torch.zeros([x.shape[0], m.shape[-1], x.shape[-1]]) + + x_pad[:, :x.shape[1], :] = x + x = x_pad.to(x.device) + + duration = self.duration_proj(nn.functional.dropout(x, 0.5, training=self.training)) + + en = (d.transpose(-1, -2) @ alignment) + + return duration.squeeze(-1), en + + def F0Ntrain(self, x, s): + x, _ = self.shared(x.transpose(-1, -2)) + + F0 = x.transpose(-1, -2) + for block in self.F0: + F0 = block(F0, s) + F0 = self.F0_proj(F0) + + N = x.transpose(-1, -2) + for 
block in self.N: + N = block(N, s) + N = self.N_proj(N) + + return F0.squeeze(1), N.squeeze(1) + + def length_to_mask(self, lengths): + mask = torch.arange(lengths.max()).unsqueeze(0).expand(lengths.shape[0], -1).type_as(lengths) + mask = torch.gt(mask+1, lengths.unsqueeze(1)) + return mask + +class DurationEncoder(nn.Module): + + def __init__(self, sty_dim, d_model, nlayers, dropout=0.1): + super().__init__() + self.lstms = nn.ModuleList() + for _ in range(nlayers): + self.lstms.append(nn.LSTM(d_model + sty_dim, + d_model // 2, + num_layers=1, + batch_first=True, + bidirectional=True, + dropout=dropout)) + self.lstms.append(AdaLayerNorm(sty_dim, d_model)) + + + self.dropout = dropout + self.d_model = d_model + self.sty_dim = sty_dim + + def forward(self, x, style, text_lengths, m): + masks = m.to(text_lengths.device) + + x = x.permute(2, 0, 1) + s = style.expand(x.shape[0], x.shape[1], -1) + x = torch.cat([x, s], axis=-1) + x.masked_fill_(masks.unsqueeze(-1).transpose(0, 1), 0.0) + + x = x.transpose(0, 1) + input_lengths = text_lengths.cpu().numpy() + x = x.transpose(-1, -2) + + for block in self.lstms: + if isinstance(block, AdaLayerNorm): + x = block(x.transpose(-1, -2), style).transpose(-1, -2) + x = torch.cat([x, s.permute(1, -1, 0)], axis=1) + x.masked_fill_(masks.unsqueeze(-1).transpose(-1, -2), 0.0) + else: + x = x.transpose(-1, -2) + x = nn.utils.rnn.pack_padded_sequence( + x, input_lengths, batch_first=True, enforce_sorted=False) + block.flatten_parameters() + x, _ = block(x) + x, _ = nn.utils.rnn.pad_packed_sequence( + x, batch_first=True) + x = F.dropout(x, p=self.dropout, training=self.training) + x = x.transpose(-1, -2) + + x_pad = torch.zeros([x.shape[0], x.shape[1], m.shape[-1]]) + + x_pad[:, :, :x.shape[-1]] = x + x = x_pad.to(x.device) + + return x.transpose(-1, -2) + + def inference(self, x, style): + x = self.embedding(x.transpose(-1, -2)) * math.sqrt(self.d_model) + style = style.expand(x.shape[0], x.shape[1], -1) + x = torch.cat([x, style], axis=-1) + src = self.pos_encoder(x) + output = self.transformer_encoder(src).transpose(0, 1) + return output + + def length_to_mask(self, lengths): + mask = torch.arange(lengths.max()).unsqueeze(0).expand(lengths.shape[0], -1).type_as(lengths) + mask = torch.gt(mask+1, lengths.unsqueeze(1)) + return mask + +def load_F0_models(path): + # load F0 model + + F0_model = JDCNet(num_class=1, seq_len=192) + params = torch.load(path, map_location='cpu')['net'] + F0_model.load_state_dict(params) + _ = F0_model.train() + + return F0_model + +def load_ASR_models(ASR_MODEL_PATH, ASR_MODEL_CONFIG): + # load ASR model + def _load_config(path): + with open(path) as f: + config = yaml.safe_load(f) + model_config = config['model_params'] + return model_config + + def _load_model(model_config, model_path): + model = ASRCNN(**model_config) + params = torch.load(model_path, map_location='cpu')['model'] + model.load_state_dict(params) + return model + + asr_model_config = _load_config(ASR_MODEL_CONFIG) + asr_model = _load_model(asr_model_config, ASR_MODEL_PATH) + _ = asr_model.train() + + return asr_model + +def build_model(args, text_aligner, pitch_extractor, bert): + assert args.decoder.type in ['istftnet', 'hifigan'], 'Decoder type unknown' + + if args.decoder.type == "istftnet": + from .Modules.istftnet import Decoder + decoder = Decoder(dim_in=args.hidden_dim, style_dim=args.style_dim, dim_out=args.n_mels, + resblock_kernel_sizes = args.decoder.resblock_kernel_sizes, + upsample_rates = args.decoder.upsample_rates, + 
upsample_initial_channel=args.decoder.upsample_initial_channel, + resblock_dilation_sizes=args.decoder.resblock_dilation_sizes, + upsample_kernel_sizes=args.decoder.upsample_kernel_sizes, + gen_istft_n_fft=args.decoder.gen_istft_n_fft, gen_istft_hop_size=args.decoder.gen_istft_hop_size) + else: + from .Modules.hifigan import Decoder + decoder = Decoder(dim_in=args.hidden_dim, style_dim=args.style_dim, dim_out=args.n_mels, + resblock_kernel_sizes = args.decoder.resblock_kernel_sizes, + upsample_rates = args.decoder.upsample_rates, + upsample_initial_channel=args.decoder.upsample_initial_channel, + resblock_dilation_sizes=args.decoder.resblock_dilation_sizes, + upsample_kernel_sizes=args.decoder.upsample_kernel_sizes) + + text_encoder = TextEncoder(channels=args.hidden_dim, kernel_size=5, depth=args.n_layer, n_symbols=args.n_token) + + predictor = ProsodyPredictor(style_dim=args.style_dim, d_hid=args.hidden_dim, nlayers=args.n_layer, max_dur=args.max_dur, dropout=args.dropout) + + style_encoder = StyleEncoder(dim_in=args.dim_in, style_dim=args.style_dim, max_conv_dim=args.hidden_dim) # acoustic style encoder + predictor_encoder = StyleEncoder(dim_in=args.dim_in, style_dim=args.style_dim, max_conv_dim=args.hidden_dim) # prosodic style encoder + + # define diffusion model + if args.multispeaker: + transformer = StyleTransformer1d(channels=args.style_dim*2, + context_embedding_features=bert.config.hidden_size, + context_features=args.style_dim*2, + **args.diffusion.transformer) + else: + transformer = Transformer1d(channels=args.style_dim*2, + context_embedding_features=bert.config.hidden_size, + **args.diffusion.transformer) + + diffusion = AudioDiffusionConditional( + in_channels=1, + embedding_max_length=bert.config.max_position_embeddings, + embedding_features=bert.config.hidden_size, + embedding_mask_proba=args.diffusion.embedding_mask_proba, # Conditional dropout of batch elements, + channels=args.style_dim*2, + context_features=args.style_dim*2, + ) + + diffusion.diffusion = KDiffusion( + net=diffusion.unet, + sigma_distribution=LogNormalDistribution(mean = args.diffusion.dist.mean, std = args.diffusion.dist.std), + sigma_data=args.diffusion.dist.sigma_data, # a placeholder, will be changed dynamically when start training diffusion model + dynamic_threshold=0.0 + ) + diffusion.diffusion.net = transformer + diffusion.unet = transformer + + + nets = Munch( + bert=bert, + bert_encoder=nn.Linear(bert.config.hidden_size, args.hidden_dim), + + predictor=predictor, + decoder=decoder, + text_encoder=text_encoder, + + predictor_encoder=predictor_encoder, + style_encoder=style_encoder, + diffusion=diffusion, + + text_aligner = text_aligner, + pitch_extractor=pitch_extractor, + + mpd = MultiPeriodDiscriminator(), + msd = MultiResSpecDiscriminator(), + + # slm discriminator head + wd = WavLMDiscriminator(args.slm.hidden, args.slm.nlayers, args.slm.initial_channel), + ) + + return nets + +def load_checkpoint(model, optimizer, path, load_only_params=True, ignore_modules=[]): + state = torch.load(path, map_location='cpu') + params = state['net'] + for key in model: + if key in params and key not in ignore_modules: + print('%s loaded' % key) + model[key].load_state_dict(params[key], strict=False) + _ = [model[key].eval() for key in model] + + if not load_only_params: + epoch = state["epoch"] + iters = state["iters"] + optimizer.load_state_dict(state["optimizer"]) + else: + epoch = 0 + iters = 0 + + return model, optimizer, epoch, iters diff --git a/styletts2/optimizers.py b/styletts2/optimizers.py new 
file mode 100644 index 0000000000000000000000000000000000000000..032ae15d9dd5f0020d68d28440b25c3570bc8ed2 --- /dev/null +++ b/styletts2/optimizers.py @@ -0,0 +1,73 @@ +#coding:utf-8 +import os, sys +import os.path as osp +import numpy as np +import torch +from torch import nn +from torch.optim import Optimizer +from functools import reduce +from torch.optim import AdamW + +class MultiOptimizer: + def __init__(self, optimizers={}, schedulers={}): + self.optimizers = optimizers + self.schedulers = schedulers + self.keys = list(optimizers.keys()) + self.param_groups = reduce(lambda x,y: x+y, [v.param_groups for v in self.optimizers.values()]) + + def state_dict(self): + state_dicts = [(key, self.optimizers[key].state_dict())\ + for key in self.keys] + return state_dicts + + def load_state_dict(self, state_dict): + for key, val in state_dict: + try: + self.optimizers[key].load_state_dict(val) + except: + print("Unloaded %s" % key) + + def step(self, key=None, scaler=None): + keys = [key] if key is not None else self.keys + _ = [self._step(key, scaler) for key in keys] + + def _step(self, key, scaler=None): + if scaler is not None: + scaler.step(self.optimizers[key]) + scaler.update() + else: + self.optimizers[key].step() + + def zero_grad(self, key=None): + if key is not None: + self.optimizers[key].zero_grad() + else: + _ = [self.optimizers[key].zero_grad() for key in self.keys] + + def scheduler(self, *args, key=None): + if key is not None: + self.schedulers[key].step(*args) + else: + _ = [self.schedulers[key].step(*args) for key in self.keys] + +def define_scheduler(optimizer, params): + scheduler = torch.optim.lr_scheduler.OneCycleLR( + optimizer, + max_lr=params.get('max_lr', 2e-4), + epochs=params.get('epochs', 200), + steps_per_epoch=params.get('steps_per_epoch', 1000), + pct_start=params.get('pct_start', 0.0), + div_factor=1, + final_div_factor=1) + + return scheduler + +def build_optimizer(parameters_dict, scheduler_params_dict, lr): + optim = dict([(key, AdamW(params, lr=lr, weight_decay=1e-4, betas=(0.0, 0.99), eps=1e-9)) + for key, params in parameters_dict.items()]) + + schedulers = dict([(key, define_scheduler(opt, scheduler_params_dict[key])) \ + for key, opt in optim.items()]) + + multi_optim = MultiOptimizer(optim, schedulers) + return multi_optim \ No newline at end of file diff --git a/styletts2/phoneme.py b/styletts2/phoneme.py new file mode 100644 index 0000000000000000000000000000000000000000..fd863d6d9c68ef82dd899ec5eb8ddf5f8ceafc6b --- /dev/null +++ b/styletts2/phoneme.py @@ -0,0 +1,34 @@ +from gruut import sentences +from collections.abc import Iterable + + +class PhonemeConverter: + def phonemize(self, text): + pass + + +class GruutPhonemizer(PhonemeConverter): + def phonemize(self, text, lang='en-us'): + phonemized = [] + for sent in sentences(text, lang=lang): + for word in sent: + if isinstance(word.phonemes, Iterable): + phonemized.append(''.join(word.phonemes)) + elif isinstance(word.phonemes, str): + phonemized.append(word.phonemes) + phonemized_text = ' '.join(phonemized) + return phonemized_text + + +# class YourPhonemizer(Phonemizer): +# def phonemize(self, text): +# ... 
+ + +class PhonemeConverterFactory: + @staticmethod + def load_phoneme_converter(name: str, **kwargs): + if name == 'gruut': + return GruutPhonemizer() + else: + raise ValueError("Invalid phoneme converter.") \ No newline at end of file diff --git a/styletts2/requirements.txt b/styletts2/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..7faf08b0f238f19c97cb8770f41416a307ee7d36 --- /dev/null +++ b/styletts2/requirements.txt @@ -0,0 +1,20 @@ +SoundFile +torchaudio +munch +torch +pydub +pyyaml +librosa +nltk +matplotlib +accelerate +transformers +einops +einops-exts +tqdm +typing +typing-extensions +git+https://github.com/resemble-ai/monotonic_align.git +gruut>=2.3.4 +gruut-ipa>=0.13.0 +gruut-lang-en>=2.0.0 \ No newline at end of file diff --git a/styletts2/text_utils.py b/styletts2/text_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..97c252ba5953910fc39fd0b25b51f01e17ef0f81 --- /dev/null +++ b/styletts2/text_utils.py @@ -0,0 +1,26 @@ +# IPA Phonemizer: https://github.com/bootphon/phonemizer + +_pad = "$" +_punctuation = ';:,.!?¡¿—…"«»“” ' +_letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz' +_letters_ipa = "ɑɐɒæɓʙβɔɕçɗɖðʤəɘɚɛɜɝɞɟʄɡɠɢʛɦɧħɥʜɨɪʝɭɬɫɮʟɱɯɰŋɳɲɴøɵɸθœɶʘɹɺɾɻʀʁɽʂʃʈʧʉʊʋⱱʌɣɤʍχʎʏʑʐʒʔʡʕʢǀǁǂǃˈˌːˑʼʴʰʱʲʷˠˤ˞↓↑→↗↘'̩'ᵻ" + +# Export all symbols: +symbols = [_pad] + list(_punctuation) + list(_letters) + list(_letters_ipa) + +dicts = {} +for i in range(len((symbols))): + dicts[symbols[i]] = i + +class TextCleaner: + def __init__(self, dummy=None): + self.word_index_dictionary = dicts + print(len(dicts)) + def __call__(self, text): + indexes = [] + for char in text: + try: + indexes.append(self.word_index_dictionary[char]) + except KeyError: + print(text) + return indexes diff --git a/styletts2/tts.py b/styletts2/tts.py new file mode 100644 index 0000000000000000000000000000000000000000..f572206b14862cec2bf0a1f17ddf5c70b6c33455 --- /dev/null +++ b/styletts2/tts.py @@ -0,0 +1,451 @@ +from nltk.tokenize import word_tokenize +import nltk +nltk.download('punkt') + +from pathlib import Path +import librosa +import scipy +import torch +import torchaudio +from cached_path import cached_path +torch.manual_seed(0) +torch.backends.cudnn.benchmark = False +torch.backends.cudnn.deterministic = True + +import random +random.seed(0) + +import numpy as np +np.random.seed(0) + +from langchain.text_splitter import RecursiveCharacterTextSplitter +import yaml + +from . import models +from . 
import utils +from .phoneme import PhonemeConverterFactory +from .text_utils import TextCleaner +from .Utils.PLBERT.util import load_plbert +from .Modules.diffusion.sampler import DiffusionSampler, ADPM2Sampler, KarrasSchedule + + +LIBRI_TTS_CHECKPOINT_URL = "https://huggingface.co/yl4579/StyleTTS2-LibriTTS/resolve/main/Models/LibriTTS/epochs_2nd_00020.pth" +LIBRI_TTS_CONFIG_URL = "https://huggingface.co/yl4579/StyleTTS2-LibriTTS/resolve/main/Models/LibriTTS/config.yml?download=true" + +ASR_CHECKPOINT_URL = "https://github.com/yl4579/StyleTTS2/raw/main/Utils/ASR/epoch_00080.pth" +ASR_CONFIG_URL = "https://github.com/yl4579/StyleTTS2/raw/main/Utils/ASR/config.yml" +F0_CHECKPOINT_URL = "https://github.com/yl4579/StyleTTS2/raw/main/Utils/JDC/bst.t7" +BERT_CHECKPOINT_URL = "https://github.com/yl4579/StyleTTS2/raw/main/Utils/PLBERT/step_1000000.t7" +BERT_CONFIG_URL = "https://github.com/yl4579/StyleTTS2/raw/main/Utils/PLBERT/config.yml" + +DEFAULT_TARGET_VOICE_URL = "https://styletts2.github.io/wavs/LJSpeech/OOD/GT/00001.wav" + +SINGLE_INFERENCE_MAX_LEN = 420 + +to_mel = torchaudio.transforms.MelSpectrogram( + n_mels=80, n_fft=2048, win_length=1200, hop_length=300) +mean, std = -4, 4 + + +def length_to_mask(lengths): + mask = torch.arange(lengths.max()).unsqueeze(0).expand(lengths.shape[0], -1).type_as(lengths) + mask = torch.gt(mask+1, lengths.unsqueeze(1)) + return mask + + +def preprocess(wave): + wave_tensor = torch.from_numpy(wave).float() + mel_tensor = to_mel(wave_tensor) + mel_tensor = (torch.log(1e-5 + mel_tensor.unsqueeze(0)) - mean) / std + return mel_tensor + + +def segment_text(text): + splitter = RecursiveCharacterTextSplitter( + separators=["\n\n", "\n", " ", ""], + chunk_size=SINGLE_INFERENCE_MAX_LEN, + chunk_overlap=0, + length_function=len, + ) + segments = splitter.split_text(text) + return segments + + +class StyleTTS2: + def __init__(self, model_checkpoint_path=None, config_path=None, phoneme_converter='gruut'): + self.model = None + self.device = 'cuda' if torch.cuda.is_available() else 'cpu' + self.phoneme_converter = PhonemeConverterFactory.load_phoneme_converter(phoneme_converter) + self.config = None + self.model_params = None + self.model = self.load_model(model_path=model_checkpoint_path, config_path=config_path) + + self.sampler = DiffusionSampler( + self.model.diffusion.diffusion, + sampler=ADPM2Sampler(), + sigma_schedule=KarrasSchedule(sigma_min=0.0001, sigma_max=3.0, rho=9.0), # empirical parameters + clamp=False + ) + + + def load_model(self, model_path=None, config_path=None): + """ + Loads model to prepare for inference. Loads checkpoints from provided paths or from local cache (or downloads + default checkpoints to local cache if not present). + :param model_path: Path to LibriTTS StyleTTS2 model checkpoint (TODO: LJSpeech model support) + :param config_path: Path to LibriTTS StyleTTS2 model config JSON (TODO: LJSpeech model support) + :return: + """ + + if not model_path or not Path(model_path).exists(): + print("Invalid or missing model checkpoint path. Loading default model...") + model_path = cached_path(LIBRI_TTS_CHECKPOINT_URL) + + if not config_path or not Path(config_path).exists(): + print("Invalid or missing config path. Loading default config...") + config_path = cached_path(LIBRI_TTS_CONFIG_URL) + + self.config = yaml.safe_load(open(config_path)) + + # load pretrained ASR model + ASR_config = self.config.get('ASR_config', False) + if not ASR_config or not Path(ASR_config).exists(): + print("Invalid ASR config path. 
Loading default config...") + ASR_config = cached_path(ASR_CONFIG_URL) + ASR_path = self.config.get('ASR_path', False) + if not ASR_path or not Path(ASR_path).exists(): + print("Invalid ASR model checkpoint path. Loading default model...") + ASR_path = cached_path(ASR_CHECKPOINT_URL) + text_aligner = models.load_ASR_models(ASR_path, ASR_config) + + # load pretrained F0 model + F0_path = self.config.get('F0_path', False) + if F0_path or not Path(F0_path).exists(): + print("Invalid F0 model path. Loading default model...") + F0_path = cached_path(F0_CHECKPOINT_URL) + pitch_extractor = models.load_F0_models(F0_path) + + # load BERT model + BERT_dir_path = self.config.get('PLBERT_dir', False) # Directory at BERT_dir_path should contain PLBERT config.yml AND checkpoint + if not BERT_dir_path or not Path(BERT_dir_path).exists(): + BERT_config_path = cached_path(BERT_CONFIG_URL) + BERT_checkpoint_path = cached_path(BERT_CHECKPOINT_URL) + plbert = load_plbert(None, config_path=BERT_config_path, checkpoint_path=BERT_checkpoint_path) + else: + plbert = load_plbert(BERT_dir_path) + + self.model_params = utils.recursive_munch(self.config['model_params']) + model = models.build_model(self.model_params, text_aligner, pitch_extractor, plbert) + _ = [model[key].eval() for key in model] + _ = [model[key].to(self.device) for key in model] + + params_whole = torch.load(model_path, map_location='cpu') + params = params_whole['net'] + + for key in model: + if key in params: + print('%s loaded' % key) + try: + model[key].load_state_dict(params[key]) + except: + from collections import OrderedDict + state_dict = params[key] + new_state_dict = OrderedDict() + for k, v in state_dict.items(): + name = k[7:] # remove `module.` + new_state_dict[name] = v + # load params + model[key].load_state_dict(new_state_dict, strict=False) + # except: + # _load(params[key], model[key]) + _ = [model[key].eval() for key in model] + + return model + + + def compute_style(self, path): + """ + Compute style vector, essentially an embedding that captures the characteristics + of the target voice that is being cloned + :param path: Path to target voice audio file + :return: style vector + """ + wave, sr = librosa.load(path, sr=24000) + audio, index = librosa.effects.trim(wave, top_db=30) + if sr != 24000: + audio = librosa.resample(audio, sr, 24000) + mel_tensor = preprocess(audio).to(self.device) + + with torch.no_grad(): + ref_s = self.model.style_encoder(mel_tensor.unsqueeze(1)) + ref_p = self.model.predictor_encoder(mel_tensor.unsqueeze(1)) + + return torch.cat([ref_s, ref_p], dim=1) + + + def inference(self, + text: str, + target_voice_path=None, + output_wav_file=None, + output_sample_rate=24000, + alpha=0.3, + beta=0.7, + diffusion_steps=5, + embedding_scale=1, + ref_s=None): + """ + Text-to-speech function + :param text: Input text to turn into speech. + :param target_voice_path: Path to audio file of target voice to clone. + :param output_wav_file: Name of output audio file (if output WAV file is desired). + :param output_sample_rate: Output sample rate (default 24000). + :param alpha: Determines timbre of speech, higher means style is more suitable to text than to the target voice. + :param beta: Determines prosody of speech, higher means style is more suitable to text than to the target voice. + :param diffusion_steps: The more the steps, the more diverse the samples are, with the cost of speed. + :param embedding_scale: Higher scale means style is more conditional to the input text and hence more emotional. 
+ :param ref_s: Pre-computed style vector to pass directly. + :return: audio data as a Numpy array (will also create the WAV file if output_wav_file was set). + """ + + # BERT model is limited by a tensor size [1, 512] during its inference, which roughly corresponds to ~450 characters + if len(text) > SINGLE_INFERENCE_MAX_LEN: + return self.long_inference(text, + target_voice_path=target_voice_path, + output_wav_file=output_wav_file, + output_sample_rate=output_sample_rate, + alpha=alpha, + beta=beta, + diffusion_steps=diffusion_steps, + embedding_scale=embedding_scale, + ref_s=ref_s) + + if ref_s is None: + # default to clone https://styletts2.github.io/wavs/LJSpeech/OOD/GT/00001.wav voice from LibriVox (public domain) + if not target_voice_path or not Path(target_voice_path).exists(): + print("Cloning default target voice...") + target_voice_path = cached_path(DEFAULT_TARGET_VOICE_URL) + ref_s = self.compute_style(target_voice_path) # target style vector + + text = text.strip() + text = text.replace('"', '') + phonemized_text = self.phoneme_converter.phonemize(text) + ps = word_tokenize(phonemized_text) + phoneme_string = ' '.join(ps) + + textcleaner = TextCleaner() + tokens = textcleaner(phoneme_string) + tokens.insert(0, 0) + tokens = torch.LongTensor(tokens).to(self.device).unsqueeze(0) + + with torch.no_grad(): + input_lengths = torch.LongTensor([tokens.shape[-1]]).to(self.device) + text_mask = length_to_mask(input_lengths).to(self.device) + + t_en = self.model.text_encoder(tokens, input_lengths, text_mask) + bert_dur = self.model.bert(tokens, attention_mask=(~text_mask).int()) + d_en = self.model.bert_encoder(bert_dur).transpose(-1, -2) + + s_pred = self.sampler(noise = torch.randn((1, 256)).unsqueeze(1).to(self.device), + embedding=bert_dur, + embedding_scale=embedding_scale, + features=ref_s, # reference from the same speaker as the embedding + num_steps=diffusion_steps).squeeze(1) + + s = s_pred[:, 128:] + ref = s_pred[:, :128] + + ref = alpha * ref + (1 - alpha) * ref_s[:, :128] + s = beta * s + (1 - beta) * ref_s[:, 128:] + + # duration prediction + d = self.model.predictor.text_encoder(d_en, + s, input_lengths, text_mask) + + x, _ = self.model.predictor.lstm(d) + duration = self.model.predictor.duration_proj(x) + + duration = torch.sigmoid(duration).sum(axis=-1) + pred_dur = torch.round(duration.squeeze()).clamp(min=1) + + pred_aln_trg = torch.zeros(input_lengths, int(pred_dur.sum().data)) + c_frame = 0 + for i in range(pred_aln_trg.size(0)): + pred_aln_trg[i, c_frame:c_frame + int(pred_dur[i].data)] = 1 + c_frame += int(pred_dur[i].data) + + # encode prosody + en = (d.transpose(-1, -2) @ pred_aln_trg.unsqueeze(0).to(self.device)) + if self.model_params.decoder.type == "hifigan": + asr_new = torch.zeros_like(en) + asr_new[:, :, 0] = en[:, :, 0] + asr_new[:, :, 1:] = en[:, :, 0:-1] + en = asr_new + + F0_pred, N_pred = self.model.predictor.F0Ntrain(en, s) + + asr = (t_en @ pred_aln_trg.unsqueeze(0).to(self.device)) + if self.model_params.decoder.type == "hifigan": + asr_new = torch.zeros_like(asr) + asr_new[:, :, 0] = asr[:, :, 0] + asr_new[:, :, 1:] = asr[:, :, 0:-1] + asr = asr_new + + out = self.model.decoder(asr, + F0_pred, N_pred, ref.squeeze().unsqueeze(0)) + + output = out.squeeze().cpu().numpy()[..., :-50] # weird pulse at the end of the model, need to be fixed later + if output_wav_file: + scipy.io.wavfile.write(output_wav_file, rate=output_sample_rate, data=output) + return output + + def long_inference(self, + text: str, + target_voice_path=None, + 
output_wav_file=None, + output_sample_rate=24000, + alpha=0.3, + beta=0.7, + t=0.7, + diffusion_steps=5, + embedding_scale=1, + ref_s=None): + """ + Inference for longform text. Used automatically in inference() when needed. + :param text: Input text to turn into speech. + :param target_voice_path: Path to audio file of target voice to clone. + :param output_wav_file: Name of output audio file (if output WAV file is desired). + :param output_sample_rate: Output sample rate (default 24000). + :param alpha: Determines timbre of speech, higher means style is more suitable to text than to the target voice. + :param beta: Determines prosody of speech, higher means style is more suitable to text than to the target voice. + :param t: Determines consistency of style across inference segments (0 lowest, 1 highest) + :param diffusion_steps: The more the steps, the more diverse the samples are, with the cost of speed. + :param embedding_scale: Higher scale means style is more conditional to the input text and hence more emotional. + :param ref_s: Pre-computed style vector to pass directly. + :return: concatenated audio data as a Numpy array (will also create the WAV file if output_wav_file was set). + """ + + if ref_s is None: + # default to clone https://styletts2.github.io/wavs/LJSpeech/OOD/GT/00001.wav voice from LibriVox (public domain) + if not target_voice_path or not Path(target_voice_path).exists(): + print("Cloning default target voice...") + target_voice_path = cached_path(DEFAULT_TARGET_VOICE_URL) + ref_s = self.compute_style(target_voice_path) # target style vector + + text_segments = segment_text(text) + segments = [] + prev_s = None + for text_segment in text_segments: + # Address cut-off sentence issue due to langchain text splitter + if text_segment[-1] != '.': + text_segment += ', ' + segment_output, prev_s = self.long_inference_segment(text_segment, + prev_s, + ref_s, + alpha=alpha, + beta=beta, + t=t, + diffusion_steps=diffusion_steps, + embedding_scale=embedding_scale) + segments.append(segment_output) + output = np.concatenate(segments) + if output_wav_file: + scipy.io.wavfile.write(output_wav_file, rate=output_sample_rate, data=output) + return output + + def long_inference_segment(self, + text, + prev_s, + ref_s, + alpha=0.3, + beta=0.7, + t=0.7, + diffusion_steps=5, + embedding_scale=1): + """ + Performs inference for segment of longform text; see long_inference() + :param text: Input text + :param prev_s: Style vector of previous speech segment (used to keep voice consistent in longform inference) + :param ref_s: Pre-computed style vector of target voice to clone + :param alpha: Determines timbre of speech, higher means style is more suitable to text than to the target voice. + :param beta: Determines prosody of speech, higher means style is more suitable to text than to the target voice. + :param t: Determines consistency of style across inference segments (0 lowest, 1 highest) + :param diffusion_steps: The more the steps, the more diverse the samples are, with the cost of speed. + :param embedding_scale: Higher scale means style is more conditional to the input text and hence more emotional. 
+ :return: audio data as a Numpy array + """ + text = text.strip() + text = text.replace('"', '') + phonemized_text = self.phoneme_converter.phonemize(text) + ps = word_tokenize(phonemized_text) + phoneme_string = ' '.join(ps) + phoneme_string = phoneme_string.replace('``', '"') + phoneme_string = phoneme_string.replace("''", '"') + + textcleaner = TextCleaner() + tokens = textcleaner(phoneme_string) + tokens.insert(0, 0) + tokens = torch.LongTensor(tokens).to(self.device).unsqueeze(0) + + with torch.no_grad(): + input_lengths = torch.LongTensor([tokens.shape[-1]]).to(self.device) + text_mask = length_to_mask(input_lengths).to(self.device) + + t_en = self.model.text_encoder(tokens, input_lengths, text_mask) + bert_dur = self.model.bert(tokens, attention_mask=(~text_mask).int()) + d_en = self.model.bert_encoder(bert_dur).transpose(-1, -2) + + s_pred = self.sampler(noise = torch.randn((1, 256)).unsqueeze(1).to(self.device), + embedding=bert_dur, + embedding_scale=embedding_scale, + features=ref_s, # reference from the same speaker as the embedding + num_steps=diffusion_steps).squeeze(1) + + if prev_s is not None: + # convex combination of previous and current style + s_pred = t * prev_s + (1 - t) * s_pred + + s = s_pred[:, 128:] + ref = s_pred[:, :128] + + ref = alpha * ref + (1 - alpha) * ref_s[:, :128] + s = beta * s + (1 - beta) * ref_s[:, 128:] + + s_pred = torch.cat([ref, s], dim=-1) + + d = self.model.predictor.text_encoder(d_en, + s, input_lengths, text_mask) + + x, _ = self.model.predictor.lstm(d) + duration = self.model.predictor.duration_proj(x) + + duration = torch.sigmoid(duration).sum(axis=-1) + pred_dur = torch.round(duration.squeeze()).clamp(min=1) + + + pred_aln_trg = torch.zeros(input_lengths, int(pred_dur.sum().data)) + c_frame = 0 + for i in range(pred_aln_trg.size(0)): + pred_aln_trg[i, c_frame:c_frame + int(pred_dur[i].data)] = 1 + c_frame += int(pred_dur[i].data) + + # encode prosody + en = (d.transpose(-1, -2) @ pred_aln_trg.unsqueeze(0).to(self.device)) + if self.model_params.decoder.type == "hifigan": + asr_new = torch.zeros_like(en) + asr_new[:, :, 0] = en[:, :, 0] + asr_new[:, :, 1:] = en[:, :, 0:-1] + en = asr_new + + F0_pred, N_pred = self.model.predictor.F0Ntrain(en, s) + + asr = (t_en @ pred_aln_trg.unsqueeze(0).to(self.device)) + if self.model_params.decoder.type == "hifigan": + asr_new = torch.zeros_like(asr) + asr_new[:, :, 0] = asr[:, :, 0] + asr_new[:, :, 1:] = asr[:, :, 0:-1] + asr = asr_new + + out = self.model.decoder(asr, + F0_pred, N_pred, ref.squeeze().unsqueeze(0)) + + return out.squeeze().cpu().numpy()[..., :-100], s_pred diff --git a/styletts2/utils.py b/styletts2/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..9f68abbe5c5ddb98e85ac6ad11f6739cad9172fc --- /dev/null +++ b/styletts2/utils.py @@ -0,0 +1,57 @@ +import numpy as np +import torch +import copy +from torch import nn +import torch.nn.functional as F +import torchaudio +import librosa +import matplotlib.pyplot as plt +from munch import Munch + + +def get_data_path_list(train_path=None, val_path=None): + if train_path is None: + train_path = "Data/train_list.txt" + if val_path is None: + val_path = "Data/val_list.txt" + + with open(train_path, 'r', encoding='utf-8', errors='ignore') as f: + train_list = f.readlines() + with open(val_path, 'r', encoding='utf-8', errors='ignore') as f: + val_list = f.readlines() + + return train_list, val_list + +def length_to_mask(lengths): + mask = torch.arange(lengths.max()).unsqueeze(0).expand(lengths.shape[0], 
-1).type_as(lengths) + mask = torch.gt(mask+1, lengths.unsqueeze(1)) + return mask + +# for norm consistency loss +def log_norm(x, mean=-4, std=4, dim=2): + """ + normalized log mel -> mel -> norm -> log(norm) + """ + x = torch.log(torch.exp(x * std + mean).norm(dim=dim)) + return x + +def get_image(arrs): + plt.switch_backend('agg') + fig = plt.figure() + ax = plt.gca() + ax.imshow(arrs) + + return fig + +def recursive_munch(d): + if isinstance(d, dict): + return Munch((k, recursive_munch(v)) for k, v in d.items()) + elif isinstance(d, list): + return [recursive_munch(v) for v in d] + else: + return d + +def log_print(message, logger): + logger.info(message) + print(message) + \ No newline at end of file
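
A minimal usage sketch of the StyleTTS2 wrapper defined in styletts2/tts.py above; this is an illustration, not part of the patch. It assumes the package added by this diff is importable as `styletts2`, and the reference clip and output file names below are hypothetical placeholders.

# Usage sketch for the StyleTTS2 class from styletts2/tts.py (assumptions noted above).
from styletts2.tts import StyleTTS2

# With no arguments, load_model() falls back to downloading the default
# LibriTTS checkpoint and config via cached_path on first run.
tts = StyleTTS2()

# Basic synthesis with the default target voice.
tts.inference("Hello from StyleTTS2.", output_wav_file="hello.wav")

# Voice cloning: compute a style vector once and reuse it across calls.
ref_s = tts.compute_style("my_voice.wav")  # hypothetical reference clip (any audio librosa can read)
audio = tts.inference(
    "This sentence is spoken in the cloned voice.",
    ref_s=ref_s,
    alpha=0.3,           # timbre: higher leans toward the text-predicted style
    beta=0.7,            # prosody: higher leans toward the text-predicted style
    diffusion_steps=5,
    embedding_scale=1,
    output_wav_file="cloned.wav",
)
# `audio` is a NumPy array at 24 kHz; texts longer than SINGLE_INFERENCE_MAX_LEN
# (420 characters) are routed through long_inference() automatically.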