Dataset columns: python_code (string, 0–290k characters), repo_name (string, 30 distinct values), file_path (string, 6–125 characters).
# flake8: noqa
# There's no way to ignore "F401 '...' imported but unused" warnings in this
# module while still preserving other warnings, so don't check this module at all.

from .utils import is_inflect_available, is_transformers_available, is_unidecode_available


__version__ = "0.0.4"

from .modeling_utils import ModelMixin
from .models import AutoencoderKL, NCSNpp, TemporalUNet, UNetLDMModel, UNetModel, UNetUnconditionalModel, VQModel
from .pipeline_utils import DiffusionPipeline
from .pipelines import (
    BDDMPipeline,
    DDIMPipeline,
    DDPMPipeline,
    LatentDiffusionUncondPipeline,
    PNDMPipeline,
    ScoreSdeVePipeline,
    ScoreSdeVpPipeline,
)
from .schedulers import (
    DDIMScheduler,
    DDPMScheduler,
    GradTTSScheduler,
    PNDMScheduler,
    SchedulerMixin,
    ScoreSdeVeScheduler,
    ScoreSdeVpScheduler,
)


if is_transformers_available():
    from .models.unet_glide import GlideSuperResUNetModel, GlideTextToImageUNetModel, GlideUNetModel
    from .models.unet_grad_tts import UNetGradTTSModel
    from .pipelines import GlidePipeline, LatentDiffusionPipeline
else:
    from .utils.dummy_transformers_objects import *

if is_transformers_available() and is_inflect_available() and is_unidecode_available():
    from .pipelines import GradTTSPipeline
else:
    from .utils.dummy_transformers_and_inflect_and_unidecode_objects import *
diffusers_all-main
src/diffusers/__init__.py
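A minimal usage sketch for the conditional exports above. It only relies on the availability helpers imported at the top of this `__init__`; the intent (an assumption based on this 0.0.4-era snapshot, not a guarantee) is that optional pipelines resolve to real classes only when their extra dependencies are installed.

# sketch: guard optional pipelines behind the availability flags
from diffusers import DDIMPipeline, DDPMScheduler  # unconditional exports
from diffusers.utils import is_transformers_available

if is_transformers_available():
    from diffusers import LatentDiffusionPipeline  # needs `transformers`
else:
    LatentDiffusionPipeline = None  # the dummy-objects fallback would otherwise raise on use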
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
from pathlib import Path
from typing import Optional

from diffusers import DiffusionPipeline
from huggingface_hub import HfFolder, Repository, whoami
from modelcards import CardData, ModelCard

from .utils import logging


logger = logging.get_logger(__name__)


MODEL_CARD_TEMPLATE_PATH = Path(__file__).parent / "utils" / "model_card_template.md"


def get_full_repo_name(model_id: str, organization: Optional[str] = None, token: Optional[str] = None):
    if token is None:
        token = HfFolder.get_token()
    if organization is None:
        username = whoami(token)["name"]
        return f"{username}/{model_id}"
    else:
        return f"{organization}/{model_id}"


def init_git_repo(args, at_init: bool = False):
    """
    Initializes a git repo in `args.hub_model_id`.

    Args:
        at_init (`bool`, *optional*, defaults to `False`):
            Whether this function is called before any training or not. If `self.args.overwrite_output_dir` is `True`
            and `at_init` is `True`, the path to the repo (which is `self.args.output_dir`) might be wiped out.
    """
    if args.local_rank not in [-1, 0]:
        return
    use_auth_token = True if args.hub_token is None else args.hub_token
    if args.hub_model_id is None:
        repo_name = Path(args.output_dir).absolute().name
    else:
        repo_name = args.hub_model_id
    if "/" not in repo_name:
        repo_name = get_full_repo_name(repo_name, token=args.hub_token)

    try:
        repo = Repository(
            args.output_dir,
            clone_from=repo_name,
            use_auth_token=use_auth_token,
            private=args.hub_private_repo,
        )
    except EnvironmentError:
        if args.overwrite_output_dir and at_init:
            # Try again after wiping output_dir
            shutil.rmtree(args.output_dir)
            repo = Repository(
                args.output_dir,
                clone_from=repo_name,
                use_auth_token=use_auth_token,
            )
        else:
            raise

    repo.git_pull()

    # By default, ignore the checkpoint folders
    if not os.path.exists(os.path.join(args.output_dir, ".gitignore")):
        with open(os.path.join(args.output_dir, ".gitignore"), "w", encoding="utf-8") as writer:
            writer.writelines(["checkpoint-*/"])

    return repo


def push_to_hub(
    args,
    pipeline: DiffusionPipeline,
    repo: Repository,
    commit_message: Optional[str] = "End of training",
    blocking: bool = True,
    **kwargs,
) -> str:
    """
    Upload the *pipeline* to the 🤗 model hub on the repo *args.hub_model_id*.

    Parameters:
        commit_message (`str`, *optional*, defaults to `"End of training"`):
            Message to commit while pushing.
        blocking (`bool`, *optional*, defaults to `True`):
            Whether the function should return only when the `git push` has finished.
        kwargs:
            Additional keyword arguments passed along to [`create_model_card`].

    Returns:
        The url of the commit of your model in the given repository if `blocking=False`, or a tuple with the url of
        the commit and an object to track the progress of the commit if `blocking=True`.
    """
    if args.hub_model_id is None:
        model_name = Path(args.output_dir).name
    else:
        model_name = args.hub_model_id.split("/")[-1]

    output_dir = args.output_dir
    os.makedirs(output_dir, exist_ok=True)
    logger.info(f"Saving pipeline checkpoint to {output_dir}")
    pipeline.save_pretrained(output_dir)

    # Only push from one node.
    if args.local_rank not in [-1, 0]:
        return

    # Cancel any async push in progress if blocking=True. The commits will all be pushed together.
    if (
        blocking
        and len(repo.command_queue) > 0
        and repo.command_queue[-1] is not None
        and not repo.command_queue[-1].is_done
    ):
        repo.command_queue[-1]._process.kill()

    git_head_commit_url = repo.push_to_hub(commit_message=commit_message, blocking=blocking, auto_lfs_prune=True)
    # push separately the model card to be independent from the rest of the model
    create_model_card(args, model_name=model_name)
    try:
        repo.push_to_hub(commit_message="update model card README.md", blocking=blocking, auto_lfs_prune=True)
    except EnvironmentError as exc:
        logger.error(f"Error pushing update to the model card. Please read logs and retry.\n{exc}")

    return git_head_commit_url


def create_model_card(args, model_name):
    if args.local_rank not in [-1, 0]:
        return

    repo_name = get_full_repo_name(model_name, token=args.hub_token)

    model_card = ModelCard.from_template(
        card_data=CardData(  # Card metadata object that will be converted to YAML block
            language="en",
            license="apache-2.0",
            library_name="diffusers",
            tags=[],
            datasets=args.dataset,
            metrics=[],
        ),
        template_path=MODEL_CARD_TEMPLATE_PATH,
        model_name=model_name,
        repo_name=repo_name,
        dataset_name=args.dataset,
        learning_rate=args.learning_rate,
        train_batch_size=args.train_batch_size,
        eval_batch_size=args.eval_batch_size,
        gradient_accumulation_steps=args.gradient_accumulation_steps,
        adam_beta1=args.adam_beta1,
        adam_beta2=args.adam_beta2,
        adam_weight_decay=args.adam_weight_decay,
        adam_epsilon=args.adam_epsilon,
        lr_scheduler=args.lr_scheduler,
        lr_warmup_steps=args.lr_warmup_steps,
        ema_inv_gamma=args.ema_inv_gamma,
        ema_power=args.ema_power,
        ema_max_decay=args.ema_max_decay,
        mixed_precision=args.mixed_precision,
    )

    card_path = os.path.join(args.output_dir, "README.md")
    model_card.save(card_path)
diffusers_all-main
src/diffusers/hub_utils.py
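A hypothetical wiring sketch for the hub helpers above. The `args` namespace below only mimics the attributes that `init_git_repo` and `push_to_hub` read; the training-related fields consumed by `create_model_card` (dataset, learning_rate, ...) are omitted and noted in a comment.

# sketch: minimal args object for init_git_repo / push_to_hub (hypothetical values)
from types import SimpleNamespace

from diffusers.hub_utils import init_git_repo, push_to_hub

args = SimpleNamespace(
    local_rank=-1,              # only rank -1/0 touches the Hub
    output_dir="ddpm-demo",     # local clone target; also used to derive the repo name
    hub_model_id=None,          # None -> repo name is taken from output_dir
    hub_token=None,             # None -> use the cached `huggingface-cli login` token
    hub_private_repo=False,
    overwrite_output_dir=False,
)

repo = init_git_repo(args, at_init=True)
# ... train a pipeline, then:
# push_to_hub(args, pipeline, repo)  # also needs args.dataset, args.learning_rate, etc. for the model card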
import copy

import torch


class EMAModel:
    """
    Exponential Moving Average of model weights.
    """

    def __init__(
        self,
        model,
        update_after_step=0,
        inv_gamma=1.0,
        power=2 / 3,
        min_value=0.0,
        max_value=0.9999,
        device=None,
    ):
        """
        @crowsonkb's notes on EMA Warmup:
            If gamma=1 and power=1, implements a simple average. gamma=1, power=2/3 are good values for models you
            plan to train for a million or more steps (reaches decay factor 0.999 at 31.6K steps, 0.9999 at 1M
            steps), gamma=1, power=3/4 for models you plan to train for less (reaches decay factor 0.999 at 10K
            steps, 0.9999 at 215.4k steps).

        Args:
            inv_gamma (float): Inverse multiplicative factor of EMA warmup. Default: 1.
            power (float): Exponential factor of EMA warmup. Default: 2/3.
            min_value (float): The minimum EMA decay rate. Default: 0.
        """

        self.averaged_model = copy.deepcopy(model).eval()
        self.averaged_model.requires_grad_(False)

        self.update_after_step = update_after_step
        self.inv_gamma = inv_gamma
        self.power = power
        self.min_value = min_value
        self.max_value = max_value

        if device is not None:
            self.averaged_model = self.averaged_model.to(device=device)

        self.decay = 0.0
        self.optimization_step = 0

    def get_decay(self, optimization_step):
        """
        Compute the decay factor for the exponential moving average.
        """
        step = max(0, optimization_step - self.update_after_step - 1)
        value = 1 - (1 + step / self.inv_gamma) ** -self.power

        if step <= 0:
            return 0.0

        return max(self.min_value, min(value, self.max_value))

    @torch.no_grad()
    def step(self, new_model):
        ema_state_dict = {}
        ema_params = self.averaged_model.state_dict()

        self.decay = self.get_decay(self.optimization_step)

        for key, param in new_model.named_parameters():
            if isinstance(param, dict):
                continue
            try:
                ema_param = ema_params[key]
            except KeyError:
                ema_param = param.float().clone() if param.ndim == 1 else copy.deepcopy(param)
                ema_params[key] = ema_param

            if not param.requires_grad:
                ema_params[key].copy_(param.to(dtype=ema_param.dtype).data)
                ema_param = ema_params[key]
            else:
                ema_param.mul_(self.decay)
                ema_param.add_(param.data.to(dtype=ema_param.dtype), alpha=1 - self.decay)

            ema_state_dict[key] = ema_param

        for key, param in new_model.named_buffers():
            ema_state_dict[key] = param

        self.averaged_model.load_state_dict(ema_state_dict, strict=False)
        self.optimization_step += 1
diffusers_all-main
src/diffusers/training_utils.py
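A minimal sketch of the EMA bookkeeping above on a toy model; the model, optimizer, and loop are purely illustrative, and the hyperparameters are just the class defaults.

# sketch: keep an EMA shadow copy of a tiny model during a fake training loop
import torch

from diffusers.training_utils import EMAModel

model = torch.nn.Linear(4, 2)
ema = EMAModel(model, inv_gamma=1.0, power=2 / 3, max_value=0.9999)
optimizer = torch.optim.SGD(model.parameters(), lr=1e-2)

for step in range(10):
    x = torch.randn(8, 4)
    loss = model(x).pow(2).mean()
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    ema.step(model)  # update the shadow weights after every optimizer step

print(ema.get_decay(ema.optimization_step))  # decay warms up toward max_value
eval_model = ema.averaged_model              # use the averaged weights for evaluation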
# THIS FILE HAS BEEN AUTOGENERATED. To update:
# 1. modify the `_deps` dict in setup.py
# 2. run `make deps_table_update`
deps = {
    "Pillow": "Pillow",
    "black": "black~=22.0,>=22.3",
    "filelock": "filelock",
    "flake8": "flake8>=3.8.3",
    "huggingface-hub": "huggingface-hub",
    "isort": "isort>=5.5.4",
    "numpy": "numpy",
    "pytest": "pytest",
    "regex": "regex!=2019.12.17",
    "requests": "requests",
    "torch": "torch>=1.4",
    "tensorboard": "tensorboard",
    "modelcards": "modelcards==0.1.4",
}
diffusers_all-main
src/diffusers/dependency_versions_table.py
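A sketch of how a pinned-dependency table like `deps` above is typically consumed; the actual checker module is not part of this snapshot, so the helper below is an assumption for illustration only.

# sketch (assumed consumer): verify installed packages against the pins in `deps`
from importlib.metadata import PackageNotFoundError, version

from packaging.requirements import Requirement

from diffusers.dependency_versions_table import deps


def is_satisfied(pin: str) -> bool:
    """Return True if the installed package satisfies a pin such as 'torch>=1.4'."""
    req = Requirement(pin)
    try:
        installed = version(req.name)
    except PackageNotFoundError:
        return False
    return installed in req.specifier if str(req.specifier) else True


print({name: is_satisfied(pin) for name, pin in deps.items()})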
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os from typing import Callable, List, Optional, Tuple, Union import torch from torch import Tensor, device from huggingface_hub import hf_hub_download from requests import HTTPError from .utils import ( CONFIG_NAME, DIFFUSERS_CACHE, HUGGINGFACE_CO_RESOLVE_ENDPOINT, EntryNotFoundError, RepositoryNotFoundError, RevisionNotFoundError, logging, ) WEIGHTS_NAME = "diffusion_model.pt" logger = logging.get_logger(__name__) def get_parameter_device(parameter: torch.nn.Module): try: return next(parameter.parameters()).device except StopIteration: # For torch.nn.DataParallel compatibility in PyTorch 1.5 def find_tensor_attributes(module: torch.nn.Module) -> List[Tuple[str, Tensor]]: tuples = [(k, v) for k, v in module.__dict__.items() if torch.is_tensor(v)] return tuples gen = parameter._named_members(get_members_fn=find_tensor_attributes) first_tuple = next(gen) return first_tuple[1].device def get_parameter_dtype(parameter: torch.nn.Module): try: return next(parameter.parameters()).dtype except StopIteration: # For torch.nn.DataParallel compatibility in PyTorch 1.5 def find_tensor_attributes(module: torch.nn.Module) -> List[Tuple[str, Tensor]]: tuples = [(k, v) for k, v in module.__dict__.items() if torch.is_tensor(v)] return tuples gen = parameter._named_members(get_members_fn=find_tensor_attributes) first_tuple = next(gen) return first_tuple[1].dtype def load_state_dict(checkpoint_file: Union[str, os.PathLike]): """ Reads a PyTorch checkpoint file, returning properly formatted errors if they arise. """ try: return torch.load(checkpoint_file, map_location="cpu") except Exception as e: try: with open(checkpoint_file) as f: if f.read().startswith("version"): raise OSError( "You seem to have cloned a repository without having git-lfs installed. Please install " "git-lfs and run `git lfs install` followed by `git lfs pull` in the folder " "you cloned." ) else: raise ValueError( f"Unable to locate the file {checkpoint_file} which is necessary to load this pretrained " "model. Make sure you have saved the model properly." ) from e except (UnicodeDecodeError, ValueError): raise OSError( f"Unable to load weights from pytorch checkpoint file for '{checkpoint_file}' " f"at '{checkpoint_file}'. " "If you tried to load a PyTorch model from a TF 2.0 checkpoint, please set from_tf=True." ) def _load_state_dict_into_model(model_to_load, state_dict): # Convert old format to new format if needed from a PyTorch state_dict # copy state_dict so _load_from_state_dict can modify it state_dict = state_dict.copy() error_msgs = [] # PyTorch's `_load_from_state_dict` does not copy parameters in a module's descendants # so we need to apply the function recursively. 
def load(module: torch.nn.Module, prefix=""): args = (state_dict, prefix, {}, True, [], [], error_msgs) module._load_from_state_dict(*args) for name, child in module._modules.items(): if child is not None: load(child, prefix + name + ".") load(model_to_load) return error_msgs class ModelMixin(torch.nn.Module): r""" Base class for all models. [`ModelMixin`] takes care of storing the configuration of the models and handles methods for loading, downloading and saving models as well as a few methods common to all models to: - resize the input embeddings, - prune heads in the self-attention heads. Class attributes (overridden by derived classes): - **config_class** ([`ConfigMixin`]) -- A subclass of [`ConfigMixin`] to use as configuration class for this model architecture. - **load_tf_weights** (`Callable`) -- A python *method* for loading a TensorFlow checkpoint in a PyTorch model, taking as arguments: - **model** ([`ModelMixin`]) -- An instance of the model on which to load the TensorFlow checkpoint. - **config** ([`PreTrainedConfigMixin`]) -- An instance of the configuration associated to the model. - **path** (`str`) -- A path to the TensorFlow checkpoint. - **base_model_prefix** (`str`) -- A string indicating the attribute associated to the base model in derived classes of the same architecture adding modules on top of the base model. - **is_parallelizable** (`bool`) -- A flag indicating whether this model supports model parallelization. - **main_input_name** (`str`) -- The name of the principal input to the model (often `input_ids` for NLP models, `pixel_values` for vision models and `input_values` for speech models). """ config_name = CONFIG_NAME def __init__(self): super().__init__() def save_pretrained( self, save_directory: Union[str, os.PathLike], is_main_process: bool = True, save_function: Callable = torch.save, **kwargs, ): """ Save a model and its configuration file to a directory, so that it can be re-loaded using the `[`~ModelMixin.from_pretrained`]` class method. Arguments: save_directory (`str` or `os.PathLike`): Directory to which to save. Will be created if it doesn't exist. is_main_process (`bool`, *optional*, defaults to `True`): Whether the process calling this is the main process or not. Useful when in distributed training like TPUs and need to call this function on all processes. In this case, set `is_main_process=True` only on the main process to avoid race conditions. save_function (`Callable`): The function to use to save the state dictionary. Useful on distributed training like TPUs when one need to replace `torch.save` by another method. kwargs: Additional key word arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method. """ if os.path.isfile(save_directory): logger.error(f"Provided path ({save_directory}) should be a directory, not a file") return os.makedirs(save_directory, exist_ok=True) model_to_save = self # Attach architecture to the config # Save the config if is_main_process: model_to_save.save_config(save_directory) # Save the model state_dict = model_to_save.state_dict() # Clean the folder from a previous save for filename in os.listdir(save_directory): full_filename = os.path.join(save_directory, filename) # If we have a shard file that is not going to be replaced, we delete it, but only from the main process # in distributed settings to avoid race conditions. 
if filename.startswith(WEIGHTS_NAME[:-4]) and os.path.isfile(full_filename) and is_main_process: os.remove(full_filename) # Save the model save_function(state_dict, os.path.join(save_directory, WEIGHTS_NAME)) logger.info(f"Model weights saved in {os.path.join(save_directory, WEIGHTS_NAME)}") @classmethod def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.PathLike]], **kwargs): r""" Instantiate a pretrained pytorch model from a pre-trained model configuration. The model is set in evaluation mode by default using `model.eval()` (Dropout modules are deactivated). To train the model, you should first set it back in training mode with `model.train()`. The warning *Weights from XXX not initialized from pretrained model* means that the weights of XXX do not come pretrained with the rest of the model. It is up to you to train those weights with a downstream fine-tuning task. The warning *Weights from XXX not used in YYY* means that the layer XXX is not used by YYY, therefore those weights are discarded. Parameters: pretrained_model_name_or_path (`str` or `os.PathLike`, *optional*): Can be either: - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co. Valid model ids can be located at the root-level, like `bert-base-uncased`, or namespaced under a user or organization name, like `dbmdz/bert-base-german-cased`. - A path to a *directory* containing model weights saved using [`~ModelMixin.save_pretrained`], e.g., `./my_model_directory/`. config (`Union[ConfigMixin, str, os.PathLike]`, *optional*): Can be either: - an instance of a class derived from [`ConfigMixin`], - a string or path valid as input to [`~ConfigMixin.from_pretrained`]. ConfigMixinuration for the model to use instead of an automatically loaded configuration. ConfigMixinuration can be automatically loaded when: - The model is a model provided by the library (loaded with the *model id* string of a pretrained model). - The model was saved using [`~ModelMixin.save_pretrained`] and is reloaded by supplying the save directory. - The model is loaded by supplying a local directory as `pretrained_model_name_or_path` and a configuration JSON file named *config.json* is found in the directory. cache_dir (`Union[str, os.PathLike]`, *optional*): Path to a directory in which a downloaded pretrained model configuration should be cached if the standard cache should not be used. from_tf (`bool`, *optional*, defaults to `False`): Load the model weights from a TensorFlow checkpoint save file (see docstring of `pretrained_model_name_or_path` argument). from_flax (`bool`, *optional*, defaults to `False`): Load the model weights from a Flax checkpoint save file (see docstring of `pretrained_model_name_or_path` argument). ignore_mismatched_sizes (`bool`, *optional*, defaults to `False`): Whether or not to raise an error if some of the weights from the checkpoint do not have the same size as the weights of the model (if for instance, you are instantiating a model with 10 labels from a checkpoint with 3 labels). force_download (`bool`, *optional*, defaults to `False`): Whether or not to force the (re-)download of the model weights and configuration files, overriding the cached versions if they exist. resume_download (`bool`, *optional*, defaults to `False`): Whether or not to delete incompletely received files. Will attempt to resume the download if such a file exists. 
proxies (`Dict[str, str]`, *optional*): A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. output_loading_info(`bool`, *optional*, defaults to `False`): Whether ot not to also return a dictionary containing missing keys, unexpected keys and error messages. local_files_only(`bool`, *optional*, defaults to `False`): Whether or not to only look at local files (i.e., do not try to download the model). use_auth_token (`str` or *bool*, *optional*): The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated when running `transformers-cli login` (stored in `~/.huggingface`). revision (`str`, *optional*, defaults to `"main"`): The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any identifier allowed by git. mirror (`str`, *optional*): Mirror source to accelerate downloads in China. If you are from China and have an accessibility problem, you can set this option to resolve it. Note that we do not guarantee the timeliness or safety. Please refer to the mirror site for more information. kwargs (remaining dictionary of keyword arguments, *optional*): Can be used to update the configuration object (after it being loaded) and initiate the model (e.g., `output_attentions=True`). Behaves differently depending on whether a `config` is provided or automatically loaded: - If a configuration is provided with `config`, `**kwargs` will be directly passed to the underlying model's `__init__` method (we assume all relevant updates to the configuration have already been done) - If a configuration is not provided, `kwargs` will be first passed to the configuration class initialization function ([`~ConfigMixin.from_pretrained`]). Each key of `kwargs` that corresponds to a configuration attribute will be used to override said attribute with the supplied `kwargs` value. Remaining keys that do not correspond to any configuration attribute will be passed to the underlying model's `__init__` function. <Tip> Passing `use_auth_token=True`` is required when you want to use a private model. </Tip> <Tip> Activate the special ["offline-mode"](https://huggingface.co/transformers/installation.html#offline-mode) to use this method in a firewalled environment. 
</Tip> """ cache_dir = kwargs.pop("cache_dir", DIFFUSERS_CACHE) ignore_mismatched_sizes = kwargs.pop("ignore_mismatched_sizes", False) force_download = kwargs.pop("force_download", False) resume_download = kwargs.pop("resume_download", False) proxies = kwargs.pop("proxies", None) output_loading_info = kwargs.pop("output_loading_info", False) local_files_only = kwargs.pop("local_files_only", False) use_auth_token = kwargs.pop("use_auth_token", None) revision = kwargs.pop("revision", None) from_auto_class = kwargs.pop("_from_auto", False) user_agent = {"file_type": "model", "framework": "pytorch", "from_auto_class": from_auto_class} # Load config if we don't provide a configuration config_path = pretrained_model_name_or_path model, unused_kwargs = cls.from_config( config_path, cache_dir=cache_dir, return_unused_kwargs=True, force_download=force_download, resume_download=resume_download, proxies=proxies, local_files_only=local_files_only, use_auth_token=use_auth_token, revision=revision, **kwargs, ) model.register_to_config(name_or_path=pretrained_model_name_or_path) # This variable will flag if we're loading a sharded checkpoint. In this case the archive file is just the # Load model pretrained_model_name_or_path = str(pretrained_model_name_or_path) if os.path.isdir(pretrained_model_name_or_path): if os.path.isfile(os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)): # Load from a PyTorch checkpoint model_file = os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME) else: raise EnvironmentError( f"Error no file named {WEIGHTS_NAME} found in directory {pretrained_model_name_or_path}." ) else: try: # Load from URL or cache if already cached model_file = hf_hub_download( pretrained_model_name_or_path, filename=WEIGHTS_NAME, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, use_auth_token=use_auth_token, user_agent=user_agent, ) except RepositoryNotFoundError: raise EnvironmentError( f"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier " "listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a " "token having permission to this repo with `use_auth_token` or log in with `huggingface-cli " "login` and pass `use_auth_token=True`." ) except RevisionNotFoundError: raise EnvironmentError( f"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for " "this model name. Check the model page at " f"'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions." ) except EntryNotFoundError: raise EnvironmentError( f"{pretrained_model_name_or_path} does not appear to have a file named {model_file}." ) except HTTPError as err: raise EnvironmentError( "There was a specific connection error when trying to load" f" {pretrained_model_name_or_path}:\n{err}" ) except ValueError: raise EnvironmentError( f"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it" f" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a" f" directory containing a file named {WEIGHTS_NAME} or" " \nCheckout your internet connection or see how to run the library in" " offline mode at 'https://huggingface.co/docs/transformers/installation#offline-mode'." ) except EnvironmentError: raise EnvironmentError( f"Can't load the model for '{pretrained_model_name_or_path}'. 
If you were trying to load it from " "'https://huggingface.co/models', make sure you don't have a local directory with the same name. " f"Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory " f"containing a file named {WEIGHTS_NAME}" ) # restore default dtype state_dict = load_state_dict(model_file) model, missing_keys, unexpected_keys, mismatched_keys, error_msgs = cls._load_pretrained_model( model, state_dict, model_file, pretrained_model_name_or_path, ignore_mismatched_sizes=ignore_mismatched_sizes, ) # Set model in evaluation mode to deactivate DropOut modules by default model.eval() if output_loading_info: loading_info = { "missing_keys": missing_keys, "unexpected_keys": unexpected_keys, "mismatched_keys": mismatched_keys, "error_msgs": error_msgs, } return model, loading_info return model @classmethod def _load_pretrained_model( cls, model, state_dict, resolved_archive_file, pretrained_model_name_or_path, ignore_mismatched_sizes=False, ): # Retrieve missing & unexpected_keys model_state_dict = model.state_dict() loaded_keys = [k for k in state_dict.keys()] expected_keys = list(model_state_dict.keys()) original_loaded_keys = loaded_keys missing_keys = list(set(expected_keys) - set(loaded_keys)) unexpected_keys = list(set(loaded_keys) - set(expected_keys)) # Make sure we are able to load base models as well as derived models (with heads) model_to_load = model def _find_mismatched_keys( state_dict, model_state_dict, loaded_keys, ignore_mismatched_sizes, ): mismatched_keys = [] if ignore_mismatched_sizes: for checkpoint_key in loaded_keys: model_key = checkpoint_key if ( model_key in model_state_dict and state_dict[checkpoint_key].shape != model_state_dict[model_key].shape ): mismatched_keys.append( (checkpoint_key, state_dict[checkpoint_key].shape, model_state_dict[model_key].shape) ) del state_dict[checkpoint_key] return mismatched_keys if state_dict is not None: # Whole checkpoint mismatched_keys = _find_mismatched_keys( state_dict, model_state_dict, original_loaded_keys, ignore_mismatched_sizes, ) error_msgs = _load_state_dict_into_model(model_to_load, state_dict) if len(error_msgs) > 0: error_msg = "\n\t".join(error_msgs) if "size mismatch" in error_msg: error_msg += ( "\n\tYou may consider adding `ignore_mismatched_sizes=True` in the model `from_pretrained` method." ) raise RuntimeError(f"Error(s) in loading state_dict for {model.__class__.__name__}:\n\t{error_msg}") if len(unexpected_keys) > 0: logger.warning( f"Some weights of the model checkpoint at {pretrained_model_name_or_path} were not used when" f" initializing {model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are" f" initializing {model.__class__.__name__} from the checkpoint of a model trained on another task or" " with another architecture (e.g. initializing a BertForSequenceClassification model from a" " BertForPreTraining model).\n- This IS NOT expected if you are initializing" f" {model.__class__.__name__} from the checkpoint of a model that you expect to be exactly identical" " (initializing a BertForSequenceClassification model from a BertForSequenceClassification model)." 
) else: logger.info(f"All model checkpoint weights were used when initializing {model.__class__.__name__}.\n") if len(missing_keys) > 0: logger.warning( f"Some weights of {model.__class__.__name__} were not initialized from the model checkpoint at" f" {pretrained_model_name_or_path} and are newly initialized: {missing_keys}\nYou should probably" " TRAIN this model on a down-stream task to be able to use it for predictions and inference." ) elif len(mismatched_keys) == 0: logger.info( f"All the weights of {model.__class__.__name__} were initialized from the model checkpoint at" f" {pretrained_model_name_or_path}.\nIf your task is similar to the task the model of the checkpoint" f" was trained on, you can already use {model.__class__.__name__} for predictions without further" " training." ) if len(mismatched_keys) > 0: mismatched_warning = "\n".join( [ f"- {key}: found shape {shape1} in the checkpoint and {shape2} in the model instantiated" for key, shape1, shape2 in mismatched_keys ] ) logger.warning( f"Some weights of {model.__class__.__name__} were not initialized from the model checkpoint at" f" {pretrained_model_name_or_path} and are newly initialized because the shapes did not" f" match:\n{mismatched_warning}\nYou should probably TRAIN this model on a down-stream task to be able" " to use it for predictions and inference." ) return model, missing_keys, unexpected_keys, mismatched_keys, error_msgs @property def device(self) -> device: """ `torch.device`: The device on which the module is (assuming that all the module parameters are on the same device). """ return get_parameter_device(self) @property def dtype(self) -> torch.dtype: """ `torch.dtype`: The dtype of the module (assuming that all the module parameters have the same dtype). """ return get_parameter_dtype(self) def num_parameters(self, only_trainable: bool = False, exclude_embeddings: bool = False) -> int: """ Get number of (optionally, trainable or non-embeddings) parameters in the module. Args: only_trainable (`bool`, *optional*, defaults to `False`): Whether or not to return only the number of trainable parameters exclude_embeddings (`bool`, *optional*, defaults to `False`): Whether or not to return only the number of non-embeddings parameters Returns: `int`: The number of parameters. """ if exclude_embeddings: embedding_param_names = [ f"{name}.weight" for name, module_type in self.named_modules() if isinstance(module_type, torch.nn.Embedding) ] non_embedding_parameters = [ parameter for name, parameter in self.named_parameters() if name not in embedding_param_names ] return sum(p.numel() for p in non_embedding_parameters if p.requires_grad or not only_trainable) else: return sum(p.numel() for p in self.parameters() if p.requires_grad or not only_trainable) def unwrap_model(model: torch.nn.Module) -> torch.nn.Module: """ Recursively unwraps a model from potential containers (as used in distributed training). Args: model (`torch.nn.Module`): The model to unwrap. """ # since there could be multiple levels of wrapping, unwrap recursively if hasattr(model, "module"): return unwrap_model(model.module) else: return model
diffusers_all-main
src/diffusers/modeling_utils.py
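A save/load round-trip sketch for the `ModelMixin` machinery above, assuming `UNetModel` (exported from the package `__init__`) follows this interface; the Hub id is a hypothetical placeholder, and the only claim taken from the code is that weights are written as WEIGHTS_NAME ("diffusion_model.pt") alongside the model config.

# sketch: ModelMixin save_pretrained / from_pretrained round trip (hypothetical checkpoint id)
from diffusers import UNetModel

model = UNetModel.from_pretrained("your-namespace/your-unet")      # hypothetical Hub repo
model.save_pretrained("./my-unet")                                  # writes the config plus diffusion_model.pt
reloaded = UNetModel.from_pretrained("./my-unet")                   # local-directory branch of from_pretrained
print(reloaded.num_parameters(only_trainable=True), reloaded.device, reloaded.dtype)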
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Utilities to dynamically load objects from the Hub.""" import importlib import os import re import shutil import sys from pathlib import Path from typing import Dict, Optional, Union from huggingface_hub import cached_download from .utils import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging logger = logging.get_logger(__name__) # pylint: disable=invalid-name def init_hf_modules(): """ Creates the cache directory for modules with an init, and adds it to the Python path. """ # This function has already been executed if HF_MODULES_CACHE already is in the Python path. if HF_MODULES_CACHE in sys.path: return sys.path.append(HF_MODULES_CACHE) os.makedirs(HF_MODULES_CACHE, exist_ok=True) init_path = Path(HF_MODULES_CACHE) / "__init__.py" if not init_path.exists(): init_path.touch() def create_dynamic_module(name: Union[str, os.PathLike]): """ Creates a dynamic module in the cache directory for modules. """ init_hf_modules() dynamic_module_path = Path(HF_MODULES_CACHE) / name # If the parent module does not exist yet, recursively create it. if not dynamic_module_path.parent.exists(): create_dynamic_module(dynamic_module_path.parent) os.makedirs(dynamic_module_path, exist_ok=True) init_path = dynamic_module_path / "__init__.py" if not init_path.exists(): init_path.touch() def get_relative_imports(module_file): """ Get the list of modules that are relatively imported in a module file. Args: module_file (`str` or `os.PathLike`): The module file to inspect. """ with open(module_file, "r", encoding="utf-8") as f: content = f.read() # Imports of the form `import .xxx` relative_imports = re.findall("^\s*import\s+\.(\S+)\s*$", content, flags=re.MULTILINE) # Imports of the form `from .xxx import yyy` relative_imports += re.findall("^\s*from\s+\.(\S+)\s+import", content, flags=re.MULTILINE) # Unique-ify return list(set(relative_imports)) def get_relative_import_files(module_file): """ Get the list of all files that are needed for a given module. Note that this function recurses through the relative imports (if a imports b and b imports c, it will return module files for b and c). Args: module_file (`str` or `os.PathLike`): The module file to inspect. """ no_change = False files_to_check = [module_file] all_relative_imports = [] # Let's recurse through all relative imports while not no_change: new_imports = [] for f in files_to_check: new_imports.extend(get_relative_imports(f)) module_path = Path(module_file).parent new_import_files = [str(module_path / m) for m in new_imports] new_import_files = [f for f in new_import_files if f not in all_relative_imports] files_to_check = [f"{f}.py" for f in new_import_files] no_change = len(new_import_files) == 0 all_relative_imports.extend(files_to_check) return all_relative_imports def check_imports(filename): """ Check if the current Python environment contains all the libraries that are imported in a file. 
""" with open(filename, "r", encoding="utf-8") as f: content = f.read() # Imports of the form `import xxx` imports = re.findall("^\s*import\s+(\S+)\s*$", content, flags=re.MULTILINE) # Imports of the form `from xxx import yyy` imports += re.findall("^\s*from\s+(\S+)\s+import", content, flags=re.MULTILINE) # Only keep the top-level module imports = [imp.split(".")[0] for imp in imports if not imp.startswith(".")] # Unique-ify and test we got them all imports = list(set(imports)) missing_packages = [] for imp in imports: try: importlib.import_module(imp) except ImportError: missing_packages.append(imp) if len(missing_packages) > 0: raise ImportError( "This modeling file requires the following packages that were not found in your environment: " f"{', '.join(missing_packages)}. Run `pip install {' '.join(missing_packages)}`" ) return get_relative_imports(filename) def get_class_in_module(class_name, module_path): """ Import a module on the cache directory for modules and extract a class from it. """ module_path = module_path.replace(os.path.sep, ".") module = importlib.import_module(module_path) return getattr(module, class_name) def get_cached_module_file( pretrained_model_name_or_path: Union[str, os.PathLike], module_file: str, cache_dir: Optional[Union[str, os.PathLike]] = None, force_download: bool = False, resume_download: bool = False, proxies: Optional[Dict[str, str]] = None, use_auth_token: Optional[Union[bool, str]] = None, revision: Optional[str] = None, local_files_only: bool = False, ): """ Prepares Downloads a module from a local folder or a distant repo and returns its path inside the cached Transformers module. Args: pretrained_model_name_or_path (`str` or `os.PathLike`): This can be either: - a string, the *model id* of a pretrained model configuration hosted inside a model repo on huggingface.co. Valid model ids can be located at the root-level, like `bert-base-uncased`, or namespaced under a user or organization name, like `dbmdz/bert-base-german-cased`. - a path to a *directory* containing a configuration file saved using the [`~PreTrainedTokenizer.save_pretrained`] method, e.g., `./my_model_directory/`. module_file (`str`): The name of the module file containing the class to look for. cache_dir (`str` or `os.PathLike`, *optional*): Path to a directory in which a downloaded pretrained model configuration should be cached if the standard cache should not be used. force_download (`bool`, *optional*, defaults to `False`): Whether or not to force to (re-)download the configuration files and override the cached versions if they exist. resume_download (`bool`, *optional*, defaults to `False`): Whether or not to delete incompletely received file. Attempts to resume the download if such a file exists. proxies (`Dict[str, str]`, *optional*): A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request. use_auth_token (`str` or *bool*, *optional*): The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated when running `transformers-cli login` (stored in `~/.huggingface`). revision (`str`, *optional*, defaults to `"main"`): The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any identifier allowed by git. 
local_files_only (`bool`, *optional*, defaults to `False`): If `True`, will only try to load the tokenizer configuration from local files. <Tip> Passing `use_auth_token=True` is required when you want to use a private model. </Tip> Returns: `str`: The path to the module inside the cache. """ # Download and cache module_file from the repo `pretrained_model_name_or_path` of grab it if it's a local file. pretrained_model_name_or_path = str(pretrained_model_name_or_path) module_file_or_url = os.path.join(pretrained_model_name_or_path, module_file) submodule = "local" if os.path.isfile(module_file_or_url): resolved_module_file = module_file_or_url else: try: # Load from URL or cache if already cached resolved_module_file = cached_download( module_file_or_url, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, use_auth_token=use_auth_token, ) except EnvironmentError: logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.") raise # Check we have all the requirements in our environment modules_needed = check_imports(resolved_module_file) # Now we move the module inside our cached dynamic modules. full_submodule = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule create_dynamic_module(full_submodule) submodule_path = Path(HF_MODULES_CACHE) / full_submodule # We always copy local files (we could hash the file to see if there was a change, and give them the name of # that hash, to only copy when there is a modification but it seems overkill for now). # The only reason we do the copy is to avoid putting too many folders in sys.path. shutil.copy(resolved_module_file, submodule_path / module_file) for module_needed in modules_needed: module_needed = f"{module_needed}.py" shutil.copy(os.path.join(pretrained_model_name_or_path, module_needed), submodule_path / module_needed) return os.path.join(full_submodule, module_file) def get_class_from_dynamic_module( pretrained_model_name_or_path: Union[str, os.PathLike], module_file: str, class_name: str, cache_dir: Optional[Union[str, os.PathLike]] = None, force_download: bool = False, resume_download: bool = False, proxies: Optional[Dict[str, str]] = None, use_auth_token: Optional[Union[bool, str]] = None, revision: Optional[str] = None, local_files_only: bool = False, **kwargs, ): """ Extracts a class from a module file, present in the local folder or repository of a model. <Tip warning={true}> Calling this function will execute the code in the module file found locally or downloaded from the Hub. It should therefore only be called on trusted repos. </Tip> Args: pretrained_model_name_or_path (`str` or `os.PathLike`): This can be either: - a string, the *model id* of a pretrained model configuration hosted inside a model repo on huggingface.co. Valid model ids can be located at the root-level, like `bert-base-uncased`, or namespaced under a user or organization name, like `dbmdz/bert-base-german-cased`. - a path to a *directory* containing a configuration file saved using the [`~PreTrainedTokenizer.save_pretrained`] method, e.g., `./my_model_directory/`. module_file (`str`): The name of the module file containing the class to look for. class_name (`str`): The name of the class to import in the module. cache_dir (`str` or `os.PathLike`, *optional*): Path to a directory in which a downloaded pretrained model configuration should be cached if the standard cache should not be used. 
force_download (`bool`, *optional*, defaults to `False`): Whether or not to force to (re-)download the configuration files and override the cached versions if they exist. resume_download (`bool`, *optional*, defaults to `False`): Whether or not to delete incompletely received file. Attempts to resume the download if such a file exists. proxies (`Dict[str, str]`, *optional*): A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request. use_auth_token (`str` or `bool`, *optional*): The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated when running `transformers-cli login` (stored in `~/.huggingface`). revision (`str`, *optional*, defaults to `"main"`): The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any identifier allowed by git. local_files_only (`bool`, *optional*, defaults to `False`): If `True`, will only try to load the tokenizer configuration from local files. <Tip> Passing `use_auth_token=True` is required when you want to use a private model. </Tip> Returns: `type`: The class, dynamically imported from the module. Examples: ```python # Download module `modeling.py` from huggingface.co and cache then extract the class `MyBertModel` from this # module. cls = get_class_from_dynamic_module("sgugger/my-bert-model", "modeling.py", "MyBertModel") ```""" # And lastly we get the class inside our newly created module final_module = get_cached_module_file( pretrained_model_name_or_path, module_file, cache_dir=cache_dir, force_download=force_download, resume_download=resume_download, proxies=proxies, use_auth_token=use_auth_token, revision=revision, local_files_only=local_files_only, ) return get_class_in_module(class_name, final_module.replace(".py", ""))
diffusers_all-main
src/diffusers/dynamic_modules_utils.py
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team.
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import importlib
import os
from typing import Optional, Union

from huggingface_hub import snapshot_download

from .configuration_utils import ConfigMixin
from .utils import DIFFUSERS_CACHE, logging


INDEX_FILE = "diffusion_model.pt"


logger = logging.get_logger(__name__)


LOADABLE_CLASSES = {
    "diffusers": {
        "ModelMixin": ["save_pretrained", "from_pretrained"],
        "SchedulerMixin": ["save_config", "from_config"],
        "DiffusionPipeline": ["save_pretrained", "from_pretrained"],
    },
    "transformers": {
        "PreTrainedTokenizer": ["save_pretrained", "from_pretrained"],
        "PreTrainedTokenizerFast": ["save_pretrained", "from_pretrained"],
        "PreTrainedModel": ["save_pretrained", "from_pretrained"],
    },
}

ALL_IMPORTABLE_CLASSES = {}
for library in LOADABLE_CLASSES:
    ALL_IMPORTABLE_CLASSES.update(LOADABLE_CLASSES[library])


class DiffusionPipeline(ConfigMixin):

    config_name = "model_index.json"

    def register_modules(self, **kwargs):
        # import it here to avoid circular import
        from diffusers import pipelines

        for name, module in kwargs.items():
            # retrieve library
            library = module.__module__.split(".")[0]

            # check if the module is a pipeline module
            pipeline_file = module.__module__.split(".")[-1]
            pipeline_dir = module.__module__.split(".")[-2]
            is_pipeline_module = pipeline_file == "pipeline_" + pipeline_dir and hasattr(pipelines, pipeline_dir)

            # if library is not in LOADABLE_CLASSES, then it is a custom module.
            # Or if it's a pipeline module, then the module is inside the pipeline
            # folder so we set the library to module name.
            if library not in LOADABLE_CLASSES or is_pipeline_module:
                library = pipeline_dir

            # retrieve class_name
            class_name = module.__class__.__name__

            register_dict = {name: (library, class_name)}

            # save model index config
            self.register_to_config(**register_dict)

            # set models
            setattr(self, name, module)

    def save_pretrained(self, save_directory: Union[str, os.PathLike]):
        self.save_config(save_directory)

        model_index_dict = dict(self.config)
        model_index_dict.pop("_class_name")
        model_index_dict.pop("_diffusers_version")
        model_index_dict.pop("_module", None)

        for pipeline_component_name in model_index_dict.keys():
            sub_model = getattr(self, pipeline_component_name)
            model_cls = sub_model.__class__

            save_method_name = None
            # search for the model's base class in LOADABLE_CLASSES
            for library_name, library_classes in LOADABLE_CLASSES.items():
                library = importlib.import_module(library_name)
                for base_class, save_load_methods in library_classes.items():
                    class_candidate = getattr(library, base_class)
                    if issubclass(model_cls, class_candidate):
                        # if we found a suitable base class in LOADABLE_CLASSES then grab its save method
                        save_method_name = save_load_methods[0]
                        break
                if save_method_name is not None:
                    break

            save_method = getattr(sub_model, save_method_name)
            save_method(os.path.join(save_directory, pipeline_component_name))

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.PathLike]], **kwargs):
        r"""
        Add docstrings
        """
        cache_dir = kwargs.pop("cache_dir", DIFFUSERS_CACHE)
        resume_download = kwargs.pop("resume_download", False)
        proxies = kwargs.pop("proxies", None)
        local_files_only = kwargs.pop("local_files_only", False)
        use_auth_token = kwargs.pop("use_auth_token", None)

        # 1. Download the checkpoints and configs
        # use snapshot download here to get it working from from_pretrained
        if not os.path.isdir(pretrained_model_name_or_path):
            cached_folder = snapshot_download(
                pretrained_model_name_or_path,
                cache_dir=cache_dir,
                resume_download=resume_download,
                proxies=proxies,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
            )
        else:
            cached_folder = pretrained_model_name_or_path

        config_dict = cls.get_config_dict(cached_folder)

        # 2. Load the pipeline class, if using custom module then load it from the hub
        # if we load from explicit class, let's use it
        if cls != DiffusionPipeline:
            pipeline_class = cls
        else:
            diffusers_module = importlib.import_module(cls.__module__.split(".")[0])
            pipeline_class = getattr(diffusers_module, config_dict["_class_name"])

        init_dict, _ = pipeline_class.extract_init_dict(config_dict, **kwargs)

        init_kwargs = {}

        # import it here to avoid circular import
        from diffusers import pipelines

        # 3. Load each module in the pipeline
        for name, (library_name, class_name) in init_dict.items():
            is_pipeline_module = hasattr(pipelines, library_name)

            # if the model is in a pipeline module, then we load it from the pipeline
            if is_pipeline_module:
                pipeline_module = getattr(pipelines, library_name)
                class_obj = getattr(pipeline_module, class_name)
                importable_classes = ALL_IMPORTABLE_CLASSES
                class_candidates = {c: class_obj for c in importable_classes.keys()}
            else:
                # else we just import it from the library.
                library = importlib.import_module(library_name)
                class_obj = getattr(library, class_name)
                importable_classes = LOADABLE_CLASSES[library_name]
                class_candidates = {c: getattr(library, c) for c in importable_classes.keys()}

            load_method_name = None
            for class_name, class_candidate in class_candidates.items():
                if issubclass(class_obj, class_candidate):
                    load_method_name = importable_classes[class_name][1]

            load_method = getattr(class_obj, load_method_name)

            # check if the module is in a subdirectory
            if os.path.isdir(os.path.join(cached_folder, name)):
                loaded_sub_model = load_method(os.path.join(cached_folder, name))
            else:
                # else load from the root directory
                loaded_sub_model = load_method(cached_folder)

            init_kwargs[name] = loaded_sub_model  # UNet(...), DiffusionSchedule(...)

        # 4. Instantiate the pipeline
        model = pipeline_class(**init_kwargs)
        return model
diffusers_all-main
src/diffusers/pipeline_utils.py
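A round-trip sketch for the `DiffusionPipeline` logic above; the Hub id is a hypothetical placeholder. `from_pretrained` resolves the concrete pipeline class from `_class_name` in model_index.json, and `save_pretrained` writes one subfolder per registered module.

# sketch: generic pipeline load / save / reload (hypothetical checkpoint id)
from diffusers import DiffusionPipeline

pipeline = DiffusionPipeline.from_pretrained("your-namespace/your-pipeline")  # hypothetical Hub repo
pipeline.save_pretrained("./pipeline-local")   # model_index.json + one folder per sub-module
pipeline = DiffusionPipeline.from_pretrained("./pipeline-local")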
from ..utils import is_inflect_available, is_transformers_available, is_unidecode_available
from .bddm import BDDMPipeline
from .ddim import DDIMPipeline
from .ddpm import DDPMPipeline
from .latent_diffusion_uncond import LatentDiffusionUncondPipeline
from .pndm import PNDMPipeline
from .score_sde_ve import ScoreSdeVePipeline
from .score_sde_vp import ScoreSdeVpPipeline


if is_transformers_available():
    from .glide import GlidePipeline
    from .latent_diffusion import LatentDiffusionPipeline

if is_transformers_available() and is_unidecode_available() and is_inflect_available():
    from .grad_tts import GradTTSPipeline
diffusers_all-main
src/diffusers/pipelines/__init__.py
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import torch

import tqdm

from ...pipeline_utils import DiffusionPipeline


class DDIMPipeline(DiffusionPipeline):
    def __init__(self, unet, noise_scheduler):
        super().__init__()
        noise_scheduler = noise_scheduler.set_format("pt")
        self.register_modules(unet=unet, noise_scheduler=noise_scheduler)

    def __call__(self, batch_size=1, generator=None, torch_device=None, eta=0.0, num_inference_steps=50):
        # eta corresponds to η in the DDIM paper and should be between [0, 1]
        if torch_device is None:
            torch_device = "cuda" if torch.cuda.is_available() else "cpu"

        num_trained_timesteps = self.noise_scheduler.config.timesteps
        inference_step_times = range(0, num_trained_timesteps, num_trained_timesteps // num_inference_steps)

        self.unet.to(torch_device)

        # Sample gaussian noise to begin loop
        image = torch.randn(
            (batch_size, self.unet.in_channels, self.unet.resolution, self.unet.resolution),
            generator=generator,
        )
        image = image.to(torch_device)

        # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
        # Ideally, read the DDIM paper for an in-detail understanding.
        # Notation (<variable name> -> <name in paper>):
        # - pred_noise_t -> e_theta(x_t, t)
        # - pred_original_image -> f_theta(x_t, t) or x_0
        # - std_dev_t -> sigma_t
        # - eta -> η
        # - pred_image_direction -> "direction pointing to x_t"
        # - pred_prev_image -> "x_t-1"
        for t in tqdm.tqdm(reversed(range(num_inference_steps)), total=num_inference_steps):
            # 1. predict noise residual
            with torch.no_grad():
                residual = self.unet(image, inference_step_times[t])

            # 2. predict previous mean of image x_t-1
            pred_prev_image = self.noise_scheduler.step(residual, image, t, num_inference_steps, eta)

            # 3. optionally sample variance
            variance = 0
            if eta > 0:
                noise = torch.randn(image.shape, generator=generator).to(image.device)
                variance = self.noise_scheduler.get_variance(t, num_inference_steps).sqrt() * eta * noise

            # 4. set current image to prev_image: x_t -> x_t-1
            image = pred_prev_image + variance

        return image
diffusers_all-main
src/diffusers/pipelines/ddim/pipeline_ddim.py
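An end-to-end sampling sketch for the `DDIMPipeline` above; the checkpoint id is a hypothetical placeholder, and `eta=0.0` keeps step 3 deterministic (no variance is added).

# sketch: deterministic DDIM sampling with the __call__ signature shown above
import torch

from diffusers import DDIMPipeline

pipeline = DDIMPipeline.from_pretrained("your-namespace/your-ddpm")  # hypothetical Hub repo
generator = torch.manual_seed(0)
image = pipeline(batch_size=1, generator=generator, eta=0.0, num_inference_steps=50)
print(image.shape)  # (1, unet.in_channels, unet.resolution, unet.resolution)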
from .pipeline_ddim import DDIMPipeline
diffusers_all-main
src/diffusers/pipelines/ddim/__init__.py
# coding=utf-8 # Copyright 2022 The OpenAI Team Authors and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PyTorch CLIP model.""" import math from dataclasses import dataclass from typing import Any, Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn import tqdm from transformers import CLIPConfig, CLIPModel, CLIPTextConfig, CLIPVisionConfig, GPT2Tokenizer from transformers.activations import ACT2FN from transformers.modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling from transformers.modeling_utils import PreTrainedModel from transformers.utils import ModelOutput, add_start_docstrings_to_model_forward, replace_return_docstrings from ...models import GlideSuperResUNetModel, GlideTextToImageUNetModel from ...pipeline_utils import DiffusionPipeline from ...schedulers import DDIMScheduler, DDPMScheduler from ...utils import logging ##################### # START OF THE CLIP MODEL COPY-PASTE (with a modified attention module) ##################### logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = "fusing/glide-base" CLIP_PRETRAINED_MODEL_ARCHIVE_LIST = [ "fusing/glide-base", # See all CLIP models at https://huggingface.co/models?filter=clip ] # Copied from transformers.models.bart.modeling_bart._expand_mask def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None): """ Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`. """ bsz, src_len = mask.size() tgt_len = tgt_len if tgt_len is not None else src_len expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype) inverted_mask = 1.0 - expanded_mask return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min) # contrastive loss function, adapted from # https://sachinruk.github.io/blog/pytorch/pytorch%20lightning/loss%20function/gpu/2021/03/07/CLIP.html def contrastive_loss(logits: torch.Tensor) -> torch.Tensor: return nn.functional.cross_entropy(logits, torch.arange(len(logits), device=logits.device)) def clip_loss(similarity: torch.Tensor) -> torch.Tensor: caption_loss = contrastive_loss(similarity) image_loss = contrastive_loss(similarity.T) return (caption_loss + image_loss) / 2.0 @dataclass class CLIPOutput(ModelOutput): """ Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `return_loss` is `True`): Contrastive loss for image-text similarity. logits_per_image:(`torch.FloatTensor` of shape `(image_batch_size, text_batch_size)`): The scaled dot product scores between `image_embeds` and `text_embeds`. This represents the image-text similarity scores. logits_per_text:(`torch.FloatTensor` of shape `(text_batch_size, image_batch_size)`): The scaled dot product scores between `text_embeds` and `image_embeds`. This represents the text-image similarity scores. 
text_embeds(`torch.FloatTensor` of shape `(batch_size, output_dim`): The text embeddings obtained by applying the projection layer to the pooled output of [`CLIPTextModel`]. image_embeds(`torch.FloatTensor` of shape `(batch_size, output_dim`): The image embeddings obtained by applying the projection layer to the pooled output of [`CLIPVisionModel`]. text_model_output(`BaseModelOutputWithPooling`): The output of the [`CLIPTextModel`]. vision_model_output(`BaseModelOutputWithPooling`): The output of the [`CLIPVisionModel`]. """ loss: Optional[torch.FloatTensor] = None logits_per_image: torch.FloatTensor = None logits_per_text: torch.FloatTensor = None text_embeds: torch.FloatTensor = None image_embeds: torch.FloatTensor = None text_model_output: BaseModelOutputWithPooling = None vision_model_output: BaseModelOutputWithPooling = None def to_tuple(self) -> Tuple[Any]: return tuple( self[k] if k not in ["text_model_output", "vision_model_output"] else getattr(self, k).to_tuple() for k in self.keys() ) class CLIPVisionEmbeddings(nn.Module): def __init__(self, config: CLIPVisionConfig): super().__init__() self.config = config self.embed_dim = config.hidden_size self.image_size = config.image_size self.patch_size = config.patch_size self.class_embedding = nn.Parameter(torch.randn(self.embed_dim)) self.patch_embedding = nn.Conv2d( in_channels=3, out_channels=self.embed_dim, kernel_size=self.patch_size, stride=self.patch_size, bias=False ) self.num_patches = (self.image_size // self.patch_size) ** 2 self.num_positions = self.num_patches + 1 self.position_embedding = nn.Embedding(self.num_positions, self.embed_dim) self.register_buffer("position_ids", torch.arange(self.num_positions).expand((1, -1))) def forward(self, pixel_values: torch.FloatTensor) -> torch.Tensor: batch_size = pixel_values.shape[0] patch_embeds = self.patch_embedding(pixel_values) # shape = [*, width, grid, grid] patch_embeds = patch_embeds.flatten(2).transpose(1, 2) class_embeds = self.class_embedding.expand(batch_size, 1, -1) embeddings = torch.cat([class_embeds, patch_embeds], dim=1) embeddings = embeddings + self.position_embedding(self.position_ids) return embeddings class CLIPTextEmbeddings(nn.Module): def __init__(self, config: CLIPTextConfig): super().__init__() embed_dim = config.hidden_size self.token_embedding = nn.Embedding(config.vocab_size, embed_dim) self.position_embedding = nn.Embedding(config.max_position_embeddings, embed_dim) self.use_padding_embeddings = config.use_padding_embeddings if self.use_padding_embeddings: self.padding_embedding = nn.Embedding(config.max_position_embeddings, embed_dim) # position_ids (1, len position emb) is contiguous in memory and exported when serialized self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1))) def forward( self, input_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, attention_mask: Optional[torch.Tensor] = None, ) -> torch.Tensor: seq_length = input_ids.shape[-1] if input_ids is not None else inputs_embeds.shape[-2] if position_ids is None: position_ids = self.position_ids[:, :seq_length] if inputs_embeds is None: inputs_embeds = self.token_embedding(input_ids) position_embeddings = self.position_embedding(position_ids) embeddings = inputs_embeds + position_embeddings if self.use_padding_embeddings and attention_mask is not None: padding_embeddings = self.padding_embedding(position_ids) embeddings = 
torch.where(attention_mask.bool().unsqueeze(-1), embeddings, padding_embeddings) return embeddings class CLIPAttention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__(self, config): super().__init__() self.config = config self.embed_dim = config.hidden_size self.num_heads = config.num_attention_heads self.head_dim = self.embed_dim // self.num_heads if self.head_dim * self.num_heads != self.embed_dim: raise ValueError( f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:" f" {self.num_heads})." ) self.scale = 1 / math.sqrt(math.sqrt(self.head_dim)) self.qkv_proj = nn.Linear(self.embed_dim, self.embed_dim * 3) self.out_proj = nn.Linear(self.embed_dim, self.embed_dim) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, causal_attention_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = False, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: """Input shape: Batch x Time x Channel""" bsz, tgt_len, embed_dim = hidden_states.size() qkv_states = self.qkv_proj(hidden_states) qkv_states = qkv_states.view(bsz, tgt_len, self.num_heads, -1) query_states, key_states, value_states = torch.split(qkv_states, self.head_dim, dim=-1) attn_weights = torch.einsum("bthc,bshc->bhts", query_states * self.scale, key_states * self.scale) wdtype = attn_weights.dtype attn_weights = nn.functional.softmax(attn_weights.float(), dim=-1).type(wdtype) attn_output = torch.einsum("bhts,bshc->bthc", attn_weights, value_states) attn_output = attn_output.reshape(bsz, tgt_len, -1) attn_output = self.out_proj(attn_output) return attn_output, attn_weights class CLIPMLP(nn.Module): def __init__(self, config): super().__init__() self.config = config self.activation_fn = ACT2FN[config.hidden_act] self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size) self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.fc1(hidden_states) hidden_states = self.activation_fn(hidden_states) hidden_states = self.fc2(hidden_states) return hidden_states class CLIPEncoderLayer(nn.Module): def __init__(self, config: CLIPConfig): super().__init__() self.embed_dim = config.hidden_size self.self_attn = CLIPAttention(config) self.layer_norm1 = nn.LayerNorm(self.embed_dim) self.mlp = CLIPMLP(config) self.layer_norm2 = nn.LayerNorm(self.embed_dim) def forward( self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, causal_attention_mask: torch.Tensor, output_attentions: Optional[bool] = False, ) -> Tuple[torch.FloatTensor]: """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`): attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. `(config.encoder_attention_heads,)`. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. 
""" residual = hidden_states hidden_states = self.layer_norm1(hidden_states) hidden_states, attn_weights = self.self_attn( hidden_states=hidden_states, attention_mask=attention_mask, causal_attention_mask=causal_attention_mask, output_attentions=output_attentions, ) hidden_states = residual + hidden_states residual = hidden_states hidden_states = self.layer_norm2(hidden_states) hidden_states = self.mlp(hidden_states) hidden_states = residual + hidden_states outputs = (hidden_states,) if output_attentions: outputs += (attn_weights,) return outputs class CLIPPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = CLIPConfig base_model_prefix = "clip" supports_gradient_checkpointing = True _keys_to_ignore_on_load_missing = [r"position_ids"] def _init_weights(self, module): """Initialize the weights""" factor = self.config.initializer_factor if isinstance(module, CLIPTextEmbeddings): module.token_embedding.weight.data.normal_(mean=0.0, std=factor * 0.02) module.position_embedding.weight.data.normal_(mean=0.0, std=factor * 0.02) if hasattr(module, "padding_embedding"): module.padding_embedding.weight.data.normal_(mean=0.0, std=factor * 0.02) elif isinstance(module, CLIPVisionEmbeddings): factor = self.config.initializer_factor nn.init.normal_(module.class_embedding, mean=0.0, std=module.embed_dim**-0.5 * factor) nn.init.normal_(module.patch_embedding.weight, std=module.config.initializer_range * factor) nn.init.normal_(module.position_embedding.weight, std=module.config.initializer_range * factor) elif isinstance(module, CLIPAttention): factor = self.config.initializer_factor in_proj_std = (module.embed_dim**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor out_proj_std = (module.embed_dim**-0.5) * factor nn.init.normal_(module.qkv_proj.weight, std=in_proj_std) nn.init.normal_(module.out_proj.weight, std=out_proj_std) elif isinstance(module, CLIPMLP): factor = self.config.initializer_factor in_proj_std = ( (module.config.hidden_size**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor ) fc_std = (2 * module.config.hidden_size) ** -0.5 * factor nn.init.normal_(module.fc1.weight, std=fc_std) nn.init.normal_(module.fc2.weight, std=in_proj_std) elif isinstance(module, CLIPModel): nn.init.normal_( module.text_projection.weight, std=module.text_embed_dim**-0.5 * self.config.initializer_factor, ) nn.init.normal_( module.visual_projection.weight, std=module.vision_embed_dim**-0.5 * self.config.initializer_factor, ) if isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) if isinstance(module, nn.Linear) and module.bias is not None: module.bias.data.zero_() def _set_gradient_checkpointing(self, module, value=False): if isinstance(module, CLIPEncoder): module.gradient_checkpointing = value CLIP_START_DOCSTRING = r""" This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`CLIPConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. 
""" CLIP_TEXT_INPUTS_DOCSTRING = r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`CLIPTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. [What are position IDs?](../glossary#position-ids) output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ CLIP_VISION_INPUTS_DOCSTRING = r""" Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using [`CLIPFeatureExtractor`]. See [`CLIPFeatureExtractor.__call__`] for details. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ CLIP_INPUTS_DOCSTRING = r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`CLIPTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. [What are position IDs?](../glossary#position-ids) pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using [`CLIPFeatureExtractor`]. See [`CLIPFeatureExtractor.__call__`] for details. return_loss (`bool`, *optional*): Whether or not to return the contrastive loss. 
output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ class CLIPEncoder(nn.Module): """ Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a [`CLIPEncoderLayer`]. Args: config: CLIPConfig """ def __init__(self, config: CLIPConfig): super().__init__() self.config = config self.layers = nn.ModuleList([CLIPEncoderLayer(config) for _ in range(config.num_hidden_layers)]) self.gradient_checkpointing = False def forward( self, inputs_embeds, attention_mask: Optional[torch.Tensor] = None, causal_attention_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, BaseModelOutput]: r""" Args: inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) causal_attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Causal mask for the text model. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict encoder_states = () if output_hidden_states else None all_attentions = () if output_attentions else None hidden_states = inputs_embeds for idx, encoder_layer in enumerate(self.layers): if output_hidden_states: encoder_states = encoder_states + (hidden_states,) if self.gradient_checkpointing and self.training: def create_custom_forward(module): def custom_forward(*inputs): return module(*inputs, output_attentions) return custom_forward layer_outputs = torch.utils.checkpoint.checkpoint( create_custom_forward(encoder_layer), hidden_states, attention_mask, causal_attention_mask, ) else: layer_outputs = encoder_layer( hidden_states, attention_mask, causal_attention_mask, output_attentions=output_attentions, ) hidden_states = layer_outputs[0] if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) if output_hidden_states: encoder_states = encoder_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None) return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions ) class CLIPTextTransformer(nn.Module): def __init__(self, config: CLIPTextConfig): super().__init__() self.config = config embed_dim = config.hidden_size self.embeddings = CLIPTextEmbeddings(config) self.encoder = CLIPEncoder(config) self.final_layer_norm = nn.LayerNorm(embed_dim) @add_start_docstrings_to_model_forward(CLIP_TEXT_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=CLIPTextConfig) def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, BaseModelOutputWithPooling]: r""" Returns: """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is None: raise ValueError("You have to specify either input_ids") input_shape = input_ids.size() input_ids = input_ids.view(-1, input_shape[-1]) hidden_states = self.embeddings(input_ids=input_ids, position_ids=position_ids, attention_mask=attention_mask) bsz, seq_len = input_shape # CLIP's text model uses causal mask, prepare it here. 
# https://github.com/openai/CLIP/blob/cfcffb90e69f37bf2ff1e988237a0fbe41f33c04/clip/model.py#L324 # causal_attention_mask = self._build_causal_attention_mask(bsz, seq_len).to(hidden_states.device) # expand attention_mask if attention_mask is not None: # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] attention_mask = _expand_mask(attention_mask, hidden_states.dtype) encoder_outputs = self.encoder( inputs_embeds=hidden_states, attention_mask=None, causal_attention_mask=None, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) last_hidden_state = encoder_outputs[0] last_hidden_state = self.final_layer_norm(last_hidden_state) # text_embeds.shape = [batch_size, sequence_length, transformer.width] # take features from the eot embedding (eot_token is the highest number in each sequence) pooled_output = last_hidden_state[torch.arange(last_hidden_state.shape[0]), input_ids.argmax(dim=-1)] if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPooling( last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, ) def _build_causal_attention_mask(self, bsz, seq_len): # lazily create causal attention mask, with full attention between the vision tokens # pytorch uses additive attention mask; fill with -inf mask = torch.empty(bsz, seq_len, seq_len) mask.fill_(torch.tensor(float("-inf"))) mask.triu_(1) # zero out the lower diagonal mask = mask.unsqueeze(1) # expand mask return mask class CLIPTextModel(CLIPPreTrainedModel): config_class = CLIPTextConfig def __init__(self, config: CLIPTextConfig): super().__init__(config) self.text_model = CLIPTextTransformer(config) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self) -> nn.Module: return self.text_model.embeddings.token_embedding def set_input_embeddings(self, value): self.text_model.embeddings.token_embedding = value @add_start_docstrings_to_model_forward(CLIP_TEXT_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=CLIPTextConfig) def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, BaseModelOutputWithPooling]: r""" Returns: Examples: ```python >>> from transformers import CLIPTokenizer, CLIPTextModel >>> model = CLIPTextModel.from_pretrained("openai/clip-vit-base-patch32") >>> tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32") >>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="pt") >>> outputs = model(**inputs) >>> last_hidden_state = outputs.last_hidden_state >>> pooled_output = outputs.pooler_output # pooled (EOS token) states ```""" return self.text_model( input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) ##################### # END OF THE CLIP MODEL COPY-PASTE ##################### def _extract_into_tensor(arr, timesteps, broadcast_shape): """ Extract values from a 1-D numpy array for a batch of indices. :param arr: the 1-D numpy array. :param timesteps: a tensor of indices into the array to extract. 
:param broadcast_shape: a larger shape of K dimensions with the batch dimension equal to the length of timesteps. :return: a tensor of shape [batch_size, 1, ...] where the shape has K dims. """ res = torch.from_numpy(arr).to(device=timesteps.device)[timesteps].float() while len(res.shape) < len(broadcast_shape): res = res[..., None] return res + torch.zeros(broadcast_shape, device=timesteps.device) class GlidePipeline(DiffusionPipeline): def __init__( self, text_unet: GlideTextToImageUNetModel, text_noise_scheduler: DDPMScheduler, text_encoder: CLIPTextModel, tokenizer: GPT2Tokenizer, upscale_unet: GlideSuperResUNetModel, upscale_noise_scheduler: DDIMScheduler, ): super().__init__() self.register_modules( text_unet=text_unet, text_noise_scheduler=text_noise_scheduler, text_encoder=text_encoder, tokenizer=tokenizer, upscale_unet=upscale_unet, upscale_noise_scheduler=upscale_noise_scheduler, ) @torch.no_grad() def __call__( self, prompt, generator=None, torch_device=None, num_inference_steps_upscale=50, guidance_scale=3.0, eta=0.0, upsample_temp=0.997, ): torch_device = "cuda" if torch.cuda.is_available() else "cpu" self.text_unet.to(torch_device) self.text_encoder.to(torch_device) self.upscale_unet.to(torch_device) def text_model_fn(x_t, timesteps, transformer_out, **kwargs): half = x_t[: len(x_t) // 2] combined = torch.cat([half, half], dim=0) model_out = self.text_unet(combined, timesteps, transformer_out, **kwargs) eps, rest = model_out[:, :3], model_out[:, 3:] cond_eps, uncond_eps = torch.split(eps, len(eps) // 2, dim=0) half_eps = uncond_eps + guidance_scale * (cond_eps - uncond_eps) eps = torch.cat([half_eps, half_eps], dim=0) return torch.cat([eps, rest], dim=1) # 1. Sample gaussian noise batch_size = 2 # second image is empty for classifier-free guidance image = torch.randn( ( batch_size, self.text_unet.in_channels, self.text_unet.resolution, self.text_unet.resolution, ), generator=generator, ).to(torch_device) # 2. Encode tokens # an empty input is needed to guide the model away from it inputs = self.tokenizer([prompt, ""], padding="max_length", max_length=128, return_tensors="pt") input_ids = inputs["input_ids"].to(torch_device) attention_mask = inputs["attention_mask"].to(torch_device) transformer_out = self.text_encoder(input_ids, attention_mask).last_hidden_state # 3. Run the text2image generation step num_prediction_steps = len(self.text_noise_scheduler) for t in tqdm.tqdm(reversed(range(num_prediction_steps)), total=num_prediction_steps): with torch.no_grad(): time_input = torch.tensor([t] * image.shape[0], device=torch_device) model_output = text_model_fn(image, time_input, transformer_out) noise_residual, model_var_values = torch.split(model_output, 3, dim=1) min_log = self.text_noise_scheduler.get_variance(t, "fixed_small_log") max_log = self.text_noise_scheduler.get_variance(t, "fixed_large_log") # The model_var_values is [-1, 1] for [min_var, max_var]. frac = (model_var_values + 1) / 2 model_log_variance = frac * max_log + (1 - frac) * min_log pred_prev_image = self.text_noise_scheduler.step(noise_residual, image, t) noise = torch.randn(image.shape, generator=generator).to(torch_device) variance = torch.exp(0.5 * model_log_variance) * noise # set current image to prev_image: x_t -> x_t-1 image = pred_prev_image + variance # 4. 
Run the upscaling step batch_size = 1 image = image[:1] low_res = ((image + 1) * 127.5).round() / 127.5 - 1 # Sample gaussian noise to begin loop image = torch.randn( ( batch_size, self.upscale_unet.in_channels // 2, self.upscale_unet.resolution, self.upscale_unet.resolution, ), generator=generator, ).to(torch_device) image = image * upsample_temp num_trained_timesteps = self.upscale_noise_scheduler.timesteps inference_step_times = range(0, num_trained_timesteps, num_trained_timesteps // num_inference_steps_upscale) for t in tqdm.tqdm(reversed(range(num_inference_steps_upscale)), total=num_inference_steps_upscale): # 1. predict noise residual with torch.no_grad(): time_input = torch.tensor([inference_step_times[t]] * image.shape[0], device=torch_device) model_output = self.upscale_unet(image, time_input, low_res) noise_residual, pred_variance = torch.split(model_output, 3, dim=1) # 2. predict previous mean of image x_t-1 pred_prev_image = self.upscale_noise_scheduler.step( noise_residual, image, t, num_inference_steps_upscale, eta, use_clipped_residual=True ) # 3. optionally sample variance variance = 0 if eta > 0: noise = torch.randn(image.shape, generator=generator).to(torch_device) variance = ( self.upscale_noise_scheduler.get_variance(t, num_inference_steps_upscale).sqrt() * eta * noise ) # 4. set current image to prev_image: x_t -> x_t-1 image = pred_prev_image + variance image = image.clamp(-1, 1).permute(0, 2, 3, 1) return image
diffusers_all-main
src/diffusers/pipelines/glide/pipeline_glide.py
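A minimal usage sketch for `GlidePipeline`; note that the pipeline is only exported when `transformers` is available, as the package `__init__.py` that follows shows. Loading assumes all six registered modules (both unets, both schedulers, the CLIP text encoder and the tokenizer) are bundled under the `_CHECKPOINT_FOR_DOC` id referenced above.

import torch

from diffusers import GlidePipeline

pipeline = GlidePipeline.from_pretrained("fusing/glide-base")  # checkpoint id referenced in the docstrings above

generator = torch.manual_seed(0)
# guidance_scale > 1.0 strengthens the text conditioning via classifier-free guidance.
image = pipeline("an oil painting of a corgi", generator=generator, guidance_scale=3.0)
print(image.shape)  # (1, height, width, 3), clamped to [-1, 1] after the upscaling stage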
from ...utils import is_transformers_available if is_transformers_available(): from .pipeline_glide import CLIPTextModel, GlidePipeline
diffusers_all-main
src/diffusers/pipelines/glide/__init__.py
import torch

import tqdm

from ...pipeline_utils import DiffusionPipeline


class LatentDiffusionUncondPipeline(DiffusionPipeline):
    def __init__(self, vqvae, unet, noise_scheduler):
        super().__init__()
        noise_scheduler = noise_scheduler.set_format("pt")
        self.register_modules(vqvae=vqvae, unet=unet, noise_scheduler=noise_scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size=1,
        generator=None,
        torch_device=None,
        eta=0.0,
        num_inference_steps=50,
    ):
        # eta corresponds to η in the paper and should be between [0, 1]
        if torch_device is None:
            torch_device = "cuda" if torch.cuda.is_available() else "cpu"

        self.unet.to(torch_device)
        self.vqvae.to(torch_device)

        num_trained_timesteps = self.noise_scheduler.config.timesteps
        inference_step_times = range(0, num_trained_timesteps, num_trained_timesteps // num_inference_steps)

        image = torch.randn(
            (batch_size, self.unet.in_channels, self.unet.image_size, self.unet.image_size),
            generator=generator,
        ).to(torch_device)

        # See formulas (12) and (16) of the DDIM paper https://arxiv.org/pdf/2010.02502.pdf
        # Ideally, read the DDIM paper for an in-depth understanding
        # Notation (<variable name> -> <name in paper>)
        # - pred_noise_t -> e_theta(x_t, t)
        # - pred_original_image -> f_theta(x_t, t) or x_0
        # - std_dev_t -> sigma_t
        # - eta -> η
        # - pred_image_direction -> "direction pointing to x_t"
        # - pred_prev_image -> "x_t-1"
        for t in tqdm.tqdm(reversed(range(num_inference_steps)), total=num_inference_steps):
            # 1. predict noise residual
            timesteps = torch.tensor([inference_step_times[t]] * image.shape[0], device=torch_device)
            pred_noise_t = self.unet(image, timesteps)

            # 2. predict previous mean of image x_t-1
            pred_prev_image = self.noise_scheduler.step(pred_noise_t, image, t, num_inference_steps, eta)

            # 3. optionally sample variance
            variance = 0
            if eta > 0:
                noise = torch.randn(image.shape, generator=generator).to(image.device)
                variance = self.noise_scheduler.get_variance(t, num_inference_steps).sqrt() * eta * noise

            # 4. set current image to prev_image: x_t -> x_t-1
            image = pred_prev_image + variance

        # decode image with vae
        image = self.vqvae.decode(image)

        return image
diffusers_all-main
src/diffusers/pipelines/latent_diffusion_uncond/pipeline_latent_diffusion_uncond.py
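A minimal usage sketch for `LatentDiffusionUncondPipeline`, assuming `from_pretrained` can restore the three registered modules; the checkpoint id is hypothetical. Sampling happens in latent space, and only the final `vqvae.decode` call maps latents back to pixel space.

import torch

from diffusers import LatentDiffusionUncondPipeline

# Hypothetical checkpoint id -- it must bundle the vqvae, the latent unet and the scheduler.
pipeline = LatentDiffusionUncondPipeline.from_pretrained("fusing/latent-diffusion-celeba-256")

generator = torch.manual_seed(0)
images = pipeline(batch_size=1, generator=generator, eta=0.0, num_inference_steps=50)
print(images.shape)  # decoded images, e.g. (batch_size, 3, height, width)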
from .pipeline_latent_diffusion_uncond import LatentDiffusionUncondPipeline
diffusers_all-main
src/diffusers/pipelines/latent_diffusion_uncond/__init__.py
from ...utils import is_transformers_available if is_transformers_available(): from .pipeline_latent_diffusion import LatentDiffusionPipeline, LDMBertModel
diffusers_all-main
src/diffusers/pipelines/latent_diffusion/__init__.py
from typing import Optional, Tuple, Union import numpy as np import torch import torch.nn as nn import torch.utils.checkpoint import tqdm from transformers.activations import ACT2FN from transformers.configuration_utils import PretrainedConfig from transformers.modeling_outputs import BaseModelOutput from transformers.modeling_utils import PreTrainedModel from transformers.utils import logging from ...pipeline_utils import DiffusionPipeline ################################################################################ # Code for the text transformer model ################################################################################ """ PyTorch LDMBERT model.""" logger = logging.get_logger(__name__) LDMBERT_PRETRAINED_MODEL_ARCHIVE_LIST = [ "ldm-bert", # See all LDMBert models at https://huggingface.co/models?filter=ldmbert ] LDMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = { "ldm-bert": "https://huggingface.co/ldm-bert/resolve/main/config.json", } """ LDMBERT model configuration""" class LDMBertConfig(PretrainedConfig): model_type = "ldmbert" keys_to_ignore_at_inference = ["past_key_values"] attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"} def __init__( self, vocab_size=30522, max_position_embeddings=77, encoder_layers=32, encoder_ffn_dim=5120, encoder_attention_heads=8, head_dim=64, encoder_layerdrop=0.0, activation_function="gelu", d_model=1280, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, classifier_dropout=0.0, scale_embedding=False, use_cache=True, pad_token_id=0, **kwargs, ): self.vocab_size = vocab_size self.max_position_embeddings = max_position_embeddings self.d_model = d_model self.encoder_ffn_dim = encoder_ffn_dim self.encoder_layers = encoder_layers self.encoder_attention_heads = encoder_attention_heads self.head_dim = head_dim self.dropout = dropout self.attention_dropout = attention_dropout self.activation_dropout = activation_dropout self.activation_function = activation_function self.init_std = init_std self.encoder_layerdrop = encoder_layerdrop self.classifier_dropout = classifier_dropout self.use_cache = use_cache self.num_hidden_layers = encoder_layers self.scale_embedding = scale_embedding # scale factor will be sqrt(d_model) if True super().__init__(pad_token_id=pad_token_id, **kwargs) def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None): """ Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`. 
""" bsz, src_len = mask.size() tgt_len = tgt_len if tgt_len is not None else src_len expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype) inverted_mask = 1.0 - expanded_mask return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min) # Copied from transformers.models.bart.modeling_bart.BartAttention with Bart->LDMBert class LDMBertAttention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__( self, embed_dim: int, num_heads: int, head_dim: int, dropout: float = 0.0, is_decoder: bool = False, bias: bool = False, ): super().__init__() self.embed_dim = embed_dim self.num_heads = num_heads self.dropout = dropout self.head_dim = head_dim self.inner_dim = head_dim * num_heads self.scaling = self.head_dim**-0.5 self.is_decoder = is_decoder self.k_proj = nn.Linear(embed_dim, self.inner_dim, bias=bias) self.v_proj = nn.Linear(embed_dim, self.inner_dim, bias=bias) self.q_proj = nn.Linear(embed_dim, self.inner_dim, bias=bias) self.out_proj = nn.Linear(self.inner_dim, embed_dim) def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() def forward( self, hidden_states: torch.Tensor, key_value_states: Optional[torch.Tensor] = None, past_key_value: Optional[Tuple[torch.Tensor]] = None, attention_mask: Optional[torch.Tensor] = None, layer_head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: """Input shape: Batch x Time x Channel""" # if key_value_states are provided this layer is used as a cross-attention layer # for the decoder is_cross_attention = key_value_states is not None bsz, tgt_len, _ = hidden_states.size() # get query proj query_states = self.q_proj(hidden_states) * self.scaling # get key, value proj if is_cross_attention and past_key_value is not None: # reuse k,v, cross_attentions key_states = past_key_value[0] value_states = past_key_value[1] elif is_cross_attention: # cross_attentions key_states = self._shape(self.k_proj(key_value_states), -1, bsz) value_states = self._shape(self.v_proj(key_value_states), -1, bsz) elif past_key_value is not None: # reuse k, v, self_attention key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) key_states = torch.cat([past_key_value[0], key_states], dim=2) value_states = torch.cat([past_key_value[1], value_states], dim=2) else: # self_attention key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) if self.is_decoder: # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states. # Further calls to cross_attention layer can then reuse all cross-attention # key/value_states (first "if" case) # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of # all previous decoder key/value_states. 
Further calls to uni-directional self-attention # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) # if encoder bi-directional self-attention `past_key_value` is always `None` past_key_value = (key_states, value_states) proj_shape = (bsz * self.num_heads, -1, self.head_dim) query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape) key_states = key_states.view(*proj_shape) value_states = value_states.view(*proj_shape) src_len = key_states.size(1) attn_weights = torch.bmm(query_states, key_states.transpose(1, 2)) if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len): raise ValueError( f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is" f" {attn_weights.size()}" ) if attention_mask is not None: if attention_mask.size() != (bsz, 1, tgt_len, src_len): raise ValueError( f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}" ) attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) attn_weights = nn.functional.softmax(attn_weights, dim=-1) if layer_head_mask is not None: if layer_head_mask.size() != (self.num_heads,): raise ValueError( f"Head mask for a single layer should be of size {(self.num_heads,)}, but is" f" {layer_head_mask.size()}" ) attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len) attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) if output_attentions: # this operation is a bit awkward, but it's required to # make sure that attn_weights keeps its gradient. # In order to do so, attn_weights have to be reshaped # twice and have to be reused in the following attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len) else: attn_weights_reshaped = None attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) attn_output = torch.bmm(attn_probs, value_states) if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim): raise ValueError( f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is" f" {attn_output.size()}" ) attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim) attn_output = attn_output.transpose(1, 2) # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be # partitioned aross GPUs when using tensor-parallelism. 
attn_output = attn_output.reshape(bsz, tgt_len, self.inner_dim) attn_output = self.out_proj(attn_output) return attn_output, attn_weights_reshaped, past_key_value class LDMBertEncoderLayer(nn.Module): def __init__(self, config: LDMBertConfig): super().__init__() self.embed_dim = config.d_model self.self_attn = LDMBertAttention( embed_dim=self.embed_dim, num_heads=config.encoder_attention_heads, head_dim=config.head_dim, dropout=config.attention_dropout, ) self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.dropout = config.dropout self.activation_fn = ACT2FN[config.activation_function] self.activation_dropout = config.activation_dropout self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim) self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim) self.final_layer_norm = nn.LayerNorm(self.embed_dim) def forward( self, hidden_states: torch.FloatTensor, attention_mask: torch.FloatTensor, layer_head_mask: torch.FloatTensor, output_attentions: Optional[bool] = False, ) -> Tuple[torch.FloatTensor, Optional[torch.FloatTensor]]: """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(seq_len, batch, embed_dim)` attention_mask (`torch.FloatTensor`): attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size `(encoder_attention_heads,)`. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. """ residual = hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) hidden_states, attn_weights, _ = self.self_attn( hidden_states=hidden_states, attention_mask=attention_mask, layer_head_mask=layer_head_mask, output_attentions=output_attentions, ) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states residual = hidden_states hidden_states = self.final_layer_norm(hidden_states) hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training) hidden_states = self.fc2(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states if hidden_states.dtype == torch.float16 and ( torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any() ): clamp_value = torch.finfo(hidden_states.dtype).max - 1000 hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) outputs = (hidden_states,) if output_attentions: outputs += (attn_weights,) return outputs # Copied from transformers.models.bart.modeling_bart.BartPretrainedModel with Bart->LDMBert class LDMBertPreTrainedModel(PreTrainedModel): config_class = LDMBertConfig base_model_prefix = "model" supports_gradient_checkpointing = True _keys_to_ignore_on_load_unexpected = [r"encoder\.version", r"decoder\.version"] def _init_weights(self, module): std = self.config.init_std if isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=std) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=std) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() def _set_gradient_checkpointing(self, module, value=False): if isinstance(module, (LDMBertEncoder,)): 
module.gradient_checkpointing = value @property def dummy_inputs(self): pad_token = self.config.pad_token_id input_ids = torch.tensor([[0, 6, 10, 4, 2], [0, 8, 12, 2, pad_token]], device=self.device) dummy_inputs = { "attention_mask": input_ids.ne(pad_token), "input_ids": input_ids, } return dummy_inputs class LDMBertEncoder(LDMBertPreTrainedModel): """ Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a [`LDMBertEncoderLayer`]. Args: config: LDMBertConfig embed_tokens (nn.Embedding): output embedding """ def __init__(self, config: LDMBertConfig): super().__init__(config) self.dropout = config.dropout embed_dim = config.d_model self.padding_idx = config.pad_token_id self.max_source_positions = config.max_position_embeddings self.embed_tokens = nn.Embedding(config.vocab_size, embed_dim) self.embed_positions = nn.Embedding(config.max_position_embeddings, embed_dim) self.layers = nn.ModuleList([LDMBertEncoderLayer(config) for _ in range(config.encoder_layers)]) self.layer_norm = nn.LayerNorm(embed_dim) self.gradient_checkpointing = False # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.embed_tokens def set_input_embeddings(self, value): self.embed_tokens = value def forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, BaseModelOutput]: r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`BartTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict # retrieve input_ids and inputs_embeds if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: input_shape = input_ids.size() input_ids = input_ids.view(-1, input_shape[-1]) elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) seq_len = input_shape[1] if position_ids is None: position_ids = torch.arange(seq_len, dtype=torch.long, device=inputs_embeds.device).expand((1, -1)) embed_pos = self.embed_positions(position_ids) hidden_states = inputs_embeds + embed_pos hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) # expand attention_mask if attention_mask is not None: # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] attention_mask = _expand_mask(attention_mask, inputs_embeds.dtype) encoder_states = () if output_hidden_states else None all_attentions = () if output_attentions else None # check if head_mask has a correct number of layers specified if desired if head_mask is not None: if head_mask.size()[0] != (len(self.layers)): raise ValueError( f"The head_mask should be specified for {len(self.layers)} layers, but it is for" f" {head_mask.size()[0]}." ) for idx, encoder_layer in enumerate(self.layers): if output_hidden_states: encoder_states = encoder_states + (hidden_states,) if self.gradient_checkpointing and self.training: def create_custom_forward(module): def custom_forward(*inputs): return module(*inputs, output_attentions) return custom_forward layer_outputs = torch.utils.checkpoint.checkpoint( create_custom_forward(encoder_layer), hidden_states, attention_mask, (head_mask[idx] if head_mask is not None else None), ) else: layer_outputs = encoder_layer( hidden_states, attention_mask, layer_head_mask=(head_mask[idx] if head_mask is not None else None), output_attentions=output_attentions, ) hidden_states = layer_outputs[0] if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) hidden_states = self.layer_norm(hidden_states) if output_hidden_states: encoder_states = encoder_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None) return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions ) class LDMBertModel(LDMBertPreTrainedModel): def __init__(self, config): super().__init__(config) self.model = LDMBertEncoder(config) self.to_logits = nn.Linear(config.hidden_size, config.vocab_size) def forward( self, input_ids=None, attention_mask=None, position_ids=None, head_mask=None, inputs_embeds=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): outputs = self.model( input_ids, attention_mask=attention_mask, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] return sequence_output class 
LatentDiffusionPipeline(DiffusionPipeline):
    def __init__(self, vqvae, bert, tokenizer, unet, noise_scheduler):
        super().__init__()
        noise_scheduler = noise_scheduler.set_format("pt")
        self.register_modules(vqvae=vqvae, bert=bert, tokenizer=tokenizer, unet=unet, noise_scheduler=noise_scheduler)

    @torch.no_grad()
    def __call__(
        self,
        prompt,
        batch_size=1,
        generator=None,
        torch_device=None,
        eta=0.0,
        guidance_scale=1.0,
        num_inference_steps=50,
    ):
        # eta corresponds to η in the paper and should be between [0, 1]
        if torch_device is None:
            torch_device = "cuda" if torch.cuda.is_available() else "cpu"

        self.unet.to(torch_device)
        self.vqvae.to(torch_device)
        self.bert.to(torch_device)

        # get unconditional embeddings for classifier-free guidance
        if guidance_scale != 1.0:
            uncond_input = self.tokenizer([""], padding="max_length", max_length=77, return_tensors="pt").to(
                torch_device
            )
            uncond_embeddings = self.bert(uncond_input.input_ids)

        # get text embedding
        text_input = self.tokenizer(prompt, padding="max_length", max_length=77, return_tensors="pt").to(torch_device)
        text_embedding = self.bert(text_input.input_ids)

        num_trained_timesteps = self.noise_scheduler.config.timesteps
        inference_step_times = range(0, num_trained_timesteps, num_trained_timesteps // num_inference_steps)

        image = torch.randn(
            (batch_size, self.unet.in_channels, self.unet.image_size, self.unet.image_size),
            generator=generator,
        ).to(torch_device)

        # See formulas (12) and (16) of the DDIM paper https://arxiv.org/pdf/2010.02502.pdf
        # Ideally, read the DDIM paper for an in-depth understanding
        # Notation (<variable name> -> <name in paper>)
        # - pred_noise_t -> e_theta(x_t, t)
        # - pred_original_image -> f_theta(x_t, t) or x_0
        # - std_dev_t -> sigma_t
        # - eta -> η
        # - pred_image_direction -> "direction pointing to x_t"
        # - pred_prev_image -> "x_t-1"
        for t in tqdm.tqdm(reversed(range(num_inference_steps)), total=num_inference_steps):
            # guidance_scale of 1 means no guidance
            if guidance_scale == 1.0:
                image_in = image
                context = text_embedding
                timesteps = torch.tensor([inference_step_times[t]] * image.shape[0], device=torch_device)
            else:
                # for classifier-free guidance, we need to do two forward passes
                # here we concatenate the conditional and unconditional embeddings into a single batch
                # to avoid doing two forward passes
                image_in = torch.cat([image] * 2)
                context = torch.cat([uncond_embeddings, text_embedding])
                timesteps = torch.tensor([inference_step_times[t]] * image.shape[0], device=torch_device)

            # 1. predict noise residual
            pred_noise_t = self.unet(image_in, timesteps, context=context)

            # perform guidance
            if guidance_scale != 1.0:
                pred_noise_t_uncond, pred_noise_t = pred_noise_t.chunk(2)
                pred_noise_t = pred_noise_t_uncond + guidance_scale * (pred_noise_t - pred_noise_t_uncond)

            # 2. predict previous mean of image x_t-1
            pred_prev_image = self.noise_scheduler.step(pred_noise_t, image, t, num_inference_steps, eta)

            # 3. optionally sample variance
            variance = 0
            if eta > 0:
                noise = torch.randn(image.shape, generator=generator).to(image.device)
                variance = self.noise_scheduler.get_variance(t, num_inference_steps).sqrt() * eta * noise

            # 4. set current image to prev_image: x_t -> x_t-1
            image = pred_prev_image + variance

        # scale and decode image with vae
        image = 1 / 0.18215 * image
        image = self.vqvae.decode(image)
        image = torch.clamp((image + 1.0) / 2.0, min=0.0, max=1.0)

        return image
diffusers_all-main
src/diffusers/pipelines/latent_diffusion/pipeline_latent_diffusion.py
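A minimal usage sketch for the text-conditional `LatentDiffusionPipeline` above; like the GLIDE pipeline, it is only exported when `transformers` is available, as the package `__init__.py` earlier shows. The checkpoint id is hypothetical, and `guidance_scale=1.0` would skip classifier-free guidance entirely.

import torch

from diffusers import LatentDiffusionPipeline

# Hypothetical checkpoint id -- it must bundle the vqvae, the LDMBert text encoder, its tokenizer,
# the latent unet and the scheduler registered above.
pipeline = LatentDiffusionPipeline.from_pretrained("fusing/latent-diffusion-text2im-large")

generator = torch.manual_seed(0)
images = pipeline(
    "a painting of a squirrel eating a burger",
    generator=generator,
    guidance_scale=6.0,  # 1.0 disables classifier-free guidance
    num_inference_steps=50,
)
print(images.shape)  # (batch_size, 3, height, width), clamped to [0, 1] by the pipeline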
from .pipeline_bddm import BDDMPipeline, DiffWave
diffusers_all-main
src/diffusers/pipelines/bddm/__init__.py
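A minimal usage sketch for the `BDDMPipeline` implemented in the file that follows. The checkpoint id is hypothetical, and the `(batch, 80, frames)` mel-spectrogram shape is inferred from the 80-band `mel_conv` and the 16x16 time upsampling (256 audio samples per frame) in the residual blocks.

import torch

from diffusers import BDDMPipeline

# Hypothetical checkpoint id -- it must bundle a DiffWave model and its noise scheduler.
pipeline = BDDMPipeline.from_pretrained("fusing/diffwave-vocoder")

# Dummy conditioning: 80 mel bands x 128 frames; the vocoder produces 256 audio samples per frame.
mel_spectrogram = torch.randn(1, 80, 128)
generator = torch.manual_seed(0)
# Note: calc_diffusion_step_embedding below calls .cuda(), so this sketch assumes a CUDA device is available.
audio = pipeline(mel_spectrogram, generator)
print(audio.shape)  # (1, 1, 128 * 256) = (1, 1, 32768)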
#!/bin/env python # -*- coding: utf-8 -*- ######################################################################## # # DiffWave: A Versatile Diffusion Model for Audio Synthesis # (https://arxiv.org/abs/2009.09761) # Modified from https://github.com/philsyn/DiffWave-Vocoder # # Author: Max W. Y. Lam (maxwylam@tencent.com) # Copyright (c) 2021Tencent. All Rights Reserved # ######################################################################## import math import numpy as np import torch import torch.nn as nn import torch.nn.functional as F import tqdm from ...configuration_utils import ConfigMixin from ...modeling_utils import ModelMixin from ...pipeline_utils import DiffusionPipeline def calc_diffusion_step_embedding(diffusion_steps, diffusion_step_embed_dim_in): """ Embed a diffusion step $t$ into a higher dimensional space E.g. the embedding vector in the 128-dimensional space is [sin(t * 10^(0*4/63)), ... , sin(t * 10^(63*4/63)), cos(t * 10^(0*4/63)), ... , cos(t * 10^(63*4/63))] Parameters: diffusion_steps (torch.long tensor, shape=(batchsize, 1)): diffusion steps for batch data diffusion_step_embed_dim_in (int, default=128): dimensionality of the embedding space for discrete diffusion steps Returns: the embedding vectors (torch.tensor, shape=(batchsize, diffusion_step_embed_dim_in)): """ assert diffusion_step_embed_dim_in % 2 == 0 half_dim = diffusion_step_embed_dim_in // 2 _embed = np.log(10000) / (half_dim - 1) _embed = torch.exp(torch.arange(half_dim) * -_embed).cuda() _embed = diffusion_steps * _embed diffusion_step_embed = torch.cat((torch.sin(_embed), torch.cos(_embed)), 1) return diffusion_step_embed """ Below scripts were borrowed from https://github.com/philsyn/DiffWave-Vocoder/blob/master/WaveNet.py """ def swish(x): return x * torch.sigmoid(x) # dilated conv layer with kaiming_normal initialization # from https://github.com/ksw0306/FloWaveNet/blob/master/modules.py class Conv(nn.Module): def __init__(self, in_channels, out_channels, kernel_size=3, dilation=1): super().__init__() self.padding = dilation * (kernel_size - 1) // 2 self.conv = nn.Conv1d(in_channels, out_channels, kernel_size, dilation=dilation, padding=self.padding) self.conv = nn.utils.weight_norm(self.conv) nn.init.kaiming_normal_(self.conv.weight) def forward(self, x): out = self.conv(x) return out # conv1x1 layer with zero initialization # from https://github.com/ksw0306/FloWaveNet/blob/master/modules.py but the scale parameter is removed class ZeroConv1d(nn.Module): def __init__(self, in_channel, out_channel): super().__init__() self.conv = nn.Conv1d(in_channel, out_channel, kernel_size=1, padding=0) self.conv.weight.data.zero_() self.conv.bias.data.zero_() def forward(self, x): out = self.conv(x) return out # every residual block (named residual layer in paper) # contains one noncausal dilated conv class ResidualBlock(nn.Module): def __init__(self, res_channels, skip_channels, dilation, diffusion_step_embed_dim_out): super().__init__() self.res_channels = res_channels # Use a FC layer for diffusion step embedding self.fc_t = nn.Linear(diffusion_step_embed_dim_out, self.res_channels) # Dilated conv layer self.dilated_conv_layer = Conv(self.res_channels, 2 * self.res_channels, kernel_size=3, dilation=dilation) # Add mel spectrogram upsampler and conditioner conv1x1 layer self.upsample_conv2d = nn.ModuleList() for s in [16, 16]: conv_trans2d = nn.ConvTranspose2d(1, 1, (3, 2 * s), padding=(1, s // 2), stride=(1, s)) conv_trans2d = nn.utils.weight_norm(conv_trans2d) nn.init.kaiming_normal_(conv_trans2d.weight) 
self.upsample_conv2d.append(conv_trans2d) # 80 is mel bands self.mel_conv = Conv(80, 2 * self.res_channels, kernel_size=1) # Residual conv1x1 layer, connect to next residual layer self.res_conv = nn.Conv1d(res_channels, res_channels, kernel_size=1) self.res_conv = nn.utils.weight_norm(self.res_conv) nn.init.kaiming_normal_(self.res_conv.weight) # Skip conv1x1 layer, add to all skip outputs through skip connections self.skip_conv = nn.Conv1d(res_channels, skip_channels, kernel_size=1) self.skip_conv = nn.utils.weight_norm(self.skip_conv) nn.init.kaiming_normal_(self.skip_conv.weight) def forward(self, input_data): x, mel_spec, diffusion_step_embed = input_data h = x batch_size, n_channels, seq_len = x.shape assert n_channels == self.res_channels # Add in diffusion step embedding part_t = self.fc_t(diffusion_step_embed) part_t = part_t.view([batch_size, self.res_channels, 1]) h += part_t # Dilated conv layer h = self.dilated_conv_layer(h) # Upsample2D spectrogram to size of audio mel_spec = torch.unsqueeze(mel_spec, dim=1) mel_spec = F.leaky_relu(self.upsample_conv2d[0](mel_spec), 0.4, inplace=False) mel_spec = F.leaky_relu(self.upsample_conv2d[1](mel_spec), 0.4, inplace=False) mel_spec = torch.squeeze(mel_spec, dim=1) assert mel_spec.size(2) >= seq_len if mel_spec.size(2) > seq_len: mel_spec = mel_spec[:, :, :seq_len] mel_spec = self.mel_conv(mel_spec) h += mel_spec # Gated-tanh nonlinearity out = torch.tanh(h[:, : self.res_channels, :]) * torch.sigmoid(h[:, self.res_channels :, :]) # Residual and skip outputs res = self.res_conv(out) assert x.shape == res.shape skip = self.skip_conv(out) # Normalize for training stability return (x + res) * math.sqrt(0.5), skip class ResidualGroup(nn.Module): def __init__( self, res_channels, skip_channels, num_res_layers, dilation_cycle, diffusion_step_embed_dim_in, diffusion_step_embed_dim_mid, diffusion_step_embed_dim_out, ): super().__init__() self.num_res_layers = num_res_layers self.diffusion_step_embed_dim_in = diffusion_step_embed_dim_in # Use the shared two FC layers for diffusion step embedding self.fc_t1 = nn.Linear(diffusion_step_embed_dim_in, diffusion_step_embed_dim_mid) self.fc_t2 = nn.Linear(diffusion_step_embed_dim_mid, diffusion_step_embed_dim_out) # Stack all residual blocks with dilations 1, 2, ... , 512, ... 
, 1, 2, ..., 512 self.residual_blocks = nn.ModuleList() for n in range(self.num_res_layers): self.residual_blocks.append( ResidualBlock( res_channels, skip_channels, dilation=2 ** (n % dilation_cycle), diffusion_step_embed_dim_out=diffusion_step_embed_dim_out, ) ) def forward(self, input_data): x, mel_spectrogram, diffusion_steps = input_data # Embed diffusion step t diffusion_step_embed = calc_diffusion_step_embedding(diffusion_steps, self.diffusion_step_embed_dim_in) diffusion_step_embed = swish(self.fc_t1(diffusion_step_embed)) diffusion_step_embed = swish(self.fc_t2(diffusion_step_embed)) # Pass all residual layers h = x skip = 0 for n in range(self.num_res_layers): # Use the output from last residual layer h, skip_n = self.residual_blocks[n]((h, mel_spectrogram, diffusion_step_embed)) # Accumulate all skip outputs skip += skip_n # Normalize for training stability return skip * math.sqrt(1.0 / self.num_res_layers) class DiffWave(ModelMixin, ConfigMixin): def __init__( self, in_channels=1, res_channels=128, skip_channels=128, out_channels=1, num_res_layers=30, dilation_cycle=10, diffusion_step_embed_dim_in=128, diffusion_step_embed_dim_mid=512, diffusion_step_embed_dim_out=512, ): super().__init__() # register all init arguments with self.register self.register_to_config( in_channels=in_channels, res_channels=res_channels, skip_channels=skip_channels, out_channels=out_channels, num_res_layers=num_res_layers, dilation_cycle=dilation_cycle, diffusion_step_embed_dim_in=diffusion_step_embed_dim_in, diffusion_step_embed_dim_mid=diffusion_step_embed_dim_mid, diffusion_step_embed_dim_out=diffusion_step_embed_dim_out, ) # Initial conv1x1 with relu self.init_conv = nn.Sequential(Conv(in_channels, res_channels, kernel_size=1), nn.ReLU(inplace=False)) # All residual layers self.residual_layer = ResidualGroup( res_channels, skip_channels, num_res_layers, dilation_cycle, diffusion_step_embed_dim_in, diffusion_step_embed_dim_mid, diffusion_step_embed_dim_out, ) # Final conv1x1 -> relu -> zeroconv1x1 self.final_conv = nn.Sequential( Conv(skip_channels, skip_channels, kernel_size=1), nn.ReLU(inplace=False), ZeroConv1d(skip_channels, out_channels), ) def forward(self, input_data): audio, mel_spectrogram, diffusion_steps = input_data x = audio x = self.init_conv(x).clone() x = self.residual_layer((x, mel_spectrogram, diffusion_steps)) return self.final_conv(x) class BDDMPipeline(DiffusionPipeline): def __init__(self, diffwave, noise_scheduler): super().__init__() noise_scheduler = noise_scheduler.set_format("pt") self.register_modules(diffwave=diffwave, noise_scheduler=noise_scheduler) @torch.no_grad() def __call__(self, mel_spectrogram, generator, torch_device=None): if torch_device is None: torch_device = "cuda" if torch.cuda.is_available() else "cpu" self.diffwave.to(torch_device) mel_spectrogram = mel_spectrogram.to(torch_device) audio_length = mel_spectrogram.size(-1) * 256 audio_size = (1, 1, audio_length) # Sample gaussian noise to begin loop audio = torch.normal(0, 1, size=audio_size, generator=generator).to(torch_device) timestep_values = self.noise_scheduler.config.timestep_values num_prediction_steps = len(self.noise_scheduler) for t in tqdm.tqdm(reversed(range(num_prediction_steps)), total=num_prediction_steps): # 1. predict noise residual ts = (torch.tensor(timestep_values[t]) * torch.ones((1, 1))).to(torch_device) residual = self.diffwave((audio, mel_spectrogram, ts)) # 2. predict previous mean of audio x_t-1 pred_prev_audio = self.noise_scheduler.step(residual, audio, t) # 3. 
optionally sample variance variance = 0 if t > 0: noise = torch.normal(0, 1, size=audio_size, generator=generator).to(torch_device) variance = self.noise_scheduler.get_variance(t).sqrt() * noise # 4. set current audio to prev_audio: x_t -> x_t-1 audio = pred_prev_audio + variance return audio
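

# The docstring of `calc_diffusion_step_embedding` above describes the sinusoidal embedding
# layout. Below is a hedged smoke test of that helper with illustrative values only; it is
# never executed on import and assumes the device-agnostic embedding noted in that function.
if __name__ == "__main__":
    example_steps = torch.tensor([[10], [250]], dtype=torch.long)
    example_embedding = calc_diffusion_step_embedding(example_steps, diffusion_step_embed_dim_in=128)
    # One 128-dim vector per diffusion step: the first 64 entries are sines, the last 64 cosines.
    assert example_embedding.shape == (2, 128)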
diffusers_all-main
src/diffusers/pipelines/bddm/pipeline_bddm.py
#!/usr/bin/env python3
import torch

from diffusers import DiffusionPipeline


# TODO(Patrick, Anton, Suraj) - rename `x` to better variable names
class ScoreSdeVpPipeline(DiffusionPipeline):
    def __init__(self, model, scheduler):
        super().__init__()
        self.register_modules(model=model, scheduler=scheduler)

    def __call__(self, num_inference_steps=1000, generator=None):
        device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")

        img_size = self.model.config.image_size
        channels = self.model.config.num_channels
        shape = (1, channels, img_size, img_size)

        model = self.model.to(device)

        # Draw the initial noise with the (optional) generator so that sampling is reproducible.
        x = torch.randn(*shape, generator=generator).to(device)

        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.scheduler.timesteps:
            t = t * torch.ones(shape[0], device=device)
            scaled_t = t * (num_inference_steps - 1)

            with torch.no_grad():
                result = model(x, scaled_t)

            x, x_mean = self.scheduler.step_pred(result, x, t)

        x_mean = (x_mean + 1.0) / 2.0

        return x_mean
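

# Hedged usage sketch for this pipeline. The repository id below is a placeholder, not a real
# checkpoint; `from_pretrained` is inherited from `DiffusionPipeline` and is expected to restore
# both the score model and its matching VP scheduler.
#
#   pipeline = ScoreSdeVpPipeline.from_pretrained("your-org/score-sde-vp-checkpoint")
#   sample = pipeline(num_inference_steps=1000)  # tensor rescaled to [0, 1] as in __call__ above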
diffusers_all-main
src/diffusers/pipelines/score_sde_vp/pipeline_score_sde_vp.py
from .pipeline_score_sde_vp import ScoreSdeVpPipeline
diffusers_all-main
src/diffusers/pipelines/score_sde_vp/__init__.py
from .pipeline_score_sde_ve import ScoreSdeVePipeline
diffusers_all-main
src/diffusers/pipelines/score_sde_ve/__init__.py
#!/usr/bin/env python3
import torch

from diffusers import DiffusionPipeline


# TODO(Patrick, Anton, Suraj) - rename `x` to better variable names
class ScoreSdeVePipeline(DiffusionPipeline):
    def __init__(self, model, scheduler):
        super().__init__()
        self.register_modules(model=model, scheduler=scheduler)

    def __call__(self, num_inference_steps=2000, generator=None):
        device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")

        img_size = self.model.config.image_size
        channels = self.model.config.num_channels
        shape = (1, channels, img_size, img_size)

        model = self.model.to(device)

        # TODO(Patrick) move to scheduler config
        n_steps = 1

        # Draw the initial noise with the (optional) generator and scale it to sigma_max.
        x = torch.randn(*shape, generator=generator) * self.scheduler.config.sigma_max
        x = x.to(device)

        self.scheduler.set_timesteps(num_inference_steps)
        self.scheduler.set_sigmas(num_inference_steps)

        for i, t in enumerate(self.scheduler.timesteps):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0], device=device)

            for _ in range(n_steps):
                with torch.no_grad():
                    result = self.model(x, sigma_t)
                x = self.scheduler.step_correct(result, x)

            with torch.no_grad():
                result = model(x, sigma_t)

            x, x_mean = self.scheduler.step_pred(result, x, t)

        return x_mean
diffusers_all-main
src/diffusers/pipelines/score_sde_ve/pipeline_score_sde_ve.py
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


import torch

import tqdm

from ...pipeline_utils import DiffusionPipeline


class PNDMPipeline(DiffusionPipeline):
    def __init__(self, unet, noise_scheduler):
        super().__init__()
        noise_scheduler = noise_scheduler.set_format("pt")
        self.register_modules(unet=unet, noise_scheduler=noise_scheduler)

    def __call__(self, batch_size=1, generator=None, torch_device=None, num_inference_steps=50):
        # For more information on the sampling method you can take a look at Algorithm 2 of
        # the official paper: https://arxiv.org/pdf/2202.09778.pdf
        if torch_device is None:
            torch_device = "cuda" if torch.cuda.is_available() else "cpu"

        self.unet.to(torch_device)

        # Sample gaussian noise to begin loop
        image = torch.randn(
            (batch_size, self.unet.in_channels, self.unet.resolution, self.unet.resolution),
            generator=generator,
        )
        image = image.to(torch_device)

        prk_time_steps = self.noise_scheduler.get_prk_time_steps(num_inference_steps)
        for t in tqdm.tqdm(range(len(prk_time_steps))):
            t_orig = prk_time_steps[t]
            # Pure inference: run the model without building an autograd graph.
            with torch.no_grad():
                residual = self.unet(image, t_orig)

            image = self.noise_scheduler.step_prk(residual, image, t, num_inference_steps)

        timesteps = self.noise_scheduler.get_time_steps(num_inference_steps)
        for t in tqdm.tqdm(range(len(timesteps))):
            t_orig = timesteps[t]
            with torch.no_grad():
                residual = self.unet(image, t_orig)

            image = self.noise_scheduler.step_plms(residual, image, t, num_inference_steps)

        return image
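

# Hedged usage sketch; "your-org/pndm-checkpoint" is a placeholder repository id. The call first
# runs the Runge-Kutta (PRK) warm-up steps and then the linear multi-step (PLMS) steps, exactly
# as in `__call__` above.
#
#   pipeline = PNDMPipeline.from_pretrained("your-org/pndm-checkpoint")
#   image = pipeline(batch_size=1, num_inference_steps=50)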
diffusers_all-main
src/diffusers/pipelines/pndm/pipeline_pndm.py
from .pipeline_pndm import PNDMPipeline
diffusers_all-main
src/diffusers/pipelines/pndm/__init__.py
""" from https://github.com/jaywalnut310/glow-tts""" import math import torch from torch import nn import tqdm from ...configuration_utils import ConfigMixin from ...modeling_utils import ModelMixin from ...pipeline_utils import DiffusionPipeline from .grad_tts_utils import GradTTSTokenizer # flake8: noqa def sequence_mask(length, max_length=None): if max_length is None: max_length = length.max() x = torch.arange(int(max_length), dtype=length.dtype, device=length.device) return x.unsqueeze(0) < length.unsqueeze(1) def fix_len_compatibility(length, num_downsamplings_in_unet=2): while True: if length % (2**num_downsamplings_in_unet) == 0: return length length += 1 def convert_pad_shape(pad_shape): l = pad_shape[::-1] pad_shape = [item for sublist in l for item in sublist] return pad_shape def generate_path(duration, mask): device = duration.device b, t_x, t_y = mask.shape cum_duration = torch.cumsum(duration, 1) path = torch.zeros(b, t_x, t_y, dtype=mask.dtype).to(device=device) cum_duration_flat = cum_duration.view(b * t_x) path = sequence_mask(cum_duration_flat, t_y).to(mask.dtype) path = path.view(b, t_x, t_y) path = path - torch.nn.functional.pad(path, convert_pad_shape([[0, 0], [1, 0], [0, 0]]))[:, :-1] path = path * mask return path def duration_loss(logw, logw_, lengths): loss = torch.sum((logw - logw_) ** 2) / torch.sum(lengths) return loss class LayerNorm(nn.Module): def __init__(self, channels, eps=1e-4): super(LayerNorm, self).__init__() self.channels = channels self.eps = eps self.gamma = torch.nn.Parameter(torch.ones(channels)) self.beta = torch.nn.Parameter(torch.zeros(channels)) def forward(self, x): n_dims = len(x.shape) mean = torch.mean(x, 1, keepdim=True) variance = torch.mean((x - mean) ** 2, 1, keepdim=True) x = (x - mean) * torch.rsqrt(variance + self.eps) shape = [1, -1] + [1] * (n_dims - 2) x = x * self.gamma.view(*shape) + self.beta.view(*shape) return x class ConvReluNorm(nn.Module): def __init__(self, in_channels, hidden_channels, out_channels, kernel_size, n_layers, p_dropout): super(ConvReluNorm, self).__init__() self.in_channels = in_channels self.hidden_channels = hidden_channels self.out_channels = out_channels self.kernel_size = kernel_size self.n_layers = n_layers self.p_dropout = p_dropout self.conv_layers = torch.nn.ModuleList() self.norm_layers = torch.nn.ModuleList() self.conv_layers.append(torch.nn.Conv1d(in_channels, hidden_channels, kernel_size, padding=kernel_size // 2)) self.norm_layers.append(LayerNorm(hidden_channels)) self.relu_drop = torch.nn.Sequential(torch.nn.ReLU(), torch.nn.Dropout(p_dropout)) for _ in range(n_layers - 1): self.conv_layers.append( torch.nn.Conv1d(hidden_channels, hidden_channels, kernel_size, padding=kernel_size // 2) ) self.norm_layers.append(LayerNorm(hidden_channels)) self.proj = torch.nn.Conv1d(hidden_channels, out_channels, 1) self.proj.weight.data.zero_() self.proj.bias.data.zero_() def forward(self, x, x_mask): x_org = x for i in range(self.n_layers): x = self.conv_layers[i](x * x_mask) x = self.norm_layers[i](x) x = self.relu_drop(x) x = x_org + self.proj(x) return x * x_mask class DurationPredictor(nn.Module): def __init__(self, in_channels, filter_channels, kernel_size, p_dropout): super(DurationPredictor, self).__init__() self.in_channels = in_channels self.filter_channels = filter_channels self.p_dropout = p_dropout self.drop = torch.nn.Dropout(p_dropout) self.conv_1 = torch.nn.Conv1d(in_channels, filter_channels, kernel_size, padding=kernel_size // 2) self.norm_1 = LayerNorm(filter_channels) self.conv_2 = 
torch.nn.Conv1d(filter_channels, filter_channels, kernel_size, padding=kernel_size // 2) self.norm_2 = LayerNorm(filter_channels) self.proj = torch.nn.Conv1d(filter_channels, 1, 1) def forward(self, x, x_mask): x = self.conv_1(x * x_mask) x = torch.relu(x) x = self.norm_1(x) x = self.drop(x) x = self.conv_2(x * x_mask) x = torch.relu(x) x = self.norm_2(x) x = self.drop(x) x = self.proj(x * x_mask) return x * x_mask class MultiHeadAttention(nn.Module): def __init__( self, channels, out_channels, n_heads, window_size=None, heads_share=True, p_dropout=0.0, proximal_bias=False, proximal_init=False, ): super(MultiHeadAttention, self).__init__() assert channels % n_heads == 0 self.channels = channels self.out_channels = out_channels self.n_heads = n_heads self.window_size = window_size self.heads_share = heads_share self.proximal_bias = proximal_bias self.p_dropout = p_dropout self.attn = None self.k_channels = channels // n_heads self.conv_q = torch.nn.Conv1d(channels, channels, 1) self.conv_k = torch.nn.Conv1d(channels, channels, 1) self.conv_v = torch.nn.Conv1d(channels, channels, 1) if window_size is not None: n_heads_rel = 1 if heads_share else n_heads rel_stddev = self.k_channels**-0.5 self.emb_rel_k = torch.nn.Parameter( torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev ) self.emb_rel_v = torch.nn.Parameter( torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev ) self.conv_o = torch.nn.Conv1d(channels, out_channels, 1) self.drop = torch.nn.Dropout(p_dropout) torch.nn.init.xavier_uniform_(self.conv_q.weight) torch.nn.init.xavier_uniform_(self.conv_k.weight) if proximal_init: self.conv_k.weight.data.copy_(self.conv_q.weight.data) self.conv_k.bias.data.copy_(self.conv_q.bias.data) torch.nn.init.xavier_uniform_(self.conv_v.weight) def forward(self, x, c, attn_mask=None): q = self.conv_q(x) k = self.conv_k(c) v = self.conv_v(c) x, self.attn = self.attention(q, k, v, mask=attn_mask) x = self.conv_o(x) return x def attention(self, query, key, value, mask=None): b, d, t_s, t_t = (*key.size(), query.size(2)) query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3) key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(self.k_channels) if self.window_size is not None: assert t_s == t_t, "Relative attention is only available for self-attention." key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s) rel_logits = self._matmul_with_relative_keys(query, key_relative_embeddings) rel_logits = self._relative_position_to_absolute_position(rel_logits) scores_local = rel_logits / math.sqrt(self.k_channels) scores = scores + scores_local if self.proximal_bias: assert t_s == t_t, "Proximal bias is only available for self-attention." 
scores = scores + self._attention_bias_proximal(t_s).to(device=scores.device, dtype=scores.dtype) if mask is not None: scores = scores.masked_fill(mask == 0, -1e4) p_attn = torch.nn.functional.softmax(scores, dim=-1) p_attn = self.drop(p_attn) output = torch.matmul(p_attn, value) if self.window_size is not None: relative_weights = self._absolute_position_to_relative_position(p_attn) value_relative_embeddings = self._get_relative_embeddings(self.emb_rel_v, t_s) output = output + self._matmul_with_relative_values(relative_weights, value_relative_embeddings) output = output.transpose(2, 3).contiguous().view(b, d, t_t) return output, p_attn def _matmul_with_relative_values(self, x, y): ret = torch.matmul(x, y.unsqueeze(0)) return ret def _matmul_with_relative_keys(self, x, y): ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1)) return ret def _get_relative_embeddings(self, relative_embeddings, length): pad_length = max(length - (self.window_size + 1), 0) slice_start_position = max((self.window_size + 1) - length, 0) slice_end_position = slice_start_position + 2 * length - 1 if pad_length > 0: padded_relative_embeddings = torch.nn.functional.pad( relative_embeddings, convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]]) ) else: padded_relative_embeddings = relative_embeddings used_relative_embeddings = padded_relative_embeddings[:, slice_start_position:slice_end_position] return used_relative_embeddings def _relative_position_to_absolute_position(self, x): batch, heads, length, _ = x.size() x = torch.nn.functional.pad(x, convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, 1]])) x_flat = x.view([batch, heads, length * 2 * length]) x_flat = torch.nn.functional.pad(x_flat, convert_pad_shape([[0, 0], [0, 0], [0, length - 1]])) x_final = x_flat.view([batch, heads, length + 1, 2 * length - 1])[:, :, :length, length - 1 :] return x_final def _absolute_position_to_relative_position(self, x): batch, heads, length, _ = x.size() x = torch.nn.functional.pad(x, convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length - 1]])) x_flat = x.view([batch, heads, length**2 + length * (length - 1)]) x_flat = torch.nn.functional.pad(x_flat, convert_pad_shape([[0, 0], [0, 0], [length, 0]])) x_final = x_flat.view([batch, heads, length, 2 * length])[:, :, :, 1:] return x_final def _attention_bias_proximal(self, length): r = torch.arange(length, dtype=torch.float32) diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1) return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0) class FFN(nn.Module): def __init__(self, in_channels, out_channels, filter_channels, kernel_size, p_dropout=0.0): super(FFN, self).__init__() self.in_channels = in_channels self.out_channels = out_channels self.filter_channels = filter_channels self.kernel_size = kernel_size self.p_dropout = p_dropout self.conv_1 = torch.nn.Conv1d(in_channels, filter_channels, kernel_size, padding=kernel_size // 2) self.conv_2 = torch.nn.Conv1d(filter_channels, out_channels, kernel_size, padding=kernel_size // 2) self.drop = torch.nn.Dropout(p_dropout) def forward(self, x, x_mask): x = self.conv_1(x * x_mask) x = torch.relu(x) x = self.drop(x) x = self.conv_2(x * x_mask) return x * x_mask class Encoder(nn.Module): def __init__( self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0.0, window_size=None, **kwargs, ): super(Encoder, self).__init__() self.hidden_channels = hidden_channels self.filter_channels = filter_channels self.n_heads = n_heads self.n_layers = n_layers self.kernel_size = kernel_size 
self.p_dropout = p_dropout self.window_size = window_size self.drop = torch.nn.Dropout(p_dropout) self.attn_layers = torch.nn.ModuleList() self.norm_layers_1 = torch.nn.ModuleList() self.ffn_layers = torch.nn.ModuleList() self.norm_layers_2 = torch.nn.ModuleList() for _ in range(self.n_layers): self.attn_layers.append( MultiHeadAttention( hidden_channels, hidden_channels, n_heads, window_size=window_size, p_dropout=p_dropout ) ) self.norm_layers_1.append(LayerNorm(hidden_channels)) self.ffn_layers.append( FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout) ) self.norm_layers_2.append(LayerNorm(hidden_channels)) def forward(self, x, x_mask): attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1) for i in range(self.n_layers): x = x * x_mask y = self.attn_layers[i](x, x, attn_mask) y = self.drop(y) x = self.norm_layers_1[i](x + y) y = self.ffn_layers[i](x, x_mask) y = self.drop(y) x = self.norm_layers_2[i](x + y) x = x * x_mask return x class TextEncoder(ModelMixin, ConfigMixin): def __init__( self, n_vocab, n_feats, n_channels, filter_channels, filter_channels_dp, n_heads, n_layers, kernel_size, p_dropout, window_size=None, spk_emb_dim=64, n_spks=1, ): super(TextEncoder, self).__init__() self.register_to_config( n_vocab=n_vocab, n_feats=n_feats, n_channels=n_channels, filter_channels=filter_channels, filter_channels_dp=filter_channels_dp, n_heads=n_heads, n_layers=n_layers, kernel_size=kernel_size, p_dropout=p_dropout, window_size=window_size, spk_emb_dim=spk_emb_dim, n_spks=n_spks, ) self.n_vocab = n_vocab self.n_feats = n_feats self.n_channels = n_channels self.filter_channels = filter_channels self.filter_channels_dp = filter_channels_dp self.n_heads = n_heads self.n_layers = n_layers self.kernel_size = kernel_size self.p_dropout = p_dropout self.window_size = window_size self.spk_emb_dim = spk_emb_dim self.n_spks = n_spks self.emb = torch.nn.Embedding(n_vocab, n_channels) torch.nn.init.normal_(self.emb.weight, 0.0, n_channels**-0.5) self.prenet = ConvReluNorm(n_channels, n_channels, n_channels, kernel_size=5, n_layers=3, p_dropout=0.5) self.encoder = Encoder( n_channels + (spk_emb_dim if n_spks > 1 else 0), filter_channels, n_heads, n_layers, kernel_size, p_dropout, window_size=window_size, ) self.proj_m = torch.nn.Conv1d(n_channels + (spk_emb_dim if n_spks > 1 else 0), n_feats, 1) self.proj_w = DurationPredictor( n_channels + (spk_emb_dim if n_spks > 1 else 0), filter_channels_dp, kernel_size, p_dropout ) def forward(self, x, x_lengths, spk=None): x = self.emb(x) * math.sqrt(self.n_channels) x = torch.transpose(x, 1, -1) x_mask = torch.unsqueeze(sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype) x = self.prenet(x, x_mask) if self.n_spks > 1: x = torch.cat([x, spk.unsqueeze(-1).repeat(1, 1, x.shape[-1])], dim=1) x = self.encoder(x, x_mask) mu = self.proj_m(x) * x_mask x_dp = torch.detach(x) logw = self.proj_w(x_dp, x_mask) return mu, logw, x_mask class GradTTSPipeline(DiffusionPipeline): def __init__(self, unet, text_encoder, noise_scheduler, tokenizer): super().__init__() noise_scheduler = noise_scheduler.set_format("pt") self.register_modules( unet=unet, text_encoder=text_encoder, noise_scheduler=noise_scheduler, tokenizer=tokenizer ) @torch.no_grad() def __call__( self, text, num_inference_steps=50, temperature=1.3, length_scale=0.91, speaker_id=15, torch_device=None, generator=None, ): if torch_device is None: torch_device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") self.unet.to(torch_device) 
self.text_encoder.to(torch_device) x, x_lengths = self.tokenizer(text) x = x.to(torch_device) x_lengths = x_lengths.to(torch_device) if speaker_id is not None: speaker_id = torch.LongTensor([speaker_id]).to(torch_device) # Get encoder_outputs `mu_x` and log-scaled token durations `logw` mu_x, logw, x_mask = self.text_encoder(x, x_lengths) w = torch.exp(logw) * x_mask w_ceil = torch.ceil(w) * length_scale y_lengths = torch.clamp_min(torch.sum(w_ceil, [1, 2]), 1).long() y_max_length = int(y_lengths.max()) y_max_length_ = fix_len_compatibility(y_max_length) # Using obtained durations `w` construct alignment map `attn` y_mask = sequence_mask(y_lengths, y_max_length_).unsqueeze(1).to(x_mask.dtype) attn_mask = x_mask.unsqueeze(-1) * y_mask.unsqueeze(2) attn = generate_path(w_ceil.squeeze(1), attn_mask.squeeze(1)).unsqueeze(1) # Align encoded text and get mu_y mu_y = torch.matmul(attn.squeeze(1).transpose(1, 2), mu_x.transpose(1, 2)) mu_y = mu_y.transpose(1, 2) # Sample latent representation from terminal distribution N(mu_y, I) z = mu_y + torch.randn(mu_y.shape, generator=generator).to(mu_y.device) xt = z * y_mask h = 1.0 / num_inference_steps # (Patrick: TODO) for t in tqdm.tqdm(range(num_inference_steps), total=num_inference_steps): t_new = num_inference_steps - t - 1 t = (1.0 - (t + 0.5) * h) * torch.ones(z.shape[0], dtype=z.dtype, device=z.device) residual = self.unet(xt, t, mu_y, y_mask, speaker_id) scheduler_residual = residual - mu_y + xt xt = self.noise_scheduler.step(scheduler_residual, xt, t_new, num_inference_steps) xt = xt * y_mask return xt[:, :, :y_max_length]
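

# Hedged usage sketch; the repository id below is a placeholder. Note that the pipeline returns a
# mel spectrogram (one row per mel bin), so a separate vocoder is still needed to obtain audio.
#
#   pipeline = GradTTSPipeline.from_pretrained("your-org/grad-tts-checkpoint")
#   mel = pipeline("Hello world", num_inference_steps=50)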
diffusers_all-main
src/diffusers/pipelines/grad_tts/pipeline_grad_tts.py
from ...utils import is_inflect_available, is_transformers_available, is_unidecode_available if is_transformers_available() and is_unidecode_available() and is_inflect_available(): from .grad_tts_utils import GradTTSTokenizer from .pipeline_grad_tts import GradTTSPipeline, TextEncoder
diffusers_all-main
src/diffusers/pipelines/grad_tts/__init__.py
# tokenizer import os import re from shutil import copyfile import torch import inflect from transformers import PreTrainedTokenizer from unidecode import unidecode valid_symbols = [ "AA", "AA0", "AA1", "AA2", "AE", "AE0", "AE1", "AE2", "AH", "AH0", "AH1", "AH2", "AO", "AO0", "AO1", "AO2", "AW", "AW0", "AW1", "AW2", "AY", "AY0", "AY1", "AY2", "B", "CH", "D", "DH", "EH", "EH0", "EH1", "EH2", "ER", "ER0", "ER1", "ER2", "EY", "EY0", "EY1", "EY2", "F", "G", "HH", "IH", "IH0", "IH1", "IH2", "IY", "IY0", "IY1", "IY2", "JH", "K", "L", "M", "N", "NG", "OW", "OW0", "OW1", "OW2", "OY", "OY0", "OY1", "OY2", "P", "R", "S", "SH", "T", "TH", "UH", "UH0", "UH1", "UH2", "UW", "UW0", "UW1", "UW2", "V", "W", "Y", "Z", "ZH", ] _valid_symbol_set = set(valid_symbols) def intersperse(lst, item): # Adds blank symbol result = [item] * (len(lst) * 2 + 1) result[1::2] = lst return result class CMUDict: def __init__(self, file_or_path, keep_ambiguous=True): if isinstance(file_or_path, str): with open(file_or_path, encoding="latin-1") as f: entries = _parse_cmudict(f) else: entries = _parse_cmudict(file_or_path) if not keep_ambiguous: entries = {word: pron for word, pron in entries.items() if len(pron) == 1} self._entries = entries def __len__(self): return len(self._entries) def lookup(self, word): return self._entries.get(word.upper()) _alt_re = re.compile(r"\([0-9]+\)") def _parse_cmudict(file): cmudict = {} for line in file: if len(line) and (line[0] >= "A" and line[0] <= "Z" or line[0] == "'"): parts = line.split(" ") word = re.sub(_alt_re, "", parts[0]) pronunciation = _get_pronunciation(parts[1]) if pronunciation: if word in cmudict: cmudict[word].append(pronunciation) else: cmudict[word] = [pronunciation] return cmudict def _get_pronunciation(s): parts = s.strip().split(" ") for part in parts: if part not in _valid_symbol_set: return None return " ".join(parts) _whitespace_re = re.compile(r"\s+") _abbreviations = [ (re.compile("\\b%s\\." 
% x[0], re.IGNORECASE), x[1]) for x in [ ("mrs", "misess"), ("mr", "mister"), ("dr", "doctor"), ("st", "saint"), ("co", "company"), ("jr", "junior"), ("maj", "major"), ("gen", "general"), ("drs", "doctors"), ("rev", "reverend"), ("lt", "lieutenant"), ("hon", "honorable"), ("sgt", "sergeant"), ("capt", "captain"), ("esq", "esquire"), ("ltd", "limited"), ("col", "colonel"), ("ft", "fort"), ] ] def expand_abbreviations(text): for regex, replacement in _abbreviations: text = re.sub(regex, replacement, text) return text def expand_numbers(text): return normalize_numbers(text) def lowercase(text): return text.lower() def collapse_whitespace(text): return re.sub(_whitespace_re, " ", text) def convert_to_ascii(text): return unidecode(text) def basic_cleaners(text): text = lowercase(text) text = collapse_whitespace(text) return text def transliteration_cleaners(text): text = convert_to_ascii(text) text = lowercase(text) text = collapse_whitespace(text) return text def english_cleaners(text): text = convert_to_ascii(text) text = lowercase(text) text = expand_numbers(text) text = expand_abbreviations(text) text = collapse_whitespace(text) return text _inflect = inflect.engine() _comma_number_re = re.compile(r"([0-9][0-9\,]+[0-9])") _decimal_number_re = re.compile(r"([0-9]+\.[0-9]+)") _pounds_re = re.compile(r"£([0-9\,]*[0-9]+)") _dollars_re = re.compile(r"\$([0-9\.\,]*[0-9]+)") _ordinal_re = re.compile(r"[0-9]+(st|nd|rd|th)") _number_re = re.compile(r"[0-9]+") def _remove_commas(m): return m.group(1).replace(",", "") def _expand_decimal_point(m): return m.group(1).replace(".", " point ") def _expand_dollars(m): match = m.group(1) parts = match.split(".") if len(parts) > 2: return match + " dollars" dollars = int(parts[0]) if parts[0] else 0 cents = int(parts[1]) if len(parts) > 1 and parts[1] else 0 if dollars and cents: dollar_unit = "dollar" if dollars == 1 else "dollars" cent_unit = "cent" if cents == 1 else "cents" return "%s %s, %s %s" % (dollars, dollar_unit, cents, cent_unit) elif dollars: dollar_unit = "dollar" if dollars == 1 else "dollars" return "%s %s" % (dollars, dollar_unit) elif cents: cent_unit = "cent" if cents == 1 else "cents" return "%s %s" % (cents, cent_unit) else: return "zero dollars" def _expand_ordinal(m): return _inflect.number_to_words(m.group(0)) def _expand_number(m): num = int(m.group(0)) if num > 1000 and num < 3000: if num == 2000: return "two thousand" elif num > 2000 and num < 2010: return "two thousand " + _inflect.number_to_words(num % 100) elif num % 100 == 0: return _inflect.number_to_words(num // 100) + " hundred" else: return _inflect.number_to_words(num, andword="", zero="oh", group=2).replace(", ", " ") else: return _inflect.number_to_words(num, andword="") def normalize_numbers(text): text = re.sub(_comma_number_re, _remove_commas, text) text = re.sub(_pounds_re, r"\1 pounds", text) text = re.sub(_dollars_re, _expand_dollars, text) text = re.sub(_decimal_number_re, _expand_decimal_point, text) text = re.sub(_ordinal_re, _expand_ordinal, text) text = re.sub(_number_re, _expand_number, text) return text """ from https://github.com/keithito/tacotron""" _pad = "_" _punctuation = "!'(),.:;? 
" _special = "-" _letters = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" # Prepend "@" to ARPAbet symbols to ensure uniqueness: _arpabet = ["@" + s for s in valid_symbols] # Export all symbols: symbols = [_pad] + list(_special) + list(_punctuation) + list(_letters) + _arpabet _symbol_to_id = {s: i for i, s in enumerate(symbols)} _id_to_symbol = {i: s for i, s in enumerate(symbols)} _curly_re = re.compile(r"(.*?)\{(.+?)\}(.*)") def get_arpabet(word, dictionary): word_arpabet = dictionary.lookup(word) if word_arpabet is not None: return "{" + word_arpabet[0] + "}" else: return word def text_to_sequence(text, cleaner_names=[english_cleaners], dictionary=None): """Converts a string of text to a sequence of IDs corresponding to the symbols in the text. The text can optionally have ARPAbet sequences enclosed in curly braces embedded in it. For example, "Turn left on {HH AW1 S S T AH0 N} Street." Args: text: string to convert to a sequence cleaner_names: names of the cleaner functions to run the text through dictionary: arpabet class with arpabet dictionary Returns: List of integers corresponding to the symbols in the text """ sequence = [] space = _symbols_to_sequence(" ") # Check for curly braces and treat their contents as ARPAbet: while len(text): m = _curly_re.match(text) if not m: clean_text = _clean_text(text, cleaner_names) if dictionary is not None: clean_text = [get_arpabet(w, dictionary) for w in clean_text.split(" ")] for i in range(len(clean_text)): t = clean_text[i] if t.startswith("{"): sequence += _arpabet_to_sequence(t[1:-1]) else: sequence += _symbols_to_sequence(t) sequence += space else: sequence += _symbols_to_sequence(clean_text) break sequence += _symbols_to_sequence(_clean_text(m.group(1), cleaner_names)) sequence += _arpabet_to_sequence(m.group(2)) text = m.group(3) # remove trailing space if dictionary is not None: sequence = sequence[:-1] if sequence[-1] == space[0] else sequence return sequence def sequence_to_text(sequence): """Converts a sequence of IDs back to a string""" result = "" for symbol_id in sequence: if symbol_id in _id_to_symbol: s = _id_to_symbol[symbol_id] # Enclose ARPAbet back in curly braces: if len(s) > 1 and s[0] == "@": s = "{%s}" % s[1:] result += s return result.replace("}{", " ") def _clean_text(text, cleaner_names): for cleaner in cleaner_names: text = cleaner(text) return text def _symbols_to_sequence(symbols): return [_symbol_to_id[s] for s in symbols if _should_keep_symbol(s)] def _arpabet_to_sequence(text): return _symbols_to_sequence(["@" + s for s in text.split()]) def _should_keep_symbol(s): return s in _symbol_to_id and s != "_" and s != "~" VOCAB_FILES_NAMES = { "dict_file": "dict_file.txt", } class GradTTSTokenizer(PreTrainedTokenizer): vocab_files_names = VOCAB_FILES_NAMES def __init__(self, dict_file, **kwargs): super().__init__(**kwargs) self.cmu = CMUDict(dict_file) self.dict_file = dict_file def __call__(self, text): x = torch.LongTensor(intersperse(text_to_sequence(text, dictionary=self.cmu), len(symbols)))[None] x_lengths = torch.LongTensor([x.shape[-1]]) return x, x_lengths def save_vocabulary(self, save_directory: str, filename_prefix=None): dict_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["dict_file"] ) copyfile(self.dict_file, dict_file) return (dict_file,)
diffusers_all-main
src/diffusers/pipelines/grad_tts/grad_tts_utils.py
from .pipeline_ddpm import DDPMPipeline
diffusers_all-main
src/diffusers/pipelines/ddpm/__init__.py
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch import tqdm from ...pipeline_utils import DiffusionPipeline class DDPMPipeline(DiffusionPipeline): def __init__(self, unet, noise_scheduler): super().__init__() noise_scheduler = noise_scheduler.set_format("pt") self.register_modules(unet=unet, noise_scheduler=noise_scheduler) def __call__(self, batch_size=1, generator=None, torch_device=None): if torch_device is None: torch_device = "cuda" if torch.cuda.is_available() else "cpu" self.unet.to(torch_device) # Sample gaussian noise to begin loop image = torch.randn( (batch_size, self.unet.in_channels, self.unet.resolution, self.unet.resolution), generator=generator, ) image = image.to(torch_device) num_prediction_steps = len(self.noise_scheduler) for t in tqdm.tqdm(reversed(range(num_prediction_steps)), total=num_prediction_steps): # 1. predict noise residual with torch.no_grad(): residual = self.unet(image, t) # 2. predict previous mean of image x_t-1 pred_prev_image = self.noise_scheduler.step(residual, image, t) # 3. optionally sample variance variance = 0 if t > 0: noise = torch.randn(image.shape, generator=generator).to(image.device) variance = self.noise_scheduler.get_variance(t).sqrt() * noise # 4. set current image to prev_image: x_t -> x_t-1 image = pred_prev_image + variance return image
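

# Hedged usage sketch; the repository id below is a placeholder. Passing a torch.Generator (as
# returned by torch.manual_seed) makes the initial noise, and therefore the sample, reproducible.
#
#   pipeline = DDPMPipeline.from_pretrained("your-org/ddpm-checkpoint")
#   image = pipeline(batch_size=1, generator=torch.manual_seed(0))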
diffusers_all-main
src/diffusers/pipelines/ddpm/pipeline_ddpm.py
# coding=utf-8 # Copyright 2020 Optuna, Hugging Face # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Logging utilities.""" import logging import os import sys import threading from logging import CRITICAL # NOQA from logging import DEBUG # NOQA from logging import ERROR # NOQA from logging import FATAL # NOQA from logging import INFO # NOQA from logging import NOTSET # NOQA from logging import WARN # NOQA from logging import WARNING # NOQA from typing import Optional from tqdm import auto as tqdm_lib _lock = threading.Lock() _default_handler: Optional[logging.Handler] = None log_levels = { "debug": logging.DEBUG, "info": logging.INFO, "warning": logging.WARNING, "error": logging.ERROR, "critical": logging.CRITICAL, } _default_log_level = logging.WARNING _tqdm_active = True def _get_default_logging_level(): """ If DIFFUSERS_VERBOSITY env var is set to one of the valid choices return that as the new default level. If it is not - fall back to `_default_log_level` """ env_level_str = os.getenv("DIFFUSERS_VERBOSITY", None) if env_level_str: if env_level_str in log_levels: return log_levels[env_level_str] else: logging.getLogger().warning( f"Unknown option DIFFUSERS_VERBOSITY={env_level_str}, " f"has to be one of: { ', '.join(log_levels.keys()) }" ) return _default_log_level def _get_library_name() -> str: return __name__.split(".")[0] def _get_library_root_logger() -> logging.Logger: return logging.getLogger(_get_library_name()) def _configure_library_root_logger() -> None: global _default_handler with _lock: if _default_handler: # This library has already configured the library root logger. return _default_handler = logging.StreamHandler() # Set sys.stderr as stream. _default_handler.flush = sys.stderr.flush # Apply our default configuration to the library root logger. library_root_logger = _get_library_root_logger() library_root_logger.addHandler(_default_handler) library_root_logger.setLevel(_get_default_logging_level()) library_root_logger.propagate = False def _reset_library_root_logger() -> None: global _default_handler with _lock: if not _default_handler: return library_root_logger = _get_library_root_logger() library_root_logger.removeHandler(_default_handler) library_root_logger.setLevel(logging.NOTSET) _default_handler = None def get_log_levels_dict(): return log_levels def get_logger(name: Optional[str] = None) -> logging.Logger: """ Return a logger with the specified name. This function is not supposed to be directly accessed unless you are writing a custom diffusers module. """ if name is None: name = _get_library_name() _configure_library_root_logger() return logging.getLogger(name) def get_verbosity() -> int: """ Return the current level for the 🤗 Diffusers' root logger as an int. Returns: `int`: The logging level. 
<Tip> 🤗 Diffusers has following logging levels: - 50: `diffusers.logging.CRITICAL` or `diffusers.logging.FATAL` - 40: `diffusers.logging.ERROR` - 30: `diffusers.logging.WARNING` or `diffusers.logging.WARN` - 20: `diffusers.logging.INFO` - 10: `diffusers.logging.DEBUG` </Tip>""" _configure_library_root_logger() return _get_library_root_logger().getEffectiveLevel() def set_verbosity(verbosity: int) -> None: """ Set the verbosity level for the 🤗 Diffusers' root logger. Args: verbosity (`int`): Logging level, e.g., one of: - `diffusers.logging.CRITICAL` or `diffusers.logging.FATAL` - `diffusers.logging.ERROR` - `diffusers.logging.WARNING` or `diffusers.logging.WARN` - `diffusers.logging.INFO` - `diffusers.logging.DEBUG` """ _configure_library_root_logger() _get_library_root_logger().setLevel(verbosity) def set_verbosity_info(): """Set the verbosity to the `INFO` level.""" return set_verbosity(INFO) def set_verbosity_warning(): """Set the verbosity to the `WARNING` level.""" return set_verbosity(WARNING) def set_verbosity_debug(): """Set the verbosity to the `DEBUG` level.""" return set_verbosity(DEBUG) def set_verbosity_error(): """Set the verbosity to the `ERROR` level.""" return set_verbosity(ERROR) def disable_default_handler() -> None: """Disable the default handler of the HuggingFace Diffusers' root logger.""" _configure_library_root_logger() assert _default_handler is not None _get_library_root_logger().removeHandler(_default_handler) def enable_default_handler() -> None: """Enable the default handler of the HuggingFace Diffusers' root logger.""" _configure_library_root_logger() assert _default_handler is not None _get_library_root_logger().addHandler(_default_handler) def add_handler(handler: logging.Handler) -> None: """adds a handler to the HuggingFace Diffusers' root logger.""" _configure_library_root_logger() assert handler is not None _get_library_root_logger().addHandler(handler) def remove_handler(handler: logging.Handler) -> None: """removes given handler from the HuggingFace Diffusers' root logger.""" _configure_library_root_logger() assert handler is not None and handler not in _get_library_root_logger().handlers _get_library_root_logger().removeHandler(handler) def disable_propagation() -> None: """ Disable propagation of the library log outputs. Note that log propagation is disabled by default. """ _configure_library_root_logger() _get_library_root_logger().propagate = False def enable_propagation() -> None: """ Enable propagation of the library log outputs. Please disable the HuggingFace Diffusers' default handler to prevent double logging if the root logger has been configured. """ _configure_library_root_logger() _get_library_root_logger().propagate = True def enable_explicit_format() -> None: """ Enable explicit formatting for every HuggingFace Diffusers' logger. The explicit formatter is as follows: ``` [LEVELNAME|FILENAME|LINE NUMBER] TIME >> MESSAGE ``` All handlers currently bound to the root logger are affected by this method. """ handlers = _get_library_root_logger().handlers for handler in handlers: formatter = logging.Formatter("[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s") handler.setFormatter(formatter) def reset_format() -> None: """ Resets the formatting for HuggingFace Diffusers' loggers. All handlers currently bound to the root logger are affected by this method. 
""" handlers = _get_library_root_logger().handlers for handler in handlers: handler.setFormatter(None) def warning_advice(self, *args, **kwargs): """ This method is identical to `logger.warninging()`, but if env var DIFFUSERS_NO_ADVISORY_WARNINGS=1 is set, this warning will not be printed """ no_advisory_warnings = os.getenv("DIFFUSERS_NO_ADVISORY_WARNINGS", False) if no_advisory_warnings: return self.warning(*args, **kwargs) logging.Logger.warning_advice = warning_advice class EmptyTqdm: """Dummy tqdm which doesn't do anything.""" def __init__(self, *args, **kwargs): # pylint: disable=unused-argument self._iterator = args[0] if args else None def __iter__(self): return iter(self._iterator) def __getattr__(self, _): """Return empty function.""" def empty_fn(*args, **kwargs): # pylint: disable=unused-argument return return empty_fn def __enter__(self): return self def __exit__(self, type_, value, traceback): return class _tqdm_cls: def __call__(self, *args, **kwargs): if _tqdm_active: return tqdm_lib.tqdm(*args, **kwargs) else: return EmptyTqdm(*args, **kwargs) def set_lock(self, *args, **kwargs): self._lock = None if _tqdm_active: return tqdm_lib.tqdm.set_lock(*args, **kwargs) def get_lock(self): if _tqdm_active: return tqdm_lib.tqdm.get_lock() tqdm = _tqdm_cls() def is_progress_bar_enabled() -> bool: """Return a boolean indicating whether tqdm progress bars are enabled.""" global _tqdm_active return bool(_tqdm_active) def enable_progress_bar(): """Enable tqdm progress bar.""" global _tqdm_active _tqdm_active = True def disable_progress_bar(): """Disable tqdm progress bar.""" global _tqdm_active _tqdm_active = False
diffusers_all-main
src/diffusers/utils/logging.py
# This file is autogenerated by the command `make fix-copies`, do not edit. # flake8: noqa from ..utils import DummyObject, requires_backends class GlideSuperResUNetModel(metaclass=DummyObject): _backends = ["transformers"] def __init__(self, *args, **kwargs): requires_backends(self, ["transformers"]) class GlideTextToImageUNetModel(metaclass=DummyObject): _backends = ["transformers"] def __init__(self, *args, **kwargs): requires_backends(self, ["transformers"]) class GlideUNetModel(metaclass=DummyObject): _backends = ["transformers"] def __init__(self, *args, **kwargs): requires_backends(self, ["transformers"]) class UNetGradTTSModel(metaclass=DummyObject): _backends = ["transformers"] def __init__(self, *args, **kwargs): requires_backends(self, ["transformers"]) class GlidePipeline(metaclass=DummyObject): _backends = ["transformers"] def __init__(self, *args, **kwargs): requires_backends(self, ["transformers"]) class LatentDiffusionPipeline(metaclass=DummyObject): _backends = ["transformers"] def __init__(self, *args, **kwargs): requires_backends(self, ["transformers"])
diffusers_all-main
src/diffusers/utils/dummy_transformers_objects.py
# Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import importlib import os from collections import OrderedDict import importlib_metadata from requests.exceptions import HTTPError from .logging import get_logger logger = get_logger(__name__) hf_cache_home = os.path.expanduser( os.getenv("HF_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "huggingface")) ) default_cache_path = os.path.join(hf_cache_home, "diffusers") CONFIG_NAME = "config.json" HUGGINGFACE_CO_RESOLVE_ENDPOINT = "https://huggingface.co" DIFFUSERS_CACHE = default_cache_path DIFFUSERS_DYNAMIC_MODULE_NAME = "diffusers_modules" HF_MODULES_CACHE = os.getenv("HF_MODULES_CACHE", os.path.join(hf_cache_home, "modules")) _transformers_available = importlib.util.find_spec("transformers") is not None try: _transformers_version = importlib_metadata.version("transformers") logger.debug(f"Successfully imported transformers version {_transformers_version}") except importlib_metadata.PackageNotFoundError: _transformers_available = False _inflect_available = importlib.util.find_spec("inflect") is not None try: _inflect_version = importlib_metadata.version("inflect") logger.debug(f"Successfully imported inflect version {_inflect_version}") except importlib_metadata.PackageNotFoundError: _inflect_available = False _unidecode_available = importlib.util.find_spec("unidecode") is not None try: _unidecode_version = importlib_metadata.version("unidecode") logger.debug(f"Successfully imported unidecode version {_unidecode_version}") except importlib_metadata.PackageNotFoundError: _unidecode_available = False def is_transformers_available(): return _transformers_available def is_inflect_available(): return _inflect_available def is_unidecode_available(): return _unidecode_available class RepositoryNotFoundError(HTTPError): """ Raised when trying to access a hf.co URL with an invalid repository name, or with a private repo name the user does not have access to. """ class EntryNotFoundError(HTTPError): """Raised when trying to access a hf.co URL with a valid repository and revision but an invalid filename.""" class RevisionNotFoundError(HTTPError): """Raised when trying to access a hf.co URL with a valid repository but an invalid revision.""" TRANSFORMERS_IMPORT_ERROR = """ {0} requires the transformers library but it was not found in your environment. You can install it with pip: `pip install transformers` """ UNIDECODE_IMPORT_ERROR = """ {0} requires the unidecode library but it was not found in your environment. You can install it with pip: `pip install Unidecode` """ INFLECT_IMPORT_ERROR = """ {0} requires the inflect library but it was not found in your environment. 
You can install it with pip: `pip install inflect` """ BACKENDS_MAPPING = OrderedDict( [ ("transformers", (is_transformers_available, TRANSFORMERS_IMPORT_ERROR)), ("unidecode", (is_unidecode_available, UNIDECODE_IMPORT_ERROR)), ("inflect", (is_inflect_available, INFLECT_IMPORT_ERROR)), ] ) def requires_backends(obj, backends): if not isinstance(backends, (list, tuple)): backends = [backends] name = obj.__name__ if hasattr(obj, "__name__") else obj.__class__.__name__ checks = (BACKENDS_MAPPING[backend] for backend in backends) failed = [msg.format(name) for available, msg in checks if not available()] if failed: raise ImportError("".join(failed)) class DummyObject(type): """ Metaclass for the dummy objects. Any class inheriting from it will return the ImportError generated by `requires_backend` each time a user tries to access any method of that class. """ def __getattr__(cls, key): if key.startswith("_"): return super().__getattr__(cls, key) requires_backends(cls, cls._backends)
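

# Hedged smoke test (never runs on import): report which optional backends registered in
# BACKENDS_MAPPING are importable in the current environment.
if __name__ == "__main__":
    for _backend_name, (_is_available, _) in BACKENDS_MAPPING.items():
        print(f"{_backend_name}: {'available' if _is_available() else 'not installed'}")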
diffusers_all-main
src/diffusers/utils/__init__.py
# This file is autogenerated by the command `make fix-copies`, do not edit. # flake8: noqa from ..utils import DummyObject, requires_backends class GradTTSPipeline(metaclass=DummyObject): _backends = ["transformers", "inflect", "unidecode"] def __init__(self, *args, **kwargs): requires_backends(self, ["transformers", "inflect", "unidecode"])
diffusers_all-main
src/diffusers/utils/dummy_transformers_and_inflect_and_unidecode_objects.py
import math from inspect import isfunction import torch import torch.nn.functional as F from torch import nn # unet_grad_tts.py # TODO(Patrick) - weird linear attention layer. Check with: https://github.com/huawei-noah/Speech-Backbones/issues/15 class LinearAttention(torch.nn.Module): def __init__(self, dim, heads=4, dim_head=32): super(LinearAttention, self).__init__() self.heads = heads self.dim_head = dim_head hidden_dim = dim_head * heads self.to_qkv = torch.nn.Conv2d(dim, hidden_dim * 3, 1, bias=False) self.to_out = torch.nn.Conv2d(hidden_dim, dim, 1) def forward(self, x, encoder_states=None): b, c, h, w = x.shape qkv = self.to_qkv(x) q, k, v = ( qkv.reshape(b, 3, self.heads, self.dim_head, h, w) .permute(1, 0, 2, 3, 4, 5) .reshape(3, b, self.heads, self.dim_head, -1) ) k = k.softmax(dim=-1) context = torch.einsum("bhdn,bhen->bhde", k, v) out = torch.einsum("bhde,bhdn->bhen", context, q) out = out.reshape(b, self.heads, self.dim_head, h, w).reshape(b, self.heads * self.dim_head, h, w) return self.to_out(out) # the main attention block that is used for all models class AttentionBlock(nn.Module): """ An attention block that allows spatial positions to attend to each other. Originally ported from here, but adapted to the N-d case. https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/models/unet.py#L66. """ def __init__( self, channels, num_heads=1, num_head_channels=None, num_groups=32, encoder_channels=None, overwrite_qkv=False, overwrite_linear=False, rescale_output_factor=1.0, ): super().__init__() self.channels = channels if num_head_channels is None: self.num_heads = num_heads else: assert ( channels % num_head_channels == 0 ), f"q,k,v channels {channels} is not divisible by num_head_channels {num_head_channels}" self.num_heads = channels // num_head_channels self.norm = nn.GroupNorm(num_channels=channels, num_groups=num_groups, eps=1e-5, affine=True) self.qkv = nn.Conv1d(channels, channels * 3, 1) self.n_heads = self.num_heads self.rescale_output_factor = rescale_output_factor if encoder_channels is not None: self.encoder_kv = nn.Conv1d(encoder_channels, channels * 2, 1) self.proj = zero_module(nn.Conv1d(channels, channels, 1)) self.overwrite_qkv = overwrite_qkv self.overwrite_linear = overwrite_linear if overwrite_qkv: in_channels = channels self.norm = nn.GroupNorm(num_channels=channels, num_groups=num_groups, eps=1e-6) self.q = torch.nn.Conv2d(in_channels, in_channels, kernel_size=1, stride=1, padding=0) self.k = torch.nn.Conv2d(in_channels, in_channels, kernel_size=1, stride=1, padding=0) self.v = torch.nn.Conv2d(in_channels, in_channels, kernel_size=1, stride=1, padding=0) self.proj_out = torch.nn.Conv2d(in_channels, in_channels, kernel_size=1, stride=1, padding=0) elif self.overwrite_linear: num_groups = min(channels // 4, 32) self.norm = nn.GroupNorm(num_channels=channels, num_groups=num_groups, eps=1e-6) self.NIN_0 = NIN(channels, channels) self.NIN_1 = NIN(channels, channels) self.NIN_2 = NIN(channels, channels) self.NIN_3 = NIN(channels, channels) self.GroupNorm_0 = nn.GroupNorm(num_groups=num_groups, num_channels=channels, eps=1e-6) else: self.proj_out = zero_module(nn.Conv1d(channels, channels, 1)) self.set_weights(self) self.is_overwritten = False def set_weights(self, module): if self.overwrite_qkv: qkv_weight = torch.cat([module.q.weight.data, module.k.weight.data, module.v.weight.data], dim=0)[ :, :, :, 0 ] qkv_bias = torch.cat([module.q.bias.data, module.k.bias.data, module.v.bias.data], dim=0) self.qkv.weight.data = 
qkv_weight self.qkv.bias.data = qkv_bias proj_out = zero_module(nn.Conv1d(self.channels, self.channels, 1)) proj_out.weight.data = module.proj_out.weight.data[:, :, :, 0] proj_out.bias.data = module.proj_out.bias.data self.proj = proj_out elif self.overwrite_linear: self.qkv.weight.data = torch.concat( [self.NIN_0.W.data.T, self.NIN_1.W.data.T, self.NIN_2.W.data.T], dim=0 )[:, :, None] self.qkv.bias.data = torch.concat([self.NIN_0.b.data, self.NIN_1.b.data, self.NIN_2.b.data], dim=0) self.proj.weight.data = self.NIN_3.W.data.T[:, :, None] self.proj.bias.data = self.NIN_3.b.data self.norm.weight.data = self.GroupNorm_0.weight.data self.norm.bias.data = self.GroupNorm_0.bias.data else: self.proj.weight.data = self.proj_out.weight.data self.proj.bias.data = self.proj_out.bias.data def forward(self, x, encoder_out=None): if not self.is_overwritten and (self.overwrite_qkv or self.overwrite_linear): self.set_weights(self) self.is_overwritten = True b, c, *spatial = x.shape hid_states = self.norm(x).view(b, c, -1) qkv = self.qkv(hid_states) bs, width, length = qkv.shape assert width % (3 * self.n_heads) == 0 ch = width // (3 * self.n_heads) q, k, v = qkv.reshape(bs * self.n_heads, ch * 3, length).split(ch, dim=1) if encoder_out is not None: encoder_kv = self.encoder_kv(encoder_out) assert encoder_kv.shape[1] == self.n_heads * ch * 2 ek, ev = encoder_kv.reshape(bs * self.n_heads, ch * 2, -1).split(ch, dim=1) k = torch.cat([ek, k], dim=-1) v = torch.cat([ev, v], dim=-1) scale = 1 / math.sqrt(math.sqrt(ch)) weight = torch.einsum("bct,bcs->bts", q * scale, k * scale) # More stable with f16 than dividing afterwards weight = torch.softmax(weight.float(), dim=-1).type(weight.dtype) a = torch.einsum("bts,bcs->bct", weight, v) h = a.reshape(bs, -1, length) h = self.proj(h) h = h.reshape(b, c, *spatial) result = x + h result = result / self.rescale_output_factor return result class AttentionBlockNew(nn.Module): """ An attention block that allows spatial positions to attend to each other. Originally ported from here, but adapted to the N-d case. https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/models/unet.py#L66. 
""" def __init__( self, channels, num_head_channels=1, num_groups=32, encoder_channels=None, rescale_output_factor=1.0, ): super().__init__() self.norm = nn.GroupNorm(num_channels=channels, num_groups=num_groups, eps=1e-5, affine=True) self.qkv = nn.Conv1d(channels, channels * 3, 1) self.n_heads = channels // num_head_channels self.rescale_output_factor = rescale_output_factor if encoder_channels is not None: self.encoder_kv = nn.Conv1d(encoder_channels, channels * 2, 1) self.proj = zero_module(nn.Conv1d(channels, channels, 1)) def set_weight(self, attn_layer): self.norm.weight.data = attn_layer.norm.weight.data self.norm.bias.data = attn_layer.norm.bias.data self.qkv.weight.data = attn_layer.qkv.weight.data self.qkv.bias.data = attn_layer.qkv.bias.data self.proj.weight.data = attn_layer.proj.weight.data self.proj.bias.data = attn_layer.proj.bias.data def forward(self, x, encoder_out=None): b, c, *spatial = x.shape hid_states = self.norm(x).view(b, c, -1) qkv = self.qkv(hid_states) bs, width, length = qkv.shape assert width % (3 * self.n_heads) == 0 ch = width // (3 * self.n_heads) q, k, v = qkv.reshape(bs * self.n_heads, ch * 3, length).split(ch, dim=1) if encoder_out is not None: encoder_kv = self.encoder_kv(encoder_out) assert encoder_kv.shape[1] == self.n_heads * ch * 2 ek, ev = encoder_kv.reshape(bs * self.n_heads, ch * 2, -1).split(ch, dim=1) k = torch.cat([ek, k], dim=-1) v = torch.cat([ev, v], dim=-1) scale = 1 / math.sqrt(math.sqrt(ch)) weight = torch.einsum("bct,bcs->bts", q * scale, k * scale) # More stable with f16 than dividing afterwards weight = torch.softmax(weight.float(), dim=-1).type(weight.dtype) a = torch.einsum("bts,bcs->bct", weight, v) h = a.reshape(bs, -1, length) h = self.proj(h) h = h.reshape(b, c, *spatial) result = x + h result = result / self.rescale_output_factor return result class SpatialTransformer(nn.Module): """ Transformer block for image-like data. First, project the input (aka embedding) and reshape to b, t, d. Then apply standard transformer action. 
Finally, reshape to image """ def __init__(self, in_channels, n_heads, d_head, depth=1, dropout=0.0, context_dim=None): super().__init__() self.in_channels = in_channels inner_dim = n_heads * d_head self.norm = torch.nn.GroupNorm(num_groups=32, num_channels=in_channels, eps=1e-6, affine=True) self.proj_in = nn.Conv2d(in_channels, inner_dim, kernel_size=1, stride=1, padding=0) self.transformer_blocks = nn.ModuleList( [ BasicTransformerBlock(inner_dim, n_heads, d_head, dropout=dropout, context_dim=context_dim) for d in range(depth) ] ) self.proj_out = zero_module(nn.Conv2d(inner_dim, in_channels, kernel_size=1, stride=1, padding=0)) def forward(self, x, context=None): # note: if no context is given, cross-attention defaults to self-attention b, c, h, w = x.shape x_in = x x = self.norm(x) x = self.proj_in(x) x = x.permute(0, 2, 3, 1).reshape(b, h * w, c) for block in self.transformer_blocks: x = block(x, context=context) x = x.reshape(b, h, w, c).permute(0, 3, 1, 2) x = self.proj_out(x) return x + x_in class BasicTransformerBlock(nn.Module): def __init__(self, dim, n_heads, d_head, dropout=0.0, context_dim=None, gated_ff=True, checkpoint=True): super().__init__() self.attn1 = CrossAttention( query_dim=dim, heads=n_heads, dim_head=d_head, dropout=dropout ) # is a self-attention self.ff = FeedForward(dim, dropout=dropout, glu=gated_ff) self.attn2 = CrossAttention( query_dim=dim, context_dim=context_dim, heads=n_heads, dim_head=d_head, dropout=dropout ) # is self-attn if context is none self.norm1 = nn.LayerNorm(dim) self.norm2 = nn.LayerNorm(dim) self.norm3 = nn.LayerNorm(dim) self.checkpoint = checkpoint def forward(self, x, context=None): x = self.attn1(self.norm1(x)) + x x = self.attn2(self.norm2(x), context=context) + x x = self.ff(self.norm3(x)) + x return x class CrossAttention(nn.Module): def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0.0): super().__init__() inner_dim = dim_head * heads context_dim = default(context_dim, query_dim) self.scale = dim_head**-0.5 self.heads = heads self.to_q = nn.Linear(query_dim, inner_dim, bias=False) self.to_k = nn.Linear(context_dim, inner_dim, bias=False) self.to_v = nn.Linear(context_dim, inner_dim, bias=False) self.to_out = nn.Sequential(nn.Linear(inner_dim, query_dim), nn.Dropout(dropout)) def reshape_heads_to_batch_dim(self, tensor): batch_size, seq_len, dim = tensor.shape head_size = self.heads tensor = tensor.reshape(batch_size, seq_len, head_size, dim // head_size) tensor = tensor.permute(0, 2, 1, 3).reshape(batch_size * head_size, seq_len, dim // head_size) return tensor def reshape_batch_dim_to_heads(self, tensor): batch_size, seq_len, dim = tensor.shape head_size = self.heads tensor = tensor.reshape(batch_size // head_size, head_size, seq_len, dim) tensor = tensor.permute(0, 2, 1, 3).reshape(batch_size // head_size, seq_len, dim * head_size) return tensor def forward(self, x, context=None, mask=None): batch_size, sequence_length, dim = x.shape h = self.heads q = self.to_q(x) context = default(context, x) k = self.to_k(context) v = self.to_v(context) q = self.reshape_heads_to_batch_dim(q) k = self.reshape_heads_to_batch_dim(k) v = self.reshape_heads_to_batch_dim(v) sim = torch.einsum("b i d, b j d -> b i j", q, k) * self.scale if exists(mask): mask = mask.reshape(batch_size, -1) max_neg_value = -torch.finfo(sim.dtype).max mask = mask[:, None, :].repeat(h, 1, 1) sim.masked_fill_(~mask, max_neg_value) # attention, what we cannot get enough of attn = sim.softmax(dim=-1) out = torch.einsum("b i j, b j d -> b i d", 
attn, v) out = self.reshape_batch_dim_to_heads(out) return self.to_out(out) class FeedForward(nn.Module): def __init__(self, dim, dim_out=None, mult=4, glu=False, dropout=0.0): super().__init__() inner_dim = int(dim * mult) dim_out = default(dim_out, dim) project_in = nn.Sequential(nn.Linear(dim, inner_dim), nn.GELU()) if not glu else GEGLU(dim, inner_dim) self.net = nn.Sequential(project_in, nn.Dropout(dropout), nn.Linear(inner_dim, dim_out)) def forward(self, x): return self.net(x) # TODO(Patrick) - this can and should be removed def zero_module(module): """ Zero out the parameters of a module and return it. """ for p in module.parameters(): p.detach().zero_() return module # TODO(Patrick) - remove once all weights have been converted -> not needed anymore then class NIN(nn.Module): def __init__(self, in_dim, num_units, init_scale=0.1): super().__init__() self.W = nn.Parameter(torch.zeros(in_dim, num_units), requires_grad=True) self.b = nn.Parameter(torch.zeros(num_units), requires_grad=True) def exists(val): return val is not None def default(val, d): if exists(val): return val return d() if isfunction(d) else d # feedforward class GEGLU(nn.Module): def __init__(self, dim_in, dim_out): super().__init__() self.proj = nn.Linear(dim_in, dim_out * 2) def forward(self, x): x, gate = self.proj(x).chunk(2, dim=-1) return x * F.gelu(gate)
diffusers_all-main
src/diffusers/models/attention.py
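A minimal usage sketch for the CrossAttention module defined in attention.py above. The tensor shapes, head count, and the direct diffusers.models.attention import path are illustrative assumptions rather than a shipped pipeline config; when no context is passed the block falls back to self-attention, as the forward comment notes.

import torch

from diffusers.models.attention import CrossAttention

# query_dim / context_dim / heads chosen only for this sketch
attn = CrossAttention(query_dim=64, context_dim=32, heads=4, dim_head=16)

hidden_states = torch.randn(2, 10, 64)  # (batch, query tokens, query_dim)
context = torch.randn(2, 7, 32)         # (batch, context tokens, context_dim)

out = attn(hidden_states, context=context)
print(out.shape)  # torch.Size([2, 10, 64]) - to_out projects back to query_dim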
import torch from ..configuration_utils import ConfigMixin from ..modeling_utils import ModelMixin from .attention import LinearAttention from .embeddings import get_timestep_embedding from .resnet import Downsample2D, ResnetBlock2D, Upsample2D from .unet_new import UNetMidBlock2D class Mish(torch.nn.Module): def forward(self, x): return x * torch.tanh(torch.nn.functional.softplus(x)) class Rezero(torch.nn.Module): def __init__(self, fn): super(Rezero, self).__init__() self.fn = fn self.g = torch.nn.Parameter(torch.zeros(1)) def forward(self, x, encoder_out=None): return self.fn(x, encoder_out) * self.g class Block(torch.nn.Module): def __init__(self, dim, dim_out, groups=8): super(Block, self).__init__() self.block = torch.nn.Sequential( torch.nn.Conv2d(dim, dim_out, 3, padding=1), torch.nn.GroupNorm(groups, dim_out), Mish() ) def forward(self, x, mask): output = self.block(x * mask) return output * mask class Residual(torch.nn.Module): def __init__(self, fn): super(Residual, self).__init__() self.fn = fn def forward(self, x, *args, **kwargs): output = self.fn(x, *args, **kwargs) + x return output class UNetGradTTSModel(ModelMixin, ConfigMixin): def __init__(self, dim, dim_mults=(1, 2, 4), groups=8, n_spks=None, spk_emb_dim=64, n_feats=80, pe_scale=1000): super(UNetGradTTSModel, self).__init__() self.register_to_config( dim=dim, dim_mults=dim_mults, groups=groups, n_spks=n_spks, spk_emb_dim=spk_emb_dim, n_feats=n_feats, pe_scale=pe_scale, ) self.dim = dim self.dim_mults = dim_mults self.groups = groups self.n_spks = n_spks if not isinstance(n_spks, type(None)) else 1 self.spk_emb_dim = spk_emb_dim self.pe_scale = pe_scale if n_spks > 1: self.spk_emb = torch.nn.Embedding(n_spks, spk_emb_dim) self.spk_mlp = torch.nn.Sequential( torch.nn.Linear(spk_emb_dim, spk_emb_dim * 4), Mish(), torch.nn.Linear(spk_emb_dim * 4, n_feats) ) self.mlp = torch.nn.Sequential(torch.nn.Linear(dim, dim * 4), Mish(), torch.nn.Linear(dim * 4, dim)) dims = [2 + (1 if n_spks > 1 else 0), *map(lambda m: dim * m, dim_mults)] in_out = list(zip(dims[:-1], dims[1:])) self.downs = torch.nn.ModuleList([]) self.ups = torch.nn.ModuleList([]) num_resolutions = len(in_out) for ind, (dim_in, dim_out) in enumerate(in_out): is_last = ind >= (num_resolutions - 1) self.downs.append( torch.nn.ModuleList( [ ResnetBlock2D( in_channels=dim_in, out_channels=dim_out, temb_channels=dim, groups=8, pre_norm=False, eps=1e-5, non_linearity="mish", overwrite_for_grad_tts=True, ), ResnetBlock2D( in_channels=dim_out, out_channels=dim_out, temb_channels=dim, groups=8, pre_norm=False, eps=1e-5, non_linearity="mish", overwrite_for_grad_tts=True, ), Residual(Rezero(LinearAttention(dim_out))), Downsample2D(dim_out, use_conv=True, padding=1) if not is_last else torch.nn.Identity(), ] ) ) mid_dim = dims[-1] self.mid = UNetMidBlock2D( in_channels=mid_dim, temb_channels=dim, resnet_groups=8, resnet_pre_norm=False, resnet_eps=1e-5, resnet_act_fn="mish", attention_layer_type="linear", ) self.mid_block1 = ResnetBlock2D( in_channels=mid_dim, out_channels=mid_dim, temb_channels=dim, groups=8, pre_norm=False, eps=1e-5, non_linearity="mish", overwrite_for_grad_tts=True, ) self.mid_attn = Residual(Rezero(LinearAttention(mid_dim))) self.mid_block2 = ResnetBlock2D( in_channels=mid_dim, out_channels=mid_dim, temb_channels=dim, groups=8, pre_norm=False, eps=1e-5, non_linearity="mish", overwrite_for_grad_tts=True, ) self.mid.resnets[0] = self.mid_block1 self.mid.attentions[0] = self.mid_attn self.mid.resnets[1] = self.mid_block2 for ind, (dim_in, dim_out) in 
enumerate(reversed(in_out[1:])): self.ups.append( torch.nn.ModuleList( [ ResnetBlock2D( in_channels=dim_out * 2, out_channels=dim_in, temb_channels=dim, groups=8, pre_norm=False, eps=1e-5, non_linearity="mish", overwrite_for_grad_tts=True, ), ResnetBlock2D( in_channels=dim_in, out_channels=dim_in, temb_channels=dim, groups=8, pre_norm=False, eps=1e-5, non_linearity="mish", overwrite_for_grad_tts=True, ), Residual(Rezero(LinearAttention(dim_in))), Upsample2D(dim_in, use_conv_transpose=True), ] ) ) self.final_block = Block(dim, dim) self.final_conv = torch.nn.Conv2d(dim, 1, 1) def forward(self, sample, timesteps, mu, mask, spk=None): x = sample if self.n_spks > 1: # Get speaker embedding spk = self.spk_emb(spk) if not isinstance(spk, type(None)): s = self.spk_mlp(spk) t = get_timestep_embedding(timesteps, self.dim, scale=self.pe_scale) t = self.mlp(t) if self.n_spks < 2: x = torch.stack([mu, x], 1) else: s = s.unsqueeze(-1).repeat(1, 1, x.shape[-1]) x = torch.stack([mu, x, s], 1) mask = mask.unsqueeze(1) hiddens = [] masks = [mask] for resnet1, resnet2, attn, downsample in self.downs: mask_down = masks[-1] x = resnet1(x, t, mask_down) x = resnet2(x, t, mask_down) x = attn(x) hiddens.append(x) x = downsample(x * mask_down) masks.append(mask_down[:, :, :, ::2]) masks = masks[:-1] mask_mid = masks[-1] x = self.mid(x, t, mask=mask_mid) for resnet1, resnet2, attn, upsample in self.ups: mask_up = masks.pop() x = torch.cat((x, hiddens.pop()), dim=1) x = resnet1(x, t, mask_up) x = resnet2(x, t, mask_up) x = attn(x) x = upsample(x * mask_up) x = self.final_block(x, mask) output = self.final_conv(x * mask) return (output * mask).squeeze(1)
diffusers_all-main
src/diffusers/models/unet_grad_tts.py
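A hedged forward-pass sketch for UNetGradTTSModel in its single-speaker configuration. The mel/mask shapes and the continuous timesteps in [0, 1] are assumptions read off the forward signature above; the sketch also assumes the ResnetBlock2D, LinearAttention and UNetMidBlock2D helpers it builds on (defined in the companion files) accept the arguments this constructor passes.

import torch

from diffusers.models.unet_grad_tts import UNetGradTTSModel

model = UNetGradTTSModel(dim=64, dim_mults=(1, 2, 4), n_feats=80, n_spks=1)

batch, n_feats, length = 2, 80, 128        # length divisible by 4 for the two downsamples
noisy_mel = torch.randn(batch, n_feats, length)
mu = torch.randn(batch, n_feats, length)   # text-conditioned "average voice" spectrogram
mask = torch.ones(batch, 1, length)        # assumed frame mask, broadcast over the mel bins
timesteps = torch.rand(batch)              # assumed continuous diffusion time in [0, 1]

score = model(noisy_mel, timesteps, mu, mask)
print(score.shape)  # torch.Size([2, 80, 128])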
import torch import torch.nn as nn import torch.nn.functional as F from ..configuration_utils import ConfigMixin from ..modeling_utils import ModelMixin from .attention import AttentionBlock from .embeddings import get_timestep_embedding from .resnet import Downsample2D, ResnetBlock2D, Upsample2D from .unet_new import UNetMidBlock2D def convert_module_to_f16(l): """ Convert primitive modules to float16. """ if isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Conv3d)): l.weight.data = l.weight.data.half() if l.bias is not None: l.bias.data = l.bias.data.half() def convert_module_to_f32(l): """ Convert primitive modules to float32, undoing convert_module_to_f16(). """ if isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Conv3d)): l.weight.data = l.weight.data.float() if l.bias is not None: l.bias.data = l.bias.data.float() def conv_nd(dims, *args, **kwargs): """ Create a 1D, 2D, or 3D convolution module. """ if dims == 1: return nn.Conv1d(*args, **kwargs) elif dims == 2: return nn.Conv2d(*args, **kwargs) elif dims == 3: return nn.Conv3d(*args, **kwargs) raise ValueError(f"unsupported dimensions: {dims}") def linear(*args, **kwargs): """ Create a linear module. """ return nn.Linear(*args, **kwargs) class GroupNorm32(nn.GroupNorm): def __init__(self, num_groups, num_channels, swish, eps=1e-5): super().__init__(num_groups=num_groups, num_channels=num_channels, eps=eps) self.swish = swish def forward(self, x): y = super().forward(x.float()).to(x.dtype) if self.swish == 1.0: y = F.silu(y) elif self.swish: y = y * F.sigmoid(y * float(self.swish)) return y def normalization(channels, swish=0.0): """ Make a standard normalization layer, with an optional swish activation. :param channels: number of input channels. :return: an nn.Module for normalization. """ return GroupNorm32(num_channels=channels, num_groups=32, swish=swish) def zero_module(module): """ Zero out the parameters of a module and return it. """ for p in module.parameters(): p.detach().zero_() return module class TimestepEmbedSequential(nn.Sequential): """ A sequential module that passes timestep embeddings to the children that support it as an extra input. """ def forward(self, x, emb, encoder_out=None): for layer in self: if isinstance(layer, ResnetBlock2D) or isinstance(layer, TimestepEmbedSequential): x = layer(x, emb) elif isinstance(layer, AttentionBlock): x = layer(x, encoder_out) else: x = layer(x) return x class GlideUNetModel(ModelMixin, ConfigMixin): """ The full UNet model with attention and timestep embedding. :param in_channels: channels in the input Tensor. :param model_channels: base channel count for the model. :param out_channels: channels in the output Tensor. :param num_res_blocks: number of residual blocks per downsample. :param attention_resolutions: a collection of downsample rates at which attention will take place. May be a set, list, or tuple. For example, if this contains 4, then at 4x downsampling, attention will be used. :param dropout: the dropout probability. :param channel_mult: channel multiplier for each level of the UNet. :param conv_resample: if True, use learned convolutions for upsampling and downsampling. :param dims: determines if the signal is 1D, 2D, or 3D. :param num_classes: if specified (as an int), then this model will be class-conditional with `num_classes` classes. :param use_checkpoint: use gradient checkpointing to reduce memory usage. :param num_heads: the number of attention heads in each attention layer. 
:param num_heads_channels: if specified, ignore num_heads and instead use a fixed channel width per attention head. :param num_heads_upsample: works with num_heads to set a different number of heads for upsampling. Deprecated. :param use_scale_shift_norm: use a FiLM-like conditioning mechanism. :param resblock_updown: use residual blocks for up/downsampling. """ def __init__( self, in_channels=3, resolution=64, model_channels=192, out_channels=6, num_res_blocks=3, attention_resolutions=(2, 4, 8), dropout=0, channel_mult=(1, 2, 4, 8), conv_resample=True, dims=2, use_checkpoint=False, use_fp16=False, num_heads=1, num_head_channels=-1, num_heads_upsample=-1, use_scale_shift_norm=False, resblock_updown=False, transformer_dim=None, ): super().__init__() if num_heads_upsample == -1: num_heads_upsample = num_heads self.in_channels = in_channels self.resolution = resolution self.model_channels = model_channels self.out_channels = out_channels self.num_res_blocks = num_res_blocks self.attention_resolutions = attention_resolutions self.dropout = dropout self.channel_mult = channel_mult self.conv_resample = conv_resample self.use_checkpoint = use_checkpoint # self.dtype = torch.float16 if use_fp16 else torch.float32 self.num_heads = num_heads self.num_head_channels = num_head_channels self.num_heads_upsample = num_heads_upsample time_embed_dim = model_channels * 4 self.time_embed = nn.Sequential( linear(model_channels, time_embed_dim), nn.SiLU(), linear(time_embed_dim, time_embed_dim), ) ch = input_ch = int(channel_mult[0] * model_channels) self.input_blocks = nn.ModuleList([TimestepEmbedSequential(conv_nd(dims, in_channels, ch, 3, padding=1))]) self._feature_size = ch input_block_chans = [ch] ds = 1 for level, mult in enumerate(channel_mult): for _ in range(num_res_blocks): layers = [ ResnetBlock2D( in_channels=ch, out_channels=mult * model_channels, dropout=dropout, temb_channels=time_embed_dim, eps=1e-5, non_linearity="silu", time_embedding_norm="scale_shift" if use_scale_shift_norm else "default", overwrite_for_glide=True, ) ] ch = int(mult * model_channels) if ds in attention_resolutions: layers.append( AttentionBlock( ch, num_heads=num_heads, num_head_channels=num_head_channels, encoder_channels=transformer_dim, ) ) self.input_blocks.append(TimestepEmbedSequential(*layers)) self._feature_size += ch input_block_chans.append(ch) if level != len(channel_mult) - 1: out_ch = ch self.input_blocks.append( TimestepEmbedSequential( ResnetBlock2D( in_channels=ch, out_channels=out_ch, dropout=dropout, temb_channels=time_embed_dim, eps=1e-5, non_linearity="silu", time_embedding_norm="scale_shift" if use_scale_shift_norm else "default", overwrite_for_glide=True, down=True, ) if resblock_updown else Downsample2D(ch, use_conv=conv_resample, out_channels=out_ch, padding=1, name="op") ) ) ch = out_ch input_block_chans.append(ch) ds *= 2 self._feature_size += ch self.mid = UNetMidBlock2D( in_channels=ch, dropout=dropout, temb_channels=time_embed_dim, resnet_eps=1e-5, resnet_act_fn="silu", resnet_time_scale_shift="scale_shift" if use_scale_shift_norm else "default", attn_num_heads=num_heads, attn_num_head_channels=num_head_channels, attn_encoder_channels=transformer_dim, ) # TODO(Patrick) - delete after weight conversion # init to be able to overwrite `self.mid` self.middle_block = TimestepEmbedSequential( ResnetBlock2D( in_channels=ch, dropout=dropout, temb_channels=time_embed_dim, eps=1e-5, non_linearity="silu", time_embedding_norm="scale_shift" if use_scale_shift_norm else "default", overwrite_for_glide=True, 
), AttentionBlock( ch, num_heads=num_heads, num_head_channels=num_head_channels, encoder_channels=transformer_dim, ), ResnetBlock2D( in_channels=ch, dropout=dropout, temb_channels=time_embed_dim, eps=1e-5, non_linearity="silu", time_embedding_norm="scale_shift" if use_scale_shift_norm else "default", overwrite_for_glide=True, ), ) self.mid.resnets[0] = self.middle_block[0] self.mid.attentions[0] = self.middle_block[1] self.mid.resnets[1] = self.middle_block[2] self._feature_size += ch self.output_blocks = nn.ModuleList([]) for level, mult in list(enumerate(channel_mult))[::-1]: for i in range(num_res_blocks + 1): ich = input_block_chans.pop() layers = [ ResnetBlock2D( in_channels=ch + ich, out_channels=model_channels * mult, dropout=dropout, temb_channels=time_embed_dim, eps=1e-5, non_linearity="silu", time_embedding_norm="scale_shift" if use_scale_shift_norm else "default", overwrite_for_glide=True, ), ] ch = int(model_channels * mult) if ds in attention_resolutions: layers.append( AttentionBlock( ch, num_heads=num_heads_upsample, num_head_channels=num_head_channels, encoder_channels=transformer_dim, ) ) if level and i == num_res_blocks: out_ch = ch layers.append( ResnetBlock2D( in_channels=ch, out_channels=out_ch, dropout=dropout, temb_channels=time_embed_dim, eps=1e-5, non_linearity="silu", time_embedding_norm="scale_shift" if use_scale_shift_norm else "default", overwrite_for_glide=True, up=True, ) if resblock_updown else Upsample2D(ch, use_conv=conv_resample, out_channels=out_ch) ) ds //= 2 self.output_blocks.append(TimestepEmbedSequential(*layers)) self._feature_size += ch self.out = nn.Sequential( normalization(ch, swish=1.0), nn.Identity(), zero_module(conv_nd(dims, input_ch, out_channels, 3, padding=1)), ) self.use_fp16 = use_fp16 def convert_to_fp16(self): """ Convert the torso of the model to float16. """ self.input_blocks.apply(convert_module_to_f16) self.middle_block.apply(convert_module_to_f16) self.output_blocks.apply(convert_module_to_f16) def convert_to_fp32(self): """ Convert the torso of the model to float32. """ self.input_blocks.apply(convert_module_to_f32) self.middle_block.apply(convert_module_to_f32) self.output_blocks.apply(convert_module_to_f32) def forward(self, x, timesteps): """ Apply the model to an input batch. :param x: an [N x C x ...] Tensor of inputs. :param timesteps: a 1-D batch of timesteps. :param y: an [N] Tensor of labels, if class-conditional. :return: an [N x C x ...] Tensor of outputs. """ hs = [] emb = self.time_embed( get_timestep_embedding(timesteps, self.model_channels, flip_sin_to_cos=True, downscale_freq_shift=0) ) h = x.type(self.dtype) for module in self.input_blocks: h = module(h, emb) hs.append(h) h = self.mid(h, emb) for module in self.output_blocks: h = torch.cat([h, hs.pop()], dim=1) h = module(h, emb) h = h.type(x.dtype) return self.out(h) class GlideTextToImageUNetModel(GlideUNetModel): """ A UNetModel that performs super-resolution. Expects an extra kwarg `low_res` to condition on a low-resolution image. 
""" def __init__( self, in_channels=3, resolution=64, model_channels=192, out_channels=6, num_res_blocks=3, attention_resolutions=(2, 4, 8), dropout=0, channel_mult=(1, 2, 4, 8), conv_resample=True, dims=2, use_checkpoint=False, use_fp16=False, num_heads=1, num_head_channels=-1, num_heads_upsample=-1, use_scale_shift_norm=False, resblock_updown=False, transformer_dim=512, ): super().__init__( in_channels=in_channels, resolution=resolution, model_channels=model_channels, out_channels=out_channels, num_res_blocks=num_res_blocks, attention_resolutions=attention_resolutions, dropout=dropout, channel_mult=channel_mult, conv_resample=conv_resample, dims=dims, use_checkpoint=use_checkpoint, use_fp16=use_fp16, num_heads=num_heads, num_head_channels=num_head_channels, num_heads_upsample=num_heads_upsample, use_scale_shift_norm=use_scale_shift_norm, resblock_updown=resblock_updown, transformer_dim=transformer_dim, ) self.register_to_config( in_channels=in_channels, resolution=resolution, model_channels=model_channels, out_channels=out_channels, num_res_blocks=num_res_blocks, attention_resolutions=attention_resolutions, dropout=dropout, channel_mult=channel_mult, conv_resample=conv_resample, dims=dims, use_checkpoint=use_checkpoint, use_fp16=use_fp16, num_heads=num_heads, num_head_channels=num_head_channels, num_heads_upsample=num_heads_upsample, use_scale_shift_norm=use_scale_shift_norm, resblock_updown=resblock_updown, transformer_dim=transformer_dim, ) self.transformer_proj = nn.Linear(transformer_dim, self.model_channels * 4) def forward(self, sample, timesteps, transformer_out=None): x = sample hs = [] emb = self.time_embed( get_timestep_embedding(timesteps, self.model_channels, flip_sin_to_cos=True, downscale_freq_shift=0) ) # project the last token transformer_proj = self.transformer_proj(transformer_out[:, -1]) transformer_out = transformer_out.permute(0, 2, 1) # NLC -> NCL emb = emb + transformer_proj.to(emb) h = x for module in self.input_blocks: h = module(h, emb, transformer_out) hs.append(h) h = self.mid(h, emb, transformer_out) for module in self.output_blocks: other = hs.pop() h = torch.cat([h, other], dim=1) h = module(h, emb, transformer_out) return self.out(h) class GlideSuperResUNetModel(GlideUNetModel): """ A UNetModel that performs super-resolution. Expects an extra kwarg `low_res` to condition on a low-resolution image. 
""" def __init__( self, in_channels=3, resolution=256, model_channels=192, out_channels=6, num_res_blocks=3, attention_resolutions=(2, 4, 8), dropout=0, channel_mult=(1, 2, 4, 8), conv_resample=True, dims=2, use_checkpoint=False, use_fp16=False, num_heads=1, num_head_channels=-1, num_heads_upsample=-1, use_scale_shift_norm=False, resblock_updown=False, ): super().__init__( in_channels=in_channels, resolution=resolution, model_channels=model_channels, out_channels=out_channels, num_res_blocks=num_res_blocks, attention_resolutions=attention_resolutions, dropout=dropout, channel_mult=channel_mult, conv_resample=conv_resample, dims=dims, use_checkpoint=use_checkpoint, use_fp16=use_fp16, num_heads=num_heads, num_head_channels=num_head_channels, num_heads_upsample=num_heads_upsample, use_scale_shift_norm=use_scale_shift_norm, resblock_updown=resblock_updown, ) self.register_to_config( in_channels=in_channels, resolution=resolution, model_channels=model_channels, out_channels=out_channels, num_res_blocks=num_res_blocks, attention_resolutions=attention_resolutions, dropout=dropout, channel_mult=channel_mult, conv_resample=conv_resample, dims=dims, use_checkpoint=use_checkpoint, use_fp16=use_fp16, num_heads=num_heads, num_head_channels=num_head_channels, num_heads_upsample=num_heads_upsample, use_scale_shift_norm=use_scale_shift_norm, resblock_updown=resblock_updown, ) def forward(self, sample, timesteps, low_res=None): x = sample _, _, new_height, new_width = x.shape upsampled = F.interpolate(low_res, (new_height, new_width), mode="bilinear") x = torch.cat([x, upsampled], dim=1) hs = [] emb = self.time_embed( get_timestep_embedding(timesteps, self.model_channels, flip_sin_to_cos=True, downscale_freq_shift=0) ) h = x for module in self.input_blocks: h = module(h, emb) hs.append(h) h = self.mid(h, emb) for module in self.output_blocks: h = torch.cat([h, hs.pop()], dim=1) h = module(h, emb) return self.out(h)
diffusers_all-main
src/diffusers/models/unet_glide.py
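The full GlideUNetModel is tied to its pretrained GLIDE configurations, so rather than guessing a working config, here is a small self-contained check of the builder helpers defined at the top of unet_glide.py (conv_nd, normalization with its fused swish, and zero_module); shapes are illustrative.

import torch

from diffusers.models.unet_glide import conv_nd, normalization, zero_module

conv = zero_module(conv_nd(2, 32, 64, 3, padding=1))  # nn.Conv2d with weights and bias zeroed
norm = normalization(32, swish=1.0)                   # GroupNorm32 applying SiLU after the norm

x = torch.randn(4, 32, 16, 16)
h = conv(norm(x))
print(h.shape, float(h.abs().sum()))  # torch.Size([4, 64, 16, 16]) 0.0 - zero-init output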
# model adapted from diffuser https://github.com/jannerm/diffuser/blob/main/diffuser/models/temporal.py import torch import torch.nn as nn from ..configuration_utils import ConfigMixin from ..modeling_utils import ModelMixin from .embeddings import get_timestep_embedding from .resnet import Downsample1D, ResidualTemporalBlock, Upsample1D class SinusoidalPosEmb(nn.Module): def __init__(self, dim): super().__init__() self.dim = dim def forward(self, x): return get_timestep_embedding(x, self.dim) class RearrangeDim(nn.Module): def __init__(self): super().__init__() def forward(self, tensor): if len(tensor.shape) == 2: return tensor[:, :, None] if len(tensor.shape) == 3: return tensor[:, :, None, :] elif len(tensor.shape) == 4: return tensor[:, :, 0, :] else: raise ValueError(f"`len(tensor)`: {len(tensor)} has to be 2, 3 or 4.") class Conv1dBlock(nn.Module): """ Conv1d --> GroupNorm --> Mish """ def __init__(self, inp_channels, out_channels, kernel_size, n_groups=8): super().__init__() self.block = nn.Sequential( nn.Conv1d(inp_channels, out_channels, kernel_size, padding=kernel_size // 2), RearrangeDim(), # Rearrange("batch channels horizon -> batch channels 1 horizon"), nn.GroupNorm(n_groups, out_channels), RearrangeDim(), # Rearrange("batch channels 1 horizon -> batch channels horizon"), nn.Mish(), ) def forward(self, x): return self.block(x) class TemporalUNet(ModelMixin, ConfigMixin): # (nn.Module): def __init__( self, training_horizon=128, transition_dim=14, cond_dim=3, predict_epsilon=False, clip_denoised=True, dim=32, dim_mults=(1, 4, 8), ): super().__init__() self.transition_dim = transition_dim self.cond_dim = cond_dim self.predict_epsilon = predict_epsilon self.clip_denoised = clip_denoised dims = [transition_dim, *map(lambda m: dim * m, dim_mults)] in_out = list(zip(dims[:-1], dims[1:])) time_dim = dim self.time_mlp = nn.Sequential( SinusoidalPosEmb(dim), nn.Linear(dim, dim * 4), nn.Mish(), nn.Linear(dim * 4, dim), ) self.downs = nn.ModuleList([]) self.ups = nn.ModuleList([]) num_resolutions = len(in_out) for ind, (dim_in, dim_out) in enumerate(in_out): is_last = ind >= (num_resolutions - 1) self.downs.append( nn.ModuleList( [ ResidualTemporalBlock(dim_in, dim_out, embed_dim=time_dim, horizon=training_horizon), ResidualTemporalBlock(dim_out, dim_out, embed_dim=time_dim, horizon=training_horizon), Downsample1D(dim_out, use_conv=True) if not is_last else nn.Identity(), ] ) ) if not is_last: training_horizon = training_horizon // 2 mid_dim = dims[-1] self.mid_block1 = ResidualTemporalBlock(mid_dim, mid_dim, embed_dim=time_dim, horizon=training_horizon) self.mid_block2 = ResidualTemporalBlock(mid_dim, mid_dim, embed_dim=time_dim, horizon=training_horizon) for ind, (dim_in, dim_out) in enumerate(reversed(in_out[1:])): is_last = ind >= (num_resolutions - 1) self.ups.append( nn.ModuleList( [ ResidualTemporalBlock(dim_out * 2, dim_in, embed_dim=time_dim, horizon=training_horizon), ResidualTemporalBlock(dim_in, dim_in, embed_dim=time_dim, horizon=training_horizon), Upsample1D(dim_in, use_conv_transpose=True) if not is_last else nn.Identity(), ] ) ) if not is_last: training_horizon = training_horizon * 2 self.final_conv = nn.Sequential( Conv1dBlock(dim, dim, kernel_size=5), nn.Conv1d(dim, transition_dim, 1), ) def forward(self, sample, timesteps): """ x : [ batch x horizon x transition ] """ x = sample x = x.permute(0, 2, 1) t = self.time_mlp(timesteps) h = [] for resnet, resnet2, downsample in self.downs: x = resnet(x, t) x = resnet2(x, t) h.append(x) x = downsample(x) x = self.mid_block1(x, 
t)
        x = self.mid_block2(x, t)

        for resnet, resnet2, upsample in self.ups:
            x = torch.cat((x, h.pop()), dim=1)
            x = resnet(x, t)
            x = resnet2(x, t)
            x = upsample(x)

        x = self.final_conv(x)

        x = x.permute(0, 2, 1)
        return x


class TemporalValue(nn.Module):
    def __init__(
        self,
        horizon,
        transition_dim,
        cond_dim,
        dim=32,
        time_dim=None,
        out_dim=1,
        dim_mults=(1, 2, 4, 8),
    ):
        super().__init__()

        dims = [transition_dim, *map(lambda m: dim * m, dim_mults)]
        in_out = list(zip(dims[:-1], dims[1:]))

        time_dim = time_dim or dim
        self.time_mlp = nn.Sequential(
            SinusoidalPosEmb(dim),
            nn.Linear(dim, dim * 4),
            nn.Mish(),
            nn.Linear(dim * 4, dim),
        )

        self.blocks = nn.ModuleList([])

        for dim_in, dim_out in in_out:
            self.blocks.append(
                nn.ModuleList(
                    [
                        ResidualTemporalBlock(dim_in, dim_out, kernel_size=5, embed_dim=time_dim, horizon=horizon),
                        ResidualTemporalBlock(dim_out, dim_out, kernel_size=5, embed_dim=time_dim, horizon=horizon),
                        Downsample1D(dim_out),
                    ]
                )
            )

            horizon = horizon // 2

        fc_dim = dims[-1] * max(horizon, 1)

        self.final_block = nn.Sequential(
            nn.Linear(fc_dim + time_dim, fc_dim // 2),
            nn.Mish(),
            nn.Linear(fc_dim // 2, out_dim),
        )

    def forward(self, x, cond, time, *args):
        """
        x : [ batch x horizon x transition ]
        """
        x = x.permute(0, 2, 1)

        t = self.time_mlp(time)

        for resnet, resnet2, downsample in self.blocks:
            x = resnet(x, t)
            x = resnet2(x, t)
            x = downsample(x)

        x = x.view(len(x), -1)

        out = self.final_block(torch.cat([x, t], dim=-1))
        return out
diffusers_all-main
src/diffusers/models/unet_rl.py
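A hedged sketch of TemporalUNet on a random trajectory batch. It assumes the ResidualTemporalBlock imported from resnet.py preserves the horizon length, so the only shape constraint visible here is that the horizon divides by 4 (two Downsample1D stages for dim_mults=(1, 4, 8)); the batch size and integer timesteps are illustrative.

import torch

from diffusers.models.unet_rl import TemporalUNet

model = TemporalUNet(training_horizon=128, transition_dim=14, cond_dim=3, dim=32, dim_mults=(1, 4, 8))

trajectories = torch.randn(2, 128, 14)   # (batch, horizon, transition_dim)
timesteps = torch.randint(0, 100, (2,))  # one diffusion step per sample

denoised = model(trajectories, timesteps)
print(denoised.shape)  # torch.Size([2, 128, 14])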
import torch import torch.nn as nn from ..configuration_utils import ConfigMixin from ..modeling_utils import ModelMixin from .attention import AttentionBlock from .embeddings import get_timestep_embedding from .resnet import Downsample2D, ResnetBlock2D, Upsample2D from .unet_new import UNetMidBlock2D, get_down_block, get_up_block class UNetUnconditionalModel(ModelMixin, ConfigMixin): """ The full UNet model with attention and timestep embedding. :param in_channels: channels in the input Tensor. :param model_channels: base channel count for the model. :param out_channels: channels in the output Tensor. :param num_res_blocks: number of residual blocks per downsample. :param attention_resolutions: a collection of downsample rates at which attention will take place. May be a set, list, or tuple. For example, if this contains 4, then at 4x downsampling, attention will be used. :param dropout: the dropout probability. :param channel_mult: channel multiplier for each level of the UNet. :param conv_resample: if True, use learned convolutions for upsampling and downsampling. :param dims: determines if the signal is 1D, 2D, or 3D. :param num_classes: if specified (as an int), then this model will be class-conditional with `num_classes` classes. :param use_checkpoint: use gradient checkpointing to reduce memory usage. :param num_heads: the number of attention heads in each attention layer. :param num_heads_channels: if specified, ignore num_heads and instead use a fixed channel width per attention head. :param num_heads_upsample: works with num_heads to set a different number of heads for upsampling. Deprecated. :param use_scale_shift_norm: use a FiLM-like conditioning mechanism. :param resblock_updown: use residual blocks for up/downsampling. :param use_new_attention_order: use a different attention pattern for potentially increased efficiency. 
""" def __init__( self, image_size, in_channels, out_channels, num_res_blocks, dropout=0, block_input_channels=(224, 224, 448, 672), block_output_channels=(224, 448, 672, 896), down_blocks=( "UNetResDownBlock2D", "UNetResAttnDownBlock2D", "UNetResAttnDownBlock2D", "UNetResAttnDownBlock2D", ), up_blocks=("UNetResAttnUpBlock2D", "UNetResAttnUpBlock2D", "UNetResAttnUpBlock2D", "UNetResUpBlock2D"), resnet_act_fn="silu", resnet_eps=1e-5, conv_resample=True, num_head_channels=32, # To delete once weights are converted attention_resolutions=(8, 4, 2), ): super().__init__() # register all __init__ params with self.register self.register_to_config( image_size=image_size, in_channels=in_channels, block_input_channels=block_input_channels, block_output_channels=block_output_channels, out_channels=out_channels, num_res_blocks=num_res_blocks, down_blocks=down_blocks, up_blocks=up_blocks, dropout=dropout, conv_resample=conv_resample, num_head_channels=num_head_channels, # (TODO(PVP) - To delete once weights are converted attention_resolutions=attention_resolutions, ) # To delete - replace with config values self.image_size = image_size self.in_channels = in_channels self.out_channels = out_channels self.num_res_blocks = num_res_blocks self.dropout = dropout time_embed_dim = block_input_channels[0] * 4 # ======================== Input =================== self.conv_in = nn.Conv2d(in_channels, block_input_channels[0], kernel_size=3, padding=(1, 1)) # ======================== Time ==================== self.time_embed = nn.Sequential( nn.Linear(block_input_channels[0], time_embed_dim), nn.SiLU(), nn.Linear(time_embed_dim, time_embed_dim), ) # ======================== Down ==================== input_channels = list(block_input_channels) output_channels = list(block_output_channels) self.downsample_blocks = nn.ModuleList([]) for i, (input_channel, output_channel) in enumerate(zip(input_channels, output_channels)): down_block_type = down_blocks[i] is_final_block = i == len(input_channels) - 1 down_block = get_down_block( down_block_type, num_layers=num_res_blocks, in_channels=input_channel, out_channels=output_channel, temb_channels=time_embed_dim, add_downsample=not is_final_block, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, attn_num_head_channels=num_head_channels, ) self.downsample_blocks.append(down_block) # ======================== Mid ==================== self.mid = UNetMidBlock2D( in_channels=output_channels[-1], dropout=dropout, temb_channels=time_embed_dim, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, resnet_time_scale_shift="default", attn_num_head_channels=num_head_channels, ) self.upsample_blocks = nn.ModuleList([]) for i, (input_channel, output_channel) in enumerate(zip(reversed(input_channels), reversed(output_channels))): up_block_type = up_blocks[i] is_final_block = i == len(input_channels) - 1 up_block = get_up_block( up_block_type, num_layers=num_res_blocks + 1, in_channels=output_channel, next_channels=input_channel, temb_channels=time_embed_dim, add_upsample=not is_final_block, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, attn_num_head_channels=num_head_channels, ) self.upsample_blocks.append(up_block) # ======================== Out ==================== self.out = nn.Sequential( nn.GroupNorm(num_channels=output_channels[0], num_groups=32, eps=1e-5), nn.SiLU(), nn.Conv2d(block_input_channels[0], out_channels, 3, padding=1), ) # =========== TO DELETE AFTER CONVERSION ========== transformer_depth = 1 context_dim = None legacy = True num_heads = -1 model_channels = 
block_input_channels[0] channel_mult = tuple([x // model_channels for x in block_output_channels]) self.init_for_ldm( in_channels, model_channels, channel_mult, num_res_blocks, dropout, time_embed_dim, attention_resolutions, num_head_channels, num_heads, legacy, False, transformer_depth, context_dim, conv_resample, out_channels, ) def forward(self, sample, timesteps=None): # 1. time step embeddings if not torch.is_tensor(timesteps): timesteps = torch.tensor([timesteps], dtype=torch.long, device=sample.device) t_emb = get_timestep_embedding( timesteps, self.config.block_input_channels[0], flip_sin_to_cos=True, downscale_freq_shift=0 ) emb = self.time_embed(t_emb) # 2. pre-process sample # sample = sample.type(self.dtype_) sample = self.conv_in(sample) # 3. down blocks down_block_res_samples = (sample,) for downsample_block in self.downsample_blocks: sample, res_samples = downsample_block(sample, emb) # append to tuple down_block_res_samples += res_samples # 4. mid block sample = self.mid(sample, emb) # 5. up blocks for upsample_block in self.upsample_blocks: # pop from tuple res_samples = down_block_res_samples[-len(upsample_block.resnets) :] down_block_res_samples = down_block_res_samples[: -len(upsample_block.resnets)] sample = upsample_block(sample, res_samples, emb) # 6. post-process sample sample = self.out(sample) return sample def init_for_ldm( self, in_channels, model_channels, channel_mult, num_res_blocks, dropout, time_embed_dim, attention_resolutions, num_head_channels, num_heads, legacy, use_spatial_transformer, transformer_depth, context_dim, conv_resample, out_channels, ): # TODO(PVP) - delete after weight conversion class TimestepEmbedSequential(nn.Sequential): """ A sequential module that passes timestep embeddings to the children that support it as an extra input. """ pass # TODO(PVP) - delete after weight conversion def conv_nd(dims, *args, **kwargs): """ Create a 1D, 2D, or 3D convolution module. 
""" if dims == 1: return nn.Conv1d(*args, **kwargs) elif dims == 2: return nn.Conv2d(*args, **kwargs) elif dims == 3: return nn.Conv3d(*args, **kwargs) raise ValueError(f"unsupported dimensions: {dims}") dims = 2 self.input_blocks = nn.ModuleList( [TimestepEmbedSequential(conv_nd(dims, in_channels, model_channels, 3, padding=1))] ) self._feature_size = model_channels input_block_chans = [model_channels] ch = model_channels ds = 1 for level, mult in enumerate(channel_mult): for _ in range(num_res_blocks): layers = [ ResnetBlock2D( in_channels=ch, out_channels=mult * model_channels, dropout=dropout, temb_channels=time_embed_dim, eps=1e-5, non_linearity="silu", overwrite_for_ldm=True, ) ] ch = mult * model_channels if ds in attention_resolutions: if num_head_channels == -1: dim_head = ch // num_heads else: num_heads = ch // num_head_channels dim_head = num_head_channels if legacy: # num_heads = 1 dim_head = num_head_channels layers.append( AttentionBlock( ch, num_heads=num_heads, num_head_channels=dim_head, ), ) self.input_blocks.append(TimestepEmbedSequential(*layers)) self._feature_size += ch input_block_chans.append(ch) if level != len(channel_mult) - 1: out_ch = ch self.input_blocks.append( TimestepEmbedSequential( Downsample2D(ch, use_conv=conv_resample, out_channels=out_ch, padding=1, name="op") ) ) ch = out_ch input_block_chans.append(ch) ds *= 2 self._feature_size += ch if num_head_channels == -1: dim_head = ch // num_heads else: num_heads = ch // num_head_channels dim_head = num_head_channels if legacy: # num_heads = 1 dim_head = num_head_channels if dim_head < 0: dim_head = None # TODO(Patrick) - delete after weight conversion # init to be able to overwrite `self.mid` self.middle_block = TimestepEmbedSequential( ResnetBlock2D( in_channels=ch, out_channels=None, dropout=dropout, temb_channels=time_embed_dim, eps=1e-5, non_linearity="silu", overwrite_for_ldm=True, ), AttentionBlock( ch, num_heads=num_heads, num_head_channels=dim_head, ), ResnetBlock2D( in_channels=ch, out_channels=None, dropout=dropout, temb_channels=time_embed_dim, eps=1e-5, non_linearity="silu", overwrite_for_ldm=True, ), ) self._feature_size += ch self.output_blocks = nn.ModuleList([]) for level, mult in list(enumerate(channel_mult))[::-1]: for i in range(num_res_blocks + 1): ich = input_block_chans.pop() layers = [ ResnetBlock2D( in_channels=ch + ich, out_channels=model_channels * mult, dropout=dropout, temb_channels=time_embed_dim, eps=1e-5, non_linearity="silu", overwrite_for_ldm=True, ), ] ch = model_channels * mult if ds in attention_resolutions: if num_head_channels == -1: dim_head = ch // num_heads else: num_heads = ch // num_head_channels dim_head = num_head_channels if legacy: # num_heads = 1 dim_head = num_head_channels layers.append( AttentionBlock( ch, num_heads=-1, num_head_channels=dim_head, ), ) if level and i == num_res_blocks: out_ch = ch layers.append(Upsample2D(ch, use_conv=conv_resample, out_channels=out_ch)) ds //= 2 self.output_blocks.append(TimestepEmbedSequential(*layers)) self._feature_size += ch # ================ SET WEIGHTS OF ALL WEIGHTS ================== for i, input_layer in enumerate(self.input_blocks[1:]): block_id = i // (num_res_blocks + 1) layer_in_block_id = i % (num_res_blocks + 1) if layer_in_block_id == 2: self.downsample_blocks[block_id].downsamplers[0].op.weight.data = input_layer[0].op.weight.data self.downsample_blocks[block_id].downsamplers[0].op.bias.data = input_layer[0].op.bias.data elif len(input_layer) > 1: 
self.downsample_blocks[block_id].resnets[layer_in_block_id].set_weight(input_layer[0]) self.downsample_blocks[block_id].attentions[layer_in_block_id].set_weight(input_layer[1]) else: self.downsample_blocks[block_id].resnets[layer_in_block_id].set_weight(input_layer[0]) self.mid.resnets[0].set_weight(self.middle_block[0]) self.mid.resnets[1].set_weight(self.middle_block[2]) self.mid.attentions[0].set_weight(self.middle_block[1]) for i, input_layer in enumerate(self.output_blocks): block_id = i // (num_res_blocks + 1) layer_in_block_id = i % (num_res_blocks + 1) if len(input_layer) > 2: self.upsample_blocks[block_id].resnets[layer_in_block_id].set_weight(input_layer[0]) self.upsample_blocks[block_id].attentions[layer_in_block_id].set_weight(input_layer[1]) self.upsample_blocks[block_id].upsamplers[0].conv.weight.data = input_layer[2].conv.weight.data self.upsample_blocks[block_id].upsamplers[0].conv.bias.data = input_layer[2].conv.bias.data elif len(input_layer) > 1 and "Upsample2D" in input_layer[1].__class__.__name__: self.upsample_blocks[block_id].resnets[layer_in_block_id].set_weight(input_layer[0]) self.upsample_blocks[block_id].upsamplers[0].conv.weight.data = input_layer[1].conv.weight.data self.upsample_blocks[block_id].upsamplers[0].conv.bias.data = input_layer[1].conv.bias.data elif len(input_layer) > 1: self.upsample_blocks[block_id].resnets[layer_in_block_id].set_weight(input_layer[0]) self.upsample_blocks[block_id].attentions[layer_in_block_id].set_weight(input_layer[1]) else: self.upsample_blocks[block_id].resnets[layer_in_block_id].set_weight(input_layer[0]) self.conv_in.weight.data = self.input_blocks[0][0].weight.data self.conv_in.bias.data = self.input_blocks[0][0].bias.data
diffusers_all-main
src/diffusers/models/unet_unconditional.py
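The interesting part of UNetUnconditionalModel.forward above is the skip-connection bookkeeping: every down block appends its residuals to one flat tuple, and each up block slices exactly len(upsample_block.resnets) entries off the end. The toy sketch below isolates that pattern with a hypothetical DummyDownBlock (not part of the library) so the indexing can be read on its own.

import torch

class DummyDownBlock(torch.nn.Module):
    # hypothetical stand-in: real down blocks also downsample and take a time embedding
    def __init__(self, num_layers):
        super().__init__()
        self.resnets = torch.nn.ModuleList([torch.nn.Identity() for _ in range(num_layers)])

    def forward(self, x):
        res_samples = tuple(resnet(x) for resnet in self.resnets)
        return x, res_samples

down_blocks = [DummyDownBlock(2), DummyDownBlock(3)]
sample = torch.randn(1, 4, 8, 8)

# down path: flatten every residual into a single tuple, seeded with the stem output
down_block_res_samples = (sample,)
for block in down_blocks:
    sample, res_samples = block(sample)
    down_block_res_samples += res_samples

# up path: consume the tuple from the back, one group per block, mirroring the down path
for block in reversed(down_blocks):
    res_samples = down_block_res_samples[-len(block.resnets):]
    down_block_res_samples = down_block_res_samples[: -len(block.resnets)]
    print(len(res_samples))  # 3, then 2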
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # helpers functions import torch from torch import nn from ..configuration_utils import ConfigMixin from ..modeling_utils import ModelMixin from .attention import AttentionBlock from .embeddings import get_timestep_embedding from .resnet import Downsample2D, ResnetBlock2D, Upsample2D from .unet_new import UNetMidBlock2D def nonlinearity(x): # swish return x * torch.sigmoid(x) def Normalize(in_channels): return torch.nn.GroupNorm(num_groups=32, num_channels=in_channels, eps=1e-6, affine=True) class UNetModel(ModelMixin, ConfigMixin): def __init__( self, ch=128, out_ch=3, ch_mult=(1, 1, 2, 2, 4, 4), num_res_blocks=2, attn_resolutions=(16,), dropout=0.0, resamp_with_conv=True, in_channels=3, resolution=256, ): super().__init__() self.register_to_config( ch=ch, out_ch=out_ch, ch_mult=ch_mult, num_res_blocks=num_res_blocks, attn_resolutions=attn_resolutions, dropout=dropout, resamp_with_conv=resamp_with_conv, in_channels=in_channels, resolution=resolution, ) ch_mult = tuple(ch_mult) self.ch = ch self.temb_ch = self.ch * 4 self.num_resolutions = len(ch_mult) self.num_res_blocks = num_res_blocks self.resolution = resolution self.in_channels = in_channels # timestep embedding self.temb = nn.Module() self.temb.dense = nn.ModuleList( [ torch.nn.Linear(self.ch, self.temb_ch), torch.nn.Linear(self.temb_ch, self.temb_ch), ] ) # downsampling self.conv_in = torch.nn.Conv2d(in_channels, self.ch, kernel_size=3, stride=1, padding=1) curr_res = resolution in_ch_mult = (1,) + ch_mult self.down = nn.ModuleList() for i_level in range(self.num_resolutions): block = nn.ModuleList() attn = nn.ModuleList() block_in = ch * in_ch_mult[i_level] block_out = ch * ch_mult[i_level] for i_block in range(self.num_res_blocks): block.append( ResnetBlock2D( in_channels=block_in, out_channels=block_out, temb_channels=self.temb_ch, dropout=dropout ) ) block_in = block_out if curr_res in attn_resolutions: attn.append(AttentionBlock(block_in, overwrite_qkv=True)) down = nn.Module() down.block = block down.attn = attn if i_level != self.num_resolutions - 1: down.downsample = Downsample2D(block_in, use_conv=resamp_with_conv, padding=0) curr_res = curr_res // 2 self.down.append(down) # middle self.mid = nn.Module() self.mid.block_1 = ResnetBlock2D( in_channels=block_in, out_channels=block_in, temb_channels=self.temb_ch, dropout=dropout ) self.mid.attn_1 = AttentionBlock(block_in, overwrite_qkv=True) self.mid.block_2 = ResnetBlock2D( in_channels=block_in, out_channels=block_in, temb_channels=self.temb_ch, dropout=dropout ) self.mid_new = UNetMidBlock2D(in_channels=block_in, temb_channels=self.temb_ch, dropout=dropout) self.mid_new.resnets[0] = self.mid.block_1 self.mid_new.attentions[0] = self.mid.attn_1 self.mid_new.resnets[1] = self.mid.block_2 # upsampling self.up = nn.ModuleList() for i_level in reversed(range(self.num_resolutions)): block = nn.ModuleList() attn = nn.ModuleList() block_out = ch * ch_mult[i_level] skip_in = ch * 
ch_mult[i_level] for i_block in range(self.num_res_blocks + 1): if i_block == self.num_res_blocks: skip_in = ch * in_ch_mult[i_level] block.append( ResnetBlock2D( in_channels=block_in + skip_in, out_channels=block_out, temb_channels=self.temb_ch, dropout=dropout, ) ) block_in = block_out if curr_res in attn_resolutions: attn.append(AttentionBlock(block_in, overwrite_qkv=True)) up = nn.Module() up.block = block up.attn = attn if i_level != 0: up.upsample = Upsample2D(block_in, use_conv=resamp_with_conv) curr_res = curr_res * 2 self.up.insert(0, up) # prepend to get consistent order # end self.norm_out = Normalize(block_in) self.conv_out = torch.nn.Conv2d(block_in, out_ch, kernel_size=3, stride=1, padding=1) def forward(self, sample, timesteps): x = sample assert x.shape[2] == x.shape[3] == self.resolution if not torch.is_tensor(timesteps): timesteps = torch.tensor([timesteps], dtype=torch.long, device=x.device) # timestep embedding temb = get_timestep_embedding(timesteps, self.ch) temb = self.temb.dense[0](temb) temb = nonlinearity(temb) temb = self.temb.dense[1](temb) # downsampling hs = [self.conv_in(x)] for i_level in range(self.num_resolutions): for i_block in range(self.num_res_blocks): h = self.down[i_level].block[i_block](hs[-1], temb) if len(self.down[i_level].attn) > 0: h = self.down[i_level].attn[i_block](h) hs.append(h) if i_level != self.num_resolutions - 1: hs.append(self.down[i_level].downsample(hs[-1])) # middle h = self.mid_new(hs[-1], temb) # upsampling for i_level in reversed(range(self.num_resolutions)): for i_block in range(self.num_res_blocks + 1): h = self.up[i_level].block[i_block](torch.cat([h, hs.pop()], dim=1), temb) if len(self.up[i_level].attn) > 0: h = self.up[i_level].attn[i_block](h) if i_level != 0: h = self.up[i_level].upsample(h) # end h = self.norm_out(h) h = nonlinearity(h) h = self.conv_out(h) return h
diffusers_all-main
src/diffusers/models/unet.py
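A hedged instantiation of the DDPM-style UNetModel with a deliberately tiny config. The channel counts keep every GroupNorm divisible by its 32 groups; the sketch assumes the AttentionBlock and UNetMidBlock2D definitions earlier in the package accept the keyword arguments this constructor passes to them (only part of attention.py is reproduced here).

import torch

from diffusers.models.unet import UNetModel

model = UNetModel(
    ch=32, out_ch=3, ch_mult=(1, 2), num_res_blocks=1, attn_resolutions=(16,), in_channels=3, resolution=32
)

sample = torch.randn(1, 3, 32, 32)  # forward asserts H == W == resolution
timesteps = torch.tensor([10])      # 1-D tensor, one step per sample

out = model(sample, timesteps)
print(out.shape)  # torch.Size([1, 3, 32, 32])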
# flake8: noqa # There's no way to ignore "F401 '...' imported but unused" warnings in this # module, but to preserve other warnings. So, don't check this module at all. # Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from .unet import UNetModel from .unet_glide import GlideSuperResUNetModel, GlideTextToImageUNetModel, GlideUNetModel from .unet_grad_tts import UNetGradTTSModel from .unet_ldm import UNetLDMModel from .unet_rl import TemporalUNet from .unet_sde_score_estimation import NCSNpp from .unet_unconditional import UNetUnconditionalModel from .vae import AutoencoderKL, VQModel
diffusers_all-main
src/diffusers/models/__init__.py
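Because this __init__ re-exports every model variant, downstream code can import them from diffusers.models instead of reaching into the individual files; a one-line illustration:

from diffusers.models import AutoencoderKL, TemporalUNet, UNetModel, VQModel  # all re-exported above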
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import math import numpy as np import torch from torch import nn def get_timestep_embedding( timesteps, embedding_dim, flip_sin_to_cos=False, downscale_freq_shift=1, scale=1, max_period=10000 ): """ This matches the implementation in Denoising Diffusion Probabilistic Models: Create sinusoidal timestep embeddings. :param timesteps: a 1-D Tensor of N indices, one per batch element. These may be fractional. :param embedding_dim: the dimension of the output. :param max_period: controls the minimum frequency of the embeddings. :return: an [N x dim] Tensor of positional embeddings. """ assert len(timesteps.shape) == 1, "Timesteps should be a 1d-array" half_dim = embedding_dim // 2 emb_coeff = -math.log(max_period) / (half_dim - downscale_freq_shift) emb = torch.arange(half_dim, dtype=torch.float32, device=timesteps.device) emb = torch.exp(emb * emb_coeff) emb = timesteps[:, None].float() * emb[None, :] # scale embeddings emb = scale * emb # concat sine and cosine embeddings emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=-1) # flip sine and cosine embeddings if flip_sin_to_cos: emb = torch.cat([emb[:, half_dim:], emb[:, :half_dim]], dim=-1) # zero pad if embedding_dim % 2 == 1: emb = torch.nn.functional.pad(emb, (0, 1, 0, 0)) return emb # unet_sde_score_estimation.py class GaussianFourierProjection(nn.Module): """Gaussian Fourier embeddings for noise levels.""" def __init__(self, embedding_size=256, scale=1.0): super().__init__() self.W = nn.Parameter(torch.randn(embedding_size) * scale, requires_grad=False) def forward(self, x): x_proj = x[:, None] * self.W[None, :] * 2 * np.pi return torch.cat([torch.sin(x_proj), torch.cos(x_proj)], dim=-1)
diffusers_all-main
src/diffusers/models/embeddings.py
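A quick self-contained check of the two embedding helpers in embeddings.py; the batch size and embedding widths are illustrative. Note that get_timestep_embedding expects a 1-D tensor of timesteps and that GaussianFourierProjection returns 2 * embedding_size features (sin and cos concatenated).

import torch

from diffusers.models.embeddings import GaussianFourierProjection, get_timestep_embedding

timesteps = torch.arange(8)                  # one integer step per batch element
emb = get_timestep_embedding(timesteps, 64)
print(emb.shape)  # torch.Size([8, 64])

fourier = GaussianFourierProjection(embedding_size=16, scale=1.0)
noise_levels = torch.rand(8)                 # continuous noise levels, as used by the SDE models
print(fourier(noise_levels).shape)  # torch.Size([8, 32])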
from functools import partial import numpy as np import torch import torch.nn as nn import torch.nn.functional as F class Upsample2D(nn.Module): """ An upsampling layer with an optional convolution. :param channels: channels in the inputs and outputs. :param use_conv: a bool determining if a convolution is applied. :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then upsampling occurs in the inner-two dimensions. """ def __init__(self, channels, use_conv=False, use_conv_transpose=False, out_channels=None, name="conv"): super().__init__() self.channels = channels self.out_channels = out_channels or channels self.use_conv = use_conv self.use_conv_transpose = use_conv_transpose self.name = name conv = None if use_conv_transpose: conv = nn.ConvTranspose2d(channels, self.out_channels, 4, 2, 1) elif use_conv: conv = nn.Conv2d(self.channels, self.out_channels, 3, padding=1) # TODO(Suraj, Patrick) - clean up after weight dicts are correctly renamed if name == "conv": self.conv = conv else: self.Conv2d_0 = conv def forward(self, x): assert x.shape[1] == self.channels if self.use_conv_transpose: return self.conv(x) x = F.interpolate(x, scale_factor=2.0, mode="nearest") # TODO(Suraj, Patrick) - clean up after weight dicts are correctly renamed if self.use_conv: if self.name == "conv": x = self.conv(x) else: x = self.Conv2d_0(x) return x class Downsample2D(nn.Module): """ A downsampling layer with an optional convolution. :param channels: channels in the inputs and outputs. :param use_conv: a bool determining if a convolution is applied. :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then downsampling occurs in the inner-two dimensions. """ def __init__(self, channels, use_conv=False, out_channels=None, padding=1, name="conv"): super().__init__() self.channels = channels self.out_channels = out_channels or channels self.use_conv = use_conv self.padding = padding stride = 2 self.name = name if use_conv: conv = nn.Conv2d(self.channels, self.out_channels, 3, stride=stride, padding=padding) else: assert self.channels == self.out_channels conv = nn.AvgPool2d(kernel_size=stride, stride=stride) # TODO(Suraj, Patrick) - clean up after weight dicts are correctly renamed if name == "conv": self.conv = conv elif name == "Conv2d_0": self.Conv2d_0 = conv else: self.op = conv def forward(self, x): assert x.shape[1] == self.channels if self.use_conv and self.padding == 0: pad = (0, 1, 0, 1) x = F.pad(x, pad, mode="constant", value=0) # TODO(Suraj, Patrick) - clean up after weight dicts are correctly renamed if self.name == "conv": return self.conv(x) elif self.name == "Conv2d_0": return self.Conv2d_0(x) else: return self.op(x) class Upsample1D(nn.Module): """ An upsampling layer with an optional convolution. :param channels: channels in the inputs and outputs. :param use_conv: a bool determining if a convolution is applied. :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then upsampling occurs in the inner-two dimensions. 
""" def __init__(self, channels, use_conv=False, use_conv_transpose=False, out_channels=None, name="conv"): super().__init__() self.channels = channels self.out_channels = out_channels or channels self.use_conv = use_conv self.use_conv_transpose = use_conv_transpose self.name = name # TODO(Suraj, Patrick) - clean up after weight dicts are correctly renamed self.conv = None if use_conv_transpose: self.conv = nn.ConvTranspose1d(channels, self.out_channels, 4, 2, 1) elif use_conv: self.conv = nn.Conv1d(self.channels, self.out_channels, 3, padding=1) def forward(self, x): assert x.shape[1] == self.channels if self.use_conv_transpose: return self.conv(x) x = F.interpolate(x, scale_factor=2.0, mode="nearest") if self.use_conv: x = self.conv(x) return x class Downsample1D(nn.Module): """ A downsampling layer with an optional convolution. :param channels: channels in the inputs and outputs. :param use_conv: a bool determining if a convolution is applied. :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then downsampling occurs in the inner-two dimensions. """ def __init__(self, channels, use_conv=False, out_channels=None, padding=1, name="conv"): super().__init__() self.channels = channels self.out_channels = out_channels or channels self.use_conv = use_conv self.padding = padding stride = 2 self.name = name if use_conv: self.conv = nn.Conv1d(self.channels, self.out_channels, 3, stride=stride, padding=padding) else: assert self.channels == self.out_channels self.conv = nn.AvgPool1d(kernel_size=stride, stride=stride) def forward(self, x): assert x.shape[1] == self.channels return self.conv(x) class FirUpsample2D(nn.Module): def __init__(self, channels=None, out_channels=None, use_conv=False, fir_kernel=(1, 3, 3, 1)): super().__init__() out_channels = out_channels if out_channels else channels if use_conv: self.Conv2d_0 = nn.Conv2d(channels, out_channels, kernel_size=3, stride=1, padding=1) self.use_conv = use_conv self.fir_kernel = fir_kernel self.out_channels = out_channels def _upsample_2d(self, x, w=None, k=None, factor=2, gain=1): """Fused `upsample_2d()` followed by `Conv2d()`. Args: Padding is performed only once at the beginning, not between the operations. The fused op is considerably more efficient than performing the same calculation using standard TensorFlow ops. It supports gradients of arbitrary: order. x: Input tensor of the shape `[N, C, H, W]` or `[N, H, W, C]`. w: Weight tensor of the shape `[filterH, filterW, inChannels, outChannels]`. Grouped convolution can be performed by `inChannels = x.shape[0] // numGroups`. k: FIR filter of the shape `[firH, firW]` or `[firN]` (separable). The default is `[1] * factor`, which corresponds to nearest-neighbor upsampling. factor: Integer upsampling factor (default: 2). gain: Scaling factor for signal magnitude (default: 1.0). Returns: Tensor of the shape `[N, C, H * factor, W * factor]` or `[N, H * factor, W * factor, C]`, and same datatype as `x`. """ assert isinstance(factor, int) and factor >= 1 # Setup filter kernel. if k is None: k = [1] * factor # setup kernel k = np.asarray(k, dtype=np.float32) if k.ndim == 1: k = np.outer(k, k) k /= np.sum(k) k = k * (gain * (factor**2)) if self.use_conv: convH = w.shape[2] convW = w.shape[3] inC = w.shape[1] p = (k.shape[0] - factor) - (convW - 1) stride = (factor, factor) # Determine data dimensions. 
stride = [1, 1, factor, factor] output_shape = ((x.shape[2] - 1) * factor + convH, (x.shape[3] - 1) * factor + convW) output_padding = ( output_shape[0] - (x.shape[2] - 1) * stride[0] - convH, output_shape[1] - (x.shape[3] - 1) * stride[1] - convW, ) assert output_padding[0] >= 0 and output_padding[1] >= 0 inC = w.shape[1] num_groups = x.shape[1] // inC # Transpose weights. w = torch.reshape(w, (num_groups, -1, inC, convH, convW)) w = w[..., ::-1, ::-1].permute(0, 2, 1, 3, 4) w = torch.reshape(w, (num_groups * inC, -1, convH, convW)) x = F.conv_transpose2d(x, w, stride=stride, output_padding=output_padding, padding=0) x = upfirdn2d_native(x, torch.tensor(k, device=x.device), pad=((p + 1) // 2 + factor - 1, p // 2 + 1)) else: p = k.shape[0] - factor x = upfirdn2d_native( x, torch.tensor(k, device=x.device), up=factor, pad=((p + 1) // 2 + factor - 1, p // 2) ) return x def forward(self, x): if self.use_conv: h = self._upsample_2d(x, self.Conv2d_0.weight, k=self.fir_kernel) h = h + self.Conv2d_0.bias.reshape(1, -1, 1, 1) else: h = self._upsample_2d(x, k=self.fir_kernel, factor=2) return h class FirDownsample2D(nn.Module): def __init__(self, channels=None, out_channels=None, use_conv=False, fir_kernel=(1, 3, 3, 1)): super().__init__() out_channels = out_channels if out_channels else channels if use_conv: self.Conv2d_0 = nn.Conv2d(channels, out_channels, kernel_size=3, stride=1, padding=1) self.fir_kernel = fir_kernel self.use_conv = use_conv self.out_channels = out_channels def _downsample_2d(self, x, w=None, k=None, factor=2, gain=1): """Fused `Conv2d()` followed by `downsample_2d()`. Args: Padding is performed only once at the beginning, not between the operations. The fused op is considerably more efficient than performing the same calculation using standard TensorFlow ops. It supports gradients of arbitrary: order. x: Input tensor of the shape `[N, C, H, W]` or `[N, H, W, C]`. w: Weight tensor of the shape `[filterH, filterW, inChannels, outChannels]`. Grouped convolution can be performed by `inChannels = x.shape[0] // numGroups`. k: FIR filter of the shape `[firH, firW]` or `[firN]` (separable). The default is `[1] * factor`, which corresponds to average pooling. factor: Integer downsampling factor (default: 2). gain: Scaling factor for signal magnitude (default: 1.0). Returns: Tensor of the shape `[N, C, H // factor, W // factor]` or `[N, H // factor, W // factor, C]`, and same datatype as `x`. 
""" assert isinstance(factor, int) and factor >= 1 if k is None: k = [1] * factor # setup kernel k = np.asarray(k, dtype=np.float32) if k.ndim == 1: k = np.outer(k, k) k /= np.sum(k) k = k * gain if self.use_conv: _, _, convH, convW = w.shape p = (k.shape[0] - factor) + (convW - 1) s = [factor, factor] x = upfirdn2d_native(x, torch.tensor(k, device=x.device), pad=((p + 1) // 2, p // 2)) x = F.conv2d(x, w, stride=s, padding=0) else: p = k.shape[0] - factor x = upfirdn2d_native(x, torch.tensor(k, device=x.device), down=factor, pad=((p + 1) // 2, p // 2)) return x def forward(self, x): if self.use_conv: x = self._downsample_2d(x, w=self.Conv2d_0.weight, k=self.fir_kernel) x = x + self.Conv2d_0.bias.reshape(1, -1, 1, 1) else: x = self._downsample_2d(x, k=self.fir_kernel, factor=2) return x # TODO (patil-suraj): needs test # class Upsample2D1d(nn.Module): # def __init__(self, dim): # super().__init__() # self.conv = nn.ConvTranspose1d(dim, dim, 4, 2, 1) # # def forward(self, x): # return self.conv(x) # unet.py, unet_grad_tts.py, unet_ldm.py, unet_glide.py, unet_score_vde.py # => All 2D-Resnets are included here now! class ResnetBlock2D(nn.Module): def __init__( self, *, in_channels, out_channels=None, conv_shortcut=False, dropout=0.0, temb_channels=512, groups=32, groups_out=None, pre_norm=True, eps=1e-6, non_linearity="swish", time_embedding_norm="default", kernel=None, output_scale_factor=1.0, use_nin_shortcut=None, up=False, down=False, overwrite_for_grad_tts=False, overwrite_for_ldm=False, overwrite_for_glide=False, overwrite_for_score_vde=False, ): super().__init__() self.pre_norm = pre_norm self.in_channels = in_channels out_channels = in_channels if out_channels is None else out_channels self.out_channels = out_channels self.use_conv_shortcut = conv_shortcut self.time_embedding_norm = time_embedding_norm self.up = up self.down = down self.output_scale_factor = output_scale_factor if groups_out is None: groups_out = groups if self.pre_norm: self.norm1 = torch.nn.GroupNorm(num_groups=groups, num_channels=in_channels, eps=eps, affine=True) else: self.norm1 = torch.nn.GroupNorm(num_groups=groups, num_channels=out_channels, eps=eps, affine=True) self.conv1 = torch.nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=1) if time_embedding_norm == "default" and temb_channels > 0: self.temb_proj = torch.nn.Linear(temb_channels, out_channels) elif time_embedding_norm == "scale_shift" and temb_channels > 0: self.temb_proj = torch.nn.Linear(temb_channels, 2 * out_channels) self.norm2 = torch.nn.GroupNorm(num_groups=groups_out, num_channels=out_channels, eps=eps, affine=True) self.dropout = torch.nn.Dropout(dropout) self.conv2 = torch.nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=1, padding=1) if non_linearity == "swish": self.nonlinearity = lambda x: F.silu(x) elif non_linearity == "mish": self.nonlinearity = Mish() elif non_linearity == "silu": self.nonlinearity = nn.SiLU() self.upsample = self.downsample = None if self.up: if kernel == "fir": fir_kernel = (1, 3, 3, 1) self.upsample = lambda x: upsample_2d(x, k=fir_kernel) elif kernel == "sde_vp": self.upsample = partial(F.interpolate, scale_factor=2.0, mode="nearest") else: self.upsample = Upsample2D(in_channels, use_conv=False) elif self.down: if kernel == "fir": fir_kernel = (1, 3, 3, 1) self.downsample = lambda x: downsample_2d(x, k=fir_kernel) elif kernel == "sde_vp": self.downsample = partial(F.avg_pool2d, kernel_size=2, stride=2) else: self.downsample = Downsample2D(in_channels, use_conv=False, padding=1, 
name="op") self.use_nin_shortcut = self.in_channels != self.out_channels if use_nin_shortcut is None else use_nin_shortcut self.nin_shortcut = None if self.use_nin_shortcut: self.nin_shortcut = torch.nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1, padding=0) # TODO(SURAJ, PATRICK): ALL OF THE FOLLOWING OF THE INIT METHOD CAN BE DELETED ONCE WEIGHTS ARE CONVERTED self.is_overwritten = False self.overwrite_for_glide = overwrite_for_glide self.overwrite_for_grad_tts = overwrite_for_grad_tts self.overwrite_for_ldm = overwrite_for_ldm or overwrite_for_glide self.overwrite_for_score_vde = overwrite_for_score_vde if self.overwrite_for_grad_tts: dim = in_channels dim_out = out_channels time_emb_dim = temb_channels self.mlp = torch.nn.Sequential(Mish(), torch.nn.Linear(time_emb_dim, dim_out)) self.pre_norm = pre_norm self.block1 = Block(dim, dim_out, groups=groups) self.block2 = Block(dim_out, dim_out, groups=groups) if dim != dim_out: self.res_conv = torch.nn.Conv2d(dim, dim_out, 1) else: self.res_conv = torch.nn.Identity() elif self.overwrite_for_ldm: channels = in_channels emb_channels = temb_channels use_scale_shift_norm = False non_linearity = "silu" self.in_layers = nn.Sequential( normalization(channels, swish=1.0), nn.Identity(), nn.Conv2d(channels, self.out_channels, 3, padding=1), ) self.emb_layers = nn.Sequential( nn.SiLU(), linear( emb_channels, 2 * self.out_channels if self.time_embedding_norm == "scale_shift" else self.out_channels, ), ) self.out_layers = nn.Sequential( normalization(self.out_channels, swish=0.0 if use_scale_shift_norm else 1.0), nn.SiLU() if use_scale_shift_norm else nn.Identity(), nn.Dropout(p=dropout), zero_module(nn.Conv2d(self.out_channels, self.out_channels, 3, padding=1)), ) if self.out_channels == in_channels: self.skip_connection = nn.Identity() else: self.skip_connection = nn.Conv2d(channels, self.out_channels, 1) self.set_weights_ldm() elif self.overwrite_for_score_vde: in_ch = in_channels out_ch = out_channels eps = 1e-6 num_groups = min(in_ch // 4, 32) num_groups_out = min(out_ch // 4, 32) temb_dim = temb_channels self.GroupNorm_0 = nn.GroupNorm(num_groups=num_groups, num_channels=in_ch, eps=eps) self.up = up self.down = down self.Conv_0 = nn.Conv2d(in_ch, out_ch, kernel_size=3, padding=1) if temb_dim is not None: self.Dense_0 = nn.Linear(temb_dim, out_ch) nn.init.zeros_(self.Dense_0.bias) self.GroupNorm_1 = nn.GroupNorm(num_groups=num_groups_out, num_channels=out_ch, eps=eps) self.Dropout_0 = nn.Dropout(dropout) self.Conv_1 = nn.Conv2d(out_ch, out_ch, kernel_size=3, padding=1) if in_ch != out_ch or up or down: # 1x1 convolution with DDPM initialization. 
self.Conv_2 = nn.Conv2d(in_ch, out_ch, kernel_size=1, padding=0) self.in_ch = in_ch self.out_ch = out_ch def set_weights_grad_tts(self): self.conv1.weight.data = self.block1.block[0].weight.data self.conv1.bias.data = self.block1.block[0].bias.data self.norm1.weight.data = self.block1.block[1].weight.data self.norm1.bias.data = self.block1.block[1].bias.data self.conv2.weight.data = self.block2.block[0].weight.data self.conv2.bias.data = self.block2.block[0].bias.data self.norm2.weight.data = self.block2.block[1].weight.data self.norm2.bias.data = self.block2.block[1].bias.data self.temb_proj.weight.data = self.mlp[1].weight.data self.temb_proj.bias.data = self.mlp[1].bias.data if self.in_channels != self.out_channels: self.nin_shortcut.weight.data = self.res_conv.weight.data self.nin_shortcut.bias.data = self.res_conv.bias.data def set_weights_ldm(self): self.norm1.weight.data = self.in_layers[0].weight.data self.norm1.bias.data = self.in_layers[0].bias.data self.conv1.weight.data = self.in_layers[-1].weight.data self.conv1.bias.data = self.in_layers[-1].bias.data self.temb_proj.weight.data = self.emb_layers[-1].weight.data self.temb_proj.bias.data = self.emb_layers[-1].bias.data self.norm2.weight.data = self.out_layers[0].weight.data self.norm2.bias.data = self.out_layers[0].bias.data self.conv2.weight.data = self.out_layers[-1].weight.data self.conv2.bias.data = self.out_layers[-1].bias.data if self.in_channels != self.out_channels: self.nin_shortcut.weight.data = self.skip_connection.weight.data self.nin_shortcut.bias.data = self.skip_connection.bias.data def set_weights_score_vde(self): self.conv1.weight.data = self.Conv_0.weight.data self.conv1.bias.data = self.Conv_0.bias.data self.norm1.weight.data = self.GroupNorm_0.weight.data self.norm1.bias.data = self.GroupNorm_0.bias.data self.conv2.weight.data = self.Conv_1.weight.data self.conv2.bias.data = self.Conv_1.bias.data self.norm2.weight.data = self.GroupNorm_1.weight.data self.norm2.bias.data = self.GroupNorm_1.bias.data self.temb_proj.weight.data = self.Dense_0.weight.data self.temb_proj.bias.data = self.Dense_0.bias.data if self.in_channels != self.out_channels or self.up or self.down: self.nin_shortcut.weight.data = self.Conv_2.weight.data self.nin_shortcut.bias.data = self.Conv_2.bias.data def forward(self, x, temb, mask=1.0): # TODO(Patrick) eventually this class should be split into multiple classes # too many if else statements if self.overwrite_for_grad_tts and not self.is_overwritten: self.set_weights_grad_tts() self.is_overwritten = True elif self.overwrite_for_score_vde and not self.is_overwritten: self.set_weights_score_vde() self.is_overwritten = True h = x h = h * mask if self.pre_norm: h = self.norm1(h) h = self.nonlinearity(h) if self.upsample is not None: x = self.upsample(x) h = self.upsample(h) elif self.downsample is not None: x = self.downsample(x) h = self.downsample(h) h = self.conv1(h) if not self.pre_norm: h = self.norm1(h) h = self.nonlinearity(h) h = h * mask if temb is not None: temb = self.temb_proj(self.nonlinearity(temb))[:, :, None, None] else: temb = 0 if self.time_embedding_norm == "scale_shift": scale, shift = torch.chunk(temb, 2, dim=1) h = self.norm2(h) h = h + h * scale + shift h = self.nonlinearity(h) elif self.time_embedding_norm == "default": h = h + temb h = h * mask if self.pre_norm: h = self.norm2(h) h = self.nonlinearity(h) h = self.dropout(h) h = self.conv2(h) if not self.pre_norm: h = self.norm2(h) h = self.nonlinearity(h) h = h * mask x = x * mask if self.nin_shortcut is not None: x 
= self.nin_shortcut(x) return (x + h) / self.output_scale_factor class ResnetBlock(nn.Module): def __init__( self, *, in_channels, out_channels=None, conv_shortcut=False, dropout=0.0, temb_channels=512, groups=32, groups_out=None, pre_norm=True, eps=1e-6, non_linearity="swish", time_embedding_norm="default", kernel=None, output_scale_factor=1.0, use_nin_shortcut=None, up=False, down=False, overwrite_for_grad_tts=False, overwrite_for_ldm=False, overwrite_for_glide=False, overwrite_for_score_vde=False, ): super().__init__() self.pre_norm = pre_norm self.pre_norm = True self.in_channels = in_channels out_channels = in_channels if out_channels is None else out_channels self.out_channels = out_channels self.use_conv_shortcut = conv_shortcut self.time_embedding_norm = time_embedding_norm self.up = up self.down = down self.output_scale_factor = output_scale_factor if groups_out is None: groups_out = groups if self.pre_norm: self.norm1 = torch.nn.GroupNorm(num_groups=groups, num_channels=in_channels, eps=eps, affine=True) else: self.norm1 = torch.nn.GroupNorm(num_groups=groups, num_channels=out_channels, eps=eps, affine=True) self.conv1 = torch.nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=1) if time_embedding_norm == "default" and temb_channels > 0: self.temb_proj = torch.nn.Linear(temb_channels, out_channels) elif time_embedding_norm == "scale_shift" and temb_channels > 0: self.temb_proj = torch.nn.Linear(temb_channels, 2 * out_channels) self.norm2 = torch.nn.GroupNorm(num_groups=groups_out, num_channels=out_channels, eps=eps, affine=True) self.dropout = torch.nn.Dropout(dropout) self.conv2 = torch.nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=1, padding=1) if non_linearity == "swish": self.nonlinearity = lambda x: F.silu(x) elif non_linearity == "mish": self.nonlinearity = Mish() elif non_linearity == "silu": self.nonlinearity = nn.SiLU() self.upsample = self.downsample = None if self.up: if kernel == "fir": fir_kernel = (1, 3, 3, 1) self.upsample = lambda x: upsample_2d(x, k=fir_kernel) elif kernel == "sde_vp": self.upsample = partial(F.interpolate, scale_factor=2.0, mode="nearest") else: self.upsample = Upsample2D(in_channels, use_conv=False) elif self.down: if kernel == "fir": fir_kernel = (1, 3, 3, 1) self.downsample = lambda x: downsample_2d(x, k=fir_kernel) elif kernel == "sde_vp": self.downsample = partial(F.avg_pool2d, kernel_size=2, stride=2) else: self.downsample = Downsample2D(in_channels, use_conv=False, padding=1, name="op") self.use_nin_shortcut = self.in_channels != self.out_channels if use_nin_shortcut is None else use_nin_shortcut self.nin_shortcut = None if self.use_nin_shortcut: self.nin_shortcut = torch.nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1, padding=0) def forward(self, x, temb): h = x if self.pre_norm: h = self.norm1(h) h = self.nonlinearity(h) if self.upsample is not None: x = self.upsample(x) h = self.upsample(h) elif self.downsample is not None: x = self.downsample(x) h = self.downsample(h) h = self.conv1(h) if not self.pre_norm: h = self.norm1(h) h = self.nonlinearity(h) if temb is not None: temb = self.temb_proj(self.nonlinearity(temb))[:, :, None, None] else: temb = 0 if self.time_embedding_norm == "scale_shift": scale, shift = torch.chunk(temb, 2, dim=1) h = self.norm2(h) h = h + h * scale + shift h = self.nonlinearity(h) elif self.time_embedding_norm == "default": h = h + temb if self.pre_norm: h = self.norm2(h) h = self.nonlinearity(h) h = self.dropout(h) h = self.conv2(h) if not self.pre_norm: h = 
self.norm2(h) h = self.nonlinearity(h) if self.nin_shortcut is not None: x = self.nin_shortcut(x) return (x + h) / self.output_scale_factor def set_weight(self, resnet): self.norm1.weight.data = resnet.norm1.weight.data self.norm1.bias.data = resnet.norm1.bias.data self.conv1.weight.data = resnet.conv1.weight.data self.conv1.bias.data = resnet.conv1.bias.data self.temb_proj.weight.data = resnet.temb_proj.weight.data self.temb_proj.bias.data = resnet.temb_proj.bias.data self.norm2.weight.data = resnet.norm2.weight.data self.norm2.bias.data = resnet.norm2.bias.data self.conv2.weight.data = resnet.conv2.weight.data self.conv2.bias.data = resnet.conv2.bias.data if self.use_nin_shortcut: self.nin_shortcut.weight.data = resnet.nin_shortcut.weight.data self.nin_shortcut.bias.data = resnet.nin_shortcut.bias.data # TODO(Patrick) - just there to convert the weights; can delete afterward class Block(torch.nn.Module): def __init__(self, dim, dim_out, groups=8): super(Block, self).__init__() self.block = torch.nn.Sequential( torch.nn.Conv2d(dim, dim_out, 3, padding=1), torch.nn.GroupNorm(groups, dim_out), Mish() ) # unet_rl.py class ResidualTemporalBlock(nn.Module): def __init__(self, inp_channels, out_channels, embed_dim, horizon, kernel_size=5): super().__init__() self.blocks = nn.ModuleList( [ Conv1dBlock(inp_channels, out_channels, kernel_size), Conv1dBlock(out_channels, out_channels, kernel_size), ] ) self.time_mlp = nn.Sequential( nn.Mish(), nn.Linear(embed_dim, out_channels), RearrangeDim(), # Rearrange("batch t -> batch t 1"), ) self.residual_conv = ( nn.Conv1d(inp_channels, out_channels, 1) if inp_channels != out_channels else nn.Identity() ) def forward(self, x, t): """ x : [ batch_size x inp_channels x horizon ] t : [ batch_size x embed_dim ] returns: out : [ batch_size x out_channels x horizon ] """ out = self.blocks[0](x) + self.time_mlp(t) out = self.blocks[1](out) return out + self.residual_conv(x) # HELPER Modules def normalization(channels, swish=0.0): """ Make a standard normalization layer, with an optional swish activation. :param channels: number of input channels. :return: an nn.Module for normalization. """ return GroupNorm32(num_channels=channels, num_groups=32, swish=swish) class GroupNorm32(nn.GroupNorm): def __init__(self, num_groups, num_channels, swish, eps=1e-5): super().__init__(num_groups=num_groups, num_channels=num_channels, eps=eps) self.swish = swish def forward(self, x): y = super().forward(x.float()).to(x.dtype) if self.swish == 1.0: y = F.silu(y) elif self.swish: y = y * F.sigmoid(y * float(self.swish)) return y def linear(*args, **kwargs): """ Create a linear module. """ return nn.Linear(*args, **kwargs) def zero_module(module): """ Zero out the parameters of a module and return it. 
""" for p in module.parameters(): p.detach().zero_() return module class Mish(torch.nn.Module): def forward(self, x): return x * torch.tanh(torch.nn.functional.softplus(x)) class Conv1dBlock(nn.Module): """ Conv1d --> GroupNorm --> Mish """ def __init__(self, inp_channels, out_channels, kernel_size, n_groups=8): super().__init__() self.block = nn.Sequential( nn.Conv1d(inp_channels, out_channels, kernel_size, padding=kernel_size // 2), RearrangeDim(), # Rearrange("batch channels horizon -> batch channels 1 horizon"), nn.GroupNorm(n_groups, out_channels), RearrangeDim(), # Rearrange("batch channels 1 horizon -> batch channels horizon"), nn.Mish(), ) def forward(self, x): return self.block(x) class RearrangeDim(nn.Module): def __init__(self): super().__init__() def forward(self, tensor): if len(tensor.shape) == 2: return tensor[:, :, None] if len(tensor.shape) == 3: return tensor[:, :, None, :] elif len(tensor.shape) == 4: return tensor[:, :, 0, :] else: raise ValueError(f"`len(tensor)`: {len(tensor)} has to be 2, 3 or 4.") def upsample_2d(x, k=None, factor=2, gain=1): r"""Upsample2D a batch of 2D images with the given filter. Args: Accepts a batch of 2D images of the shape `[N, C, H, W]` or `[N, H, W, C]` and upsamples each image with the given filter. The filter is normalized so that if the input pixels are constant, they will be scaled by the specified `gain`. Pixels outside the image are assumed to be zero, and the filter is padded with zeros so that its shape is a: multiple of the upsampling factor. x: Input tensor of the shape `[N, C, H, W]` or `[N, H, W, C]`. k: FIR filter of the shape `[firH, firW]` or `[firN]` (separable). The default is `[1] * factor`, which corresponds to nearest-neighbor upsampling. factor: Integer upsampling factor (default: 2). gain: Scaling factor for signal magnitude (default: 1.0). Returns: Tensor of the shape `[N, C, H * factor, W * factor]` """ assert isinstance(factor, int) and factor >= 1 if k is None: k = [1] * factor k = np.asarray(k, dtype=np.float32) if k.ndim == 1: k = np.outer(k, k) k /= np.sum(k) k = k * (gain * (factor**2)) p = k.shape[0] - factor return upfirdn2d_native(x, torch.tensor(k, device=x.device), up=factor, pad=((p + 1) // 2 + factor - 1, p // 2)) def downsample_2d(x, k=None, factor=2, gain=1): r"""Downsample2D a batch of 2D images with the given filter. Args: Accepts a batch of 2D images of the shape `[N, C, H, W]` or `[N, H, W, C]` and downsamples each image with the given filter. The filter is normalized so that if the input pixels are constant, they will be scaled by the specified `gain`. Pixels outside the image are assumed to be zero, and the filter is padded with zeros so that its shape is a multiple of the downsampling factor. x: Input tensor of the shape `[N, C, H, W]` or `[N, H, W, C]`. k: FIR filter of the shape `[firH, firW]` or `[firN]` (separable). The default is `[1] * factor`, which corresponds to average pooling. factor: Integer downsampling factor (default: 2). gain: Scaling factor for signal magnitude (default: 1.0). 
Returns: Tensor of the shape `[N, C, H // factor, W // factor]` """ assert isinstance(factor, int) and factor >= 1 if k is None: k = [1] * factor k = np.asarray(k, dtype=np.float32) if k.ndim == 1: k = np.outer(k, k) k /= np.sum(k) k = k * gain p = k.shape[0] - factor return upfirdn2d_native(x, torch.tensor(k, device=x.device), down=factor, pad=((p + 1) // 2, p // 2)) def upfirdn2d_native(input, kernel, up=1, down=1, pad=(0, 0)): up_x = up_y = up down_x = down_y = down pad_x0 = pad_y0 = pad[0] pad_x1 = pad_y1 = pad[1] _, channel, in_h, in_w = input.shape input = input.reshape(-1, in_h, in_w, 1) _, in_h, in_w, minor = input.shape kernel_h, kernel_w = kernel.shape out = input.view(-1, in_h, 1, in_w, 1, minor) out = F.pad(out, [0, 0, 0, up_x - 1, 0, 0, 0, up_y - 1]) out = out.view(-1, in_h * up_y, in_w * up_x, minor) out = F.pad(out, [0, 0, max(pad_x0, 0), max(pad_x1, 0), max(pad_y0, 0), max(pad_y1, 0)]) out = out[ :, max(-pad_y0, 0) : out.shape[1] - max(-pad_y1, 0), max(-pad_x0, 0) : out.shape[2] - max(-pad_x1, 0), :, ] out = out.permute(0, 3, 1, 2) out = out.reshape([-1, 1, in_h * up_y + pad_y0 + pad_y1, in_w * up_x + pad_x0 + pad_x1]) w = torch.flip(kernel, [0, 1]).view(1, 1, kernel_h, kernel_w) out = F.conv2d(out, w) out = out.reshape( -1, minor, in_h * up_y + pad_y0 + pad_y1 - kernel_h + 1, in_w * up_x + pad_x0 + pad_x1 - kernel_w + 1, ) out = out.permute(0, 2, 3, 1) out = out[:, ::down_y, ::down_x, :] out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1 out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1 return out.view(-1, channel, out_h, out_w)
diffusers_all-main
src/diffusers/models/resnet.py
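A minimal shape-check sketch for the FIR resampling helpers defined in resnet.py above. It assumes this file is importable as `diffusers.models.resnet` in this version of the package, and the tensor sizes are illustrative only: with the default `(1, 3, 3, 1)` kernel, `upsample_2d` doubles the spatial resolution and `downsample_2d` halves it, as their docstrings state.

import torch

from diffusers.models.resnet import downsample_2d, upsample_2d

x = torch.randn(1, 3, 8, 8)                          # [N, C, H, W]
up = upsample_2d(x, k=(1, 3, 3, 1), factor=2)        # FIR-filtered 2x upsampling
down = downsample_2d(up, k=(1, 3, 3, 1), factor=2)   # FIR-filtered 2x downsampling
assert up.shape == (1, 3, 16, 16)
assert down.shape == (1, 3, 8, 8)

The kernel is normalized inside the helpers (and scaled by `gain`), so a constant input keeps its magnitude, which is what lets the score-SDE blocks swap these in for plain nearest-neighbor or average-pool resizing.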
import math from inspect import isfunction import numpy as np import torch import torch.nn as nn import torch.nn.functional as F from ..configuration_utils import ConfigMixin from ..modeling_utils import ModelMixin from .attention import AttentionBlock from .embeddings import get_timestep_embedding from .resnet import Downsample2D, ResnetBlock2D, Upsample2D from .unet_new import UNetMidBlock2D # from .resnet import ResBlock def exists(val): return val is not None def uniq(arr): return {el: True for el in arr}.keys() def default(val, d): if exists(val): return val return d() if isfunction(d) else d def max_neg_value(t): return -torch.finfo(t.dtype).max def init_(tensor): dim = tensor.shape[-1] std = 1 / math.sqrt(dim) tensor.uniform_(-std, std) return tensor # feedforward class GEGLU(nn.Module): def __init__(self, dim_in, dim_out): super().__init__() self.proj = nn.Linear(dim_in, dim_out * 2) def forward(self, x): x, gate = self.proj(x).chunk(2, dim=-1) return x * F.gelu(gate) class FeedForward(nn.Module): def __init__(self, dim, dim_out=None, mult=4, glu=False, dropout=0.0): super().__init__() inner_dim = int(dim * mult) dim_out = default(dim_out, dim) project_in = nn.Sequential(nn.Linear(dim, inner_dim), nn.GELU()) if not glu else GEGLU(dim, inner_dim) self.net = nn.Sequential(project_in, nn.Dropout(dropout), nn.Linear(inner_dim, dim_out)) def forward(self, x): return self.net(x) def zero_module(module): """ Zero out the parameters of a module and return it. """ for p in module.parameters(): p.detach().zero_() return module def Normalize(in_channels): return torch.nn.GroupNorm(num_groups=32, num_channels=in_channels, eps=1e-6, affine=True) def convert_module_to_f16(l): """ Convert primitive modules to float16. """ if isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Conv3d)): l.weight.data = l.weight.data.half() if l.bias is not None: l.bias.data = l.bias.data.half() def convert_module_to_f32(l): """ Convert primitive modules to float32, undoing convert_module_to_f16(). """ if isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Conv3d)): l.weight.data = l.weight.data.float() if l.bias is not None: l.bias.data = l.bias.data.float() def conv_nd(dims, *args, **kwargs): """ Create a 1D, 2D, or 3D convolution module. """ if dims == 1: return nn.Conv1d(*args, **kwargs) elif dims == 2: return nn.Conv2d(*args, **kwargs) elif dims == 3: return nn.Conv3d(*args, **kwargs) raise ValueError(f"unsupported dimensions: {dims}") def linear(*args, **kwargs): """ Create a linear module. """ return nn.Linear(*args, **kwargs) class GroupNorm32(nn.GroupNorm): def __init__(self, num_groups, num_channels, swish, eps=1e-5): super().__init__(num_groups=num_groups, num_channels=num_channels, eps=eps) self.swish = swish def forward(self, x): y = super().forward(x.float()).to(x.dtype) if self.swish == 1.0: y = F.silu(y) elif self.swish: y = y * F.sigmoid(y * float(self.swish)) return y def normalization(channels, swish=0.0): """ Make a standard normalization layer, with an optional swish activation. :param channels: number of input channels. :return: an nn.Module for normalization. """ return GroupNorm32(num_channels=channels, num_groups=32, swish=swish) class TimestepEmbedSequential(nn.Sequential): """ A sequential module that passes timestep embeddings to the children that support it as an extra input. 
""" def forward(self, x, emb, context=None): for layer in self: if isinstance(layer, ResnetBlock2D) or isinstance(layer, TimestepEmbedSequential): x = layer(x, emb) elif isinstance(layer, SpatialTransformer): x = layer(x, context) else: x = layer(x) return x def count_flops_attn(model, _x, y): """ A counter for the `thop` package to count the operations in an attention operation. Meant to be used like: macs, params = thop.profile( model, inputs=(inputs, timestamps), custom_ops={QKVAttention: QKVAttention.count_flops}, ) """ b, c, *spatial = y[0].shape num_spatial = int(np.prod(spatial)) # We perform two matmuls with the same number of ops. # The first computes the weight matrix, the second computes # the combination of the value vectors. matmul_ops = 2 * b * (num_spatial**2) * c model.total_ops += torch.DoubleTensor([matmul_ops]) class UNetLDMModel(ModelMixin, ConfigMixin): """ The full UNet model with attention and timestep embedding. :param in_channels: channels in the input Tensor. :param model_channels: base channel count for the model. :param out_channels: channels in the output Tensor. :param num_res_blocks: number of residual blocks per downsample. :param attention_resolutions: a collection of downsample rates at which attention will take place. May be a set, list, or tuple. For example, if this contains 4, then at 4x downsampling, attention will be used. :param dropout: the dropout probability. :param channel_mult: channel multiplier for each level of the UNet. :param conv_resample: if True, use learned convolutions for upsampling and downsampling. :param dims: determines if the signal is 1D, 2D, or 3D. :param num_classes: if specified (as an int), then this model will be class-conditional with `num_classes` classes. :param use_checkpoint: use gradient checkpointing to reduce memory usage. :param num_heads: the number of attention heads in each attention layer. :param num_heads_channels: if specified, ignore num_heads and instead use a fixed channel width per attention head. :param num_heads_upsample: works with num_heads to set a different number of heads for upsampling. Deprecated. :param use_scale_shift_norm: use a FiLM-like conditioning mechanism. :param resblock_updown: use residual blocks for up/downsampling. :param use_new_attention_order: use a different attention pattern for potentially increased efficiency. 
""" def __init__( self, image_size, in_channels, model_channels, out_channels, num_res_blocks, attention_resolutions, dropout=0, channel_mult=(1, 2, 4, 8), conv_resample=True, dims=2, num_classes=None, use_checkpoint=False, use_fp16=False, num_heads=-1, num_head_channels=-1, num_heads_upsample=-1, use_scale_shift_norm=False, resblock_updown=False, use_new_attention_order=False, use_spatial_transformer=False, # custom transformer support transformer_depth=1, # custom transformer support context_dim=None, # custom transformer support n_embed=None, # custom support for prediction of discrete ids into codebook of first stage vq model legacy=True, ): super().__init__() # register all __init__ params with self.register self.register_to_config( image_size=image_size, in_channels=in_channels, model_channels=model_channels, out_channels=out_channels, num_res_blocks=num_res_blocks, attention_resolutions=attention_resolutions, dropout=dropout, channel_mult=channel_mult, conv_resample=conv_resample, dims=dims, num_classes=num_classes, use_fp16=use_fp16, num_heads=num_heads, num_head_channels=num_head_channels, num_heads_upsample=num_heads_upsample, use_scale_shift_norm=use_scale_shift_norm, resblock_updown=resblock_updown, use_spatial_transformer=use_spatial_transformer, transformer_depth=transformer_depth, context_dim=context_dim, n_embed=n_embed, legacy=legacy, ) if use_spatial_transformer: assert ( context_dim is not None ), "Fool!! You forgot to include the dimension of your cross-attention conditioning..." if context_dim is not None: assert ( use_spatial_transformer ), "Fool!! You forgot to use the spatial transformer for your cross-attention conditioning..." if num_heads_upsample == -1: num_heads_upsample = num_heads if num_heads == -1: assert num_head_channels != -1, "Either num_heads or num_head_channels has to be set" if num_head_channels == -1: assert num_heads != -1, "Either num_heads or num_head_channels has to be set" self.image_size = image_size self.in_channels = in_channels self.model_channels = model_channels self.out_channels = out_channels self.num_res_blocks = num_res_blocks self.attention_resolutions = attention_resolutions self.dropout = dropout self.channel_mult = channel_mult self.conv_resample = conv_resample self.num_classes = num_classes self.dtype_ = torch.float16 if use_fp16 else torch.float32 self.num_heads = num_heads self.num_head_channels = num_head_channels self.num_heads_upsample = num_heads_upsample self.predict_codebook_ids = n_embed is not None time_embed_dim = model_channels * 4 self.time_embed = nn.Sequential( linear(model_channels, time_embed_dim), nn.SiLU(), linear(time_embed_dim, time_embed_dim), ) if self.num_classes is not None: self.label_emb = nn.Embedding(num_classes, time_embed_dim) self.input_blocks = nn.ModuleList( [TimestepEmbedSequential(conv_nd(dims, in_channels, model_channels, 3, padding=1))] ) self.down_in_conv = self.input_blocks[0][0] self.downsample_blocks = nn.ModuleList([]) self.upsample_blocks = nn.ModuleList([]) # ========================= Down (OLD) =================== # self._feature_size = model_channels input_block_chans = [model_channels] ch = model_channels ds = 1 for level, mult in enumerate(channel_mult): for _ in range(num_res_blocks): layers = [ ResnetBlock2D( in_channels=ch, out_channels=mult * model_channels, dropout=dropout, temb_channels=time_embed_dim, eps=1e-5, non_linearity="silu", overwrite_for_ldm=True, ) ] ch = mult * model_channels if ds in attention_resolutions: if num_head_channels == -1: dim_head = ch // num_heads 
else: num_heads = ch // num_head_channels dim_head = num_head_channels if legacy: # num_heads = 1 dim_head = ch // num_heads if use_spatial_transformer else num_head_channels layers.append( AttentionBlock( ch, num_heads=num_heads, num_head_channels=dim_head, ) if not use_spatial_transformer else SpatialTransformer( ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim ) ) self.input_blocks.append(TimestepEmbedSequential(*layers)) self._feature_size += ch input_block_chans.append(ch) if level != len(channel_mult) - 1: out_ch = ch self.input_blocks.append( TimestepEmbedSequential( Downsample2D(ch, use_conv=conv_resample, out_channels=out_ch, padding=1, name="op") ) ) ch = out_ch input_block_chans.append(ch) ds *= 2 self._feature_size += ch input_channels = [model_channels * mult for mult in [1] + list(channel_mult[:-1])] output_channels = [model_channels * mult for mult in channel_mult] if num_head_channels == -1: dim_head = ch // num_heads else: num_heads = ch // num_head_channels dim_head = num_head_channels if legacy: # num_heads = 1 dim_head = ch // num_heads if use_spatial_transformer else num_head_channels if dim_head < 0: dim_head = None # ========================= MID (New) =================== # self.mid = UNetMidBlock2D( in_channels=ch, dropout=dropout, temb_channels=time_embed_dim, resnet_eps=1e-5, resnet_act_fn="silu", resnet_time_scale_shift="scale_shift" if use_scale_shift_norm else "default", attention_layer_type="self" if not use_spatial_transformer else "spatial", attn_num_heads=num_heads, attn_num_head_channels=dim_head, attn_depth=transformer_depth, attn_encoder_channels=context_dim, ) # TODO(Patrick) - delete after weight conversion # init to be able to overwrite `self.mid` self.middle_block = TimestepEmbedSequential( ResnetBlock2D( in_channels=ch, out_channels=None, dropout=dropout, temb_channels=time_embed_dim, eps=1e-5, non_linearity="silu", overwrite_for_ldm=True, ), AttentionBlock( ch, num_heads=num_heads, num_head_channels=dim_head, ) if not use_spatial_transformer else SpatialTransformer(ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim), ResnetBlock2D( in_channels=ch, out_channels=None, dropout=dropout, temb_channels=time_embed_dim, eps=1e-5, non_linearity="silu", overwrite_for_ldm=True, ), ) self.mid.resnets[0] = self.middle_block[0] self.mid.attentions[0] = self.middle_block[1] self.mid.resnets[1] = self.middle_block[2] self._feature_size += ch # ========================= Up (Old) =================== # self.output_blocks = nn.ModuleList([]) for level, mult in list(enumerate(channel_mult))[::-1]: for i in range(num_res_blocks + 1): ich = input_block_chans.pop() layers = [ ResnetBlock2D( in_channels=ch + ich, out_channels=model_channels * mult, dropout=dropout, temb_channels=time_embed_dim, eps=1e-5, non_linearity="silu", overwrite_for_ldm=True, ), ] ch = model_channels * mult if ds in attention_resolutions: if num_head_channels == -1: dim_head = ch // num_heads else: num_heads = ch // num_head_channels dim_head = num_head_channels if legacy: # num_heads = 1 dim_head = ch // num_heads if use_spatial_transformer else num_head_channels layers.append( AttentionBlock( ch, num_heads=num_heads_upsample, num_head_channels=dim_head, ) if not use_spatial_transformer else SpatialTransformer( ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim ) ) if level and i == num_res_blocks: out_ch = ch layers.append(Upsample2D(ch, use_conv=conv_resample, out_channels=out_ch)) ds //= 2 
self.output_blocks.append(TimestepEmbedSequential(*layers)) self._feature_size += ch self.out = nn.Sequential( normalization(ch), nn.SiLU(), zero_module(conv_nd(dims, model_channels, out_channels, 3, padding=1)), ) def forward(self, x, timesteps=None, context=None, y=None, **kwargs): """ Apply the model to an input batch. :param x: an [N x C x ...] Tensor of inputs. :param timesteps: a 1-D batch of timesteps. :param context: conditioning plugged in via crossattn :param y: an [N] Tensor of labels, if class-conditional. :return: an [N x C x ...] Tensor of outputs. """ assert (y is not None) == ( self.num_classes is not None ), "must specify y if and only if the model is class-conditional" hs = [] if not torch.is_tensor(timesteps): timesteps = torch.tensor([timesteps], dtype=torch.long, device=x.device) t_emb = get_timestep_embedding(timesteps, self.model_channels, flip_sin_to_cos=True, downscale_freq_shift=0) emb = self.time_embed(t_emb) if self.num_classes is not None: assert y.shape == (x.shape[0],) emb = emb + self.label_emb(y) h = x.type(self.dtype_) for module in self.input_blocks: h = module(h, emb, context) hs.append(h) h = self.mid(h, emb, context) for module in self.output_blocks: h = torch.cat([h, hs.pop()], dim=1) h = module(h, emb, context) return self.out(h) class SpatialTransformer(nn.Module): """ Transformer block for image-like data. First, project the input (aka embedding) and reshape to b, t, d. Then apply standard transformer action. Finally, reshape to image """ def __init__(self, in_channels, n_heads, d_head, depth=1, dropout=0.0, context_dim=None): super().__init__() self.in_channels = in_channels inner_dim = n_heads * d_head self.norm = Normalize(in_channels) self.proj_in = nn.Conv2d(in_channels, inner_dim, kernel_size=1, stride=1, padding=0) self.transformer_blocks = nn.ModuleList( [ BasicTransformerBlock(inner_dim, n_heads, d_head, dropout=dropout, context_dim=context_dim) for d in range(depth) ] ) self.proj_out = zero_module(nn.Conv2d(inner_dim, in_channels, kernel_size=1, stride=1, padding=0)) def forward(self, x, context=None): # note: if no context is given, cross-attention defaults to self-attention b, c, h, w = x.shape x_in = x x = self.norm(x) x = self.proj_in(x) x = x.permute(0, 2, 3, 1).reshape(b, h * w, c) for block in self.transformer_blocks: x = block(x, context=context) x = x.reshape(b, h, w, c).permute(0, 3, 1, 2) x = self.proj_out(x) return x + x_in class BasicTransformerBlock(nn.Module): def __init__(self, dim, n_heads, d_head, dropout=0.0, context_dim=None, gated_ff=True, checkpoint=True): super().__init__() self.attn1 = CrossAttention( query_dim=dim, heads=n_heads, dim_head=d_head, dropout=dropout ) # is a self-attention self.ff = FeedForward(dim, dropout=dropout, glu=gated_ff) self.attn2 = CrossAttention( query_dim=dim, context_dim=context_dim, heads=n_heads, dim_head=d_head, dropout=dropout ) # is self-attn if context is none self.norm1 = nn.LayerNorm(dim) self.norm2 = nn.LayerNorm(dim) self.norm3 = nn.LayerNorm(dim) self.checkpoint = checkpoint def forward(self, x, context=None): x = self.attn1(self.norm1(x)) + x x = self.attn2(self.norm2(x), context=context) + x x = self.ff(self.norm3(x)) + x return x class CrossAttention(nn.Module): def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0.0): super().__init__() inner_dim = dim_head * heads context_dim = default(context_dim, query_dim) self.scale = dim_head**-0.5 self.heads = heads self.to_q = nn.Linear(query_dim, inner_dim, bias=False) self.to_k = nn.Linear(context_dim, 
inner_dim, bias=False) self.to_v = nn.Linear(context_dim, inner_dim, bias=False) self.to_out = nn.Sequential(nn.Linear(inner_dim, query_dim), nn.Dropout(dropout)) def reshape_heads_to_batch_dim(self, tensor): batch_size, seq_len, dim = tensor.shape head_size = self.heads tensor = tensor.reshape(batch_size, seq_len, head_size, dim // head_size) tensor = tensor.permute(0, 2, 1, 3).reshape(batch_size * head_size, seq_len, dim // head_size) return tensor def reshape_batch_dim_to_heads(self, tensor): batch_size, seq_len, dim = tensor.shape head_size = self.heads tensor = tensor.reshape(batch_size // head_size, head_size, seq_len, dim) tensor = tensor.permute(0, 2, 1, 3).reshape(batch_size // head_size, seq_len, dim * head_size) return tensor def forward(self, x, context=None, mask=None): batch_size, sequence_length, dim = x.shape h = self.heads q = self.to_q(x) context = default(context, x) k = self.to_k(context) v = self.to_v(context) q = self.reshape_heads_to_batch_dim(q) k = self.reshape_heads_to_batch_dim(k) v = self.reshape_heads_to_batch_dim(v) sim = torch.einsum("b i d, b j d -> b i j", q, k) * self.scale if exists(mask): mask = mask.reshape(batch_size, -1) max_neg_value = -torch.finfo(sim.dtype).max mask = mask[:, None, :].repeat(h, 1, 1) sim.masked_fill_(~mask, max_neg_value) # attention, what we cannot get enough of attn = sim.softmax(dim=-1) out = torch.einsum("b i j, b j d -> b i d", attn, v) out = self.reshape_batch_dim_to_heads(out) return self.to_out(out)
diffusers_all-main
src/diffusers/models/unet_ldm.py
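A small usage sketch for the CrossAttention block that SpatialTransformer stacks in unet_ldm.py above. It assumes the file is importable as `diffusers.models.unet_ldm`; the dimensions below are made up. The query is a flattened feature map of shape (batch, h*w, query_dim), the context (e.g. text-encoder hidden states) may have a different width, and the output is projected back to query_dim.

import torch

from diffusers.models.unet_ldm import CrossAttention

attn = CrossAttention(query_dim=64, context_dim=128, heads=4, dim_head=32)
x = torch.randn(2, 256, 64)          # flattened 16x16 feature map with 64 channels
context = torch.randn(2, 77, 128)    # e.g. text-encoder hidden states
out = attn(x, context=context)       # omit `context` to fall back to self-attention
assert out.shape == (2, 256, 64)

Note that the module folds the attention heads into the batch dimension (reshape_heads_to_batch_dim) rather than keeping a separate head axis, so the einsums operate on plain 3-D tensors.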
import numpy as np import torch import torch.nn as nn from ..configuration_utils import ConfigMixin from ..modeling_utils import ModelMixin from .attention import AttentionBlock from .resnet import Downsample2D, ResnetBlock2D, Upsample2D def nonlinearity(x): # swish return x * torch.sigmoid(x) def Normalize(in_channels): return torch.nn.GroupNorm(num_groups=32, num_channels=in_channels, eps=1e-6, affine=True) class Encoder(nn.Module): def __init__( self, *, ch, ch_mult=(1, 2, 4, 8), num_res_blocks, attn_resolutions, dropout=0.0, resamp_with_conv=True, in_channels, resolution, z_channels, double_z=True, **ignore_kwargs, ): super().__init__() self.ch = ch self.temb_ch = 0 self.num_resolutions = len(ch_mult) self.num_res_blocks = num_res_blocks self.resolution = resolution self.in_channels = in_channels # downsampling self.conv_in = torch.nn.Conv2d(in_channels, self.ch, kernel_size=3, stride=1, padding=1) curr_res = resolution in_ch_mult = (1,) + tuple(ch_mult) self.down = nn.ModuleList() for i_level in range(self.num_resolutions): block = nn.ModuleList() attn = nn.ModuleList() block_in = ch * in_ch_mult[i_level] block_out = ch * ch_mult[i_level] for i_block in range(self.num_res_blocks): block.append( ResnetBlock2D( in_channels=block_in, out_channels=block_out, temb_channels=self.temb_ch, dropout=dropout ) ) block_in = block_out if curr_res in attn_resolutions: attn.append(AttentionBlock(block_in, overwrite_qkv=True)) down = nn.Module() down.block = block down.attn = attn if i_level != self.num_resolutions - 1: down.downsample = Downsample2D(block_in, use_conv=resamp_with_conv, padding=0) curr_res = curr_res // 2 self.down.append(down) # middle self.mid = nn.Module() self.mid.block_1 = ResnetBlock2D( in_channels=block_in, out_channels=block_in, temb_channels=self.temb_ch, dropout=dropout ) self.mid.attn_1 = AttentionBlock(block_in, overwrite_qkv=True) self.mid.block_2 = ResnetBlock2D( in_channels=block_in, out_channels=block_in, temb_channels=self.temb_ch, dropout=dropout ) # end self.norm_out = Normalize(block_in) self.conv_out = torch.nn.Conv2d( block_in, 2 * z_channels if double_z else z_channels, kernel_size=3, stride=1, padding=1 ) def forward(self, x): # assert x.shape[2] == x.shape[3] == self.resolution, "{}, {}, {}".format(x.shape[2], x.shape[3], self.resolution) # timestep embedding temb = None # downsampling hs = [self.conv_in(x)] for i_level in range(self.num_resolutions): for i_block in range(self.num_res_blocks): h = self.down[i_level].block[i_block](hs[-1], temb) if len(self.down[i_level].attn) > 0: h = self.down[i_level].attn[i_block](h) hs.append(h) if i_level != self.num_resolutions - 1: hs.append(self.down[i_level].downsample(hs[-1])) # middle h = hs[-1] h = self.mid.block_1(h, temb) h = self.mid.attn_1(h) h = self.mid.block_2(h, temb) # end h = self.norm_out(h) h = nonlinearity(h) h = self.conv_out(h) return h class Decoder(nn.Module): def __init__( self, *, ch, out_ch, ch_mult=(1, 2, 4, 8), num_res_blocks, attn_resolutions, dropout=0.0, resamp_with_conv=True, in_channels, resolution, z_channels, give_pre_end=False, **ignorekwargs, ): super().__init__() self.ch = ch self.temb_ch = 0 self.num_resolutions = len(ch_mult) self.num_res_blocks = num_res_blocks self.resolution = resolution self.in_channels = in_channels self.give_pre_end = give_pre_end # compute in_ch_mult, block_in and curr_res at lowest res block_in = ch * ch_mult[self.num_resolutions - 1] curr_res = resolution // 2 ** (self.num_resolutions - 1) self.z_shape = (1, z_channels, curr_res, curr_res) print("Working 
with z of shape {} = {} dimensions.".format(self.z_shape, np.prod(self.z_shape))) # z to block_in self.conv_in = torch.nn.Conv2d(z_channels, block_in, kernel_size=3, stride=1, padding=1) # middle self.mid = nn.Module() self.mid.block_1 = ResnetBlock2D( in_channels=block_in, out_channels=block_in, temb_channels=self.temb_ch, dropout=dropout ) self.mid.attn_1 = AttentionBlock(block_in, overwrite_qkv=True) self.mid.block_2 = ResnetBlock2D( in_channels=block_in, out_channels=block_in, temb_channels=self.temb_ch, dropout=dropout ) # upsampling self.up = nn.ModuleList() for i_level in reversed(range(self.num_resolutions)): block = nn.ModuleList() attn = nn.ModuleList() block_out = ch * ch_mult[i_level] for i_block in range(self.num_res_blocks + 1): block.append( ResnetBlock2D( in_channels=block_in, out_channels=block_out, temb_channels=self.temb_ch, dropout=dropout ) ) block_in = block_out if curr_res in attn_resolutions: attn.append(AttentionBlock(block_in, overwrite_qkv=True)) up = nn.Module() up.block = block up.attn = attn if i_level != 0: up.upsample = Upsample2D(block_in, use_conv=resamp_with_conv) curr_res = curr_res * 2 self.up.insert(0, up) # prepend to get consistent order # end self.norm_out = Normalize(block_in) self.conv_out = torch.nn.Conv2d(block_in, out_ch, kernel_size=3, stride=1, padding=1) def forward(self, z): # assert z.shape[1:] == self.z_shape[1:] self.last_z_shape = z.shape # timestep embedding temb = None # z to block_in h = self.conv_in(z) # middle h = self.mid.block_1(h, temb) h = self.mid.attn_1(h) h = self.mid.block_2(h, temb) # upsampling for i_level in reversed(range(self.num_resolutions)): for i_block in range(self.num_res_blocks + 1): h = self.up[i_level].block[i_block](h, temb) if len(self.up[i_level].attn) > 0: h = self.up[i_level].attn[i_block](h) if i_level != 0: h = self.up[i_level].upsample(h) # end if self.give_pre_end: return h h = self.norm_out(h) h = nonlinearity(h) h = self.conv_out(h) return h class VectorQuantizer(nn.Module): """ Improved version over VectorQuantizer, can be used as a drop-in replacement. Mostly avoids costly matrix multiplications and allows for post-hoc remapping of indices. """ # NOTE: due to a bug the beta term was applied to the wrong term. for # backwards compatibility we use the buggy version by default, but you can # specify legacy=False to fix it. def __init__(self, n_e, e_dim, beta, remap=None, unknown_index="random", sane_index_shape=False, legacy=True): super().__init__() self.n_e = n_e self.e_dim = e_dim self.beta = beta self.legacy = legacy self.embedding = nn.Embedding(self.n_e, self.e_dim) self.embedding.weight.data.uniform_(-1.0 / self.n_e, 1.0 / self.n_e) self.remap = remap if self.remap is not None: self.register_buffer("used", torch.tensor(np.load(self.remap))) self.re_embed = self.used.shape[0] self.unknown_index = unknown_index # "random" or "extra" or integer if self.unknown_index == "extra": self.unknown_index = self.re_embed self.re_embed = self.re_embed + 1 print( f"Remapping {self.n_e} indices to {self.re_embed} indices. " f"Using {self.unknown_index} for unknown indices." 
) else: self.re_embed = n_e self.sane_index_shape = sane_index_shape def remap_to_used(self, inds): ishape = inds.shape assert len(ishape) > 1 inds = inds.reshape(ishape[0], -1) used = self.used.to(inds) match = (inds[:, :, None] == used[None, None, ...]).long() new = match.argmax(-1) unknown = match.sum(2) < 1 if self.unknown_index == "random": new[unknown] = torch.randint(0, self.re_embed, size=new[unknown].shape).to(device=new.device) else: new[unknown] = self.unknown_index return new.reshape(ishape) def unmap_to_all(self, inds): ishape = inds.shape assert len(ishape) > 1 inds = inds.reshape(ishape[0], -1) used = self.used.to(inds) if self.re_embed > self.used.shape[0]: # extra token inds[inds >= self.used.shape[0]] = 0 # simply set to zero back = torch.gather(used[None, :][inds.shape[0] * [0], :], 1, inds) return back.reshape(ishape) def forward(self, z): # reshape z -> (batch, height, width, channel) and flatten z = z.permute(0, 2, 3, 1).contiguous() z_flattened = z.view(-1, self.e_dim) # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z d = ( torch.sum(z_flattened**2, dim=1, keepdim=True) + torch.sum(self.embedding.weight**2, dim=1) - 2 * torch.einsum("bd,dn->bn", z_flattened, self.embedding.weight.t()) ) min_encoding_indices = torch.argmin(d, dim=1) z_q = self.embedding(min_encoding_indices).view(z.shape) perplexity = None min_encodings = None # compute loss for embedding if not self.legacy: loss = self.beta * torch.mean((z_q.detach() - z) ** 2) + torch.mean((z_q - z.detach()) ** 2) else: loss = torch.mean((z_q.detach() - z) ** 2) + self.beta * torch.mean((z_q - z.detach()) ** 2) # preserve gradients z_q = z + (z_q - z).detach() # reshape back to match original input shape z_q = z_q.permute(0, 3, 1, 2).contiguous() if self.remap is not None: min_encoding_indices = min_encoding_indices.reshape(z.shape[0], -1) # add batch axis min_encoding_indices = self.remap_to_used(min_encoding_indices) min_encoding_indices = min_encoding_indices.reshape(-1, 1) # flatten if self.sane_index_shape: min_encoding_indices = min_encoding_indices.reshape(z_q.shape[0], z_q.shape[2], z_q.shape[3]) return z_q, loss, (perplexity, min_encodings, min_encoding_indices) def get_codebook_entry(self, indices, shape): # shape specifying (batch, height, width, channel) if self.remap is not None: indices = indices.reshape(shape[0], -1) # add batch axis indices = self.unmap_to_all(indices) indices = indices.reshape(-1) # flatten again # get quantized latent vectors z_q = self.embedding(indices) if shape is not None: z_q = z_q.view(shape) # reshape back to match original input shape z_q = z_q.permute(0, 3, 1, 2).contiguous() return z_q class DiagonalGaussianDistribution(object): def __init__(self, parameters, deterministic=False): self.parameters = parameters self.mean, self.logvar = torch.chunk(parameters, 2, dim=1) self.logvar = torch.clamp(self.logvar, -30.0, 20.0) self.deterministic = deterministic self.std = torch.exp(0.5 * self.logvar) self.var = torch.exp(self.logvar) if self.deterministic: self.var = self.std = torch.zeros_like(self.mean).to(device=self.parameters.device) def sample(self): x = self.mean + self.std * torch.randn(self.mean.shape).to(device=self.parameters.device) return x def kl(self, other=None): if self.deterministic: return torch.Tensor([0.0]) else: if other is None: return 0.5 * torch.sum(torch.pow(self.mean, 2) + self.var - 1.0 - self.logvar, dim=[1, 2, 3]) else: return 0.5 * torch.sum( torch.pow(self.mean - other.mean, 2) / other.var + self.var / other.var - 1.0 - 
self.logvar + other.logvar, dim=[1, 2, 3], ) def nll(self, sample, dims=[1, 2, 3]): if self.deterministic: return torch.Tensor([0.0]) logtwopi = np.log(2.0 * np.pi) return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var, dim=dims) def mode(self): return self.mean class VQModel(ModelMixin, ConfigMixin): def __init__( self, ch, out_ch, num_res_blocks, attn_resolutions, in_channels, resolution, z_channels, n_embed, embed_dim, remap=None, sane_index_shape=False, # tell vector quantizer to return indices as bhw ch_mult=(1, 2, 4, 8), dropout=0.0, double_z=True, resamp_with_conv=True, give_pre_end=False, ): super().__init__() # register all __init__ params with self.register self.register_to_config( ch=ch, out_ch=out_ch, num_res_blocks=num_res_blocks, attn_resolutions=attn_resolutions, in_channels=in_channels, resolution=resolution, z_channels=z_channels, n_embed=n_embed, embed_dim=embed_dim, remap=remap, sane_index_shape=sane_index_shape, ch_mult=ch_mult, dropout=dropout, double_z=double_z, resamp_with_conv=resamp_with_conv, give_pre_end=give_pre_end, ) # pass init params to Encoder self.encoder = Encoder( ch=ch, num_res_blocks=num_res_blocks, attn_resolutions=attn_resolutions, in_channels=in_channels, resolution=resolution, z_channels=z_channels, ch_mult=ch_mult, dropout=dropout, resamp_with_conv=resamp_with_conv, double_z=double_z, give_pre_end=give_pre_end, ) self.quant_conv = torch.nn.Conv2d(z_channels, embed_dim, 1) self.quantize = VectorQuantizer(n_embed, embed_dim, beta=0.25, remap=remap, sane_index_shape=sane_index_shape) self.post_quant_conv = torch.nn.Conv2d(embed_dim, z_channels, 1) # pass init params to Decoder self.decoder = Decoder( ch=ch, out_ch=out_ch, num_res_blocks=num_res_blocks, attn_resolutions=attn_resolutions, in_channels=in_channels, resolution=resolution, z_channels=z_channels, ch_mult=ch_mult, dropout=dropout, resamp_with_conv=resamp_with_conv, give_pre_end=give_pre_end, ) def encode(self, x): h = self.encoder(x) h = self.quant_conv(h) return h def decode(self, h, force_not_quantize=False): # also go through quantization layer if not force_not_quantize: quant, emb_loss, info = self.quantize(h) else: quant = h quant = self.post_quant_conv(quant) dec = self.decoder(quant) return dec def forward(self, sample): x = sample h = self.encode(x) dec = self.decode(h) return dec class AutoencoderKL(ModelMixin, ConfigMixin): def __init__( self, ch, out_ch, num_res_blocks, attn_resolutions, in_channels, resolution, z_channels, embed_dim, remap=None, sane_index_shape=False, # tell vector quantizer to return indices as bhw ch_mult=(1, 2, 4, 8), dropout=0.0, double_z=True, resamp_with_conv=True, give_pre_end=False, ): super().__init__() # register all __init__ params with self.register self.register_to_config( ch=ch, out_ch=out_ch, num_res_blocks=num_res_blocks, attn_resolutions=attn_resolutions, in_channels=in_channels, resolution=resolution, z_channels=z_channels, embed_dim=embed_dim, remap=remap, sane_index_shape=sane_index_shape, ch_mult=ch_mult, dropout=dropout, double_z=double_z, resamp_with_conv=resamp_with_conv, give_pre_end=give_pre_end, ) # pass init params to Encoder self.encoder = Encoder( ch=ch, out_ch=out_ch, num_res_blocks=num_res_blocks, attn_resolutions=attn_resolutions, in_channels=in_channels, resolution=resolution, z_channels=z_channels, ch_mult=ch_mult, dropout=dropout, resamp_with_conv=resamp_with_conv, double_z=double_z, give_pre_end=give_pre_end, ) # pass init params to Decoder self.decoder = Decoder( ch=ch, out_ch=out_ch, 
num_res_blocks=num_res_blocks, attn_resolutions=attn_resolutions, in_channels=in_channels, resolution=resolution, z_channels=z_channels, ch_mult=ch_mult, dropout=dropout, resamp_with_conv=resamp_with_conv, give_pre_end=give_pre_end, ) self.quant_conv = torch.nn.Conv2d(2 * z_channels, 2 * embed_dim, 1) self.post_quant_conv = torch.nn.Conv2d(embed_dim, z_channels, 1) def encode(self, x): h = self.encoder(x) moments = self.quant_conv(h) posterior = DiagonalGaussianDistribution(moments) return posterior def decode(self, z): z = self.post_quant_conv(z) dec = self.decoder(z) return dec def forward(self, sample, sample_posterior=False): x = sample posterior = self.encode(x) if sample_posterior: z = posterior.sample() else: z = posterior.mode() dec = self.decode(z) return dec
diffusers_all-main
src/diffusers/models/vae.py
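A short sketch of the posterior object returned by AutoencoderKL.encode in vae.py above, assuming the file is importable as `diffusers.models.vae`; the tensor sizes are arbitrary. The `moments` tensor stacks mean and log-variance along the channel dimension, `sample()` draws a reparameterised latent, and `kl()` gives the per-sample KL term against a standard normal.

import torch

from diffusers.models.vae import DiagonalGaussianDistribution

moments = torch.randn(2, 8, 4, 4)                  # [mean | logvar] along dim=1
posterior = DiagonalGaussianDistribution(moments)
z = posterior.sample()                             # mean + std * eps
kl = posterior.kl()                                # summed over C, H, W per sample
assert z.shape == (2, 4, 4, 4) and kl.shape == (2,)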
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # helpers functions import functools import math import numpy as np import torch import torch.nn as nn from ..configuration_utils import ConfigMixin from ..modeling_utils import ModelMixin from .attention import AttentionBlock from .embeddings import GaussianFourierProjection, get_timestep_embedding from .resnet import Downsample2D, FirDownsample2D, FirUpsample2D, ResnetBlock2D, Upsample2D from .unet_new import UNetMidBlock2D class Combine(nn.Module): """Combine information from skip connections.""" def __init__(self, dim1, dim2, method="cat"): super().__init__() # 1x1 convolution with DDPM initialization. self.Conv_0 = nn.Conv2d(dim1, dim2, kernel_size=1, padding=0) self.method = method def forward(self, x, y): h = self.Conv_0(x) if self.method == "cat": return torch.cat([h, y], dim=1) elif self.method == "sum": return h + y else: raise ValueError(f"Method {self.method} not recognized.") class NCSNpp(ModelMixin, ConfigMixin): """NCSN++ model""" def __init__( self, image_size=1024, num_channels=3, centered=False, attn_resolutions=(16,), ch_mult=(1, 2, 4, 8, 16, 32, 32, 32), conditional=True, conv_size=3, dropout=0.0, embedding_type="fourier", fir=True, fir_kernel=(1, 3, 3, 1), fourier_scale=16, init_scale=0.0, nf=16, num_res_blocks=1, progressive="output_skip", progressive_combine="sum", progressive_input="input_skip", resamp_with_conv=True, scale_by_sigma=True, skip_rescale=True, continuous=True, ): super().__init__() self.register_to_config( image_size=image_size, num_channels=num_channels, centered=centered, attn_resolutions=attn_resolutions, ch_mult=ch_mult, conditional=conditional, conv_size=conv_size, dropout=dropout, embedding_type=embedding_type, fir=fir, fir_kernel=fir_kernel, fourier_scale=fourier_scale, init_scale=init_scale, nf=nf, num_res_blocks=num_res_blocks, progressive=progressive, progressive_combine=progressive_combine, progressive_input=progressive_input, resamp_with_conv=resamp_with_conv, scale_by_sigma=scale_by_sigma, skip_rescale=skip_rescale, continuous=continuous, ) self.act = nn.SiLU() self.nf = nf self.num_res_blocks = num_res_blocks self.attn_resolutions = attn_resolutions self.num_resolutions = len(ch_mult) self.all_resolutions = all_resolutions = [image_size // (2**i) for i in range(self.num_resolutions)] self.conditional = conditional self.skip_rescale = skip_rescale self.progressive = progressive self.progressive_input = progressive_input self.embedding_type = embedding_type assert progressive in ["none", "output_skip", "residual"] assert progressive_input in ["none", "input_skip", "residual"] assert embedding_type in ["fourier", "positional"] combine_method = progressive_combine.lower() combiner = functools.partial(Combine, method=combine_method) modules = [] # timestep/noise_level embedding; only for continuous training if embedding_type == "fourier": # Gaussian Fourier features embeddings. 
modules.append(GaussianFourierProjection(embedding_size=nf, scale=fourier_scale)) embed_dim = 2 * nf elif embedding_type == "positional": embed_dim = nf else: raise ValueError(f"embedding type {embedding_type} unknown.") modules.append(nn.Linear(embed_dim, nf * 4)) modules.append(nn.Linear(nf * 4, nf * 4)) AttnBlock = functools.partial(AttentionBlock, overwrite_linear=True, rescale_output_factor=math.sqrt(2.0)) if self.fir: Up_sample = functools.partial(FirUpsample2D, fir_kernel=fir_kernel, use_conv=resamp_with_conv) else: Up_sample = functools.partial(Upsample2D, name="Conv2d_0") if progressive == "output_skip": self.pyramid_upsample = Up_sample(channels=None, use_conv=False) elif progressive == "residual": pyramid_upsample = functools.partial(Up_sample, use_conv=True) if self.fir: Down_sample = functools.partial(FirDownsample2D, fir_kernel=fir_kernel, use_conv=resamp_with_conv) else: Down_sample = functools.partial(Downsample2D, padding=0, name="Conv2d_0") if progressive_input == "input_skip": self.pyramid_downsample = Down_sample(channels=None, use_conv=False) elif progressive_input == "residual": pyramid_downsample = functools.partial(Down_sample, use_conv=True) channels = num_channels if progressive_input != "none": input_pyramid_ch = channels modules.append(nn.Conv2d(channels, nf, kernel_size=3, padding=1)) hs_c = [nf] in_ch = nf for i_level in range(self.num_resolutions): # Residual blocks for this resolution for i_block in range(num_res_blocks): out_ch = nf * ch_mult[i_level] modules.append( ResnetBlock2D( in_channels=in_ch, out_channels=out_ch, temb_channels=4 * nf, output_scale_factor=np.sqrt(2.0), non_linearity="silu", groups=min(in_ch // 4, 32), groups_out=min(out_ch // 4, 32), overwrite_for_score_vde=True, ) ) in_ch = out_ch if all_resolutions[i_level] in attn_resolutions: modules.append(AttnBlock(channels=in_ch)) hs_c.append(in_ch) if i_level != self.num_resolutions - 1: modules.append( ResnetBlock2D( in_channels=in_ch, temb_channels=4 * nf, output_scale_factor=np.sqrt(2.0), non_linearity="silu", groups=min(in_ch // 4, 32), groups_out=min(out_ch // 4, 32), overwrite_for_score_vde=True, down=True, kernel="fir" if self.fir else "sde_vp", use_nin_shortcut=True, ) ) if progressive_input == "input_skip": modules.append(combiner(dim1=input_pyramid_ch, dim2=in_ch)) if combine_method == "cat": in_ch *= 2 elif progressive_input == "residual": modules.append(pyramid_downsample(channels=input_pyramid_ch, out_channels=in_ch)) input_pyramid_ch = in_ch hs_c.append(in_ch) # mid self.mid = UNetMidBlock2D( in_channels=in_ch, temb_channels=4 * nf, output_scale_factor=math.sqrt(2.0), resnet_act_fn="silu", resnet_groups=min(in_ch // 4, 32), dropout=dropout, ) in_ch = hs_c[-1] modules.append( ResnetBlock2D( in_channels=in_ch, temb_channels=4 * nf, output_scale_factor=np.sqrt(2.0), non_linearity="silu", groups=min(in_ch // 4, 32), groups_out=min(out_ch // 4, 32), overwrite_for_score_vde=True, ) ) modules.append(AttnBlock(channels=in_ch)) modules.append( ResnetBlock2D( in_channels=in_ch, temb_channels=4 * nf, output_scale_factor=np.sqrt(2.0), non_linearity="silu", groups=min(in_ch // 4, 32), groups_out=min(out_ch // 4, 32), overwrite_for_score_vde=True, ) ) self.mid.resnets[0] = modules[len(modules) - 3] self.mid.attentions[0] = modules[len(modules) - 2] self.mid.resnets[1] = modules[len(modules) - 1] pyramid_ch = 0 # Upsampling block for i_level in reversed(range(self.num_resolutions)): for i_block in range(num_res_blocks + 1): out_ch = nf * ch_mult[i_level] in_ch = in_ch + hs_c.pop() 
modules.append( ResnetBlock2D( in_channels=in_ch, out_channels=out_ch, temb_channels=4 * nf, output_scale_factor=np.sqrt(2.0), non_linearity="silu", groups=min(in_ch // 4, 32), groups_out=min(out_ch // 4, 32), overwrite_for_score_vde=True, ) ) in_ch = out_ch if all_resolutions[i_level] in attn_resolutions: modules.append(AttnBlock(channels=in_ch)) if progressive != "none": if i_level == self.num_resolutions - 1: if progressive == "output_skip": modules.append(nn.GroupNorm(num_groups=min(in_ch // 4, 32), num_channels=in_ch, eps=1e-6)) modules.append(nn.Conv2d(in_ch, channels, kernel_size=3, padding=1)) pyramid_ch = channels elif progressive == "residual": modules.append(nn.GroupNorm(num_groups=min(in_ch // 4, 32), num_channels=in_ch, eps=1e-6)) modules.append(nn.Conv2d(in_ch, in_ch, bias=True, kernel_size=3, padding=1)) pyramid_ch = in_ch else: raise ValueError(f"{progressive} is not a valid name.") else: if progressive == "output_skip": modules.append(nn.GroupNorm(num_groups=min(in_ch // 4, 32), num_channels=in_ch, eps=1e-6)) modules.append(nn.Conv2d(in_ch, channels, bias=True, kernel_size=3, padding=1)) pyramid_ch = channels elif progressive == "residual": modules.append(pyramid_upsample(channels=pyramid_ch, out_channels=in_ch)) pyramid_ch = in_ch else: raise ValueError(f"{progressive} is not a valid name") if i_level != 0: modules.append( ResnetBlock2D( in_channels=in_ch, temb_channels=4 * nf, output_scale_factor=np.sqrt(2.0), non_linearity="silu", groups=min(in_ch // 4, 32), groups_out=min(out_ch // 4, 32), overwrite_for_score_vde=True, up=True, kernel="fir" if self.fir else "sde_vp", use_nin_shortcut=True, ) ) assert not hs_c if progressive != "output_skip": modules.append(nn.GroupNorm(num_groups=min(in_ch // 4, 32), num_channels=in_ch, eps=1e-6)) modules.append(nn.Conv2d(in_ch, channels, kernel_size=3, padding=1)) self.all_modules = nn.ModuleList(modules) def forward(self, sample, timesteps, sigmas=None): x = sample # timestep/noise_level embedding; only for continuous training modules = self.all_modules m_idx = 0 if self.embedding_type == "fourier": # Gaussian Fourier features embeddings. used_sigmas = timesteps temb = modules[m_idx](torch.log(used_sigmas)) m_idx += 1 elif self.embedding_type == "positional": # Sinusoidal positional embeddings. 
timesteps = timesteps used_sigmas = sigmas temb = get_timestep_embedding(timesteps, self.nf) else: raise ValueError(f"embedding type {self.embedding_type} unknown.") if self.conditional: temb = modules[m_idx](temb) m_idx += 1 temb = modules[m_idx](self.act(temb)) m_idx += 1 else: temb = None # If input data is in [0, 1] if not self.config.centered: x = 2 * x - 1.0 # Downsampling block input_pyramid = None if self.progressive_input != "none": input_pyramid = x hs = [modules[m_idx](x)] m_idx += 1 for i_level in range(self.num_resolutions): # Residual blocks for this resolution for i_block in range(self.num_res_blocks): h = modules[m_idx](hs[-1], temb) m_idx += 1 if h.shape[-1] in self.attn_resolutions: h = modules[m_idx](h) m_idx += 1 hs.append(h) if i_level != self.num_resolutions - 1: h = modules[m_idx](hs[-1], temb) m_idx += 1 if self.progressive_input == "input_skip": input_pyramid = self.pyramid_downsample(input_pyramid) h = modules[m_idx](input_pyramid, h) m_idx += 1 elif self.progressive_input == "residual": input_pyramid = modules[m_idx](input_pyramid) m_idx += 1 if self.skip_rescale: input_pyramid = (input_pyramid + h) / np.sqrt(2.0) else: input_pyramid = input_pyramid + h h = input_pyramid hs.append(h) # h = hs[-1] # h = modules[m_idx](h, temb) # m_idx += 1 # h = modules[m_idx](h) # m_idx += 1 # h = modules[m_idx](h, temb) # m_idx += 1 h = self.mid(h, temb) m_idx += 3 pyramid = None # Upsampling block for i_level in reversed(range(self.num_resolutions)): for i_block in range(self.num_res_blocks + 1): h = modules[m_idx](torch.cat([h, hs.pop()], dim=1), temb) m_idx += 1 if h.shape[-1] in self.attn_resolutions: h = modules[m_idx](h) m_idx += 1 if self.progressive != "none": if i_level == self.num_resolutions - 1: if self.progressive == "output_skip": pyramid = self.act(modules[m_idx](h)) m_idx += 1 pyramid = modules[m_idx](pyramid) m_idx += 1 elif self.progressive == "residual": pyramid = self.act(modules[m_idx](h)) m_idx += 1 pyramid = modules[m_idx](pyramid) m_idx += 1 else: raise ValueError(f"{self.progressive} is not a valid name.") else: if self.progressive == "output_skip": pyramid = self.pyramid_upsample(pyramid) pyramid_h = self.act(modules[m_idx](h)) m_idx += 1 pyramid_h = modules[m_idx](pyramid_h) m_idx += 1 pyramid = pyramid + pyramid_h elif self.progressive == "residual": pyramid = modules[m_idx](pyramid) m_idx += 1 if self.skip_rescale: pyramid = (pyramid + h) / np.sqrt(2.0) else: pyramid = pyramid + h h = pyramid else: raise ValueError(f"{self.progressive} is not a valid name") if i_level != 0: h = modules[m_idx](h, temb) m_idx += 1 assert not hs if self.progressive == "output_skip": h = pyramid else: h = self.act(modules[m_idx](h)) m_idx += 1 h = modules[m_idx](h) m_idx += 1 assert m_idx == len(modules) if self.config.scale_by_sigma: used_sigmas = used_sigmas.reshape((x.shape[0], *([1] * len(x.shape[1:])))) h = h / used_sigmas return h
diffusers_all-main
src/diffusers/models/unet_sde_score_estimation.py
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from torch import nn from .attention import AttentionBlockNew from .resnet import Downsample2D, ResnetBlock, Upsample2D def get_down_block( down_block_type, num_layers, in_channels, out_channels, temb_channels, add_downsample, resnet_eps, resnet_act_fn, attn_num_head_channels, ): if down_block_type == "UNetResDownBlock2D": return UNetResAttnDownBlock2D( num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, temb_channels=temb_channels, add_downsample=add_downsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, ) elif down_block_type == "UNetResAttnDownBlock2D": return UNetResAttnDownBlock2D( num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, temb_channels=temb_channels, add_downsample=add_downsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, attn_num_head_channels=attn_num_head_channels, ) def get_up_block( up_block_type, num_layers, in_channels, next_channels, temb_channels, add_upsample, resnet_eps, resnet_act_fn, attn_num_head_channels, ): if up_block_type == "UNetResUpBlock2D": return UNetResUpBlock2D( num_layers=num_layers, in_channels=in_channels, next_channels=next_channels, temb_channels=temb_channels, add_upsample=add_upsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, ) elif up_block_type == "UNetResAttnUpBlock2D": return UNetResAttnUpBlock2D( num_layers=num_layers, in_channels=in_channels, next_channels=next_channels, temb_channels=temb_channels, add_upsample=add_upsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, attn_num_head_channels=attn_num_head_channels, ) class UNetMidBlock2D(nn.Module): def __init__( self, in_channels: int, temb_channels: int, dropout: float = 0.0, num_layers: int = 1, resnet_eps: float = 1e-6, resnet_time_scale_shift: str = "default", resnet_act_fn: str = "swish", resnet_groups: int = 32, resnet_pre_norm: bool = True, attn_num_head_channels=1, output_scale_factor=1.0, **kwargs, ): super().__init__() # there is always at least one resnet resnets = [ ResnetBlock( in_channels=in_channels, out_channels=in_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm, ) ] attentions = [] for _ in range(num_layers): attentions.append( AttentionBlockNew( in_channels, num_head_channels=attn_num_head_channels, rescale_output_factor=output_scale_factor, ) ) resnets.append( ResnetBlock( in_channels=in_channels, out_channels=in_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm, ) ) self.attentions = nn.ModuleList(attentions) self.resnets = nn.ModuleList(resnets) def forward(self, hidden_states, temb=None, encoder_states=None, 
mask=None): if mask is not None: hidden_states = self.resnets[0](hidden_states, temb, mask=mask) else: hidden_states = self.resnets[0](hidden_states, temb) for attn, resnet in zip(self.attentions, self.resnets[1:]): hidden_states = attn(hidden_states, encoder_states) if mask is not None: hidden_states = resnet(hidden_states, temb, mask=mask) else: hidden_states = resnet(hidden_states, temb) return hidden_states class UNetResAttnDownBlock2D(nn.Module): def __init__( self, in_channels: int, out_channels: int, temb_channels: int, dropout: float = 0.0, num_layers: int = 1, resnet_eps: float = 1e-6, resnet_time_scale_shift: str = "default", resnet_act_fn: str = "swish", resnet_groups: int = 32, resnet_pre_norm: bool = True, attn_num_head_channels=1, output_scale_factor=1.0, add_downsample=True, ): super().__init__() resnets = [] attentions = [] for i in range(num_layers): in_channels = in_channels if i == 0 else out_channels resnets.append( ResnetBlock( in_channels=in_channels, out_channels=out_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm, ) ) attentions.append( AttentionBlockNew( out_channels, num_head_channels=attn_num_head_channels, rescale_output_factor=output_scale_factor, ) ) self.attentions = nn.ModuleList(attentions) self.resnets = nn.ModuleList(resnets) if add_downsample: self.downsamplers = nn.ModuleList( [Downsample2D(in_channels, use_conv=True, out_channels=out_channels, padding=1, name="op")] ) else: self.downsamplers = None def forward(self, hidden_states, temb=None): output_states = () for resnet, attn in zip(self.resnets, self.attentions): hidden_states = resnet(hidden_states, temb) hidden_states = attn(hidden_states) output_states += (hidden_states,) if self.downsamplers is not None: for downsampler in self.downsamplers: hidden_states = downsampler(hidden_states) output_states += (hidden_states,) return hidden_states, output_states class UNetResDownBlock2D(nn.Module): def __init__( self, in_channels: int, out_channels: int, temb_channels: int, dropout: float = 0.0, num_layers: int = 1, resnet_eps: float = 1e-6, resnet_time_scale_shift: str = "default", resnet_act_fn: str = "swish", resnet_groups: int = 32, resnet_pre_norm: bool = True, output_scale_factor=1.0, add_downsample=True, ): super().__init__() resnets = [] for i in range(num_layers): in_channels = in_channels if i == 0 else out_channels resnets.append( ResnetBlock( in_channels=in_channels, out_channels=out_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm, ) ) self.resnets = nn.ModuleList(resnets) if add_downsample: self.downsamplers = nn.ModuleList( [Downsample2D(in_channels, use_conv=True, out_channels=out_channels, padding=1, name="op")] ) else: self.downsamplers = None def forward(self, hidden_states, temb=None): output_states = () for resnet in self.resnets: hidden_states = resnet(hidden_states, temb) output_states += (hidden_states,) if self.downsamplers is not None: for downsampler in self.downsamplers: hidden_states = downsampler(hidden_states) output_states += (hidden_states,) return hidden_states, output_states class UNetResAttnUpBlock2D(nn.Module): def __init__( self, in_channels: int, next_channels: int, temb_channels: int, dropout: float = 
0.0, num_layers: int = 1, resnet_eps: float = 1e-6, resnet_time_scale_shift: str = "default", resnet_act_fn: str = "swish", resnet_groups: int = 32, resnet_pre_norm: bool = True, attention_layer_type: str = "self", attn_num_head_channels=1, output_scale_factor=1.0, add_upsample=True, ): super().__init__() resnets = [] attentions = [] for i in range(num_layers): resnet_channels = in_channels if i < num_layers - 1 else next_channels resnets.append( ResnetBlock( in_channels=in_channels + resnet_channels, out_channels=in_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm, ) ) attentions.append( AttentionBlockNew( in_channels, num_head_channels=attn_num_head_channels, rescale_output_factor=output_scale_factor, ) ) self.attentions = nn.ModuleList(attentions) self.resnets = nn.ModuleList(resnets) if add_upsample: self.upsamplers = nn.ModuleList([Upsample2D(in_channels, use_conv=True, out_channels=in_channels)]) else: self.upsamplers = None def forward(self, hidden_states, res_hidden_states_tuple, temb=None): for resnet, attn in zip(self.resnets, self.attentions): # pop res hidden states res_hidden_states = res_hidden_states_tuple[-1] res_hidden_states_tuple = res_hidden_states_tuple[:-1] hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1) hidden_states = resnet(hidden_states, temb) hidden_states = attn(hidden_states) if self.upsamplers is not None: for upsampler in self.upsamplers: hidden_states = upsampler(hidden_states) return hidden_states class UNetResUpBlock2D(nn.Module): def __init__( self, in_channels: int, next_channels: int, temb_channels: int, dropout: float = 0.0, num_layers: int = 1, resnet_eps: float = 1e-6, resnet_time_scale_shift: str = "default", resnet_act_fn: str = "swish", resnet_groups: int = 32, resnet_pre_norm: bool = True, attention_layer_type: str = "self", output_scale_factor=1.0, add_upsample=True, ): super().__init__() resnets = [] for i in range(num_layers): resnet_channels = in_channels if i < num_layers - 1 else next_channels resnets.append( ResnetBlock( in_channels=in_channels + resnet_channels, out_channels=in_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm, ) ) self.resnets = nn.ModuleList(resnets) if add_upsample: self.upsamplers = nn.ModuleList([Upsample2D(in_channels, use_conv=True, out_channels=in_channels)]) else: self.upsamplers = None def forward(self, hidden_states, res_hidden_states_tuple, temb=None): for resnet in self.resnets: # pop res hidden states res_hidden_states = res_hidden_states_tuple[-1] res_hidden_states_tuple = res_hidden_states_tuple[:-1] hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1) hidden_states = resnet(hidden_states, temb) if self.upsamplers is not None: for upsampler in self.upsamplers: hidden_states = upsampler(hidden_states) return hidden_states
diffusers_all-main
src/diffusers/models/unet_new.py
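The blocks above consume a time embedding and return their skip connections as tuples. A minimal shape-check sketch follows (not part of the repository; the import path, channel sizes, and the behavior of the unshown ResnetBlock/AttentionBlockNew are assumptions based on how they are used in the file above):

import torch

from diffusers.models.unet_new import UNetMidBlock2D, UNetResDownBlock2D

# illustrative sizes only; temb_channels must match what the surrounding UNet produces
down = UNetResDownBlock2D(in_channels=32, out_channels=64, temb_channels=128, num_layers=2)
mid = UNetMidBlock2D(in_channels=64, temb_channels=128)

hidden = torch.randn(1, 32, 16, 16)
temb = torch.randn(1, 128)

hidden, skips = down(hidden, temb)  # `skips` collects one tensor per resnet plus the downsampled output
hidden = mid(hidden, temb)          # the downsampler halves the spatial size: (1, 64, 8, 8)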
# Copyright 2022 UC Berkely Team and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim import math import numpy as np from ..configuration_utils import ConfigMixin from .scheduling_utils import SchedulerMixin def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999): """ Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of (1-beta) over time from t = [0,1]. :param num_diffusion_timesteps: the number of betas to produce. :param alpha_bar: a lambda that takes an argument t from 0 to 1 and produces the cumulative product of (1-beta) up to that part of the diffusion process. :param max_beta: the maximum beta to use; use values lower than 1 to prevent singularities. """ def alpha_bar(time_step): return math.cos((time_step + 0.008) / 1.008 * math.pi / 2) ** 2 betas = [] for i in range(num_diffusion_timesteps): t1 = i / num_diffusion_timesteps t2 = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta)) return np.array(betas, dtype=np.float32) class DDPMScheduler(SchedulerMixin, ConfigMixin): def __init__( self, timesteps=1000, beta_start=0.0001, beta_end=0.02, beta_schedule="linear", trained_betas=None, timestep_values=None, variance_type="fixed_small", clip_sample=True, tensor_format="np", ): super().__init__() self.register_to_config( timesteps=timesteps, beta_start=beta_start, beta_end=beta_end, beta_schedule=beta_schedule, trained_betas=trained_betas, timestep_values=timestep_values, variance_type=variance_type, clip_sample=clip_sample, ) if trained_betas is not None: self.betas = np.asarray(trained_betas) elif beta_schedule == "linear": self.betas = np.linspace(beta_start, beta_end, timesteps, dtype=np.float32) elif beta_schedule == "squaredcos_cap_v2": # Glide cosine schedule self.betas = betas_for_alpha_bar(timesteps) else: raise NotImplementedError(f"{beta_schedule} does is not implemented for {self.__class__}") self.alphas = 1.0 - self.betas self.alphas_cumprod = np.cumprod(self.alphas, axis=0) self.one = np.array(1.0) self.set_format(tensor_format=tensor_format) def get_variance(self, t, variance_type=None): alpha_prod_t = self.alphas_cumprod[t] alpha_prod_t_prev = self.alphas_cumprod[t - 1] if t > 0 else self.one # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf) # and sample from it to get previous sample # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample variance = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * self.betas[t] if variance_type is None: variance_type = self.config.variance_type # hacks - were probs added for training stability if variance_type == "fixed_small": variance = self.clip(variance, min_value=1e-20) # for rl-diffuser https://arxiv.org/abs/2205.09991 elif variance_type == "fixed_small_log": variance = self.log(self.clip(variance, min_value=1e-20)) elif variance_type 
== "fixed_large": variance = self.betas[t] elif variance_type == "fixed_large_log": # Glide max_log variance = self.log(self.betas[t]) return variance def step(self, residual, sample, t, predict_epsilon=True): # 1. compute alphas, betas alpha_prod_t = self.alphas_cumprod[t] alpha_prod_t_prev = self.alphas_cumprod[t - 1] if t > 0 else self.one beta_prod_t = 1 - alpha_prod_t beta_prod_t_prev = 1 - alpha_prod_t_prev # 2. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf if predict_epsilon: pred_original_sample = (sample - beta_prod_t ** (0.5) * residual) / alpha_prod_t ** (0.5) else: pred_original_sample = residual # 3. Clip "predicted x_0" if self.config.clip_sample: pred_original_sample = self.clip(pred_original_sample, -1, 1) # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf pred_original_sample_coeff = (alpha_prod_t_prev ** (0.5) * self.betas[t]) / beta_prod_t current_sample_coeff = self.alphas[t] ** (0.5) * beta_prod_t_prev / beta_prod_t # 5. Compute predicted previous sample µ_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample return pred_prev_sample def add_noise(self, original_samples, noise, timesteps): sqrt_alpha_prod = self.alphas_cumprod[timesteps] ** 0.5 sqrt_alpha_prod = self.match_shape(sqrt_alpha_prod, original_samples) sqrt_one_minus_alpha_prod = (1 - self.alphas_cumprod[timesteps]) ** 0.5 sqrt_one_minus_alpha_prod = self.match_shape(sqrt_one_minus_alpha_prod, original_samples) noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise return noisy_samples def __len__(self): return self.config.timesteps
diffusers_all-main
src/diffusers/schedulers/scheduling_ddpm.py
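A minimal denoising-loop sketch (not from the repository) showing how `step` and `get_variance` fit together; the noise-predicting `model` below is a random placeholder standing in for a trained UNet:

import torch

from diffusers.schedulers.scheduling_ddpm import DDPMScheduler

scheduler = DDPMScheduler(timesteps=1000, tensor_format="pt")

def model(x, t):
    # placeholder noise predictor; a real pipeline would call a trained UNet here
    return torch.randn_like(x)

sample = torch.randn(1, 3, 32, 32)
for t in reversed(range(len(scheduler))):
    residual = model(sample, t)
    sample = scheduler.step(residual, sample, t)  # predicted mean of x_{t-1}
    if t > 0:
        # add the scheduler's variance except at the final step
        sample = sample + scheduler.get_variance(t) ** 0.5 * torch.randn_like(sample)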
# Copyright 2022 Google Brain and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch # TODO(Patrick, Anton, Suraj) - make scheduler framework indepedent and clean-up a bit import numpy as np import torch from ..configuration_utils import ConfigMixin from .scheduling_utils import SchedulerMixin class ScoreSdeVpScheduler(SchedulerMixin, ConfigMixin): def __init__(self, beta_min=0.1, beta_max=20, sampling_eps=1e-3, tensor_format="np"): super().__init__() self.register_to_config( beta_min=beta_min, beta_max=beta_max, sampling_eps=sampling_eps, ) self.sigmas = None self.discrete_sigmas = None self.timesteps = None def set_timesteps(self, num_inference_steps): self.timesteps = torch.linspace(1, self.config.sampling_eps, num_inference_steps) def step_pred(self, result, x, t): # TODO(Patrick) better comments + non-PyTorch # postprocess model result log_mean_coeff = ( -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min ) std = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff)) result = -result / std[:, None, None, None] # compute dt = -1.0 / len(self.timesteps) beta_t = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min) drift = -0.5 * beta_t[:, None, None, None] * x diffusion = torch.sqrt(beta_t) drift = drift - diffusion[:, None, None, None] ** 2 * result x_mean = x + drift * dt # add noise z = torch.randn_like(x) x = x_mean + diffusion[:, None, None, None] * np.sqrt(-dt) * z return x, x_mean
diffusers_all-main
src/diffusers/schedulers/scheduling_sde_vp.py
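A predictor-only sampling sketch (not from the repository) for the VP scheduler above; `step_pred` expects a batched continuous time, and the score model here is a zero placeholder:

import torch

from diffusers.schedulers.scheduling_sde_vp import ScoreSdeVpScheduler

scheduler = ScoreSdeVpScheduler()
scheduler.set_timesteps(num_inference_steps=10)

def score_model(x, t):
    return torch.zeros_like(x)  # placeholder score network

x = torch.randn(2, 3, 32, 32)
for t in scheduler.timesteps:
    t_batch = t * torch.ones(x.shape[0])           # one continuous time value per batch element
    result = score_model(x, t_batch)
    x, x_mean = scheduler.step_pred(result, x, t_batch)  # x_mean is the noise-free estimate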
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np from ..configuration_utils import ConfigMixin from .scheduling_utils import SchedulerMixin class GradTTSScheduler(SchedulerMixin, ConfigMixin): def __init__( self, beta_start=0.05, beta_end=20, tensor_format="np", ): super().__init__() self.register_to_config( beta_start=beta_start, beta_end=beta_end, ) self.set_format(tensor_format=tensor_format) self.betas = None def get_timesteps(self, num_inference_steps): return np.array([(t + 0.5) / num_inference_steps for t in range(num_inference_steps)]) def set_betas(self, num_inference_steps): timesteps = self.get_timesteps(num_inference_steps) self.betas = np.array([self.beta_start + (self.beta_end - self.beta_start) * t for t in timesteps]) def step(self, residual, sample, t, num_inference_steps): # This is a VE scheduler from https://arxiv.org/pdf/2011.13456.pdf (see Algorithm 2 in Appendix) if self.betas is None: self.set_betas(num_inference_steps) beta_t = self.betas[t] beta_t_deriv = beta_t / num_inference_steps sample_deriv = residual * beta_t_deriv / 2 sample = sample + sample_deriv return sample
diffusers_all-main
src/diffusers/schedulers/scheduling_grad_tts.py
# flake8: noqa # There's no way to ignore "F401 '...' imported but unused" warnings in this # module, but to preserve other warnings. So, don't check this module at all. # Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from .scheduling_ddim import DDIMScheduler from .scheduling_ddpm import DDPMScheduler from .scheduling_grad_tts import GradTTSScheduler from .scheduling_pndm import PNDMScheduler from .scheduling_sde_ve import ScoreSdeVeScheduler from .scheduling_sde_vp import ScoreSdeVpScheduler from .scheduling_utils import SchedulerMixin
diffusers_all-main
src/diffusers/schedulers/__init__.py
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Union import numpy as np import torch SCHEDULER_CONFIG_NAME = "scheduler_config.json" class SchedulerMixin: config_name = SCHEDULER_CONFIG_NAME def set_format(self, tensor_format="pt"): self.tensor_format = tensor_format if tensor_format == "pt": for key, value in vars(self).items(): if isinstance(value, np.ndarray): setattr(self, key, torch.from_numpy(value)) return self def clip(self, tensor, min_value=None, max_value=None): tensor_format = getattr(self, "tensor_format", "pt") if tensor_format == "np": return np.clip(tensor, min_value, max_value) elif tensor_format == "pt": return torch.clamp(tensor, min_value, max_value) raise ValueError(f"`self.tensor_format`: {self.tensor_format} is not valid.") def log(self, tensor): tensor_format = getattr(self, "tensor_format", "pt") if tensor_format == "np": return np.log(tensor) elif tensor_format == "pt": return torch.log(tensor) raise ValueError(f"`self.tensor_format`: {self.tensor_format} is not valid.") def match_shape(self, values: Union[np.ndarray, torch.Tensor], broadcast_array: Union[np.ndarray, torch.Tensor]): """ Turns a 1-D array into an array or tensor with len(broadcast_array.shape) dims. Args: timesteps: an array or tensor of values to extract. broadcast_array: an array with a larger shape of K dimensions with the batch dimension equal to the length of timesteps. Returns: a tensor of shape [batch_size, 1, ...] where the shape has K dims. """ tensor_format = getattr(self, "tensor_format", "pt") values = values.flatten() while len(values.shape) < len(broadcast_array.shape): values = values[..., None] if tensor_format == "pt": values = values.to(broadcast_array.device) return values
diffusers_all-main
src/diffusers/schedulers/scheduling_utils.py
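A small illustration (not from the repository) of `match_shape`, which pads a per-sample vector of coefficients so it broadcasts against an image batch:

import torch

from diffusers.schedulers.scheduling_utils import SchedulerMixin

mixin = SchedulerMixin()
mixin.tensor_format = "pt"

values = torch.tensor([0.1, 0.2])           # one coefficient per batch element
batch = torch.randn(2, 3, 32, 32)

coeffs = mixin.match_shape(values, batch)   # shape (2, 1, 1, 1)
scaled = coeffs * batch                     # broadcasts over channels and spatial dims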
# Copyright 2022 Google Brain and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch # TODO(Patrick, Anton, Suraj) - make scheduler framework indepedent and clean-up a bit import numpy as np import torch from ..configuration_utils import ConfigMixin from .scheduling_utils import SchedulerMixin class ScoreSdeVeScheduler(SchedulerMixin, ConfigMixin): def __init__(self, snr=0.15, sigma_min=0.01, sigma_max=1348, sampling_eps=1e-5, tensor_format="np"): super().__init__() self.register_to_config( snr=snr, sigma_min=sigma_min, sigma_max=sigma_max, sampling_eps=sampling_eps, ) self.sigmas = None self.discrete_sigmas = None self.timesteps = None def set_timesteps(self, num_inference_steps): self.timesteps = torch.linspace(1, self.config.sampling_eps, num_inference_steps) def set_sigmas(self, num_inference_steps): if self.timesteps is None: self.set_timesteps(num_inference_steps) self.discrete_sigmas = torch.exp( torch.linspace(np.log(self.config.sigma_min), np.log(self.config.sigma_max), num_inference_steps) ) self.sigmas = torch.tensor( [self.config.sigma_min * (self.config.sigma_max / self.sigma_min) ** t for t in self.timesteps] ) def step_pred(self, result, x, t): # TODO(Patrick) better comments + non-PyTorch t = t * torch.ones(x.shape[0], device=x.device) timestep = (t * (len(self.timesteps) - 1)).long() sigma = self.discrete_sigmas.to(t.device)[timestep] adjacent_sigma = torch.where( timestep == 0, torch.zeros_like(t), self.discrete_sigmas[timestep - 1].to(timestep.device) ) f = torch.zeros_like(x) G = torch.sqrt(sigma**2 - adjacent_sigma**2) f = f - G[:, None, None, None] ** 2 * result z = torch.randn_like(x) x_mean = x - f x = x_mean + G[:, None, None, None] * z return x, x_mean def step_correct(self, result, x): # TODO(Patrick) better comments + non-PyTorch noise = torch.randn_like(x) grad_norm = torch.norm(result.reshape(result.shape[0], -1), dim=-1).mean() noise_norm = torch.norm(noise.reshape(noise.shape[0], -1), dim=-1).mean() step_size = (self.config.snr * noise_norm / grad_norm) ** 2 * 2 step_size = step_size * torch.ones(x.shape[0], device=x.device) x_mean = x + step_size[:, None, None, None] * result x = x_mean + torch.sqrt(step_size * 2)[:, None, None, None] * noise return x
diffusers_all-main
src/diffusers/schedulers/scheduling_sde_ve.py
# Copyright 2022 Stanford University Team and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion # and https://github.com/hojonathanho/diffusion import math import numpy as np from ..configuration_utils import ConfigMixin from .scheduling_utils import SchedulerMixin def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999): """ Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of (1-beta) over time from t = [0,1]. :param num_diffusion_timesteps: the number of betas to produce. :param alpha_bar: a lambda that takes an argument t from 0 to 1 and produces the cumulative product of (1-beta) up to that part of the diffusion process. :param max_beta: the maximum beta to use; use values lower than 1 to prevent singularities. """ def alpha_bar(time_step): return math.cos((time_step + 0.008) / 1.008 * math.pi / 2) ** 2 betas = [] for i in range(num_diffusion_timesteps): t1 = i / num_diffusion_timesteps t2 = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta)) return np.array(betas, dtype=np.float32) class DDIMScheduler(SchedulerMixin, ConfigMixin): def __init__( self, timesteps=1000, beta_start=0.0001, beta_end=0.02, beta_schedule="linear", trained_betas=None, timestep_values=None, clip_sample=True, tensor_format="np", ): super().__init__() self.register_to_config( timesteps=timesteps, beta_start=beta_start, beta_end=beta_end, beta_schedule=beta_schedule, trained_betas=trained_betas, timestep_values=timestep_values, clip_sample=clip_sample, ) if beta_schedule == "linear": self.betas = np.linspace(beta_start, beta_end, timesteps, dtype=np.float32) elif beta_schedule == "scaled_linear": # this schedule is very specific to the latent diffusion model. 
self.betas = np.linspace(beta_start**0.5, beta_end**0.5, timesteps, dtype=np.float32) ** 2 elif beta_schedule == "squaredcos_cap_v2": # Glide cosine schedule self.betas = betas_for_alpha_bar(timesteps) else: raise NotImplementedError(f"{beta_schedule} does is not implemented for {self.__class__}") self.alphas = 1.0 - self.betas self.alphas_cumprod = np.cumprod(self.alphas, axis=0) self.one = np.array(1.0) self.set_format(tensor_format=tensor_format) def get_variance(self, t, num_inference_steps): orig_t = self.config.timesteps // num_inference_steps * t orig_prev_t = self.config.timesteps // num_inference_steps * (t - 1) if t > 0 else -1 alpha_prod_t = self.alphas_cumprod[orig_t] alpha_prod_t_prev = self.alphas_cumprod[orig_prev_t] if orig_prev_t >= 0 else self.one beta_prod_t = 1 - alpha_prod_t beta_prod_t_prev = 1 - alpha_prod_t_prev variance = (beta_prod_t_prev / beta_prod_t) * (1 - alpha_prod_t / alpha_prod_t_prev) return variance def step(self, residual, sample, t, num_inference_steps, eta, use_clipped_residual=False): # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf # Ideally, read DDIM paper in-detail understanding # Notation (<variable name> -> <name in paper> # - pred_noise_t -> e_theta(x_t, t) # - pred_original_sample -> f_theta(x_t, t) or x_0 # - std_dev_t -> sigma_t # - eta -> η # - pred_sample_direction -> "direction pointingc to x_t" # - pred_prev_sample -> "x_t-1" # 1. get actual t and t-1 orig_t = self.config.timesteps // num_inference_steps * t orig_prev_t = self.config.timesteps // num_inference_steps * (t - 1) if t > 0 else -1 # 2. compute alphas, betas alpha_prod_t = self.alphas_cumprod[orig_t] alpha_prod_t_prev = self.alphas_cumprod[orig_prev_t] if orig_prev_t >= 0 else self.one beta_prod_t = 1 - alpha_prod_t # 3. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf pred_original_sample = (sample - beta_prod_t ** (0.5) * residual) / alpha_prod_t ** (0.5) # 4. Clip "predicted x_0" if self.config.clip_sample: pred_original_sample = self.clip(pred_original_sample, -1, 1) # 5. compute variance: "sigma_t(η)" -> see formula (16) # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1) variance = self.get_variance(t, num_inference_steps) std_dev_t = eta * variance ** (0.5) if use_clipped_residual: # the residual is always re-derived from the clipped x_0 in Glide residual = (sample - alpha_prod_t ** (0.5) * pred_original_sample) / beta_prod_t ** (0.5) # 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** (0.5) * residual # 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf pred_prev_sample = alpha_prod_t_prev ** (0.5) * pred_original_sample + pred_sample_direction return pred_prev_sample def add_noise(self, original_samples, noise, timesteps): sqrt_alpha_prod = self.alphas_cumprod[timesteps] ** 0.5 sqrt_alpha_prod = self.match_shape(sqrt_alpha_prod, original_samples) sqrt_one_minus_alpha_prod = (1 - self.alphas_cumprod[timesteps]) ** 0.5 sqrt_one_minus_alpha_prod = self.match_shape(sqrt_one_minus_alpha_prod, original_samples) noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise return noisy_samples def __len__(self): return self.config.timesteps
diffusers_all-main
src/diffusers/schedulers/scheduling_ddim.py
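A deterministic (eta=0) DDIM sampling sketch (not from the repository); `num_inference_steps` sub-samples the 1000 training steps, and the model is again a random placeholder:

import torch

from diffusers.schedulers.scheduling_ddim import DDIMScheduler

scheduler = DDIMScheduler(tensor_format="pt")
num_inference_steps = 50

def model(x, t):
    return torch.randn_like(x)  # placeholder noise predictor

sample = torch.randn(1, 3, 32, 32)
for t in reversed(range(num_inference_steps)):
    residual = model(sample, t)
    sample = scheduler.step(residual, sample, t, num_inference_steps, eta=0.0)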
# Copyright 2022 Zhejiang University Team and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim import math import numpy as np from ..configuration_utils import ConfigMixin from .scheduling_utils import SchedulerMixin def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999): """ Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of (1-beta) over time from t = [0,1]. :param num_diffusion_timesteps: the number of betas to produce. :param alpha_bar: a lambda that takes an argument t from 0 to 1 and produces the cumulative product of (1-beta) up to that part of the diffusion process. :param max_beta: the maximum beta to use; use values lower than 1 to prevent singularities. """ def alpha_bar(time_step): return math.cos((time_step + 0.008) / 1.008 * math.pi / 2) ** 2 betas = [] for i in range(num_diffusion_timesteps): t1 = i / num_diffusion_timesteps t2 = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta)) return np.array(betas, dtype=np.float32) class PNDMScheduler(SchedulerMixin, ConfigMixin): def __init__( self, timesteps=1000, beta_start=0.0001, beta_end=0.02, beta_schedule="linear", tensor_format="np", ): super().__init__() self.register_to_config( timesteps=timesteps, beta_start=beta_start, beta_end=beta_end, beta_schedule=beta_schedule, ) if beta_schedule == "linear": self.betas = np.linspace(beta_start, beta_end, timesteps, dtype=np.float32) elif beta_schedule == "squaredcos_cap_v2": # Glide cosine schedule self.betas = betas_for_alpha_bar(timesteps) else: raise NotImplementedError(f"{beta_schedule} does is not implemented for {self.__class__}") self.alphas = 1.0 - self.betas self.alphas_cumprod = np.cumprod(self.alphas, axis=0) self.one = np.array(1.0) self.set_format(tensor_format=tensor_format) # For now we only support F-PNDM, i.e. the runge-kutta method # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf # mainly at formula (9), (12), (13) and the Algorithm 2. 
self.pndm_order = 4 # running values self.cur_residual = 0 self.cur_sample = None self.ets = [] self.prk_time_steps = {} self.time_steps = {} self.set_prk_mode() def get_prk_time_steps(self, num_inference_steps): if num_inference_steps in self.prk_time_steps: return self.prk_time_steps[num_inference_steps] inference_step_times = list(range(0, self.config.timesteps, self.config.timesteps // num_inference_steps)) prk_time_steps = np.array(inference_step_times[-self.pndm_order :]).repeat(2) + np.tile( np.array([0, self.config.timesteps // num_inference_steps // 2]), self.pndm_order ) self.prk_time_steps[num_inference_steps] = list(reversed(prk_time_steps[:-1].repeat(2)[1:-1])) return self.prk_time_steps[num_inference_steps] def get_time_steps(self, num_inference_steps): if num_inference_steps in self.time_steps: return self.time_steps[num_inference_steps] inference_step_times = list(range(0, self.config.timesteps, self.config.timesteps // num_inference_steps)) self.time_steps[num_inference_steps] = list(reversed(inference_step_times[:-3])) return self.time_steps[num_inference_steps] def set_prk_mode(self): self.mode = "prk" def set_plms_mode(self): self.mode = "plms" def step(self, *args, **kwargs): if self.mode == "prk": return self.step_prk(*args, **kwargs) if self.mode == "plms": return self.step_plms(*args, **kwargs) raise ValueError(f"mode {self.mode} does not exist.") def step_prk(self, residual, sample, t, num_inference_steps): prk_time_steps = self.get_prk_time_steps(num_inference_steps) t_orig = prk_time_steps[t // 4 * 4] t_orig_prev = prk_time_steps[min(t + 1, len(prk_time_steps) - 1)] if t % 4 == 0: self.cur_residual += 1 / 6 * residual self.ets.append(residual) self.cur_sample = sample elif (t - 1) % 4 == 0: self.cur_residual += 1 / 3 * residual elif (t - 2) % 4 == 0: self.cur_residual += 1 / 3 * residual elif (t - 3) % 4 == 0: residual = self.cur_residual + 1 / 6 * residual self.cur_residual = 0 # cur_sample should not be `None` cur_sample = self.cur_sample if self.cur_sample is not None else sample return self.get_prev_sample(cur_sample, t_orig, t_orig_prev, residual) def step_plms(self, residual, sample, t, num_inference_steps): if len(self.ets) < 3: raise ValueError( f"{self.__class__} can only be run AFTER scheduler has been run " "in 'prk' mode for at least 12 iterations " "See: https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/pipeline_pndm.py " "for more information." 
) timesteps = self.get_time_steps(num_inference_steps) t_orig = timesteps[t] t_orig_prev = timesteps[min(t + 1, len(timesteps) - 1)] self.ets.append(residual) residual = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4]) return self.get_prev_sample(sample, t_orig, t_orig_prev, residual) def get_prev_sample(self, sample, t_orig, t_orig_prev, residual): # See formula (9) of PNDM paper https://arxiv.org/pdf/2202.09778.pdf # this function computes x_(t−δ) using the formula of (9) # Note that x_t needs to be added to both sides of the equation # Notation (<variable name> -> <name in paper> # alpha_prod_t -> α_t # alpha_prod_t_prev -> α_(t−δ) # beta_prod_t -> (1 - α_t) # beta_prod_t_prev -> (1 - α_(t−δ)) # sample -> x_t # residual -> e_θ(x_t, t) # prev_sample -> x_(t−δ) alpha_prod_t = self.alphas_cumprod[t_orig + 1] alpha_prod_t_prev = self.alphas_cumprod[t_orig_prev + 1] beta_prod_t = 1 - alpha_prod_t beta_prod_t_prev = 1 - alpha_prod_t_prev # corresponds to (α_(t−δ) - α_t) divided by # denominator of x_t in formula (9) and plus 1 # Note: (α_(t−δ) - α_t) / (sqrt(α_t) * (sqrt(α_(t−δ)) + sqr(α_t))) = # sqrt(α_(t−δ)) / sqrt(α_t)) sample_coeff = (alpha_prod_t_prev / alpha_prod_t) ** (0.5) # corresponds to denominator of e_θ(x_t, t) in formula (9) residual_denom_coeff = alpha_prod_t * beta_prod_t_prev ** (0.5) + ( alpha_prod_t * beta_prod_t * alpha_prod_t_prev ) ** (0.5) # full formula (9) prev_sample = sample_coeff * sample - (alpha_prod_t_prev - alpha_prod_t) * residual / residual_denom_coeff return prev_sample def __len__(self): return self.config.timesteps
diffusers_all-main
src/diffusers/schedulers/scheduling_pndm.py
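A sampling sketch (not from the repository, loosely following the PNDM pipeline referenced in the error message above): the first iterations run in Runge-Kutta ("prk") mode to warm up `self.ets`, after which the cheaper linear multistep ("plms") mode takes over. The model is a random placeholder.

import torch

from diffusers.schedulers.scheduling_pndm import PNDMScheduler

scheduler = PNDMScheduler(tensor_format="pt")
num_inference_steps = 50

def model(x, t):
    return torch.randn_like(x)  # placeholder noise predictor

sample = torch.randn(1, 3, 32, 32)

prk_time_steps = scheduler.get_prk_time_steps(num_inference_steps)
for t in range(len(prk_time_steps)):
    residual = model(sample, prk_time_steps[t])
    sample = scheduler.step_prk(residual, sample, t, num_inference_steps)

timesteps = scheduler.get_time_steps(num_inference_steps)
for t in range(len(timesteps)):
    residual = model(sample, timesteps[t])
    sample = scheduler.step_plms(residual, sample, t, num_inference_steps)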
from setuptools import setup, find_packages

with open("requirements.txt", "r") as f:
    requirements = f.read().splitlines()

setup(name="huggan", install_requires=requirements, packages=find_packages())
community-events-main
setup.py
from pathlib import Path

TEMPLATE_MODEL_CARD_PATH = Path(__file__).parent.absolute() / 'model_card_template.md'
community-events-main
huggan/__init__.py
import argparse

from datasets import load_dataset
from tqdm import tqdm

# choose a dataset
available_datasets = ["apple2orange", "summer2winter_yosemite", "horse2zebra", "monet2photo", "cezanne2photo",
                      "ukiyoe2photo", "vangogh2photo", "maps", "cityscapes", "facades", "iphone2dslr_flower",
                      "ae_photos", "grumpifycat"]


def upload_dataset(dataset_name):
    if dataset_name not in available_datasets:
        raise ValueError("Please choose one of the supported datasets:", available_datasets)

    # step 1: load dataset
    dataset = load_dataset(
        "imagefolder",
        data_files=f"https://people.eecs.berkeley.edu/~taesung_park/CycleGAN/datasets/{dataset_name}.zip",
    )

    # step 2: push to hub
    dataset.push_to_hub(f"huggan/{dataset_name}")


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--dataset", default="apple2orange", type=str, help="Dataset to upload")
    args = parser.parse_args()
    upload_dataset(args.dataset)


if __name__ == "__main__":
    main()
community-events-main
huggan/utils/push_to_hub_example.py
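For reference (invocation shown for illustration only, not part of the file): after `huggingface-cli login`, the script above would typically be run as `python huggan/utils/push_to_hub_example.py --dataset horse2zebra`, which downloads the CycleGAN zip and pushes it to the `huggan/horse2zebra` dataset repository.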
community-events-main
huggan/utils/__init__.py
from typing import Optional

from huggingface_hub import HfFolder, whoami


def get_full_repo_name(model_id: str, organization: Optional[str] = None, token: Optional[str] = None):
    if token is None:
        token = HfFolder.get_token()
    if organization is None:
        username = whoami(token)["name"]
        return f"{username}/{model_id}"
    else:
        return f"{organization}/{model_id}"
community-events-main
huggan/utils/hub.py
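A quick illustration (repository names are placeholders) of how `get_full_repo_name` resolves a repository id:

from huggan.utils.hub import get_full_repo_name

print(get_full_repo_name("my-gan", organization="huggan"))  # -> "huggan/my-gan"
# Without an organization, the token saved by `huggingface-cli login` is used to
# look up the username: get_full_repo_name("my-gan") -> "<username>/my-gan"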
community-events-main
huggan/tensorflow/dcgan/__init__.py
import tensorflow as tf import matplotlib.pyplot as plt import numpy as np import tensorflow as tf from pathlib import Path import os import PIL from tqdm.auto import tqdm import argparse from tensorflow.keras import layers from datasets import load_dataset from transformers import DefaultDataCollator from huggingface_hub import push_to_hub_keras def parse_args(args=None): parser = argparse.ArgumentParser() parser.add_argument("--dataset", type=str, default="mnist", help="Dataset to load from the HuggingFace hub.") parser.add_argument("--batch_size", type=int, default=128, help="Batch size to use during training") parser.add_argument("--number_of_examples_to_generate", type=int, default=4, help="Number of examples to be generated in inference mode") parser.add_argument( "--generator_hidden_size", type=int, default=28, help="Hidden size of the generator's feature maps.", ) parser.add_argument("--latent_dim", type=int, default=100, help="Dimensionality of the latent space.") parser.add_argument( "--discriminator_hidden_size", type=int, default=28, help="Hidden size of the discriminator's feature maps.", ) parser.add_argument( "--image_size", type=int, default=28, help="Spatial size to use when resizing images for training.", ) parser.add_argument( "--num_channels", type=int, default=3, help="Number of channels in the training images. For color images this is 3.", ) parser.add_argument("--num_epochs", type=int, default=5, help="number of epochs of training") parser.add_argument("--output_dir", type=Path, default=Path("./output"), help="Name of the directory to dump generated images during training.") parser.add_argument( "--push_to_hub", action="store_true", help="Whether to push the model to the HuggingFace hub after training.", ) parser.add_argument( "--model_name", default=None, type=str, help="Name of the model on the hub.", ) parser.add_argument( "--organization_name", default="huggan", type=str, help="Organization name to push to, in case args.push_to_hub is specified.", ) args = parser.parse_args() if args.push_to_hub: assert args.output_dir is not None, "Need an `output_dir` to create a repo when `--push_to_hub` is passed." assert args.model_name is not None, "Need a `model_name` to create a repo when `--push_to_hub` is passed." 
if args.output_dir is not None: os.makedirs(args.output_dir, exist_ok=True) return args def stack_generator_layers(model, units): model.add(layers.Conv2DTranspose(units, (4, 4), strides=2, padding='same', use_bias=False)) model.add(layers.BatchNormalization()) model.add(layers.LeakyReLU()) return model def create_generator(channel, hidden_size, latent_dim): generator = tf.keras.Sequential() generator.add(layers.Input((latent_dim,))) # generator.add(layers.Dense(hidden_size*4*7*7, use_bias=False, input_shape=(100,))) generator.add(layers.LeakyReLU()) generator.add(layers.Reshape((7, 7, hidden_size*4))) units = [hidden_size*2, hidden_size*1] for unit in units: generator = stack_generator_layers(generator, unit) generator.add(layers.Conv2DTranspose(args.num_channels, (4, 4), strides=1, padding='same', use_bias=False, activation='tanh')) return generator def stack_discriminator_layers(model, units, use_batch_norm=False, use_dropout=False): model.add(layers.Conv2D(units, (4, 4), strides=(2, 2), padding='same')) if use_batch_norm: model.add(layers.BatchNormalization()) if use_dropout: model.add(layers.Dropout(0.1)) model.add(layers.LeakyReLU()) return model def create_discriminator(channel, hidden_size, args): discriminator = tf.keras.Sequential() discriminator.add(layers.Input((args.image_size, args.image_size, args.num_channels))) discriminator = stack_discriminator_layers(discriminator, hidden_size, use_batch_norm = True, use_dropout = True) discriminator = stack_discriminator_layers(discriminator, hidden_size * 2) discriminator = stack_discriminator_layers(discriminator,True, hidden_size*4) discriminator = stack_discriminator_layers(discriminator,True, hidden_size*16) discriminator.add(layers.Flatten()) discriminator.add(layers.Dense(1)) return discriminator def discriminator_loss(real_image, generated_image): real_loss = cross_entropy(tf.ones_like(real_image), real_image) fake_loss = cross_entropy(tf.zeros_like(generated_image), generated_image) total_loss = real_loss + fake_loss return total_loss @tf.function def train_step(images): noise = tf.random.normal([128, 100]) with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape: generated_images = generator(noise, training=True) real_image = discriminator(images, training=True) generated_image = discriminator(generated_images, training=True) # calculate loss inside train step gen_loss = cross_entropy(tf.ones_like(generated_image), generated_image) disc_loss = discriminator_loss(real_image, generated_image) gradients_of_generator = gen_tape.gradient(gen_loss, generator.trainable_variables) gradients_of_discriminator = disc_tape.gradient(disc_loss, discriminator.trainable_variables) generator_optimizer.apply_gradients(zip(gradients_of_generator, generator.trainable_variables)) discriminator_optimizer.apply_gradients(zip(gradients_of_discriminator, discriminator.trainable_variables)) def generate_and_save_images(model, epoch, test_input, output_dir, number_of_examples_to_generate): predictions = model(test_input, training=False) fig = plt.figure(figsize=(number_of_examples_to_generate*4, number_of_examples_to_generate*16)) for i in range(predictions.shape[0]): plt.subplot(1, number_of_examples_to_generate, i+1) if args.num_channels == 1: plt.imshow(predictions[i, :, :, :], cmap='gray') else: plt.imshow(predictions[i, :, :, :]) plt.axis('off') plt.savefig(f'{output_dir}/image_at_epoch_{epoch}.png') def train(dataset, epochs, output_dir, args): for epoch in range(epochs): print("Epoch:", epoch) for image_batch in tqdm(dataset): 
train_step(image_batch) generate_and_save_images(generator, epoch + 1, seed, output_dir, args.number_of_examples_to_generate) def preprocess(examples): images = (np.asarray(examples["image"]).astype('float32')- 127.5) / 127.5 images = np.expand_dims(images, -1) examples["pixel_values"] = images return examples def preprocess_images(dataset, args): data_collator = DefaultDataCollator(return_tensors="tf") processed_dataset = dataset.map(preprocess) tf_train_dataset = processed_dataset["train"].to_tf_dataset( columns=['pixel_values'], shuffle=True, batch_size=args.batch_size, collate_fn=data_collator) return tf_train_dataset if __name__ == "__main__": args = parse_args() print("Downloading dataset..") dataset = load_dataset(args.dataset) dataset= preprocess_images(dataset, args) print("Training model..") generator = create_generator(args.num_channels, args.generator_hidden_size, args.latent_dim) discriminator = create_discriminator(args.num_channels, args.discriminator_hidden_size, args) generator_optimizer = tf.keras.optimizers.Adam(1e-4) discriminator_optimizer = tf.keras.optimizers.Adam(1e-4) # create seed with dimensions of number of examples to generate and noise seed = tf.random.normal([args.number_of_examples_to_generate, args.latent_dim]) cross_entropy = tf.keras.losses.BinaryCrossentropy(from_logits=True) train(dataset, args.num_epochs, args.output_dir, args) if args.push_to_hub is not None: push_to_hub_keras(generator, repo_path_or_name=f"{args.output_dir}/{args.model_name}",organization=args.organization_name)
community-events-main
huggan/tensorflow/dcgan/train.py
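An illustrative invocation of the training script above (flags correspond to the `parse_args` definitions; the model name is a placeholder): `python huggan/tensorflow/dcgan/train.py --dataset mnist --num_epochs 5 --output_dir ./output --push_to_hub --model_name my-dcgan`.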
community-events-main
huggan/pytorch/__init__.py
from pathlib import Path from re import TEMPLATE from typing import Optional, Union import os from huggingface_hub import PyTorchModelHubMixin, HfApi, HfFolder, Repository from huggan import TEMPLATE_MODEL_CARD_PATH class HugGANModelHubMixin(PyTorchModelHubMixin): """A mixin to push PyTorch Models to the Hugging Face Hub. This mixin was adapted from the PyTorchModelHubMixin to also push a template README.md for the HugGAN sprint. """ def push_to_hub( self, repo_path_or_name: Optional[str] = None, repo_url: Optional[str] = None, commit_message: Optional[str] = "Add model", organization: Optional[str] = None, private: Optional[bool] = None, api_endpoint: Optional[str] = None, use_auth_token: Optional[Union[bool, str]] = None, git_user: Optional[str] = None, git_email: Optional[str] = None, config: Optional[dict] = None, skip_lfs_files: bool = False, default_model_card: Optional[str] = TEMPLATE_MODEL_CARD_PATH ) -> str: """ Upload model checkpoint or tokenizer files to the Hub while synchronizing a local clone of the repo in `repo_path_or_name`. Parameters: repo_path_or_name (`str`, *optional*): Can either be a repository name for your model or tokenizer in the Hub or a path to a local folder (in which case the repository will have the name of that local folder). If not specified, will default to the name given by `repo_url` and a local directory with that name will be created. repo_url (`str`, *optional*): Specify this in case you want to push to an existing repository in the hub. If unspecified, a new repository will be created in your namespace (unless you specify an `organization`) with `repo_name`. commit_message (`str`, *optional*): Message to commit while pushing. Will default to `"add config"`, `"add tokenizer"` or `"add model"` depending on the type of the class. organization (`str`, *optional*): Organization in which you want to push your model or tokenizer (you must be a member of this organization). private (`bool`, *optional*): Whether the repository created should be private. api_endpoint (`str`, *optional*): The API endpoint to use when pushing the model to the hub. use_auth_token (`bool` or `str`, *optional*): The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated when running `transformers-cli login` (stored in `~/.huggingface`). Will default to `True` if `repo_url` is not specified. git_user (`str`, *optional*): will override the `git config user.name` for committing and pushing files to the hub. git_email (`str`, *optional*): will override the `git config user.email` for committing and pushing files to the hub. config (`dict`, *optional*): Configuration object to be saved alongside the model weights. default_model_card (`str`, *optional*): Path to a markdown file to use as your default model card. Returns: The url of the commit of your model in the given repository. """ if repo_path_or_name is None and repo_url is None: raise ValueError( "You need to specify a `repo_path_or_name` or a `repo_url`." ) if use_auth_token is None and repo_url is None: token = HfFolder.get_token() if token is None: raise ValueError( "You must login to the Hugging Face hub on this computer by typing `huggingface-cli login` and " "entering your credentials to use `use_auth_token=True`. Alternatively, you can pass your own " "token as the `use_auth_token` argument." 
) elif isinstance(use_auth_token, str): token = use_auth_token else: token = None if repo_path_or_name is None: repo_path_or_name = repo_url.split("/")[-1] # If no URL is passed and there's no path to a directory containing files, create a repo if repo_url is None and not os.path.exists(repo_path_or_name): repo_id = Path(repo_path_or_name).name if organization: repo_id = f"{organization}/{repo_id}" repo_url = HfApi(endpoint=api_endpoint).create_repo( repo_id=repo_id, token=token, private=private, repo_type=None, exist_ok=True, ) repo = Repository( repo_path_or_name, clone_from=repo_url, use_auth_token=use_auth_token, git_user=git_user, git_email=git_email, skip_lfs_files=skip_lfs_files ) repo.git_pull(rebase=True) # Save the files in the cloned repo self.save_pretrained(repo_path_or_name, config=config) model_card_path = Path(repo_path_or_name) / 'README.md' if not model_card_path.exists(): model_card_path.write_text(TEMPLATE_MODEL_CARD_PATH.read_text()) # Commit and push! repo.git_add() repo.git_commit(commit_message) return repo.git_push()
community-events-main
huggan/pytorch/huggan_mixin.py
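A minimal sketch (class and repository names are hypothetical) of how a model would opt into the mixin; the actual push is commented out because it requires Hub credentials:

import torch.nn as nn

from huggan.pytorch.huggan_mixin import HugGANModelHubMixin


class Generator(nn.Module, HugGANModelHubMixin):
    def __init__(self, latent_dim: int = 100):
        super().__init__()
        self.latent_dim = latent_dim
        self.net = nn.Linear(latent_dim, 784)

    def forward(self, z):
        return self.net(z)


generator = Generator()
# generator.push_to_hub(repo_path_or_name="my-dcgan", organization="huggan")
# This saves the weights, writes the template model card if none exists, and pushes.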
community-events-main
huggan/pytorch/metrics/__init__.py
import torch import torch.nn as nn import torch.nn.functional as F import torchvision try: from torchvision.models.utils import load_state_dict_from_url except ImportError: from torch.utils.model_zoo import load_url as load_state_dict_from_url # Inception weights ported to Pytorch from # http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz FID_WEIGHTS_URL = 'https://github.com/mseitzer/pytorch-fid/releases/download/fid_weights/pt_inception-2015-12-05-6726825d.pth' # noqa: E501 class InceptionV3(nn.Module): """Pretrained InceptionV3 network returning feature maps""" # Index of default block of inception to return, # corresponds to output of final average pooling DEFAULT_BLOCK_INDEX = 3 # Maps feature dimensionality to their output blocks indices BLOCK_INDEX_BY_DIM = { 64: 0, # First max pooling features 192: 1, # Second max pooling featurs 768: 2, # Pre-aux classifier features 2048: 3 # Final average pooling features } def __init__(self, output_blocks=(DEFAULT_BLOCK_INDEX,), resize_input=True, normalize_input=True, requires_grad=False, use_fid_inception=True): """Build pretrained InceptionV3 Parameters ---------- output_blocks : list of int Indices of blocks to return features of. Possible values are: - 0: corresponds to output of first max pooling - 1: corresponds to output of second max pooling - 2: corresponds to output which is fed to aux classifier - 3: corresponds to output of final average pooling resize_input : bool If true, bilinearly resizes input to width and height 299 before feeding input to model. As the network without fully connected layers is fully convolutional, it should be able to handle inputs of arbitrary size, so resizing might not be strictly needed normalize_input : bool If true, scales the input from range (0, 1) to the range the pretrained Inception network expects, namely (-1, 1) requires_grad : bool If true, parameters of the model require gradients. Possibly useful for finetuning the network use_fid_inception : bool If true, uses the pretrained Inception model used in Tensorflow's FID implementation. If false, uses the pretrained Inception model available in torchvision. The FID Inception model has different weights and a slightly different structure from torchvision's Inception model. If you want to compute FID scores, you are strongly advised to set this parameter to true to get comparable results. 
""" super(InceptionV3, self).__init__() self.resize_input = resize_input self.normalize_input = normalize_input self.output_blocks = sorted(output_blocks) self.last_needed_block = max(output_blocks) assert self.last_needed_block <= 3, \ 'Last possible output block index is 3' self.blocks = nn.ModuleList() if use_fid_inception: inception = fid_inception_v3() else: inception = _inception_v3(pretrained=True) # Block 0: input to maxpool1 block0 = [ inception.Conv2d_1a_3x3, inception.Conv2d_2a_3x3, inception.Conv2d_2b_3x3, nn.MaxPool2d(kernel_size=3, stride=2) ] self.blocks.append(nn.Sequential(*block0)) # Block 1: maxpool1 to maxpool2 if self.last_needed_block >= 1: block1 = [ inception.Conv2d_3b_1x1, inception.Conv2d_4a_3x3, nn.MaxPool2d(kernel_size=3, stride=2) ] self.blocks.append(nn.Sequential(*block1)) # Block 2: maxpool2 to aux classifier if self.last_needed_block >= 2: block2 = [ inception.Mixed_5b, inception.Mixed_5c, inception.Mixed_5d, inception.Mixed_6a, inception.Mixed_6b, inception.Mixed_6c, inception.Mixed_6d, inception.Mixed_6e, ] self.blocks.append(nn.Sequential(*block2)) # Block 3: aux classifier to final avgpool if self.last_needed_block >= 3: block3 = [ inception.Mixed_7a, inception.Mixed_7b, inception.Mixed_7c, nn.AdaptiveAvgPool2d(output_size=(1, 1)) ] self.blocks.append(nn.Sequential(*block3)) for param in self.parameters(): param.requires_grad = requires_grad def forward(self, inp): """Get Inception feature maps Parameters ---------- inp : torch.autograd.Variable Input tensor of shape Bx3xHxW. Values are expected to be in range (0, 1) Returns ------- List of torch.autograd.Variable, corresponding to the selected output block, sorted ascending by index """ outp = [] x = inp if self.resize_input: x = F.interpolate(x, size=(299, 299), mode='bilinear', align_corners=False) if self.normalize_input: x = 2 * x - 1 # Scale from range (0, 1) to range (-1, 1) for idx, block in enumerate(self.blocks): x = block(x) if idx in self.output_blocks: outp.append(x) if idx == self.last_needed_block: break return outp def _inception_v3(*args, **kwargs): """Wraps `torchvision.models.inception_v3` Skips default weight inititialization if supported by torchvision version. See https://github.com/mseitzer/pytorch-fid/issues/28. """ try: version = tuple(map(int, torchvision.__version__.split('.')[:2])) except ValueError: # Just a caution against weird version strings version = (0,) if version >= (0, 6): kwargs['init_weights'] = False return torchvision.models.inception_v3(*args, **kwargs) def fid_inception_v3(): """Build pretrained Inception model for FID computation The Inception model for FID computation uses a different set of weights and has a slightly different structure than torchvision's Inception. This method first constructs torchvision's Inception and then patches the necessary parts that are different in the FID Inception model. 
""" inception = _inception_v3(num_classes=1008, aux_logits=False, pretrained=False) inception.Mixed_5b = FIDInceptionA(192, pool_features=32) inception.Mixed_5c = FIDInceptionA(256, pool_features=64) inception.Mixed_5d = FIDInceptionA(288, pool_features=64) inception.Mixed_6b = FIDInceptionC(768, channels_7x7=128) inception.Mixed_6c = FIDInceptionC(768, channels_7x7=160) inception.Mixed_6d = FIDInceptionC(768, channels_7x7=160) inception.Mixed_6e = FIDInceptionC(768, channels_7x7=192) inception.Mixed_7b = FIDInceptionE_1(1280) inception.Mixed_7c = FIDInceptionE_2(2048) state_dict = load_state_dict_from_url(FID_WEIGHTS_URL, progress=True) inception.load_state_dict(state_dict) return inception class FIDInceptionA(torchvision.models.inception.InceptionA): """InceptionA block patched for FID computation""" def __init__(self, in_channels, pool_features): super(FIDInceptionA, self).__init__(in_channels, pool_features) def forward(self, x): branch1x1 = self.branch1x1(x) branch5x5 = self.branch5x5_1(x) branch5x5 = self.branch5x5_2(branch5x5) branch3x3dbl = self.branch3x3dbl_1(x) branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl) branch3x3dbl = self.branch3x3dbl_3(branch3x3dbl) # Patch: Tensorflow's average pool does not use the padded zero's in # its average calculation branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1, count_include_pad=False) branch_pool = self.branch_pool(branch_pool) outputs = [branch1x1, branch5x5, branch3x3dbl, branch_pool] return torch.cat(outputs, 1) class FIDInceptionC(torchvision.models.inception.InceptionC): """InceptionC block patched for FID computation""" def __init__(self, in_channels, channels_7x7): super(FIDInceptionC, self).__init__(in_channels, channels_7x7) def forward(self, x): branch1x1 = self.branch1x1(x) branch7x7 = self.branch7x7_1(x) branch7x7 = self.branch7x7_2(branch7x7) branch7x7 = self.branch7x7_3(branch7x7) branch7x7dbl = self.branch7x7dbl_1(x) branch7x7dbl = self.branch7x7dbl_2(branch7x7dbl) branch7x7dbl = self.branch7x7dbl_3(branch7x7dbl) branch7x7dbl = self.branch7x7dbl_4(branch7x7dbl) branch7x7dbl = self.branch7x7dbl_5(branch7x7dbl) # Patch: Tensorflow's average pool does not use the padded zero's in # its average calculation branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1, count_include_pad=False) branch_pool = self.branch_pool(branch_pool) outputs = [branch1x1, branch7x7, branch7x7dbl, branch_pool] return torch.cat(outputs, 1) class FIDInceptionE_1(torchvision.models.inception.InceptionE): """First InceptionE block patched for FID computation""" def __init__(self, in_channels): super(FIDInceptionE_1, self).__init__(in_channels) def forward(self, x): branch1x1 = self.branch1x1(x) branch3x3 = self.branch3x3_1(x) branch3x3 = [ self.branch3x3_2a(branch3x3), self.branch3x3_2b(branch3x3), ] branch3x3 = torch.cat(branch3x3, 1) branch3x3dbl = self.branch3x3dbl_1(x) branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl) branch3x3dbl = [ self.branch3x3dbl_3a(branch3x3dbl), self.branch3x3dbl_3b(branch3x3dbl), ] branch3x3dbl = torch.cat(branch3x3dbl, 1) # Patch: Tensorflow's average pool does not use the padded zero's in # its average calculation branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1, count_include_pad=False) branch_pool = self.branch_pool(branch_pool) outputs = [branch1x1, branch3x3, branch3x3dbl, branch_pool] return torch.cat(outputs, 1) class FIDInceptionE_2(torchvision.models.inception.InceptionE): """Second InceptionE block patched for FID computation""" def __init__(self, in_channels): 
super(FIDInceptionE_2, self).__init__(in_channels) def forward(self, x): branch1x1 = self.branch1x1(x) branch3x3 = self.branch3x3_1(x) branch3x3 = [ self.branch3x3_2a(branch3x3), self.branch3x3_2b(branch3x3), ] branch3x3 = torch.cat(branch3x3, 1) branch3x3dbl = self.branch3x3dbl_1(x) branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl) branch3x3dbl = [ self.branch3x3dbl_3a(branch3x3dbl), self.branch3x3dbl_3b(branch3x3dbl), ] branch3x3dbl = torch.cat(branch3x3dbl, 1) # Patch: The FID Inception model uses max pooling instead of average # pooling. This is likely an error in this specific Inception # implementation, as other Inception models use average pooling here # (which matches the description in the paper). branch_pool = F.max_pool2d(x, kernel_size=3, stride=1, padding=1) branch_pool = self.branch_pool(branch_pool) outputs = [branch1x1, branch3x3, branch3x3dbl, branch_pool] return torch.cat(outputs, 1)
community-events-main
huggan/pytorch/metrics/inception.py
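A minimal, hedged sketch of how the feature extractor above is typically used for FID: request the final pooling block and feed images scaled to (0, 1). The batch size and image size below are illustrative.

import torch

from huggan.pytorch.metrics.inception import InceptionV3

block_idx = InceptionV3.BLOCK_INDEX_BY_DIM[2048]   # final average-pooling features
model = InceptionV3(output_blocks=[block_idx]).eval()

images = torch.rand(8, 3, 64, 64)   # values expected in (0, 1); resized to 299x299 internally
with torch.no_grad():
    features = model(images)[0]     # shape (8, 2048, 1, 1)
print(features.flatten(1).shape)    # torch.Size([8, 2048])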
# sources:
# https://www.kaggle.com/code/ibtesama/gan-in-pytorch-with-fid/notebook
# https://github.com/mseitzer/pytorch-fid/blob/master/src/pytorch_fid/fid_score.py

import numpy as np
from scipy import linalg
from torch.nn.functional import adaptive_avg_pool2d


def calculate_activation_statistics(images, model, batch_size=128, dims=2048):
    model.eval()
    act = np.empty((len(images), dims))

    batch = images
    pred = model(batch)[0]

    # If model output is not scalar, apply global spatial average pooling.
    # This happens if you choose a dimensionality not equal 2048.
    if pred.size(2) != 1 or pred.size(3) != 1:
        pred = adaptive_avg_pool2d(pred, output_size=(1, 1))

    act = pred.cpu().data.numpy().reshape(pred.size(0), -1)

    mu = np.mean(act, axis=0)
    sigma = np.cov(act, rowvar=False)
    return mu, sigma


def calculate_frechet_distance(mu1, sigma1, mu2, sigma2, eps=1e-6):
    """Numpy implementation of the Frechet Distance.
    The Frechet distance between two multivariate Gaussians X_1 ~ N(mu_1, C_1)
    and X_2 ~ N(mu_2, C_2) is
            d^2 = ||mu_1 - mu_2||^2 + Tr(C_1 + C_2 - 2*sqrt(C_1*C_2)).
    """

    mu1 = np.atleast_1d(mu1)
    mu2 = np.atleast_1d(mu2)

    sigma1 = np.atleast_2d(sigma1)
    sigma2 = np.atleast_2d(sigma2)

    assert mu1.shape == mu2.shape, \
        'Training and test mean vectors have different lengths'
    assert sigma1.shape == sigma2.shape, \
        'Training and test covariances have different dimensions'

    diff = mu1 - mu2

    covmean, _ = linalg.sqrtm(sigma1.dot(sigma2), disp=False)
    if not np.isfinite(covmean).all():
        msg = ('fid calculation produces singular product; '
               'adding %s to diagonal of cov estimates') % eps
        print(msg)
        offset = np.eye(sigma1.shape[0]) * eps
        covmean = linalg.sqrtm((sigma1 + offset).dot(sigma2 + offset))

    if np.iscomplexobj(covmean):
        if not np.allclose(np.diagonal(covmean).imag, 0, atol=1e-3):
            m = np.max(np.abs(covmean.imag))
            raise ValueError('Imaginary component {}'.format(m))
        covmean = covmean.real

    tr_covmean = np.trace(covmean)

    return (diff.dot(diff) + np.trace(sigma1)
            + np.trace(sigma2) - 2 * tr_covmean)


def calculate_fretchet(images_real, images_fake, model):
    """Calculate the Fréchet Inception Distance between real and generated images."""
    # calculate statistics (mean and covariance of Inception activations)
    mu_1, std_1 = calculate_activation_statistics(images_real, model)
    mu_2, std_2 = calculate_activation_statistics(images_fake, model)

    # compute distance
    fid_value = calculate_frechet_distance(mu_1, std_1, mu_2, std_2)
    return fid_value
community-events-main
huggan/pytorch/metrics/fid_score.py
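A hedged sketch of the core distance above on synthetic statistics; real statistics would come from `calculate_activation_statistics` with Inception features, but small well-conditioned Gaussians keep the example self-contained and fast.

import numpy as np

from huggan.pytorch.metrics.fid_score import calculate_frechet_distance

rng = np.random.default_rng(0)
feats_real = rng.normal(size=(1000, 8))            # stand-ins for Inception activations
feats_fake = rng.normal(loc=0.5, size=(1000, 8))   # shifted distribution -> positive distance

mu1, sigma1 = feats_real.mean(axis=0), np.cov(feats_real, rowvar=False)
mu2, sigma2 = feats_fake.mean(axis=0), np.cov(feats_fake, rowvar=False)

print(calculate_frechet_distance(mu1, sigma1, mu2, sigma2))  # > 0; exactly 0 only for identical Gaussians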
community-events-main
huggan/pytorch/dcgan/__init__.py
#!/usr/bin/env python # coding=utf-8 # Copyright (c) 2022 PyTorch contributors and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions. import torch.nn as nn from huggan.pytorch.huggan_mixin import HugGANModelHubMixin class Generator(nn.Module, HugGANModelHubMixin): def __init__(self, num_channels=3, latent_dim=100, hidden_size=64): super(Generator, self).__init__() self.model = nn.Sequential( # input is Z, going into a convolution nn.ConvTranspose2d(latent_dim, hidden_size * 8, 4, 1, 0, bias=False), nn.BatchNorm2d(hidden_size * 8), nn.ReLU(True), # state size. (hidden_size*8) x 4 x 4 nn.ConvTranspose2d(hidden_size * 8, hidden_size * 4, 4, 2, 1, bias=False), nn.BatchNorm2d(hidden_size * 4), nn.ReLU(True), # state size. (hidden_size*4) x 8 x 8 nn.ConvTranspose2d(hidden_size * 4, hidden_size * 2, 4, 2, 1, bias=False), nn.BatchNorm2d(hidden_size * 2), nn.ReLU(True), # state size. (hidden_size*2) x 16 x 16 nn.ConvTranspose2d(hidden_size * 2, hidden_size, 4, 2, 1, bias=False), nn.BatchNorm2d(hidden_size), nn.ReLU(True), # state size. (hidden_size) x 32 x 32 nn.ConvTranspose2d(hidden_size, num_channels, 4, 2, 1, bias=False), nn.Tanh() # state size. (num_channels) x 64 x 64 ) def forward(self, noise): pixel_values = self.model(noise) return pixel_values class Discriminator(nn.Module): def __init__(self, num_channels=3, hidden_size=64): super(Discriminator, self).__init__() self.model = nn.Sequential( # input is (num_channels) x 64 x 64 nn.Conv2d(num_channels, hidden_size, 4, 2, 1, bias=False), nn.LeakyReLU(0.2, inplace=True), # state size. (hidden_size) x 32 x 32 nn.Conv2d(hidden_size, hidden_size * 2, 4, 2, 1, bias=False), nn.BatchNorm2d(hidden_size * 2), nn.LeakyReLU(0.2, inplace=True), # state size. (hidden_size*2) x 16 x 16 nn.Conv2d(hidden_size * 2, hidden_size * 4, 4, 2, 1, bias=False), nn.BatchNorm2d(hidden_size * 4), nn.LeakyReLU(0.2, inplace=True), # state size. (hidden_size*4) x 8 x 8 nn.Conv2d(hidden_size * 4, hidden_size * 8, 4, 2, 1, bias=False), nn.BatchNorm2d(hidden_size * 8), nn.LeakyReLU(0.2, inplace=True), # state size. (hidden_size*8) x 4 x 4 nn.Conv2d(hidden_size * 8, 1, 4, 1, 0, bias=False), nn.Sigmoid(), ) def forward(self, pixel_values): logits = self.model(pixel_values) return logits
community-events-main
huggan/pytorch/dcgan/modeling_dcgan.py
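A hedged shape check for the two modules above, matching the 64x64 layout spelled out in their inline comments:

import torch

from huggan.pytorch.dcgan.modeling_dcgan import Discriminator, Generator

generator = Generator(num_channels=3, latent_dim=100, hidden_size=64)
discriminator = Discriminator(num_channels=3, hidden_size=64)

noise = torch.randn(4, 100, 1, 1)
fake = generator(noise)          # (4, 3, 64, 64), values in (-1, 1) from the final Tanh
scores = discriminator(fake)     # (4, 1, 1, 1), values in (0, 1) from the final Sigmoid
print(fake.shape, scores.shape)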
#!/usr/bin/env python # coding=utf-8 # Copyright (c) 2022 PyTorch contributors and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions. """ Training a Deep Convolutional Generative Adversarial Network (DCGAN) leveraging the 🤗 ecosystem. Paper: https://arxiv.org/abs/1511.06434. Based on PyTorch's official tutorial: https://pytorch.org/tutorials/beginner/dcgan_faces_tutorial.html. """ import argparse import logging import os import sys from pathlib import Path import torch import torch.nn as nn from torch.utils.data import DataLoader from torchvision.transforms import (CenterCrop, Compose, Normalize, Resize, ToTensor, ToPILImage) from torchvision.utils import save_image from PIL import Image, ImageFile ImageFile.LOAD_TRUNCATED_IMAGES = True from accelerate import Accelerator from modeling_dcgan import Discriminator, Generator from datasets import load_dataset from huggan.pytorch.metrics.inception import InceptionV3 from huggan.pytorch.metrics.fid_score import calculate_fretchet import wandb logger = logging.getLogger(__name__) def parse_args(args=None): parser = argparse.ArgumentParser() parser.add_argument("--dataset", type=str, default="mnist", help="Dataset to load from the HuggingFace hub.") parser.add_argument("--num_workers", type=int, default=0, help="Number of workers when loading data") parser.add_argument("--batch_size", type=int, default=128, help="Batch size to use during training") parser.add_argument( "--image_size", type=int, default=64, help="Spatial size to use when resizing images for training.", ) parser.add_argument( "--num_channels", type=int, default=3, help="Number of channels in the training images. For color images this is 3.", ) parser.add_argument("--latent_dim", type=int, default=100, help="Dimensionality of the latent space.") parser.add_argument( "--generator_hidden_size", type=int, default=64, help="Hidden size of the generator's feature maps.", ) parser.add_argument( "--discriminator_hidden_size", type=int, default=64, help="Hidden size of the discriminator's feature maps.", ) parser.add_argument("--num_epochs", type=int, default=5, help="number of epochs of training") parser.add_argument("--lr", type=float, default=0.0002, help="adam: learning rate") parser.add_argument( "--beta1", type=float, default=0.5, help="adam: decay of first order momentum of gradient", ) parser.add_argument("--fp16", action="store_true", help="If passed, will use FP16 training.") parser.add_argument( "--mixed_precision", type=str, default="no", choices=["no", "fp16", "bf16"], help="Whether to use mixed precision. Choose" "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10." 
"and an Nvidia Ampere GPU.", ) parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.") parser.add_argument("--output_dir", type=Path, default=Path("./output"), help="Name of the directory to dump generated images during training.") parser.add_argument("--wandb", action="store_true", help="If passed, will log to Weights and Biases.") parser.add_argument( "--logging_steps", type=int, default=50, help="Number of steps between each logging", ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether to push the model to the HuggingFace hub after training.", ) parser.add_argument( "--model_name", default=None, type=str, help="Name of the model on the hub.", ) parser.add_argument( "--organization_name", default="huggan", type=str, help="Organization name to push to, in case args.push_to_hub is specified.", ) args = parser.parse_args() if args.push_to_hub: assert args.output_dir is not None, "Need an `output_dir` to create a repo when `--push_to_hub` is passed." assert args.model_name is not None, "Need a `model_name` to create a repo when `--push_to_hub` is passed." if args.output_dir is not None: os.makedirs(args.output_dir, exist_ok=True) return args # Custom weights initialization called on Generator and Discriminator def weights_init(m): classname = m.__class__.__name__ if classname.find("Conv") != -1: nn.init.normal_(m.weight.data, 0.0, 0.02) elif classname.find("BatchNorm") != -1: nn.init.normal_(m.weight.data, 1.0, 0.02) nn.init.constant_(m.bias.data, 0) def training_function(config, args): # Initialize the accelerator. We will let the accelerator handle device placement for us in this example. accelerator = Accelerator(fp16=args.fp16, cpu=args.cpu, mixed_precision=args.mixed_precision) # Setup logging, we only want one process per machine to log things on the screen. # accelerator.is_local_main_process is only True for one process per machine. 
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR) if accelerator.is_local_main_process: # set up Weights and Biases if requested if args.wandb: import wandb wandb.init(project=str(args.output_dir).split("/")[-1]) # Loss function criterion = nn.BCELoss() # Initialize generator and discriminator generator = Generator( num_channels=args.num_channels, latent_dim=args.latent_dim, hidden_size=args.generator_hidden_size, ) discriminator = Discriminator(num_channels=args.num_channels, hidden_size=args.discriminator_hidden_size) # Initialize weights generator.apply(weights_init) discriminator.apply(weights_init) # Initialize Inceptionv3 (for FID metric) model = InceptionV3() # Initialize Inceptionv3 (for FID metric) model = InceptionV3() # Create batch of latent vectors that we will use to visualize # the progression of the generator fixed_noise = torch.randn(64, args.latent_dim, 1, 1, device=accelerator.device) # Establish convention for real and fake labels during training real_label = 1.0 fake_label = 0.0 # Setup Adam optimizers for both G and D discriminator_optimizer = torch.optim.Adam(discriminator.parameters(), lr=args.lr, betas=(args.beta1, 0.999)) generator_optimizer = torch.optim.Adam(generator.parameters(), lr=args.lr, betas=(args.beta1, 0.999)) # Configure data loader dataset = load_dataset(args.dataset) transform = Compose( [ Resize(args.image_size), CenterCrop(args.image_size), ToTensor(), Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)), ] ) def transforms(examples): examples["pixel_values"] = [transform(image.convert("RGB")) for image in examples["image"]] del examples["image"] return examples transformed_dataset = dataset.with_transform(transforms) dataloader = DataLoader( transformed_dataset["train"], batch_size=args.batch_size, shuffle=True, num_workers=args.num_workers ) generator, discriminator, generator_optimizer, discriminator_optimizer, dataloader = accelerator.prepare(generator, discriminator, generator_optimizer, discriminator_optimizer, dataloader) # ---------- # Training # ---------- # Training Loop # Lists to keep track of progress img_list = [] logger.info("***** Running training *****") logger.info(f" Num Epochs = {args.num_epochs}") # For each epoch for epoch in range(args.num_epochs): # For each batch in the dataloader for step, batch in enumerate(dataloader, 0): ############################ # (1) Update D network: maximize log(D(x)) + log(1 - D(G(z))) ########################### ## Train with all-real batch discriminator.zero_grad() # Format batch real_cpu = batch["pixel_values"] batch_size = real_cpu.size(0) label = torch.full((batch_size,), real_label, dtype=torch.float, device=accelerator.device) # Forward pass real batch through D output = discriminator(real_cpu).view(-1) # Calculate loss on all-real batch errD_real = criterion(output, label) # Calculate gradients for D in backward pass accelerator.backward(errD_real) D_x = output.mean().item() ## Train with all-fake batch # Generate batch of latent vectors noise = torch.randn(batch_size, args.latent_dim, 1, 1, device=accelerator.device) # Generate fake image batch with G fake = generator(noise) label.fill_(fake_label) # Classify all fake batch with D output = discriminator(fake.detach()).view(-1) # Calculate D's loss on the all-fake batch errD_fake = criterion(output, label) # Calculate the gradients for this batch, accumulated (summed) with previous gradients accelerator.backward(errD_fake) D_G_z1 = output.mean().item() # Compute error of D as sum over the fake and the real 
batches errD = errD_real + errD_fake # Update D discriminator_optimizer.step() ############################ # (2) Update G network: maximize log(D(G(z))) ########################### generator.zero_grad() label.fill_(real_label) # fake labels are real for generator cost # Since we just updated D, perform another forward pass of all-fake batch through D output = discriminator(fake).view(-1) # Calculate G's loss based on this output errG = criterion(output, label) # Calculate gradients for G accelerator.backward(errG) D_G_z2 = output.mean().item() # Update G generator_optimizer.step() # Log all results if (step + 1) % args.logging_steps == 0: errD.detach() errG.detach() if accelerator.state.num_processes > 1: errD = accelerator.gather(errD).sum() / accelerator.state.num_processes errG = accelerator.gather(errG).sum() / accelerator.state.num_processes train_logs = { "epoch": epoch, "discriminator_loss": errD, "generator_loss": errG, "D_x": D_x, "D_G_z1": D_G_z1, "D_G_z2": D_G_z2, } log_str = "" for k, v in train_logs.items(): log_str += "| {}: {:.3e}".format(k, v) if accelerator.is_local_main_process: logger.info(log_str) if args.wandb: wandb.log(train_logs) # Check how the generator is doing by saving G's output on fixed_noise if (step % 500 == 0) or ((epoch == args.num_epochs - 1) and (step == len(dataloader) - 1)): with torch.no_grad(): fake_images = generator(fixed_noise).detach().cpu() file_name = args.output_dir/f"iter_{step}.png" save_image(fake_images.data[:25], file_name, nrow=5, normalize=True) if accelerator.is_local_main_process and args.wandb: wandb.log({'generated_examples': wandb.Image(str(file_name)) }) # Calculate FID metric fid = calculate_fretchet(real_cpu, fake, model.to(accelerator.device)) logger.info(f"FID: {fid}") if accelerator.is_local_main_process and args.wandb: wandb.log({"FID": fid}) # Optionally push to hub if accelerator.is_main_process and args.push_to_hub: generator.module.push_to_hub( repo_path_or_name=args.output_dir / args.model_name, organization=args.organization_name, ) def main(): args = parse_args() print(args) training_function({}, args) if __name__ == "__main__": main()
community-events-main
huggan/pytorch/dcgan/train.py
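A hedged launch sketch for the script above. It assumes the `huggan` package is importable (for example, the repo root on PYTHONPATH) and that the working directory is `huggan/pytorch/dcgan/`, since the script uses a script-style `from modeling_dcgan import ...`; the flag values are illustrative.

# Typical multi-GPU launch with 🤗 Accelerate (after a one-time `accelerate config`):
#
#   accelerate launch train.py --dataset mnist --num_epochs 3 --batch_size 64
#
# A single-process CPU smoke test can also be driven programmatically:
import sys

sys.argv = ["train.py", "--dataset", "mnist", "--num_epochs", "1", "--batch_size", "64", "--cpu"]

from train import parse_args, training_function  # assumes cwd == huggan/pytorch/dcgan

training_function({}, parse_args())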
import random import torch import torch.nn.functional as F def DiffAugment(x, types=[]): for p in types: for f in AUGMENT_FNS[p]: x = f(x) return x.contiguous() # """ # Augmentation functions got images as `x` # where `x` is tensor with this dimensions: # 0 - count of images # 1 - channels # 2 - width # 3 - height of image # """ def rand_brightness(x): x = x + (torch.rand(x.size(0), 1, 1, 1, dtype=x.dtype, device=x.device) - 0.5) return x def rand_saturation(x): x_mean = x.mean(dim=1, keepdim=True) x = (x - x_mean) * (torch.rand(x.size(0), 1, 1, 1, dtype=x.dtype, device=x.device) * 2) + x_mean return x def rand_contrast(x): x_mean = x.mean(dim=[1, 2, 3], keepdim=True) x = (x - x_mean) * (torch.rand(x.size(0), 1, 1, 1, dtype=x.dtype, device=x.device) + 0.5) + x_mean return x def rand_translation(x, ratio=0.125): shift_x, shift_y = int(x.size(2) * ratio + 0.5), int(x.size(3) * ratio + 0.5) translation_x = torch.randint(-shift_x, shift_x + 1, size=[x.size(0), 1, 1], device=x.device) translation_y = torch.randint(-shift_y, shift_y + 1, size=[x.size(0), 1, 1], device=x.device) grid_batch, grid_x, grid_y = torch.meshgrid( torch.arange(x.size(0), dtype=torch.long, device=x.device), torch.arange(x.size(2), dtype=torch.long, device=x.device), torch.arange(x.size(3), dtype=torch.long, device=x.device), indexing = 'ij') grid_x = torch.clamp(grid_x + translation_x + 1, 0, x.size(2) + 1) grid_y = torch.clamp(grid_y + translation_y + 1, 0, x.size(3) + 1) x_pad = F.pad(x, [1, 1, 1, 1, 0, 0, 0, 0]) x = x_pad.permute(0, 2, 3, 1).contiguous()[grid_batch, grid_x, grid_y].permute(0, 3, 1, 2) return x def rand_offset(x, ratio=1, ratio_h=1, ratio_v=1): w, h = x.size(2), x.size(3) imgs = [] for img in x.unbind(dim = 0): max_h = int(w * ratio * ratio_h) max_v = int(h * ratio * ratio_v) value_h = random.randint(0, max_h) * 2 - max_h value_v = random.randint(0, max_v) * 2 - max_v if abs(value_h) > 0: img = torch.roll(img, value_h, 2) if abs(value_v) > 0: img = torch.roll(img, value_v, 1) imgs.append(img) return torch.stack(imgs) def rand_offset_h(x, ratio=1): return rand_offset(x, ratio=1, ratio_h=ratio, ratio_v=0) def rand_offset_v(x, ratio=1): return rand_offset(x, ratio=1, ratio_h=0, ratio_v=ratio) def rand_cutout(x, ratio=0.5): cutout_size = int(x.size(2) * ratio + 0.5), int(x.size(3) * ratio + 0.5) offset_x = torch.randint(0, x.size(2) + (1 - cutout_size[0] % 2), size=[x.size(0), 1, 1], device=x.device) offset_y = torch.randint(0, x.size(3) + (1 - cutout_size[1] % 2), size=[x.size(0), 1, 1], device=x.device) grid_batch, grid_x, grid_y = torch.meshgrid( torch.arange(x.size(0), dtype=torch.long, device=x.device), torch.arange(cutout_size[0], dtype=torch.long, device=x.device), torch.arange(cutout_size[1], dtype=torch.long, device=x.device), indexing = 'ij') grid_x = torch.clamp(grid_x + offset_x - cutout_size[0] // 2, min=0, max=x.size(2) - 1) grid_y = torch.clamp(grid_y + offset_y - cutout_size[1] // 2, min=0, max=x.size(3) - 1) mask = torch.ones(x.size(0), x.size(2), x.size(3), dtype=x.dtype, device=x.device) mask[grid_batch, grid_x, grid_y] = 0 x = x * mask.unsqueeze(1) return x AUGMENT_FNS = { 'color': [rand_brightness, rand_saturation, rand_contrast], 'offset': [rand_offset], 'offset_h': [rand_offset_h], 'offset_v': [rand_offset_v], 'translation': [rand_translation], 'cutout': [rand_cutout], }
community-events-main
huggan/pytorch/lightweight_gan/diff_augment.py
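A hedged sketch of the augmentation entry point above; the point of DiffAugment is that these transforms are differentiable, so the same call can sit in front of the discriminator for both real and generated batches without blocking generator gradients.

import torch

from huggan.pytorch.lightweight_gan.diff_augment import DiffAugment

images = torch.rand(8, 3, 64, 64)  # (batch, channels, height, width)
augmented = DiffAugment(images, types=['color', 'translation', 'cutout'])
print(augmented.shape)  # shape is preserved: torch.Size([8, 3, 64, 64])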
community-events-main
huggan/pytorch/lightweight_gan/__init__.py
import fire import random from retry.api import retry_call from tqdm import tqdm from datetime import datetime from pathlib import Path from lightweight_gan import Trainer, NanException import torch import torch.multiprocessing as mp import numpy as np def exists(val): return val is not None def default(val, d): return val if exists(val) else d def cast_list(el): return el if isinstance(el, list) else [el] def timestamped_filename(prefix = 'generated-'): now = datetime.now() timestamp = now.strftime("%m-%d-%Y_%H-%M-%S") return f'{prefix}{timestamp}' def set_seed(seed): torch.manual_seed(seed) torch.backends.cudnn.deterministic = True torch.backends.cudnn.benchmark = False np.random.seed(seed) random.seed(seed) def run_training(model_args, data, load_from, new, num_train_steps, name, seed): if seed is not None: set_seed(seed) model = Trainer(**model_args) if not new: model.load(load_from) else: model.clear() progress_bar = tqdm(initial = model.steps, total = num_train_steps, mininterval=10., desc=f'{name}<{data}>') G, D, D_aug = model.init_accelerator() # model.set_data_src(data) while model.steps < num_train_steps: # retry_call(model.train, tries=3, exceptions=NanException) model.train(G, D, D_aug) progress_bar.n = model.steps progress_bar.refresh() if model.accelerator.is_local_main_process and model.steps % 50 == 0: model.print_log() model.save(model.checkpoint_num) def train_from_folder( dataset_name = 'huggan/CelebA-faces', data = './data', results_dir = './results', models_dir = './models', name = 'default', new = False, load_from = -1, image_size = 256, optimizer = 'adam', fmap_max = 512, transparent = False, greyscale = False, batch_size = 10, gradient_accumulate_every = 4, num_train_steps = 150000, learning_rate = 2e-4, save_every = 10000, evaluate_every = 1000, generate = False, generate_types = ['default', 'ema'], generate_interpolation = False, aug_test = False, aug_prob=None, aug_types=['cutout', 'translation'], dataset_aug_prob=0., attn_res_layers = [32], freq_chan_attn = False, disc_output_size = 1, dual_contrast_loss = False, antialias = False, interpolation_num_steps = 100, save_frames = False, num_image_tiles = None, calculate_fid_every = None, calculate_fid_num_images = 12800, clear_fid_cache = False, seed = 42, cpu = False, mixed_precision = "no", show_progress = False, wandb = False, push_to_hub = False, organization_name = None, ): if push_to_hub: if name == 'default': raise RuntimeError( "You've chosen to push to hub, but have left the --name flag as 'default'." " You should name your model something other than 'default'!" 
) num_image_tiles = default(num_image_tiles, 4 if image_size > 512 else 8) model_args = dict( dataset_name = dataset_name, name = name, results_dir = results_dir, models_dir = models_dir, batch_size = batch_size, gradient_accumulate_every = gradient_accumulate_every, attn_res_layers = cast_list(attn_res_layers), freq_chan_attn = freq_chan_attn, disc_output_size = disc_output_size, dual_contrast_loss = dual_contrast_loss, antialias = antialias, image_size = image_size, num_image_tiles = num_image_tiles, optimizer = optimizer, fmap_max = fmap_max, transparent = transparent, greyscale = greyscale, lr = learning_rate, save_every = save_every, evaluate_every = evaluate_every, aug_prob = aug_prob, aug_types = cast_list(aug_types), dataset_aug_prob = dataset_aug_prob, calculate_fid_every = calculate_fid_every, calculate_fid_num_images = calculate_fid_num_images, clear_fid_cache = clear_fid_cache, cpu = cpu, mixed_precision = mixed_precision, wandb = wandb, push_to_hub = push_to_hub, organization_name = organization_name ) if generate: model = Trainer(**model_args) model.load(load_from) samples_name = timestamped_filename() checkpoint = model.checkpoint_num dir_result = model.generate(samples_name, num_image_tiles, checkpoint, generate_types) print(f'sample images generated at {dir_result}') return if generate_interpolation: model = Trainer(**model_args) model.load(load_from) samples_name = timestamped_filename() model.generate_interpolation(samples_name, num_image_tiles, num_steps = interpolation_num_steps, save_frames = save_frames) print(f'interpolation generated at {results_dir}/{name}/{samples_name}') return if show_progress: model = Trainer(**model_args) model.show_progress(num_images=num_image_tiles, types=generate_types) return run_training(model_args, data, load_from, new, num_train_steps, name, seed) def main(): fire.Fire(train_from_folder) if __name__ == "__main__": main()
community-events-main
huggan/pytorch/lightweight_gan/cli.py
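A hedged invocation sketch for the Fire-based CLI above. Like the DCGAN script, it expects the `huggan` package to be importable and to be run from its own directory (its `from lightweight_gan import Trainer, NanException` is a script-style import); every keyword of `train_from_folder` becomes a command-line flag, and the values shown are illustrative.

# Command-line use:
#
#   python cli.py --dataset_name huggan/CelebA-faces --image_size 256 \
#       --batch_size 10 --gradient_accumulate_every 4 --name my-lightweight-gan
#
# Programmatic use from the same directory:
from cli import train_from_folder

train_from_folder(dataset_name="huggan/CelebA-faces", image_size=64,
                  batch_size=4, num_train_steps=1000, name="smoke-test", cpu=True)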
import os import json import tempfile from random import random import math from math import log2, floor from pathlib import Path from functools import partial from contextlib import contextmanager, ExitStack from pathlib import Path from shutil import rmtree import torch from torch.optim import Adam from torch import nn, einsum import torch.nn.functional as F from torch.utils.data import Dataset, DataLoader from torch.autograd import grad as torch_grad from PIL import Image import torchvision from torchvision import transforms from torchvision.utils import save_image from kornia.filters import filter2d from huggan.pytorch.lightweight_gan.diff_augment import DiffAugment from tqdm import tqdm from einops import rearrange, reduce, repeat from datasets import load_dataset from accelerate import Accelerator, DistributedDataParallelKwargs from huggingface_hub import hf_hub_download, create_repo from huggan.pytorch.huggan_mixin import HugGANModelHubMixin from huggan.utils.hub import get_full_repo_name # constants # NUM_CORES = multiprocessing.cpu_count() EXTS = ['jpg', 'jpeg', 'png'] PYTORCH_WEIGHTS_NAME = 'model.pt' # helpers def exists(val): return val is not None @contextmanager def null_context(): yield def is_power_of_two(val): return log2(val).is_integer() def default(val, d): return val if exists(val) else d def set_requires_grad(model, bool): for p in model.parameters(): p.requires_grad = bool def cycle(iterable): while True: for i in iterable: yield i def raise_if_nan(t): if torch.isnan(t): raise NanException def evaluate_in_chunks(max_batch_size, model, *args): split_args = list(zip(*list(map(lambda x: x.split(max_batch_size, dim=0), args)))) chunked_outputs = [model(*i) for i in split_args] if len(chunked_outputs) == 1: return chunked_outputs[0] return torch.cat(chunked_outputs, dim=0) def slerp(val, low, high): low_norm = low / torch.norm(low, dim=1, keepdim=True) high_norm = high / torch.norm(high, dim=1, keepdim=True) omega = torch.acos((low_norm * high_norm).sum(1)) so = torch.sin(omega) res = (torch.sin((1.0 - val) * omega) / so).unsqueeze(1) * low + (torch.sin(val * omega) / so).unsqueeze(1) * high return res def safe_div(n, d): try: res = n / d except ZeroDivisionError: prefix = '' if int(n >= 0) else '-' res = float(f'{prefix}inf') return res # loss functions def gen_hinge_loss(fake, real): return fake.mean() def hinge_loss(real, fake): return (F.relu(1 + real) + F.relu(1 - fake)).mean() def dual_contrastive_loss(real_logits, fake_logits): device = real_logits.device real_logits, fake_logits = map(lambda t: rearrange(t, '... 
-> (...)'), (real_logits, fake_logits)) def loss_half(t1, t2): t1 = rearrange(t1, 'i -> i ()') t2 = repeat(t2, 'j -> i j', i=t1.shape[0]) t = torch.cat((t1, t2), dim=-1) return F.cross_entropy(t, torch.zeros(t1.shape[0], device=device, dtype=torch.long)) return loss_half(real_logits, fake_logits) + loss_half(-fake_logits, -real_logits) # helper classes class NanException(Exception): pass class EMA(): def __init__(self, beta): super().__init__() self.beta = beta def update_average(self, old, new): if not exists(old): return new return old * self.beta + (1 - self.beta) * new class RandomApply(nn.Module): def __init__(self, prob, fn, fn_else=lambda x: x): super().__init__() self.fn = fn self.fn_else = fn_else self.prob = prob def forward(self, x): fn = self.fn if random() < self.prob else self.fn_else return fn(x) class ChanNorm(nn.Module): def __init__(self, dim, eps=1e-5): super().__init__() self.eps = eps self.g = nn.Parameter(torch.ones(1, dim, 1, 1)) self.b = nn.Parameter(torch.zeros(1, dim, 1, 1)) def forward(self, x): var = torch.var(x, dim=1, unbiased=False, keepdim=True) mean = torch.mean(x, dim=1, keepdim=True) return (x - mean) / (var + self.eps).sqrt() * self.g + self.b class PreNorm(nn.Module): def __init__(self, dim, fn): super().__init__() self.fn = fn self.norm = ChanNorm(dim) def forward(self, x): return self.fn(self.norm(x)) class Residual(nn.Module): def __init__(self, fn): super().__init__() self.fn = fn def forward(self, x): return self.fn(x) + x class SumBranches(nn.Module): def __init__(self, branches): super().__init__() self.branches = nn.ModuleList(branches) def forward(self, x): return sum(map(lambda fn: fn(x), self.branches)) class Fuzziness(nn.Module): def __init__(self): super().__init__() f = torch.Tensor([1, 2, 1]) self.register_buffer('f', f) def forward(self, x): f = self.f f = f[None, None, :] * f[None, :, None] return filter2d(x, f, normalized=True) Blur = nn.Identity # attention class DepthWiseConv2d(nn.Module): def __init__(self, dim_in, dim_out, kernel_size, padding=0, stride=1, bias=True): super().__init__() self.net = nn.Sequential( nn.Conv2d(dim_in, dim_in, kernel_size=kernel_size, padding=padding, groups=dim_in, stride=stride, bias=bias), nn.Conv2d(dim_in, dim_out, kernel_size=1, bias=bias) ) def forward(self, x): return self.net(x) class LinearAttention(nn.Module): def __init__(self, dim, dim_head=64, heads=8): super().__init__() self.scale = dim_head ** -0.5 self.heads = heads inner_dim = dim_head * heads self.nonlin = nn.GELU() self.to_q = nn.Conv2d(dim, inner_dim, 1, bias=False) self.to_kv = DepthWiseConv2d(dim, inner_dim * 2, 3, padding=1, bias=False) self.to_out = nn.Conv2d(inner_dim, dim, 1) def forward(self, fmap): h, x, y = self.heads, *fmap.shape[-2:] q, k, v = (self.to_q(fmap), *self.to_kv(fmap).chunk(2, dim=1)) q, k, v = map(lambda t: rearrange(t, 'b (h c) x y -> (b h) (x y) c', h=h), (q, k, v)) q = q.softmax(dim=-1) k = k.softmax(dim=-2) q = q * self.scale context = einsum('b n d, b n e -> b d e', k, v) out = einsum('b n d, b d e -> b n e', q, context) out = rearrange(out, '(b h) (x y) d -> b (h d) x y', h=h, x=x, y=y) out = self.nonlin(out) return self.to_out(out) # dataset def convert_image_to(img_type, image): if image.mode != img_type: return image.convert(img_type) return image class identity(object): def __call__(self, tensor): return tensor class expand_greyscale(object): def __init__(self, transparent): self.transparent = transparent def __call__(self, tensor): channels = tensor.shape[0] num_target_channels = 4 if 
self.transparent else 3 if channels == num_target_channels: return tensor alpha = None if channels == 1: color = tensor.expand(3, -1, -1) elif channels == 2: color = tensor[:1].expand(3, -1, -1) alpha = tensor[1:] else: raise Exception(f'image with invalid number of channels given {channels}') if not exists(alpha) and self.transparent: alpha = torch.ones(1, *tensor.shape[1:], device=tensor.device) return color if not self.transparent else torch.cat((color, alpha)) def resize_to_minimum_size(min_size, image): if max(*image.size) < min_size: return torchvision.transforms.functional.resize(image, min_size) return image # augmentations def random_hflip(tensor, prob): if prob > random(): return tensor return torch.flip(tensor, dims=(3,)) class AugWrapper(nn.Module): def __init__(self, D, image_size): super().__init__() self.D = D def forward(self, images, prob=0., types=[], detach=False, **kwargs): context = torch.no_grad if detach else null_context with context(): if random() < prob: images = random_hflip(images, prob=0.5) images = DiffAugment(images, types=types) return self.D(images, **kwargs) # modifiable global variables norm_class = nn.BatchNorm2d def upsample(scale_factor=2): return nn.Upsample(scale_factor=scale_factor) # squeeze excitation classes # global context network # https://arxiv.org/abs/2012.13375 # similar to squeeze-excite, but with a simplified attention pooling and a subsequent layer norm class GlobalContext(nn.Module): def __init__( self, *, chan_in, chan_out ): super().__init__() self.to_k = nn.Conv2d(chan_in, 1, 1) chan_intermediate = max(3, chan_out // 2) self.net = nn.Sequential( nn.Conv2d(chan_in, chan_intermediate, 1), nn.LeakyReLU(0.1), nn.Conv2d(chan_intermediate, chan_out, 1), nn.Sigmoid() ) def forward(self, x): context = self.to_k(x) context = context.flatten(2).softmax(dim=-1) out = einsum('b i n, b c n -> b c i', context, x.flatten(2)) out = out.unsqueeze(-1) return self.net(out) # frequency channel attention # https://arxiv.org/abs/2012.11879 def get_1d_dct(i, freq, L): result = math.cos(math.pi * freq * (i + 0.5) / L) / math.sqrt(L) return result * (1 if freq == 0 else math.sqrt(2)) def get_dct_weights(width, channel, fidx_u, fidx_v): dct_weights = torch.zeros(1, channel, width, width) c_part = channel // len(fidx_u) for i, (u_x, v_y) in enumerate(zip(fidx_u, fidx_v)): for x in range(width): for y in range(width): coor_value = get_1d_dct(x, u_x, width) * get_1d_dct(y, v_y, width) dct_weights[:, i * c_part: (i + 1) * c_part, x, y] = coor_value return dct_weights class FCANet(nn.Module): def __init__( self, *, chan_in, chan_out, reduction=4, width ): super().__init__() freq_w, freq_h = ([0] * 8), list(range(8)) # in paper, it seems 16 frequencies was ideal dct_weights = get_dct_weights(width, chan_in, [*freq_w, *freq_h], [*freq_h, *freq_w]) self.register_buffer('dct_weights', dct_weights) chan_intermediate = max(3, chan_out // reduction) self.net = nn.Sequential( nn.Conv2d(chan_in, chan_intermediate, 1), nn.LeakyReLU(0.1), nn.Conv2d(chan_intermediate, chan_out, 1), nn.Sigmoid() ) def forward(self, x): x = reduce(x * self.dct_weights, 'b c (h h1) (w w1) -> b c h1 w1', 'sum', h1=1, w1=1) return self.net(x) # generative adversarial network class Generator(nn.Module): def __init__( self, *, image_size, latent_dim=256, fmap_max=512, fmap_inverse_coef=12, transparent=False, greyscale=False, attn_res_layers=[], freq_chan_attn=False ): super().__init__() resolution = log2(image_size) assert is_power_of_two(image_size), 'image size must be a power of 2' if 
transparent: init_channel = 4 elif greyscale: init_channel = 1 else: init_channel = 3 fmap_max = default(fmap_max, latent_dim) self.initial_conv = nn.Sequential( nn.ConvTranspose2d(latent_dim, latent_dim * 2, 4), norm_class(latent_dim * 2), nn.GLU(dim=1) ) num_layers = int(resolution) - 2 features = list(map(lambda n: (n, 2 ** (fmap_inverse_coef - n)), range(2, num_layers + 2))) features = list(map(lambda n: (n[0], min(n[1], fmap_max)), features)) features = list(map(lambda n: 3 if n[0] >= 8 else n[1], features)) features = [latent_dim, *features] in_out_features = list(zip(features[:-1], features[1:])) self.res_layers = range(2, num_layers + 2) self.layers = nn.ModuleList([]) self.res_to_feature_map = dict(zip(self.res_layers, in_out_features)) self.sle_map = ((3, 7), (4, 8), (5, 9), (6, 10)) self.sle_map = list(filter(lambda t: t[0] <= resolution and t[1] <= resolution, self.sle_map)) self.sle_map = dict(self.sle_map) self.num_layers_spatial_res = 1 for (res, (chan_in, chan_out)) in zip(self.res_layers, in_out_features): image_width = 2 ** res attn = None if image_width in attn_res_layers: attn = PreNorm(chan_in, LinearAttention(chan_in)) sle = None if res in self.sle_map: residual_layer = self.sle_map[res] sle_chan_out = self.res_to_feature_map[residual_layer - 1][-1] if freq_chan_attn: sle = FCANet( chan_in=chan_out, chan_out=sle_chan_out, width=2 ** (res + 1) ) else: sle = GlobalContext( chan_in=chan_out, chan_out=sle_chan_out ) layer = nn.ModuleList([ nn.Sequential( upsample(), Blur(), nn.Conv2d(chan_in, chan_out * 2, 3, padding=1), norm_class(chan_out * 2), nn.GLU(dim=1) ), sle, attn ]) self.layers.append(layer) self.out_conv = nn.Conv2d(features[-1], init_channel, 3, padding=1) def forward(self, x): x = rearrange(x, 'b c -> b c () ()') x = self.initial_conv(x) x = F.normalize(x, dim=1) residuals = dict() for (res, (up, sle, attn)) in zip(self.res_layers, self.layers): if exists(attn): x = attn(x) + x x = up(x) if exists(sle): out_res = self.sle_map[res] residual = sle(x) residuals[out_res] = residual next_res = res + 1 if next_res in residuals: x = x * residuals[next_res] return self.out_conv(x) class SimpleDecoder(nn.Module): def __init__( self, *, chan_in, chan_out=3, num_upsamples=4, ): super().__init__() self.layers = nn.ModuleList([]) final_chan = chan_out chans = chan_in for ind in range(num_upsamples): last_layer = ind == (num_upsamples - 1) chan_out = chans if not last_layer else final_chan * 2 layer = nn.Sequential( upsample(), nn.Conv2d(chans, chan_out, 3, padding=1), nn.GLU(dim=1) ) self.layers.append(layer) chans //= 2 def forward(self, x): for layer in self.layers: x = layer(x) return x class Discriminator(nn.Module): def __init__( self, *, image_size, fmap_max=512, fmap_inverse_coef=12, transparent=False, greyscale=False, disc_output_size=5, attn_res_layers=[] ): super().__init__() resolution = log2(image_size) assert is_power_of_two(image_size), 'image size must be a power of 2' assert disc_output_size in {1, 5}, 'discriminator output dimensions can only be 5x5 or 1x1' resolution = int(resolution) if transparent: init_channel = 4 elif greyscale: init_channel = 1 else: init_channel = 3 num_non_residual_layers = max(0, int(resolution) - 8) num_residual_layers = 8 - 3 non_residual_resolutions = range(min(8, resolution), 2, -1) features = list(map(lambda n: (n, 2 ** (fmap_inverse_coef - n)), non_residual_resolutions)) features = list(map(lambda n: (n[0], min(n[1], fmap_max)), features)) if num_non_residual_layers == 0: res, _ = features[0] features[0] = (res, 
init_channel) chan_in_out = list(zip(features[:-1], features[1:])) self.non_residual_layers = nn.ModuleList([]) for ind in range(num_non_residual_layers): first_layer = ind == 0 last_layer = ind == (num_non_residual_layers - 1) chan_out = features[0][-1] if last_layer else init_channel self.non_residual_layers.append(nn.Sequential( Blur(), nn.Conv2d(init_channel, chan_out, 4, stride=2, padding=1), nn.LeakyReLU(0.1) )) self.residual_layers = nn.ModuleList([]) for (res, ((_, chan_in), (_, chan_out))) in zip(non_residual_resolutions, chan_in_out): image_width = 2 ** res attn = None if image_width in attn_res_layers: attn = PreNorm(chan_in, LinearAttention(chan_in)) self.residual_layers.append(nn.ModuleList([ SumBranches([ nn.Sequential( Blur(), nn.Conv2d(chan_in, chan_out, 4, stride=2, padding=1), nn.LeakyReLU(0.1), nn.Conv2d(chan_out, chan_out, 3, padding=1), nn.LeakyReLU(0.1) ), nn.Sequential( Blur(), nn.AvgPool2d(2), nn.Conv2d(chan_in, chan_out, 1), nn.LeakyReLU(0.1), ) ]), attn ])) last_chan = features[-1][-1] if disc_output_size == 5: self.to_logits = nn.Sequential( nn.Conv2d(last_chan, last_chan, 1), nn.LeakyReLU(0.1), nn.Conv2d(last_chan, 1, 4) ) elif disc_output_size == 1: self.to_logits = nn.Sequential( Blur(), nn.Conv2d(last_chan, last_chan, 3, stride=2, padding=1), nn.LeakyReLU(0.1), nn.Conv2d(last_chan, 1, 4) ) self.to_shape_disc_out = nn.Sequential( nn.Conv2d(init_channel, 64, 3, padding=1), Residual(PreNorm(64, LinearAttention(64))), SumBranches([ nn.Sequential( Blur(), nn.Conv2d(64, 32, 4, stride=2, padding=1), nn.LeakyReLU(0.1), nn.Conv2d(32, 32, 3, padding=1), nn.LeakyReLU(0.1) ), nn.Sequential( Blur(), nn.AvgPool2d(2), nn.Conv2d(64, 32, 1), nn.LeakyReLU(0.1), ) ]), Residual(PreNorm(32, LinearAttention(32))), nn.AdaptiveAvgPool2d((4, 4)), nn.Conv2d(32, 1, 4) ) self.decoder1 = SimpleDecoder(chan_in=last_chan, chan_out=init_channel) self.decoder2 = SimpleDecoder(chan_in=features[-2][-1], chan_out=init_channel) if resolution >= 9 else None def forward(self, x, calc_aux_loss=False): orig_img = x for layer in self.non_residual_layers: x = layer(x) layer_outputs = [] for (net, attn) in self.residual_layers: if exists(attn): x = attn(x) + x x = net(x) layer_outputs.append(x) out = self.to_logits(x).flatten(1) img_32x32 = F.interpolate(orig_img, size=(32, 32)) out_32x32 = self.to_shape_disc_out(img_32x32) if not calc_aux_loss: return out, out_32x32, None # self-supervised auto-encoding loss layer_8x8 = layer_outputs[-1] layer_16x16 = layer_outputs[-2] recon_img_8x8 = self.decoder1(layer_8x8) aux_loss = F.mse_loss( recon_img_8x8, F.interpolate(orig_img, size=recon_img_8x8.shape[2:]) ) if exists(self.decoder2): select_random_quadrant = lambda rand_quadrant, img: \ rearrange(img, 'b c (m h) (n w) -> (m n) b c h w', m=2, n=2)[rand_quadrant] crop_image_fn = partial(select_random_quadrant, floor(random() * 4)) img_part, layer_16x16_part = map(crop_image_fn, (orig_img, layer_16x16)) recon_img_16x16 = self.decoder2(layer_16x16_part) aux_loss_16x16 = F.mse_loss( recon_img_16x16, F.interpolate(img_part, size=recon_img_16x16.shape[2:]) ) aux_loss = aux_loss + aux_loss_16x16 return out, out_32x32, aux_loss class LightweightGAN(nn.Module, HugGANModelHubMixin): def __init__( self, *, latent_dim, image_size, optimizer="adam", fmap_max=512, fmap_inverse_coef=12, transparent=False, greyscale=False, disc_output_size=5, attn_res_layers=[], freq_chan_attn=False, ttur_mult=1., lr=2e-4, ): super().__init__() self.config = { 'latent_dim': latent_dim, 'image_size': image_size, 'optimizer': optimizer, 
'fmap_max': fmap_max, 'fmap_inverse_coef': fmap_inverse_coef, 'transparent': transparent, 'greyscale': greyscale, 'disc_output_size': disc_output_size, 'attn_res_layers': attn_res_layers, 'freq_chan_attn': freq_chan_attn, 'ttur_mult': ttur_mult, 'lr': lr } self.latent_dim = latent_dim self.image_size = image_size G_kwargs = dict( image_size=image_size, latent_dim=latent_dim, fmap_max=fmap_max, fmap_inverse_coef=fmap_inverse_coef, transparent=transparent, greyscale=greyscale, attn_res_layers=attn_res_layers, freq_chan_attn=freq_chan_attn ) self.G = Generator(**G_kwargs) self.D = Discriminator( image_size=image_size, fmap_max=fmap_max, fmap_inverse_coef=fmap_inverse_coef, transparent=transparent, greyscale=greyscale, attn_res_layers=attn_res_layers, disc_output_size=disc_output_size ) self.ema_updater = EMA(0.995) self.GE = Generator(**G_kwargs) set_requires_grad(self.GE, False) if optimizer == "adam": self.G_opt = Adam(self.G.parameters(), lr=lr, betas=(0.5, 0.9)) self.D_opt = Adam(self.D.parameters(), lr=lr * ttur_mult, betas=(0.5, 0.9)) elif optimizer == "adabelief": from adabelief_pytorch import AdaBelief self.G_opt = AdaBelief(self.G.parameters(), lr=lr, betas=(0.5, 0.9)) self.D_opt = AdaBelief(self.D.parameters(), lr=lr * ttur_mult, betas=(0.5, 0.9)) else: assert False, "No valid optimizer is given" self.apply(self._init_weights) self.reset_parameter_averaging() self.D_aug = AugWrapper(self.D, image_size) def _init_weights(self, m): if type(m) in {nn.Conv2d, nn.Linear}: nn.init.kaiming_normal_(m.weight, a=0, mode='fan_in', nonlinearity='leaky_relu') def EMA(self): def update_moving_average(ma_model, current_model): for current_params, ma_params in zip(current_model.parameters(), ma_model.parameters()): old_weight, up_weight = ma_params.data, current_params.data ma_params.data = self.ema_updater.update_average(old_weight, up_weight) for current_buffer, ma_buffer in zip(current_model.buffers(), ma_model.buffers()): new_buffer_value = self.ema_updater.update_average(ma_buffer, current_buffer) ma_buffer.copy_(new_buffer_value) update_moving_average(self.GE, self.G) def reset_parameter_averaging(self): self.GE.load_state_dict(self.G.state_dict()) def forward(self, x): raise NotImplemented def _save_pretrained(self, save_directory): """ Overwrite this method in case you don't want to save complete model, rather some specific layers """ path = os.path.join(save_directory, PYTORCH_WEIGHTS_NAME) model_to_save = self.module if hasattr(self, "module") else self # We update this to be a dict containing 'GAN', as that's what is expected torch.save({'GAN': model_to_save.state_dict()}, path) @classmethod def _from_pretrained( cls, model_id, revision, cache_dir, force_download, proxies, resume_download, local_files_only, token, map_location="cpu", strict=False, **model_kwargs, ): """ Overwrite this method in case you wish to initialize your model in a different way. 
""" map_location = torch.device(map_location) if os.path.isdir(model_id): print("Loading weights from local directory") model_file = os.path.join(model_id, PYTORCH_WEIGHTS_NAME) else: model_file = hf_hub_download( repo_id=model_id, filename=PYTORCH_WEIGHTS_NAME, revision=revision, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, token=token, local_files_only=local_files_only, ) # We update here to directly unpack config model = cls(**model_kwargs['config']) state_dict = torch.load(model_file, map_location=map_location) model.load_state_dict(state_dict["GAN"], strict=strict) model.eval() return model # trainer class Trainer(): def __init__( self, dataset_name="huggan/CelebA-faces", name='default', results_dir='results', models_dir='models', base_dir='./', optimizer='adam', latent_dim=256, image_size=128, num_image_tiles=8, fmap_max=512, transparent=False, greyscale=False, batch_size=4, gp_weight=10, gradient_accumulate_every=1, attn_res_layers=[], freq_chan_attn=False, disc_output_size=5, dual_contrast_loss=False, antialias=False, lr=2e-4, lr_mlp=1., ttur_mult=1., save_every=10000, evaluate_every=1000, aug_prob=None, aug_types=['translation', 'cutout'], dataset_aug_prob=0., calculate_fid_every=None, calculate_fid_num_images=12800, clear_fid_cache=False, log=False, cpu=False, mixed_precision="no", wandb=False, push_to_hub=False, organization_name=None, *args, **kwargs ): self.GAN_params = [args, kwargs] self.GAN = None self.dataset_name = dataset_name self.name = name base_dir = Path(base_dir) self.base_dir = base_dir self.results_dir = base_dir / results_dir self.models_dir = base_dir / models_dir self.fid_dir = base_dir / 'fid' / name # Note - in original repo config is private - ".config.json", but here, we make it public self.config_path = self.models_dir / name / 'config.json' assert is_power_of_two(image_size), 'image size must be a power of 2 (64, 128, 256, 512, 1024)' assert all(map(is_power_of_two, attn_res_layers)), 'resolution layers of attention must all be powers of 2 (16, 32, 64, 128, 256, 512)' assert not ( dual_contrast_loss and disc_output_size > 1), 'discriminator output size cannot be greater than 1 if using dual contrastive loss' self.image_size = image_size self.num_image_tiles = num_image_tiles self.latent_dim = latent_dim self.fmap_max = fmap_max self.transparent = transparent self.greyscale = greyscale assert (int(self.transparent) + int(self.greyscale)) < 2, 'you can only set either transparency or greyscale' self.aug_prob = aug_prob self.aug_types = aug_types self.lr = lr self.optimizer = optimizer self.ttur_mult = ttur_mult self.batch_size = batch_size self.gradient_accumulate_every = gradient_accumulate_every self.gp_weight = gp_weight self.evaluate_every = evaluate_every self.save_every = save_every self.steps = 0 self.attn_res_layers = attn_res_layers self.freq_chan_attn = freq_chan_attn self.disc_output_size = disc_output_size self.antialias = antialias self.dual_contrast_loss = dual_contrast_loss self.d_loss = 0 self.g_loss = 0 self.last_gp_loss = None self.last_recon_loss = None self.last_fid = None self.init_folders() self.loader = None self.dataset_aug_prob = dataset_aug_prob self.calculate_fid_every = calculate_fid_every self.calculate_fid_num_images = calculate_fid_num_images self.clear_fid_cache = clear_fid_cache self.syncbatchnorm = torch.cuda.device_count() > 1 and not cpu self.cpu = cpu self.mixed_precision = mixed_precision self.wandb = wandb self.push_to_hub = push_to_hub self.organization_name = 
organization_name self.repo_name = get_full_repo_name(self.name, self.organization_name) if self.push_to_hub: self.repo_url = create_repo(self.repo_name, exist_ok=True) @property def image_extension(self): return 'jpg' if not self.transparent else 'png' @property def checkpoint_num(self): return floor(self.steps // self.save_every) def init_GAN(self): args, kwargs = self.GAN_params # set some global variables before instantiating GAN global norm_class global Blur norm_class = nn.SyncBatchNorm if self.syncbatchnorm else nn.BatchNorm2d Blur = nn.Identity if not self.antialias else Fuzziness # instantiate GAN self.GAN = LightweightGAN( optimizer=self.optimizer, lr=self.lr, latent_dim=self.latent_dim, attn_res_layers=self.attn_res_layers, freq_chan_attn=self.freq_chan_attn, image_size=self.image_size, ttur_mult=self.ttur_mult, fmap_max=self.fmap_max, disc_output_size=self.disc_output_size, transparent=self.transparent, greyscale=self.greyscale, *args, **kwargs ) def write_config(self): self.config_path.write_text(json.dumps(self.config())) def load_config(self): config = self.config() if not self.config_path.exists() else json.loads(self.config_path.read_text()) self.image_size = config['image_size'] self.transparent = config['transparent'] self.syncbatchnorm = config['syncbatchnorm'] self.disc_output_size = config['disc_output_size'] self.greyscale = config.pop('greyscale', False) self.attn_res_layers = config.pop('attn_res_layers', []) self.freq_chan_attn = config.pop('freq_chan_attn', False) self.optimizer = config.pop('optimizer', 'adam') self.fmap_max = config.pop('fmap_max', 512) del self.GAN self.init_GAN() def config(self): return { 'image_size': self.image_size, 'transparent': self.transparent, 'greyscale': self.greyscale, 'syncbatchnorm': self.syncbatchnorm, 'disc_output_size': self.disc_output_size, 'optimizer': self.optimizer, 'attn_res_layers': self.attn_res_layers, 'freq_chan_attn': self.freq_chan_attn } def set_data_src(self): # start of using HuggingFace dataset dataset = load_dataset(self.dataset_name) if self.transparent: num_channels = 4 pillow_mode = 'RGBA' expand_fn = expand_greyscale(self.transparent) elif self.greyscale: num_channels = 1 pillow_mode = 'L' expand_fn = identity() else: num_channels = 3 pillow_mode = 'RGB' expand_fn = expand_greyscale(self.transparent) convert_image_fn = partial(convert_image_to, pillow_mode) transform = transforms.Compose([ transforms.Lambda(convert_image_fn), transforms.Lambda(partial(resize_to_minimum_size, self.image_size)), transforms.Resize(self.image_size), RandomApply(0., transforms.RandomResizedCrop(self.image_size, scale=(0.5, 1.0), ratio=(0.98, 1.02)), transforms.CenterCrop(self.image_size)), transforms.ToTensor(), transforms.Lambda(expand_fn) ]) def transform_images(examples): transformed_images = [transform(image.convert("RGB")) for image in examples["image"]] examples["image"] = torch.stack(transformed_images) return examples transformed_dataset = dataset.with_transform(transform_images) per_device_batch_size = math.ceil(self.batch_size / self.accelerator.num_processes) dataloader = DataLoader(transformed_dataset["train"], per_device_batch_size, sampler=None, shuffle=False, drop_last=True, pin_memory=True) num_samples = len(transformed_dataset) ## end of HuggingFace dataset # Note - in original repo, this is wrapped with cycle, but we will do that after accelerator prepares self.loader = dataloader # auto set augmentation prob for user if dataset is detected to be low # num_samples = len(self.dataset) if not 
exists(self.aug_prob) and num_samples < 1e5: self.aug_prob = min(0.5, (1e5 - num_samples) * 3e-6) print(f'autosetting augmentation probability to {round(self.aug_prob * 100)}%') def init_accelerator(self): # Initialize the accelerator. We will let the accelerator handle device placement. ddp_kwargs = DistributedDataParallelKwargs(find_unused_parameters=True) self.accelerator = Accelerator(kwargs_handlers=[ddp_kwargs], mixed_precision=self.mixed_precision, cpu=self.cpu) if self.accelerator.is_local_main_process: # set up Weights and Biases if requested if self.wandb: import wandb wandb.init(project=str(self.results_dir).split("/")[-1]) if not exists(self.GAN): self.init_GAN() G = self.GAN.G D = self.GAN.D D_aug = self.GAN.D_aug # discriminator loss fn self.set_data_src() # prepare G, D, D_aug, self.GAN.D_opt, self.GAN.G_opt, self.loader = self.accelerator.prepare(G, D, D_aug, self.GAN.D_opt, self.GAN.G_opt, self.loader) self.loader = cycle(self.loader) return G, D, D_aug def train(self, G, D, D_aug): assert exists(self.loader), 'You must first initialize the data source with `.set_data_src(<folder of images>)`' self.GAN.train() total_disc_loss = torch.zeros([], device=self.accelerator.device) total_gen_loss = torch.zeros([], device=self.accelerator.device) batch_size = math.ceil(self.batch_size / self.accelerator.num_processes) image_size = self.GAN.image_size latent_dim = self.GAN.latent_dim aug_prob = default(self.aug_prob, 0) aug_types = self.aug_types aug_kwargs = {'prob': aug_prob, 'types': aug_types} apply_gradient_penalty = self.steps % 4 == 0 # discriminator loss fn if self.dual_contrast_loss: D_loss_fn = dual_contrastive_loss else: D_loss_fn = hinge_loss # train discriminator self.GAN.D_opt.zero_grad() for i in range(self.gradient_accumulate_every): latents = torch.randn(batch_size, latent_dim, device=self.accelerator.device) image_batch = next(self.loader)["image"] image_batch.requires_grad_() with torch.no_grad(): generated_images = G(latents) fake_output, fake_output_32x32, _ = D_aug(generated_images, detach=True, **aug_kwargs) real_output, real_output_32x32, real_aux_loss = D_aug(image_batch, calc_aux_loss=True, **aug_kwargs) real_output_loss = real_output fake_output_loss = fake_output divergence = D_loss_fn(real_output_loss, fake_output_loss) divergence_32x32 = D_loss_fn(real_output_32x32, fake_output_32x32) disc_loss = divergence + divergence_32x32 aux_loss = real_aux_loss disc_loss = disc_loss + aux_loss if apply_gradient_penalty: outputs = [real_output, real_output_32x32] if self.accelerator.scaler is not None: outputs = list(map(self.accelerator.scaler.scale, outputs)) scaled_gradients = torch_grad(outputs=outputs, inputs=image_batch, grad_outputs=list( map(lambda t: torch.ones(t.size(), device=self.accelerator.device), outputs)), create_graph=True, retain_graph=True, only_inputs=True)[0] inv_scale = 1. 
if self.accelerator.scaler is not None: inv_scale = safe_div(1., self.accelerator.scaler.get_scale()) if inv_scale != float('inf'): gradients = scaled_gradients * inv_scale gradients = gradients.reshape(batch_size, -1) gp = self.gp_weight * ((gradients.norm(2, dim=1) - 1) ** 2).mean() if not torch.isnan(gp): disc_loss = disc_loss + gp self.last_gp_loss = gp.clone().detach().item() # divide loss by gradient accumulation steps since gradients # are accumulated for multiple backward passes in PyTorch disc_loss = disc_loss / self.gradient_accumulate_every disc_loss.register_hook(raise_if_nan) self.accelerator.backward(disc_loss) total_disc_loss += divergence self.last_recon_loss = aux_loss.item() self.d_loss = float(total_disc_loss.item() / self.gradient_accumulate_every) self.GAN.D_opt.step() # generator loss fn if self.dual_contrast_loss: G_loss_fn = dual_contrastive_loss G_requires_calc_real = True else: G_loss_fn = gen_hinge_loss G_requires_calc_real = False # train generator self.GAN.G_opt.zero_grad() for i in range(self.gradient_accumulate_every): latents = torch.randn(batch_size, latent_dim, device=self.accelerator.device) if G_requires_calc_real: image_batch = next(self.loader)["image"] image_batch.requires_grad_() generated_images = G(latents) fake_output, fake_output_32x32, _ = D_aug(generated_images, **aug_kwargs) real_output, real_output_32x32, _ = D_aug(image_batch, **aug_kwargs) if G_requires_calc_real else ( None, None, None) loss = G_loss_fn(fake_output, real_output) loss_32x32 = G_loss_fn(fake_output_32x32, real_output_32x32) gen_loss = loss + loss_32x32 gen_loss = gen_loss / self.gradient_accumulate_every gen_loss.register_hook(raise_if_nan) self.accelerator.backward(gen_loss) total_gen_loss += loss # divide loss by gradient accumulation steps since gradients # are accumulated for multiple backward passes in PyTorch self.g_loss = float(total_gen_loss.item() / self.gradient_accumulate_every) self.GAN.G_opt.step() # calculate moving averages if self.accelerator.is_main_process and self.steps % 10 == 0 and self.steps > 20000: self.GAN.EMA() if self.accelerator.is_main_process and self.steps <= 25000 and self.steps % 1000 == 2: self.GAN.reset_parameter_averaging() # save from NaN errors if any(torch.isnan(l) for l in (total_gen_loss, total_disc_loss)): print(f'NaN detected for generator or discriminator. 
Loading from checkpoint #{self.checkpoint_num}') self.load(self.checkpoint_num) raise NanException del total_disc_loss del total_gen_loss # periodically save results if self.accelerator.is_main_process: if self.steps % self.save_every == 0: self.save(self.checkpoint_num) if self.push_to_hub: with tempfile.TemporaryDirectory() as temp_dir: self.GAN.push_to_hub(temp_dir, self.repo_url, config=self.GAN.config, skip_lfs_files=True) if self.steps % self.evaluate_every == 0 or (self.steps % 100 == 0 and self.steps < 20000): self.evaluate(floor(self.steps / self.evaluate_every), num_image_tiles=self.num_image_tiles) if exists(self.calculate_fid_every) and self.steps % self.calculate_fid_every == 0 and self.steps != 0: num_batches = math.ceil(self.calculate_fid_num_images / self.batch_size) fid = self.calculate_fid(num_batches) self.last_fid = fid with open(str(self.results_dir / self.name / f'fid_scores.txt'), 'a') as f: f.write(f'{self.steps},{fid}\n') self.steps += 1 @torch.no_grad() def evaluate(self, num=0, num_image_tiles=4): self.GAN.eval() ext = self.image_extension num_rows = num_image_tiles latent_dim = self.GAN.latent_dim image_size = self.GAN.image_size # latents and noise latents = torch.randn(num_rows ** 2, latent_dim, device=self.accelerator.device) # regular generated_images = self.generate_(self.GAN.G, latents) file_name = str(self.results_dir / self.name / f'{str(num)}.{ext}') save_image(generated_images, file_name, nrow=num_rows) # moving averages generated_images = self.generate_(self.GAN.GE.to(self.accelerator.device), latents) file_name_ema = str(self.results_dir / self.name / f'{str(num)}-ema.{ext}') save_image(generated_images, file_name_ema, nrow=num_rows) if self.accelerator.is_local_main_process and self.wandb: import wandb wandb.log({'generated_examples': wandb.Image(str(file_name))}) wandb.log({'generated_examples_ema': wandb.Image(str(file_name_ema))}) @torch.no_grad() def generate(self, num=0, num_image_tiles=4, checkpoint=None, types=['default', 'ema']): self.GAN.eval() latent_dim = self.GAN.latent_dim dir_name = self.name + str('-generated-') + str(checkpoint) dir_full = Path().absolute() / self.results_dir / dir_name ext = self.image_extension if not dir_full.exists(): os.mkdir(dir_full) # regular if 'default' in types: for i in tqdm(range(num_image_tiles), desc='Saving generated default images'): latents = torch.randn(1, latent_dim, device=self.accelerator.device) generated_image = self.generate_(self.GAN.G, latents) path = str(self.results_dir / dir_name / f'{str(num)}-{str(i)}.{ext}') save_image(generated_image[0], path, nrow=1) # moving averages if 'ema' in types: for i in tqdm(range(num_image_tiles), desc='Saving generated EMA images'): latents = torch.randn(1, latent_dim, device=self.accelerator.device) generated_image = self.generate_(self.GAN.GE, latents) path = str(self.results_dir / dir_name / f'{str(num)}-{str(i)}-ema.{ext}') save_image(generated_image[0], path, nrow=1) return dir_full @torch.no_grad() def show_progress(self, num_images=4, types=['default', 'ema']): checkpoints = self.get_checkpoints() assert exists(checkpoints), 'cannot find any checkpoints to create a training progress video for' dir_name = self.name + str('-progress') dir_full = Path().absolute() / self.results_dir / dir_name ext = self.image_extension latents = None zfill_length = math.ceil(math.log10(len(checkpoints))) if not dir_full.exists(): os.mkdir(dir_full) for checkpoint in tqdm(checkpoints, desc='Generating progress images'): self.load(checkpoint, print_version=False) 
self.GAN.eval() if checkpoint == 0: latents = torch.randn(num_images, self.GAN.latent_dim, self.accelerator.device) # regular if 'default' in types: generated_image = self.generate_(self.GAN.G, latents) path = str(self.results_dir / dir_name / f'{str(checkpoint).zfill(zfill_length)}.{ext}') save_image(generated_image, path, nrow=num_images) # moving averages if 'ema' in types: generated_image = self.generate_(self.GAN.GE, latents) path = str(self.results_dir / dir_name / f'{str(checkpoint).zfill(zfill_length)}-ema.{ext}') save_image(generated_image, path, nrow=num_images) @torch.no_grad() def calculate_fid(self, num_batches): from pytorch_fid import fid_score real_path = self.fid_dir / 'real' fake_path = self.fid_dir / 'fake' # remove any existing files used for fid calculation and recreate directories if not real_path.exists() or self.clear_fid_cache: rmtree(real_path, ignore_errors=True) os.makedirs(real_path) for batch_num in tqdm(range(num_batches), desc='calculating FID - saving reals'): real_batch = next(self.loader)["image"] for k, image in enumerate(real_batch.unbind(0)): ind = k + batch_num * self.batch_size save_image(image, real_path / f'{ind}.png') # generate a bunch of fake images in results / name / fid_fake rmtree(fake_path, ignore_errors=True) os.makedirs(fake_path) self.GAN.eval() ext = self.image_extension latent_dim = self.GAN.latent_dim image_size = self.GAN.image_size for batch_num in tqdm(range(num_batches), desc='calculating FID - saving generated'): # latents and noise latents = torch.randn(self.batch_size, latent_dim, device=self.accelerator.device) # moving averages generated_images = self.generate_(self.GAN.GE, latents) for j, image in enumerate(generated_images.unbind(0)): ind = j + batch_num * self.batch_size save_image(image, str(fake_path / f'{str(ind)}-ema.{ext}')) return fid_score.calculate_fid_given_paths([str(real_path), str(fake_path)], 256, latents.device, 2048) @torch.no_grad() def generate_(self, G, style, num_image_tiles=8): generated_images = evaluate_in_chunks(self.batch_size, G, style) return generated_images.clamp_(0., 1.) 
@torch.no_grad() def generate_interpolation(self, num=0, num_image_tiles=8, num_steps=100, save_frames=False): self.GAN.eval() ext = self.image_extension num_rows = num_image_tiles latent_dim = self.GAN.latent_dim image_size = self.GAN.image_size # latents and noise latents_low = torch.randn(num_rows ** 2, latent_dim, device=self.accelerator.device) latents_high = torch.randn(num_rows ** 2, latent_dim, device=self.accelerator.device) ratios = torch.linspace(0., 8., num_steps) frames = [] for ratio in tqdm(ratios): interp_latents = slerp(ratio, latents_low, latents_high) generated_images = self.generate_(self.GAN.GE, interp_latents) images_grid = torchvision.utils.make_grid(generated_images, nrow=num_rows) pil_image = transforms.ToPILImage()(images_grid.cpu()) if self.transparent: background = Image.new('RGBA', pil_image.size, (255, 255, 255)) pil_image = Image.alpha_composite(background, pil_image) frames.append(pil_image) frames[0].save(str(self.results_dir / self.name / f'{str(num)}.gif'), save_all=True, append_images=frames[1:], duration=80, loop=0, optimize=True) if save_frames: folder_path = (self.results_dir / self.name / f'{str(num)}') folder_path.mkdir(parents=True, exist_ok=True) for ind, frame in enumerate(frames): frame.save(str(folder_path / f'{str(ind)}.{ext}')) def print_log(self): data = [ ('G', self.g_loss), ('D', self.d_loss), ('GP', self.last_gp_loss), ('SS', self.last_recon_loss), ('FID', self.last_fid) ] data = [d for d in data if exists(d[1])] log = ' | '.join(map(lambda n: f'{n[0]}: {n[1]:.2f}', data)) print(log) if self.accelerator.is_local_main_process: log_dict = {v[0]: v[1] for v in data} if self.wandb: import wandb wandb.log(log_dict) def model_name(self, num): return str(self.models_dir / self.name / f'model_{num}.pt') def init_folders(self): (self.results_dir / self.name).mkdir(parents=True, exist_ok=True) (self.models_dir / self.name).mkdir(parents=True, exist_ok=True) def clear(self): rmtree(str(self.models_dir / self.name), True) rmtree(str(self.results_dir / self.name), True) rmtree(str(self.fid_dir), True) rmtree(str(self.config_path), True) self.init_folders() def save(self, num): save_data = { 'GAN': self.GAN.state_dict(), } torch.save(save_data, self.model_name(num)) self.write_config() def load(self, num=-1): self.load_config() name = num if num == -1: checkpoints = self.get_checkpoints() if not exists(checkpoints): return name = checkpoints[-1] print(f'continuing from previous epoch - {name}') self.steps = name * self.save_every load_data = torch.load(self.model_name(name)) try: self.GAN.load_state_dict(load_data['GAN']) except Exception as e: print( 'unable to load save model. please try downgrading the package to the version specified by the saved model') raise e def get_checkpoints(self): file_paths = [p for p in Path(self.models_dir / self.name).glob('model_*.pt')] saved_nums = sorted(map(lambda x: int(x.stem.split('_')[1]), file_paths)) if len(saved_nums) == 0: return None return saved_nums
community-events-main
huggan/pytorch/lightweight_gan/lightweight_gan.py
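A minimal usage sketch for the Trainer defined above, assuming the file is importable as `huggan.pytorch.lightweight_gan.lightweight_gan` (as its repository path suggests) and that the chosen dataset is available; `NUM_STEPS` and the keyword values are illustrative, not defaults copied from the repository's CLI.
from huggan.pytorch.lightweight_gan.lightweight_gan import Trainer

NUM_STEPS = 1_000  # illustrative; real runs use many more steps

trainer = Trainer(
    dataset_name="huggan/CelebA-faces",
    image_size=128,
    batch_size=16,
    gradient_accumulate_every=4,
)

# Builds the GAN, the HF-datasets dataloader and the Accelerate wrappers,
# and returns the prepared generator and (augmented) discriminators.
G, D, D_aug = trainer.init_accelerator()

for _ in range(NUM_STEPS):
    trainer.train(G, D, D_aug)  # one discriminator + one generator update (with accumulation)
    if trainer.accelerator.is_main_process and trainer.steps % 50 == 0:
        trainer.print_log()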
community-events-main
huggan/pytorch/cyclegan/__init__.py
import random import time import datetime import sys from torch.autograd import Variable import torch import numpy as np from torchvision.utils import save_image class ReplayBuffer: def __init__(self, max_size=50): assert max_size > 0, "Empty buffer or trying to create a black hole. Be careful." self.max_size = max_size self.data = [] def push_and_pop(self, data): to_return = [] for element in data.data: element = torch.unsqueeze(element, 0) if len(self.data) < self.max_size: self.data.append(element) to_return.append(element) else: if random.uniform(0, 1) > 0.5: i = random.randint(0, self.max_size - 1) to_return.append(self.data[i].clone()) self.data[i] = element else: to_return.append(element) return Variable(torch.cat(to_return)) class LambdaLR: def __init__(self, n_epochs, offset, decay_start_epoch): assert (n_epochs - decay_start_epoch) > 0, "Decay must start before the training session ends!" self.n_epochs = n_epochs self.offset = offset self.decay_start_epoch = decay_start_epoch def step(self, epoch): return 1.0 - max(0, epoch + self.offset - self.decay_start_epoch) / (self.n_epochs - self.decay_start_epoch)
community-events-main
huggan/pytorch/cyclegan/utils.py
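A short, hedged check of the two helpers above, mirroring the `from utils import ...` style used by the accompanying train.py (so it assumes execution from the same directory). It prints the linear learning-rate decay factors produced by `LambdaLR.step` and shows that `ReplayBuffer.push_and_pop` returns a batch of the same shape it was fed, drawn partly from its history once the buffer fills up.
import torch

from utils import LambdaLR, ReplayBuffer

# Decay factor stays at 1.0 until decay_start_epoch, then falls linearly towards 0 at n_epochs.
schedule = LambdaLR(n_epochs=200, offset=0, decay_start_epoch=100)
print([round(schedule.step(e), 2) for e in (0, 100, 150, 199)])  # [1.0, 1.0, 0.5, 0.01]

# The buffer stores up to max_size previously generated fakes and, once full, sometimes
# swaps fresh fakes for stored ones, so the discriminator also sees older generator output.
buffer = ReplayBuffer(max_size=50)
fakes = torch.randn(4, 3, 256, 256)
history_batch = buffer.push_and_pop(fakes)
print(history_batch.shape)  # torch.Size([4, 3, 256, 256])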
import argparse import os import numpy as np import itertools from pathlib import Path import datetime import time import sys from PIL import Image from torchvision.transforms import Compose, Resize, ToTensor, Normalize, RandomCrop, RandomHorizontalFlip from torchvision.utils import save_image, make_grid from torch.utils.data import DataLoader from modeling_cyclegan import GeneratorResNet, Discriminator from utils import ReplayBuffer, LambdaLR from datasets import load_dataset from accelerate import Accelerator import torch.nn as nn import torch def parse_args(args=None): parser = argparse.ArgumentParser() parser.add_argument("--epoch", type=int, default=0, help="epoch to start training from") parser.add_argument("--num_epochs", type=int, default=200, help="number of epochs of training") parser.add_argument("--dataset_name", type=str, default="huggan/facades", help="name of the dataset") parser.add_argument("--batch_size", type=int, default=1, help="size of the batches") parser.add_argument("--lr", type=float, default=0.0002, help="adam: learning rate") parser.add_argument("--beta1", type=float, default=0.5, help="adam: decay of first order momentum of gradient") parser.add_argument("--beta2", type=float, default=0.999, help="adam: decay of first order momentum of gradient") parser.add_argument("--decay_epoch", type=int, default=100, help="epoch from which to start lr decay") parser.add_argument("--num_workers", type=int, default=8, help="Number of CPU threads to use during batch generation") parser.add_argument("--image_size", type=int, default=256, help="Size of images for training") parser.add_argument("--channels", type=int, default=3, help="Number of image channels") parser.add_argument("--sample_interval", type=int, default=100, help="interval between saving generator outputs") parser.add_argument("--checkpoint_interval", type=int, default=-1, help="interval between saving model checkpoints") parser.add_argument("--n_residual_blocks", type=int, default=9, help="number of residual blocks in generator") parser.add_argument("--lambda_cyc", type=float, default=10.0, help="cycle loss weight") parser.add_argument("--lambda_id", type=float, default=5.0, help="identity loss weight") parser.add_argument("--fp16", action="store_true", help="If passed, will use FP16 training.") parser.add_argument( "--mixed_precision", type=str, default="no", choices=["no", "fp16", "bf16"], help="Whether to use mixed precision. Choose" "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10." "and an Nvidia Ampere GPU.", ) parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.") parser.add_argument( "--push_to_hub", action="store_true", help="Whether to push the model to the HuggingFace hub after training.", ) parser.add_argument( "--pytorch_dump_folder_path", required="--push_to_hub" in sys.argv, type=Path, help="Path to save the model. 
Will be created if it doesn't exist already.", ) parser.add_argument( "--model_name", required="--push_to_hub" in sys.argv, type=str, help="Name of the model on the hub.", ) parser.add_argument( "--organization_name", required=False, default="huggan", type=str, help="Organization name to push to, in case args.push_to_hub is specified.", ) return parser.parse_args(args=args) def weights_init_normal(m): classname = m.__class__.__name__ if classname.find("Conv") != -1: torch.nn.init.normal_(m.weight.data, 0.0, 0.02) if hasattr(m, "bias") and m.bias is not None: torch.nn.init.constant_(m.bias.data, 0.0) elif classname.find("BatchNorm2d") != -1: torch.nn.init.normal_(m.weight.data, 1.0, 0.02) torch.nn.init.constant_(m.bias.data, 0.0) def training_function(config, args): accelerator = Accelerator(fp16=args.fp16, cpu=args.cpu, mixed_precision=args.mixed_precision) # Create sample and checkpoint directories os.makedirs("images/%s" % args.dataset_name, exist_ok=True) os.makedirs("saved_models/%s" % args.dataset_name, exist_ok=True) # Losses criterion_GAN = torch.nn.MSELoss() criterion_cycle = torch.nn.L1Loss() criterion_identity = torch.nn.L1Loss() input_shape = (args.channels, args.image_size, args.image_size) # Calculate output shape of image discriminator (PatchGAN) output_shape = (1, args.image_size // 2 ** 4, args.image_size // 2 ** 4) # Initialize generator and discriminator G_AB = GeneratorResNet(input_shape, args.n_residual_blocks) G_BA = GeneratorResNet(input_shape, args.n_residual_blocks) D_A = Discriminator(args.channels) D_B = Discriminator(args.channels) if args.epoch != 0: # Load pretrained models G_AB.load_state_dict(torch.load("saved_models/%s/G_AB_%d.pth" % (args.dataset_name, args.epoch))) G_BA.load_state_dict(torch.load("saved_models/%s/G_BA_%d.pth" % (args.dataset_name, args.epoch))) D_A.load_state_dict(torch.load("saved_models/%s/D_A_%d.pth" % (args.dataset_name, args.epoch))) D_B.load_state_dict(torch.load("saved_models/%s/D_B_%d.pth" % (args.dataset_name, args.epoch))) else: # Initialize weights G_AB.apply(weights_init_normal) G_BA.apply(weights_init_normal) D_A.apply(weights_init_normal) D_B.apply(weights_init_normal) # Optimizers optimizer_G = torch.optim.Adam( itertools.chain(G_AB.parameters(), G_BA.parameters()), lr=args.lr, betas=(args.beta1, args.beta2) ) optimizer_D_A = torch.optim.Adam(D_A.parameters(), lr=args.lr, betas=(args.beta1, args.beta2)) optimizer_D_B = torch.optim.Adam(D_B.parameters(), lr=args.lr, betas=(args.beta1, args.beta2)) # Learning rate update schedulers lr_scheduler_G = torch.optim.lr_scheduler.LambdaLR( optimizer_G, lr_lambda=LambdaLR(args.num_epochs, args.epoch, args.decay_epoch).step ) lr_scheduler_D_A = torch.optim.lr_scheduler.LambdaLR( optimizer_D_A, lr_lambda=LambdaLR(args.num_epochs, args.epoch, args.decay_epoch).step ) lr_scheduler_D_B = torch.optim.lr_scheduler.LambdaLR( optimizer_D_B, lr_lambda=LambdaLR(args.num_epochs, args.epoch, args.decay_epoch).step ) # Buffers of previously generated samples fake_A_buffer = ReplayBuffer() fake_B_buffer = ReplayBuffer() # Image transformations transform = Compose([ Resize(int(args.image_size * 1.12), Image.BICUBIC), RandomCrop((args.image_size, args.image_size)), RandomHorizontalFlip(), ToTensor(), Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)), ]) def transforms(examples): examples["A"] = [transform(image.convert("RGB")) for image in examples["imageA"]] examples["B"] = [transform(image.convert("RGB")) for image in examples["imageB"]] del examples["imageA"] del examples["imageB"] return examples 
dataset = load_dataset(args.dataset_name) transformed_dataset = dataset.with_transform(transforms) splits = transformed_dataset['train'].train_test_split(test_size=0.1) train_ds = splits['train'] val_ds = splits['test'] dataloader = DataLoader(train_ds, shuffle=True, batch_size=args.batch_size, num_workers=args.num_workers) val_dataloader = DataLoader(val_ds, batch_size=5, shuffle=True, num_workers=1) def sample_images(batches_done): """Saves a generated sample from the test set""" batch = next(iter(val_dataloader)) G_AB.eval() G_BA.eval() real_A = batch["A"] fake_B = G_AB(real_A) real_B = batch["B"] fake_A = G_BA(real_B) # Arange images along x-axis real_A = make_grid(real_A, nrow=5, normalize=True) real_B = make_grid(real_B, nrow=5, normalize=True) fake_A = make_grid(fake_A, nrow=5, normalize=True) fake_B = make_grid(fake_B, nrow=5, normalize=True) # Arange images along y-axis image_grid = torch.cat((real_A, fake_B, real_B, fake_A), 1) save_image(image_grid, "images/%s/%s.png" % (args.dataset_name, batches_done), normalize=False) G_AB, G_BA, D_A, D_B, optimizer_G, optimizer_D_A, optimizer_D_B, dataloader, val_dataloader = accelerator.prepare(G_AB, G_BA, D_A, D_B, optimizer_G, optimizer_D_A, optimizer_D_B, dataloader, val_dataloader) # ---------- # Training # ---------- prev_time = time.time() for epoch in range(args.epoch, args.num_epochs): for i, batch in enumerate(dataloader): # Set model input real_A = batch["A"] real_B = batch["B"] # Adversarial ground truths valid = torch.ones((real_A.size(0), *output_shape), device=accelerator.device) fake = torch.zeros((real_A.size(0), *output_shape), device=accelerator.device) # ------------------ # Train Generators # ------------------ G_AB.train() G_BA.train() optimizer_G.zero_grad() # Identity loss loss_id_A = criterion_identity(G_BA(real_A), real_A) loss_id_B = criterion_identity(G_AB(real_B), real_B) loss_identity = (loss_id_A + loss_id_B) / 2 # GAN loss fake_B = G_AB(real_A) loss_GAN_AB = criterion_GAN(D_B(fake_B), valid) fake_A = G_BA(real_B) loss_GAN_BA = criterion_GAN(D_A(fake_A), valid) loss_GAN = (loss_GAN_AB + loss_GAN_BA) / 2 # Cycle loss recov_A = G_BA(fake_B) loss_cycle_A = criterion_cycle(recov_A, real_A) recov_B = G_AB(fake_A) loss_cycle_B = criterion_cycle(recov_B, real_B) loss_cycle = (loss_cycle_A + loss_cycle_B) / 2 # Total loss loss_G = loss_GAN + args.lambda_cyc * loss_cycle + args.lambda_id * loss_identity accelerator.backward(loss_G) optimizer_G.step() # ----------------------- # Train Discriminator A # ----------------------- optimizer_D_A.zero_grad() # Real loss loss_real = criterion_GAN(D_A(real_A), valid) # Fake loss (on batch of previously generated samples) fake_A_ = fake_A_buffer.push_and_pop(fake_A) loss_fake = criterion_GAN(D_A(fake_A_.detach()), fake) # Total loss loss_D_A = (loss_real + loss_fake) / 2 accelerator.backward(loss_D_A) optimizer_D_A.step() # ----------------------- # Train Discriminator B # ----------------------- optimizer_D_B.zero_grad() # Real loss loss_real = criterion_GAN(D_B(real_B), valid) # Fake loss (on batch of previously generated samples) fake_B_ = fake_B_buffer.push_and_pop(fake_B) loss_fake = criterion_GAN(D_B(fake_B_.detach()), fake) # Total loss loss_D_B = (loss_real + loss_fake) / 2 accelerator.backward(loss_D_B) optimizer_D_B.step() loss_D = (loss_D_A + loss_D_B) / 2 # -------------- # Log Progress # -------------- # Determine approximate time left batches_done = epoch * len(dataloader) + i batches_left = args.num_epochs * len(dataloader) - batches_done time_left = 
datetime.timedelta(seconds=batches_left * (time.time() - prev_time)) prev_time = time.time() # Print log sys.stdout.write( "\r[Epoch %d/%d] [Batch %d/%d] [D loss: %f] [G loss: %f, adv: %f, cycle: %f, identity: %f] ETA: %s" % ( epoch, args.num_epochs, i, len(dataloader), loss_D.item(), loss_G.item(), loss_GAN.item(), loss_cycle.item(), loss_identity.item(), time_left, ) ) # If at sample interval save image if batches_done % args.sample_interval == 0: sample_images(batches_done) # Update learning rates lr_scheduler_G.step() lr_scheduler_D_A.step() lr_scheduler_D_B.step() if args.checkpoint_interval != -1 and epoch % args.checkpoint_interval == 0: # Save model checkpoints torch.save(G_AB.state_dict(), "saved_models/%s/G_AB_%d.pth" % (args.dataset_name, epoch)) torch.save(G_BA.state_dict(), "saved_models/%s/G_BA_%d.pth" % (args.dataset_name, epoch)) torch.save(D_A.state_dict(), "saved_models/%s/D_A_%d.pth" % (args.dataset_name, epoch)) torch.save(D_B.state_dict(), "saved_models/%s/D_B_%d.pth" % (args.dataset_name, epoch)) # Optionally push to hub if args.push_to_hub: save_directory = args.pytorch_dump_folder_path if not save_directory.exists(): save_directory.mkdir(parents=True) G_AB.push_to_hub( repo_path_or_name=save_directory / args.model_name, organization=args.organization_name, ) def main(): args = parse_args() print(args) # Make directory for saving generated images os.makedirs("images", exist_ok=True) training_function({}, args) if __name__ == "__main__": main()
community-events-main
huggan/pytorch/cyclegan/train.py
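A hedged sketch of how the script above derives its PatchGAN target shape from the parsed arguments, without starting a run; it assumes the file is importable as `train` from its own directory. In practice the script is intended to be launched through Accelerate, e.g. `accelerate launch train.py --dataset_name huggan/facades` (illustrative command; adjust flags to your setup).
from train import parse_args

# Parse a minimal, illustrative argument list instead of the real command line.
args = parse_args(["--dataset_name", "huggan/facades", "--image_size", "256", "--num_epochs", "200"])
print(args.lr, args.decay_epoch)  # 0.0002 100 (Adam LR and the epoch at which linear decay starts)

# Four stride-2 discriminator blocks shrink the image by 2**4, giving the PatchGAN target shape.
output_shape = (1, args.image_size // 2 ** 4, args.image_size // 2 ** 4)
print(output_shape)  # (1, 16, 16)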
import torch.nn as nn import torch.nn.functional as F import torch from huggan.pytorch.huggan_mixin import HugGANModelHubMixin ############################## # RESNET ############################## class ResidualBlock(nn.Module): def __init__(self, in_features): super(ResidualBlock, self).__init__() self.block = nn.Sequential( nn.ReflectionPad2d(1), nn.Conv2d(in_features, in_features, 3), nn.InstanceNorm2d(in_features), nn.ReLU(inplace=True), nn.ReflectionPad2d(1), nn.Conv2d(in_features, in_features, 3), nn.InstanceNorm2d(in_features), ) def forward(self, x): return x + self.block(x) class GeneratorResNet(nn.Module, HugGANModelHubMixin): def __init__(self, input_shape, num_residual_blocks): super(GeneratorResNet, self).__init__() channels = input_shape[0] # Initial convolution block out_features = 64 model = [ nn.ReflectionPad2d(channels), nn.Conv2d(channels, out_features, 7), nn.InstanceNorm2d(out_features), nn.ReLU(inplace=True), ] in_features = out_features # Downsampling for _ in range(2): out_features *= 2 model += [ nn.Conv2d(in_features, out_features, 3, stride=2, padding=1), nn.InstanceNorm2d(out_features), nn.ReLU(inplace=True), ] in_features = out_features # Residual blocks for _ in range(num_residual_blocks): model += [ResidualBlock(out_features)] # Upsampling for _ in range(2): out_features //= 2 model += [ nn.Upsample(scale_factor=2), nn.Conv2d(in_features, out_features, 3, stride=1, padding=1), nn.InstanceNorm2d(out_features), nn.ReLU(inplace=True), ] in_features = out_features # Output layer model += [nn.ReflectionPad2d(channels), nn.Conv2d(out_features, channels, 7), nn.Tanh()] self.model = nn.Sequential(*model) def forward(self, x): return self.model(x) ############################## # Discriminator ############################## class Discriminator(nn.Module): def __init__(self, channels): super(Discriminator, self).__init__() def discriminator_block(in_filters, out_filters, normalize=True): """Returns downsampling layers of each discriminator block""" layers = [nn.Conv2d(in_filters, out_filters, 4, stride=2, padding=1)] if normalize: layers.append(nn.InstanceNorm2d(out_filters)) layers.append(nn.LeakyReLU(0.2, inplace=True)) return layers self.model = nn.Sequential( *discriminator_block(channels, 64, normalize=False), *discriminator_block(64, 128), *discriminator_block(128, 256), *discriminator_block(256, 512), nn.ZeroPad2d((1, 0, 1, 0)), nn.Conv2d(512, 1, 4, padding=1) ) def forward(self, img): return self.model(img)
community-events-main
huggan/pytorch/cyclegan/modeling_cyclegan.py
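A hedged shape check for the CycleGAN modules above (assuming the file is importable as `modeling_cyclegan` and the `huggan` package it imports is on the path): the generator preserves the input resolution, while the discriminator's four stride-2 blocks reduce a 256x256 image to a 16x16 PatchGAN map.
import torch

from modeling_cyclegan import Discriminator, GeneratorResNet

input_shape = (3, 256, 256)
G = GeneratorResNet(input_shape, num_residual_blocks=9)
D = Discriminator(channels=3)

x = torch.randn(1, *input_shape)
with torch.no_grad():
    fake = G(x)      # translated image, same resolution as the input
    patch = D(fake)  # per-patch real/fake scores
print(fake.shape)   # torch.Size([1, 3, 256, 256])
print(patch.shape)  # torch.Size([1, 1, 16, 16])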
#!/usr/bin/env python # coding=utf-8 # Copyright (c) 2022 Erik Linder-Norén and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions. import torch.nn as nn import torch.nn.functional as F import torch from huggan.pytorch.huggan_mixin import HugGANModelHubMixin def weights_init_normal(m): classname = m.__class__.__name__ if classname.find("Conv") != -1: torch.nn.init.normal_(m.weight.data, 0.0, 0.02) elif classname.find("BatchNorm2d") != -1: torch.nn.init.normal_(m.weight.data, 1.0, 0.02) torch.nn.init.constant_(m.bias.data, 0.0) ############################## # U-NET ############################## class UNetDown(nn.Module): def __init__(self, in_size, out_size, normalize=True, dropout=0.0): super(UNetDown, self).__init__() layers = [nn.Conv2d(in_size, out_size, 4, 2, 1, bias=False)] if normalize: layers.append(nn.InstanceNorm2d(out_size)) layers.append(nn.LeakyReLU(0.2)) if dropout: layers.append(nn.Dropout(dropout)) self.model = nn.Sequential(*layers) def forward(self, x): return self.model(x) class UNetUp(nn.Module): def __init__(self, in_size, out_size, dropout=0.0): super(UNetUp, self).__init__() layers = [ nn.ConvTranspose2d(in_size, out_size, 4, 2, 1, bias=False), nn.InstanceNorm2d(out_size), nn.ReLU(inplace=True), ] if dropout: layers.append(nn.Dropout(dropout)) self.model = nn.Sequential(*layers) def forward(self, x, skip_input): x = self.model(x) x = torch.cat((x, skip_input), 1) return x class GeneratorUNet(nn.Module, HugGANModelHubMixin): def __init__(self, in_channels=3, out_channels=3): super(GeneratorUNet, self).__init__() self.down1 = UNetDown(in_channels, 64, normalize=False) self.down2 = UNetDown(64, 128) self.down3 = UNetDown(128, 256) self.down4 = UNetDown(256, 512, dropout=0.5) self.down5 = UNetDown(512, 512, dropout=0.5) self.down6 = UNetDown(512, 512, dropout=0.5) self.down7 = UNetDown(512, 512, dropout=0.5) self.down8 = UNetDown(512, 512, normalize=False, dropout=0.5) self.up1 = UNetUp(512, 512, dropout=0.5) self.up2 = UNetUp(1024, 512, dropout=0.5) self.up3 = UNetUp(1024, 512, dropout=0.5) self.up4 = UNetUp(1024, 512, dropout=0.5) self.up5 = UNetUp(1024, 256) self.up6 = UNetUp(512, 128) self.up7 = UNetUp(256, 64) self.final = nn.Sequential( nn.Upsample(scale_factor=2), nn.ZeroPad2d((1, 0, 1, 0)), nn.Conv2d(128, out_channels, 4, padding=1), nn.Tanh(), ) def forward(self, x): # U-Net generator with skip connections from encoder to decoder d1 = self.down1(x) d2 = self.down2(d1) d3 = self.down3(d2) d4 = self.down4(d3) d5 = self.down5(d4) d6 = self.down6(d5) d7 = self.down7(d6) d8 = self.down8(d7) u1 = self.up1(d8, d7) u2 = self.up2(u1, d6) u3 = self.up3(u2, d5) u4 = self.up4(u3, d4) u5 = self.up5(u4, d3) u6 = self.up6(u5, d2) u7 = self.up7(u6, d1) return self.final(u7) ############################## # Discriminator ############################## class Discriminator(nn.Module): def __init__(self, in_channels=3): super(Discriminator, self).__init__() def discriminator_block(in_filters, out_filters, normalization=True): """Returns downsampling layers of each discriminator block""" 
layers = [nn.Conv2d(in_filters, out_filters, 4, stride=2, padding=1)] if normalization: layers.append(nn.InstanceNorm2d(out_filters)) layers.append(nn.LeakyReLU(0.2, inplace=True)) return layers self.model = nn.Sequential( *discriminator_block(in_channels * 2, 64, normalization=False), *discriminator_block(64, 128), *discriminator_block(128, 256), *discriminator_block(256, 512), nn.ZeroPad2d((1, 0, 1, 0)), nn.Conv2d(512, 1, 4, padding=1, bias=False) ) def forward(self, img_A, img_B): # Concatenate image and condition image by channels to produce input img_input = torch.cat((img_A, img_B), 1) return self.model(img_input)
community-events-main
huggan/pytorch/pix2pix/modeling_pix2pix.py
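A hedged shape check for the pix2pix modules above (assuming the file is importable as `modeling_pix2pix` with the `huggan` package on the path). Unlike the CycleGAN discriminator, this one is conditional: it scores the translated image concatenated channel-wise with the input image, again producing a 16x16 PatchGAN map for 256x256 inputs.
import torch

from modeling_pix2pix import Discriminator, GeneratorUNet

G = GeneratorUNet(in_channels=3, out_channels=3)
D = Discriminator(in_channels=3)

real_A = torch.randn(1, 3, 256, 256)  # conditioning (input) image
with torch.no_grad():
    fake_B = G(real_A)        # U-Net translation with skip connections
    pred = D(fake_B, real_A)  # conditional PatchGAN prediction
print(fake_B.shape)  # torch.Size([1, 3, 256, 256])
print(pred.shape)    # torch.Size([1, 1, 16, 16])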
community-events-main
huggan/pytorch/pix2pix/__init__.py
#!/usr/bin/env python # coding=utf-8 # Copyright (c) 2022 Erik Linder-Norén and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions. import argparse import os from pathlib import Path import numpy as np import time import datetime import sys import tempfile from torchvision.transforms import Compose, Resize, ToTensor, Normalize, RandomVerticalFlip from torchvision.utils import save_image from PIL import Image from torch.utils.data import DataLoader from modeling_pix2pix import GeneratorUNet, Discriminator from datasets import load_dataset from accelerate import Accelerator import torch.nn as nn import torch from huggan.utils.hub import get_full_repo_name from huggingface_hub import create_repo def parse_args(args=None): parser = argparse.ArgumentParser() parser.add_argument("--dataset", type=str, default="huggan/facades", help="Dataset to use") parser.add_argument("--epoch", type=int, default=0, help="epoch to start training from") parser.add_argument("--n_epochs", type=int, default=200, help="number of epochs of training") parser.add_argument("--batch_size", type=int, default=1, help="size of the batches") parser.add_argument("--lr", type=float, default=0.0002, help="adam: learning rate") parser.add_argument("--b1", type=float, default=0.5, help="adam: decay of first order momentum of gradient") parser.add_argument("--b2", type=float, default=0.999, help="adam: decay of first order momentum of gradient") parser.add_argument("--decay_epoch", type=int, default=100, help="epoch from which to start lr decay") parser.add_argument("--n_cpu", type=int, default=8, help="number of cpu threads to use during batch generation") parser.add_argument("--image_size", type=int, default=256, help="size of images for training") parser.add_argument("--channels", type=int, default=3, help="number of image channels") parser.add_argument( "--sample_interval", type=int, default=500, help="interval between sampling of images from generators" ) parser.add_argument("--checkpoint_interval", type=int, default=-1, help="interval between model checkpoints") parser.add_argument("--fp16", action="store_true", help="If passed, will use FP16 training.") parser.add_argument( "--mixed_precision", type=str, default="no", choices=["no", "fp16", "bf16"], help="Whether to use mixed precision. Choose" "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10." 
"and an Nvidia Ampere GPU.", ) parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.") parser.add_argument( "--push_to_hub", action="store_true", help="Whether to push the model to the HuggingFace hub after training.", ) parser.add_argument( "--model_name", required="--push_to_hub" in sys.argv, type=str, help="Name of the model on the hub.", ) parser.add_argument( "--organization_name", required=False, default="huggan", type=str, help="Organization name to push to, in case args.push_to_hub is specified.", ) return parser.parse_args(args=args) # Custom weights initialization called on Generator and Discriminator def weights_init_normal(m): classname = m.__class__.__name__ if classname.find("Conv") != -1: torch.nn.init.normal_(m.weight.data, 0.0, 0.02) elif classname.find("BatchNorm2d") != -1: torch.nn.init.normal_(m.weight.data, 1.0, 0.02) torch.nn.init.constant_(m.bias.data, 0.0) def training_function(config, args): accelerator = Accelerator(fp16=args.fp16, cpu=args.cpu, mixed_precision=args.mixed_precision) os.makedirs("images/%s" % args.dataset, exist_ok=True) os.makedirs("saved_models/%s" % args.dataset, exist_ok=True) repo_name = get_full_repo_name(args.model_name, args.organization_name) if args.push_to_hub: if accelerator.is_main_process: repo_url = create_repo(repo_name, exist_ok=True) # Loss functions criterion_GAN = torch.nn.MSELoss() criterion_pixelwise = torch.nn.L1Loss() # Loss weight of L1 pixel-wise loss between translated image and real image lambda_pixel = 100 # Calculate output of image discriminator (PatchGAN) patch = (1, args.image_size // 2 ** 4, args.image_size // 2 ** 4) # Initialize generator and discriminator generator = GeneratorUNet() discriminator = Discriminator() if args.epoch != 0: # Load pretrained models generator.load_state_dict(torch.load("saved_models/%s/generator_%d.pth" % (args.dataset, args.epoch))) discriminator.load_state_dict(torch.load("saved_models/%s/discriminator_%d.pth" % (args.dataset, args.epoch))) else: # Initialize weights generator.apply(weights_init_normal) discriminator.apply(weights_init_normal) # Optimizers optimizer_G = torch.optim.Adam(generator.parameters(), lr=args.lr, betas=(args.b1, args.b2)) optimizer_D = torch.optim.Adam(discriminator.parameters(), lr=args.lr, betas=(args.b1, args.b2)) # Configure dataloaders transform = Compose( [ Resize((args.image_size, args.image_size), Image.BICUBIC), ToTensor(), Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)), ] ) def transforms(examples): # random vertical flip imagesA = [] imagesB = [] for imageA, imageB in zip(examples['imageA'], examples['imageB']): if np.random.random() < 0.5: imageA = Image.fromarray(np.array(imageA)[:, ::-1, :], "RGB") imageB = Image.fromarray(np.array(imageB)[:, ::-1, :], "RGB") imagesA.append(imageA) imagesB.append(imageB) # transforms examples["A"] = [transform(image.convert("RGB")) for image in imagesA] examples["B"] = [transform(image.convert("RGB")) for image in imagesB] del examples["imageA"] del examples["imageB"] return examples dataset = load_dataset(args.dataset) transformed_dataset = dataset.with_transform(transforms) splits = transformed_dataset['train'].train_test_split(test_size=0.1) train_ds = splits['train'] val_ds = splits['test'] dataloader = DataLoader(train_ds, shuffle=True, batch_size=args.batch_size, num_workers=args.n_cpu) val_dataloader = DataLoader(val_ds, batch_size=10, shuffle=True, num_workers=1) def sample_images(batches_done, accelerator): """Saves a generated sample from the validation set""" batch = 
next(iter(val_dataloader)) real_A = batch["A"] real_B = batch["B"] fake_B = generator(real_A) img_sample = torch.cat((real_A.data, fake_B.data, real_B.data), -2) if accelerator.is_main_process: save_image(img_sample, "images/%s/%s.png" % (args.dataset, batches_done), nrow=5, normalize=True) generator, discriminator, optimizer_G, optimizer_D, dataloader, val_dataloader = accelerator.prepare(generator, discriminator, optimizer_G, optimizer_D, dataloader, val_dataloader) # ---------- # Training # ---------- prev_time = time.time() for epoch in range(args.epoch, args.n_epochs): print("Epoch:", epoch) for i, batch in enumerate(dataloader): # Model inputs real_A = batch["A"] real_B = batch["B"] # Adversarial ground truths valid = torch.ones((real_A.size(0), *patch), device=accelerator.device) fake = torch.zeros((real_A.size(0), *patch), device=accelerator.device) # ------------------ # Train Generators # ------------------ optimizer_G.zero_grad() # GAN loss fake_B = generator(real_A) pred_fake = discriminator(fake_B, real_A) loss_GAN = criterion_GAN(pred_fake, valid) # Pixel-wise loss loss_pixel = criterion_pixelwise(fake_B, real_B) # Total loss loss_G = loss_GAN + lambda_pixel * loss_pixel accelerator.backward(loss_G) optimizer_G.step() # --------------------- # Train Discriminator # --------------------- optimizer_D.zero_grad() # Real loss pred_real = discriminator(real_B, real_A) loss_real = criterion_GAN(pred_real, valid) # Fake loss pred_fake = discriminator(fake_B.detach(), real_A) loss_fake = criterion_GAN(pred_fake, fake) # Total loss loss_D = 0.5 * (loss_real + loss_fake) accelerator.backward(loss_D) optimizer_D.step() # -------------- # Log Progress # -------------- # Determine approximate time left batches_done = epoch * len(dataloader) + i batches_left = args.n_epochs * len(dataloader) - batches_done time_left = datetime.timedelta(seconds=batches_left * (time.time() - prev_time)) prev_time = time.time() # Print log sys.stdout.write( "\r[Epoch %d/%d] [Batch %d/%d] [D loss: %f] [G loss: %f, pixel: %f, adv: %f] ETA: %s" % ( epoch, args.n_epochs, i, len(dataloader), loss_D.item(), loss_G.item(), loss_pixel.item(), loss_GAN.item(), time_left, ) ) # If at sample interval save image if batches_done % args.sample_interval == 0: sample_images(batches_done, accelerator) if args.checkpoint_interval != -1 and epoch % args.checkpoint_interval == 0: if accelerator.is_main_process: unwrapped_generator = accelerator.unwrap_model(generator) unwrapped_discriminator = accelerator.unwrap_model(discriminator) # Save model checkpoints torch.save(unwrapped_generator.state_dict(), "saved_models/%s/generator_%d.pth" % (args.dataset, epoch)) torch.save(unwrapped_discriminator.state_dict(), "saved_models/%s/discriminator_%d.pth" % (args.dataset, epoch)) # Optionally push to hub if args.push_to_hub: if accelerator.is_main_process: with tempfile.TemporaryDirectory() as temp_dir: unwrapped_generator = accelerator.unwrap_model(generator) unwrapped_generator.push_to_hub( repo_path_or_name=temp_dir, repo_url=repo_url, commit_message=f"Training in progress, epoch {epoch}", skip_lfs_files=True ) def main(): args = parse_args() print(args) training_function({}, args) if __name__ == "__main__": main()
community-events-main
huggan/pytorch/pix2pix/train.py
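A hedged, stand-alone restatement of the paired flip inside the `transforms` function of the training script above. The helper name `paired_random_mirror` is illustrative; the array operation is copied from the script. Note that for an H x W x C array, `[:, ::-1, :]` reverses the width axis, so the augmentation mirrors both images of a pair left-right while keeping them aligned.
import numpy as np
from PIL import Image


def paired_random_mirror(image_a: Image.Image, image_b: Image.Image, p: float = 0.5):
    """Mirror both images of an aligned pix2pix pair with probability `p`."""
    if np.random.random() < p:
        image_a = Image.fromarray(np.array(image_a)[:, ::-1, :], "RGB")
        image_b = Image.fromarray(np.array(image_b)[:, ::-1, :], "RGB")
    return image_a, image_b


if __name__ == "__main__":
    a = Image.new("RGB", (8, 8), (255, 0, 0))
    b = Image.new("RGB", (8, 8), (0, 255, 0))
    a2, b2 = paired_random_mirror(a, b, p=1.0)
    print(a2.size, b2.size)  # (8, 8) (8, 8): both images flipped together, sizes unchanged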
#!/usr/bin/env python # coding=utf-8 # Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Fine-tuning the library models for sequence to sequence speech recognition with 🤗 Datasets' streaming mode. """ # You can also adapt this script for your own sequence to sequence speech # recognition task. Pointers for this are left as comments. import logging import os import sys from dataclasses import dataclass, field from typing import Any, Dict, List, Optional, Union import datasets import torch from datasets import DatasetDict, IterableDatasetDict, interleave_datasets, load_dataset from torch.utils.data import IterableDataset import evaluate import transformers from transformers import ( AutoConfig, AutoFeatureExtractor, AutoModelForSpeechSeq2Seq, AutoProcessor, AutoTokenizer, HfArgumentParser, Seq2SeqTrainer, Seq2SeqTrainingArguments, TrainerCallback, set_seed, ) from transformers.models.whisper.english_normalizer import BasicTextNormalizer from transformers.trainer_pt_utils import IterableDatasetShard from transformers.trainer_utils import get_last_checkpoint, is_main_process from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("4.25.0.dev0") require_version("datasets>=1.18.2", "To fix: pip install -r examples/pytorch/speech-recognition/requirements.txt") logger = logging.getLogger(__name__) @dataclass class ModelArguments: """ Arguments pertaining to which model/config/tokenizer we are going to fine-tune from. """ model_name_or_path: str = field( metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} ) config_name: Optional[str] = field( default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} ) tokenizer_name: Optional[str] = field( default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) feature_extractor_name: Optional[str] = field( default=None, metadata={"help": "feature extractor name or path if not the same as model_name"} ) cache_dir: Optional[str] = field( default=None, metadata={"help": "Where to store the pretrained models downloaded from huggingface.co"}, ) use_fast_tokenizer: bool = field( default=True, metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."}, ) model_revision: str = field( default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}, ) use_auth_token: bool = field( default=False, metadata={ "help": ( "Will use the token generated when running `huggingface-cli login` (necessary to use this script " "with private models)." 
) }, ) freeze_feature_encoder: bool = field( default=True, metadata={"help": "Whether to freeze the feature encoder layers of the model."} ) freeze_encoder: bool = field( default=False, metadata={"help": "Whether to freeze the entire encoder of the seq2seq model."} ) forced_decoder_ids: List[List[int]] = field( default=None, metadata={ "help": ( "A list of pairs of integers which indicates a mapping from generation indices to token indices " "that will be forced before sampling. For example, [[0, 123]] means the first generated token " "will always be a token of index 123." ) }, ) suppress_tokens: List[int] = field( default=None, metadata={"help": "A list of tokens that will be suppressed at generation."} ) model_index_name: str = field(default=None, metadata={"help": "Pretty name for the model card."}) @dataclass class DataTrainingArguments: """ Arguments pertaining to what data we are going to input our model for training and eval. """ dataset_name: str = field( default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."} ) dataset_config_name: Optional[str] = field( default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} ) text_column: Optional[str] = field( default=None, metadata={"help": "The name of the column in the datasets containing the full texts (for summarization)."}, ) max_train_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set." ) }, ) max_eval_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of evaluation examples to this " "value if set." ) }, ) audio_column_name: str = field( default="audio", metadata={"help": "The name of the dataset column containing the audio data. Defaults to 'audio'"}, ) text_column_name: str = field( default="text", metadata={"help": "The name of the dataset column containing the text data. Defaults to 'text'"}, ) max_duration_in_seconds: float = field( default=20.0, metadata={ "help": ( "Truncate audio files that are longer than `max_duration_in_seconds` seconds to" " 'max_duration_in_seconds`" ) }, ) min_duration_in_seconds: float = field( default=0.0, metadata={"help": "Filter audio files that are shorter than `min_duration_in_seconds` seconds"} ) train_split_name: str = field( default="train", metadata={ "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'" }, ) eval_split_name: str = field( default="test", metadata={ "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'" }, ) do_lower_case: bool = field( default=False, metadata={"help": "Whether the target text should be lower cased."}, ) do_remove_punctuation: bool = field( default=False, metadata={"help": "Whether the target text should be striped of punctuation."}, ) do_normalize_eval: bool = field( default=True, metadata={"help": "Whether to normalise the references and predictions in the eval WER calculation."}, ) language: str = field( default=None, metadata={ "help": ( "Language for multilingual fine-tuning. This argument should be set for multilingual fine-tuning " "only. For English speech recognition, it should be set to `None`." 
) }, ) task: str = field( default="transcribe", metadata={"help": "Task, either `transcribe` for speech recognition or `translate` for speech translation."}, ) shuffle_buffer_size: Optional[int] = field( default=500, metadata={ "help": ( "The number of streamed examples to download before shuffling them. The large the buffer, " "the closer it is to real offline shuffling." ) }, ) streaming: bool = field( default=True, metadata={"help": "Whether to use streaming mode to load and pre-process the data."}, ) @dataclass class DataCollatorSpeechSeq2SeqWithPadding: """ Data collator that will dynamically pad the inputs received. Args: processor ([`WhisperProcessor`]) The processor used for processing the data. decoder_start_token_id (`int`) The begin-of-sentence of the decoder. """ processor: Any decoder_start_token_id: int def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]: # split inputs and labels since they have to be of different lengths and need # different padding methods model_input_name = self.processor.model_input_names[0] input_features = [{model_input_name: feature[model_input_name]} for feature in features] label_features = [{"input_ids": feature["labels"]} for feature in features] batch = self.processor.feature_extractor.pad(input_features, return_tensors="pt") labels_batch = self.processor.tokenizer.pad(label_features, return_tensors="pt") # replace padding with -100 to ignore loss correctly labels = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1), -100) # if bos token is appended in previous tokenization step, # cut bos token here as it's append later anyways if (labels[:, 0] == self.decoder_start_token_id).all().cpu().item(): labels = labels[:, 1:] batch["labels"] = labels return batch def load_maybe_streaming_dataset(dataset_name, dataset_config_name, split="train", streaming=True, **kwargs): """ Utility function to load a dataset in streaming mode. For datasets with multiple splits, each split is loaded individually and then splits combined by taking alternating examples from each (interleaving). """ if "+" in split: # load multiple splits separated by the `+` symbol with streaming mode dataset_splits = [ load_dataset(dataset_name, dataset_config_name, split=split_name, streaming=streaming, **kwargs) for split_name in split.split("+") ] # interleave multiple splits to form one dataset interleaved_dataset = interleave_datasets(dataset_splits) return interleaved_dataset else: # load a single split *with* streaming mode dataset = load_dataset(dataset_name, dataset_config_name, split=split, streaming=streaming, **kwargs) return dataset def main(): # 1. Parse input arguments # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments)) if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) else: model_args, data_args, training_args = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. 
send_example_telemetry("run_speech_recognition_seq2seq_streaming", model_args, data_args) # 2. Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", handlers=[logging.StreamHandler(sys.stdout)], ) log_level = training_args.get_process_log_level() logger.setLevel(log_level) datasets.utils.logging.set_verbosity(log_level) transformers.utils.logging.set_verbosity(log_level) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN) # Log on each process the small summary: logger.warning( f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}" f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}" ) logger.info(f"Training/evaluation parameters {training_args}") # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank): transformers.utils.logging.set_verbosity_info() logger.info("Training/evaluation parameters %s", training_args) # 3. Detecting last checkpoint and eventually continue from last checkpoint last_checkpoint = None if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: last_checkpoint = get_last_checkpoint(training_args.output_dir) if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: raise ValueError( f"Output directory ({training_args.output_dir}) already exists and is not empty. " "Use --overwrite_output_dir to overcome." ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." ) # Set seed before initializing model. set_seed(training_args.seed) # 4. Load dataset raw_datasets = IterableDatasetDict() if data_args.streaming else DatasetDict() if training_args.do_train: raw_datasets["train"] = load_maybe_streaming_dataset( data_args.dataset_name, data_args.dataset_config_name, split=data_args.train_split_name, use_auth_token=True if model_args.use_auth_token else None, streaming=data_args.streaming, ) if training_args.do_eval: raw_datasets["eval"] = load_maybe_streaming_dataset( data_args.dataset_name, data_args.dataset_config_name, split=data_args.eval_split_name, use_auth_token=True if model_args.use_auth_token else None, streaming=data_args.streaming, ) raw_datasets_features = list(next(iter(raw_datasets.values())).features.keys()) if data_args.audio_column_name not in raw_datasets_features: raise ValueError( f"--audio_column_name '{data_args.audio_column_name}' not found in dataset '{data_args.dataset_name}'. " "Make sure to set `--audio_column_name` to the correct audio column - one of " f"{', '.join(raw_datasets_features)}." ) if data_args.text_column_name not in raw_datasets_features: raise ValueError( f"--text_column_name {data_args.text_column_name} not found in dataset '{data_args.dataset_name}'. " "Make sure to set `--text_column_name` to the correct text column - one of " f"{', '.join(raw_datasets_features)}." ) # 5. 
Load pretrained model, tokenizer, and feature extractor # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently config = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ) config.update({"forced_decoder_ids": model_args.forced_decoder_ids, "suppress_tokens": model_args.suppress_tokens}) if training_args.gradient_checkpointing: config.update({"use_cache": False}) feature_extractor = AutoFeatureExtractor.from_pretrained( model_args.feature_extractor_name if model_args.feature_extractor_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ) tokenizer = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ) model = AutoModelForSpeechSeq2Seq.from_pretrained( model_args.model_name_or_path, config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ) if model.config.decoder_start_token_id is None: raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined") if model_args.freeze_feature_encoder: model.freeze_feature_encoder() if model_args.freeze_encoder: model.freeze_encoder() if data_args.language is not None: # We only need to set the task id when the language is specified (i.e. in a multilingual setting) tokenizer.set_prefix_tokens(language=data_args.language, task=data_args.task) # 6. Resample speech dataset if necessary dataset_sampling_rate = next(iter(raw_datasets.values())).features[data_args.audio_column_name].sampling_rate if dataset_sampling_rate != feature_extractor.sampling_rate: raw_datasets = raw_datasets.cast_column( data_args.audio_column_name, datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate) ) # 7. Preprocessing the datasets. # We need to read the audio files as arrays and tokenize the targets. 
max_input_length = data_args.max_duration_in_seconds * feature_extractor.sampling_rate min_input_length = data_args.min_duration_in_seconds * feature_extractor.sampling_rate audio_column_name = data_args.audio_column_name text_column_name = data_args.text_column_name model_input_name = feature_extractor.model_input_names[0] do_lower_case = data_args.do_lower_case do_remove_punctuation = data_args.do_remove_punctuation normalizer = BasicTextNormalizer() # 'official' text normalizer from OpenAI if data_args.max_train_samples is not None: raw_datasets["train"] = ( raw_datasets["train"].take(data_args.max_train_samples) if data_args.streaming else raw_datasets["train"].select(range(data_args.max_train_samples)) ) if data_args.max_eval_samples is not None: raw_datasets["eval"] = ( raw_datasets["eval"].take(data_args.max_eval_samples) if data_args.streaming else raw_datasets["eval"].select(range(data_args.max_eval_samples)) ) def prepare_dataset(batch): # process audio sample = batch[audio_column_name] inputs = feature_extractor(sample["array"], sampling_rate=sample["sampling_rate"]) # process audio length batch[model_input_name] = inputs.get(model_input_name)[0] batch["input_length"] = len(sample["array"]) # process targets input_str = batch[text_column_name].lower() if do_lower_case else batch[text_column_name] if do_remove_punctuation: input_str = normalizer(input_str).strip() batch["labels"] = tokenizer(input_str).input_ids return batch with training_args.main_process_first(desc="dataset map pre-processing"): vectorized_datasets = raw_datasets.map( prepare_dataset, remove_columns=raw_datasets_features, ).with_format("torch") if training_args.do_train and data_args.streaming: # manually shuffle if streaming (done by the trainer for non-streaming) vectorized_datasets["train"] = vectorized_datasets["train"].shuffle( buffer_size=data_args.shuffle_buffer_size, seed=training_args.seed, ) # filter training data that is shorter than min_input_length or longer than # max_input_length def is_audio_in_length_range(length): return min_input_length < length < max_input_length if training_args.do_train: vectorized_datasets["train"] = vectorized_datasets["train"].filter( is_audio_in_length_range, input_columns=["input_length"], ) # 8. Load Metric metric = evaluate.load("wer") do_normalize_eval = data_args.do_normalize_eval def compute_metrics(pred): pred_ids = pred.predictions pred.label_ids[pred.label_ids == -100] = tokenizer.pad_token_id pred_str = tokenizer.batch_decode(pred_ids, skip_special_tokens=True) # we do not want to group tokens when computing the metrics label_str = tokenizer.batch_decode(pred.label_ids, skip_special_tokens=True) if do_normalize_eval: pred_str = [normalizer(pred) for pred in pred_str] label_str = [normalizer(label) for label in label_str] # filtering step to only evaluate the samples that correspond to non-zero references: pred_str = [pred_str[i] for i in range(len(pred_str)) if len(label_str[i]) > 0] label_str = [label_str[i] for i in range(len(label_str)) if len(label_str[i]) > 0] wer = 100 * metric.compute(predictions=pred_str, references=label_str) return {"wer": wer} # 9. Create a single speech processor if is_main_process(training_args.local_rank): # save feature extractor, tokenizer and config feature_extractor.save_pretrained(training_args.output_dir) tokenizer.save_pretrained(training_args.output_dir) config.save_pretrained(training_args.output_dir) processor = AutoProcessor.from_pretrained(training_args.output_dir) # 10. 
Define data collator data_collator = DataCollatorSpeechSeq2SeqWithPadding( processor=processor, decoder_start_token_id=model.config.decoder_start_token_id, ) # 11. Configure Trainer # Trainer callback to reinitialise and reshuffle the streamable datasets at the beginning of each epoch # Only required for streaming: Trainer automatically shuffles non-streaming datasets class ShuffleCallback(TrainerCallback): def on_epoch_begin(self, args, state, control, train_dataloader, **kwargs): if isinstance(train_dataloader.dataset, IterableDatasetShard): pass # set_epoch() is handled by the Trainer elif isinstance(train_dataloader.dataset, IterableDataset): train_dataloader.dataset.set_epoch(train_dataloader.dataset._epoch + 1) # Initialize Trainer trainer = Seq2SeqTrainer( model=model, args=training_args, train_dataset=vectorized_datasets["train"] if training_args.do_train else None, eval_dataset=vectorized_datasets["eval"] if training_args.do_eval else None, tokenizer=feature_extractor, data_collator=data_collator, compute_metrics=compute_metrics if training_args.predict_with_generate else None, callbacks=[ShuffleCallback()] if data_args.streaming else None, ) # 12. Training if training_args.do_train: checkpoint = None if training_args.resume_from_checkpoint is not None: checkpoint = training_args.resume_from_checkpoint elif last_checkpoint is not None: checkpoint = last_checkpoint train_result = trainer.train(resume_from_checkpoint=checkpoint) trainer.save_model() # Saves the feature extractor too for easy upload metrics = train_result.metrics if data_args.max_train_samples: metrics["train_samples"] = data_args.max_train_samples trainer.log_metrics("train", metrics) trainer.save_metrics("train", metrics) trainer.save_state() # 13. Evaluation results = {} if training_args.do_eval: logger.info("*** Evaluate ***") metrics = trainer.evaluate( metric_key_prefix="eval", max_length=training_args.generation_max_length, num_beams=training_args.generation_num_beams, ) if data_args.max_eval_samples: metrics["eval_samples"] = data_args.max_eval_samples trainer.log_metrics("eval", metrics) trainer.save_metrics("eval", metrics) # 14. Write Training Stats kwargs = { "finetuned_from": model_args.model_name_or_path, "tasks": "automatic-speech-recognition", "tags": "whisper-event", } if data_args.dataset_name is not None: kwargs["dataset_tags"] = data_args.dataset_name if data_args.dataset_config_name is not None: kwargs["dataset"] = f"{data_args.dataset_name} {data_args.dataset_config_name}" else: kwargs["dataset"] = data_args.dataset_name if "common_voice" in data_args.dataset_name: kwargs["language"] = data_args.dataset_config_name.split('-')[0] if model_args.model_index_name is not None: kwargs["model_name"] = model_args.model_index_name if training_args.push_to_hub: trainer.push_to_hub(**kwargs) else: trainer.create_model_card(**kwargs) return results if __name__ == "__main__": main()
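# --- Illustrative sketch (not part of the original training script) -------
# DataCollatorSpeechSeq2SeqWithPadding above masks padded label positions with
# -100 so the cross-entropy loss ignores them, and drops a leading
# decoder-start/BOS token when every row carries one. The helper and tensors
# below are made-up values that replay just that masking step in isolation;
# they assume nothing about a real Whisper checkpoint or tokenizer.
import torch


def _mask_labels_sketch(input_ids, attention_mask, decoder_start_token_id):
    # replace padding with -100 to ignore those positions in the loss
    labels = input_ids.masked_fill(attention_mask.ne(1), -100)
    # cut a leading BOS/decoder-start token if every row begins with it
    if (labels[:, 0] == decoder_start_token_id).all().cpu().item():
        labels = labels[:, 1:]
    return labels


# _mask_labels_sketch(torch.tensor([[50258, 7, 8, 0], [50258, 9, 0, 0]]),
#                     torch.tensor([[1, 1, 1, 0], [1, 1, 0, 0]]),
#                     decoder_start_token_id=50258)
# -> tensor([[   7,    8, -100],
#            [   9, -100, -100]])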
community-events-main
whisper-fine-tuning-event/run_speech_recognition_seq2seq_streaming.py
import argparse from transformers import pipeline from transformers.models.whisper.english_normalizer import BasicTextNormalizer from datasets import load_dataset, Audio import evaluate wer_metric = evaluate.load("wer") def is_target_text_in_range(ref): if ref.strip() == "ignore time segment in scoring": return False else: return ref.strip() != "" def get_text(sample): if "text" in sample: return sample["text"] elif "sentence" in sample: return sample["sentence"] elif "normalized_text" in sample: return sample["normalized_text"] elif "transcript" in sample: return sample["transcript"] elif "transcription" in sample: return sample["transcription"] else: raise ValueError( f"Expected transcript column of either 'text', 'sentence', 'normalized_text' or 'transcript'. Got sample of " ".join{sample.keys()}. Ensure a text column name is present in the dataset." ) whisper_norm = BasicTextNormalizer() def normalise(batch): batch["norm_text"] = whisper_norm(get_text(batch)) return batch def data(dataset): for i, item in enumerate(dataset): yield {**item["audio"], "reference": item["norm_text"]} def main(args): batch_size = args.batch_size whisper_asr = pipeline( "automatic-speech-recognition", model=args.model_id, device=args.device ) whisper_asr.model.config.forced_decoder_ids = ( whisper_asr.tokenizer.get_decoder_prompt_ids( language=args.language, task="transcribe" ) ) dataset = load_dataset( args.dataset, args.config, split=args.split, streaming=args.streaming, use_auth_token=True, ) # Only uncomment for debugging dataset = dataset.take(args.max_eval_samples) dataset = dataset.cast_column("audio", Audio(sampling_rate=16000)) dataset = dataset.map(normalise) dataset = dataset.filter(is_target_text_in_range, input_columns=["norm_text"]) predictions = [] references = [] # run streamed inference for out in whisper_asr(data(dataset), batch_size=batch_size): predictions.append(whisper_norm(out["text"])) references.append(out["reference"][0]) wer = wer_metric.compute(references=references, predictions=predictions) wer = round(100 * wer, 2) print("WER:", wer) evaluate.push_to_hub( model_id=args.model_id, metric_value=wer, metric_type="wer", metric_name="WER", dataset_name=args.dataset, dataset_type=args.dataset, dataset_split=args.split, dataset_config=args.config, task_type="automatic-speech-recognition", task_name="Automatic Speech Recognition" ) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( "--model_id", type=str, required=True, help="Model identifier. Should be loadable with 🤗 Transformers", ) parser.add_argument( "--dataset", type=str, default="mozilla-foundation/common_voice_11_0", help="Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets", ) parser.add_argument( "--config", type=str, required=True, help="Config of the dataset. *E.g.* `'en'` for the English split of Common Voice", ) parser.add_argument( "--split", type=str, default="test", help="Split of the dataset. *E.g.* `'test'`", ) parser.add_argument( "--device", type=int, default=-1, help="The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.", ) parser.add_argument( "--batch_size", type=int, default=16, help="Number of samples to go through each streamed batch.", ) parser.add_argument( "--max_eval_samples", type=int, default=None, help="Number of samples to be evaluated. Put a lower number e.g. 
64 for testing this script.",
    )
    # NOTE: argparse's `type=bool` treats any non-empty string (including "False")
    # as True, so parse the flag explicitly.
    parser.add_argument(
        "--streaming",
        type=lambda x: str(x).lower() not in {"false", "0", "no"},
        default=True,
        help="Choose whether you'd like to download the entire dataset or stream it during the evaluation.",
    )
    parser.add_argument(
        "--language",
        type=str,
        required=True,
        help="Two letter language code for the transcription language, e.g. use 'en' for English.",
    )

    args = parser.parse_args()

    main(args)
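# --- Illustrative sketch (not part of the original evaluation script) -----
# `get_text` picks whichever transcript column a dataset happens to expose and
# `whisper_norm` applies OpenAI's basic text normalisation before WER scoring.
# The sample below is made up purely to show the two helpers together; calling
# this function has no effect on the streamed evaluation above.
def _normalisation_sketch():
    toy_sample = {"sentence": "Hello, World!", "audio": None}
    reference = whisper_norm(get_text(toy_sample))
    # -> roughly "hello world": lower-cased with punctuation stripped
    return reference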
community-events-main
whisper-fine-tuning-event/run_eval_whisper_streaming.py
#!/usr/bin/env python # coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and import argparse import logging import math import os import random import time from pathlib import Path import jax import jax.numpy as jnp import numpy as np import optax import torch import torch.utils.checkpoint import transformers from datasets import load_dataset, load_from_disk from flax import jax_utils from flax.core.frozen_dict import unfreeze from flax.training import train_state from flax.training.common_utils import shard from huggingface_hub import create_repo, upload_folder from PIL import Image, PngImagePlugin from torch.utils.data import IterableDataset from torchvision import transforms from tqdm.auto import tqdm from transformers import CLIPTokenizer, FlaxCLIPTextModel, set_seed from diffusers import ( FlaxAutoencoderKL, FlaxControlNetModel, FlaxDDPMScheduler, FlaxStableDiffusionControlNetPipeline, FlaxUNet2DConditionModel, ) from diffusers.utils import check_min_version, is_wandb_available # To prevent an error that occurs when there are abnormally large compressed data chunk in the png image # see more https://github.com/python-pillow/Pillow/issues/5610 LARGE_ENOUGH_NUMBER = 100 PngImagePlugin.MAX_TEXT_CHUNK = LARGE_ENOUGH_NUMBER * (1024**2) if is_wandb_available(): import wandb # Will error if the minimal version of diffusers is not installed. Remove at your own risks. 
check_min_version("0.16.0.dev0") logger = logging.getLogger(__name__) def image_grid(imgs, rows, cols): assert len(imgs) == rows * cols w, h = imgs[0].size grid = Image.new("RGB", size=(cols * w, rows * h)) grid_w, grid_h = grid.size for i, img in enumerate(imgs): grid.paste(img, box=(i % cols * w, i // cols * h)) return grid def log_validation(pipeline, pipeline_params, controlnet_params, tokenizer, args, rng, weight_dtype): logger.info("Running validation...") pipeline_params = pipeline_params.copy() pipeline_params["controlnet"] = controlnet_params num_samples = jax.device_count() prng_seed = jax.random.split(rng, jax.device_count()) if len(args.validation_image) == len(args.validation_prompt): validation_images = args.validation_image validation_prompts = args.validation_prompt elif len(args.validation_image) == 1: validation_images = args.validation_image * len(args.validation_prompt) validation_prompts = args.validation_prompt elif len(args.validation_prompt) == 1: validation_images = args.validation_image validation_prompts = args.validation_prompt * len(args.validation_image) else: raise ValueError( "number of `args.validation_image` and `args.validation_prompt` should be checked in `parse_args`" ) image_logs = [] for validation_prompt, validation_image in zip(validation_prompts, validation_images): prompts = num_samples * [validation_prompt] prompt_ids = pipeline.prepare_text_inputs(prompts) prompt_ids = shard(prompt_ids) validation_image = Image.open(validation_image).convert("RGB") processed_image = pipeline.prepare_image_inputs(num_samples * [validation_image]) processed_image = shard(processed_image) images = pipeline( prompt_ids=prompt_ids, image=processed_image, params=pipeline_params, prng_seed=prng_seed, num_inference_steps=50, jit=True, ).images images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:]) images = pipeline.numpy_to_pil(images) image_logs.append( {"validation_image": validation_image, "images": images, "validation_prompt": validation_prompt} ) if args.report_to == "wandb": formatted_images = [] for log in image_logs: images = log["images"] validation_prompt = log["validation_prompt"] validation_image = log["validation_image"] formatted_images.append(wandb.Image(validation_image, caption="Controlnet conditioning")) for image in images: image = wandb.Image(image, caption=validation_prompt) formatted_images.append(image) wandb.log({"validation": formatted_images}) else: logger.warn(f"image logging not implemented for {args.report_to}") return image_logs def save_model_card(repo_id: str, image_logs=None, base_model=str, repo_folder=None): img_str = "" if image_logs is not None: for i, log in enumerate(image_logs): images = log["images"] validation_prompt = log["validation_prompt"] validation_image = log["validation_image"] validation_image.save(os.path.join(repo_folder, "image_control.png")) img_str += f"prompt: {validation_prompt}\n" images = [validation_image] + images image_grid(images, 1, len(images)).save(os.path.join(repo_folder, f"images_{i}.png")) img_str += f"![images_{i})](./images_{i}.png)\n" yaml = f""" --- license: creativeml-openrail-m base_model: {base_model} tags: - stable-diffusion - stable-diffusion-diffusers - text-to-image - diffusers - controlnet - jax-diffusers-event inference: true --- """ model_card = f""" # controlnet- {repo_id} These are controlnet weights trained on {base_model} with new type of conditioning. You can find some example images in the following. 
\n {img_str} """ with open(os.path.join(repo_folder, "README.md"), "w") as f: f.write(yaml + model_card) def parse_args(): parser = argparse.ArgumentParser(description="Simple example of a training script.") parser.add_argument( "--pretrained_model_name_or_path", type=str, required=True, help="Path to pretrained model or model identifier from huggingface.co/models.", ) parser.add_argument( "--controlnet_model_name_or_path", type=str, default=None, help="Path to pretrained controlnet model or model identifier from huggingface.co/models." " If not specified controlnet weights are initialized from unet.", ) parser.add_argument( "--revision", type=str, default=None, help="Revision of pretrained model identifier from huggingface.co/models.", ) parser.add_argument( "--from_pt", action="store_true", help="Load the pretrained model from a PyTorch checkpoint.", ) parser.add_argument( "--controlnet_revision", type=str, default=None, help="Revision of controlnet model identifier from huggingface.co/models.", ) parser.add_argument( "--profile_steps", type=int, default=0, help="How many training steps to profile in the beginning.", ) parser.add_argument( "--profile_validation", action="store_true", help="Whether to profile the (last) validation.", ) parser.add_argument( "--profile_memory", action="store_true", help="Whether to dump an initial (before training loop) and a final (at program end) memory profile.", ) parser.add_argument( "--ccache", type=str, default=None, help="Enables compilation cache.", ) parser.add_argument( "--controlnet_from_pt", action="store_true", help="Load the controlnet model from a PyTorch checkpoint.", ) parser.add_argument( "--tokenizer_name", type=str, default=None, help="Pretrained tokenizer name or path if not the same as model_name", ) parser.add_argument( "--output_dir", type=str, default="runs/{timestamp}", help="The output directory where the model predictions and checkpoints will be written. " "Can contain placeholders: {timestamp}.", ) parser.add_argument( "--cache_dir", type=str, default=None, help="The directory where the downloaded models and datasets will be stored.", ) parser.add_argument("--seed", type=int, default=0, help="A seed for reproducible training.") parser.add_argument( "--resolution", type=int, default=512, help=( "The resolution for input images, all the images in the train/validation dataset will be resized to this" " resolution" ), ) parser.add_argument( "--train_batch_size", type=int, default=1, help="Batch size (per device) for the training dataloader." ) parser.add_argument("--num_train_epochs", type=int, default=100) parser.add_argument( "--max_train_steps", type=int, default=None, help="Total number of training steps to perform.", ) parser.add_argument( "--checkpointing_steps", type=int, default=5000, help=("Save a checkpoint of the training state every X updates."), ) parser.add_argument( "--learning_rate", type=float, default=1e-4, help="Initial learning rate (after the potential warmup period) to use.", ) parser.add_argument( "--scale_lr", action="store_true", help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.", ) parser.add_argument( "--lr_scheduler", type=str, default="constant", help=( 'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",' ' "constant", "constant_with_warmup"]' ), ) parser.add_argument( "--snr_gamma", type=float, default=None, help="SNR weighting gamma to be used if rebalancing the loss. Recommended value is 5.0. 
" "More details here: https://arxiv.org/abs/2303.09556.", ) parser.add_argument( "--dataloader_num_workers", type=int, default=0, help=( "Number of subprocesses to use for data loading. 0 means that the data will be loaded in the main process." ), ) parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.") parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.") parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.") parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer") parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.") parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.") parser.add_argument( "--hub_model_id", type=str, default=None, help="The name of the repository to keep in sync with the local `output_dir`.", ) parser.add_argument( "--logging_steps", type=int, default=100, help=("log training metric every X steps to `--report_t`"), ) parser.add_argument( "--report_to", type=str, default="wandb", help=('The integration to report the results and logs to. Currently only supported platforms are `"wandb"`'), ) parser.add_argument( "--mixed_precision", type=str, default="no", choices=["no", "fp16", "bf16"], help=( "Whether to use mixed precision. Choose" "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10." "and an Nvidia Ampere GPU." ), ) parser.add_argument( "--dataset_name", type=str, default=None, help=( "The name of the Dataset (from the HuggingFace hub) to train on (could be your own, possibly private," " dataset). It can also be a path pointing to a local copy of a dataset in your filesystem," " or to a folder containing files that 🤗 Datasets can understand." ), ) parser.add_argument("--streaming", action="store_true", help="To stream a large dataset from Hub.") parser.add_argument( "--dataset_config_name", type=str, default=None, help="The config of the Dataset, leave as None if there's only one config.", ) parser.add_argument( "--train_data_dir", type=str, default=None, help=( "A folder containing the training dataset. By default it will use `load_dataset` method to load a custom dataset from the folder." "Folder must contain a dataset script as described here https://huggingface.co/docs/datasets/dataset_script) ." "If `--load_from_disk` flag is passed, it will use `load_from_disk` method instead. Ignored if `dataset_name` is specified." ), ) parser.add_argument( "--load_from_disk", action="store_true", help=( "If True, will load a dataset that was previously saved using `save_to_disk` from `--train_data_dir`" "See more https://huggingface.co/docs/datasets/package_reference/main_classes#datasets.Dataset.load_from_disk" ), ) parser.add_argument( "--image_column", type=str, default="image", help="The column of the dataset containing the target image." 
) parser.add_argument( "--conditioning_image_column", type=str, default="conditioning_image", help="The column of the dataset containing the controlnet conditioning image.", ) parser.add_argument( "--caption_column", type=str, default="text", help="The column of the dataset containing a caption or a list of captions.", ) parser.add_argument( "--max_train_samples", type=int, default=None, help=( "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set. Needed if `streaming` is set to True." ), ) parser.add_argument( "--proportion_empty_prompts", type=float, default=0, help="Proportion of image prompts to be replaced with empty strings. Defaults to 0 (no prompt replacement).", ) parser.add_argument( "--validation_prompt", type=str, default=None, nargs="+", help=( "A set of prompts evaluated every `--validation_steps` and logged to `--report_to`." " Provide either a matching number of `--validation_image`s, a single `--validation_image`" " to be used with all prompts, or a single prompt that will be used with all `--validation_image`s." ), ) parser.add_argument( "--validation_image", type=str, default=None, nargs="+", help=( "A set of paths to the controlnet conditioning image be evaluated every `--validation_steps`" " and logged to `--report_to`. Provide either a matching number of `--validation_prompt`s, a" " a single `--validation_prompt` to be used with all `--validation_image`s, or a single" " `--validation_image` that will be used with all `--validation_prompt`s." ), ) parser.add_argument( "--validation_steps", type=int, default=100, help=( "Run validation every X steps. Validation consists of running the prompt" " `args.validation_prompt` and logging the images." ), ) parser.add_argument("--wandb_entity", type=str, default=None, help=("The wandb entity to use (for teams).")) parser.add_argument( "--tracker_project_name", type=str, default="train_controlnet_flax", help=("The `project` argument passed to wandb"), ) parser.add_argument( "--gradient_accumulation_steps", type=int, default=1, help="Number of steps to accumulate gradients over" ) parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank") args = parser.parse_args() args.output_dir = args.output_dir.replace("{timestamp}", time.strftime("%Y%m%d_%H%M%S")) env_local_rank = int(os.environ.get("LOCAL_RANK", -1)) if env_local_rank != -1 and env_local_rank != args.local_rank: args.local_rank = env_local_rank # Sanity checks if args.dataset_name is None and args.train_data_dir is None: raise ValueError("Need either a dataset name or a training folder.") if args.dataset_name is not None and args.train_data_dir is not None: raise ValueError("Specify only one of `--dataset_name` or `--train_data_dir`") if args.proportion_empty_prompts < 0 or args.proportion_empty_prompts > 1: raise ValueError("`--proportion_empty_prompts` must be in the range [0, 1].") if args.validation_prompt is not None and args.validation_image is None: raise ValueError("`--validation_image` must be set if `--validation_prompt` is set") if args.validation_prompt is None and args.validation_image is not None: raise ValueError("`--validation_prompt` must be set if `--validation_image` is set") if ( args.validation_image is not None and args.validation_prompt is not None and len(args.validation_image) != 1 and len(args.validation_prompt) != 1 and len(args.validation_image) != len(args.validation_prompt) ): raise ValueError( "Must provide either 1 `--validation_image`, 1 
`--validation_prompt`," " or the same number of `--validation_prompt`s and `--validation_image`s" ) # This idea comes from # https://github.com/borisdayma/dalle-mini/blob/d2be512d4a6a9cda2d63ba04afc33038f98f705f/src/dalle_mini/data.py#L370 if args.streaming and args.max_train_samples is None: raise ValueError("You must specify `max_train_samples` when using dataset streaming.") return args def make_train_dataset(args, tokenizer, batch_size=None): # Get the datasets: you can either provide your own training and evaluation files (see below) # or specify a Dataset from the hub (the dataset will be downloaded automatically from the datasets Hub). # In distributed training, the load_dataset function guarantees that only one local process can concurrently # download the dataset. if args.dataset_name is not None: # Downloading and loading a dataset from the hub. dataset = load_dataset( args.dataset_name, args.dataset_config_name, cache_dir=args.cache_dir, streaming=args.streaming, ) else: if args.train_data_dir is not None: if args.load_from_disk: dataset = load_from_disk( args.train_data_dir, ) else: dataset = load_dataset( args.train_data_dir, cache_dir=args.cache_dir, ) # See more about loading custom images at # https://huggingface.co/docs/datasets/v2.0.0/en/dataset_script # Preprocessing the datasets. # We need to tokenize inputs and targets. if isinstance(dataset["train"], IterableDataset): column_names = next(iter(dataset["train"])).keys() else: column_names = dataset["train"].column_names # 6. Get the column names for input/target. if args.image_column is None: image_column = column_names[0] logger.info(f"image column defaulting to {image_column}") else: image_column = args.image_column if image_column not in column_names: raise ValueError( f"`--image_column` value '{args.image_column}' not found in dataset columns. Dataset columns are: {', '.join(column_names)}" ) if args.caption_column is None: caption_column = column_names[1] logger.info(f"caption column defaulting to {caption_column}") else: caption_column = args.caption_column if caption_column not in column_names: raise ValueError( f"`--caption_column` value '{args.caption_column}' not found in dataset columns. Dataset columns are: {', '.join(column_names)}" ) if args.conditioning_image_column is None: conditioning_image_column = column_names[2] logger.info(f"conditioning image column defaulting to {caption_column}") else: conditioning_image_column = args.conditioning_image_column if conditioning_image_column not in column_names: raise ValueError( f"`--conditioning_image_column` value '{args.conditioning_image_column}' not found in dataset columns. Dataset columns are: {', '.join(column_names)}" ) def tokenize_captions(examples, is_train=True): captions = [] for caption in examples[caption_column]: if random.random() < args.proportion_empty_prompts: captions.append("") elif isinstance(caption, str): captions.append(caption) elif isinstance(caption, (list, np.ndarray)): # take a random caption if there are multiple captions.append(random.choice(caption) if is_train else caption[0]) else: raise ValueError( f"Caption column `{caption_column}` should contain either strings or lists of strings." 
) inputs = tokenizer( captions, max_length=tokenizer.model_max_length, padding="max_length", truncation=True, return_tensors="pt" ) return inputs.input_ids image_transforms = transforms.Compose( [ transforms.Resize(args.resolution, interpolation=transforms.InterpolationMode.BILINEAR), transforms.CenterCrop(args.resolution), transforms.ToTensor(), transforms.Normalize([0.5], [0.5]), ] ) conditioning_image_transforms = transforms.Compose( [ transforms.Resize(args.resolution, interpolation=transforms.InterpolationMode.BILINEAR), transforms.CenterCrop(args.resolution), transforms.ToTensor(), ] ) def preprocess_train(examples): images = [image.convert("RGB") for image in examples[image_column]] images = [image_transforms(image) for image in images] conditioning_images = [image.convert("RGB") for image in examples[conditioning_image_column]] conditioning_images = [conditioning_image_transforms(image) for image in conditioning_images] examples["pixel_values"] = images examples["conditioning_pixel_values"] = conditioning_images examples["input_ids"] = tokenize_captions(examples) return examples if jax.process_index() == 0: if args.max_train_samples is not None: if args.streaming: dataset["train"] = dataset["train"].shuffle(seed=args.seed).take(args.max_train_samples) else: dataset["train"] = dataset["train"].shuffle(seed=args.seed).select(range(args.max_train_samples)) # Set the training transforms if args.streaming: train_dataset = dataset["train"].map( preprocess_train, batched=True, batch_size=batch_size, remove_columns=list(dataset["train"].features.keys()), ) else: train_dataset = dataset["train"].with_transform(preprocess_train) return train_dataset def collate_fn(examples): pixel_values = torch.stack([example["pixel_values"] for example in examples]) pixel_values = pixel_values.to(memory_format=torch.contiguous_format).float() conditioning_pixel_values = torch.stack([example["conditioning_pixel_values"] for example in examples]) conditioning_pixel_values = conditioning_pixel_values.to(memory_format=torch.contiguous_format).float() input_ids = torch.stack([example["input_ids"] for example in examples]) batch = { "pixel_values": pixel_values, "conditioning_pixel_values": conditioning_pixel_values, "input_ids": input_ids, } batch = {k: v.numpy() for k, v in batch.items()} return batch def get_params_to_save(params): return jax.device_get(jax.tree_util.tree_map(lambda x: x[0], params)) def main(): args = parse_args() logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO, ) # Setup logging, we only want one process per machine to log things on the screen. 
logger.setLevel(logging.INFO if jax.process_index() == 0 else logging.ERROR) if jax.process_index() == 0: transformers.utils.logging.set_verbosity_info() else: transformers.utils.logging.set_verbosity_error() # wandb init if jax.process_index() == 0 and args.report_to == "wandb": wandb.init( entity=args.wandb_entity, project=args.tracker_project_name, job_type="train", config=args, ) if args.seed is not None: set_seed(args.seed) rng = jax.random.PRNGKey(0) # Handle the repository creation if jax.process_index() == 0: if args.output_dir is not None: os.makedirs(args.output_dir, exist_ok=True) if args.push_to_hub: repo_id = create_repo( repo_id=args.hub_model_id or Path(args.output_dir).name, exist_ok=True, token=args.hub_token ).repo_id # Load the tokenizer and add the placeholder token as a additional special token if args.tokenizer_name: tokenizer = CLIPTokenizer.from_pretrained(args.tokenizer_name) elif args.pretrained_model_name_or_path: tokenizer = CLIPTokenizer.from_pretrained( args.pretrained_model_name_or_path, subfolder="tokenizer", revision=args.revision ) else: raise NotImplementedError("No tokenizer specified!") # Get the datasets: you can either provide your own training and evaluation files (see below) total_train_batch_size = args.train_batch_size * jax.local_device_count() * args.gradient_accumulation_steps train_dataset = make_train_dataset(args, tokenizer, batch_size=total_train_batch_size) train_dataloader = torch.utils.data.DataLoader( train_dataset, shuffle=not args.streaming, collate_fn=collate_fn, batch_size=total_train_batch_size, num_workers=args.dataloader_num_workers, drop_last=True, ) weight_dtype = jnp.float32 if args.mixed_precision == "fp16": weight_dtype = jnp.float16 elif args.mixed_precision == "bf16": weight_dtype = jnp.bfloat16 # Load models and create wrapper for stable diffusion text_encoder = FlaxCLIPTextModel.from_pretrained( args.pretrained_model_name_or_path, subfolder="text_encoder", dtype=weight_dtype, revision=args.revision, from_pt=args.from_pt, ) vae, vae_params = FlaxAutoencoderKL.from_pretrained( args.pretrained_model_name_or_path, revision=args.revision, subfolder="vae", dtype=weight_dtype, from_pt=args.from_pt, ) unet, unet_params = FlaxUNet2DConditionModel.from_pretrained( args.pretrained_model_name_or_path, subfolder="unet", dtype=weight_dtype, revision=args.revision, from_pt=args.from_pt, ) if args.controlnet_model_name_or_path: logger.info("Loading existing controlnet weights") controlnet, controlnet_params = FlaxControlNetModel.from_pretrained( args.controlnet_model_name_or_path, revision=args.controlnet_revision, from_pt=args.controlnet_from_pt, dtype=jnp.float32, ) else: logger.info("Initializing controlnet weights from unet") rng, rng_params = jax.random.split(rng) controlnet = FlaxControlNetModel( in_channels=unet.config.in_channels, down_block_types=unet.config.down_block_types, only_cross_attention=unet.config.only_cross_attention, block_out_channels=unet.config.block_out_channels, layers_per_block=unet.config.layers_per_block, attention_head_dim=unet.config.attention_head_dim, cross_attention_dim=unet.config.cross_attention_dim, use_linear_projection=unet.config.use_linear_projection, flip_sin_to_cos=unet.config.flip_sin_to_cos, freq_shift=unet.config.freq_shift, ) controlnet_params = controlnet.init_weights(rng=rng_params) controlnet_params = unfreeze(controlnet_params) for key in [ "conv_in", "time_embedding", "down_blocks_0", "down_blocks_1", "down_blocks_2", "down_blocks_3", "mid_block", ]: controlnet_params[key] = 
unet_params[key] pipeline, pipeline_params = FlaxStableDiffusionControlNetPipeline.from_pretrained( args.pretrained_model_name_or_path, tokenizer=tokenizer, controlnet=controlnet, safety_checker=None, dtype=weight_dtype, revision=args.revision, from_pt=args.from_pt, ) pipeline_params = jax_utils.replicate(pipeline_params) # Optimization if args.scale_lr: args.learning_rate = args.learning_rate * total_train_batch_size constant_scheduler = optax.constant_schedule(args.learning_rate) adamw = optax.adamw( learning_rate=constant_scheduler, b1=args.adam_beta1, b2=args.adam_beta2, eps=args.adam_epsilon, weight_decay=args.adam_weight_decay, ) optimizer = optax.chain( optax.clip_by_global_norm(args.max_grad_norm), adamw, ) state = train_state.TrainState.create(apply_fn=controlnet.__call__, params=controlnet_params, tx=optimizer) noise_scheduler, noise_scheduler_state = FlaxDDPMScheduler.from_pretrained( args.pretrained_model_name_or_path, subfolder="scheduler" ) # Initialize our training validation_rng, train_rngs = jax.random.split(rng) train_rngs = jax.random.split(train_rngs, jax.local_device_count()) def compute_snr(timesteps): """ Computes SNR as per https://github.com/TiankaiHang/Min-SNR-Diffusion-Training/blob/521b624bd70c67cee4bdf49225915f5945a872e3/guided_diffusion/gaussian_diffusion.py#L847-L849 """ alphas_cumprod = noise_scheduler_state.common.alphas_cumprod sqrt_alphas_cumprod = alphas_cumprod**0.5 sqrt_one_minus_alphas_cumprod = (1.0 - alphas_cumprod) ** 0.5 alpha = sqrt_alphas_cumprod[timesteps] sigma = sqrt_one_minus_alphas_cumprod[timesteps] # Compute SNR. snr = (alpha / sigma) ** 2 return snr def train_step(state, unet_params, text_encoder_params, vae_params, batch, train_rng): # reshape batch, add grad_step_dim if gradient_accumulation_steps > 1 if args.gradient_accumulation_steps > 1: grad_steps = args.gradient_accumulation_steps batch = jax.tree_map(lambda x: x.reshape((grad_steps, x.shape[0] // grad_steps) + x.shape[1:]), batch) def compute_loss(params, minibatch, sample_rng): # Convert images to latent space vae_outputs = vae.apply( {"params": vae_params}, minibatch["pixel_values"], deterministic=True, method=vae.encode ) latents = vae_outputs.latent_dist.sample(sample_rng) # (NHWC) -> (NCHW) latents = jnp.transpose(latents, (0, 3, 1, 2)) latents = latents * vae.config.scaling_factor # Sample noise that we'll add to the latents noise_rng, timestep_rng = jax.random.split(sample_rng) noise = jax.random.normal(noise_rng, latents.shape) # Sample a random timestep for each image bsz = latents.shape[0] timesteps = jax.random.randint( timestep_rng, (bsz,), 0, noise_scheduler.config.num_train_timesteps, ) # Add noise to the latents according to the noise magnitude at each timestep # (this is the forward diffusion process) noisy_latents = noise_scheduler.add_noise(noise_scheduler_state, latents, noise, timesteps) # Get the text embedding for conditioning encoder_hidden_states = text_encoder( minibatch["input_ids"], params=text_encoder_params, train=False, )[0] controlnet_cond = minibatch["conditioning_pixel_values"] # Predict the noise residual and compute loss down_block_res_samples, mid_block_res_sample = controlnet.apply( {"params": params}, noisy_latents, timesteps, encoder_hidden_states, controlnet_cond, train=True, return_dict=False, ) model_pred = unet.apply( {"params": unet_params}, noisy_latents, timesteps, encoder_hidden_states, down_block_additional_residuals=down_block_res_samples, mid_block_additional_residual=mid_block_res_sample, ).sample # Get the target for loss 
depending on the prediction type if noise_scheduler.config.prediction_type == "epsilon": target = noise elif noise_scheduler.config.prediction_type == "v_prediction": target = noise_scheduler.get_velocity(noise_scheduler_state, latents, noise, timesteps) else: raise ValueError(f"Unknown prediction type {noise_scheduler.config.prediction_type}") loss = (target - model_pred) ** 2 if args.snr_gamma is not None: snr = jnp.array(compute_snr(timesteps)) snr_loss_weights = jnp.where(snr < args.snr_gamma, snr, jnp.ones_like(snr) * args.snr_gamma) / snr loss = loss * snr_loss_weights loss = loss.mean() return loss grad_fn = jax.value_and_grad(compute_loss) # get a minibatch (one gradient accumulation slice) def get_minibatch(batch, grad_idx): return jax.tree_util.tree_map( lambda x: jax.lax.dynamic_index_in_dim(x, grad_idx, keepdims=False), batch, ) def loss_and_grad(grad_idx, train_rng): # create minibatch for the grad step minibatch = get_minibatch(batch, grad_idx) if grad_idx is not None else batch sample_rng, train_rng = jax.random.split(train_rng, 2) loss, grad = grad_fn(state.params, minibatch, sample_rng) return loss, grad, train_rng if args.gradient_accumulation_steps == 1: loss, grad, new_train_rng = loss_and_grad(None, train_rng) else: init_loss_grad_rng = ( 0.0, # initial value for cumul_loss jax.tree_map(jnp.zeros_like, state.params), # initial value for cumul_grad train_rng, # initial value for train_rng ) def cumul_grad_step(grad_idx, loss_grad_rng): cumul_loss, cumul_grad, train_rng = loss_grad_rng loss, grad, new_train_rng = loss_and_grad(grad_idx, train_rng) cumul_loss, cumul_grad = jax.tree_map(jnp.add, (cumul_loss, cumul_grad), (loss, grad)) return cumul_loss, cumul_grad, new_train_rng loss, grad, new_train_rng = jax.lax.fori_loop( 0, args.gradient_accumulation_steps, cumul_grad_step, init_loss_grad_rng, ) loss, grad = jax.tree_map(lambda x: x / args.gradient_accumulation_steps, (loss, grad)) grad = jax.lax.pmean(grad, "batch") new_state = state.apply_gradients(grads=grad) metrics = {"loss": loss} metrics = jax.lax.pmean(metrics, axis_name="batch") def l2(xs): return jnp.sqrt(sum([jnp.vdot(x, x) for x in jax.tree_util.tree_leaves(xs)])) metrics["l2_grads"] = l2(jax.tree_util.tree_leaves(grad)) return new_state, metrics, new_train_rng # Create parallel version of the train step p_train_step = jax.pmap(train_step, "batch", donate_argnums=(0,)) # Replicate the train state on each device state = jax_utils.replicate(state) unet_params = jax_utils.replicate(unet_params) text_encoder_params = jax_utils.replicate(text_encoder.params) vae_params = jax_utils.replicate(vae_params) # Train! if args.streaming: dataset_length = args.max_train_samples else: dataset_length = len(train_dataloader) num_update_steps_per_epoch = math.ceil(dataset_length / args.gradient_accumulation_steps) # Scheduler and math around the number of training steps. if args.max_train_steps is None: args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch) logger.info("***** Running training *****") logger.info(f" Num examples = {args.max_train_samples if args.streaming else len(train_dataset)}") logger.info(f" Num Epochs = {args.num_train_epochs}") logger.info(f" Instantaneous batch size per device = {args.train_batch_size}") logger.info(f" Total train batch size (w. 
parallel & distributed) = {total_train_batch_size}") logger.info(f" Total optimization steps = {args.num_train_epochs * num_update_steps_per_epoch}") if jax.process_index() == 0 and args.report_to == "wandb": wandb.define_metric("*", step_metric="train/step") wandb.define_metric("train/step", step_metric="walltime") wandb.config.update( { "num_train_examples": args.max_train_samples if args.streaming else len(train_dataset), "total_train_batch_size": total_train_batch_size, "total_optimization_step": args.num_train_epochs * num_update_steps_per_epoch, "num_devices": jax.device_count(), "controlnet_params": sum(np.prod(x.shape) for x in jax.tree_util.tree_leaves(state.params)), } ) global_step = step0 = 0 epochs = tqdm( range(args.num_train_epochs), desc="Epoch ... ", position=0, disable=jax.process_index() > 0, ) if args.profile_memory: jax.profiler.save_device_memory_profile(os.path.join(args.output_dir, "memory_initial.prof")) t00 = t0 = time.monotonic() for epoch in epochs: # ======================== Training ================================ train_metrics = [] train_metric = None steps_per_epoch = ( args.max_train_samples // total_train_batch_size if args.streaming or args.max_train_samples else len(train_dataset) // total_train_batch_size ) train_step_progress_bar = tqdm( total=steps_per_epoch, desc="Training...", position=1, leave=False, disable=jax.process_index() > 0, ) # train for batch in train_dataloader: if args.profile_steps and global_step == 1: train_metric["loss"].block_until_ready() jax.profiler.start_trace(args.output_dir) if args.profile_steps and global_step == 1 + args.profile_steps: train_metric["loss"].block_until_ready() jax.profiler.stop_trace() batch = shard(batch) with jax.profiler.StepTraceAnnotation("train", step_num=global_step): state, train_metric, train_rngs = p_train_step( state, unet_params, text_encoder_params, vae_params, batch, train_rngs ) train_metrics.append(train_metric) train_step_progress_bar.update(1) global_step += 1 if global_step >= args.max_train_steps: break if ( args.validation_prompt is not None and global_step % args.validation_steps == 0 and jax.process_index() == 0 ): _ = log_validation(pipeline, pipeline_params, state.params, tokenizer, args, validation_rng, weight_dtype) if global_step % args.logging_steps == 0 and jax.process_index() == 0: if args.report_to == "wandb": train_metrics = jax_utils.unreplicate(train_metrics) train_metrics = jax.tree_util.tree_map(lambda *m: jnp.array(m).mean(), *train_metrics) wandb.log( { "walltime": time.monotonic() - t00, "train/step": global_step, "train/epoch": global_step / dataset_length, "train/steps_per_sec": (global_step - step0) / (time.monotonic() - t0), **{f"train/{k}": v for k, v in train_metrics.items()}, } ) t0, step0 = time.monotonic(), global_step train_metrics = [] if global_step % args.checkpointing_steps == 0 and jax.process_index() == 0: controlnet.save_pretrained( f"{args.output_dir}/{global_step}", params=get_params_to_save(state.params), ) train_metric = jax_utils.unreplicate(train_metric) train_step_progress_bar.close() epochs.write(f"Epoch... ({epoch + 1}/{args.num_train_epochs} | Loss: {train_metric['loss']})") # Final validation & store model. 
if jax.process_index() == 0: if args.validation_prompt is not None: if args.profile_validation: jax.profiler.start_trace(args.output_dir) image_logs = log_validation(pipeline, pipeline_params, state.params, tokenizer, args, validation_rng, weight_dtype) if args.profile_validation: jax.profiler.stop_trace() else: image_logs = None controlnet.save_pretrained( args.output_dir, params=get_params_to_save(state.params), ) if args.push_to_hub: save_model_card( repo_id, image_logs=image_logs, base_model=args.pretrained_model_name_or_path, repo_folder=args.output_dir, ) upload_folder( repo_id=repo_id, folder_path=args.output_dir, commit_message="End of training", ignore_patterns=["step_*", "epoch_*"], ) if args.profile_memory: jax.profiler.save_device_memory_profile(os.path.join(args.output_dir, "memory_final.prof")) logger.info("Finished training.") if __name__ == "__main__": main()
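# --- Illustrative sketch (not part of the original training script) -------
# When --snr_gamma is set, the train step above rebalances the per-sample loss
# with the min-SNR trick (https://arxiv.org/abs/2303.09556):
# weight = min(SNR, gamma) / SNR. The linear-beta schedule below is a made-up
# stand-in for the real DDPM scheduler state, used only to show the weighting
# arithmetic outside the pmapped train step.
import jax.numpy as jnp


def _min_snr_weights_sketch(timesteps, snr_gamma=5.0, num_train_timesteps=1000):
    betas = jnp.linspace(1e-4, 2e-2, num_train_timesteps)
    alphas_cumprod = jnp.cumprod(1.0 - betas)
    alpha = jnp.sqrt(alphas_cumprod[timesteps])
    sigma = jnp.sqrt(1.0 - alphas_cumprod[timesteps])
    snr = (alpha / sigma) ** 2
    return jnp.where(snr < snr_gamma, snr, snr_gamma) / snr


# _min_snr_weights_sketch(jnp.array([10, 500, 990])) gives weights close to 1.0
# for very noisy (late) timesteps and well below 1.0 for nearly-clean early ones.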
community-events-main
jax-controlnet-sprint/training_scripts/train_controlnet_flax.py
import os import datasets import pandas as pd _VERSION = datasets.Version("0.0.2") _DESCRIPTION = "TODO" _HOMEPAGE = "TODO" _LICENSE = "TODO" _CITATION = "TODO" _FEATURES = datasets.Features( { "image": datasets.Image(), "conditioning_image": datasets.Image(), "text": datasets.Value("string"), }, ) _DEFAULT_CONFIG = datasets.BuilderConfig(name="default", version=_VERSION) DATA_DIR = "/mnt/disks/persist/data" class coyo(datasets.GeneratorBasedBuilder): BUILDER_CONFIGS = [_DEFAULT_CONFIG] DEFAULT_CONFIG_NAME = "default" def _info(self): return datasets.DatasetInfo( description=_DESCRIPTION, features=_FEATURES, supervised_keys=None, homepage=_HOMEPAGE, license=_LICENSE, citation=_CITATION, ) def _split_generators(self, dl_manager): metadata_path = f"{DATA_DIR}/meta.jsonl" images_dir = f"{DATA_DIR}/images" conditioning_images_dir = f"{DATA_DIR}/processed_images" return [ datasets.SplitGenerator( name=datasets.Split.TRAIN, # These kwargs will be passed to _generate_examples gen_kwargs={ "metadata_path": metadata_path, "images_dir": images_dir, "conditioning_images_dir": conditioning_images_dir, }, ), ] def _generate_examples(self, metadata_path, images_dir, conditioning_images_dir): metadata = pd.read_json(metadata_path, lines=True) for _, row in metadata.iterrows(): text = row["caption"] try: image_path = row["image"] image_path = os.path.join(images_dir, image_path) image = open(image_path, "rb").read() conditioning_image_path = row["conditioning_image"] conditioning_image_path = os.path.join( conditioning_images_dir, row["conditioning_image"] ) conditioning_image = open(conditioning_image_path, "rb").read() yield row["image"], { "text": text, "image": { "path": image_path, "bytes": image, }, "conditioning_image": { "path": conditioning_image_path, "bytes": conditioning_image, }, } except Exception as e: print(e)
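# --- Illustrative sketch (not part of the original loading script) --------
# `_generate_examples` expects DATA_DIR to hold a `meta.jsonl` plus `images/`
# and `processed_images/` folders, with each JSON line carrying a caption and
# relative paths to the target and conditioning images. The helper below writes
# one made-up row in that shape; the file names are placeholders, not real data.
import json


def _write_example_metadata_row(data_dir):
    row = {
        "caption": "a photo of a cat",       # becomes the "text" feature
        "image": "000001.png",               # file expected under images/
        "conditioning_image": "000001.png",  # file expected under processed_images/
    }
    with open(os.path.join(data_dir, "meta.jsonl"), "a") as f:
        f.write(json.dumps(row) + "\n")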
community-events-main
jax-controlnet-sprint/dataset_tools/data.py
import argparse import logging import random import cv2 import jsonlines import numpy as np import requests from datasets import load_dataset from PIL import Image logger = logging.getLogger(__name__) def parse_args(): parser = argparse.ArgumentParser( description="Example of a data preprocessing script." ) parser.add_argument( "--train_data_dir", type=str, required=True, help="The directory to store the dataset", ) parser.add_argument( "--cache_dir", type=str, required=True, help="The directory to store cache", ) parser.add_argument( "--max_train_samples", type=int, default=None, help="number of examples in the dataset", ) parser.add_argument( "--num_proc", type=int, default=1, help="number of processors to use in `dataset.map()`", ) args = parser.parse_args() return args # filter for `max_train_samples`` def filter_function(example): if example["clip_similarity_vitb32"] < 0.3: return False if example["watermark_score"] > 0.4: return False if example["aesthetic_score_laion_v2"] < 6.0: return False return True def filter_dataset(dataset, max_train_samples): small_dataset = dataset.select(range(max_train_samples)).filter(filter_function) return small_dataset if __name__ == "__main__": args = parse_args() # load coyo-700 dataset = load_dataset( "kakaobrain/coyo-700m", cache_dir=args.cache_dir, split="train", ) # estimation the % of images filtered filter_ratio = len(filter_dataset(dataset, 20000)) / 20000 # esimate max_train_samples based on # (1) filter_ratio we calculuted with 20k examples # (2) assumption that only 80% of the URLs are still valid max_train_samples = int(args.max_train_samples / filter_ratio / 0.8) # filter dataset down to 1 million small_dataset = filter_dataset(dataset, max_train_samples) def preprocess_and_save(example): image_url = example["url"] try: # download original image image = Image.open(requests.get(image_url, stream=True, timeout=5).raw) image_path = f"{args.train_data_dir}/images/{example['id']}.png" image.save(image_path) # generate and save canny image processed_image = np.array(image) # apply random threholds # note that this should normally be applied on the fly during training. # But that's fine when dealing with a larger dataset like here. threholds = ( random.randint(0, 255), random.randint(0, 255), ) processed_image = cv2.Canny(processed_image, min(threholds), max(threholds)) processed_image = processed_image[:, :, None] processed_image = np.concatenate( [processed_image, processed_image, processed_image], axis=2 ) processed_image = Image.fromarray(processed_image) processed_image_path = ( f"{args.train_data_dir}/processed_images/{example['id']}.png" ) processed_image.save(processed_image_path) # write to meta.jsonl meta = { "image": image_path, "conditioning_image": processed_image_path, "caption": example["text"], } with jsonlines.open( f"{args.train_data_dir}/meta.jsonl", "a" ) as writer: # for writing writer.write(meta) except Exception as e: logger.error(f"Failed to process image{image_url}: {str(e)}") # preprocess -> image, processed image and meta.jsonl small_dataset.map(preprocess_and_save, num_proc=args.num_proc) print(f"created data folder at: {args.train_data_dir}")
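# --- Illustrative sketch (not part of the original preprocessing script) --
# The conditioning image above is a Canny edge map computed with two random
# thresholds and stacked back to three channels. The function below repeats
# that recipe on a synthetic gradient image so the transform can be inspected
# without downloading COYO; the image content is made up.
def _canny_condition_sketch(size=64):
    gradient = np.tile(np.linspace(0, 255, size, dtype=np.uint8), (size, 1))
    rgb = np.stack([gradient, gradient, gradient], axis=2)      # fake RGB input
    lo, hi = sorted((random.randint(0, 255), random.randint(0, 255)))
    edges = cv2.Canny(rgb, lo, hi)                              # single channel
    edges = np.concatenate([edges[:, :, None]] * 3, axis=2)     # back to 3 channels
    return Image.fromarray(edges)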
community-events-main
jax-controlnet-sprint/dataset_tools/coyo_1m_dataset_preprocess.py
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from setuptools import setup
from setuptools import find_packages

extras = {}
extras["quality"] = ["black ~= 23.1", "ruff >= 0.0.241", "hf-doc-builder >= 0.3.0", "urllib3 < 2.0.0"]
extras["docs"] = []
extras["test_prod"] = ["pytest", "pytest-xdist", "pytest-subtests", "parameterized"]
extras["test_dev"] = ["datasets", "evaluate", "transformers", "scipy", "scikit-learn", "deepspeed", "tqdm", "bitsandbytes", "timm"]
extras["testing"] = extras["test_prod"] + extras["test_dev"]
extras["rich"] = ["rich"]

extras["test_trackers"] = ["wandb", "comet-ml", "tensorboard"]
extras["dev"] = extras["quality"] + extras["testing"] + extras["rich"]

extras["sagemaker"] = [
    "sagemaker",  # boto3 is a required package in sagemaker
]

setup(
    name="accelerate",
    version="0.24.0.dev0",
    description="Accelerate",
    long_description=open("README.md", "r", encoding="utf-8").read(),
    long_description_content_type="text/markdown",
    keywords="deep learning",
    license="Apache",
    author="The HuggingFace team",
    author_email="sylvain@huggingface.co",
    url="https://github.com/huggingface/accelerate",
    package_dir={"": "src"},
    packages=find_packages("src"),
    entry_points={
        "console_scripts": [
            "accelerate=accelerate.commands.accelerate_cli:main",
            "accelerate-config=accelerate.commands.config:main",
            "accelerate-estimate-memory=accelerate.commands.estimate:main",
            "accelerate-launch=accelerate.commands.launch:main",
        ]
    },
    python_requires=">=3.8.0",
    install_requires=["numpy>=1.17", "packaging>=20.0", "psutil", "pyyaml", "torch>=1.10.0", "huggingface_hub"],
    extras_require=extras,
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "Intended Audience :: Developers",
        "Intended Audience :: Education",
        "Intended Audience :: Science/Research",
        "License :: OSI Approved :: Apache Software License",
        "Operating System :: OS Independent",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.8",
        "Topic :: Scientific/Engineering :: Artificial Intelligence",
    ],
)

# Release checklist
# 1. Checkout the release branch (for a patch the current release branch, for a new minor version, create one):
#       git checkout -b vXX.xx-release
#    The -b is only necessary for creation (so remove it when doing a patch)
# 2. Change the version in __init__.py and setup.py to the proper value.
# 3. Commit these changes with the message: "Release: v<VERSION>"
# 4. Add a tag in git to mark the release:
#       git tag v<VERSION> -m 'Adds tag v<VERSION> for pypi'
#    Push the tag and release commit to git: git push --tags origin vXX.xx-release
# 5. Run the following commands in the top-level directory:
#       rm -rf dist
#       rm -rf build
#       python setup.py bdist_wheel
#       python setup.py sdist
# 6. Upload the package to the pypi test server first:
#       twine upload dist/* -r testpypi
# 7. Check that you can install it in a virtualenv by running:
#       pip install accelerate
#       pip uninstall accelerate
#       pip install -i https://testpypi.python.org/pypi accelerate
#       accelerate env
#       accelerate test
# 8. Upload the final version to actual pypi:
#       twine upload dist/* -r pypi
# 9. Add release notes to the tag in github once everything is looking hunky-dory.
# 10. Go back to the main branch and update the version in __init__.py, setup.py to the new version ".dev" and push to main.
accelerate-main
setup.py
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import os import unittest from tempfile import TemporaryDirectory import torch import torch.nn as nn from transformers import AutoModelForCausalLM, AutoTokenizer from accelerate.big_modeling import ( cpu_offload, cpu_offload_with_hook, disk_offload, dispatch_model, init_empty_weights, init_on_device, load_checkpoint_and_dispatch, ) from accelerate.hooks import remove_hook_from_submodules from accelerate.test_utils import require_bnb, require_cuda, require_mps, require_multi_gpu, slow from accelerate.utils import is_torch_version, offload_state_dict class ModelForTest(nn.Module): def __init__(self): super().__init__() self.linear1 = nn.Linear(3, 4) self.batchnorm = nn.BatchNorm1d(4) self.linear2 = nn.Linear(4, 5) def forward(self, x): return self.linear2(self.batchnorm(self.linear1(x))) class ModelForTestCopy(nn.Module): def __init__(self, id: int): super().__init__() self.id = id self.linear1 = nn.Linear(3, 4) self.batchnorm = nn.BatchNorm1d(4) self.linear2 = nn.Linear(4, 5) def forward(self, x): return self.linear2(self.batchnorm(self.linear1(x))), self.id class ModelForTestTiedWeights(nn.Module): def __init__(self): super().__init__() self.linear1 = nn.Linear(4, 4) self.batchnorm = nn.BatchNorm1d(4) self.linear2 = nn.Linear(4, 4) def forward(self, x): return self.linear2(self.batchnorm(self.linear1(x))) class BiggerModelForTest(nn.Module): def __init__(self): super().__init__() self.linear1 = nn.Linear(3, 4) self.linear2 = nn.Linear(4, 5) self.batchnorm = nn.BatchNorm1d(5) self.linear3 = nn.Linear(5, 6) self.linear4 = nn.Linear(6, 5) def forward(self, x): return self.linear4(self.linear3(self.batchnorm(self.linear2(self.linear1(x))))) # To test preload_module_classes class ModuleWithUnusedSubModules(nn.Module): def __init__(self, input_dim, output_dim): super().__init__() self.linear = nn.Linear(input_dim, output_dim) def forward(self, x): return x @ self.linear.weight.t() + self.linear.bias class ModelWithUnusedSubModulesForTest(nn.Module): def __init__(self): super().__init__() self.linear1 = ModuleWithUnusedSubModules(3, 4) self.linear2 = ModuleWithUnusedSubModules(4, 5) self.batchnorm = nn.BatchNorm1d(5) self.linear3 = ModuleWithUnusedSubModules(5, 6) self.linear4 = ModuleWithUnusedSubModules(6, 5) def forward(self, x): return self.linear4(self.linear3(self.batchnorm(self.linear2(self.linear1(x))))) class BigModelingTester(unittest.TestCase): def test_init_empty_weights(self): # base use with init_empty_weights(): module = nn.Linear(4, 5) self.assertEqual(module.weight.device, torch.device("meta")) # base use with buffers, they are not touched with init_empty_weights(): module = nn.BatchNorm1d(4) self.assertEqual(module.weight.device, torch.device("meta")) self.assertEqual(module.running_mean.device, torch.device("cpu")) # Use with include_buffers=True register_parameter_func = nn.Module.register_parameter register_buffer_func = nn.Module.register_buffer with 
init_empty_weights(include_buffers=True): module = nn.BatchNorm1d(4) # nn.Module.register_parameter/buffer shouldn't be changed with torch >= 2.0 if is_torch_version(">=", "2.0"): self.assertEqual(register_parameter_func, nn.Module.register_parameter) self.assertEqual(register_buffer_func, nn.Module.register_buffer) self.assertEqual(module.weight.device, torch.device("meta")) self.assertEqual(module.running_mean.device, torch.device("meta")) # Double check we didn't break PyTorch module = nn.BatchNorm1d(4) self.assertEqual(module.weight.device, torch.device("cpu")) self.assertEqual(module.running_mean.device, torch.device("cpu")) def test_init_empty_weights_very_large_model(self): # This is a 100 billion parameters model. with init_empty_weights(): _ = nn.Sequential(*[nn.Linear(10000, 10000) for _ in range(1000)]) @require_cuda def test_init_on_device_cuda(self): device = torch.device("cuda:0") with init_on_device(device): model = nn.Linear(10, 10) self.assertEqual(model.weight.device, device) self.assertEqual(model.weight.device, device) @require_mps def test_init_on_device_mps(self): device = torch.device("mps:0") with init_on_device(device): model = nn.Linear(10, 10) self.assertEqual(model.weight.device, device) self.assertEqual(model.weight.device, device) def test_cpu_offload(self): model = ModelForTest() x = torch.randn(2, 3) expected = model(x) device = torch.device(0 if torch.cuda.is_available() else "cpu") cpu_offload(model, execution_device=device) output = model(x) self.assertTrue( torch.allclose(expected, output.cpu(), 1e-4, 1e-5), msg=f"Expected: {expected}\nActual: {output.cpu()}" ) # Clean up for next test. remove_hook_from_submodules(model) cpu_offload(model, execution_device=device, offload_buffers=True) output = model(x) self.assertTrue( torch.allclose(expected, output.cpu(), 1e-4, 1e-5), msg=f"Expected: {expected}\nActual: {output.cpu()}" ) def test_cpu_offload_with_unused_submodules(self): model = ModelWithUnusedSubModulesForTest() x = torch.randn(2, 3) expected = model(x) device = torch.device(0 if torch.cuda.is_available() else "cpu") cpu_offload(model, execution_device=device, preload_module_classes=["ModuleWithUnusedSubModules"]) output = model(x) self.assertTrue( torch.allclose(expected, output.cpu(), 1e-4, 1e-5), msg=f"Expected: {expected}\nActual: {output.cpu()}" ) # Clean up for next test. remove_hook_from_submodules(model) cpu_offload( model, execution_device=device, offload_buffers=True, preload_module_classes=["ModuleWithUnusedSubModules"], ) output = model(x) self.assertTrue( torch.allclose(expected, output.cpu(), 1e-4, 1e-5), msg=f"Expected: {expected}\nActual: {output.cpu()}" ) @slow @require_cuda def test_cpu_offload_gpt2(self): tokenizer = AutoTokenizer.from_pretrained("gpt2") inputs = tokenizer("Hello world! My name is", return_tensors="pt").to(0) gpt2 = AutoModelForCausalLM.from_pretrained("gpt2") cpu_offload(gpt2, execution_device=0) outputs = gpt2.generate(inputs["input_ids"]) self.assertEqual( tokenizer.decode(outputs[0].tolist()), "Hello world! My name is Kiyoshi, and I'm a student at the University of Tokyo", ) def test_disk_offload(self): model = ModelForTest() x = torch.randn(2, 3) expected = model(x) device = torch.device(0 if torch.cuda.is_available() else "cpu") with TemporaryDirectory() as tmp_dir: disk_offload(model, tmp_dir, execution_device=device) output = model(x) self.assertTrue( torch.allclose(expected, output.cpu(), 1e-4, 1e-5), msg=f"Expected: {expected}\nActual: {output.cpu()}" ) # Clean up for next test. 
remove_hook_from_submodules(model) with TemporaryDirectory() as tmp_dir: disk_offload(model, tmp_dir, execution_device=device, offload_buffers=True) output = model(x) self.assertTrue( torch.allclose(expected, output.cpu(), 1e-4, 1e-5), msg=f"Expected: {expected}\nActual: {output.cpu()}" ) def test_disk_offload_with_unused_submodules(self): model = ModelWithUnusedSubModulesForTest() x = torch.randn(2, 3) expected = model(x) device = torch.device(0 if torch.cuda.is_available() else "cpu") with TemporaryDirectory() as tmp_dir: disk_offload( model, tmp_dir, execution_device=device, preload_module_classes=["ModuleWithUnusedSubModules"] ) output = model(x) self.assertTrue( torch.allclose(expected, output.cpu(), 1e-4, 1e-5), msg=f"Expected: {expected}\nActual: {output.cpu()}" ) # Clean up for next test. remove_hook_from_submodules(model) with TemporaryDirectory() as tmp_dir: disk_offload( model, tmp_dir, execution_device=device, offload_buffers=True, preload_module_classes=["ModuleWithUnusedSubModules"], ) output = model(x) self.assertTrue( torch.allclose(expected, output.cpu(), 1e-4, 1e-5), msg=f"Expected: {expected}\nActual: {output.cpu()}" ) @slow @require_cuda def test_disk_offload_gpt2(self): tokenizer = AutoTokenizer.from_pretrained("gpt2") inputs = tokenizer("Hello world! My name is", return_tensors="pt").to(0) gpt2 = AutoModelForCausalLM.from_pretrained("gpt2") with TemporaryDirectory() as tmp_dir: disk_offload(gpt2, tmp_dir, execution_device=0) outputs = gpt2.generate(inputs["input_ids"]) self.assertEqual( tokenizer.decode(outputs[0].tolist()), "Hello world! My name is Kiyoshi, and I'm a student at the University of Tokyo", ) @require_cuda def test_dispatch_model(self): model = ModelForTest() device_map = {"linear1": "disk", "batchnorm": "cpu", "linear2": 0} x = torch.randn(2, 3) expected = model(x) with TemporaryDirectory() as tmp_dir: dispatch_model(model, device_map, offload_dir=tmp_dir) output = model(x) self.assertTrue(torch.allclose(expected, output.cpu(), atol=1e-5)) @require_mps def test_dispatch_model_mps(self): model = ModelForTest() device_map = {"linear1": "mps", "batchnorm": "disk", "linear2": "disk"} x = torch.randn(2, 3) expected = model(x) with TemporaryDirectory() as tmp_dir: dispatch_model(model, device_map, offload_dir=tmp_dir) output = model(x) self.assertTrue(torch.allclose(expected, output.cpu(), atol=1e-5)) @require_cuda def test_dispatch_model_tied_weights(self): model = ModelForTestTiedWeights() model.linear1.weight = model.linear2.weight device_map = {"linear1": 0, "batchnorm": 0, "linear2": 0} dispatch_model(model, device_map) self.assertIs(model.linear2.weight, model.linear1.weight) @require_multi_gpu def test_dispatch_model_multi_gpu(self): model = BiggerModelForTest() device_map = {"linear1": "cpu", "linear2": "disk", "batchnorm": "cpu", "linear3": 0, "linear4": 1} x = torch.randn(2, 3) expected = model(x) with TemporaryDirectory() as tmp_dir: dispatch_model(model, device_map, offload_dir=tmp_dir) output = model(x) self.assertTrue(torch.allclose(expected, output.cpu(), atol=1e-5)) @require_cuda def test_dispatch_model_copy(self): original_model = ModelForTestCopy(id=1) device_map = {"linear1": 0, "batchnorm": "cpu", "linear2": 0} x = torch.randn(2, 3) expected, original_output_id = original_model(x) dispatch_model(original_model, device_map) copied_model = copy.deepcopy(original_model) copied_model.id = 2 output, copied_output_id = copied_model(x) self.assertEqual(original_model.id, original_output_id) self.assertEqual(copied_model.id, copied_output_id) 
self.assertFalse(copied_model.linear1.forward is original_model.linear1.forward) self.assertTrue(torch.allclose(expected, output.cpu(), atol=1e-5)) @require_cuda def test_dispatch_model_move_offloaded_model(self): model = ModelForTest() device_map = {"linear1": "disk", "batchnorm": "cpu", "linear2": 0} with TemporaryDirectory() as tmp_dir: dispatch_model(model, device_map, offload_dir=tmp_dir) with self.assertRaises(RuntimeError): model.to(0) @require_multi_gpu def test_dispatch_model_move_model_warning(self): model = ModelForTest() device_map = {"linear1": 0, "batchnorm": 0, "linear2": 1} with TemporaryDirectory() as tmp_dir: dispatch_model(model, device_map, offload_dir=tmp_dir) with self.assertLogs("accelerate.big_modeling", level="WARNING"): model.to("cpu") with self.assertLogs("accelerate.big_modeling", level="WARNING"): model.cuda(0) with self.assertRaises(RuntimeError): x = torch.randn(2, 3) model(x) @slow @require_multi_gpu def test_dispatch_model_gpt2_on_two_gpus(self): tokenizer = AutoTokenizer.from_pretrained("gpt2") inputs = tokenizer("Hello world! My name is", return_tensors="pt").to(0) gpt2 = AutoModelForCausalLM.from_pretrained("gpt2") # Dispatch on GPUs 0 and 1 device_map = { "transformer.wte": 0, "transformer.wpe": 0, "transformer.ln_f": 1, "lm_head": 0, } for i in range(12): device_map[f"transformer.h.{i}"] = 0 if i <= 5 else 1 gpt2 = dispatch_model(gpt2, device_map) outputs = gpt2.generate(inputs["input_ids"]) self.assertEqual( tokenizer.decode(outputs[0].tolist()), "Hello world! My name is Kiyoshi, and I'm a student at the University of Tokyo", ) # Dispatch with a bit of CPU offload gpt2 = AutoModelForCausalLM.from_pretrained("gpt2") for i in range(4): device_map[f"transformer.h.{i}"] = "cpu" gpt2 = dispatch_model(gpt2, device_map) outputs = gpt2.generate(inputs["input_ids"]) self.assertEqual( tokenizer.decode(outputs[0].tolist()), "Hello world! My name is Kiyoshi, and I'm a student at the University of Tokyo", ) # Dispatch with a bit of CPU and disk offload gpt2 = AutoModelForCausalLM.from_pretrained("gpt2") for i in range(2): device_map[f"transformer.h.{i}"] = "disk" with TemporaryDirectory() as tmp_dir: state_dict = { k: p for k, p in gpt2.state_dict().items() if "transformer.h.0" in k or "transformer.h.1" in k } offload_state_dict(tmp_dir, state_dict) gpt2 = dispatch_model(gpt2, device_map, offload_dir=tmp_dir) outputs = gpt2.generate(inputs["input_ids"]) self.assertEqual( tokenizer.decode(outputs[0].tolist()), "Hello world! 
My name is Kiyoshi, and I'm a student at the University of Tokyo", ) @require_cuda def test_dispatch_model_with_unused_submodules(self): model = ModelWithUnusedSubModulesForTest() device_map = {"linear1": "cpu", "linear2": "disk", "batchnorm": "cpu", "linear3": 0, "linear4": 0} x = torch.randn(2, 3) expected = model(x) with TemporaryDirectory() as tmp_dir: dispatch_model( model, device_map, offload_dir=tmp_dir, preload_module_classes=["ModuleWithUnusedSubModules"] ) output = model(x) self.assertTrue(torch.allclose(expected, output.cpu(), atol=1e-5)) @require_mps def test_dispatch_model_with_unused_submodules_mps(self): model = ModelWithUnusedSubModulesForTest() device_map = {"linear1": "mps", "linear2": "mps", "batchnorm": "mps", "linear3": "mps", "linear4": "disk"} x = torch.randn(2, 3) expected = model(x) with TemporaryDirectory() as tmp_dir: dispatch_model( model, device_map, offload_dir=tmp_dir, preload_module_classes=["ModuleWithUnusedSubModules"] ) output = model(x) self.assertTrue(torch.allclose(expected, output.cpu(), atol=1e-5)) @require_multi_gpu def test_dispatch_model_with_unused_submodules_multi_gpu(self): model = ModelWithUnusedSubModulesForTest() device_map = {"linear1": "cpu", "linear2": "disk", "batchnorm": "cpu", "linear3": 0, "linear4": 1} x = torch.randn(2, 3) expected = model(x) with TemporaryDirectory() as tmp_dir: dispatch_model( model, device_map, offload_dir=tmp_dir, preload_module_classes=["ModuleWithUnusedSubModules"] ) output = model(x) self.assertTrue(torch.allclose(expected, output.cpu(), atol=1e-5)) @require_cuda def test_dispatch_model_force_hooks(self): model = ModelForTest() device_map = {"": 0} x = torch.randn(2, 3) expected = model(x) dispatch_model(model, device_map, force_hooks=True) output = model(x) self.assertTrue(torch.allclose(expected, output.cpu(), atol=1e-5)) @require_cuda def test_load_checkpoint_and_dispatch(self): model = ModelForTest() device_map = {"linear1": "cpu", "batchnorm": "cpu", "linear2": 0} x = torch.randn(2, 3) expected = model(x) with TemporaryDirectory() as tmp_dir: checkpoint = os.path.join(tmp_dir, "pt_model.bin") torch.save(model.state_dict(), checkpoint) new_model = ModelForTest() new_model = load_checkpoint_and_dispatch(new_model, checkpoint, device_map=device_map) # CPU-offloaded weights are on the meta device while waiting for the forward pass. self.assertEqual(new_model.linear1.weight.device, torch.device("meta")) self.assertEqual(new_model.linear2.weight.device, torch.device(0)) output = new_model(x) self.assertTrue(torch.allclose(expected, output.cpu(), atol=1e-5)) @require_mps def test_load_checkpoint_and_dispatch_mps(self): model = ModelForTest() device_map = {"linear1": "mps", "batchnorm": "mps", "linear2": "disk"} x = torch.randn(2, 3) expected = model(x) with TemporaryDirectory() as tmp_dir: checkpoint = os.path.join(tmp_dir, "pt_model.bin") torch.save(model.state_dict(), checkpoint) new_model = ModelForTest() new_model = load_checkpoint_and_dispatch( new_model, checkpoint, device_map=device_map, offload_folder=tmp_dir ) # CPU-offloaded weights are on the meta device while waiting for the forward pass. 
self.assertEqual(new_model.linear1.weight.device, torch.device("mps:0")) self.assertEqual(new_model.linear2.weight.device, torch.device("meta")) output = new_model(x) self.assertTrue(torch.allclose(expected, output.cpu(), atol=1e-5)) @require_multi_gpu def test_load_checkpoint_and_dispatch_multi_gpu(self): model = BiggerModelForTest() device_map = {"linear1": "cpu", "linear2": "cpu", "batchnorm": 0, "linear3": 0, "linear4": 1} x = torch.randn(2, 3) expected = model(x) with TemporaryDirectory() as tmp_dir: checkpoint = os.path.join(tmp_dir, "pt_model.bin") torch.save(model.state_dict(), checkpoint) new_model = BiggerModelForTest() new_model = load_checkpoint_and_dispatch(new_model, checkpoint, device_map=device_map) # CPU-offloaded weights are on the meta device while waiting for the forward pass. self.assertEqual(new_model.linear1.weight.device, torch.device("meta")) self.assertEqual(new_model.linear2.weight.device, torch.device("meta")) self.assertEqual(new_model.linear3.weight.device, torch.device(0)) self.assertEqual(new_model.linear4.weight.device, torch.device(1)) output = new_model(x) self.assertTrue(torch.allclose(expected, output.cpu(), atol=1e-5)) @require_cuda def test_load_checkpoint_and_dispatch_with_unused_submodules(self): model = ModelWithUnusedSubModulesForTest() device_map = {"linear1": "cpu", "linear2": "cpu", "batchnorm": 0, "linear3": 0, "linear4": 0} x = torch.randn(2, 3) expected = model(x) with TemporaryDirectory() as tmp_dir: checkpoint = os.path.join(tmp_dir, "pt_model.bin") torch.save(model.state_dict(), checkpoint) new_model = ModelWithUnusedSubModulesForTest() new_model = load_checkpoint_and_dispatch( new_model, checkpoint, device_map=device_map, preload_module_classes=["ModuleWithUnusedSubModules"] ) # CPU-offloaded weights are on the meta device while waiting for the forward pass. self.assertEqual(new_model.linear1.linear.weight.device, torch.device("meta")) self.assertEqual(new_model.linear2.linear.weight.device, torch.device("meta")) self.assertEqual(new_model.linear3.linear.weight.device, torch.device(0)) self.assertEqual(new_model.linear4.linear.weight.device, torch.device(0)) output = new_model(x) self.assertTrue(torch.allclose(expected, output.cpu(), atol=1e-5)) @require_mps def test_load_checkpoint_and_dispatch_with_unused_submodules_mps(self): model = ModelWithUnusedSubModulesForTest() device_map = {"linear1": "mps", "linear2": "mps", "batchnorm": "mps", "linear3": "disk", "linear4": "disk"} x = torch.randn(2, 3) expected = model(x) with TemporaryDirectory() as tmp_dir: checkpoint = os.path.join(tmp_dir, "pt_model.bin") torch.save(model.state_dict(), checkpoint) new_model = ModelWithUnusedSubModulesForTest() new_model = load_checkpoint_and_dispatch( new_model, checkpoint, device_map=device_map, preload_module_classes=["ModuleWithUnusedSubModules"], offload_folder=tmp_dir, ) # CPU-offloaded weights are on the meta device while waiting for the forward pass. 
self.assertEqual(new_model.linear1.linear.weight.device, torch.device("mps:0")) self.assertEqual(new_model.linear2.linear.weight.device, torch.device("mps:0")) self.assertEqual(new_model.linear3.linear.weight.device, torch.device("meta")) self.assertEqual(new_model.linear4.linear.weight.device, torch.device("meta")) output = new_model(x) self.assertTrue(torch.allclose(expected, output.cpu(), atol=1e-5)) @require_multi_gpu def test_load_checkpoint_and_dispatch_multi_gpu_with_unused_submodules(self): model = ModelWithUnusedSubModulesForTest() device_map = {"linear1": "cpu", "linear2": "cpu", "batchnorm": 0, "linear3": 0, "linear4": 1} x = torch.randn(2, 3) expected = model(x) with TemporaryDirectory() as tmp_dir: checkpoint = os.path.join(tmp_dir, "pt_model.bin") torch.save(model.state_dict(), checkpoint) new_model = ModelWithUnusedSubModulesForTest() new_model = load_checkpoint_and_dispatch( new_model, checkpoint, device_map=device_map, preload_module_classes=["ModuleWithUnusedSubModules"] ) # CPU-offloaded weights are on the meta device while waiting for the forward pass. self.assertEqual(new_model.linear1.linear.weight.device, torch.device("meta")) self.assertEqual(new_model.linear2.linear.weight.device, torch.device("meta")) self.assertEqual(new_model.linear3.linear.weight.device, torch.device(0)) self.assertEqual(new_model.linear4.linear.weight.device, torch.device(1)) output = new_model(x) self.assertTrue(torch.allclose(expected, output.cpu(), atol=1e-5)) @require_cuda def test_cpu_offload_with_hook(self): model1 = torch.nn.Linear(4, 5) model1, hook1 = cpu_offload_with_hook(model1) self.assertEqual(model1.weight.device, torch.device("cpu")) inputs = torch.randn(3, 4) outputs = model1(inputs) self.assertEqual(outputs.device, torch.device(0)) self.assertEqual(model1.weight.device, torch.device(0)) hook1.offload() self.assertEqual(model1.weight.device, torch.device("cpu")) model2 = torch.nn.Linear(5, 5) model2, hook2 = cpu_offload_with_hook(model2, prev_module_hook=hook1) self.assertEqual(model2.weight.device, torch.device("cpu")) outputs = model1(inputs) self.assertEqual(outputs.device, torch.device(0)) self.assertEqual(model1.weight.device, torch.device(0)) outputs = model2(outputs) self.assertEqual(outputs.device, torch.device(0)) self.assertEqual(model1.weight.device, torch.device("cpu")) self.assertEqual(model2.weight.device, torch.device(0)) hook2.offload() self.assertEqual(model2.weight.device, torch.device("cpu")) @slow @require_bnb @require_multi_gpu def test_dispatch_model_bnb(self): """Tests that `dispatch_model` quantizes int8 layers""" from huggingface_hub import hf_hub_download from transformers import AutoConfig, AutoModel, BitsAndBytesConfig from transformers.utils.bitsandbytes import replace_with_bnb_linear with init_empty_weights(): model = AutoModel.from_config(AutoConfig.from_pretrained("bigscience/bloom-560m")) quantization_config = BitsAndBytesConfig(load_in_8bit=True) model = replace_with_bnb_linear( model, modules_to_not_convert=["lm_head"], quantization_config=quantization_config ) model_path = hf_hub_download("bigscience/bloom-560m", "pytorch_model.bin") model = load_checkpoint_and_dispatch( model, checkpoint=model_path, device_map="balanced", ) self.assertTrue(model.h[0].self_attention.query_key_value.weight.dtype == torch.int8) self.assertTrue(model.h[0].self_attention.query_key_value.weight.device.index == 0) self.assertTrue(model.h[-1].self_attention.query_key_value.weight.dtype == torch.int8) 
self.assertTrue(model.h[-1].self_attention.query_key_value.weight.device.index == 1) @slow @require_bnb def test_dispatch_model_int8_simple(self): """Tests that `dispatch_model` quantizes int8 layers""" from huggingface_hub import hf_hub_download from transformers import AutoConfig, AutoModel, BitsAndBytesConfig from transformers.utils.bitsandbytes import replace_with_bnb_linear with init_empty_weights(): model = AutoModel.from_config(AutoConfig.from_pretrained("bigscience/bloom-560m")) quantization_config = BitsAndBytesConfig(load_in_8bit=True) model = replace_with_bnb_linear( model, modules_to_not_convert=["lm_head"], quantization_config=quantization_config ) model_path = hf_hub_download("bigscience/bloom-560m", "pytorch_model.bin") # test with auto model = load_checkpoint_and_dispatch( model, checkpoint=model_path, device_map="auto", ) self.assertTrue(model.h[0].self_attention.query_key_value.weight.dtype == torch.int8) self.assertTrue(model.h[0].self_attention.query_key_value.weight.device.index == 0) with init_empty_weights(): model = AutoModel.from_config(AutoConfig.from_pretrained("bigscience/bloom-560m")) model = replace_with_bnb_linear( model, modules_to_not_convert=["lm_head"], quantization_config=quantization_config ) # test with str device map model = load_checkpoint_and_dispatch( model, checkpoint=model_path, device_map={"": torch.device("cuda:0")}, ) self.assertTrue(model.h[0].self_attention.query_key_value.weight.dtype == torch.int8) self.assertTrue(model.h[0].self_attention.query_key_value.weight.device.index == 0) with init_empty_weights(): model = AutoModel.from_config(AutoConfig.from_pretrained("bigscience/bloom-560m")) model = replace_with_bnb_linear( model, modules_to_not_convert=["lm_head"], quantization_config=quantization_config ) # test with torch.device device map model = load_checkpoint_and_dispatch( model, checkpoint=model_path, device_map={"": "cuda:0"}, ) self.assertTrue(model.h[0].self_attention.query_key_value.weight.dtype == torch.int8) self.assertTrue(model.h[0].self_attention.query_key_value.weight.device.index == 0) @slow @require_bnb def test_dipatch_model_fp4_simple(self): """Tests that `dispatch_model` quantizes fp4 layers""" from huggingface_hub import hf_hub_download from transformers import AutoConfig, AutoModel, BitsAndBytesConfig from transformers.utils.bitsandbytes import replace_with_bnb_linear with init_empty_weights(): model = AutoModel.from_config(AutoConfig.from_pretrained("bigscience/bloom-560m")) quantization_config = BitsAndBytesConfig(load_in_4bit=True) model = replace_with_bnb_linear( model, modules_to_not_convert=["lm_head"], quantization_config=quantization_config ) model_path = hf_hub_download("bigscience/bloom-560m", "pytorch_model.bin") # test with auto model = load_checkpoint_and_dispatch( model, checkpoint=model_path, device_map="auto", ) self.assertTrue(model.h[0].self_attention.query_key_value.weight.dtype == torch.uint8) self.assertTrue(model.h[0].self_attention.query_key_value.weight.device.index == 0) with init_empty_weights(): model = AutoModel.from_config(AutoConfig.from_pretrained("bigscience/bloom-560m")) model = replace_with_bnb_linear( model, modules_to_not_convert=["lm_head"], quantization_config=quantization_config ) # test with str device map model = load_checkpoint_and_dispatch( model, checkpoint=model_path, device_map={"": torch.device("cuda:0")}, ) self.assertTrue(model.h[0].self_attention.query_key_value.weight.dtype == torch.uint8) self.assertTrue(model.h[0].self_attention.query_key_value.weight.device.index 
== 0) with init_empty_weights(): model = AutoModel.from_config(AutoConfig.from_pretrained("bigscience/bloom-560m")) model = replace_with_bnb_linear( model, modules_to_not_convert=["lm_head"], quantization_config=quantization_config ) # test with torch.device device map model = load_checkpoint_and_dispatch( model, checkpoint=model_path, device_map={"": "cuda:0"}, ) self.assertTrue(model.h[0].self_attention.query_key_value.weight.dtype == torch.uint8) self.assertTrue(model.h[0].self_attention.query_key_value.weight.device.index == 0)
accelerate-main
tests/test_big_modeling.py
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import pickle
import unittest

import torch

from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu, require_cuda


@require_cpu
class OptimizerTester(unittest.TestCase):
    def test_accelerated_optimizer_pickling(self):
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), 0.1)
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer)
        try:
            pickle.loads(pickle.dumps(optimizer))
        except Exception as e:
            self.fail(f"Accelerated optimizer pickling failed with {e}")
        AcceleratorState._reset_state()


@require_cuda
class CudaOptimizerTester(unittest.TestCase):
    def test_accelerated_optimizer_step_was_skipped(self):
        model = torch.nn.Linear(5, 5)
        optimizer = torch.optim.SGD(model.parameters(), 0.1)
        accelerator = Accelerator(mixed_precision="fp16")
        model, optimizer = accelerator.prepare(model, optimizer)

        loss = model(torch.randn(2, 5, device=accelerator.device)).sum()
        accelerator.backward(loss)
        for p in model.parameters():
            # Fake the gradients, as if there's no overflow
            p.grad.fill_(0.01)

        optimizer.step()
        self.assertTrue(optimizer.step_was_skipped is False)

        loss = model(torch.randn(2, 5, device=accelerator.device)).sum()
        accelerator.backward(loss)
        for p in model.parameters():
            p.grad.fill_(0.01)
            # Manually set the gradients to be NaN, as if there's an overflow
            p.grad[0] = torch.tensor(float("nan"))

        optimizer.step()
        self.assertTrue(optimizer.step_was_skipped is True)

        loss = model(torch.randn(2, 5, device=accelerator.device)).sum()
        accelerator.backward(loss)
        for p in model.parameters():
            p.grad.fill_(0.01)
            # Manually set the gradients to be NaN, as if there's an overflow
            p.grad[0] = torch.tensor(float("nan"))

        optimizer.step()
        self.assertTrue(optimizer.step_was_skipped is True)

        loss = model(torch.randn(2, 5, device=accelerator.device)).sum()
        accelerator.backward(loss)
        for p in model.parameters():
            # Fake the gradients, as if there's no overflow
            p.grad.fill_(0.01)

        optimizer.step()
        self.assertTrue(optimizer.step_was_skipped is False)

        AcceleratorState._reset_state()
accelerate-main
tests/test_optimizer.py
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import random import unittest from torch.utils.data import BatchSampler, DataLoader, IterableDataset from accelerate import Accelerator from accelerate.data_loader import ( BatchSamplerShard, DataLoaderDispatcher, DataLoaderShard, IterableDatasetShard, SkipBatchSampler, SkipDataLoader, skip_first_batches, ) class RandomIterableDataset(IterableDataset): # For testing, an iterable dataset of random length def __init__(self, p_stop=0.01, max_length=1000): self.p_stop = p_stop self.max_length = max_length def __iter__(self): count = 0 stop = False while not stop and count < self.max_length: yield count count += 1 stop = random.random() < self.p_stop class DataLoaderTester(unittest.TestCase): def check_batch_sampler_shards(self, batch_sampler, expected, split_batches=False, even_batches=True): batch_sampler_shards = [ BatchSamplerShard(batch_sampler, 2, i, split_batches=split_batches, even_batches=even_batches) for i in range(2) ] batch_sampler_lists = [list(batch_sampler_shard) for batch_sampler_shard in batch_sampler_shards] if not split_batches: self.assertListEqual([len(shard) for shard in batch_sampler_shards], [len(e) for e in expected]) self.assertListEqual(batch_sampler_lists, expected) def test_batch_sampler_shards_with_no_splits(self): # Check the shards when the dataset is a round multiple of total batch size. batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=False) expected = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]], ] self.check_batch_sampler_shards(batch_sampler, expected) batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=True) # Expected shouldn't change self.check_batch_sampler_shards(batch_sampler, expected) # Check the shards when the dataset is a round multiple of batch size but not total batch size. batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=False) expected = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]], ] self.check_batch_sampler_shards(batch_sampler, expected) batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=True) expected = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(batch_sampler, expected) # Check the shards when the dataset is not a round multiple of batch size but has a multiple of # num_processes batch. 
batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=False) expected = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]], ] self.check_batch_sampler_shards(batch_sampler, expected) batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=True) expected = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(batch_sampler, expected) # Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of # num_processes batch. batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=False) expected = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]], ] self.check_batch_sampler_shards(batch_sampler, expected) batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=True) expected = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(batch_sampler, expected) # Check the shards when the dataset is very small. batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=False) expected = [[[0, 1, 0]], [[1, 0, 1]]] self.check_batch_sampler_shards(batch_sampler, expected) batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=True) expected = [[], []] self.check_batch_sampler_shards(batch_sampler, expected) def test_batch_sampler_shards_with_splits(self): # Check the shards when the dataset is a round multiple of batch size. batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=False) expected = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]], ] self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True) batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=True) # Expected shouldn't change self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True) # Check the shards when the dataset is not a round multiple of batch size. batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=False) expected = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]], ] self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True) batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=True) expected = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True) # Check the shards when the dataset is not a round multiple of batch size or num_processes. batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=False) expected = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]], ] self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True) batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=True) expected = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True) # Check the shards when the dataset is very small. 
batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=False) expected = [[[0, 1]], [[0, 1]]] self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True) batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=True) expected = [[], []] self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True) def test_batch_sampler_shards_with_no_splits_no_even(self): # Check the shards when the dataset is a round multiple of total batch size. batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=False) expected = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]], ] self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False) batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=True) # Expected shouldn't change self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False) # Check the shards when the dataset is a round multiple of batch size but not total batch size. batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=False) expected = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False) batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=True) expected = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False) # Check the shards when the dataset is not a round multiple of batch size but has a multiple of # num_processes batch. batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=False) expected = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]], ] self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False) batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=True) expected = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False) # Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of # num_processes batch. batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=False) expected = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False) batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=True) expected = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False) # Check the shards when the dataset is very small. batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=False) expected = [[[0, 1]], []] self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False) batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=True) expected = [[], []] self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False) def test_batch_sampler_shards_with_splits_no_even(self): # Check the shards when the dataset is a round multiple of batch size. 
batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=False) expected = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]], ] self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False) batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=True) # Expected shouldn't change self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False) # Check the shards when the dataset is not a round multiple of batch size. batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=False) expected = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False) batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=True) expected = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False) # Check the shards when the dataset is not a round multiple of batch size or num_processes. batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=False) expected = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False) batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=True) expected = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False) # Check the shards when the dataset is very small. batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=False) expected = [[[0, 1]], []] self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False) batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=True) expected = [[], []] self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False) def test_batch_sampler_with_varying_batch_size(self): batch_sampler = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]] batch_sampler_shards = [BatchSamplerShard(batch_sampler, 2, i, even_batches=False) for i in range(2)] self.assertEqual(len(batch_sampler_shards[0]), 3) self.assertEqual(len(batch_sampler_shards[1]), 2) self.assertListEqual(list(batch_sampler_shards[0]), [[0, 1, 2], [5, 6, 7, 8], [12, 13]]) self.assertListEqual(list(batch_sampler_shards[1]), [[3, 4], [9, 10, 11]]) def check_iterable_dataset_shards( self, dataset, seed, batch_size, drop_last=False, num_processes=2, split_batches=False ): random.seed(seed) reference = list(dataset) iterable_dataset_shards = [ IterableDatasetShard( dataset, batch_size=batch_size, drop_last=drop_last, num_processes=num_processes, process_index=i, split_batches=split_batches, ) for i in range(num_processes) ] iterable_dataset_lists = [] for iterable_dataset_shard in iterable_dataset_shards: # Since our random iterable dataset will be... random... we need to use a seed to get reproducible results. 
random.seed(seed) iterable_dataset_lists.append(list(iterable_dataset_shard)) shard_batch_size = batch_size // num_processes if split_batches else batch_size # All iterable dataset shard should have the same length, a round multiple of shard_batch_size first_list = iterable_dataset_lists[0] for l in iterable_dataset_lists[1:]: self.assertEqual(len(l), len(first_list)) self.assertTrue(len(l) % shard_batch_size == 0) observed = [] for idx in range(0, len(first_list), shard_batch_size): for l in iterable_dataset_lists: observed += l[idx : idx + shard_batch_size] if not drop_last: while len(reference) < len(observed): reference += reference self.assertListEqual(observed, reference[: len(observed)]) def test_iterable_dataset_shard(self): seed = 42 dataset = RandomIterableDataset() self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False) self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False) self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True) self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True) # Edge case with a very small dataset dataset = RandomIterableDataset(max_length=2) self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False) self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False) self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True) self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True) def test_skip_batch_sampler(self): batch_sampler = BatchSampler(range(16), batch_size=4, drop_last=False) new_batch_sampler = SkipBatchSampler(batch_sampler, 2) self.assertListEqual(list(new_batch_sampler), [[8, 9, 10, 11], [12, 13, 14, 15]]) def test_skip_data_loader(self): dataloader = SkipDataLoader(list(range(16)), batch_size=4, skip_batches=2) self.assertListEqual([t.tolist() for t in dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]]) def test_skip_first_batches(self): dataloader = DataLoader(list(range(16)), batch_size=4) new_dataloader = skip_first_batches(dataloader, num_batches=2) self.assertListEqual([t.tolist() for t in new_dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]]) def test_end_of_dataloader(self): dataloader = DataLoaderShard(list(range(16)), batch_size=4) for idx, _ in enumerate(dataloader): self.assertEqual(dataloader.end_of_dataloader, idx == 3) # Test it also works on the second iteration for idx, _ in enumerate(dataloader): self.assertEqual(dataloader.end_of_dataloader, idx == 3) def test_end_of_dataloader_dispatcher(self): Accelerator() dataloader = DataLoaderDispatcher(range(16), batch_size=4) for idx, _ in enumerate(dataloader): self.assertEqual(dataloader.end_of_dataloader, idx == 3) # Test it also works on the second iteration for idx, _ in enumerate(dataloader): self.assertEqual(dataloader.end_of_dataloader, idx == 3)
accelerate-main
tests/test_data_loader.py
import unittest
from dataclasses import dataclass

import pytest

from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict


@dataclass
class MockLaunchConfig(SageMakerConfig):
    compute_environment = ComputeEnvironment.AMAZON_SAGEMAKER
    fp16 = True
    ec2_instance_type = "ml.p3.2xlarge"
    iam_role_name = "accelerate_sagemaker_execution_role"
    profile = "hf-sm"
    region = "us-east-1"
    num_machines = 1
    base_job_name = "accelerate-sagemaker-1"
    pytorch_version = "1.6"
    transformers_version = "4.4"
    training_script = "train.py"
    success_training_script_args = [
        "--model_name_or_path",
        "bert",
        "--do_train",
        "False",
        "--epochs",
        "3",
        "--learning_rate",
        "5e-5",
        "--max_steps",
        "50.5",
    ]
    fail_training_script_args = [
        "--model_name_or_path",
        "bert",
        "--do_train",
        "--do_test",
        "False",
        "--do_predict",
        "--epochs",
        "3",
        "--learning_rate",
        "5e-5",
        "--max_steps",
        "50.5",
    ]


class SageMakerLaunch(unittest.TestCase):
    def test_args_convert(self):
        # Check that the nargs-style script args are converted to the right Python types.
        converted_args = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args)
        assert isinstance(converted_args["model_name_or_path"], str)
        assert isinstance(converted_args["do_train"], bool)
        assert isinstance(converted_args["epochs"], int)
        assert isinstance(converted_args["learning_rate"], float)
        assert isinstance(converted_args["max_steps"], float)

        with pytest.raises(ValueError):
            _convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args)
accelerate-main
tests/test_sagemaker.py
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import pickle import unittest from collections import UserDict, namedtuple import torch from accelerate.test_utils.testing import require_cuda, require_torch_min_version from accelerate.test_utils.training import RegressionModel from accelerate.utils import ( convert_outputs_to_fp32, extract_model_from_parallel, find_device, listify, patch_environment, recursively_apply, send_to_device, ) ExampleNamedTuple = namedtuple("ExampleNamedTuple", "a b c") class UtilsTester(unittest.TestCase): def test_send_to_device(self): tensor = torch.randn(5, 2) device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu") result1 = send_to_device(tensor, device) self.assertTrue(torch.equal(result1.cpu(), tensor)) result2 = send_to_device((tensor, [tensor, tensor], 1), device) self.assertIsInstance(result2, tuple) self.assertTrue(torch.equal(result2[0].cpu(), tensor)) self.assertIsInstance(result2[1], list) self.assertTrue(torch.equal(result2[1][0].cpu(), tensor)) self.assertTrue(torch.equal(result2[1][1].cpu(), tensor)) self.assertEqual(result2[2], 1) result2 = send_to_device({"a": tensor, "b": [tensor, tensor], "c": 1}, device) self.assertIsInstance(result2, dict) self.assertTrue(torch.equal(result2["a"].cpu(), tensor)) self.assertIsInstance(result2["b"], list) self.assertTrue(torch.equal(result2["b"][0].cpu(), tensor)) self.assertTrue(torch.equal(result2["b"][1].cpu(), tensor)) self.assertEqual(result2["c"], 1) result3 = send_to_device(ExampleNamedTuple(a=tensor, b=[tensor, tensor], c=1), device) self.assertIsInstance(result3, ExampleNamedTuple) self.assertTrue(torch.equal(result3.a.cpu(), tensor)) self.assertIsInstance(result3.b, list) self.assertTrue(torch.equal(result3.b[0].cpu(), tensor)) self.assertTrue(torch.equal(result3.b[1].cpu(), tensor)) self.assertEqual(result3.c, 1) result4 = send_to_device(UserDict({"a": tensor, "b": [tensor, tensor], "c": 1}), device) self.assertIsInstance(result4, UserDict) self.assertTrue(torch.equal(result4["a"].cpu(), tensor)) self.assertIsInstance(result4["b"], list) self.assertTrue(torch.equal(result4["b"][0].cpu(), tensor)) self.assertTrue(torch.equal(result4["b"][1].cpu(), tensor)) self.assertEqual(result4["c"], 1) def test_honor_type(self): with self.assertRaises(TypeError) as cm: _ = recursively_apply(torch.tensor, (torch.tensor(1), 1), error_on_other_type=True) self.assertEqual( str(cm.exception), "Unsupported types (<class 'int'>) passed to `tensor`. 
Only nested list/tuple/dicts of objects that are valid for `is_torch_tensor` should be passed.", ) def test_listify(self): tensor = torch.tensor([1, 2, 3, 4, 5]) self.assertEqual(listify(tensor), [1, 2, 3, 4, 5]) tensor = torch.tensor([[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]]) self.assertEqual(listify(tensor), [[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]]) tensor = torch.tensor([[[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]], [[11, 12, 13, 14, 15], [16, 17, 18, 19, 20]]]) self.assertEqual( listify(tensor), [[[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]], [[11, 12, 13, 14, 15], [16, 17, 18, 19, 20]]] ) def test_patch_environment(self): with patch_environment(aa=1, BB=2): self.assertEqual(os.environ.get("AA"), "1") self.assertEqual(os.environ.get("BB"), "2") self.assertNotIn("AA", os.environ) self.assertNotIn("BB", os.environ) def test_patch_environment_key_exists(self): # check that patch_environment correctly restores pre-existing env vars with patch_environment(aa=1, BB=2): self.assertEqual(os.environ.get("AA"), "1") self.assertEqual(os.environ.get("BB"), "2") with patch_environment(Aa=10, bb="20", cC=30): self.assertEqual(os.environ.get("AA"), "10") self.assertEqual(os.environ.get("BB"), "20") self.assertEqual(os.environ.get("CC"), "30") self.assertEqual(os.environ.get("AA"), "1") self.assertEqual(os.environ.get("BB"), "2") self.assertNotIn("CC", os.environ) self.assertNotIn("AA", os.environ) self.assertNotIn("BB", os.environ) self.assertNotIn("CC", os.environ) def test_can_undo_convert_outputs(self): model = RegressionModel() model._original_forward = model.forward model.forward = convert_outputs_to_fp32(model.forward) model = extract_model_from_parallel(model, keep_fp32_wrapper=False) _ = pickle.dumps(model) @require_cuda def test_can_undo_fp16_conversion(self): model = RegressionModel() model._original_forward = model.forward model.forward = torch.cuda.amp.autocast(dtype=torch.float16)(model.forward) model.forward = convert_outputs_to_fp32(model.forward) model = extract_model_from_parallel(model, keep_fp32_wrapper=False) _ = pickle.dumps(model) @require_cuda @require_torch_min_version(version="2.0") def test_dynamo(self): model = RegressionModel() model._original_forward = model.forward model.forward = torch.cuda.amp.autocast(dtype=torch.float16)(model.forward) model.forward = convert_outputs_to_fp32(model.forward) model.forward = torch.compile(model.forward, backend="inductor") inputs = torch.randn(4, 10).cuda() _ = model(inputs) def test_extract_model(self): model = RegressionModel() # could also do a test with DistributedDataParallel, but difficult to run on CPU or single GPU distributed_model = torch.nn.parallel.DataParallel(model) model_unwrapped = extract_model_from_parallel(distributed_model) self.assertEqual(model, model_unwrapped) @require_torch_min_version(version="2.0") def test_dynamo_extract_model(self): model = RegressionModel() compiled_model = torch.compile(model) # could also do a test with DistributedDataParallel, but difficult to run on CPU or single GPU distributed_model = torch.nn.parallel.DataParallel(model) distributed_compiled_model = torch.compile(distributed_model) compiled_model_unwrapped = extract_model_from_parallel(distributed_compiled_model) self.assertEqual(compiled_model._orig_mod, compiled_model_unwrapped._orig_mod) def test_find_device(self): self.assertEqual(find_device([1, "a", torch.tensor([1, 2, 3])]), torch.device("cpu")) self.assertEqual(find_device({"a": 1, "b": torch.tensor([1, 2, 3])}), torch.device("cpu")) self.assertIsNone(find_device([1, "a"]))
accelerate-main
tests/test_utils.py
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script


@require_cpu
class MultiCPUTester(unittest.TestCase):
    def test_cpu(self):
        debug_launcher(test_script.main)

    def test_ops(self):
        debug_launcher(test_ops.main)
accelerate-main
tests/test_cpu.py
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import torch
from torch import nn

from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory


def raise_fake_out_of_memory():
    raise RuntimeError("CUDA out of memory.")


class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))


class MemoryTest(unittest.TestCase):
    def test_memory_implicit(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()

        mock_training_loop_function()
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])

    def test_memory_explicit(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()
            return batch_size, arg1

        bs, arg1 = mock_training_loop_function("hello")
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])
        self.assertListEqual([bs, arg1], [8, "hello"])

    def test_start_zero(self):
        @find_executable_batch_size(starting_batch_size=0)
        def mock_training_loop_function(batch_size):
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])

    def test_approach_zero(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            if batch_size > 0:
                raise_fake_out_of_memory()
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])

    def test_verbose_guard(self):
        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1, arg2):
            if batch_size != 8:
                raise raise_fake_out_of_memory()

        with self.assertRaises(TypeError) as cm:
            mock_training_loop_function(128, "hello", "world")
        self.assertIn("Batch size was passed into `f`", cm.exception.args[0])
        self.assertIn("`f(arg1='hello', arg2='world')", cm.exception.args[0])

    def test_any_other_error(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            raise ValueError("Oops, we had an error!")

        with self.assertRaises(ValueError) as cm:
            mock_training_loop_function()
        self.assertIn("Oops, we had an error!", cm.exception.args[0])

    @require_cuda
    def test_release_memory(self):
        starting_memory = torch.cuda.memory_allocated()
        model = ModelForTest()
        model.cuda()
        self.assertGreater(torch.cuda.memory_allocated(), starting_memory)
        model = release_memory(model)
        self.assertEqual(torch.cuda.memory_allocated(), starting_memory)
accelerate-main
tests/test_memory_utils.py
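The decorator exercised above retries its wrapped function with a halved batch size whenever it hits an out-of-memory error. A minimal usage sketch, assuming a toy model, dataset and loss that are not taken from the repository:

import torch
from torch.utils.data import DataLoader, TensorDataset

from accelerate.utils.memory import find_executable_batch_size


def train_with_auto_batch_size(model, dataset, starting_batch_size=64):
    @find_executable_batch_size(starting_batch_size=starting_batch_size)
    def inner_training_loop(batch_size):
        # On a CUDA OOM, the decorator halves `batch_size` and calls this function again;
        # it raises once the batch size reaches zero (see `test_approach_zero` above).
        loader = DataLoader(dataset, batch_size=batch_size)
        optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
        for x, y in loader:
            optimizer.zero_grad()
            loss = torch.nn.functional.mse_loss(model(x), y)
            loss.backward()
            optimizer.step()
        return batch_size

    return inner_training_loop()


if __name__ == "__main__":
    dataset = TensorDataset(torch.randn(128, 3), torch.randn(128, 5))
    model = torch.nn.Linear(3, 5)
    print("trained with batch size", train_with_auto_batch_size(model, dataset))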
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect import os import unittest import torch import accelerate from accelerate import debug_launcher from accelerate.test_utils import ( execute_subprocess_async, require_cpu, require_huggingface_suite, require_multi_gpu, require_single_gpu, ) from accelerate.utils import patch_environment @require_huggingface_suite class MetricTester(unittest.TestCase): def setUp(self): mod_file = inspect.getfile(accelerate.test_utils) self.test_file_path = os.path.sep.join( mod_file.split(os.path.sep)[:-1] + ["scripts", "external_deps", "test_metrics.py"] ) from accelerate.test_utils.scripts.external_deps import test_metrics # noqa: F401 self.test_metrics = test_metrics @require_cpu def test_metric_cpu_noop(self): debug_launcher(self.test_metrics.main, num_processes=1) @require_cpu def test_metric_cpu_multi(self): debug_launcher(self.test_metrics.main) @require_single_gpu def test_metric_gpu(self): self.test_metrics.main() @require_multi_gpu def test_metric_gpu_multi(self): print(f"Found {torch.cuda.device_count()} devices.") cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path] with patch_environment(omp_num_threads=1, ACCELERATE_LOG_LEVEL="INFO"): execute_subprocess_async(cmd, env=os.environ.copy())
accelerate-main
tests/test_metrics.py
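The metrics tests above launch their worker either in-process with `debug_launcher` or through `torchrun`. A small sketch of the in-process route, using a toy worker that is an assumption for this example rather than code from the repository:

from accelerate import Accelerator, debug_launcher


def worker():
    # Each launched process builds its own Accelerator; communication happens over CPU.
    accelerator = Accelerator()
    print(f"hello from process {accelerator.process_index} of {accelerator.num_processes}")


if __name__ == "__main__":
    # Emulates a 2-process distributed run on CPU, as the CPU tests above do.
    debug_launcher(worker, num_processes=2)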
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect import os import unittest from dataclasses import dataclass import torch from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs from accelerate.state import AcceleratorState from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu from accelerate.utils import AutocastKwargs, KwargsHandler, TorchDynamoPlugin, clear_environment @dataclass class MockClass(KwargsHandler): a: int = 0 b: bool = False c: float = 3.0 class KwargsHandlerTester(unittest.TestCase): def test_kwargs_handler(self): # If no defaults are changed, `to_kwargs` returns an empty dict. self.assertDictEqual(MockClass().to_kwargs(), {}) self.assertDictEqual(MockClass(a=2).to_kwargs(), {"a": 2}) self.assertDictEqual(MockClass(a=2, b=True).to_kwargs(), {"a": 2, "b": True}) self.assertDictEqual(MockClass(a=2, c=2.25).to_kwargs(), {"a": 2, "c": 2.25}) @require_cuda def test_grad_scaler_kwargs(self): # If no defaults are changed, `to_kwargs` returns an empty dict. scaler_handler = GradScalerKwargs(init_scale=1024, growth_factor=2) AcceleratorState._reset_state() accelerator = Accelerator(mixed_precision="fp16", kwargs_handlers=[scaler_handler]) print(accelerator.use_fp16) scaler = accelerator.scaler # Check the kwargs have been applied self.assertEqual(scaler._init_scale, 1024.0) self.assertEqual(scaler._growth_factor, 2.0) # Check the other values are at the default self.assertEqual(scaler._backoff_factor, 0.5) self.assertEqual(scaler._growth_interval, 2000) self.assertEqual(scaler._enabled, True) @require_multi_gpu def test_ddp_kwargs(self): cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)] execute_subprocess_async(cmd, env=os.environ.copy()) @require_cuda def test_autocast_kwargs(self): kwargs = AutocastKwargs(enabled=False) AcceleratorState._reset_state() accelerator = Accelerator(mixed_precision="fp16") a_float32 = torch.rand((8, 8), device=accelerator.device) b_float32 = torch.rand((8, 8), device=accelerator.device) c_float32 = torch.rand((8, 8), device=accelerator.device) d_float32 = torch.rand((8, 8), device=accelerator.device) with accelerator.autocast(): e_float16 = torch.mm(a_float32, b_float32) assert e_float16.dtype == torch.float16 with accelerator.autocast(autocast_handler=kwargs): # Convert e_float16 to float32 f_float32 = torch.mm(c_float32, e_float16.float()) assert f_float32.dtype == torch.float32 g_float16 = torch.mm(d_float32, f_float32) # We should be back in fp16 assert g_float16.dtype == torch.float16 def test_torch_dynamo_plugin(self): with clear_environment(): prefix = "ACCELERATE_DYNAMO_" # nvfuser's dynamo backend name is "nvprims_nvfuser" # use "nvfuser" here to cause exception if this test causes os.environ changed permanently os.environ[prefix + "BACKEND"] = "nvfuser" os.environ[prefix + "MODE"] = "reduce-overhead" dynamo_plugin_kwargs = TorchDynamoPlugin().to_kwargs() 
            self.assertEqual(dynamo_plugin_kwargs, {"backend": "nvfuser", "mode": "reduce-overhead"})


if __name__ == "__main__":
    ddp_scaler = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
    accelerator = Accelerator(kwargs_handlers=[ddp_scaler])
    model = torch.nn.Linear(100, 200)
    model = accelerator.prepare(model)

    # Check the values changed in kwargs
    error_msg = ""
    observed_bucket_cap_map = model.bucket_bytes_cap // (1024 * 1024)
    if observed_bucket_cap_map != 15:
        error_msg += f"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
    if model.find_unused_parameters is not True:
        error_msg += f"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"

    # Check the values of the defaults
    if model.dim != 0:
        error_msg += f"Default value not respected, should have `0` but found {model.dim}.\n"
    if model.broadcast_buffers is not True:
        error_msg += f"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
    if model.gradient_as_bucket_view is not False:
        error_msg += f"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"

    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
accelerate-main
tests/test_kwargs_handlers.py
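A sketch of how these handlers are meant to be used in user code; the fp16 part assumes a CUDA GPU is available, and the concrete values are arbitrary examples rather than recommendations:

from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs

# `to_kwargs()` returns only the fields that differ from the dataclass defaults.
scaler_kwargs = GradScalerKwargs(init_scale=1024.0)
print(scaler_kwargs.to_kwargs())  # {'init_scale': 1024.0}

ddp_kwargs = DistributedDataParallelKwargs(find_unused_parameters=True)

# Assumes a GPU: fp16 mixed precision creates a GradScaler configured from `scaler_kwargs`,
# and the DDP wrapper (in multi-GPU runs) is configured from `ddp_kwargs`.
accelerator = Accelerator(mixed_precision="fp16", kwargs_handlers=[scaler_kwargs, ddp_kwargs])
print(accelerator.scaler._init_scale)  # 1024.0, as checked in `test_grad_scaler_kwargs` above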
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from functools import partial import torch from accelerate import Accelerator, debug_launcher from accelerate.state import AcceleratorState, GradientState from accelerate.test_utils import require_cpu, require_huggingface_suite from accelerate.utils import GradientAccumulationPlugin def one_cycle_test(num_processes=2, step_scheduler_with_optimizer=True, split_batches=False): accelerator = Accelerator(step_scheduler_with_optimizer=step_scheduler_with_optimizer, split_batches=split_batches) model = torch.nn.Linear(2, 4) optimizer = torch.optim.AdamW(model.parameters(), lr=1.0) scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer, max_lr=0.01, steps_per_epoch=2, epochs=1) model, optimizer, scheduler = accelerator.prepare(model, optimizer, scheduler) # Optimizer has stepped scheduler.step() if step_scheduler_with_optimizer or (num_processes == 1): assert ( scheduler.scheduler.last_epoch == num_processes ), f"Last Epoch ({scheduler.scheduler.last_epoch}) != Num Processes ({num_processes})" else: assert ( scheduler.scheduler.last_epoch != num_processes ), f"Last Epoch ({scheduler.scheduler.last_epoch}) == Num Processes ({num_processes})" def lambda_test(num_processes=2, step_scheduler_with_optimizer=True, split_batches=False): accelerator = Accelerator(step_scheduler_with_optimizer=step_scheduler_with_optimizer, split_batches=split_batches) model = torch.nn.Linear(2, 4) optimizer = torch.optim.AdamW(model.parameters(), lr=1.0) scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda n: 1 - n / 10) model, optimizer, scheduler = accelerator.prepare(model, optimizer, scheduler) # Optimizer has stepped optimizer._is_overflow = False scheduler.step() expected_lr = 1 - (num_processes if (step_scheduler_with_optimizer and not split_batches) else 1) / 10 assert ( scheduler.get_last_lr()[0] == expected_lr ), f"Wrong lr found at first step, expected {expected_lr}, got {scheduler.get_last_lr()[0]}" # Optimizer has not stepped optimizer._is_overflow = True scheduler.step() if not step_scheduler_with_optimizer: expected_lr = 1 - 2 / 10 assert ( scheduler.get_last_lr()[0] == expected_lr ), f"Wrong lr found at second step, expected {expected_lr}, got {scheduler.get_last_lr()[0]}" def accumulation_test(num_processes: int = 2): """ With this test, an observed batch size of 64 should result in neglible differences in the scheduler after going through the correct number of steps. Uses single, two, and four steps to test. 
""" from transformers import get_linear_schedule_with_warmup steps = [1, 2, 4] for num_steps in steps: plugin = GradientAccumulationPlugin(num_steps=num_steps, adjust_scheduler=num_steps > 1) accelerator = Accelerator(gradient_accumulation_plugin=plugin) model = torch.nn.Linear(2, 4) optimizer = torch.optim.AdamW(model.parameters(), lr=10.0) scheduler = get_linear_schedule_with_warmup(optimizer=optimizer, num_warmup_steps=0, num_training_steps=20) model, optimizer, scheduler = accelerator.prepare(model, optimizer, scheduler) for i in range(10 * num_steps): with accelerator.accumulate(model): optimizer.step() scheduler.step() if i == (10 * num_steps - 2): assert ( scheduler.get_last_lr()[0] != 0 ), f"Wrong lr found at second-to-last step, expected non-zero, got {scheduler.get_last_lr()[0]}. num_steps: {num_steps}" assert ( scheduler.get_last_lr()[0] == 0 ), f"Wrong lr found at last step, expected 0, got {scheduler.get_last_lr()[0]}" GradientState._reset_state() @require_cpu class SchedulerTester(unittest.TestCase): def test_lambda_scheduler_steps_with_optimizer_single_process(self): debug_launcher(partial(lambda_test, num_processes=1), num_processes=1) debug_launcher(partial(lambda_test, num_processes=1, split_batches=True), num_processes=1) def test_one_cycle_scheduler_steps_with_optimizer_single_process(self): debug_launcher(partial(one_cycle_test, num_processes=1), num_processes=1) debug_launcher(partial(one_cycle_test, num_processes=1, split_batches=True), num_processes=1) def test_lambda_scheduler_not_step_with_optimizer_single_process(self): debug_launcher(partial(lambda_test, num_processes=1, step_scheduler_with_optimizer=False), num_processes=1) def test_one_cycle_scheduler_not_step_with_optimizer_single_process(self): debug_launcher(partial(one_cycle_test, num_processes=1, step_scheduler_with_optimizer=False), num_processes=1) def test_lambda_scheduler_steps_with_optimizer_multiprocess(self): AcceleratorState._reset_state(True) debug_launcher(lambda_test) debug_launcher(partial(lambda_test, num_processes=1, split_batches=True), num_processes=1) def test_one_cycle_scheduler_steps_with_optimizer_multiprocess(self): AcceleratorState._reset_state(True) debug_launcher(one_cycle_test) debug_launcher(partial(one_cycle_test, num_processes=1, split_batches=True), num_processes=1) def test_lambda_scheduler_not_step_with_optimizer_multiprocess(self): AcceleratorState._reset_state(True) debug_launcher(partial(lambda_test, step_scheduler_with_optimizer=False)) def test_one_cycle_scheduler_not_step_with_optimizer_multiprocess(self): AcceleratorState._reset_state(True) debug_launcher(partial(one_cycle_test, step_scheduler_with_optimizer=False)) @require_huggingface_suite def test_accumulation(self): AcceleratorState._reset_state(True) debug_launcher(partial(accumulation_test, num_processes=1)) debug_launcher(accumulation_test)
accelerate-main
tests/test_scheduler.py
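A sketch of the scheduler flow these tests cover: once a scheduler goes through `accelerator.prepare`, it is stepped in sync with the optimizer (the default `step_scheduler_with_optimizer=True`). The model, data and learning rates below are toy placeholders:

import torch

from accelerate import Accelerator

accelerator = Accelerator(step_scheduler_with_optimizer=True)
model = torch.nn.Linear(2, 4)
optimizer = torch.optim.AdamW(model.parameters(), lr=1.0)
scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda n: 1 - n / 10)

model, optimizer, scheduler = accelerator.prepare(model, optimizer, scheduler)

x = torch.randn(8, 2)
loss = model(x).sum()
accelerator.backward(loss)
optimizer.step()
scheduler.step()  # skipped internally if the (mixed-precision) optimizer step overflowed
print(scheduler.get_last_lr())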
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os import tempfile import unittest from collections import OrderedDict import torch import torch.nn as nn from accelerate import init_empty_weights from accelerate.test_utils import require_cuda, require_huggingface_suite, require_multi_gpu, require_safetensors from accelerate.utils.modeling import ( check_device_map, clean_device_map, compute_module_sizes, convert_file_size_to_int, find_tied_parameters, get_balanced_memory, infer_auto_device_map, load_checkpoint_in_model, load_state_dict, named_module_tensors, retie_parameters, set_module_tensor_to_device, ) class ModelForTest(nn.Module): def __init__(self): super().__init__() self.linear1 = nn.Linear(3, 4) self.batchnorm = nn.BatchNorm1d(4) self.linear2 = nn.Linear(4, 5) def forward(self, x): return self.linear2(self.batchnorm(self.linear1(x))) def sequential_model(num_layers): layers = OrderedDict([(f"linear{i}", nn.Linear(1000, 1000)) for i in range(1, num_layers + 1)]) return nn.Sequential(layers) class ModelingUtilsTester(unittest.TestCase): def check_set_module_tensor_for_device(self, model, device1, device2): self.assertEqual(model.linear1.weight.device, torch.device(device1)) with self.subTest("Access by submodule and direct name for a parameter"): set_module_tensor_to_device(model.linear1, "weight", device2) self.assertEqual(model.linear1.weight.device, torch.device(device2)) if torch.device(device2) == torch.device("meta"): with self.assertRaises(ValueError): # We need a `value` to set the weight back on device1 set_module_tensor_to_device(model.linear1, "weight", device1) set_module_tensor_to_device(model.linear1, "weight", device1, value=torch.randn(4, 3)) else: set_module_tensor_to_device(model.linear1, "weight", device1) self.assertEqual(model.linear1.weight.device, torch.device(device1)) with self.subTest("Access by module and full name for a parameter"): set_module_tensor_to_device(model, "linear1.weight", device2) self.assertEqual(model.linear1.weight.device, torch.device(device2)) if torch.device(device2) == torch.device("meta"): with self.assertRaises(ValueError): # We need a `value` to set the weight back on device1 set_module_tensor_to_device(model, "linear1.weight", device1) set_module_tensor_to_device(model, "linear1.weight", device1, value=torch.randn(4, 3)) else: set_module_tensor_to_device(model, "linear1.weight", device1) self.assertEqual(model.linear1.weight.device, torch.device(device1)) self.assertEqual(model.batchnorm.running_mean.device, torch.device(device1)) with self.subTest("Access by submodule and direct name for a buffer"): set_module_tensor_to_device(model.batchnorm, "running_mean", device2) self.assertEqual(model.batchnorm.running_mean.device, torch.device(device2)) if torch.device(device2) == torch.device("meta"): with self.assertRaises(ValueError): # We need a `value` to set the weight back on device1 set_module_tensor_to_device(model.batchnorm, "running_mean", device1) 
set_module_tensor_to_device(model.batchnorm, "running_mean", device1, value=torch.randn(4)) else: set_module_tensor_to_device(model.batchnorm, "running_mean", device1) self.assertEqual(model.batchnorm.running_mean.device, torch.device(device1)) with self.subTest("Access by module and full name for a parameter"): set_module_tensor_to_device(model, "batchnorm.running_mean", device2) self.assertEqual(model.batchnorm.running_mean.device, torch.device(device2)) if torch.device(device2) == torch.device("meta"): with self.assertRaises(ValueError): # We need a `value` to set the weight back on CPU set_module_tensor_to_device(model, "batchnorm.running_mean", device1) set_module_tensor_to_device(model, "batchnorm.running_mean", device1, value=torch.randn(4)) else: set_module_tensor_to_device(model, "batchnorm.running_mean", device1) self.assertEqual(model.batchnorm.running_mean.device, torch.device(device1)) def test_set_module_tensor_to_meta_and_cpu(self): model = ModelForTest() self.check_set_module_tensor_for_device(model, "cpu", "meta") @require_cuda def test_set_module_tensor_to_cpu_and_gpu(self): model = ModelForTest() self.check_set_module_tensor_for_device(model, "cpu", 0) @require_cuda def test_set_module_tensor_to_meta_and_gpu(self): model = ModelForTest().to(0) self.check_set_module_tensor_for_device(model, 0, "meta") @require_multi_gpu def test_set_module_tensor_between_gpus(self): model = ModelForTest().to(0) self.check_set_module_tensor_for_device(model, 0, 1) def test_set_module_tensor_sets_dtype(self): model = ModelForTest() set_module_tensor_to_device(model, "linear1.weight", "cpu", value=model.linear1.weight, dtype=torch.float16) self.assertEqual(model.linear1.weight.dtype, torch.float16) def test_set_module_tensor_checks_shape(self): model = ModelForTest() tensor = torch.zeros((2, 2)) with self.assertRaises(ValueError) as cm: set_module_tensor_to_device(model, "linear1.weight", "cpu", value=tensor) self.assertEqual( str(cm.exception), 'Trying to set a tensor of shape torch.Size([2, 2]) in "weight" (which has shape torch.Size([4, 3])), this look incorrect.', ) def test_named_tensors(self): model = nn.BatchNorm1d(4) named_tensors = named_module_tensors(model) self.assertListEqual( [name for name, _ in named_tensors], ["weight", "bias", "running_mean", "running_var", "num_batches_tracked"], ) named_tensors = named_module_tensors(model, include_buffers=False) self.assertListEqual([name for name, _ in named_tensors], ["weight", "bias"]) model = ModelForTest() named_tensors = named_module_tensors(model) self.assertListEqual([name for name, _ in named_tensors], []) named_tensors = named_module_tensors(model, recurse=True) self.assertListEqual( [name for name, _ in named_tensors], [ "linear1.weight", "linear1.bias", "batchnorm.weight", "batchnorm.bias", "linear2.weight", "linear2.bias", "batchnorm.running_mean", "batchnorm.running_var", "batchnorm.num_batches_tracked", ], ) named_tensors = named_module_tensors(model, include_buffers=False, recurse=True) self.assertListEqual( [name for name, _ in named_tensors], ["linear1.weight", "linear1.bias", "batchnorm.weight", "batchnorm.bias", "linear2.weight", "linear2.bias"], ) def test_find_tied_parameters(self): model = sequential_model(4) self.assertListEqual(find_tied_parameters(model), []) model.linear2.weight = model.linear1.weight self.assertListEqual(find_tied_parameters(model), [["linear1.weight", "linear2.weight"]]) model.linear4.weight = model.linear1.weight self.assertListEqual(find_tied_parameters(model), [["linear1.weight", 
"linear2.weight", "linear4.weight"]]) model = sequential_model(5) model.linear1.weight = model.linear4.weight model.linear2.weight = model.linear3.weight model.linear5.weight = model.linear2.weight tied_params = sorted(find_tied_parameters(model), key=lambda x: len(x)) self.assertListEqual( tied_params, [["linear1.weight", "linear4.weight"], ["linear2.weight", "linear3.weight", "linear5.weight"]] ) model = nn.Sequential(OrderedDict([("block1", sequential_model(4)), ("block2", sequential_model(4))])) model.block1.linear1.weight = model.block2.linear1.weight self.assertListEqual(find_tied_parameters(model), [["block1.linear1.weight", "block2.linear1.weight"]]) def test_retie_parameters(self): model = sequential_model(2) retie_parameters(model, [["linear1.weight", "linear2.weight"]]) self.assertIs(model.linear1.weight, model.linear2.weight) model = sequential_model(3) retie_parameters(model, [["linear1.weight", "linear2.weight", "linear3.weight"]]) self.assertIs(model.linear1.weight, model.linear2.weight) self.assertIs(model.linear1.weight, model.linear3.weight) model = sequential_model(5) retie_parameters( model, [["linear1.weight", "linear4.weight"], ["linear2.weight", "linear3.weight", "linear5.weight"]] ) self.assertIs(model.linear1.weight, model.linear4.weight) self.assertIs(model.linear2.weight, model.linear3.weight) self.assertIs(model.linear2.weight, model.linear5.weight) model = nn.Sequential(OrderedDict([("block1", sequential_model(4)), ("block2", sequential_model(4))])) retie_parameters(model, [["block1.linear1.weight", "block2.linear1.weight"]]) self.assertIs(model.block1.linear1.weight, model.block2.linear1.weight) def test_compute_module_sizes(self): model = ModelForTest() expected_sizes = {"": 236, "linear1": 64, "linear1.weight": 48, "linear1.bias": 16} expected_sizes.update({"linear2": 100, "linear2.weight": 80, "linear2.bias": 20}) expected_sizes.update({"batchnorm": 72, "batchnorm.weight": 16, "batchnorm.bias": 16}) expected_sizes.update( {"batchnorm.running_mean": 16, "batchnorm.running_var": 16, "batchnorm.num_batches_tracked": 8} ) module_sizes = compute_module_sizes(model) self.assertDictEqual(module_sizes, expected_sizes) model.half() expected_sizes = {k: s // 2 for k, s in expected_sizes.items()} # This one is not converted to half. 
expected_sizes["batchnorm.num_batches_tracked"] = 8 # This impacts batchnorm and total expected_sizes["batchnorm"] += 4 expected_sizes[""] += 4 module_sizes = compute_module_sizes(model) self.assertDictEqual(module_sizes, expected_sizes) def test_check_device_map(self): model = ModelForTest() check_device_map(model, {"": 0}) with self.assertRaises(ValueError): check_device_map(model, {"linear1": 0, "linear2": 1}) check_device_map(model, {"linear1": 0, "linear2": 1, "batchnorm": 1}) def shard_test_model(self, model, tmp_dir): module_index = { "linear1": "checkpoint_part1.bin", "batchnorm": "checkpoint_part2.bin", "linear2": "checkpoint_part3.bin", } index = {} for name, _ in model.state_dict().items(): module = name.split(".")[0] index[name] = module_index[module] with open(os.path.join(tmp_dir, "weight_map.index.json"), "w") as f: json.dump(index, f) for module, fname in module_index.items(): state_dict = {k: v for k, v in model.state_dict().items() if k.startswith(module)} full_fname = os.path.join(tmp_dir, fname) torch.save(state_dict, full_fname) def test_load_checkpoint_in_model(self): # Check with whole checkpoint model = ModelForTest() with tempfile.TemporaryDirectory() as tmp_dir: fname = os.path.join(tmp_dir, "pt_model.bin") torch.save(model.state_dict(), fname) load_checkpoint_in_model(model, fname) # Check with sharded index model = ModelForTest() with tempfile.TemporaryDirectory() as tmp_dir: self.shard_test_model(model, tmp_dir) index_file = os.path.join(tmp_dir, "weight_map.index.json") load_checkpoint_in_model(model, index_file) # Check with sharded checkpoint model = ModelForTest() with tempfile.TemporaryDirectory() as tmp_dir: self.shard_test_model(model, tmp_dir) load_checkpoint_in_model(model, tmp_dir) @require_cuda def test_load_checkpoint_in_model_one_gpu(self): device_map = {"linear1": 0, "batchnorm": "cpu", "linear2": "cpu"} # Check with whole checkpoint model = ModelForTest() with tempfile.TemporaryDirectory() as tmp_dir: fname = os.path.join(tmp_dir, "pt_model.bin") torch.save(model.state_dict(), fname) load_checkpoint_in_model(model, fname, device_map=device_map) self.assertEqual(model.linear1.weight.device, torch.device(0)) self.assertEqual(model.batchnorm.weight.device, torch.device("cpu")) self.assertEqual(model.linear2.weight.device, torch.device("cpu")) # Check with sharded index model = ModelForTest() with tempfile.TemporaryDirectory() as tmp_dir: self.shard_test_model(model, tmp_dir) index_file = os.path.join(tmp_dir, "weight_map.index.json") load_checkpoint_in_model(model, index_file, device_map=device_map) self.assertEqual(model.linear1.weight.device, torch.device(0)) self.assertEqual(model.batchnorm.weight.device, torch.device("cpu")) self.assertEqual(model.linear2.weight.device, torch.device("cpu")) # Check with sharded checkpoint folder model = ModelForTest() with tempfile.TemporaryDirectory() as tmp_dir: self.shard_test_model(model, tmp_dir) load_checkpoint_in_model(model, tmp_dir, device_map=device_map) self.assertEqual(model.linear1.weight.device, torch.device(0)) self.assertEqual(model.batchnorm.weight.device, torch.device("cpu")) self.assertEqual(model.linear2.weight.device, torch.device("cpu")) @require_cuda def test_load_checkpoint_in_model_disk_offload(self): device_map = {"linear1": "cpu", "batchnorm": "disk", "linear2": "cpu"} model = ModelForTest() with tempfile.TemporaryDirectory() as tmp_dir: fname = os.path.join(tmp_dir, "pt_model.bin") torch.save(model.state_dict(), fname) load_checkpoint_in_model(model, fname, device_map=device_map, 
offload_folder=tmp_dir) self.assertEqual(model.linear1.weight.device, torch.device("cpu")) self.assertEqual(model.batchnorm.weight.device, torch.device("meta")) # Buffers are not offloaded by default self.assertEqual(model.batchnorm.running_mean.device, torch.device("cpu")) self.assertEqual(model.linear2.weight.device, torch.device("cpu")) model = ModelForTest() with tempfile.TemporaryDirectory() as tmp_dir: fname = os.path.join(tmp_dir, "pt_model.bin") torch.save(model.state_dict(), fname) load_checkpoint_in_model(model, fname, device_map=device_map, offload_folder=tmp_dir, offload_buffers=True) self.assertEqual(model.linear1.weight.device, torch.device("cpu")) self.assertEqual(model.batchnorm.weight.device, torch.device("meta")) self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta")) self.assertEqual(model.linear2.weight.device, torch.device("cpu")) @require_multi_gpu def test_load_checkpoint_in_model_two_gpu(self): device_map = {"linear1": 0, "batchnorm": "cpu", "linear2": 1} # Check with whole checkpoint model = ModelForTest() with tempfile.TemporaryDirectory() as tmp_dir: fname = os.path.join(tmp_dir, "pt_model.bin") torch.save(model.state_dict(), fname) load_checkpoint_in_model(model, fname, device_map=device_map) self.assertEqual(model.linear1.weight.device, torch.device(0)) self.assertEqual(model.batchnorm.weight.device, torch.device("cpu")) self.assertEqual(model.linear2.weight.device, torch.device(1)) # Check with sharded index model = ModelForTest() with tempfile.TemporaryDirectory() as tmp_dir: self.shard_test_model(model, tmp_dir) index_file = os.path.join(tmp_dir, "weight_map.index.json") load_checkpoint_in_model(model, index_file, device_map=device_map) self.assertEqual(model.linear1.weight.device, torch.device(0)) self.assertEqual(model.batchnorm.weight.device, torch.device("cpu")) self.assertEqual(model.linear2.weight.device, torch.device(1)) # Check with sharded checkpoint model = ModelForTest() with tempfile.TemporaryDirectory() as tmp_dir: self.shard_test_model(model, tmp_dir) load_checkpoint_in_model(model, tmp_dir, device_map=device_map) self.assertEqual(model.linear1.weight.device, torch.device(0)) self.assertEqual(model.batchnorm.weight.device, torch.device("cpu")) self.assertEqual(model.linear2.weight.device, torch.device(1)) def test_clean_device_map(self): # Regroup everything if all is on the same device self.assertDictEqual(clean_device_map({"a": 0, "b": 0, "c": 0}), {"": 0}) # Regroups children of level 1 on the same device self.assertDictEqual( clean_device_map({"a.x": 0, "a.y": 0, "b.x": 1, "b.y": 1, "c": 1}), {"a": 0, "b": 1, "c": 1} ) # Regroups children of level 2 on the same device self.assertDictEqual( clean_device_map({"a.x": 0, "a.y": 0, "b.x.0": 1, "b.x.1": 1, "b.y.0": 2, "b.y.1": 2, "c": 2}), {"a": 0, "b.x": 1, "b.y": 2, "c": 2}, ) def test_infer_auto_device_map(self): model = ModelForTest() # model has size 236: linear1 64, batchnorm 72, linear2 100 device_map = infer_auto_device_map(model, max_memory={0: 200, 1: 200}) # only linear1 fits on device 0 as we keep memory available for the maximum layer in case of offload self.assertDictEqual(device_map, {"linear1": 0, "batchnorm": 1, "linear2": 1}) device_map = infer_auto_device_map(model, max_memory={0: 200, 1: 172, 2: 200}) # On device 1, we don't care about keeping size available for the max layer, so even if there is just the # size available for batchnorm + linear2, they fit here. 
self.assertDictEqual(device_map, {"linear1": 0, "batchnorm": 1, "linear2": 1}) model.linear1.weight = model.linear2.weight device_map = infer_auto_device_map(model, max_memory={0: 200, 1: 200}) # By tying weights, the whole model fits on device 0 self.assertDictEqual(device_map, {"": 0}) # When splitting a bigger model, the split is done at the layer level model = nn.Sequential(ModelForTest(), ModelForTest(), ModelForTest()) device_map = infer_auto_device_map(model, max_memory={0: 500, 1: 500}) self.assertDictEqual(device_map, {"0": 0, "1.linear1": 0, "1.batchnorm": 0, "1.linear2": 1, "2": 1}) # With no_split_module_classes, it's done at that module level model = nn.Sequential(ModelForTest(), ModelForTest(), ModelForTest()) device_map = infer_auto_device_map( model, max_memory={0: 500, 1: 500}, no_split_module_classes=["ModelForTest"] ) self.assertDictEqual(device_map, {"0": 0, "1": 1, "2": 1}) def test_infer_auto_device_map_with_tied_weights(self): model = nn.Sequential( OrderedDict([("layer1", ModelForTest()), ("layer2", ModelForTest()), ("layer3", ModelForTest())]) ) model.layer3.linear2.weight = model.layer1.linear2.weight device_map = infer_auto_device_map(model, max_memory={0: 400, 1: 500}) expected = {"layer1": 0, "layer3.linear2": 0, "layer2": 1, "layer3.linear1": 1, "layer3.batchnorm": 1} self.assertDictEqual(device_map, expected) # With three weights tied together model.layer2.linear2.weight = model.layer1.linear2.weight device_map = infer_auto_device_map(model, max_memory={0: 400, 1: 500}) expected = { "layer1": 0, "layer2.linear2": 0, "layer3.linear2": 0, "layer2.linear1": 1, "layer2.batchnorm": 1, "layer3.linear1": 1, "layer3.batchnorm": 1, } self.assertDictEqual(device_map, expected) # With two groups of weights tied together model.layer2.linear1.weight = model.layer1.linear1.weight device_map = infer_auto_device_map(model, max_memory={0: 400, 1: 500}) expected = { "layer1": 0, "layer2.linear1": 0, "layer2.linear2": 0, "layer3.linear2": 0, "layer2.batchnorm": 1, "layer3.linear1": 1, "layer3.batchnorm": 1, } self.assertDictEqual(device_map, expected) # With weights ties in the same module model = nn.Sequential( OrderedDict( [ ("linear1", nn.Linear(4, 4)), ("linear2", nn.Linear(6, 6)), ("linear3", nn.Linear(4, 4)), ("linear4", nn.Linear(6, 6)), ] ) ) model.linear3.weight = model.linear1.weight model.linear3.bias = model.linear1.bias device_map = infer_auto_device_map(model, max_memory={0: 250, 1: 400}) expected = {"linear1": 0, "linear2": 1, "linear3": 0, "linear4": 1} self.assertDictEqual(device_map, expected) @require_huggingface_suite def test_infer_auto_device_map_on_t0pp(self): from transformers import AutoConfig, AutoModelForSeq2SeqLM config = AutoConfig.from_pretrained("bigscience/T0pp") with init_empty_weights(): model = AutoModelForSeq2SeqLM.from_config(config) model.tie_weights() special_dtypes = {n: torch.float32 for n, _ in model.named_parameters() if "wo" in n} max_memory = {0: 10**10, 1: 10**10, "cpu": 10**10} device_map = infer_auto_device_map( model, no_split_module_classes=["T5Block"], dtype=torch.float16, max_memory=max_memory, special_dtypes=special_dtypes, ) # The 3 tied weights should all be on device 0 self.assertEqual(device_map["shared"], 0) self.assertEqual(device_map["encoder.embed_tokens"], 0) self.assertEqual(device_map["decoder.embed_tokens"], 0) @require_cuda def test_get_balanced_memory(self): model = ModelForTest() # model has size 236: linear1 64, batchnorm 72, linear2 100 max_memory = get_balanced_memory(model, max_memory={0: 200, 1: 200}) 
self.assertDictEqual({0: 200, 1: 200}, max_memory) # We should be able to set models on a non-contiguous sub-set of max_memory = get_balanced_memory(model, max_memory={0: 200, 2: 200}) self.assertDictEqual({0: 200, 2: 200}, max_memory) max_memory = get_balanced_memory(model, max_memory={0: 300, 1: 300}) self.assertDictEqual({0: 215, 1: 300}, max_memory) # Last device always get max memory to give more buffer and avoid accidental CPU offload max_memory = get_balanced_memory(model, max_memory={0: 300, 1: 500}) self.assertDictEqual({0: 215, 1: 500}, max_memory) # Last device always get max memory to give more buffer, even if CPU is provided max_memory = get_balanced_memory(model, max_memory={0: 300, "cpu": 1000}) self.assertDictEqual({0: 300, "cpu": 1000}, max_memory) # If we set a device to 0, it's not counted. max_memory = get_balanced_memory(model, max_memory={0: 0, 1: 300, 2: 300}) self.assertDictEqual({0: 0, 1: 215, 2: 300}, max_memory) # If we set a device to 0, it's not counted. max_memory = get_balanced_memory(model, max_memory={0: 0, "cpu": 100}) self.assertDictEqual({0: 0, "cpu": 100}, max_memory) @require_cuda @require_safetensors def test_load_state_dict(self): from safetensors.torch import save_file state_dict = {k: torch.randn(4, 5) for k in ["a", "b", "c"]} device_maps = [{"a": "cpu", "b": 0, "c": "disk"}, {"a": 0, "b": 0, "c": "disk"}, {"a": 0, "b": 0, "c": 0}] for device_map in device_maps: with tempfile.TemporaryDirectory() as tmp_dir: checkpoint_file = os.path.join(tmp_dir, "model.safetensors") save_file(state_dict, checkpoint_file, metadata={"format": "pt"}) loaded_state_dict = load_state_dict(checkpoint_file, device_map=device_map) for param, device in device_map.items(): device = device if device != "disk" else "cpu" self.assertEqual(loaded_state_dict[param].device, torch.device(device)) def test_convert_file_size(self): result = convert_file_size_to_int("100MB") self.assertEqual(result, 100 * (10**6)) result = convert_file_size_to_int("2GiB") self.assertEqual(result, 2 * (2**30)) result = convert_file_size_to_int("512KiB") self.assertEqual(result, 512 * (2**10)) result = convert_file_size_to_int("1.5GB") self.assertEqual(result, 1.5 * (10**9)) result = convert_file_size_to_int("100KB") self.assertEqual(result, 100 * (10**3)) result = convert_file_size_to_int(500) self.assertEqual(result, 500) with self.assertRaises(ValueError): convert_file_size_to_int("5MBB") with self.assertRaises(ValueError): convert_file_size_to_int("5k0MB") with self.assertRaises(ValueError): convert_file_size_to_int("-1GB")
accelerate-main
tests/test_modeling_utils.py
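A sketch tying together the big-model utilities covered above: instantiate on the `meta` device, infer a device map under a memory budget, then stream the checkpoint in. The tiny model and the "1MB" budget are assumptions made for this example; on real hardware the budget would look more like `{0: "10GiB", "cpu": "30GiB"}`:

import os
import tempfile

import torch
import torch.nn as nn

from accelerate import init_empty_weights
from accelerate.utils.modeling import infer_auto_device_map, load_checkpoint_in_model


class TinyModel(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.linear1(x))


with tempfile.TemporaryDirectory() as tmp_dir:
    # Pretend this is a pretrained checkpoint sitting on disk.
    checkpoint = os.path.join(tmp_dir, "pt_model.bin")
    torch.save(TinyModel().state_dict(), checkpoint)

    # Instantiate the architecture without allocating memory for its weights.
    with init_empty_weights():
        model = TinyModel()

    # Decide where each submodule should live given the memory budget.
    device_map = infer_auto_device_map(model, max_memory={"cpu": "1MB"})
    print(device_map)  # everything fits here, so the map collapses to a single entry

    # Materialize the weights from the checkpoint according to the device map.
    load_checkpoint_in_model(model, checkpoint, device_map=device_map)
    print(model.linear1.weight.device)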
import json import os import pickle import tempfile from unittest.mock import patch import torch from torch.utils.data import DataLoader, TensorDataset from accelerate import DistributedType, infer_auto_device_map, init_empty_weights from accelerate.accelerator import Accelerator from accelerate.state import GradientState, PartialState from accelerate.test_utils import require_bnb, require_multi_gpu, require_safetensors, slow from accelerate.test_utils.testing import AccelerateTestCase, require_cuda from accelerate.utils import patch_environment from accelerate.utils.modeling import load_checkpoint_in_model def create_components(): model = torch.nn.Linear(2, 4) optimizer = torch.optim.AdamW(model.parameters(), lr=1.0) scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer, max_lr=0.01, steps_per_epoch=2, epochs=1) train_dl = DataLoader(TensorDataset(torch.tensor([1, 2, 3]))) valid_dl = DataLoader(TensorDataset(torch.tensor([4, 5, 6]))) return model, optimizer, scheduler, train_dl, valid_dl def get_signature(model): return (model.weight.abs().sum() + model.bias.abs().sum()).item() def load_random_weights(model): state = torch.nn.Linear(*tuple(model.weight.T.shape)).state_dict() model.load_state_dict(state) class AcceleratorTester(AccelerateTestCase): @require_cuda def test_accelerator_can_be_reinstantiated(self): _ = Accelerator() assert PartialState._shared_state["_cpu"] is False assert PartialState._shared_state["device"].type == "cuda" with self.assertRaises(ValueError): _ = Accelerator(cpu=True) def test_mutable_states(self): accelerator = Accelerator() state = GradientState() assert state.num_steps == 1 accelerator.gradient_accumulation_steps = 4 assert state.num_steps == 4 assert state.sync_gradients is True accelerator.sync_gradients = False assert state.sync_gradients is False GradientState._reset_state() def test_prepared_objects_are_referenced(self): accelerator = Accelerator() model, optimizer, scheduler, train_dl, valid_dl = create_components() ( prepared_model, prepared_optimizer, prepared_scheduler, prepared_train_dl, prepared_valid_dl, ) = accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl) self.assertTrue(prepared_model in accelerator._models) self.assertTrue(prepared_optimizer in accelerator._optimizers) self.assertTrue(prepared_scheduler in accelerator._schedulers) self.assertTrue(prepared_train_dl in accelerator._dataloaders) self.assertTrue(prepared_valid_dl in accelerator._dataloaders) def test_free_memory_dereferences_prepared_components(self): accelerator = Accelerator() model, optimizer, scheduler, train_dl, valid_dl = create_components() accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl) accelerator.free_memory() self.assertTrue(len(accelerator._models) == 0) self.assertTrue(len(accelerator._optimizers) == 0) self.assertTrue(len(accelerator._schedulers) == 0) self.assertTrue(len(accelerator._dataloaders) == 0) def test_env_var_device(self): """Tests that setting the torch device with ACCELERATE_TORCH_DEVICE overrides default device.""" PartialState._reset_state() # Mock torch.cuda.set_device to avoid an exception as the device doesn't exist def noop(*args, **kwargs): pass with patch("torch.cuda.set_device", noop), patch_environment(ACCELERATE_TORCH_DEVICE="cuda:64"): accelerator = Accelerator() self.assertEqual(str(accelerator.state.device), "cuda:64") def test_save_load_model(self): accelerator = Accelerator() model, optimizer, scheduler, train_dl, valid_dl = create_components() accelerator.prepare(model, optimizer, scheduler, 
train_dl, valid_dl) model_signature = get_signature(model) with tempfile.TemporaryDirectory() as tmpdirname: accelerator.save_state(tmpdirname) # make sure random weights don't match load_random_weights(model) self.assertTrue(abs(model_signature - get_signature(model)) > 1e-3) # make sure loaded weights match accelerator.load_state(tmpdirname) self.assertTrue(abs(model_signature - get_signature(model)) < 1e-3) def test_save_model_pytorch(self): accelerator = Accelerator() model = torch.nn.Linear(10, 10) model_signature = get_signature(model) with tempfile.TemporaryDirectory() as tmpdirname: accelerator.save_model(model, tmpdirname, safe_serialization=False) # make sure loaded weights match load_checkpoint_in_model(model, tmpdirname) self.assertTrue(abs(model_signature - get_signature(model)) < 1e-3) @require_safetensors def test_save_model_safetensors(self): accelerator = Accelerator() model = torch.nn.Linear(10, 10) model_signature = get_signature(model) with tempfile.TemporaryDirectory() as tmpdirname: accelerator.save_model(model, tmpdirname, safe_serialization=True) # make sure loaded weights match load_checkpoint_in_model(model, tmpdirname) self.assertTrue(abs(model_signature - get_signature(model)) < 1e-3) def test_save_load_model_with_hooks(self): accelerator = Accelerator() model, optimizer, scheduler, train_dl, valid_dl = create_components() accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl) model_signature = get_signature(model) # saving hook def save_config(models, weights, output_dir): config = {"class_name": models[0].__class__.__name__} with open(os.path.join(output_dir, "data.json"), "w") as f: json.dump(config, f) # loading hook def load_config(models, input_dir): with open(os.path.join(input_dir, "data.json"), "r") as f: config = json.load(f) models[0].class_name = config["class_name"] save_hook = accelerator.register_save_state_pre_hook(save_config) load_hook = accelerator.register_load_state_pre_hook(load_config) with tempfile.TemporaryDirectory() as tmpdirname: accelerator.save_state(tmpdirname) # make sure random weights don't match with hooks load_random_weights(model) self.assertTrue(abs(model_signature - get_signature(model)) > 1e-3) # random class name to verify correct one is loaded model.class_name = "random" # make sure loaded weights match with hooks accelerator.load_state(tmpdirname) self.assertTrue(abs(model_signature - get_signature(model)) < 1e-3) # mode.class_name is loaded from config self.assertTrue(model.class_name == model.__class__.__name__) # remove hooks save_hook.remove() load_hook.remove() with tempfile.TemporaryDirectory() as tmpdirname: accelerator.save_state(tmpdirname) # make sure random weights don't match with hooks removed load_random_weights(model) self.assertTrue(abs(model_signature - get_signature(model)) > 1e-3) # random class name to verify correct one is loaded model.class_name = "random" # make sure loaded weights match with hooks removed accelerator.load_state(tmpdirname) self.assertTrue(abs(model_signature - get_signature(model)) < 1e-3) # mode.class_name is NOT loaded from config self.assertTrue(model.class_name != model.__class__.__name__) def test_accelerator_none(self): """Just test that passing None to accelerator.prepare() works.""" accelerator = Accelerator() model, optimizer, scheduler, train_dl, valid_dl = create_components() dummy_obj = None # This should work model, optimizer, scheduler, train_dl, valid_dl, dummy_obj = accelerator.prepare( model, optimizer, scheduler, train_dl, valid_dl, dummy_obj ) 
self.assertTrue(dummy_obj is None) def test_is_accelerator_prepared(self): """Checks that `_is_accelerator_prepared` is set properly""" accelerator = Accelerator() model, optimizer, scheduler, train_dl, valid_dl = create_components() dummy_obj = [1, 2, 3] # This should work model, optimizer, scheduler, train_dl, valid_dl, dummy_obj = accelerator.prepare( model, optimizer, scheduler, train_dl, valid_dl, dummy_obj ) self.assertEqual( getattr(dummy_obj, "_is_accelerate_prepared", False), False, "Dummy object should have `_is_accelerate_prepared` set to `True`", ) self.assertEqual( getattr(model, "_is_accelerate_prepared", False), True, "Model is missing `_is_accelerator_prepared` or is set to `False`", ) self.assertEqual( getattr(optimizer, "_is_accelerate_prepared", False), True, "Optimizer is missing `_is_accelerator_prepared` or is set to `False`", ) self.assertEqual( getattr(scheduler, "_is_accelerate_prepared", False), True, "Scheduler is missing `_is_accelerator_prepared` or is set to `False`", ) self.assertEqual( getattr(train_dl, "_is_accelerate_prepared", False), True, "Train Dataloader is missing `_is_accelerator_prepared` or is set to `False`", ) self.assertEqual( getattr(valid_dl, "_is_accelerate_prepared", False), True, "Valid Dataloader is missing `_is_accelerator_prepared` or is set to `False`", ) @slow @require_bnb def test_accelerator_bnb(self): """Tests that the accelerator can be used with the BNB library.""" from transformers import AutoModelForCausalLM model = AutoModelForCausalLM.from_pretrained( "EleutherAI/gpt-neo-125m", load_in_8bit=True, device_map={"": 0}, ) accelerator = Accelerator() # This should work model = accelerator.prepare(model) @slow @require_bnb def test_accelerator_bnb_cpu_error(self): """Tests that the accelerator can be used with the BNB library. 
This should fail as we are trying to load a model that is loaded between cpu and gpu""" from transformers import AutoModelForCausalLM accelerator = Accelerator() with init_empty_weights(): model = AutoModelForCausalLM.from_pretrained( "EleutherAI/gpt-neo-125m", ) model.tie_weights() device_map = infer_auto_device_map(model) device_map["lm_head"] = "cpu" model = AutoModelForCausalLM.from_pretrained( "EleutherAI/gpt-neo-125m", device_map=device_map, load_in_8bit=True, llm_int8_enable_fp32_cpu_offload=True ) # This should not work and get value error with self.assertRaises(ValueError): model = accelerator.prepare(model) @slow @require_bnb @require_multi_gpu def test_accelerator_bnb_multi_gpu(self): """Tests that the accelerator can be used with the BNB library.""" from transformers import AutoModelForCausalLM PartialState._shared_state = {"distributed_type": DistributedType.MULTI_GPU} with init_empty_weights(): model = AutoModelForCausalLM.from_pretrained( "EleutherAI/gpt-neo-125m", ) model.tie_weights() device_map = infer_auto_device_map(model) device_map["lm_head"] = 1 model = AutoModelForCausalLM.from_pretrained( "EleutherAI/gpt-neo-125m", load_in_8bit=True, device_map=device_map, ) accelerator = Accelerator() # This should not work and get value error with self.assertRaises(ValueError): _ = accelerator.prepare(model) PartialState._reset_state() @slow @require_bnb @require_multi_gpu def test_accelerator_bnb_multi_gpu_no_distributed(self): """Tests that the accelerator can be used with the BNB library.""" from transformers import AutoModelForCausalLM with init_empty_weights(): model = AutoModelForCausalLM.from_pretrained( "EleutherAI/gpt-neo-125m", ) device_map = infer_auto_device_map(model) device_map["lm_head"] = 1 model = AutoModelForCausalLM.from_pretrained( "EleutherAI/gpt-neo-125m", load_in_8bit=True, device_map=device_map, ) accelerator = Accelerator() # This should work _ = accelerator.prepare(model) @require_cuda def test_accelerator_cpu_flag_prepare(self): model = torch.nn.Linear(10, 10) sgd = torch.optim.SGD(model.parameters(), lr=0.01) accelerator = Accelerator(cpu=True) _ = accelerator.prepare(sgd) @require_cuda def test_can_unwrap_model_fp16(self): # test for a regression introduced in #872 # before the fix, after unwrapping with keep_fp32_wrapper=False, there would be the following error: # Linear.forward() missing 1 required positional argument: 'input' model = create_components()[0] accelerator = Accelerator(mixed_precision="fp16") inputs = torch.randn(10, 2).cuda() model = accelerator.prepare(model) model(inputs) # sanity check that this works model = accelerator.unwrap_model(model, keep_fp32_wrapper=False) model(inputs) # check that this still works # check that pickle roundtrip works model_loaded = pickle.loads(pickle.dumps(model)) model_loaded(inputs) def test_can_unwrap_model(self): model = create_components()[0] accelerator = Accelerator(mixed_precision="no", cpu=True) inputs = torch.randn(10, 2) model = accelerator.prepare(model) model(inputs) # sanity check that this works model = accelerator.unwrap_model(model, keep_fp32_wrapper=False) model(inputs) # check that this still works # check that pickle roundtrip works model_loaded = pickle.loads(pickle.dumps(model)) model_loaded(inputs)
accelerate-main
tests/test_accelerator.py
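A sketch of the prepare / checkpoint round trip that these accelerator tests verify; the model, optimizer and data are toy placeholders:

import tempfile

import torch
from torch.utils.data import DataLoader, TensorDataset

from accelerate import Accelerator

accelerator = Accelerator()
model = torch.nn.Linear(2, 4)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
train_dl = DataLoader(TensorDataset(torch.randn(8, 2)), batch_size=4)

model, optimizer, train_dl = accelerator.prepare(model, optimizer, train_dl)

with tempfile.TemporaryDirectory() as ckpt_dir:
    # Saves the states of everything that was prepared, plus RNG states, into `ckpt_dir`.
    accelerator.save_state(ckpt_dir)
    # ... training continues, or the job restarts ...
    accelerator.load_state(ckpt_dir)  # restores the prepared objects in place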
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import csv import json import logging import os import re import subprocess import tempfile import unittest import zipfile from pathlib import Path from typing import Optional from unittest import mock import torch # We use TF to parse the logs from accelerate import Accelerator from accelerate.test_utils.testing import ( MockingTestCase, TempDirTestCase, require_comet_ml, require_tensorboard, require_wandb, skip, ) from accelerate.tracking import CometMLTracker, GeneralTracker from accelerate.utils import ProjectConfiguration, is_comet_ml_available, is_tensorboard_available if is_comet_ml_available(): from comet_ml import OfflineExperiment if is_tensorboard_available(): import struct import tensorboard.compat.proto.event_pb2 as event_pb2 logger = logging.getLogger(__name__) @require_tensorboard class TensorBoardTrackingTest(unittest.TestCase): def test_init_trackers(self): project_name = "test_project_with_config" with tempfile.TemporaryDirectory() as dirpath: accelerator = Accelerator(log_with="tensorboard", project_dir=dirpath) config = {"num_iterations": 12, "learning_rate": 1e-2, "some_boolean": False, "some_string": "some_value"} accelerator.init_trackers(project_name, config) accelerator.end_training() for child in Path(f"{dirpath}/{project_name}").glob("*/**"): log = list(filter(lambda x: x.is_file(), child.iterdir()))[0] self.assertNotEqual(str(log), "") def test_log(self): project_name = "test_project_with_log" with tempfile.TemporaryDirectory() as dirpath: accelerator = Accelerator(log_with="tensorboard", project_dir=dirpath) accelerator.init_trackers(project_name) values = {"total_loss": 0.1, "iteration": 1, "my_text": "some_value"} accelerator.log(values, step=0) accelerator.end_training() # Logged values are stored in the outermost-tfevents file and can be read in as a TFRecord # Names are randomly generated each time log = list(filter(lambda x: x.is_file(), Path(f"{dirpath}/{project_name}").iterdir()))[0] self.assertNotEqual(str(log), "") def test_log_with_tensor(self): project_name = "test_project_with_log" with tempfile.TemporaryDirectory() as dirpath: accelerator = Accelerator(log_with="tensorboard", project_dir=dirpath) accelerator.init_trackers(project_name) values = {"tensor": torch.tensor(1)} accelerator.log(values, step=0) accelerator.end_training() # Logged values are stored in the outermost-tfevents file and can be read in as a TFRecord # Names are randomly generated each time log = list(filter(lambda x: x.is_file(), Path(f"{dirpath}/{project_name}").iterdir()))[0] # Reading implementation based on https://github.com/pytorch/pytorch/issues/45327#issuecomment-703757685 with open(log, "rb") as f: data = f.read() found_tensor = False while data: header = struct.unpack("Q", data[:8]) event_str = data[12 : 12 + int(header[0])] # 8+4 data = data[12 + int(header[0]) + 4 :] event = event_pb2.Event() event.ParseFromString(event_str) if event.HasField("summary"): for value in 
event.summary.value: if value.simple_value == 1.0 and value.tag == "tensor": found_tensor = True self.assertTrue(found_tensor, "Converted tensor was not found in the log file!") def test_project_dir(self): with self.assertRaisesRegex(ValueError, "Logging with `tensorboard` requires a `logging_dir`"): _ = Accelerator(log_with="tensorboard") with tempfile.TemporaryDirectory() as dirpath: _ = Accelerator(log_with="tensorboard", project_dir=dirpath) def test_project_dir_with_config(self): config = ProjectConfiguration(total_limit=30) with tempfile.TemporaryDirectory() as dirpath: _ = Accelerator(log_with="tensorboard", project_dir=dirpath, project_config=config) @require_wandb @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"}) class WandBTrackingTest(TempDirTestCase, MockingTestCase): def setUp(self): super().setUp() # wandb let's us override where logs are stored to via the WANDB_DIR env var self.add_mocks(mock.patch.dict(os.environ, {"WANDB_DIR": self.tmpdir})) @staticmethod def parse_log(log: str, section: str, record: bool = True): """ Parses wandb log for `section` and returns a dictionary of all items in that section. Section names are based on the output of `wandb sync --view --verbose` and items starting with "Record" in that result """ # Big thanks to the W&B team for helping us parse their logs pattern = rf"{section} ([\S\s]*?)\n\n" if record: pattern = rf"Record: {pattern}" cleaned_record = re.findall(pattern, log)[0] # A config if section == "config" or section == "history": cleaned_record = re.findall(r'"([a-zA-Z0-9_.,]+)', cleaned_record) return {key: val for key, val in zip(cleaned_record[0::2], cleaned_record[1::2])} # Everything else else: return dict(re.findall(r'(\w+): "([^\s]+)"', cleaned_record)) @skip def test_wandb(self): project_name = "test_project_with_config" accelerator = Accelerator(log_with="wandb") config = {"num_iterations": 12, "learning_rate": 1e-2, "some_boolean": False, "some_string": "some_value"} kwargs = {"wandb": {"tags": ["my_tag"]}} accelerator.init_trackers(project_name, config, kwargs) values = {"total_loss": 0.1, "iteration": 1, "my_text": "some_value"} accelerator.log(values, step=0) accelerator.end_training() # The latest offline log is stored at wandb/latest-run/*.wandb for child in Path(f"{self.tmpdir}/wandb/latest-run").glob("*"): if child.is_file() and child.suffix == ".wandb": content = subprocess.check_output( ["wandb", "sync", "--view", "--verbose", str(child)], env=os.environ.copy() ).decode("utf8", "ignore") break # Check HPS through careful parsing and cleaning logged_items = self.parse_log(content, "config") self.assertEqual(logged_items["num_iterations"], "12") self.assertEqual(logged_items["learning_rate"], "0.01") self.assertEqual(logged_items["some_boolean"], "false") self.assertEqual(logged_items["some_string"], "some_value") self.assertEqual(logged_items["some_string"], "some_value") # Run tags logged_items = self.parse_log(content, "run", False) self.assertEqual(logged_items["tags"], "my_tag") # Actual logging logged_items = self.parse_log(content, "history") self.assertEqual(logged_items["total_loss"], "0.1") self.assertEqual(logged_items["iteration"], "1") self.assertEqual(logged_items["my_text"], "some_value") self.assertEqual(logged_items["_step"], "0") # Comet has a special `OfflineExperiment` we need to use for testing def offline_init(self, run_name: str, tmpdir: str): self.run_name = run_name self.writer = OfflineExperiment(project_name=run_name, offline_directory=tmpdir) logger.info(f"Initialized offline CometML 
project {self.run_name}") logger.info("Make sure to log any initial configurations with `self.store_init_configuration` before training!") @require_comet_ml @mock.patch.object(CometMLTracker, "__init__", offline_init) class CometMLTest(unittest.TestCase): @staticmethod def get_value_from_key(log_list, key: str, is_param: bool = False): "Extracts `key` from Comet `log`" for log in log_list: j = json.loads(log)["payload"] if is_param and "param" in j.keys(): if j["param"]["paramName"] == key: return j["param"]["paramValue"] if "log_other" in j.keys(): if j["log_other"]["key"] == key: return j["log_other"]["val"] if "metric" in j.keys(): if j["metric"]["metricName"] == key: return j["metric"]["metricValue"] def test_init_trackers(self): with tempfile.TemporaryDirectory() as d: tracker = CometMLTracker("test_project_with_config", d) accelerator = Accelerator(log_with=tracker) config = {"num_iterations": 12, "learning_rate": 1e-2, "some_boolean": False, "some_string": "some_value"} accelerator.init_trackers(None, config) accelerator.end_training() log = os.listdir(d)[0] # Comet is nice, it's just a zip file here # We parse the raw logs p = os.path.join(d, log) archive = zipfile.ZipFile(p, "r") log = archive.open("messages.json").read().decode("utf-8") list_of_json = log.split("\n")[:-1] self.assertEqual(self.get_value_from_key(list_of_json, "num_iterations", True), 12) self.assertEqual(self.get_value_from_key(list_of_json, "learning_rate", True), 0.01) self.assertEqual(self.get_value_from_key(list_of_json, "some_boolean", True), False) self.assertEqual(self.get_value_from_key(list_of_json, "some_string", True), "some_value") def test_log(self): with tempfile.TemporaryDirectory() as d: tracker = CometMLTracker("test_project_with_config", d) accelerator = Accelerator(log_with=tracker) accelerator.init_trackers(None) values = {"total_loss": 0.1, "iteration": 1, "my_text": "some_value"} accelerator.log(values, step=0) accelerator.end_training() log = os.listdir(d)[0] # Comet is nice, it's just a zip file here # We parse the raw logs p = os.path.join(d, log) archive = zipfile.ZipFile(p, "r") log = archive.open("messages.json").read().decode("utf-8") list_of_json = log.split("\n")[:-1] self.assertEqual(self.get_value_from_key(list_of_json, "curr_step", True), 0) self.assertEqual(self.get_value_from_key(list_of_json, "total_loss"), 0.1) self.assertEqual(self.get_value_from_key(list_of_json, "iteration"), 1) self.assertEqual(self.get_value_from_key(list_of_json, "my_text"), "some_value") class MyCustomTracker(GeneralTracker): "Basic tracker that writes to a csv for testing" _col_names = [ "total_loss", "iteration", "my_text", "learning_rate", "num_iterations", "some_boolean", "some_string", ] name = "my_custom_tracker" requires_logging_directory = False def __init__(self, dir: str): self.f = open(f"{dir}/log.csv", "w+") self.writer = csv.DictWriter(self.f, fieldnames=self._col_names) self.writer.writeheader() @property def tracker(self): return self.writer def store_init_configuration(self, values: dict): logger.info("Call init") self.writer.writerow(values) def log(self, values: dict, step: Optional[int]): logger.info("Call log") self.writer.writerow(values) def finish(self): self.f.close() class CustomTrackerTestCase(unittest.TestCase): def test_init_trackers(self): with tempfile.TemporaryDirectory() as d: tracker = MyCustomTracker(d) accelerator = Accelerator(log_with=tracker) config = {"num_iterations": 12, "learning_rate": 1e-2, "some_boolean": False, "some_string": "some_value"} 
accelerator.init_trackers("Some name", config) accelerator.end_training() with open(f"{d}/log.csv", "r") as f: data = csv.DictReader(f) data = next(data) truth = { "total_loss": "", "iteration": "", "my_text": "", "learning_rate": "0.01", "num_iterations": "12", "some_boolean": "False", "some_string": "some_value", } self.assertDictEqual(data, truth) def test_log(self): with tempfile.TemporaryDirectory() as d: tracker = MyCustomTracker(d) accelerator = Accelerator(log_with=tracker) accelerator.init_trackers("Some name") values = {"total_loss": 0.1, "iteration": 1, "my_text": "some_value"} accelerator.log(values, step=0) accelerator.end_training() with open(f"{d}/log.csv", "r") as f: data = csv.DictReader(f) data = next(data) truth = { "total_loss": "0.1", "iteration": "1", "my_text": "some_value", "learning_rate": "", "num_iterations": "", "some_boolean": "", "some_string": "", } self.assertDictEqual(data, truth)
accelerate-main
tests/test_tracking.py
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import tempfile import unittest import torch import torch.nn as nn from accelerate import Accelerator, init_empty_weights from accelerate.test_utils import require_bnb, require_cuda, require_huggingface_suite, require_multi_gpu, slow from accelerate.utils.bnb import load_and_quantize_model from accelerate.utils.dataclasses import BnbQuantizationConfig class BitsAndBytesConfigIntegration(unittest.TestCase): def test_BnbQuantizationConfig(self): with self.assertRaises(ValueError): BnbQuantizationConfig(load_in_8bit=True, load_in_4bit=True) @slow @require_cuda @require_bnb @require_huggingface_suite class MixedInt8EmptyModelTest(unittest.TestCase): # We keep the constants inside the init function and model loading inside setUp function # We need to test on relatively large models (aka >1b parameters otherwise the quantiztion may not work as expected) # Therefore here we use only bloom-1b3 to test our module model_name = "marcsun13/bloom-1b7_with_lm_head" # Constant values # This was obtained on a Quadro RTX 8000 so the number might slightly change EXPECTED_RELATIVE_DIFFERENCE = 1.540025 input_text = "Hello my name is" EXPECTED_OUTPUT = "Hello my name is John.\nI am a friend of the family.\n" MAX_NEW_TOKENS = 10 def setUp(self): """ Setup quantized model from empty model """ from huggingface_hub import hf_hub_download from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer # Models and tokenizer self.model_fp16 = AutoModelForCausalLM.from_pretrained( self.model_name, torch_dtype=torch.float16, device_map="auto" ) # create model on meta device with init_empty_weights(): self.model_8bit = AutoModelForCausalLM.from_config(AutoConfig.from_pretrained(self.model_name)) self.model_8bit.tie_weights() self.weights_location = hf_hub_download(self.model_name, "pytorch_model.bin") self.bnb_quantization_config = BnbQuantizationConfig(load_in_8bit=True) self.model_8bit = load_and_quantize_model( self.model_8bit, self.bnb_quantization_config, weights_location=self.weights_location, device_map={"": 0}, no_split_module_classes=["BloomBlock"], ) self.tokenizer = AutoTokenizer.from_pretrained("bigscience/bloom-1b7") self.accelerate = Accelerator() def tearDown(self): r""" TearDown function needs to be called at the end of each test to free the GPU memory and cache, also to avoid unexpected behaviors. 
Please see: https://discuss.pytorch.org/t/how-can-we-release-gpu-memory-cache/14530/27 """ del self.model_fp16 del self.model_8bit gc.collect() torch.cuda.empty_cache() def test_memory_footprint(self): r""" A simple test to check if the model conversion has been done correctly by checking on the memory footprint of the converted model and the class type of the linear layers of the converted models """ from bitsandbytes.nn import Int8Params mem_fp16 = self.model_fp16.get_memory_footprint() mem_8bit = self.model_8bit.get_memory_footprint() self.assertAlmostEqual(mem_fp16 / mem_8bit, self.EXPECTED_RELATIVE_DIFFERENCE) self.assertTrue(self.model_8bit.transformer.h[0].mlp.dense_4h_to_h.weight.__class__ == Int8Params) def test_linear_are_8bit(self): r""" A simple test to check if the model conversion has been done correctly by checking on the memory footprint of the converted model and the class type of the linear layers of the converted models """ self.model_fp16.get_memory_footprint() self.model_8bit.get_memory_footprint() for name, module in self.model_8bit.named_modules(): if isinstance(module, torch.nn.Linear): modules_not_converted = ( self.bnb_quantization_config.keep_in_fp32_modules + self.bnb_quantization_config.skip_modules ) if name not in modules_not_converted: self.assertTrue(module.weight.dtype == torch.int8) def test_llm_skip(self): r""" A simple test to check if `llm_int8_skip_modules` works as expected """ import bitsandbytes as bnb from transformers import AutoConfig, AutoModelForCausalLM bnb_quantization_config = BnbQuantizationConfig( load_in_8bit=True, skip_modules=["lm_head", "transformer.word_embeddings"] ) with init_empty_weights(): model = AutoModelForCausalLM.from_config(AutoConfig.from_pretrained(self.model_name)) model.tie_weights() model = load_and_quantize_model( model, bnb_quantization_config, weights_location=self.weights_location, device_map="auto", no_split_module_classes=["BloomBlock"], ) self.assertTrue(model.transformer.h[1].mlp.dense_4h_to_h.weight.dtype == torch.int8) self.assertTrue(isinstance(model.transformer.h[1].mlp.dense_4h_to_h, bnb.nn.Linear8bitLt)) self.assertTrue(isinstance(model.lm_head, nn.Linear)) self.assertTrue(model.lm_head.weight.dtype != torch.int8) def check_inference_correctness(self, model): r""" Test the generation quality of the quantized model and see that we are matching the expected output. Given that we are operating on small numbers + the testing model is relatively small, we might not get the same output across GPUs. So we'll generate few tokens (5-10) and check their output. """ # Check that inference pass works on the model encoded_input = self.tokenizer(self.input_text, return_tensors="pt") # Check the exactness of the results output_parallel = model.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10) # Get the generation output_text = self.tokenizer.decode(output_parallel[0], skip_special_tokens=True) self.assertEqual(output_text, self.EXPECTED_OUTPUT) def test_generate_quality(self): self.check_inference_correctness(self.model_8bit) def test_fp32_8bit_conversion(self): r""" Test whether it is possible to mix both `8bit` and `fp32` weights when using `keep_in_fp32_modules` correctly. 
""" from transformers import AutoConfig, AutoModelForCausalLM bnb_quantization_config = BnbQuantizationConfig(load_in_8bit=True, keep_in_fp32_modules=["lm_head"]) with init_empty_weights(): model = AutoModelForCausalLM.from_config(AutoConfig.from_pretrained(self.model_name)) model.tie_weights() model = load_and_quantize_model( model, bnb_quantization_config, weights_location=self.weights_location, device_map="auto", no_split_module_classes=["BloomBlock"], ) self.assertTrue(model.lm_head.weight.dtype == torch.float32) @require_multi_gpu def test_cpu_gpu_loading_custom_device_map(self): from bitsandbytes.nn import Int8Params from transformers import AutoConfig, AutoModelForCausalLM r""" A test to check is dispatching a model on cpu & gpu works correctly using a custom `device_map`. """ device_map = { "transformer.word_embeddings": "cpu", "transformer.word_embeddings_layernorm": 0, "lm_head": "cpu", "transformer.h.0": "cpu", "transformer.h.1": "cpu", "transformer.h.2": "cpu", "transformer.h.3": 0, "transformer.h.4": 0, "transformer.h.5": 0, "transformer.h.6": 0, "transformer.h.7": 0, "transformer.h.8": 0, "transformer.h.9": 1, "transformer.h.10": 0, "transformer.h.11": 1, "transformer.h.12": 0, "transformer.h.13": 0, "transformer.h.14": 1, "transformer.h.15": 0, "transformer.h.16": 0, "transformer.h.17": 1, "transformer.h.18": 1, "transformer.h.19": 0, "transformer.h.20": 1, "transformer.h.21": 1, "transformer.h.22": 0, "transformer.h.23": 0, "transformer.ln_f": 1, } bnb_quantization_config = BnbQuantizationConfig(load_in_8bit=True) with init_empty_weights(): model_8bit = AutoModelForCausalLM.from_config(AutoConfig.from_pretrained(self.model_name)) model_8bit.tie_weights() model_8bit = load_and_quantize_model( model_8bit, bnb_quantization_config, weights_location=self.weights_location, device_map=device_map, no_split_module_classes=["BloomBlock"], ) self.assertTrue(model_8bit.transformer.h[0].mlp.dense_4h_to_h.weight.__class__ == Int8Params) self.assertTrue(model_8bit.transformer.h[1].mlp.dense_4h_to_h.weight.__class__ == Int8Params) self.check_inference_correctness(model_8bit) @require_multi_gpu def test_cpu_gpu_loading_custom_device_map_offload_state_dict(self): from bitsandbytes.nn import Int8Params from transformers import AutoConfig, AutoModelForCausalLM r""" A test to check is dispatching a model on cpu & gpu works correctly using a custom `device_map` and offload_state_dict=True. 
""" device_map = { "transformer.word_embeddings": "cpu", "transformer.word_embeddings_layernorm": 0, "lm_head": "cpu", "transformer.h.0": "cpu", "transformer.h.1": "cpu", "transformer.h.2": "cpu", "transformer.h.3": 0, "transformer.h.4": 0, "transformer.h.5": 0, "transformer.h.6": 0, "transformer.h.7": 0, "transformer.h.8": 0, "transformer.h.9": 1, "transformer.h.10": 0, "transformer.h.11": 1, "transformer.h.12": 0, "transformer.h.13": 0, "transformer.h.14": 1, "transformer.h.15": 0, "transformer.h.16": 0, "transformer.h.17": 1, "transformer.h.18": 1, "transformer.h.19": 0, "transformer.h.20": 1, "transformer.h.21": 1, "transformer.h.22": 0, "transformer.h.23": 0, "transformer.ln_f": 1, } bnb_quantization_config = BnbQuantizationConfig(load_in_8bit=True) with init_empty_weights(): model_8bit = AutoModelForCausalLM.from_config(AutoConfig.from_pretrained(self.model_name)) model_8bit.tie_weights() model_8bit = load_and_quantize_model( model_8bit, bnb_quantization_config, weights_location=self.weights_location, device_map=device_map, no_split_module_classes=["BloomBlock"], offload_state_dict=True, ) self.assertTrue(model_8bit.transformer.h[0].mlp.dense_4h_to_h.weight.__class__ == Int8Params) self.assertTrue(model_8bit.transformer.h[1].mlp.dense_4h_to_h.weight.__class__ == Int8Params) self.check_inference_correctness(model_8bit) @require_multi_gpu def test_cpu_gpu_disk_loading_custom_device_map_kwargs(self): from bitsandbytes.nn import Int8Params from transformers import AutoConfig, AutoModelForCausalLM r""" A test to check is dispatching a model on cpu & gpu works correctly using a custom `device_map`. This time we also add `disk` on the device_map - using the kwargs directly instead of the quantization config """ device_map = { "transformer.word_embeddings": "cpu", "transformer.word_embeddings_layernorm": 0, "lm_head": "cpu", "transformer.h.0": "cpu", "transformer.h.1": "cpu", "transformer.h.2": "cpu", "transformer.h.3": "disk", "transformer.h.4": "disk", "transformer.h.5": "disk", "transformer.h.6": 0, "transformer.h.7": 0, "transformer.h.8": 0, "transformer.h.9": 1, "transformer.h.10": 0, "transformer.h.11": 1, "transformer.h.12": 0, "transformer.h.13": 0, "transformer.h.14": 1, "transformer.h.15": 0, "transformer.h.16": 0, "transformer.h.17": 1, "transformer.h.18": 1, "transformer.h.19": 0, "transformer.h.20": 1, "transformer.h.21": 1, "transformer.h.22": 0, "transformer.h.23": 0, "transformer.ln_f": 1, } bnb_quantization_config = BnbQuantizationConfig(load_in_8bit=True) with init_empty_weights(): model_8bit = AutoModelForCausalLM.from_config(AutoConfig.from_pretrained(self.model_name)) model_8bit.tie_weights() with tempfile.TemporaryDirectory() as tmpdirname: model_8bit = load_and_quantize_model( model_8bit, bnb_quantization_config, weights_location=self.weights_location, device_map=device_map, no_split_module_classes=["BloomBlock"], offload_folder=tmpdirname, offload_state_dict=True, ) self.assertTrue(model_8bit.transformer.h[4].mlp.dense_4h_to_h.weight.__class__ == Int8Params) self.assertTrue(model_8bit.transformer.h[5].mlp.dense_4h_to_h.weight.__class__ == Int8Params) self.check_inference_correctness(model_8bit) def test_int8_serialization(self): r""" Test whether it is possible to serialize a model in 8-bit. 
""" from bitsandbytes.nn import Int8Params from transformers import AutoConfig, AutoModelForCausalLM with tempfile.TemporaryDirectory() as tmpdirname: # saving state dict for now but will save config and other in the future self.accelerate.save_model(self.model_8bit, tmpdirname) with init_empty_weights(): # let's suppose that we can get the right config model_8bit_from_saved = AutoModelForCausalLM.from_config(AutoConfig.from_pretrained(self.model_name)) model_8bit_from_saved.tie_weights() bnb_quantization_config = BnbQuantizationConfig(load_in_8bit=True) model_8bit_from_saved = load_and_quantize_model( model_8bit_from_saved, bnb_quantization_config, weights_location=tmpdirname, device_map="auto", no_split_module_classes=["BloomBlock"], ) self.assertTrue(model_8bit_from_saved.transformer.h[0].mlp.dense_4h_to_h.weight.__class__ == Int8Params) self.assertTrue(hasattr(model_8bit_from_saved.transformer.h[0].mlp.dense_4h_to_h.weight, "SCB")) self.assertTrue(hasattr(model_8bit_from_saved.transformer.h[0].mlp.dense_4h_to_h.weight, "CB")) self.check_inference_correctness(model_8bit_from_saved) @require_multi_gpu def test_int8_serialization_offload(self): r""" Test whether it is possible to serialize a model in 8-bit and offload weights to cpu/disk """ from bitsandbytes.nn import Int8Params from transformers import AutoConfig, AutoModelForCausalLM with tempfile.TemporaryDirectory() as tmpdirname: # saving state dict for now but will save config and other in the future self.accelerate.save_model(self.model_8bit, tmpdirname) with init_empty_weights(): # let's suppose that we can get the right config model_8bit_from_saved = AutoModelForCausalLM.from_config(AutoConfig.from_pretrained(self.model_name)) model_8bit_from_saved.tie_weights() bnb_quantization_config = BnbQuantizationConfig(load_in_8bit=True) device_map = { "transformer.word_embeddings": "cpu", "transformer.word_embeddings_layernorm": 0, "lm_head": "cpu", "transformer.h.0": "cpu", "transformer.h.1": "cpu", "transformer.h.2": "cpu", "transformer.h.3": "disk", "transformer.h.4": "disk", "transformer.h.5": "disk", "transformer.h.6": 0, "transformer.h.7": 0, "transformer.h.8": 0, "transformer.h.9": 1, "transformer.h.10": 0, "transformer.h.11": 1, "transformer.h.12": 0, "transformer.h.13": 0, "transformer.h.14": 1, "transformer.h.15": 0, "transformer.h.16": 0, "transformer.h.17": 1, "transformer.h.18": 1, "transformer.h.19": 0, "transformer.h.20": 1, "transformer.h.21": 1, "transformer.h.22": 0, "transformer.h.23": 0, "transformer.ln_f": 1, } model_8bit_from_saved = load_and_quantize_model( model_8bit_from_saved, bnb_quantization_config, weights_location=tmpdirname + "/pytorch_model.bin", device_map=device_map, no_split_module_classes=["BloomBlock"], offload_folder=tmpdirname + "/tmp", offload_state_dict=True, ) self.assertTrue(model_8bit_from_saved.transformer.h[4].mlp.dense_4h_to_h.weight.__class__ == Int8Params) self.assertTrue(model_8bit_from_saved.transformer.h[5].mlp.dense_4h_to_h.weight.__class__ == Int8Params) self.check_inference_correctness(model_8bit_from_saved) def test_int8_serialization_shard(self): r""" Test whether it is possible to serialize a model in 8-bit. 
""" from bitsandbytes.nn import Int8Params from transformers import AutoConfig, AutoModelForCausalLM with tempfile.TemporaryDirectory() as tmpdirname: # saving state dict for now but will save config and other in the future self.accelerate.save_model(self.model_8bit, tmpdirname, max_shard_size="1GB") with init_empty_weights(): # let's suppose that we can get the right config model_8bit_from_saved = AutoModelForCausalLM.from_config(AutoConfig.from_pretrained(self.model_name)) model_8bit_from_saved.tie_weights() bnb_quantization_config = BnbQuantizationConfig(load_in_8bit=True) model_8bit_from_saved = load_and_quantize_model( model_8bit_from_saved, bnb_quantization_config, weights_location=tmpdirname, device_map="auto", no_split_module_classes=["BloomBlock"], ) self.assertTrue(model_8bit_from_saved.transformer.h[0].mlp.dense_4h_to_h.weight.__class__ == Int8Params) self.assertTrue(hasattr(model_8bit_from_saved.transformer.h[0].mlp.dense_4h_to_h.weight, "SCB")) self.assertTrue(hasattr(model_8bit_from_saved.transformer.h[0].mlp.dense_4h_to_h.weight, "CB")) self.check_inference_correctness(model_8bit_from_saved) @slow @require_cuda @require_bnb @require_huggingface_suite class MixedInt8LoaddedModelTest(unittest.TestCase): # We keep the constants inside the init function and model loading inside setUp function # We need to test on relatively large models (aka >1b parameters otherwise the quantiztion may not work as expected) # Therefore here we use only bloom-1b3 to test our module model_name = "marcsun13/bloom-1b7_with_lm_head" # Constant values # This was obtained on a Quadro RTX 8000 so the number might slightly change EXPECTED_RELATIVE_DIFFERENCE = 1.540025 input_text = "Hello my name is" EXPECTED_OUTPUT = "Hello my name is John.\nI am a friend of the family.\n" MAX_NEW_TOKENS = 10 def setUp(self): """ Setup quantized model from loaded model """ from transformers import AutoModelForCausalLM, AutoTokenizer # Models and tokenizer self.model_fp16 = AutoModelForCausalLM.from_pretrained( self.model_name, torch_dtype=torch.float16, device_map="auto" ) self.bnb_quantization_config = BnbQuantizationConfig(load_in_8bit=True) self.model_8bit = AutoModelForCausalLM.from_pretrained(self.model_name, torch_dtype=torch.float16) self.model_8bit = load_and_quantize_model(self.model_8bit, self.bnb_quantization_config) self.tokenizer = AutoTokenizer.from_pretrained("bigscience/bloom-1b7") def tearDown(self): r""" TearDown function needs to be called at the end of each test to free the GPU memory and cache, also to avoid unexpected behaviors. 
Please see: https://discuss.pytorch.org/t/how-can-we-release-gpu-memory-cache/14530/27 """ del self.model_fp16 del self.model_8bit gc.collect() torch.cuda.empty_cache() def test_memory_footprint(self): r""" A simple test to check if the model conversion has been done correctly by checking on the memory footprint of the converted model and the class type of the linear layers of the converted models """ from bitsandbytes.nn import Int8Params mem_fp16 = self.model_fp16.get_memory_footprint() mem_8bit = self.model_8bit.get_memory_footprint() self.assertAlmostEqual(mem_fp16 / mem_8bit, self.EXPECTED_RELATIVE_DIFFERENCE) self.assertTrue(self.model_8bit.transformer.h[0].mlp.dense_4h_to_h.weight.__class__ == Int8Params) def test_linear_are_8bit(self): r""" A simple test to check if the model conversion has been done correctly by checking on the memory footprint of the converted model and the class type of the linear layers of the converted models """ self.model_fp16.get_memory_footprint() self.model_8bit.get_memory_footprint() for name, module in self.model_8bit.named_modules(): if isinstance(module, torch.nn.Linear): modules_not_converted = ( self.bnb_quantization_config.keep_in_fp32_modules + self.bnb_quantization_config.skip_modules ) if name not in modules_not_converted: self.assertTrue(module.weight.dtype == torch.int8) def test_generate_quality(self): r""" Test the generation quality of the quantized model and see that we are matching the expected output. Given that we are operating on small numbers + the testing model is relatively small, we might not get the same output across GPUs. So we'll generate few tokens (5-10) and check their output. """ encoded_input = self.tokenizer(self.input_text, return_tensors="pt") output_sequences = self.model_8bit.generate( input_ids=encoded_input["input_ids"].to(self.model_8bit.device), max_new_tokens=10 ) self.assertEqual(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUT) def test_fp32_8bit_conversion(self): r""" Test whether it is possible to mix both `8bit` and `fp32` weights when using `keep_in_fp32_modules` correctly. """ from transformers import AutoModelForCausalLM bnb_quantization_config = BnbQuantizationConfig(load_in_8bit=True, keep_in_fp32_modules=["lm_head"]) model = AutoModelForCausalLM.from_pretrained(self.model_name, torch_dtype=torch.float16) model = load_and_quantize_model(model, bnb_quantization_config) self.assertTrue(model.lm_head.weight.dtype == torch.float32) @slow @require_cuda @require_bnb @require_huggingface_suite class Bnb4BitEmptyModelTest(unittest.TestCase): # We keep the constants inside the init function and model loading inside setUp function # We need to test on relatively large models (aka >1b parameters otherwise the quantiztion may not work as expected) # Therefore here we use only bloom-1b3 to test our module model_name = "marcsun13/bloom-1b7_with_lm_head" # Constant values # This was obtained on a RTX Titan so the number might slightly change EXPECTED_RELATIVE_DIFFERENCE = 2.109659552692574 input_text = "Hello my name is" EXPECTED_OUTPUTS = set() EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. 
I") EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n") MAX_NEW_TOKENS = 10 def setUp(self): from huggingface_hub import hf_hub_download from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer super().setUp() # Models and tokenizer self.model_fp16 = AutoModelForCausalLM.from_pretrained( self.model_name, torch_dtype=torch.float16, device_map="auto" ) # create model on meta device with init_empty_weights(): self.model_4bit = AutoModelForCausalLM.from_config(AutoConfig.from_pretrained(self.model_name)) self.model_4bit.tie_weights() self.weights_location = hf_hub_download(self.model_name, "pytorch_model.bin") self.bnb_quantization_config = BnbQuantizationConfig(load_in_4bit=True) self.model_4bit = load_and_quantize_model( self.model_4bit, self.bnb_quantization_config, weights_location=self.weights_location, device_map={"": 0}, no_split_module_classes=["BloomBlock"], ) self.tokenizer = AutoTokenizer.from_pretrained("bigscience/bloom-1b7") def tearDown(self): """ TearDown function needs to be called at the end of each test to free the GPU memory and cache, also to avoid unexpected behaviors. Please see: https://discuss.pytorch.org/t/how-can-we-release-gpu-memory-cache/14530/27 """ super().tearDown() del self.model_fp16 del self.model_4bit gc.collect() torch.cuda.empty_cache() def test_memory_footprint(self): r""" A simple test to check if the model conversion has been done correctly by checking on the memory footprint of the converted model and the class type of the linear layers of the converted models """ from bitsandbytes.nn import Params4bit mem_fp16 = self.model_fp16.get_memory_footprint() mem_4bit = self.model_4bit.get_memory_footprint() self.assertAlmostEqual(mem_fp16 / mem_4bit, self.EXPECTED_RELATIVE_DIFFERENCE) self.assertTrue(self.model_4bit.transformer.h[0].mlp.dense_4h_to_h.weight.__class__ == Params4bit) def check_inference_correctness(self, model): r""" Test the generation quality of the quantized model and see that we are matching the expected output. Given that we are operating on small numbers + the testing model is relatively small, we might not get the same output across GPUs. So we'll generate few tokens (5-10) and check their output. """ # Check that inference pass works on the model encoded_input = self.tokenizer(self.input_text, return_tensors="pt") # Check the exactness of the results output_sequences = model.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10) self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS) def test_generate_quality(self): self.check_inference_correctness(self.model_4bit) def test_linear_are_4bit(self): r""" A simple test to check if the model conversion has been done correctly by checking on the memory footprint of the converted model and the class type of the linear layers of the converted models """ self.model_fp16.get_memory_footprint() self.model_4bit.get_memory_footprint() for name, module in self.model_4bit.named_modules(): if isinstance(module, torch.nn.Linear): if ( name not in self.bnb_quantization_config.keep_in_fp32_modules + self.bnb_quantization_config.skip_modules ): # 4-bit parameters are packed in uint8 variables self.assertTrue(module.weight.dtype == torch.uint8) def test_fp32_4bit_conversion(self): r""" Test whether it is possible to mix both `4bit` and `fp32` weights when using `keep_in_fp32_modules` correctly. 
""" from transformers import AutoConfig, AutoModelForCausalLM bnb_quantization_config = BnbQuantizationConfig(load_in_4bit=True, keep_in_fp32_modules=["lm_head"]) with init_empty_weights(): model = AutoModelForCausalLM.from_config(AutoConfig.from_pretrained(self.model_name)) model.tie_weights() model = load_and_quantize_model( model, bnb_quantization_config, weights_location=self.weights_location, device_map="auto", no_split_module_classes=["BloomBlock"], ) self.assertTrue(model.lm_head.weight.dtype == torch.float32) @require_multi_gpu def test_cpu_gpu_loading_random_device_map(self): from transformers import AutoConfig, AutoModelForCausalLM r""" A test to check is dispatching a model on cpu & gpu works correctly using a random `device_map`. """ device_map = { "transformer.word_embeddings": "cpu", "transformer.word_embeddings_layernorm": 0, "lm_head": "cpu", "transformer.h.0": 0, "transformer.h.1": 0, "transformer.h.2": 0, "transformer.h.3": 0, "transformer.h.4": 0, "transformer.h.5": 0, "transformer.h.6": 0, "transformer.h.7": 0, "transformer.h.8": 0, "transformer.h.9": 1, "transformer.h.10": 0, "transformer.h.11": 1, "transformer.h.12": 0, "transformer.h.13": 0, "transformer.h.14": 1, "transformer.h.15": 0, "transformer.h.16": 0, "transformer.h.17": 1, "transformer.h.18": 1, "transformer.h.19": 0, "transformer.h.20": 1, "transformer.h.21": 1, "transformer.h.22": 0, "transformer.h.23": 0, "transformer.ln_f": 1, } bnb_quantization_config = BnbQuantizationConfig(load_in_4bit=True) with init_empty_weights(): model_4bit = AutoModelForCausalLM.from_config(AutoConfig.from_pretrained(self.model_name)) model_4bit.tie_weights() model_4bit = load_and_quantize_model( model_4bit, bnb_quantization_config, weights_location=self.weights_location, device_map=device_map, no_split_module_classes=["BloomBlock"], ) self.check_inference_correctness(model_4bit) @require_multi_gpu def test_cpu_gpu_loading_custom_device_map(self): from transformers import AutoConfig, AutoModelForCausalLM r""" A test to check is dispatching a model on cpu & gpu works correctly using a random `device_map`. """ device_map = { "transformer.word_embeddings": "cpu", "transformer.word_embeddings_layernorm": "cpu", "lm_head": "cpu", "transformer.h": 0, "transformer.ln_f": 1, } bnb_quantization_config = BnbQuantizationConfig(load_in_4bit=True) with init_empty_weights(): model_4bit = AutoModelForCausalLM.from_config(AutoConfig.from_pretrained(self.model_name)) model_4bit.tie_weights() model_4bit = load_and_quantize_model( model_4bit, bnb_quantization_config, weights_location=self.weights_location, device_map=device_map, no_split_module_classes=["BloomBlock"], ) self.check_inference_correctness(model_4bit) @require_multi_gpu def test_cpu_gpu_disk_loading_custom_device_map_kwargs(self): from transformers import AutoConfig, AutoModelForCausalLM r""" A test to check is dispatching a model on cpu & gpu works correctly using a custom `device_map`. 
This time we also add `disk` on the device_map - using the kwargs directly instead of the quantization config """ device_map = { "transformer.word_embeddings": 0, "transformer.word_embeddings_layernorm": "disk", "lm_head": 0, "transformer.h": 1, "transformer.ln_f": "cpu", } bnb_quantization_config = BnbQuantizationConfig(load_in_4bit=True) with init_empty_weights(): model_4bit = AutoModelForCausalLM.from_config(AutoConfig.from_pretrained(self.model_name)) model_4bit.tie_weights() with tempfile.TemporaryDirectory() as tmpdirname: model_4bit = load_and_quantize_model( model_4bit, bnb_quantization_config, weights_location=self.weights_location, device_map=device_map, no_split_module_classes=["BloomBlock"], offload_folder=tmpdirname, offload_state_dict=True, ) self.check_inference_correctness(model_4bit) @slow @require_cuda @require_bnb @require_huggingface_suite class Bnb4BitTestLoadedModel(unittest.TestCase): # We keep the constants inside the init function and model loading inside setUp function # We need to test on relatively large models (aka >1b parameters otherwise the quantiztion may not work as expected) # Therefore here we use only bloom-1b3 to test our module model_name = "marcsun13/bloom-1b7_with_lm_head" # Constant values # This was obtained on a RTX Titan so the number might slightly change EXPECTED_RELATIVE_DIFFERENCE = 2.109659552692574 input_text = "Hello my name is" EXPECTED_OUTPUTS = set() EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I") EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n") MAX_NEW_TOKENS = 10 def setUp(self): """ Setup quantized model from loaded model """ from transformers import AutoModelForCausalLM, AutoTokenizer super().setUp() # Models and tokenizer self.model_fp16 = AutoModelForCausalLM.from_pretrained( self.model_name, torch_dtype=torch.float16, device_map="auto" ) self.bnb_quantization_config = BnbQuantizationConfig(load_in_4bit=True) self.model_4bit = AutoModelForCausalLM.from_pretrained(self.model_name, torch_dtype=torch.float16) self.model_4bit = load_and_quantize_model(self.model_4bit, self.bnb_quantization_config) self.tokenizer = AutoTokenizer.from_pretrained("bigscience/bloom-1b7") def tearDown(self): """ TearDown function needs to be called at the end of each test to free the GPU memory and cache, also to avoid unexpected behaviors. 
Please see: https://discuss.pytorch.org/t/how-can-we-release-gpu-memory-cache/14530/27 """ super().tearDown() del self.model_fp16 del self.model_4bit gc.collect() torch.cuda.empty_cache() def test_memory_footprint(self): r""" A simple test to check if the model conversion has been done correctly by checking on the memory footprint of the converted model and the class type of the linear layers of the converted models """ from bitsandbytes.nn import Params4bit mem_fp16 = self.model_fp16.get_memory_footprint() mem_4bit = self.model_4bit.get_memory_footprint() self.assertAlmostEqual(mem_fp16 / mem_4bit, self.EXPECTED_RELATIVE_DIFFERENCE) self.assertTrue(self.model_4bit.transformer.h[0].mlp.dense_4h_to_h.weight.__class__ == Params4bit) def test_linear_are_4bit(self): r""" A simple test to check if the model conversion has been done correctly by checking on the memory footprint of the converted model and the class type of the linear layers of the converted models """ self.model_fp16.get_memory_footprint() self.model_4bit.get_memory_footprint() for name, module in self.model_4bit.named_modules(): if isinstance(module, torch.nn.Linear): if ( name not in self.bnb_quantization_config.keep_in_fp32_modules + self.bnb_quantization_config.skip_modules ): # 4-bit parameters are packed in uint8 variables self.assertTrue(module.weight.dtype == torch.uint8) def test_generate_quality(self): r""" Test the generation quality of the quantized model and see that we are matching the expected output. Given that we are operating on small numbers + the testing model is relatively small, we might not get the same output across GPUs. So we'll generate few tokens (5-10) and check their output. """ encoded_input = self.tokenizer(self.input_text, return_tensors="pt") output_sequences = self.model_4bit.generate( input_ids=encoded_input["input_ids"].to(self.model_4bit.device), max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS) def test_fp32_4bit_conversion(self): r""" Test whether it is possible to mix both `4bit` and `fp32` weights when using `keep_in_fp32_modules` correctly. """ from transformers import AutoModelForCausalLM bnb_quantization_config = BnbQuantizationConfig(load_in_4bit=True, keep_in_fp32_modules=["lm_head"]) model = AutoModelForCausalLM.from_pretrained(self.model_name, torch_dtype=torch.float16) model = load_and_quantize_model(model, bnb_quantization_config) self.assertTrue(model.lm_head.weight.dtype == torch.float32)
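# --- Editorial sketch (not part of the original test suite; assumptions noted below) ---
# A minimal, hedged illustration of the workflow these tests exercise: build the
# model skeleton on the meta device, then materialize and quantize it with
# `load_and_quantize_model`. Only calls already used above are repeated here; the
# checkpoint name, fp32 module list, and device map are illustrative, and actually
# running this requires a CUDA device plus `bitsandbytes` and `transformers`.
def _example_load_bloom_in_8bit(model_name="marcsun13/bloom-1b7_with_lm_head"):
    from huggingface_hub import hf_hub_download
    from transformers import AutoConfig, AutoModelForCausalLM

    # 1. Create the model structure on the meta device (no weights allocated yet).
    with init_empty_weights():
        model = AutoModelForCausalLM.from_config(AutoConfig.from_pretrained(model_name))
    model.tie_weights()

    # 2. Download the full-precision checkpoint and quantize linear layers to int8
    #    while loading, keeping the LM head in fp32.
    weights_location = hf_hub_download(model_name, "pytorch_model.bin")
    bnb_config = BnbQuantizationConfig(load_in_8bit=True, keep_in_fp32_modules=["lm_head"])
    return load_and_quantize_model(
        model,
        bnb_config,
        weights_location=weights_location,
        device_map="auto",
        no_split_module_classes=["BloomBlock"],
    )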
accelerate-main
tests/test_quantization.py