# coding=utf-8
# Copyright 2022 The Metaseq Authors and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" OPT model configuration"""
import ast
import os
from typing import Union

from transformers import AutoConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import logging


logger = logging.get_logger(__name__)

OPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/opt-125m": "https://huggingface.co/facebook/opt-125m/blob/main/config.json",
    "facebook/opt-350m": "https://huggingface.co/facebook/opt-350m/blob/main/config.json",
    "facebook/opt-1.3b": "https://huggingface.co/facebook/opt-1.3b/blob/main/config.json",
    "facebook/opt-2.7b": "https://huggingface.co/facebook/opt-2.7b/blob/main/config.json",
    "facebook/opt-6.7b": "https://huggingface.co/facebook/opt-6.7b/blob/main/config.json",
    "facebook/opt-13b": "https://huggingface.co/facebook/opt-13b/blob/main/config.json",
}


class VOPTConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`OPTModel`]. It is used to instantiate an OPT
    model according to the specified arguments, defining the model architecture. Instantiating a configuration with
    the defaults will yield a similar configuration to that of the OPT
    [facebook/opt-350m](https://huggingface.co/facebook/opt-350m) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    TODO: this doc is completely out of sync with the actual args

    Args:
        vocab_size (`int`, *optional*, defaults to 50272):
            Vocabulary size of the OPT model. Defines the number of different tokens that can be represented by the
            `inputs_ids` passed when calling [`OPTModel`].
        additional_vocab_size (`int`, *optional*, defaults to 0):
            Additional vocabulary size of the model, typically for the special "" token. Additional vocab tokens
            are always trainable whereas regular vocab tokens can be frozen or not.
        hidden_size (`int`, *optional*, defaults to 768):
            Dimensionality of the layers and the pooler layer.
        num_hidden_layers (`int`, *optional*, defaults to 12):
            Number of decoder layers.
        ffn_dim (`int`, *optional*, defaults to 3072):
            Dimensionality of the "intermediate" (often named feed-forward) layer in the decoder.
        num_attention_heads (`int`, *optional*, defaults to 12):
            Number of attention heads for each attention layer in the Transformer decoder.
        activation_function (`str` or `function`, *optional*, defaults to `"relu"`):
            The non-linear activation function (function or string) in the decoder. If string, `"gelu"`, `"relu"`,
            `"silu"` and `"gelu_new"` are supported.
        max_position_embeddings (`int`, *optional*, defaults to 2048):
            The maximum sequence length that this model might ever be used with. Typically set this to something
            large just in case (e.g., 512 or 1024 or 2048).
        do_layer_norm_before (`bool`, *optional*, defaults to `True`):
            Whether to perform layer normalization before the attention block.
        word_embed_proj_dim (`int`, *optional*):
            `word_embed_proj_dim` can be set to down-project word embeddings, *e.g.* `opt-350m`. Defaults to
            `hidden_size`.
        dropout (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        layerdrop (`float`, *optional*, defaults to 0.0):
            The LayerDrop probability. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556) for more details.
        init_std (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        alpha_initializer (`str`, *optional*, defaults to `"ones"`):
            Initialization type for the alphas.
        alphas_initializer_range (`float`, *optional*, defaults to 0.0):
            The standard deviation of the truncated_normal_initializer for initializing the alphas in the Gated
            Cross Attention.
        alpha_type (`str`, *optional*, defaults to `"vector"`):
            Whether the gating alphas should be vectors or single floats.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models).
        cross_layer_interval (`int`, *optional*, defaults to 1):
            Interval for cross attention (from text to image) layers.

    Example:

    ```python
    >>> from transformers import OPTModel, OPTConfig

    >>> # Initializing an OPT facebook/opt-large style configuration
    >>> configuration = OPTConfig()

    >>> # Initializing a model from the facebook/opt-large style configuration
    >>> model = OPTModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "vopt"
    keys_to_ignore_at_inference = ["past_key_values"]
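    # The constructor arguments below fall into a few rough groups: the core OPT decoder
    # hyperparameters (vocab/hidden sizes, layer and head counts, dropout, ...), the gated
    # cross-attention options (`cross_layer_*`, `alpha_*`, `qk_layer_norms*`,
    # `normformer_layer_norms`, `rms_norm`), the freezing flags for the text and vision
    # modules, and the vision encoder / perceiver resampler settings (`vision_*`,
    # `resampler_*`).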
    def __init__(
        self,
        vocab_size=50272,
        additional_vocab_size=0,
        hidden_size=768,
        num_hidden_layers=12,
        ffn_dim=3072,
        max_position_embeddings=2048,
        do_layer_norm_before=True,
        _remove_final_layer_norm=False,
        word_embed_proj_dim=None,
        dropout=0.1,
        attention_dropout=0.0,
        num_attention_heads=12,
        activation_function="relu",
        layerdrop=0.0,
        init_std=0.02,
        alpha_initializer="ones",
        alphas_initializer_range=0.0,
        alpha_type="vector",
        use_cache=True,
        pad_token_id=1,
        bos_token_id=2,
        eos_token_id=2,
        cross_layer_interval=1,
        cross_layer_activation_function="swiglu",
        normformer_layer_norms=False,
        qk_layer_norms=False,
        rms_norm=False,
        qk_layer_norms_perceiver=False,
        tie_word_embeddings=False,
        freeze_text_layers=True,
        freeze_text_module_exceptions=[],
        freeze_lm_head=False,
        freeze_vision_layers=True,
        freeze_vision_module_exceptions=[],
        vision_model_name="google/vit-base-patch16-224",
        vision_model_params="{}",
        vision_embed_dim=768,
        vision_image_size=224,
        image_token_index=50257,  # TODO: change this to the right value
        use_resampler=False,
        resampler_n_latents=64,
        resampler_depth=6,
        resampler_n_heads=16,
        resampler_head_dim=96,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.additional_vocab_size = additional_vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.num_attention_heads = num_attention_heads
        self.word_embed_proj_dim = word_embed_proj_dim if word_embed_proj_dim is not None else hidden_size
        self.ffn_dim = ffn_dim
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.alpha_initializer = alpha_initializer
        self.alphas_initializer_range = alphas_initializer_range
        self.alpha_type = alpha_type
        self.layerdrop = layerdrop
        self.use_cache = use_cache
        self.do_layer_norm_before = do_layer_norm_before
        # Note that the only purpose of `_remove_final_layer_norm` is to keep backward compatibility
        # with checkpoints that have been fine-tuned before transformers v4.20.1
        # see https://github.com/facebookresearch/metaseq/pull/164
        self._remove_final_layer_norm = _remove_final_layer_norm

        self.cross_layer_interval = cross_layer_interval
        self.cross_layer_activation_function = cross_layer_activation_function
        self.normformer_layer_norms = normformer_layer_norms
        self.qk_layer_norms = qk_layer_norms
        self.rms_norm = rms_norm
        self.qk_layer_norms_perceiver = qk_layer_norms_perceiver

        self.freeze_vision_layers = freeze_vision_layers
        self.vision_model_name = vision_model_name
        self.vision_model_params = vision_model_params

        self.tie_word_embeddings = tie_word_embeddings
        self.freeze_text_layers = freeze_text_layers
        self.freeze_text_module_exceptions = freeze_text_module_exceptions
        self.freeze_vision_module_exceptions = freeze_vision_module_exceptions
        self.freeze_lm_head = freeze_lm_head
        self.image_token_index = image_token_index

        self.vision_embed_dim = vision_embed_dim
        self.vision_image_size = vision_image_size

        # Resampler params
        self.use_resampler = use_resampler
        self.resampler_n_latents = resampler_n_latents
        self.resampler_depth = resampler_depth
        self.resampler_n_heads = resampler_n_heads
        self.resampler_head_dim = resampler_head_dim

        # IMPORTANT: Do not do any __init__ args-based checks in the constructor, since
        # PretrainedConfig.from_dict first instantiates the class with the config dict and only then
        # updates the config object with `kwargs` from from_pretrained, so during the instantiation
        # of this object many attributes have default values and haven't yet been overridden.
        # Do any required checks inside `from_pretrained` once the superclass' `from_pretrained` was run.
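    # `vision_model_params` is kept as a string holding a dict literal (e.g.
    # vision_model_params="{'image_size': 224}") rather than a dict; `check_compatibilities`
    # parses it and forwards the resulting kwargs to `AutoConfig.from_pretrained`.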
    def check_compatibilities(self):
        # Only Python literals are accepted here; `ast.literal_eval` avoids executing
        # arbitrary code from the config string.
        vision_model_params = ast.literal_eval(self.vision_model_params)
        config = AutoConfig.from_pretrained(self.vision_model_name, **vision_model_params)
        if hasattr(config, "vision_config"):
            vision_config = config.vision_config
        else:
            vision_config = config
        vision_embed_dim = vision_config.hidden_size
        if self.vision_embed_dim != vision_embed_dim:
            raise ValueError(
                f"vision_embed_dim ({self.vision_embed_dim}) must match the hidden size of the vision model"
                f" ({vision_embed_dim})"
            )
        vision_image_size = vision_config.image_size
        if self.vision_image_size != vision_image_size:
            raise ValueError(
                f"vision_image_size ({self.vision_image_size}) must match the image size of the vision model"
                f" ({vision_image_size})"
            )

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        outputs = super().from_pretrained(pretrained_model_name_or_path, **kwargs)
        if isinstance(outputs, tuple):
            # When called with return_unused_kwargs=True, the first item will be the config
            outputs[0].check_compatibilities()
        else:
            outputs.check_compatibilities()
        return outputs
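

# A minimal usage sketch: build a config, round-trip it through a plain dict, and check a
# couple of fields. The argument values are illustrative. Note that `VOPTConfig.from_pretrained`
# additionally runs `check_compatibilities`, which fetches the vision backbone's config via
# `AutoConfig.from_pretrained` and therefore needs network access (or a local cache) for the
# default "google/vit-base-patch16-224"; the dict round-trip below stays fully offline.
if __name__ == "__main__":
    config = VOPTConfig(cross_layer_interval=2, use_resampler=True)
    config_dict = config.to_dict()
    restored = VOPTConfig.from_dict(config_dict)
    assert restored.cross_layer_interval == 2
    assert restored.use_resampler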