# coding=utf-8
# Copyright 2022 The Metaseq Authors and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" OPT model configuration"""
import ast
import os
from typing import Union
from transformers import AutoConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import logging
logger = logging.get_logger(__name__)
OPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/opt-125m": "https://huggingface.co/facebook/opt-125m/blob/main/config.json",
"facebook/opt-350m": "https://huggingface.co/facebook/opt-350m/blob/main/config.json",
"facebook/opt-1.3b": "https://huggingface.co/facebook/opt-1.3b/blob/main/config.json",
"facebook/opt-2.7b": "https://huggingface.co/facebook/opt-2.7b/blob/main/config.json",
"facebook/opt-6.7b": "https://huggingface.co/facebook/opt-6.7b/blob/main/config.json",
"facebook/opt-13b": "https://huggingface.co/facebook/opt-13b/blob/main/config.json",
}
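
# For illustration: each entry in the map above points at a stock OPT config on the
# Hugging Face Hub, so any of them can be loaded directly, e.g.
#
#     from transformers import AutoConfig
#     opt_config = AutoConfig.from_pretrained("facebook/opt-125m")
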
class VOPTConfig(PretrainedConfig):
r"""
    This is the configuration class to store the configuration of an [`OPTModel`]. It is used to instantiate an OPT model
according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the OPT
[facebook/opt-350m](https://huggingface.co/facebook/opt-350m) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
TODO: this doc is completely out of sync with the actual args
Args:
vocab_size (`int`, *optional*, defaults to 50272):
            Vocabulary size of the OPT model. Defines the number of different tokens that can be represented by the
            `input_ids` passed when calling [`OPTModel`].
        additional_vocab_size (`int`, *optional*, defaults to 0):
Additional vocabulary size of the model, typically for the special "<img>" token. Additional vocab tokens
are always trainable whereas regular vocab tokens can be frozen or not.
hidden_size (`int`, *optional*, defaults to 768):
Dimensionality of the layers and the pooler layer.
num_hidden_layers (`int`, *optional*, defaults to 12):
Number of decoder layers.
ffn_dim (`int`, *optional*, defaults to 3072):
            Dimensionality of the "intermediate" (often named feed-forward) layer in the decoder.
num_attention_heads (`int`, *optional*, defaults to 12):
Number of attention heads for each attention layer in the Transformer decoder.
activation_function (`str` or `function`, *optional*, defaults to `"relu"`):
            The non-linear activation function (function or string) in the decoder. If string, `"gelu"`,
`"relu"`, `"silu"` and `"gelu_new"` are supported.
max_position_embeddings (`int`, *optional*, defaults to 2048):
The maximum sequence length that this model might ever be used with. Typically set this to something large
just in case (e.g., 512 or 1024 or 2048).
do_layer_norm_before (`bool`, *optional*, defaults to `True`):
Whether to perform layer normalization before the attention block.
word_embed_proj_dim (`int`, *optional*):
`word_embed_proj_dim` can be set to down-project word embeddings, *e.g.* `opt-350m`. Defaults to
`hidden_size`.
dropout (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings and decoder.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
        layerdrop (`float`, *optional*, defaults to 0.0):
            The LayerDrop probability. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556) for more
            details.
init_std (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
alpha_initializer (`str`, *optional*, defaults to `"ones"`):
Initialization type for the alphas.
alphas_initializer_range (`float`, *optional*, defaults to 0.0):
The standard deviation of the truncated_normal_initializer for initializing the alphas in the Gated Cross Attention.
alpha_type (`str`, *optional*, defaults to `"vector"`):
Whether the gating alphas should be vectors or single floats.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models).
        cross_layer_interval (`int`, *optional*, defaults to 1):
            Interval for cross attention (from text to image) layers.
Example:
```python
>>> from transformers import OPTModel, OPTConfig
    >>> # Initializing an OPT facebook/opt-large style configuration
>>> configuration = OPTConfig()
>>> # Initializing a model from the facebook/opt-large style configuration
>>> model = OPTModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "vopt"
keys_to_ignore_at_inference = ["past_key_values"]
def __init__(
self,
vocab_size=50272,
additional_vocab_size=0,
hidden_size=768,
num_hidden_layers=12,
ffn_dim=3072,
max_position_embeddings=2048,
do_layer_norm_before=True,
_remove_final_layer_norm=False,
word_embed_proj_dim=None,
dropout=0.1,
attention_dropout=0.0,
num_attention_heads=12,
activation_function="relu",
layerdrop=0.0,
init_std=0.02,
alpha_initializer="ones",
alphas_initializer_range=0.0,
alpha_type="vector",
use_cache=True,
pad_token_id=1,
bos_token_id=2,
eos_token_id=2,
cross_layer_interval=1,
cross_layer_activation_function="swiglu",
normformer_layer_norms=False,
qk_layer_norms=False,
rms_norm=False,
qk_layer_norms_perceiver=False,
tie_word_embeddings=False,
freeze_text_layers=True,
freeze_text_module_exceptions=[],
freeze_lm_head=False,
freeze_vision_layers=True,
freeze_vision_module_exceptions=[],
vision_model_name="google/vit-base-patch16-224",
vision_model_params="{}",
vision_embed_dim=768,
vision_image_size=224,
        image_token_index=50257,  # TODO: change this to the right value
use_resampler=False,
resampler_n_latents=64,
resampler_depth=6,
resampler_n_heads=16,
resampler_head_dim=96,
**kwargs,
):
super().__init__(
pad_token_id=pad_token_id,
bos_token_id=bos_token_id,
eos_token_id=eos_token_id,
tie_word_embeddings=tie_word_embeddings,
**kwargs,
)
self.vocab_size = vocab_size
self.additional_vocab_size = additional_vocab_size
self.max_position_embeddings = max_position_embeddings
self.num_attention_heads = num_attention_heads
self.word_embed_proj_dim = word_embed_proj_dim if word_embed_proj_dim is not None else hidden_size
self.ffn_dim = ffn_dim
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.dropout = dropout
self.attention_dropout = attention_dropout
self.activation_function = activation_function
self.init_std = init_std
self.alpha_initializer = alpha_initializer
self.alphas_initializer_range = alphas_initializer_range
self.alpha_type = alpha_type
self.layerdrop = layerdrop
self.use_cache = use_cache
self.do_layer_norm_before = do_layer_norm_before
# Note that the only purpose of `_remove_final_layer_norm` is to keep backward compatibility
# with checkpoints that have been fine-tuned before transformers v4.20.1
# see https://github.com/facebookresearch/metaseq/pull/164
self._remove_final_layer_norm = _remove_final_layer_norm
self.cross_layer_interval = cross_layer_interval
self.cross_layer_activation_function = cross_layer_activation_function
self.normformer_layer_norms = normformer_layer_norms
self.qk_layer_norms = qk_layer_norms
self.rms_norm = rms_norm
self.qk_layer_norms_perceiver = qk_layer_norms_perceiver
self.freeze_vision_layers = freeze_vision_layers
self.vision_model_name = vision_model_name
self.vision_model_params = vision_model_params
self.tie_word_embeddings = tie_word_embeddings
self.freeze_text_layers = freeze_text_layers
self.freeze_text_module_exceptions = freeze_text_module_exceptions
self.freeze_vision_module_exceptions = freeze_vision_module_exceptions
self.freeze_lm_head = freeze_lm_head
self.image_token_index = image_token_index
self.vision_embed_dim = vision_embed_dim
self.vision_image_size = vision_image_size
# Resampler params
self.use_resampler = use_resampler
self.resampler_n_latents = resampler_n_latents
self.resampler_depth = resampler_depth
self.resampler_n_heads = resampler_n_heads
self.resampler_head_dim = resampler_head_dim
# IMPORTANT: Do not do any __init__ args-based checks in the constructor, since
# PretrainedConfig.from_dict first instantiates the class with the config dict and only then
# updates the config object with `kwargs` from from_pretrained, so during the instantiation
# of this object many attributes have default values and haven't yet been overridden.
    # Do any required checks inside `from_pretrained` once the superclass's `from_pretrained` has run.
def check_compatibilities(self):
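        """
        Check this config against the vision backbone it references: load the vision
        model's own config from the Hub and verify that `vision_embed_dim` and
        `vision_image_size` match it. Raises a `ValueError` on any mismatch.
        """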
        # `vision_model_params` is stored as a string; parse it with `ast.literal_eval`
        # rather than `eval` so arbitrary code in a config file cannot be executed.
        vision_model_params = ast.literal_eval(self.vision_model_params)
config = AutoConfig.from_pretrained(self.vision_model_name, **vision_model_params)
if hasattr(config, "vision_config"):
vision_config = config.vision_config
else:
vision_config = config
vision_embed_dim = vision_config.hidden_size
if self.vision_embed_dim != vision_embed_dim:
raise ValueError(
f"vision_embed_dim ({self.vision_embed_dim}) must match the hidden size of the vision model"
f" ({vision_embed_dim})"
)
vision_image_size = vision_config.image_size
if self.vision_image_size != vision_image_size:
raise ValueError(
f"vision_image_size ({self.vision_image_size}) must match the hidden size of the vision model"
f" ({vision_image_size})"
)
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
outputs = super(VOPTConfig, cls).from_pretrained(pretrained_model_name_or_path, **kwargs)
        if isinstance(outputs, tuple):
# When called with return_unused_kwargs=True, the first item will be the config
outputs[0].check_compatibilities()
else:
outputs.check_compatibilities()
return outputs
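

if __name__ == "__main__":
    # A minimal usage sketch (the local directory name below is illustrative):
    # build a config whose vision settings match the ViT backbone's defaults,
    # round-trip it through `save_pretrained`/`from_pretrained`, and let the
    # `from_pretrained` override above run `check_compatibilities`.
    config = VOPTConfig(vision_model_name="google/vit-base-patch16-224")
    config.save_pretrained("vopt-config-demo")
    reloaded = VOPTConfig.from_pretrained("vopt-config-demo")
    print(reloaded.model_type, reloaded.vision_embed_dim, reloaded.vision_image_size)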