from dataclasses import fields
from typing import List, Optional, Tuple, Union

import torch
from transformers import PreTrainedModel
from transformers.modeling_outputs import CausalLMOutputWithPast
from transformers.models.auto import AutoModelForCausalLM

from olmo.config import ModelConfig
from olmo.model import OLMo

from .configuration_olmo import OLMoConfig


def create_model_config_from_pretrained_config(config: OLMoConfig):
    """
    Utility function that builds a native OLMo ``ModelConfig`` by copying every
    ``ModelConfig`` field from the Hugging Face ``OLMoConfig``.
    """
    kwargs = {}
    for field in fields(ModelConfig):
        kwargs[field.name] = getattr(config, field.name)
    model_config = ModelConfig(**kwargs)
    return model_config
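
# Illustrative round trip (the field names `d_model` and `n_layers` come from
# `olmo.config.ModelConfig`; the values here are made up):
#
#   hf_config = OLMoConfig(d_model=4096, n_layers=32)
#   model_config = create_model_config_from_pretrained_config(hf_config)
#   assert model_config.d_model == hf_config.d_model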


class OLMoForCausalLM(PreTrainedModel):
    """
    Extremely barebones HF model wrapper.
    """

    config_class = OLMoConfig
    base_model_prefix = "model"
    _no_split_modules = ["OLMoBlock"]

    def __init__(self, config: OLMoConfig, model: Optional[OLMo] = None, init_params: bool = False):
        super().__init__(config)

        if not model:
            model_config = create_model_config_from_pretrained_config(config)
            # Initialize model (always on CPU to start with so we don't run out of GPU memory).
            model_config.init_device = "cpu"
            self.model = OLMo(model_config, init_params=init_params)
        else:
            self.model = model

    def forward(
        self,
        input_ids: torch.LongTensor = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        attention_bias: Optional[torch.Tensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, CausalLMOutputWithPast]:
        if use_cache is None:
            use_cache = self.config.use_cache

        if output_attentions:
            raise ValueError("output_attentions is not yet supported in OLMo")

        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # decoder outputs consist of (dec_features, layer_state, dec_hidden, dec_attn)
        outputs = self.model.forward(
            input_ids=input_ids,
            input_embeddings=inputs_embeds,
            attention_mask=attention_mask,
            attention_bias=attention_bias,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_hidden_states=output_hidden_states,
        )

        logits = outputs.logits
        hidden_states = outputs.hidden_states

        loss = None
        if labels is not None:
            # Shift so that tokens < n predict n
            shift_logits = logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            # Flatten the tokens
            loss_fct = torch.nn.CrossEntropyLoss()
            shift_logits = shift_logits.view(-1, self.config.embedding_size)
            shift_labels = shift_labels.view(-1)
            # Enable model parallelism
            shift_labels = shift_labels.to(shift_logits.device)
            loss = loss_fct(shift_logits, shift_labels)
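            # Worked example (illustrative): for labels [t0, t1, t2, t3], the
            # logits at positions 0..2 are scored against targets [t1, t2, t3],
            # so each position is trained to predict the next token.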

        if not return_dict:
            output = (logits,) + outputs[1:]
            return (loss,) + output if loss is not None else output

        return CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.attn_key_values,
            hidden_states=hidden_states,
        )

    def can_generate(self) -> bool:
        return True

    def prepare_inputs_for_generation(
        self, input_ids: torch.LongTensor, past_key_values: Optional[List[Tuple]] = None, **kwargs
    ):
        if past_key_values:
            # With cached key/values, the model only needs to process the last generated token.
            input_ids = input_ids[:, -1:]
        model_inputs = {"input_ids": input_ids, "past_key_values": past_key_values}

        model_inputs.update(kwargs)
        model_inputs["use_cache"] = kwargs.pop("use_cache", self.config.use_cache)
        return model_inputs
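
    # Note (illustrative): `generate()` calls `prepare_inputs_for_generation` once
    # per decoding step, so after the first step the model processes a single new
    # token plus the cached key/values rather than re-encoding the full sequence.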

    # TODO: these are required to make the implementation complete.
    # def resize_position_embeddings(self, new_num_position_embeddings: int):
    #     pass
    #
    # def get_position_embeddings(self) -> Union[nn.Embedding, Tuple[nn.Embedding]]:
    #     pass
    #
    # def _reorder_cache(self, past_key_values, beam_idx):
    #     pass

    def get_input_embeddings(self) -> torch.nn.Module:
        return self.model.transformer.wte

    def set_input_embeddings(self, value: torch.nn.Module):
        self.model.transformer.wte = value

    def get_output_embeddings(self):
        if self.config.weight_tying:
            return self.model.transformer.wte
        else:
            return self.model.transformer.ff_out

    def set_output_embeddings(self, value: torch.nn.Module):
        if self.config.weight_tying:
            self.model.transformer.wte = value
        else:
            self.model.transformer.ff_out = value

    def tie_weights(self):
        if self.config.weight_tying:
            self.model.transformer.ff_out = self.model.transformer.wte
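    # Note: with `weight_tying` enabled, the input embedding `wte` doubles as the
    # output projection, so tying simply aliases `ff_out` to `wte`; no separate
    # unembedding matrix is trained.
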
# Register the model so that it is available for transformer pipelines, auto-loading, etc.
AutoModelForCausalLM.register(OLMoConfig, OLMoForCausalLM)
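
# Minimal usage sketch (illustrative; the checkpoint path is hypothetical and this
# assumes OLMoConfig is likewise registered with AutoConfig so auto-loading resolves):
#
#   from transformers import AutoModelForCausalLM
#
#   model = AutoModelForCausalLM.from_pretrained("path/to/olmo-checkpoint")
#   input_ids = torch.tensor([[1, 2, 3]])
#   output = model.generate(input_ids, max_new_tokens=16)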