# -*- coding: utf-8 -*-
# Copyright (c) 2023-2025, Songlin Yang, Yu Zhang
from __future__ import annotations
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import torch
import torch.nn as nn
import transformers
from einops import rearrange
from torch.nn import functional as F
from fla.modules import FusedRMSNormGated, RMSNorm, ShortConvolution
from fla.ops.delta_rule import chunk_delta_rule, fused_recurrent_delta_rule
if TYPE_CHECKING:
from transformers.processing_utils import Unpack
from fla.models.utils import Cache
def elu_p1(x):
return (F.elu(x, 1., False) + 1.).to(x)
def sum_norm(x):
return (x / x.sum(-1, keepdim=True)).to(x)
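# elu_p1 shifts ELU up by one so outputs stay strictly positive; sum_norm then rescales
# the last dimension to sum to one. Together they implement the `elu` activation with
# `sum` normalization for queries/keys. A quick illustration with hypothetical values:
#   x = torch.tensor([-1.0, 0.0, 1.0])
#   elu_p1(x)            # ~[0.3679, 1.0000, 2.0000], all positive
#   sum_norm(elu_p1(x))  # ~[0.1092, 0.2969, 0.5938], sums to 1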
class DeltaNet(nn.Module):
r"""
    The layer implementation for [Parallelizing Linear Transformers with the Delta Rule over Sequence Length](https://arxiv.org/abs/2406.06484). # noqa
DeltaNet was originally proposed in [Linear Transformers Are Secretly Fast Weight Programmers](https://arxiv.org/abs/2102.11174). # noqa
Args:
mode (str, Optional):
Which DeltaNet kernel to use.
            Currently available: `chunk` and `fused_recurrent`; `fused_chunk` is deprecated.
Default: `chunk`.
hidden_size (int, Optional):
The hidden size of the input. Default: 1024.
expand_k (float, Optional):
The expansion ratio for the key dim. Default: 1.0.
expand_v (float, Optional):
The expansion ratio for the value dim. Default: 1.0.
num_heads (int, Optional):
The number of heads. Default: 4.
use_beta (bool, Optional):
Whether to use beta. Default: `True`.
use_gate (bool, Optional):
Whether to use output gate. Default: `False`.
use_short_conv (bool, Optional):
Whether to use short convolutions. Default: `True`.
conv_size (int, Optional):
The kernel size of the short convolution, only used when `use_short_conv` is `True`. Default: 4.
conv_bias (bool, Optional):
Whether to use bias in the short convolution, only used when `use_short_conv` is `True`. Default: `False`.
allow_neg_eigval (bool, Optional):
Allow negative eigenvalues. Default: `False`. If set to `True`, the beta will be multiplied by 2.
See reference: [Unlocking State-Tracking in Linear RNNs Through Negative Eigenvalues](https://arxiv.org/abs/2411.12537)
layer_idx (int, Optional):
The index of the layer. Default: None.
norm_eps (float, Optional):
The epsilon value for the layernorm/rmsnorm layer. Default: 1e-5.
qk_activation (str, Optional):
The activation function for the query and key. Default: `silu`.
qk_norm (str, Optional):
The normalization method for the query and key. Default: `l2`.
"""
def __init__(
self,
mode: str = 'chunk',
        d_model: Optional[int] = None,
hidden_size: int = 1024,
expand_k: float = 1.0,
expand_v: float = 1.0,
num_heads: int = 4,
use_beta: bool = True,
use_gate: bool = False,
use_short_conv: bool = True,
conv_size: int = 4,
conv_bias: bool = False,
allow_neg_eigval: bool = False,
        layer_idx: Optional[int] = None,
qk_activation: str = 'silu',
qk_norm: str = 'l2',
norm_eps: float = 1e-5,
config = None,
**kwargs
) -> DeltaNet:
super().__init__()
self.mode = mode
self.qk_activation = qk_activation
self.qk_norm = qk_norm
assert self.qk_activation in ['silu', 'relu', 'elu', 'identity']
assert self.qk_norm in ['l2', 'sum']
if d_model is not None:
hidden_size = d_model
self.hidden_size = hidden_size
self.expand_k = expand_k
self.expand_v = expand_v
self.num_heads = num_heads
self.use_gate = use_gate
self.use_short_conv = use_short_conv
self.conv_size = conv_size
self.conv_bias = conv_bias
self.allow_neg_eigval = allow_neg_eigval
self.key_dim = int(hidden_size * expand_k)
self.value_dim = int(hidden_size * expand_v)
self.head_k_dim = self.key_dim // num_heads
self.head_v_dim = self.value_dim // num_heads
self.layer_idx = layer_idx
self.silu = nn.SiLU()
if mode == 'fused_chunk':
raise NotImplementedError("fused_chunk_delta_rule is now deprecated. Please use `chunk_delta_rule` instead.")
        assert mode in ['chunk', 'fused_recurrent'], f"Not supported mode `{mode}`."
assert self.key_dim % num_heads == 0, f"key dim must be divisible by num_heads of {num_heads}"
assert self.value_dim % num_heads == 0, f"value dim must be divisible by num_heads of {num_heads}"
self.q_proj = nn.Linear(hidden_size, self.key_dim, bias=False)
self.k_proj = nn.Linear(hidden_size, self.key_dim, bias=False)
self.v_proj = nn.Linear(hidden_size, self.value_dim, bias=False)
self.use_beta = use_beta
if self.use_beta:
self.b_proj = nn.Linear(hidden_size, self.num_heads, bias=False)
if use_short_conv:
self.conv_size = conv_size
self.q_conv1d = ShortConvolution(
hidden_size=self.key_dim,
kernel_size=conv_size,
activation='silu' if qk_activation == 'silu' else None
)
self.k_conv1d = ShortConvolution(
hidden_size=self.key_dim,
kernel_size=conv_size,
activation='silu' if qk_activation == 'silu' else None
)
self.v_conv1d = ShortConvolution(
hidden_size=self.value_dim,
kernel_size=conv_size,
activation='silu'
)
else:
raise UserWarning(
"ShortConvolution is crucial to the performance. "
"Do not turn it off, i.e., setting `use_short_conv=False` unless you know what you are doing."
)
if use_gate:
self.g_proj = nn.Linear(hidden_size, self.value_dim, bias=False)
self.o_norm = FusedRMSNormGated(self.head_v_dim, eps=norm_eps)
else:
self.o_norm = RMSNorm(self.head_v_dim, eps=norm_eps)
self.o_proj = nn.Linear(self.value_dim, hidden_size, bias=False)
self.apply(self._initialize_weights)
def _initialize_weights(self, module: nn.Module):
if getattr(module, "_is_hf_initialized", False):
return
if isinstance(module, nn.Linear):
nn.init.xavier_uniform_(module.weight, gain=2 ** -2.5)
if module.bias is not None:
nn.init.zeros_(module.bias)
module._is_hf_initialized = True
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
past_key_values: Optional[Cache] = None,
use_cache: Optional[bool] = False,
output_attentions: Optional[bool] = False,
**kwargs: Unpack[Dict]
) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Cache]]:
if attention_mask is not None:
assert len(attention_mask.shape) == 2, (
"Expected attention_mask as a 0-1 matrix with shape [batch_size, seq_len] "
"for padding purposes (0 indicating padding). "
"Arbitrary attention masks of shape [batch_size, seq_len, seq_len] are not allowed."
)
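            # e.g., attention_mask = torch.tensor([[1, 1, 1, 0, 0]]) marks the last two
            # positions of a length-5 sequence as padding (hypothetical example).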
        # For short inputs (e.g., single-token decoding), switch to the fused recurrent kernel,
        # which is faster than the chunked kernel at small sequence lengths.
        mode = 'fused_recurrent' if hidden_states.shape[1] <= 64 else self.mode
last_state = None
if past_key_values is not None and len(past_key_values) > self.layer_idx:
last_state = past_key_values[self.layer_idx]
if self.use_short_conv:
conv_state_q, conv_state_k, conv_state_v = None, None, None
if last_state is not None:
conv_state_q, conv_state_k, conv_state_v = last_state['conv_state']
conv_mask = attention_mask[:, -hidden_states.shape[1]:] if attention_mask is not None else None
position_ids = kwargs.get('position_ids', None)
q = self.q_proj(hidden_states)
q, conv_state_q = self.q_conv1d(x=q,
mask=conv_mask,
cache=conv_state_q,
output_final_state=use_cache,
seq_idx=position_ids)
k = self.k_proj(hidden_states)
k, conv_state_k = self.k_conv1d(x=k,
mask=conv_mask,
cache=conv_state_k,
output_final_state=use_cache,
seq_idx=position_ids)
v = self.v_proj(hidden_states)
v, conv_state_v = self.v_conv1d(x=v,
mask=conv_mask,
cache=conv_state_v,
output_final_state=use_cache,
seq_idx=position_ids)
else:
q = self.q_proj(hidden_states)
k = self.k_proj(hidden_states)
v = self.v_proj(hidden_states)
if self.qk_activation == 'silu':
q, k = self.silu(q), self.silu(k)
v = self.silu(v)
q, k = map(lambda x: rearrange(x, '... (h d) -> ... h d', d=self.head_k_dim), (q, k))
v = rearrange(v, '... (h d) -> ... h d', d=self.head_v_dim)
if self.qk_activation != 'silu':
if self.qk_activation == 'relu':
q, k = q.relu(), k.relu()
elif self.qk_activation == 'elu':
q, k = elu_p1(q), elu_p1(k)
elif self.qk_activation == 'identity':
pass
else:
raise NotImplementedError
if self.qk_norm == 'sum':
q = sum_norm(q).to(q)
k = sum_norm(k).to(k)
if self.use_beta:
beta = self.b_proj(hidden_states)
beta = beta.sigmoid()
else:
beta = q.new_ones(q.shape[0], q.shape[1], q.shape[2])
if self.allow_neg_eigval:
beta = beta * 2.
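            # beta now lies in (0, 2) rather than (0, 1), so the implied transition
            # (I - beta * k k^T) can have negative eigenvalues (see the negative-eigenvalue
            # reference in the class docstring).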
# dealing with padding
if attention_mask is not None:
beta = beta.mul(attention_mask[:, -beta.shape[-2]:, None])
recurrent_state = last_state['recurrent_state'] if last_state is not None else None
cu_seqlens = kwargs.get('cu_seqlens', None)
if mode == 'fused_recurrent':
o, recurrent_state = fused_recurrent_delta_rule(
q=q,
k=k,
v=v,
beta=beta,
initial_state=recurrent_state,
output_final_state=use_cache,
cu_seqlens=cu_seqlens,
                use_qk_l2norm_in_kernel=(self.qk_norm == 'l2')
)
elif mode == 'chunk':
o, recurrent_state = chunk_delta_rule(
q=q,
k=k,
v=v,
beta=beta,
initial_state=recurrent_state,
output_final_state=use_cache,
cu_seqlens=cu_seqlens,
                use_qk_l2norm_in_kernel=(self.qk_norm == 'l2')
)
else:
raise NotImplementedError(f"Not supported mode `{mode}`.")
if past_key_values is not None:
past_key_values.update(
recurrent_state=recurrent_state,
conv_state=(conv_state_q, conv_state_k, conv_state_v) if self.use_short_conv else None,
layer_idx=self.layer_idx,
offset=q.shape[1]
)
if self.use_gate:
g = rearrange(self.g_proj(hidden_states), '... (h d) -> ... h d', d=self.head_v_dim)
o = self.o_norm(o, g)
else:
o = self.o_norm(o)
o = rearrange(o, 'b t h d -> b t (h d)')
o = self.o_proj(o)
return o, None, past_key_values
class Cache(transformers.cache_utils.Cache):
"""
    A cache used for storing the intermediate states produced by flash linear attention models.
    For each layer it stores a dict that may hold a recurrent state, attention key/value states,
    a convolution state, and a feed-forward state (see `update`).
"""
is_compileable = True
def __init__(
self,
seen_tokens: int = 0
) -> Cache:
super().__init__(layers=[0])
self.states: List[Dict[str, Any]] = []
self._seen_tokens = seen_tokens # Used in `generate` to keep tally of how many tokens the cache has seen
def __getitem__(self, layer_idx: int) -> Dict[str, Any]:
if layer_idx < len(self):
return self.states[layer_idx]
else:
raise KeyError(f"Cache only has {len(self)} layers, attempted to access layer with index {layer_idx}")
def __iter__(self):
for state in self.states:
yield state
def __len__(self):
return len(self.states)
def reset(self):
for state in self.states:
for key in state:
if state[key] is not None:
                    if isinstance(state[key], tuple):
for subkey in state[key]:
subkey.zero_()
else:
state[key].zero_()
self._seen_tokens = 0
def update(
self,
recurrent_state: Optional[Tuple[torch.Tensor]] = None,
attn_state: Optional[Tuple[torch.Tensor]] = None,
conv_state: Optional[Tuple[torch.Tensor]] = None,
ffn_state: Optional[Tuple[torch.Tensor]] = None,
layer_idx: int = 0,
offset: Optional[int] = 1,
cache_kwargs: Optional[Dict[str, Any]] = None,
) -> Dict[str, Any]:
"""
Args:
recurrent_state (`torch.Tensor`):
The new recurrent state to cache.
attn_state (`Tuple[torch.Tensor]`):
The new attention key/value states to cache.
conv_state (`Tuple[torch.Tensor]`):
The new convolution state to cache.
ffn_state (`Tuple[torch.Tensor]`):
The new feed-forward state to cache.
layer_idx (`int`, defaults to 0):
The index of the layer to cache the states for.
offset (`int`, defaults to 1):
The number of new tokens being processed.
cache_kwargs (`Dict[str, Any]`):
Additional arguments for the cache subclass.
Return:
Dictionary of the updated state.
"""
if cache_kwargs is None:
cache_kwargs = {}
        if attn_state is not None:
            if not isinstance(attn_state, (tuple, list)):
                raise ValueError("`attn_state` must be a tuple of tensors for key/value states")
            input_size = attn_state[0].shape[1]
            window_size = cache_kwargs.get('window_size', None)
if len(self.states) <= layer_idx:
# update the number of seen tokens
if layer_idx == 0:
self._seen_tokens += offset
if attn_state is not None:
if window_size is not None and input_size > window_size:
attn_state = [state[:, -window_size:].contiguous() for state in attn_state]
state = dict(
recurrent_state=recurrent_state,
attn_state=attn_state,
conv_state=conv_state,
ffn_state=ffn_state
)
self.states.append(state)
else:
# update the number of seen tokens
if layer_idx == len(self.states) - 1:
self._seen_tokens += offset
state = self.states[layer_idx]
if recurrent_state is not None:
state['recurrent_state'].copy_(recurrent_state)
if attn_state is not None:
if window_size is not None and state['attn_state'][0].shape[1] == window_size:
for i, (old_state, new_state) in enumerate(zip(state['attn_state'], attn_state)):
                        # keep the cached window at a fixed size: shift the key/value states
                        # left by `input_size`, then overwrite the tail with the new states
old_state = old_state.roll(-input_size, 1)
# replace the last `input_size` tokens with the new key/value states
old_state[:, -input_size:] = new_state
state['attn_state'][i].copy_(old_state)
else:
                    attn_state = [
                        torch.cat([old_state, new_state], 1)
                        for old_state, new_state in zip(state['attn_state'], attn_state)
                    ]
                    # the concatenated states have a new, longer shape, so replace the stored
                    # reference instead of copying into the old buffers
                    state['attn_state'] = attn_state
if conv_state is not None:
conv_state_q, conv_state_k, conv_state_v = state['conv_state']
conv_state_q.copy_(conv_state[0])
conv_state_k.copy_(conv_state[1])
conv_state_v.copy_(conv_state[2])
if ffn_state is not None:
state['ffn_state'].copy_(ffn_state)
return state
def get_seq_length(self, layer_idx: Optional[int] = 0) -> int:
"""Returns the sequence length of the cached states. A layer index can be optionally passed."""
if len(self.states) <= layer_idx:
return 0
return self._seen_tokens
def get_max_length(self) -> Optional[int]:
"""Returns the maximum sequence length of the cached states. Cache does not have a maximum length."""
return None
def to_legacy_cache(self) -> Tuple:
return tuple(self.states)
@classmethod
@torch.compiler.disable
def from_legacy_cache(
cls,
past_key_values: Optional[Tuple] = None,
seen_tokens: int = 0
) -> Cache:
"""Converts a cache in the legacy cache format into an equivalent `Cache`."""
cache = cls(seen_tokens)
        # `to_legacy_cache` returns a tuple, so accept both tuples and lists here
        if isinstance(past_key_values, (list, tuple)):
for layer_idx in range(len(past_key_values)):
cache.states.append(past_key_values[layer_idx])
return cache
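if __name__ == "__main__":
    # A minimal smoke-test sketch, not part of the released model code. It assumes the
    # `fla` package is installed and a CUDA device is available (the delta-rule kernels
    # are Triton kernels); all sizes below are hypothetical.
    device, dtype = 'cuda', torch.bfloat16
    layer = DeltaNet(hidden_size=256, num_heads=4, layer_idx=0).to(device=device, dtype=dtype)
    cache = Cache()
    # Prefill: process a prompt of 16 tokens and populate the per-layer cache.
    prompt = torch.randn(1, 16, 256, device=device, dtype=dtype)
    out, _, cache = layer(prompt, past_key_values=cache, use_cache=True)
    print(out.shape, cache.get_seq_length())  # torch.Size([1, 16, 256]) 16
    # Decode: feed one new token; the short sequence triggers the `fused_recurrent` kernel
    # and the recurrent/convolution states in the cache are updated in place.
    step = torch.randn(1, 1, 256, device=device, dtype=dtype)
    out, _, cache = layer(step, past_key_values=cache, use_cache=True)
    print(out.shape, cache.get_seq_length())  # torch.Size([1, 1, 256]) 17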