Panchovix committed on
Commit
211cff1
1 Parent(s): e88ea36

Upload llama_rope_scaled_monkey_patch.py

Files changed (1)
  1. llama_rope_scaled_monkey_patch.py +65 -0
llama_rope_scaled_monkey_patch.py ADDED
@@ -0,0 +1,65 @@
+ import torch
+ import transformers
+ import transformers.models.llama.modeling_llama
+ from einops import rearrange
+ import random
+
+ # This monkey patch file is not needed if using ExLlama, or if using `trust_remote_code=True`
+
+ class ScaledRotaryEmbedding(torch.nn.Module):
+     def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None):
+         super().__init__()
+         inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2).float().to(device) / dim))
+         self.register_buffer("inv_freq", inv_freq)
+
+         max_position_embeddings = 8192
+
+         # Build here to make `torch.jit.trace` work.
+         self.max_seq_len_cached = max_position_embeddings
+         t = torch.arange(
+             self.max_seq_len_cached,
+             device=self.inv_freq.device,
+             dtype=self.inv_freq.dtype,
+         )
+
+         self.scale = 1 / 4
+         t *= self.scale
+
+         freqs = torch.einsum("i,j->ij", t, self.inv_freq)
+         # Different from paper, but it uses a different permutation in order to obtain the same calculation
+         emb = torch.cat((freqs, freqs), dim=-1)
+         self.register_buffer(
+             "cos_cached", emb.cos()[None, None, :, :], persistent=False
+         )
+         self.register_buffer(
+             "sin_cached", emb.sin()[None, None, :, :], persistent=False
+         )
+
+     def forward(self, x, seq_len=None):
+         # x: [bs, num_attention_heads, seq_len, head_size]
+         # This `if` block is unlikely to be run after we build sin/cos in `__init__`. Keep the logic here just in case.
+         if seq_len > self.max_seq_len_cached:
+             self.max_seq_len_cached = seq_len
+             t = torch.arange(
+                 self.max_seq_len_cached, device=x.device, dtype=self.inv_freq.dtype
+             )
+             t *= self.scale
+             freqs = torch.einsum("i,j->ij", t, self.inv_freq)
+             # Different from paper, but it uses a different permutation in order to obtain the same calculation
+             emb = torch.cat((freqs, freqs), dim=-1).to(x.device)
+             self.register_buffer(
+                 "cos_cached", emb.cos()[None, None, :, :], persistent=False
+             )
+             self.register_buffer(
+                 "sin_cached", emb.sin()[None, None, :, :], persistent=False
+             )
+         return (
+             self.cos_cached[:, :, :seq_len, ...].to(dtype=x.dtype),
+             self.sin_cached[:, :, :seq_len, ...].to(dtype=x.dtype),
+         )
+
+
+ def replace_llama_rope_with_scaled_rope():
+     transformers.models.llama.modeling_llama.LlamaRotaryEmbedding = (
+         ScaledRotaryEmbedding
+     )