fix for transformers-4.33.1
modeling_decicoder.py CHANGED (+1 -0)
@@ -36,6 +36,7 @@ class DeciCoderAttention(LlamaAttention):
         self.num_key_value_groups = self.num_heads // self.num_key_value_heads
         self.pretraining_tp = config.pretraining_tp
         self.max_position_embeddings = config.max_position_embeddings
+        self.rope_theta = getattr(config, 'rope_theta', None)
 
         if (self.head_dim * self.num_heads) != self.hidden_size:
             raise ValueError(
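
Context for the fix: transformers 4.33 added a rope_theta field to LlamaConfig, which LlamaAttention stores as self.rope_theta and uses as the rotary-embedding base. Since DeciCoderAttention sets up these attributes itself, it must define self.rope_theta as well, and configs exported for older transformers versions may not carry the field, hence the defensive getattr read with a None fallback. A minimal sketch of that fallback behavior (the two config classes below are hypothetical stand-ins, not part of this repository):

# Hypothetical stand-ins for configs exported before/after the
# rope_theta field existed; not part of this repository.
class OldDeciCoderConfig:
    max_position_embeddings = 2048          # no rope_theta attribute

class NewDeciCoderConfig:
    max_position_embeddings = 2048
    rope_theta = 10000.0                    # default used by LlamaConfig

for config in (OldDeciCoderConfig(), NewDeciCoderConfig()):
    # getattr with a default avoids an AttributeError on old configs
    rope_theta = getattr(config, 'rope_theta', None)
    print(type(config).__name__, '->', rope_theta)

# Output:
# OldDeciCoderConfig -> None
# NewDeciCoderConfig -> 10000.0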