zpn committed
Commit a892f3a
Parent: 8a307ec

Update configuration_hf_nomic_bert.py

Files changed (1): configuration_hf_nomic_bert.py (+5 −2)
configuration_hf_nomic_bert.py CHANGED
@@ -4,7 +4,8 @@ from transformers import GPT2Config
 class NomicBertConfig(GPT2Config):
     model_type = "nomic_bert"
 
-    def __init__(self,
+    def __init__(
+        self,
         prenorm=False,
         parallel_block=False,
         parallel_block_tied_norm=False,
@@ -26,6 +27,7 @@ class NomicBertConfig(GPT2Config):
         pad_vocab_size_multiple=1,
         tie_word_embeddings=True,
         rotary_scaling_factor=1.0,
+        max_trained_positions=2048,
         **kwargs,
     ):
         self.prenorm = prenorm
@@ -49,5 +51,6 @@ class NomicBertConfig(GPT2Config):
         self.dense_seq_output = dense_seq_output
         self.pad_vocab_size_multiple = pad_vocab_size_multiple
         self.rotary_scaling_factor = rotary_scaling_factor
+        self.max_trained_positions = max_trained_positions
 
-        super().__init__(**kwargs)
+        super().__init__(**kwargs)
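
In effect, the commit threads one new field, max_trained_positions (default 2048), through the config alongside the existing rotary_scaling_factor. A minimal sketch of setting and reading it, assuming the file is importable as a local module; how the modeling code actually combines max_trained_positions with rotary_scaling_factor (e.g. for longer-than-trained contexts) is not shown by this diff, so the long-context override below is only illustrative:

    # Sketch only: NomicBertConfig and its keyword arguments are exactly
    # those defined in this file; the module-style import and the
    # long-context override are assumptions for illustration.
    from configuration_hf_nomic_bert import NomicBertConfig

    cfg = NomicBertConfig()
    assert cfg.max_trained_positions == 2048  # the new default

    # Hypothetical long-context setup: record the trained window as 2048
    # and let the modeling code stretch rotary embeddings by a factor of 4.
    cfg_long = NomicBertConfig(
        rotary_scaling_factor=4.0,
        max_trained_positions=2048,
    )

Like the other fields, max_trained_positions is a plain attribute on the config, so it round-trips through save_pretrained / from_pretrained like any other value.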