root committed on
Commit
34382d7
1 Parent(s): 3d536b8

Upload model

Files changed (41)
  1. config.json +31 -0
  2. configuration_stablelm_epoch.py +110 -0
  3. model-00001-of-00035.safetensors +3 -0
  4. model-00002-of-00035.safetensors +3 -0
  5. model-00003-of-00035.safetensors +3 -0
  6. model-00004-of-00035.safetensors +3 -0
  7. model-00005-of-00035.safetensors +3 -0
  8. model-00006-of-00035.safetensors +3 -0
  9. model-00007-of-00035.safetensors +3 -0
  10. model-00008-of-00035.safetensors +3 -0
  11. model-00009-of-00035.safetensors +3 -0
  12. model-00010-of-00035.safetensors +3 -0
  13. model-00011-of-00035.safetensors +3 -0
  14. model-00012-of-00035.safetensors +3 -0
  15. model-00013-of-00035.safetensors +3 -0
  16. model-00014-of-00035.safetensors +3 -0
  17. model-00015-of-00035.safetensors +3 -0
  18. model-00016-of-00035.safetensors +3 -0
  19. model-00017-of-00035.safetensors +3 -0
  20. model-00018-of-00035.safetensors +3 -0
  21. model-00019-of-00035.safetensors +3 -0
  22. model-00020-of-00035.safetensors +3 -0
  23. model-00021-of-00035.safetensors +3 -0
  24. model-00022-of-00035.safetensors +3 -0
  25. model-00023-of-00035.safetensors +3 -0
  26. model-00024-of-00035.safetensors +3 -0
  27. model-00025-of-00035.safetensors +3 -0
  28. model-00026-of-00035.safetensors +3 -0
  29. model-00027-of-00035.safetensors +3 -0
  30. model-00028-of-00035.safetensors +3 -0
  31. model-00029-of-00035.safetensors +3 -0
  32. model-00030-of-00035.safetensors +3 -0
  33. model-00031-of-00035.safetensors +3 -0
  34. model-00032-of-00035.safetensors +3 -0
  35. model-00033-of-00035.safetensors +3 -0
  36. model-00034-of-00035.safetensors +3 -0
  37. model-00035-of-00035.safetensors +3 -0
  38. model.safetensors.index.json +1 -0
  39. special_tokens_map.json +5 -0
  40. tokenizer.json +0 -0
  41. tokenizer_config.json +211 -0
config.json ADDED
@@ -0,0 +1,31 @@
+ {
+   "_name_or_path": "/content/models/StableLM-3B",
+   "architectures": [
+     "StableLMEpochForCausalLM"
+   ],
+   "auto_map": {
+     "AutoConfig": "configuration_stablelm_epoch.StableLMEpochConfig",
+     "AutoModelForCausalLM": "modeling_stablelm_epoch.StableLMEpochForCausalLM"
+   },
+   "bos_token_id": 0,
+   "eos_token_id": 0,
+   "hidden_act": "silu",
+   "hidden_size": 2560,
+   "initializer_range": 0.02,
+   "intermediate_size": 6912,
+   "max_position_embeddings": 4096,
+   "model_type": "stablelm_epoch",
+   "norm_eps": 1e-05,
+   "num_attention_heads": 32,
+   "num_heads": 32,
+   "num_hidden_layers": 32,
+   "num_key_value_heads": 32,
+   "rope_pct": 0.25,
+   "rope_theta": 10000,
+   "rotary_scaling_factor": 1.0,
+   "tie_word_embeddings": false,
+   "torch_dtype": "bfloat16",
+   "transformers_version": "4.35.2",
+   "use_cache": true,
+   "vocab_size": 50304
+ }
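Because "auto_map" above points AutoConfig and AutoModelForCausalLM at custom code shipped with the repo, loading this checkpoint through transformers requires trust_remote_code=True. A minimal loading sketch; the repo id is a placeholder, not the actual upload path:

    # Sketch: load a checkpoint whose config uses auto_map custom classes.
    from transformers import AutoConfig, AutoModelForCausalLM

    repo = "your-namespace/StableLM-3B"  # hypothetical repo id; substitute the real one
    config = AutoConfig.from_pretrained(repo, trust_remote_code=True)
    model = AutoModelForCausalLM.from_pretrained(
        repo,
        trust_remote_code=True,  # required because auto_map resolves to custom code
        torch_dtype="auto",      # honors "torch_dtype": "bfloat16" from config.json
    )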
configuration_stablelm_epoch.py ADDED
@@ -0,0 +1,110 @@
+ # coding=utf-8
+ # Copyright 2023 Stability and The HuggingFace Inc. team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """StableLM Epoch model configuration"""
+ from transformers import PretrainedConfig
+ from transformers.utils import logging
+
+
+ logger = logging.get_logger(__name__)
+
+
+ class StableLMEpochConfig(PretrainedConfig):
+     r"""
+     Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+     documentation from [`PretrainedConfig`] for more information.
+
+     Args:
+         vocab_size (`int`, *optional*, defaults to 50_304):
+             Vocabulary size of the StableLM model. Defines the number of different tokens that
+             can be represented by the `inputs_ids` passed when calling [`StableLMEpochModel`].
+         intermediate_size (`int`, *optional*, defaults to 6912):
+             Dimension of the MLP representations.
+         hidden_size (`int`, *optional*, defaults to 2560):
+             Dimension of the decoder layers and the pooler layer.
+         num_hidden_layers (`int`, *optional*, defaults to 32):
+             Number of hidden layers in the Transformer decoder.
+         num_attention_heads (`int`, *optional*, defaults to 32):
+             Number of attention heads for each attention layer in the Transformer decoder.
+         num_key_value_heads (`int`, *optional*, defaults to 32):
+             The number of key-value heads used to implement Grouped Query Attention (GQA). If
+             `num_key_value_heads=num_attention_heads`, the model uses Multi-Head Attention (MHA); if
+             `num_key_value_heads=1`, the model uses Multi-Query Attention (MQA); otherwise GQA is used. When
+             converting a multi-head checkpoint to a GQA checkpoint, each group's key and value head should be
+             constructed by mean-pooling all the original heads within that group. For more details, check out
+             [this paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, it defaults to
+             `num_attention_heads`.
+         hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
+             The non-linear activation function (function or string).
+         rope_pct (`float`, *optional*, defaults to 0.25):
+             Percentage of hidden dimensions to allocate to rotary embeddings.
+         rope_theta (`float`, *optional*, defaults to 10000.0):
+             The base period of the RoPE embeddings.
+         max_position_embeddings (`int`, *optional*, defaults to 4096):
+             The maximum sequence length that this model might ever be used with.
+             Typically set this to something large just in case (e.g., 512 or 1024 or 2048).
+         initializer_range (`float`, *optional*, defaults to 0.02):
+             The standard deviation of the truncated_normal_initializer for initializing
+             all weight matrices.
+         norm_eps (`float`, *optional*, defaults to 1e-5):
+             The epsilon used by the normalization layers.
+         use_cache (`bool`, *optional*, defaults to `True`):
+             Whether or not the model should return the last key/values attentions
+             (not used by all models). Only relevant if `config.is_decoder=True`.
+         tie_word_embeddings (`bool`, *optional*, defaults to `False`):
+             Whether to tie weight embeddings.
+     """
+     model_type = "stablelm_epoch"
+     keys_to_ignore_at_inference = ["past_key_values"]
+
+     def __init__(
+         self,
+         vocab_size=50_304,
+         intermediate_size=6912,
+         hidden_size=2560,
+         num_hidden_layers=32,
+         num_attention_heads=32,
+         num_key_value_heads=32,
+         hidden_act="silu",
+         rope_pct=0.25,
+         rope_theta=10_000,
+         max_position_embeddings=4096,
+         initializer_range=0.02,
+         norm_eps=1.0e-5,
+         use_cache=True,
+         bos_token_id=0,
+         eos_token_id=2,
+         tie_word_embeddings=False,
+         **kwargs,
+     ):
+         self.vocab_size = vocab_size
+         self.max_position_embeddings = max_position_embeddings
+         self.intermediate_size = intermediate_size
+         self.hidden_size = hidden_size
+         self.num_hidden_layers = num_hidden_layers
+         self.num_attention_heads = num_attention_heads
+         self.num_key_value_heads = num_key_value_heads
+         self.hidden_act = hidden_act
+         self.rope_pct = rope_pct
+         self.rope_theta = rope_theta
+         self.initializer_range = initializer_range
+         self.norm_eps = norm_eps
+         self.use_cache = use_cache
+         self.tie_word_embeddings = tie_word_embeddings
+         super().__init__(
+             bos_token_id=bos_token_id,
+             eos_token_id=eos_token_id,
+             tie_word_embeddings=tie_word_embeddings,
+             **kwargs,
+         )
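A quick sanity-check sketch of the defaults defined above, assuming the file is importable from the working directory:

    # Sketch: instantiate the config with its defaults and inspect key fields.
    from configuration_stablelm_epoch import StableLMEpochConfig

    cfg = StableLMEpochConfig()       # all defaults, as listed in __init__ above
    assert cfg.hidden_size == 2560
    assert cfg.rope_pct == 0.25       # only 25% of each head's dims get rotary embeddings
    print(cfg.num_key_value_heads)    # 32 == num_attention_heads, i.e. plain MHA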
model-00001-of-00035.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cad99b98ee94c59329f71a6064de5afe5b3e89b438e01a2785cee826ef2f6821
+ size 257556616
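These three-line files are Git LFS pointers: the shard itself lives in LFS storage, and the pointer records its sha256 ("oid") and byte size. A sketch for verifying a downloaded shard against its pointer (file name taken from this diff):

    # Sketch: check a downloaded shard's sha256 against the LFS pointer's oid.
    import hashlib

    def sha256_of(path: str, chunk: int = 1 << 20) -> str:
        h = hashlib.sha256()
        with open(path, "rb") as f:
            while block := f.read(chunk):  # stream in 1 MiB chunks
                h.update(block)
        return h.hexdigest()

    expected = "cad99b98ee94c59329f71a6064de5afe5b3e89b438e01a2785cee826ef2f6821"
    assert sha256_of("model-00001-of-00035.safetensors") == expected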
model-00002-of-00035.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:415b0487b436334ff32cee993becb8e0c1a9b612ffa653f3f920a32ba9b1b745
+ size 158629304
model-00003-of-00035.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a4e478acda93bc1365dea58f5670c2054f670012df9653df2182a334b83cd1fc
+ size 158618864
model-00004-of-00035.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:20a98c35d3f06d2727382d630fb622ed86602f05201d4e36530ba2a3f3243819
+ size 158618864
model-00005-of-00035.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f909ee2c7281b56f8e130c85a78af6e5c7a0726fe90a6c40785bdf8085a87580
+ size 158608408
model-00006-of-00035.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2f48cb7244b2bd50167e895ad07edeed752e0b5f5cb0f2162d36604f08c1b7ae
+ size 158597944
model-00007-of-00035.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d9fc3126a4e4a2ff2c0344c7282d4fb139075cbbeb835e17fd81eb50b23d0864
+ size 158629312
model-00008-of-00035.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0467d41b07675242263dd27ba2d5e04fd82175eda449805d81cf6318ff183b1e
+ size 158618856
model-00009-of-00035.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:48630e77a2557430311a56ef2f3311e1d5e15c2d6d89ac6e9726013660a637d8
+ size 158618856
model-00010-of-00035.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bfb2b2860e5df49e2b1ca3a96e601181a49f635ab37d5936eb9a13d962d273e6
+ size 158618856
model-00011-of-00035.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:38e8f32367ca903db2c44534dd2a876675c76d546aa29a4628f83ac360a7defa
+ size 158618856
model-00012-of-00035.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a21231d1c360231bdd9b8e83227be229e2e3763be9eb3ec3d6c12ead4dfb5f2e
+ size 158639768
model-00013-of-00035.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5e70e7ad9d803a0aae4d156b54a56cda418cfa7fdfa0307c18fc73699ac314d1
+ size 158618872
model-00014-of-00035.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:db54223efab9dcf6c0905c57d784ba4e7ad91e0368e1dcd3886e02e1ca3b53fe
+ size 158618872
model-00015-of-00035.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9805cde33ba747cc1fceb29f4aca182e6ec8ddc7224f4fdec523840853dad223
+ size 158618872
model-00016-of-00035.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:70e30d8748be612f871b3014e2ed08c88303de3634978651d851f05e5aff6ea0
+ size 158618872
model-00017-of-00035.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a475eeff76bb6d2e9ce102f65263cd4bfbea74ef91eddd00084bf40b3aa2eb8c
+ size 158618872
model-00018-of-00035.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:07292e3c7c4630e05582696e9e67785a4eb52705c7f55c2cea0272ada48ed3a7
+ size 158618872
model-00019-of-00035.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:34aabdcbc2eaed2175cd54ab6f833e75aa734c4c83be6a4bfa99610ad170a3d2
+ size 158618872
model-00020-of-00035.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:781310f005f0f78ff4ab3d663200b60244c240f137cda13021d82103ce5355a1
+ size 158618872
model-00021-of-00035.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b1c0a6b4769049a0559ace8796b57b0dc4cf79389f251d1538898d57dfb45844
+ size 158618872
model-00022-of-00035.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:40879e45708d09a2219ef9cc665be6e93f17b80cc01a12668655b1f38ffaf92e
+ size 158618872
model-00023-of-00035.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:88451a83603276e2fbc37fb8e5b956c21883013be8ef9843db44be04364a1ba3
+ size 158618872
model-00024-of-00035.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3b2b4b32bd934cd727117bde8f83d8c5c094a935db3129e1b75e90f1468ba5a9
+ size 158608416
model-00025-of-00035.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:827d1f09f71e7d2ef3bcfe05cef1819aa0bc5d9152d549b032c726877832e1c3
+ size 158629312
model-00026-of-00035.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cb4dced55865fb7ef2e38305a5fa06cfadcd705896341f5bebf00c30bfe7283b
+ size 158618872
model-00027-of-00035.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2449d7f0062f5bf2092f4a3ef2668792a675c1e3087ed2ae3087f10ccabff3de
+ size 158618872
model-00028-of-00035.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b65a426db112aa56a79d5cf1ec31367fc348bb2b21a687621f648931d50e9f73
+ size 158618872
model-00029-of-00035.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:feb10ba5b2d82aa0e5d3929c51e996d4d849e321b6a8a3d3bf454be2e26739ba
+ size 158618872
model-00030-of-00035.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f75f71022765cdbc73e09210820886d35542c5e13639967f4afa266848fe0739
+ size 158608416
model-00031-of-00035.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:479f8718971f6f1d9a9bce40db49669dbd5fbbef4b0bf51aa759193146fae847
+ size 52429288
model-00032-of-00035.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:854c6c4652d86ec6645ec93af1fb1d75af7ed688a46ca3fcd2c910145578d355
+ size 257556608
model-00033-of-00035.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:48e751952dbb37ed9a70195d2d7fe8b34dd6fb6276a433ad4b676b286de9e725
+ size 141589608
model-00034-of-00035.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4326f5523d19e65390253c47a95ca0c8797f8ddda70027b55e4ad404cd1882ee
+ size 158618864
model-00035-of-00035.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4766d49a6acbf654b83815293c9bb850f2fde2bad4888ad45d49ca5cc1c23801
+ size 123229272
model.safetensors.index.json ADDED
@@ -0,0 +1 @@
+ {"metadata": {"mergekit_version": "0.0.3"}, "weight_map": {"model.embed_tokens.weight": "model-00001-of-00035.safetensors", "model.layers.0.input_layernorm.bias": "model-00002-of-00035.safetensors", "model.layers.0.input_layernorm.weight": "model-00002-of-00035.safetensors", "model.layers.0.mlp.down_proj.weight": "model-00002-of-00035.safetensors", "model.layers.0.mlp.gate_proj.weight": "model-00002-of-00035.safetensors", "model.layers.0.mlp.up_proj.weight": "model-00002-of-00035.safetensors", "model.layers.0.post_attention_layernorm.bias": "model-00002-of-00035.safetensors", "model.layers.0.post_attention_layernorm.weight": "model-00002-of-00035.safetensors", "model.layers.0.self_attn.k_proj.weight": "model-00002-of-00035.safetensors", "model.layers.0.self_attn.o_proj.weight": "model-00002-of-00035.safetensors", "model.layers.0.self_attn.q_proj.weight": "model-00002-of-00035.safetensors", "model.layers.0.self_attn.v_proj.weight": "model-00002-of-00035.safetensors", "model.layers.1.input_layernorm.bias": "model-00002-of-00035.safetensors", "model.layers.1.input_layernorm.weight": "model-00002-of-00035.safetensors", "model.layers.1.mlp.down_proj.weight": "model-00003-of-00035.safetensors", "model.layers.1.mlp.gate_proj.weight": "model-00003-of-00035.safetensors", "model.layers.1.mlp.up_proj.weight": "model-00003-of-00035.safetensors", "model.layers.1.post_attention_layernorm.bias": "model-00003-of-00035.safetensors", "model.layers.1.post_attention_layernorm.weight": "model-00003-of-00035.safetensors", "model.layers.1.self_attn.k_proj.weight": "model-00003-of-00035.safetensors", "model.layers.1.self_attn.o_proj.weight": "model-00003-of-00035.safetensors", "model.layers.1.self_attn.q_proj.weight": "model-00003-of-00035.safetensors", "model.layers.1.self_attn.v_proj.weight": "model-00003-of-00035.safetensors", "model.layers.2.input_layernorm.bias": "model-00003-of-00035.safetensors", "model.layers.2.input_layernorm.weight": "model-00003-of-00035.safetensors", "model.layers.2.mlp.down_proj.weight": "model-00004-of-00035.safetensors", "model.layers.2.mlp.gate_proj.weight": "model-00004-of-00035.safetensors", "model.layers.2.mlp.up_proj.weight": "model-00004-of-00035.safetensors", "model.layers.2.post_attention_layernorm.bias": "model-00004-of-00035.safetensors", "model.layers.2.post_attention_layernorm.weight": "model-00004-of-00035.safetensors", "model.layers.2.self_attn.k_proj.weight": "model-00004-of-00035.safetensors", "model.layers.2.self_attn.o_proj.weight": "model-00004-of-00035.safetensors", "model.layers.2.self_attn.q_proj.weight": "model-00004-of-00035.safetensors", "model.layers.2.self_attn.v_proj.weight": "model-00004-of-00035.safetensors", "model.layers.3.input_layernorm.bias": "model-00004-of-00035.safetensors", "model.layers.3.input_layernorm.weight": "model-00004-of-00035.safetensors", "model.layers.3.mlp.down_proj.weight": "model-00005-of-00035.safetensors", "model.layers.3.mlp.gate_proj.weight": "model-00005-of-00035.safetensors", "model.layers.3.mlp.up_proj.weight": "model-00005-of-00035.safetensors", "model.layers.3.post_attention_layernorm.bias": "model-00005-of-00035.safetensors", "model.layers.3.post_attention_layernorm.weight": "model-00005-of-00035.safetensors", "model.layers.3.self_attn.k_proj.weight": "model-00005-of-00035.safetensors", "model.layers.3.self_attn.o_proj.weight": "model-00005-of-00035.safetensors", "model.layers.3.self_attn.q_proj.weight": "model-00005-of-00035.safetensors", "model.layers.3.self_attn.v_proj.weight": "model-00005-of-00035.safetensors", 
"model.layers.4.mlp.gate_proj.weight": "model-00006-of-00035.safetensors", "model.layers.4.self_attn.k_proj.weight": "model-00006-of-00035.safetensors", "model.layers.4.self_attn.o_proj.weight": "model-00006-of-00035.safetensors", "model.layers.4.self_attn.q_proj.weight": "model-00006-of-00035.safetensors", "model.layers.4.self_attn.v_proj.weight": "model-00006-of-00035.safetensors", "model.layers.10.mlp.gate_proj.weight": "model-00006-of-00035.safetensors", "model.layers.10.mlp.up_proj.weight": "model-00006-of-00035.safetensors", "model.layers.10.self_attn.k_proj.weight": "model-00007-of-00035.safetensors", "model.layers.10.self_attn.o_proj.weight": "model-00007-of-00035.safetensors", "model.layers.10.self_attn.q_proj.weight": "model-00007-of-00035.safetensors", "model.layers.10.self_attn.v_proj.weight": "model-00007-of-00035.safetensors", "model.layers.4.input_layernorm.bias": "model-00007-of-00035.safetensors", "model.layers.4.input_layernorm.weight": "model-00007-of-00035.safetensors", "model.layers.4.mlp.down_proj.weight": "model-00007-of-00035.safetensors", "model.layers.4.mlp.up_proj.weight": "model-00007-of-00035.safetensors", "model.layers.4.post_attention_layernorm.bias": "model-00007-of-00035.safetensors", "model.layers.4.post_attention_layernorm.weight": "model-00007-of-00035.safetensors", "model.layers.5.input_layernorm.bias": "model-00007-of-00035.safetensors", "model.layers.5.input_layernorm.weight": "model-00007-of-00035.safetensors", "model.layers.5.mlp.down_proj.weight": "model-00007-of-00035.safetensors", "model.layers.5.mlp.gate_proj.weight": "model-00008-of-00035.safetensors", "model.layers.5.mlp.up_proj.weight": "model-00008-of-00035.safetensors", "model.layers.5.post_attention_layernorm.bias": "model-00008-of-00035.safetensors", "model.layers.5.post_attention_layernorm.weight": "model-00008-of-00035.safetensors", "model.layers.5.self_attn.k_proj.weight": "model-00008-of-00035.safetensors", "model.layers.5.self_attn.o_proj.weight": "model-00008-of-00035.safetensors", "model.layers.5.self_attn.q_proj.weight": "model-00008-of-00035.safetensors", "model.layers.5.self_attn.v_proj.weight": "model-00008-of-00035.safetensors", "model.layers.6.input_layernorm.bias": "model-00008-of-00035.safetensors", "model.layers.6.input_layernorm.weight": "model-00008-of-00035.safetensors", "model.layers.6.mlp.down_proj.weight": "model-00008-of-00035.safetensors", "model.layers.6.mlp.gate_proj.weight": "model-00009-of-00035.safetensors", "model.layers.6.mlp.up_proj.weight": "model-00009-of-00035.safetensors", "model.layers.6.post_attention_layernorm.bias": "model-00009-of-00035.safetensors", "model.layers.6.post_attention_layernorm.weight": "model-00009-of-00035.safetensors", "model.layers.6.self_attn.k_proj.weight": "model-00009-of-00035.safetensors", "model.layers.6.self_attn.o_proj.weight": "model-00009-of-00035.safetensors", "model.layers.6.self_attn.q_proj.weight": "model-00009-of-00035.safetensors", "model.layers.6.self_attn.v_proj.weight": "model-00009-of-00035.safetensors", "model.layers.7.input_layernorm.bias": "model-00009-of-00035.safetensors", "model.layers.7.input_layernorm.weight": "model-00009-of-00035.safetensors", "model.layers.7.mlp.down_proj.weight": "model-00009-of-00035.safetensors", "model.layers.7.mlp.gate_proj.weight": "model-00010-of-00035.safetensors", "model.layers.7.mlp.up_proj.weight": "model-00010-of-00035.safetensors", "model.layers.7.post_attention_layernorm.bias": "model-00010-of-00035.safetensors", "model.layers.7.post_attention_layernorm.weight": 
"model-00010-of-00035.safetensors", "model.layers.7.self_attn.k_proj.weight": "model-00010-of-00035.safetensors", "model.layers.7.self_attn.o_proj.weight": "model-00010-of-00035.safetensors", "model.layers.7.self_attn.q_proj.weight": "model-00010-of-00035.safetensors", "model.layers.7.self_attn.v_proj.weight": "model-00010-of-00035.safetensors", "model.layers.8.input_layernorm.bias": "model-00010-of-00035.safetensors", "model.layers.8.input_layernorm.weight": "model-00010-of-00035.safetensors", "model.layers.8.mlp.down_proj.weight": "model-00010-of-00035.safetensors", "model.layers.8.mlp.gate_proj.weight": "model-00011-of-00035.safetensors", "model.layers.8.mlp.up_proj.weight": "model-00011-of-00035.safetensors", "model.layers.8.post_attention_layernorm.bias": "model-00011-of-00035.safetensors", "model.layers.8.post_attention_layernorm.weight": "model-00011-of-00035.safetensors", "model.layers.8.self_attn.k_proj.weight": "model-00011-of-00035.safetensors", "model.layers.8.self_attn.o_proj.weight": "model-00011-of-00035.safetensors", "model.layers.8.self_attn.q_proj.weight": "model-00011-of-00035.safetensors", "model.layers.8.self_attn.v_proj.weight": "model-00011-of-00035.safetensors", "model.layers.9.input_layernorm.bias": "model-00011-of-00035.safetensors", "model.layers.9.input_layernorm.weight": "model-00011-of-00035.safetensors", "model.layers.9.mlp.down_proj.weight": "model-00011-of-00035.safetensors", "model.layers.9.mlp.gate_proj.weight": "model-00012-of-00035.safetensors", "model.layers.9.mlp.up_proj.weight": "model-00012-of-00035.safetensors", "model.layers.9.post_attention_layernorm.bias": "model-00012-of-00035.safetensors", "model.layers.9.post_attention_layernorm.weight": "model-00012-of-00035.safetensors", "model.layers.9.self_attn.k_proj.weight": "model-00012-of-00035.safetensors", "model.layers.9.self_attn.o_proj.weight": "model-00012-of-00035.safetensors", "model.layers.9.self_attn.q_proj.weight": "model-00012-of-00035.safetensors", "model.layers.9.self_attn.v_proj.weight": "model-00012-of-00035.safetensors", "model.layers.10.input_layernorm.bias": "model-00012-of-00035.safetensors", "model.layers.10.input_layernorm.weight": "model-00012-of-00035.safetensors", "model.layers.10.mlp.down_proj.weight": "model-00012-of-00035.safetensors", "model.layers.10.post_attention_layernorm.bias": "model-00012-of-00035.safetensors", "model.layers.10.post_attention_layernorm.weight": "model-00012-of-00035.safetensors", "model.layers.11.input_layernorm.bias": "model-00012-of-00035.safetensors", "model.layers.11.input_layernorm.weight": "model-00012-of-00035.safetensors", "model.layers.11.mlp.down_proj.weight": "model-00013-of-00035.safetensors", "model.layers.11.mlp.gate_proj.weight": "model-00013-of-00035.safetensors", "model.layers.11.mlp.up_proj.weight": "model-00013-of-00035.safetensors", "model.layers.11.post_attention_layernorm.bias": "model-00013-of-00035.safetensors", "model.layers.11.post_attention_layernorm.weight": "model-00013-of-00035.safetensors", "model.layers.11.self_attn.k_proj.weight": "model-00013-of-00035.safetensors", "model.layers.11.self_attn.o_proj.weight": "model-00013-of-00035.safetensors", "model.layers.11.self_attn.q_proj.weight": "model-00013-of-00035.safetensors", "model.layers.11.self_attn.v_proj.weight": "model-00013-of-00035.safetensors", "model.layers.12.input_layernorm.bias": "model-00013-of-00035.safetensors", "model.layers.12.input_layernorm.weight": "model-00013-of-00035.safetensors", "model.layers.12.mlp.down_proj.weight": 
"model-00014-of-00035.safetensors", "model.layers.12.mlp.gate_proj.weight": "model-00014-of-00035.safetensors", "model.layers.12.mlp.up_proj.weight": "model-00014-of-00035.safetensors", "model.layers.12.post_attention_layernorm.bias": "model-00014-of-00035.safetensors", "model.layers.12.post_attention_layernorm.weight": "model-00014-of-00035.safetensors", "model.layers.12.self_attn.k_proj.weight": "model-00014-of-00035.safetensors", "model.layers.12.self_attn.o_proj.weight": "model-00014-of-00035.safetensors", "model.layers.12.self_attn.q_proj.weight": "model-00014-of-00035.safetensors", "model.layers.12.self_attn.v_proj.weight": "model-00014-of-00035.safetensors", "model.layers.13.input_layernorm.bias": "model-00014-of-00035.safetensors", "model.layers.13.input_layernorm.weight": "model-00014-of-00035.safetensors", "model.layers.13.mlp.down_proj.weight": "model-00015-of-00035.safetensors", "model.layers.13.mlp.gate_proj.weight": "model-00015-of-00035.safetensors", "model.layers.13.mlp.up_proj.weight": "model-00015-of-00035.safetensors", "model.layers.13.post_attention_layernorm.bias": "model-00015-of-00035.safetensors", "model.layers.13.post_attention_layernorm.weight": "model-00015-of-00035.safetensors", "model.layers.13.self_attn.k_proj.weight": "model-00015-of-00035.safetensors", "model.layers.13.self_attn.o_proj.weight": "model-00015-of-00035.safetensors", "model.layers.13.self_attn.q_proj.weight": "model-00015-of-00035.safetensors", "model.layers.13.self_attn.v_proj.weight": "model-00015-of-00035.safetensors", "model.layers.14.input_layernorm.bias": "model-00015-of-00035.safetensors", "model.layers.14.input_layernorm.weight": "model-00015-of-00035.safetensors", "model.layers.14.mlp.down_proj.weight": "model-00016-of-00035.safetensors", "model.layers.14.mlp.gate_proj.weight": "model-00016-of-00035.safetensors", "model.layers.14.mlp.up_proj.weight": "model-00016-of-00035.safetensors", "model.layers.14.post_attention_layernorm.bias": "model-00016-of-00035.safetensors", "model.layers.14.post_attention_layernorm.weight": "model-00016-of-00035.safetensors", "model.layers.14.self_attn.k_proj.weight": "model-00016-of-00035.safetensors", "model.layers.14.self_attn.o_proj.weight": "model-00016-of-00035.safetensors", "model.layers.14.self_attn.q_proj.weight": "model-00016-of-00035.safetensors", "model.layers.14.self_attn.v_proj.weight": "model-00016-of-00035.safetensors", "model.layers.15.input_layernorm.bias": "model-00016-of-00035.safetensors", "model.layers.15.input_layernorm.weight": "model-00016-of-00035.safetensors", "model.layers.15.mlp.down_proj.weight": "model-00017-of-00035.safetensors", "model.layers.15.mlp.gate_proj.weight": "model-00017-of-00035.safetensors", "model.layers.15.mlp.up_proj.weight": "model-00017-of-00035.safetensors", "model.layers.15.post_attention_layernorm.bias": "model-00017-of-00035.safetensors", "model.layers.15.post_attention_layernorm.weight": "model-00017-of-00035.safetensors", "model.layers.15.self_attn.k_proj.weight": "model-00017-of-00035.safetensors", "model.layers.15.self_attn.o_proj.weight": "model-00017-of-00035.safetensors", "model.layers.15.self_attn.q_proj.weight": "model-00017-of-00035.safetensors", "model.layers.15.self_attn.v_proj.weight": "model-00017-of-00035.safetensors", "model.layers.16.input_layernorm.bias": "model-00017-of-00035.safetensors", "model.layers.16.input_layernorm.weight": "model-00017-of-00035.safetensors", "model.layers.16.mlp.down_proj.weight": "model-00018-of-00035.safetensors", "model.layers.16.mlp.gate_proj.weight": 
"model-00018-of-00035.safetensors", "model.layers.16.mlp.up_proj.weight": "model-00018-of-00035.safetensors", "model.layers.16.post_attention_layernorm.bias": "model-00018-of-00035.safetensors", "model.layers.16.post_attention_layernorm.weight": "model-00018-of-00035.safetensors", "model.layers.16.self_attn.k_proj.weight": "model-00018-of-00035.safetensors", "model.layers.16.self_attn.o_proj.weight": "model-00018-of-00035.safetensors", "model.layers.16.self_attn.q_proj.weight": "model-00018-of-00035.safetensors", "model.layers.16.self_attn.v_proj.weight": "model-00018-of-00035.safetensors", "model.layers.17.input_layernorm.bias": "model-00018-of-00035.safetensors", "model.layers.17.input_layernorm.weight": "model-00018-of-00035.safetensors", "model.layers.17.mlp.down_proj.weight": "model-00019-of-00035.safetensors", "model.layers.17.mlp.gate_proj.weight": "model-00019-of-00035.safetensors", "model.layers.17.mlp.up_proj.weight": "model-00019-of-00035.safetensors", "model.layers.17.post_attention_layernorm.bias": "model-00019-of-00035.safetensors", "model.layers.17.post_attention_layernorm.weight": "model-00019-of-00035.safetensors", "model.layers.17.self_attn.k_proj.weight": "model-00019-of-00035.safetensors", "model.layers.17.self_attn.o_proj.weight": "model-00019-of-00035.safetensors", "model.layers.17.self_attn.q_proj.weight": "model-00019-of-00035.safetensors", "model.layers.17.self_attn.v_proj.weight": "model-00019-of-00035.safetensors", "model.layers.18.input_layernorm.bias": "model-00019-of-00035.safetensors", "model.layers.18.input_layernorm.weight": "model-00019-of-00035.safetensors", "model.layers.18.mlp.down_proj.weight": "model-00020-of-00035.safetensors", "model.layers.18.mlp.gate_proj.weight": "model-00020-of-00035.safetensors", "model.layers.18.mlp.up_proj.weight": "model-00020-of-00035.safetensors", "model.layers.18.post_attention_layernorm.bias": "model-00020-of-00035.safetensors", "model.layers.18.post_attention_layernorm.weight": "model-00020-of-00035.safetensors", "model.layers.18.self_attn.k_proj.weight": "model-00020-of-00035.safetensors", "model.layers.18.self_attn.o_proj.weight": "model-00020-of-00035.safetensors", "model.layers.18.self_attn.q_proj.weight": "model-00020-of-00035.safetensors", "model.layers.18.self_attn.v_proj.weight": "model-00020-of-00035.safetensors", "model.layers.19.input_layernorm.bias": "model-00020-of-00035.safetensors", "model.layers.19.input_layernorm.weight": "model-00020-of-00035.safetensors", "model.layers.19.mlp.down_proj.weight": "model-00021-of-00035.safetensors", "model.layers.19.mlp.gate_proj.weight": "model-00021-of-00035.safetensors", "model.layers.19.mlp.up_proj.weight": "model-00021-of-00035.safetensors", "model.layers.19.post_attention_layernorm.bias": "model-00021-of-00035.safetensors", "model.layers.19.post_attention_layernorm.weight": "model-00021-of-00035.safetensors", "model.layers.19.self_attn.k_proj.weight": "model-00021-of-00035.safetensors", "model.layers.19.self_attn.o_proj.weight": "model-00021-of-00035.safetensors", "model.layers.19.self_attn.q_proj.weight": "model-00021-of-00035.safetensors", "model.layers.19.self_attn.v_proj.weight": "model-00021-of-00035.safetensors", "model.layers.20.input_layernorm.bias": "model-00021-of-00035.safetensors", "model.layers.20.input_layernorm.weight": "model-00021-of-00035.safetensors", "model.layers.20.mlp.down_proj.weight": "model-00022-of-00035.safetensors", "model.layers.20.mlp.gate_proj.weight": "model-00022-of-00035.safetensors", "model.layers.20.mlp.up_proj.weight": 
"model-00022-of-00035.safetensors", "model.layers.20.post_attention_layernorm.bias": "model-00022-of-00035.safetensors", "model.layers.20.post_attention_layernorm.weight": "model-00022-of-00035.safetensors", "model.layers.20.self_attn.k_proj.weight": "model-00022-of-00035.safetensors", "model.layers.20.self_attn.o_proj.weight": "model-00022-of-00035.safetensors", "model.layers.20.self_attn.q_proj.weight": "model-00022-of-00035.safetensors", "model.layers.20.self_attn.v_proj.weight": "model-00022-of-00035.safetensors", "model.layers.21.input_layernorm.bias": "model-00022-of-00035.safetensors", "model.layers.21.input_layernorm.weight": "model-00022-of-00035.safetensors", "model.layers.21.mlp.down_proj.weight": "model-00023-of-00035.safetensors", "model.layers.21.mlp.gate_proj.weight": "model-00023-of-00035.safetensors", "model.layers.21.mlp.up_proj.weight": "model-00023-of-00035.safetensors", "model.layers.21.post_attention_layernorm.bias": "model-00023-of-00035.safetensors", "model.layers.21.post_attention_layernorm.weight": "model-00023-of-00035.safetensors", "model.layers.21.self_attn.k_proj.weight": "model-00023-of-00035.safetensors", "model.layers.21.self_attn.o_proj.weight": "model-00023-of-00035.safetensors", "model.layers.21.self_attn.q_proj.weight": "model-00023-of-00035.safetensors", "model.layers.21.self_attn.v_proj.weight": "model-00023-of-00035.safetensors", "model.layers.22.input_layernorm.bias": "model-00023-of-00035.safetensors", "model.layers.22.input_layernorm.weight": "model-00023-of-00035.safetensors", "model.layers.22.mlp.down_proj.weight": "model-00024-of-00035.safetensors", "model.layers.22.mlp.gate_proj.weight": "model-00024-of-00035.safetensors", "model.layers.22.mlp.up_proj.weight": "model-00024-of-00035.safetensors", "model.layers.22.post_attention_layernorm.bias": "model-00024-of-00035.safetensors", "model.layers.22.post_attention_layernorm.weight": "model-00024-of-00035.safetensors", "model.layers.22.self_attn.k_proj.weight": "model-00024-of-00035.safetensors", "model.layers.22.self_attn.o_proj.weight": "model-00024-of-00035.safetensors", "model.layers.22.self_attn.q_proj.weight": "model-00024-of-00035.safetensors", "model.layers.22.self_attn.v_proj.weight": "model-00024-of-00035.safetensors", "model.layers.23.self_attn.k_proj.weight": "model-00025-of-00035.safetensors", "model.layers.23.self_attn.q_proj.weight": "model-00025-of-00035.safetensors", "model.layers.23.self_attn.v_proj.weight": "model-00025-of-00035.safetensors", "model.layers.23.input_layernorm.bias": "model-00025-of-00035.safetensors", "model.layers.23.input_layernorm.weight": "model-00025-of-00035.safetensors", "model.layers.23.mlp.down_proj.weight": "model-00025-of-00035.safetensors", "model.layers.23.mlp.gate_proj.weight": "model-00025-of-00035.safetensors", "model.layers.23.mlp.up_proj.weight": "model-00025-of-00035.safetensors", "model.layers.23.post_attention_layernorm.bias": "model-00025-of-00035.safetensors", "model.layers.23.post_attention_layernorm.weight": "model-00025-of-00035.safetensors", "model.layers.23.self_attn.o_proj.weight": "model-00025-of-00035.safetensors", "model.layers.24.input_layernorm.bias": "model-00025-of-00035.safetensors", "model.layers.24.input_layernorm.weight": "model-00025-of-00035.safetensors", "model.layers.24.mlp.down_proj.weight": "model-00026-of-00035.safetensors", "model.layers.24.mlp.gate_proj.weight": "model-00026-of-00035.safetensors", "model.layers.24.mlp.up_proj.weight": "model-00026-of-00035.safetensors", 
"model.layers.24.post_attention_layernorm.bias": "model-00026-of-00035.safetensors", "model.layers.24.post_attention_layernorm.weight": "model-00026-of-00035.safetensors", "model.layers.24.self_attn.k_proj.weight": "model-00026-of-00035.safetensors", "model.layers.24.self_attn.o_proj.weight": "model-00026-of-00035.safetensors", "model.layers.24.self_attn.q_proj.weight": "model-00026-of-00035.safetensors", "model.layers.24.self_attn.v_proj.weight": "model-00026-of-00035.safetensors", "model.layers.25.input_layernorm.bias": "model-00026-of-00035.safetensors", "model.layers.25.input_layernorm.weight": "model-00026-of-00035.safetensors", "model.layers.25.mlp.down_proj.weight": "model-00027-of-00035.safetensors", "model.layers.25.mlp.gate_proj.weight": "model-00027-of-00035.safetensors", "model.layers.25.mlp.up_proj.weight": "model-00027-of-00035.safetensors", "model.layers.25.post_attention_layernorm.bias": "model-00027-of-00035.safetensors", "model.layers.25.post_attention_layernorm.weight": "model-00027-of-00035.safetensors", "model.layers.25.self_attn.k_proj.weight": "model-00027-of-00035.safetensors", "model.layers.25.self_attn.o_proj.weight": "model-00027-of-00035.safetensors", "model.layers.25.self_attn.q_proj.weight": "model-00027-of-00035.safetensors", "model.layers.25.self_attn.v_proj.weight": "model-00027-of-00035.safetensors", "model.layers.26.input_layernorm.bias": "model-00027-of-00035.safetensors", "model.layers.26.input_layernorm.weight": "model-00027-of-00035.safetensors", "model.layers.26.mlp.down_proj.weight": "model-00028-of-00035.safetensors", "model.layers.26.mlp.gate_proj.weight": "model-00028-of-00035.safetensors", "model.layers.26.mlp.up_proj.weight": "model-00028-of-00035.safetensors", "model.layers.26.post_attention_layernorm.bias": "model-00028-of-00035.safetensors", "model.layers.26.post_attention_layernorm.weight": "model-00028-of-00035.safetensors", "model.layers.26.self_attn.k_proj.weight": "model-00028-of-00035.safetensors", "model.layers.26.self_attn.o_proj.weight": "model-00028-of-00035.safetensors", "model.layers.26.self_attn.q_proj.weight": "model-00028-of-00035.safetensors", "model.layers.26.self_attn.v_proj.weight": "model-00028-of-00035.safetensors", "model.layers.27.input_layernorm.bias": "model-00028-of-00035.safetensors", "model.layers.27.input_layernorm.weight": "model-00028-of-00035.safetensors", "model.layers.27.mlp.down_proj.weight": "model-00029-of-00035.safetensors", "model.layers.27.mlp.gate_proj.weight": "model-00029-of-00035.safetensors", "model.layers.27.mlp.up_proj.weight": "model-00029-of-00035.safetensors", "model.layers.27.post_attention_layernorm.bias": "model-00029-of-00035.safetensors", "model.layers.27.post_attention_layernorm.weight": "model-00029-of-00035.safetensors", "model.layers.27.self_attn.k_proj.weight": "model-00029-of-00035.safetensors", "model.layers.27.self_attn.o_proj.weight": "model-00029-of-00035.safetensors", "model.layers.27.self_attn.q_proj.weight": "model-00029-of-00035.safetensors", "model.layers.27.self_attn.v_proj.weight": "model-00029-of-00035.safetensors", "model.layers.28.input_layernorm.bias": "model-00029-of-00035.safetensors", "model.layers.28.input_layernorm.weight": "model-00029-of-00035.safetensors", "model.layers.28.mlp.down_proj.weight": "model-00030-of-00035.safetensors", "model.layers.28.mlp.gate_proj.weight": "model-00030-of-00035.safetensors", "model.layers.28.mlp.up_proj.weight": "model-00030-of-00035.safetensors", "model.layers.28.post_attention_layernorm.bias": 
"model-00030-of-00035.safetensors", "model.layers.28.post_attention_layernorm.weight": "model-00030-of-00035.safetensors", "model.layers.28.self_attn.k_proj.weight": "model-00030-of-00035.safetensors", "model.layers.28.self_attn.o_proj.weight": "model-00030-of-00035.safetensors", "model.layers.28.self_attn.q_proj.weight": "model-00030-of-00035.safetensors", "model.layers.28.self_attn.v_proj.weight": "model-00030-of-00035.safetensors", "model.layers.29.self_attn.k_proj.weight": "model-00031-of-00035.safetensors", "model.layers.29.self_attn.o_proj.weight": "model-00031-of-00035.safetensors", "model.layers.29.self_attn.q_proj.weight": "model-00031-of-00035.safetensors", "model.layers.29.self_attn.v_proj.weight": "model-00031-of-00035.safetensors", "lm_head.weight": "model-00032-of-00035.safetensors", "model.layers.29.input_layernorm.bias": "model-00033-of-00035.safetensors", "model.layers.29.input_layernorm.weight": "model-00033-of-00035.safetensors", "model.layers.29.mlp.down_proj.weight": "model-00033-of-00035.safetensors", "model.layers.29.mlp.gate_proj.weight": "model-00033-of-00035.safetensors", "model.layers.29.mlp.up_proj.weight": "model-00033-of-00035.safetensors", "model.layers.29.post_attention_layernorm.bias": "model-00033-of-00035.safetensors", "model.layers.29.post_attention_layernorm.weight": "model-00033-of-00035.safetensors", "model.layers.30.input_layernorm.bias": "model-00033-of-00035.safetensors", "model.layers.30.input_layernorm.weight": "model-00033-of-00035.safetensors", "model.layers.30.mlp.down_proj.weight": "model-00033-of-00035.safetensors", "model.layers.30.mlp.gate_proj.weight": "model-00034-of-00035.safetensors", "model.layers.30.mlp.up_proj.weight": "model-00034-of-00035.safetensors", "model.layers.30.post_attention_layernorm.bias": "model-00034-of-00035.safetensors", "model.layers.30.post_attention_layernorm.weight": "model-00034-of-00035.safetensors", "model.layers.30.self_attn.k_proj.weight": "model-00034-of-00035.safetensors", "model.layers.30.self_attn.o_proj.weight": "model-00034-of-00035.safetensors", "model.layers.30.self_attn.q_proj.weight": "model-00034-of-00035.safetensors", "model.layers.30.self_attn.v_proj.weight": "model-00034-of-00035.safetensors", "model.layers.31.input_layernorm.bias": "model-00034-of-00035.safetensors", "model.layers.31.input_layernorm.weight": "model-00034-of-00035.safetensors", "model.layers.31.mlp.down_proj.weight": "model-00034-of-00035.safetensors", "model.layers.31.mlp.gate_proj.weight": "model-00035-of-00035.safetensors", "model.layers.31.mlp.up_proj.weight": "model-00035-of-00035.safetensors", "model.layers.31.post_attention_layernorm.bias": "model-00035-of-00035.safetensors", "model.layers.31.post_attention_layernorm.weight": "model-00035-of-00035.safetensors", "model.layers.31.self_attn.k_proj.weight": "model-00035-of-00035.safetensors", "model.layers.31.self_attn.o_proj.weight": "model-00035-of-00035.safetensors", "model.layers.31.self_attn.q_proj.weight": "model-00035-of-00035.safetensors", "model.layers.31.self_attn.v_proj.weight": "model-00035-of-00035.safetensors", "model.norm.bias": "model-00035-of-00035.safetensors", "model.norm.weight": "model-00035-of-00035.safetensors"}}
special_tokens_map.json ADDED
@@ -0,0 +1,5 @@
+ {
+   "bos_token": "<|endoftext|>",
+   "eos_token": "<|endoftext|>",
+   "unk_token": "<|endoftext|>"
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,211 @@
+ {
+   "add_prefix_space": false,
+   "added_tokens_decoder": {
+     "0": {
+       "content": "<|endoftext|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1": {
+       "content": "<|padding|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "50254": {
+       "content": " ",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "50255": {
+       "content": " ",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "50256": {
+       "content": " ",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "50257": {
+       "content": " ",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "50258": {
+       "content": " ",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "50259": {
+       "content": " ",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "50260": {
+       "content": " ",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "50261": {
+       "content": " ",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "50262": {
+       "content": " ",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "50263": {
+       "content": " ",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "50264": {
+       "content": " ",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "50265": {
+       "content": " ",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "50266": {
+       "content": " ",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "50267": {
+       "content": " ",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "50268": {
+       "content": " ",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "50269": {
+       "content": " ",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "50270": {
+       "content": " ",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "50271": {
+       "content": " ",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "50272": {
+       "content": " ",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "50273": {
+       "content": " ",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "50274": {
+       "content": " ",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "50275": {
+       "content": " ",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "50276": {
+       "content": " ",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     }
+   },
+   "bos_token": "<|endoftext|>",
+   "clean_up_tokenization_spaces": true,
+   "eos_token": "<|endoftext|>",
+   "model_max_length": 1000000000000000019884624838656,
+   "tokenizer_class": "GPTNeoXTokenizer",
+   "unk_token": "<|endoftext|>"
+ }
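With tokenizer.json, special_tokens_map.json, and this tokenizer_config.json in place, the tokenizer loads through AutoTokenizer (which resolves the "GPTNeoXTokenizer" class to its fast implementation). A sketch, assuming the files are in the current directory:

    # Sketch: load the tokenizer from the uploaded files and round-trip a string.
    from transformers import AutoTokenizer

    tok = AutoTokenizer.from_pretrained(".")  # directory containing the files above
    assert tok.bos_token == tok.eos_token == "<|endoftext|>"
    ids = tok("Hello world")["input_ids"]
    print(tok.decode(ids))                    # "Hello world"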