ehristoforu committed
Commit: c38fd27
Parent: 0e57c6e

Upload folder using huggingface_hub

README.md ADDED
@@ -0,0 +1,39 @@
+ ---
+ base_model:
+ - unsloth/Mistral-Nemo-Instruct-2407
+ library_name: transformers
+ tags:
+ - mergekit
+ - merge
+
+ ---
+ # merge
+
+ This is a merge of pre-trained language models created using [mergekit](https://github.com/cg123/mergekit).
+
+ ## Merge Details
+ ### Merge Method
+
+ This model was merged using the passthrough merge method.
+
+ ### Models Merged
+
+ The following models were included in the merge:
+ * [unsloth/Mistral-Nemo-Instruct-2407](https://huggingface.co/unsloth/Mistral-Nemo-Instruct-2407)
+
+ ### Configuration
+
+ The following YAML configuration was used to produce this model:
+
+ ```yaml
+ slices:
+ - sources:
+   - model: unsloth/Mistral-Nemo-Instruct-2407
+     layer_range: [0, 21]
+ - sources:
+   - model: unsloth/Mistral-Nemo-Instruct-2407
+     layer_range: [22, 25]
+ merge_method: passthrough
+ dtype: bfloat16
+
+ ```
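Once the shard files below are downloaded, the merged checkpoint loads like any other transformers causal LM. A minimal sketch, assuming a hypothetical repo id (substitute this repository's actual name or a local path); the bfloat16 dtype matches the `torch_dtype` in config.json below:

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# "ehristoforu/merge" is a placeholder, not the confirmed repo id.
repo_id = "ehristoforu/merge"

tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForCausalLM.from_pretrained(repo_id, torch_dtype=torch.bfloat16)

prompt = "The passthrough merge method"
inputs = tokenizer(prompt, return_tensors="pt")
output = model.generate(**inputs, max_new_tokens=32)
print(tokenizer.decode(output[0], skip_special_tokens=True))
```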
config.json ADDED
@@ -0,0 +1,27 @@
+ {
+   "_name_or_path": "unsloth/Mistral-Nemo-Instruct-2407",
+   "architectures": [
+     "MistralForCausalLM"
+   ],
+   "attention_dropout": 0.0,
+   "bos_token_id": 1,
+   "eos_token_id": 2,
+   "head_dim": 128,
+   "hidden_act": "silu",
+   "hidden_size": 5120,
+   "initializer_range": 0.02,
+   "intermediate_size": 14336,
+   "max_position_embeddings": 1024000,
+   "model_type": "mistral",
+   "num_attention_heads": 32,
+   "num_hidden_layers": 24,
+   "num_key_value_heads": 8,
+   "rms_norm_eps": 1e-05,
+   "rope_theta": 1000000.0,
+   "sliding_window": null,
+   "tie_word_embeddings": false,
+   "torch_dtype": "bfloat16",
+   "transformers_version": "4.39.3",
+   "use_cache": true,
+   "vocab_size": 131072
+ }
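One detail worth noting: `num_hidden_layers` is 24, which is consistent with reading the merge config's `layer_range` values as half-open `[start, end)` intervals, so the two slices contribute (21 − 0) + (25 − 22) = 24 decoder layers, and source layer 21 is carried over by neither slice. A trivial sketch of that arithmetic:

```python
# Half-open [start, end) slice ranges from the merge config; the half-open
# reading is an assumption, but it is the one consistent with config.json.
slices = [(0, 21), (22, 25)]
total = sum(end - start for start, end in slices)
assert total == 24  # matches "num_hidden_layers": 24
print(f"{total} decoder layers; source layer 21 is not carried over")
```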
mergekit_config.yml ADDED
@@ -0,0 +1,9 @@
+ slices:
+ - sources:
+   - model: unsloth/Mistral-Nemo-Instruct-2407
+     layer_range: [0, 21]
+ - sources:
+   - model: unsloth/Mistral-Nemo-Instruct-2407
+     layer_range: [22, 25]
+ merge_method: passthrough
+ dtype: bfloat16
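To reproduce the merge from this config, mergekit's README documents a `mergekit-yaml` command-line entry point. A sketch, with an illustrative output path:

```python
import subprocess

# Run mergekit's CLI on the config committed above; "./merged-model" is
# an illustrative output directory, not a path from this repository.
subprocess.run(
    ["mergekit-yaml", "mergekit_config.yml", "./merged-model"],
    check=True,
)
```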
model-00001-of-00009.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a0976f749096a1de7efab905b13563c1685c23f06092ba41afb4d73dc595cc4f
+ size 1887501944
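Each of the nine `model-*.safetensors` entries in this commit is a Git LFS pointer file rather than the weights themselves: the three lines record the pointer spec version, the SHA-256 of the actual payload, and its size in bytes. A sketch of checking a downloaded shard against the pointer's oid, assuming the shard was saved under its original filename:

```python
import hashlib

# oid from the LFS pointer above.
expected = "a0976f749096a1de7efab905b13563c1685c23f06092ba41afb4d73dc595cc4f"

h = hashlib.sha256()
with open("model-00001-of-00009.safetensors", "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        h.update(chunk)
assert h.hexdigest() == expected, "shard does not match the pointer's sha256"
```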
model-00002-of-00009.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:34b1e77558cf740a0e06a6c8db15b949c95af782123136b294cc8b6253dccd6b
+ size 1635843120
model-00003-of-00009.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f60b9ea2403a077f62ae49c2d96fadf59988e8a3aa5e5fa6e8677bef0643ea1b
+ size 1887437760
model-00004-of-00009.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8cda96ae5d756aeb196e5606aa9e598a66559bc61f0b01e756a5ce2b805f1dea
+ size 1887522688
model-00005-of-00009.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0f7ea9a498a25c4cd2f600638ab8d03cab3b7d63534d97b56fa81d2542476496
+ size 1929444648
model-00006-of-00009.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:48defdcce051492ef1048d4a27dea892777484fc9c73e91dcef6eec019e5048e
+ size 1887522680
model-00007-of-00009.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7c5eb09811ba0be2b56b3afb361c23f03d4063dbc4b9840b4976b6457f701cd8
+ size 1929444672
model-00008-of-00009.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:82fc3ea2b4bc8904fa37a593e013bde75a4c1c50ad4bbb1d68a96479515af9c8
+ size 1384205032
model-00009-of-00009.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:75f675510870b5495483236d89ae434a8d7277684dea3295ce4e02341e420a4c
+ size 1342187744
model.safetensors.index.json ADDED
@@ -0,0 +1 @@
+ {"metadata": {"mergekit_version": "0.0.4.2", "total_size": 15771084800}, "weight_map": {"model.layers.6.mlp.gate_proj.weight": "model-00001-of-00009.safetensors", "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00009.safetensors", "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00009.safetensors", "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00009.safetensors", "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00009.safetensors", "model.layers.5.mlp.down_proj.weight": "model-00001-of-00009.safetensors", "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00009.safetensors", "model.layers.5.mlp.up_proj.weight": "model-00001-of-00009.safetensors", "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00009.safetensors", "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00009.safetensors", "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00009.safetensors", "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00009.safetensors", "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00009.safetensors", "model.layers.5.input_layernorm.weight": "model-00001-of-00009.safetensors", "model.layers.4.mlp.down_proj.weight": "model-00001-of-00009.safetensors", "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00009.safetensors", "model.layers.4.mlp.up_proj.weight": "model-00001-of-00009.safetensors", "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00009.safetensors", "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00009.safetensors", "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00009.safetensors", "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00009.safetensors", "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00009.safetensors", "model.layers.4.input_layernorm.weight": "model-00001-of-00009.safetensors", "model.layers.3.mlp.down_proj.weight": "model-00001-of-00009.safetensors", "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00009.safetensors", "model.layers.3.mlp.up_proj.weight": "model-00001-of-00009.safetensors", "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00009.safetensors", "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00009.safetensors", "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00009.safetensors", "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00009.safetensors", "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00009.safetensors", "model.layers.3.input_layernorm.weight": "model-00001-of-00009.safetensors", "model.layers.2.mlp.down_proj.weight": "model-00002-of-00009.safetensors", "model.layers.2.mlp.gate_proj.weight": "model-00002-of-00009.safetensors", "model.layers.2.mlp.up_proj.weight": "model-00002-of-00009.safetensors", "model.layers.2.post_attention_layernorm.weight": "model-00002-of-00009.safetensors", "model.layers.2.self_attn.o_proj.weight": "model-00002-of-00009.safetensors", "model.layers.2.self_attn.v_proj.weight": "model-00002-of-00009.safetensors", "model.layers.2.self_attn.k_proj.weight": "model-00002-of-00009.safetensors", "model.layers.2.self_attn.q_proj.weight": "model-00002-of-00009.safetensors", "model.layers.2.input_layernorm.weight": "model-00002-of-00009.safetensors", "model.layers.1.mlp.down_proj.weight": "model-00002-of-00009.safetensors", "model.layers.1.mlp.gate_proj.weight": "model-00002-of-00009.safetensors", "model.layers.1.mlp.up_proj.weight": "model-00002-of-00009.safetensors", "model.layers.1.post_attention_layernorm.weight": 
"model-00002-of-00009.safetensors", "model.layers.1.self_attn.o_proj.weight": "model-00002-of-00009.safetensors", "model.layers.1.self_attn.v_proj.weight": "model-00002-of-00009.safetensors", "model.layers.1.self_attn.k_proj.weight": "model-00002-of-00009.safetensors", "model.layers.1.self_attn.q_proj.weight": "model-00002-of-00009.safetensors", "model.layers.1.input_layernorm.weight": "model-00002-of-00009.safetensors", "model.layers.0.mlp.down_proj.weight": "model-00002-of-00009.safetensors", "model.layers.0.mlp.gate_proj.weight": "model-00002-of-00009.safetensors", "model.layers.0.mlp.up_proj.weight": "model-00002-of-00009.safetensors", "model.layers.0.post_attention_layernorm.weight": "model-00002-of-00009.safetensors", "model.layers.0.self_attn.o_proj.weight": "model-00002-of-00009.safetensors", "model.layers.0.self_attn.v_proj.weight": "model-00002-of-00009.safetensors", "model.layers.0.self_attn.k_proj.weight": "model-00002-of-00009.safetensors", "model.layers.0.self_attn.q_proj.weight": "model-00002-of-00009.safetensors", "model.layers.0.input_layernorm.weight": "model-00002-of-00009.safetensors", "model.embed_tokens.weight": "model-00003-of-00009.safetensors", "model.layers.15.mlp.gate_proj.weight": "model-00003-of-00009.safetensors", "model.layers.15.self_attn.o_proj.weight": "model-00003-of-00009.safetensors", "model.layers.15.self_attn.v_proj.weight": "model-00003-of-00009.safetensors", "model.layers.15.self_attn.k_proj.weight": "model-00003-of-00009.safetensors", "model.layers.15.self_attn.q_proj.weight": "model-00003-of-00009.safetensors", "model.layers.14.mlp.down_proj.weight": "model-00003-of-00009.safetensors", "model.layers.14.mlp.gate_proj.weight": "model-00003-of-00009.safetensors", "model.layers.14.mlp.up_proj.weight": "model-00004-of-00009.safetensors", "model.layers.14.post_attention_layernorm.weight": "model-00004-of-00009.safetensors", "model.layers.14.self_attn.o_proj.weight": "model-00004-of-00009.safetensors", "model.layers.14.self_attn.v_proj.weight": "model-00004-of-00009.safetensors", "model.layers.14.self_attn.k_proj.weight": "model-00004-of-00009.safetensors", "model.layers.14.self_attn.q_proj.weight": "model-00004-of-00009.safetensors", "model.layers.14.input_layernorm.weight": "model-00004-of-00009.safetensors", "model.layers.13.mlp.down_proj.weight": "model-00004-of-00009.safetensors", "model.layers.13.mlp.gate_proj.weight": "model-00004-of-00009.safetensors", "model.layers.13.mlp.up_proj.weight": "model-00004-of-00009.safetensors", "model.layers.13.post_attention_layernorm.weight": "model-00004-of-00009.safetensors", "model.layers.13.self_attn.o_proj.weight": "model-00004-of-00009.safetensors", "model.layers.13.self_attn.v_proj.weight": "model-00004-of-00009.safetensors", "model.layers.13.self_attn.k_proj.weight": "model-00004-of-00009.safetensors", "model.layers.13.self_attn.q_proj.weight": "model-00004-of-00009.safetensors", "model.layers.13.input_layernorm.weight": "model-00004-of-00009.safetensors", "model.layers.12.mlp.down_proj.weight": "model-00004-of-00009.safetensors", "model.layers.12.mlp.gate_proj.weight": "model-00004-of-00009.safetensors", "model.layers.12.mlp.up_proj.weight": "model-00004-of-00009.safetensors", "model.layers.12.post_attention_layernorm.weight": "model-00004-of-00009.safetensors", "model.layers.12.self_attn.o_proj.weight": "model-00004-of-00009.safetensors", "model.layers.12.self_attn.v_proj.weight": "model-00004-of-00009.safetensors", "model.layers.12.self_attn.k_proj.weight": "model-00004-of-00009.safetensors", 
"model.layers.12.self_attn.q_proj.weight": "model-00004-of-00009.safetensors", "model.layers.12.input_layernorm.weight": "model-00004-of-00009.safetensors", "model.layers.11.mlp.down_proj.weight": "model-00004-of-00009.safetensors", "model.layers.11.mlp.gate_proj.weight": "model-00004-of-00009.safetensors", "model.layers.11.mlp.up_proj.weight": "model-00004-of-00009.safetensors", "model.layers.11.post_attention_layernorm.weight": "model-00004-of-00009.safetensors", "model.layers.11.self_attn.o_proj.weight": "model-00004-of-00009.safetensors", "model.layers.11.self_attn.v_proj.weight": "model-00004-of-00009.safetensors", "model.layers.11.self_attn.k_proj.weight": "model-00004-of-00009.safetensors", "model.layers.11.self_attn.q_proj.weight": "model-00004-of-00009.safetensors", "model.layers.11.input_layernorm.weight": "model-00004-of-00009.safetensors", "model.layers.10.mlp.down_proj.weight": "model-00005-of-00009.safetensors", "model.layers.10.mlp.gate_proj.weight": "model-00005-of-00009.safetensors", "model.layers.10.mlp.up_proj.weight": "model-00005-of-00009.safetensors", "model.layers.10.post_attention_layernorm.weight": "model-00005-of-00009.safetensors", "model.layers.10.self_attn.o_proj.weight": "model-00005-of-00009.safetensors", "model.layers.10.self_attn.v_proj.weight": "model-00005-of-00009.safetensors", "model.layers.10.self_attn.k_proj.weight": "model-00005-of-00009.safetensors", "model.layers.10.self_attn.q_proj.weight": "model-00005-of-00009.safetensors", "model.layers.10.input_layernorm.weight": "model-00005-of-00009.safetensors", "model.layers.9.mlp.down_proj.weight": "model-00005-of-00009.safetensors", "model.layers.9.mlp.gate_proj.weight": "model-00005-of-00009.safetensors", "model.layers.9.mlp.up_proj.weight": "model-00005-of-00009.safetensors", "model.layers.9.post_attention_layernorm.weight": "model-00005-of-00009.safetensors", "model.layers.9.self_attn.o_proj.weight": "model-00005-of-00009.safetensors", "model.layers.9.self_attn.v_proj.weight": "model-00005-of-00009.safetensors", "model.layers.9.self_attn.k_proj.weight": "model-00005-of-00009.safetensors", "model.layers.9.self_attn.q_proj.weight": "model-00005-of-00009.safetensors", "model.layers.9.input_layernorm.weight": "model-00005-of-00009.safetensors", "model.layers.8.mlp.down_proj.weight": "model-00005-of-00009.safetensors", "model.layers.8.mlp.gate_proj.weight": "model-00005-of-00009.safetensors", "model.layers.8.mlp.up_proj.weight": "model-00005-of-00009.safetensors", "model.layers.8.post_attention_layernorm.weight": "model-00005-of-00009.safetensors", "model.layers.8.self_attn.o_proj.weight": "model-00005-of-00009.safetensors", "model.layers.8.self_attn.v_proj.weight": "model-00005-of-00009.safetensors", "model.layers.8.self_attn.k_proj.weight": "model-00005-of-00009.safetensors", "model.layers.8.self_attn.q_proj.weight": "model-00005-of-00009.safetensors", "model.layers.8.input_layernorm.weight": "model-00005-of-00009.safetensors", "model.layers.7.mlp.down_proj.weight": "model-00005-of-00009.safetensors", "model.layers.7.mlp.gate_proj.weight": "model-00005-of-00009.safetensors", "model.layers.7.mlp.up_proj.weight": "model-00006-of-00009.safetensors", "model.layers.7.post_attention_layernorm.weight": "model-00006-of-00009.safetensors", "model.layers.7.self_attn.o_proj.weight": "model-00006-of-00009.safetensors", "model.layers.7.self_attn.v_proj.weight": "model-00006-of-00009.safetensors", "model.layers.7.self_attn.k_proj.weight": "model-00006-of-00009.safetensors", "model.layers.7.self_attn.q_proj.weight": 
"model-00006-of-00009.safetensors", "model.layers.7.input_layernorm.weight": "model-00006-of-00009.safetensors", "model.layers.6.mlp.down_proj.weight": "model-00006-of-00009.safetensors", "model.layers.6.mlp.up_proj.weight": "model-00006-of-00009.safetensors", "model.layers.6.post_attention_layernorm.weight": "model-00006-of-00009.safetensors", "model.layers.6.input_layernorm.weight": "model-00006-of-00009.safetensors", "model.layers.23.mlp.gate_proj.weight": "model-00006-of-00009.safetensors", "model.layers.23.self_attn.o_proj.weight": "model-00006-of-00009.safetensors", "model.layers.23.self_attn.v_proj.weight": "model-00006-of-00009.safetensors", "model.layers.23.self_attn.k_proj.weight": "model-00006-of-00009.safetensors", "model.layers.23.self_attn.q_proj.weight": "model-00006-of-00009.safetensors", "model.layers.22.mlp.down_proj.weight": "model-00006-of-00009.safetensors", "model.layers.22.mlp.gate_proj.weight": "model-00006-of-00009.safetensors", "model.layers.22.mlp.up_proj.weight": "model-00006-of-00009.safetensors", "model.layers.22.post_attention_layernorm.weight": "model-00006-of-00009.safetensors", "model.layers.22.self_attn.o_proj.weight": "model-00006-of-00009.safetensors", "model.layers.22.self_attn.v_proj.weight": "model-00006-of-00009.safetensors", "model.layers.22.self_attn.k_proj.weight": "model-00006-of-00009.safetensors", "model.layers.22.self_attn.q_proj.weight": "model-00006-of-00009.safetensors", "model.layers.22.input_layernorm.weight": "model-00006-of-00009.safetensors", "model.layers.21.mlp.down_proj.weight": "model-00006-of-00009.safetensors", "model.layers.21.mlp.gate_proj.weight": "model-00006-of-00009.safetensors", "model.layers.21.mlp.up_proj.weight": "model-00006-of-00009.safetensors", "model.layers.21.post_attention_layernorm.weight": "model-00006-of-00009.safetensors", "model.layers.21.self_attn.o_proj.weight": "model-00006-of-00009.safetensors", "model.layers.21.self_attn.v_proj.weight": "model-00006-of-00009.safetensors", "model.layers.21.self_attn.k_proj.weight": "model-00006-of-00009.safetensors", "model.layers.21.self_attn.q_proj.weight": "model-00006-of-00009.safetensors", "model.layers.21.input_layernorm.weight": "model-00006-of-00009.safetensors", "model.layers.20.mlp.down_proj.weight": "model-00007-of-00009.safetensors", "model.layers.20.mlp.gate_proj.weight": "model-00007-of-00009.safetensors", "model.layers.20.mlp.up_proj.weight": "model-00007-of-00009.safetensors", "model.layers.20.post_attention_layernorm.weight": "model-00007-of-00009.safetensors", "model.layers.20.self_attn.o_proj.weight": "model-00007-of-00009.safetensors", "model.layers.20.self_attn.v_proj.weight": "model-00007-of-00009.safetensors", "model.layers.20.self_attn.k_proj.weight": "model-00007-of-00009.safetensors", "model.layers.20.self_attn.q_proj.weight": "model-00007-of-00009.safetensors", "model.layers.20.input_layernorm.weight": "model-00007-of-00009.safetensors", "model.layers.19.mlp.down_proj.weight": "model-00007-of-00009.safetensors", "model.layers.19.mlp.gate_proj.weight": "model-00007-of-00009.safetensors", "model.layers.19.mlp.up_proj.weight": "model-00007-of-00009.safetensors", "model.layers.19.post_attention_layernorm.weight": "model-00007-of-00009.safetensors", "model.layers.19.self_attn.o_proj.weight": "model-00007-of-00009.safetensors", "model.layers.19.self_attn.v_proj.weight": "model-00007-of-00009.safetensors", "model.layers.19.self_attn.k_proj.weight": "model-00007-of-00009.safetensors", "model.layers.19.self_attn.q_proj.weight": 
"model-00007-of-00009.safetensors", "model.layers.19.input_layernorm.weight": "model-00007-of-00009.safetensors", "model.layers.18.mlp.down_proj.weight": "model-00007-of-00009.safetensors", "model.layers.18.mlp.gate_proj.weight": "model-00007-of-00009.safetensors", "model.layers.18.mlp.up_proj.weight": "model-00007-of-00009.safetensors", "model.layers.18.post_attention_layernorm.weight": "model-00007-of-00009.safetensors", "model.layers.18.self_attn.o_proj.weight": "model-00007-of-00009.safetensors", "model.layers.18.self_attn.v_proj.weight": "model-00007-of-00009.safetensors", "model.layers.18.self_attn.k_proj.weight": "model-00007-of-00009.safetensors", "model.layers.18.self_attn.q_proj.weight": "model-00007-of-00009.safetensors", "model.layers.18.input_layernorm.weight": "model-00007-of-00009.safetensors", "model.layers.17.mlp.down_proj.weight": "model-00007-of-00009.safetensors", "model.layers.17.mlp.gate_proj.weight": "model-00007-of-00009.safetensors", "model.layers.17.mlp.up_proj.weight": "model-00008-of-00009.safetensors", "model.layers.17.post_attention_layernorm.weight": "model-00008-of-00009.safetensors", "model.layers.17.self_attn.o_proj.weight": "model-00008-of-00009.safetensors", "model.layers.17.self_attn.v_proj.weight": "model-00008-of-00009.safetensors", "model.layers.17.self_attn.k_proj.weight": "model-00008-of-00009.safetensors", "model.layers.17.self_attn.q_proj.weight": "model-00008-of-00009.safetensors", "model.layers.17.input_layernorm.weight": "model-00008-of-00009.safetensors", "model.layers.16.mlp.down_proj.weight": "model-00008-of-00009.safetensors", "model.layers.16.mlp.gate_proj.weight": "model-00008-of-00009.safetensors", "model.layers.16.mlp.up_proj.weight": "model-00008-of-00009.safetensors", "model.layers.16.post_attention_layernorm.weight": "model-00008-of-00009.safetensors", "model.layers.16.self_attn.o_proj.weight": "model-00008-of-00009.safetensors", "model.layers.16.self_attn.v_proj.weight": "model-00008-of-00009.safetensors", "model.layers.16.self_attn.k_proj.weight": "model-00008-of-00009.safetensors", "model.layers.16.self_attn.q_proj.weight": "model-00008-of-00009.safetensors", "model.layers.16.input_layernorm.weight": "model-00008-of-00009.safetensors", "model.layers.15.mlp.down_proj.weight": "model-00008-of-00009.safetensors", "model.layers.15.mlp.up_proj.weight": "model-00008-of-00009.safetensors", "model.layers.15.post_attention_layernorm.weight": "model-00008-of-00009.safetensors", "model.layers.15.input_layernorm.weight": "model-00008-of-00009.safetensors", "model.layers.23.mlp.down_proj.weight": "model-00008-of-00009.safetensors", "model.layers.23.mlp.up_proj.weight": "model-00008-of-00009.safetensors", "model.layers.23.post_attention_layernorm.weight": "model-00008-of-00009.safetensors", "model.layers.23.input_layernorm.weight": "model-00008-of-00009.safetensors", "lm_head.weight": "model-00009-of-00009.safetensors", "model.norm.weight": "model-00009-of-00009.safetensors"}}
special_tokens_map.json ADDED
@@ -0,0 +1,30 @@
+ {
+   "bos_token": {
+     "content": "<s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "<pad>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "<unk>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
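A quick way to confirm these special tokens came through with the merged tokenizer is to load it and print its map; a sketch, reusing the hypothetical repo id from above:

```python
from transformers import AutoTokenizer

# Placeholder repo id, as in the earlier loading sketch.
tokenizer = AutoTokenizer.from_pretrained("ehristoforu/merge")
print(tokenizer.special_tokens_map)
# Expected per this file: bos <s>, eos </s>, pad <pad>, unk <unk>.
```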
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
The diff for this file is too large to render. See raw diff