yentinglin committed
Commit 1f7e786
1 Parent(s): 22b03da

Upload LlamaForCausalLM

config.json CHANGED
@@ -1,22 +1,24 @@
 {
-  "_name_or_path": "/fsx/mistral-7b-pretrain/checkpoint-1000/",
+  "_name_or_path": "/fsx/llama2-7b-pretrain/checkpoint-1000",
   "architectures": [
-    "MistralForCausalLM"
+    "LlamaForCausalLM"
   ],
+  "attention_bias": false,
   "bos_token_id": 1,
   "eos_token_id": 2,
   "hidden_act": "silu",
   "hidden_size": 4096,
   "initializer_range": 0.02,
-  "intermediate_size": 14336,
-  "max_position_embeddings": 32768,
-  "model_type": "mistral",
+  "intermediate_size": 11008,
+  "max_position_embeddings": 4096,
+  "model_type": "llama",
   "num_attention_heads": 32,
   "num_hidden_layers": 32,
-  "num_key_value_heads": 8,
+  "num_key_value_heads": 32,
+  "pretraining_tp": 1,
   "rms_norm_eps": 1e-05,
+  "rope_scaling": null,
   "rope_theta": 10000.0,
-  "sliding_window": 4096,
   "tie_word_embeddings": false,
   "torch_dtype": "bfloat16",
   "transformers_version": "4.34.0",
generation_config.json CHANGED
@@ -1,6 +1,10 @@
 {
-  "_from_model_config": true,
   "bos_token_id": 1,
+  "do_sample": true,
   "eos_token_id": 2,
+  "max_length": 4096,
+  "pad_token_id": 0,
+  "temperature": 0.6,
+  "top_p": 0.9,
   "transformers_version": "4.34.0"
 }
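
The new generation_config.json bakes sampling defaults into the repo (do_sample with temperature 0.6, top_p 0.9, max_length 4096, pad_token_id 0), so generate() picks them up whenever no decoding arguments are passed. A hedged sketch, again with an illustrative local path:

from transformers import AutoModelForCausalLM, AutoTokenizer, GenerationConfig

local_dir = "./checkpoint-1000"  # assumption: local download of this repo

gen_config = GenerationConfig.from_pretrained(local_dir)
print(gen_config.do_sample, gen_config.temperature, gen_config.top_p)  # True 0.6 0.9

tokenizer = AutoTokenizer.from_pretrained(local_dir)
model = AutoModelForCausalLM.from_pretrained(local_dir)

inputs = tokenizer("Hello", return_tensors="pt")
# With no decoding arguments supplied, generate() falls back to the repo's
# generation_config.json, so this call samples (temperature 0.6, top_p 0.9)
# rather than decoding greedily.
outputs = model.generate(**inputs, max_new_tokens=32)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))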
model-00001-of-00002.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:9f7e7fa18a841bc304c2bc18526a8b3ecaaa6732352200d74483f68e52ab8e72
-size 9942981696
+oid sha256:ce2b3e4e77ea3a33ba84da66e68512739525e74e3046ffc0969374b2c3e9f9a0
+size 9976570520
model-00002-of-00002.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:c9e75a5484d4163faf2bcf615b2eaba6dd92aa97ac982aa2c60784c66f672f8e
-size 4540516344
+oid sha256:56cf9830abdde60dfe3ea0cb159a7243612e463cf4c4b31a75325e3412bf21c9
+size 3500294544
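
Both .safetensors entries are Git LFS pointer files, so only the sha256 oid and byte size change when the shard contents are replaced. A quick sketch that verifies a downloaded shard against the new pointer values above (local path is an assumption):

import hashlib
import os

path = "./checkpoint-1000/model-00001-of-00002.safetensors"  # assumption: local download

h = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        h.update(chunk)

print(os.path.getsize(path) == 9976570520)  # "size" line of the new pointer
print(h.hexdigest() == "ce2b3e4e77ea3a33ba84da66e68512739525e74e3046ffc0969374b2c3e9f9a0")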
model.safetensors.index.json CHANGED
@@ -1,6 +1,6 @@
 {
   "metadata": {
-    "total_size": 14483464192
+    "total_size": 13476831232
   },
   "weight_map": {
     "lm_head.weight": "model-00002-of-00002.safetensors",
@@ -140,24 +140,24 @@
     "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
     "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
     "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
-    "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
-    "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
-    "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
-    "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
-    "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+    "model.layers.22.input_layernorm.weight": "model-00001-of-00002.safetensors",
+    "model.layers.22.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.22.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.22.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.22.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
     "model.layers.22.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
     "model.layers.22.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
     "model.layers.22.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
     "model.layers.22.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
-    "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
-    "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
-    "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
-    "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
-    "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
-    "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
-    "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
-    "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
-    "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+    "model.layers.23.input_layernorm.weight": "model-00001-of-00002.safetensors",
+    "model.layers.23.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.23.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.23.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.23.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+    "model.layers.23.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.23.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.23.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.23.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
     "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
     "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
     "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",