Omkar Thawakar committed on
Commit: 654a234
Parent: 9a0631c

initial upload

Files changed (1):
  config.json (+9, -6)
config.json CHANGED
@@ -1,17 +1,20 @@
 {
-  "_name_or_path": "./tinyllama_08b/ckpt_81",
+  "_name_or_path": "MobiLlama",
   "architectures": [
-    "LlamaForCausalLM"
+    "MobiLlamaForCausalLM"
   ],
+  "auto_map": {
+    "AutoModelForCausalLM": "modelling_mobillama.MobiLlamaForCausalLM"
+  },
   "attention_bias": false,
   "attention_dropout": 0.0,
   "bos_token_id": 1,
   "eos_token_id": 2,
   "hidden_act": "silu",
-  "hidden_size": 2560,
+  "hidden_size": 2048,
   "initializer_range": 0.02,
-  "intermediate_size": 10240,
-  "max_position_embeddings": 2560,
+  "intermediate_size": 5632,
+  "max_position_embeddings": 2048,
   "model_type": "llama",
   "num_attention_heads": 32,
   "num_hidden_layers": 22,
@@ -25,4 +28,4 @@
   "transformers_version": "4.36.1",
   "use_cache": true,
   "vocab_size": 32000
-}
+}
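For context, a minimal loading sketch of what the new "auto_map" entry enables: it maps AutoModelForCausalLM to the custom MobiLlamaForCausalLM class defined in modelling_mobillama.py in this repository, which transformers will only import when trust_remote_code=True is passed. The repository id below is a placeholder for illustration and is not part of this commit.

# Minimal sketch, assuming a hypothetical repo id; substitute the actual MobiLlama repo.
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "your-org/MobiLlama"  # placeholder, not taken from this commit

tokenizer = AutoTokenizer.from_pretrained(model_id)
# trust_remote_code=True lets transformers resolve the auto_map entry and load
# modelling_mobillama.MobiLlamaForCausalLM instead of the stock LlamaForCausalLM.
model = AutoModelForCausalLM.from_pretrained(model_id, trust_remote_code=True)

inputs = tokenizer("Hello, my name is", return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=20)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))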