{
  "allow_embedding_resizing": true,
  "architectures": [
    "FlexBertModel",
    "FlexBertForCausalLM"
  ],
  "auto_map": {
    "AutoConfig": "orionweller/test-flex-gpt--configuration_bert.FlexBertConfig",
    "AutoModel": "orionweller/test-flex-gpt--modeling_flexbert.FlexBertModel",
    "AutoModelForCausalLM": "orionweller/test-flex-gpt--modeling_flexbert.FlexBertForCausalLM"
  },
  "attention_layer": "rope",
  "attention_probs_dropout_prob": 0.0,
  "attn_out_bias": false,
  "attn_out_dropout_prob": 0.1,
  "attn_qkv_bias": false,
  "bert_layer": "prenorm",
  "classifier_dropout": null,
  "compile_model": true,
  "decoder_bias": true,
  "deterministic_fa2": false,
  "embed_dropout_prob": 0.0,
  "embed_norm": true,
  "embedding_layer": "sans_pos",
  "encoder_layer": "base",
  "final_norm": true,
  "global_attn_every_n_layers": 3,
  "gradient_checkpointing": false,
  "head_class_act": "silu",
  "head_class_bias": false,
  "head_class_dropout": 0.0,
  "head_class_norm": false,
  "head_pred_act": "gelu",
  "head_pred_bias": false,
  "head_pred_dropout": 0.0,
  "head_pred_norm": true,
  "hidden_act": "gelu",
  "hidden_dropout_prob": 0.1,
  "hidden_size": 768,
  "init_cutoff_factor": 2.0,
  "init_method": "full_megatron",
  "init_small_embedding": false,
  "init_std": 0.02,
  "initial_attention_layer": null,
  "initial_bert_layer": null,
  "initial_mlp_layer": null,
  "initializer_range": 0.02,
  "intermediate_size": 1152,
  "layer_norm_eps": 1e-12,
  "local_attn_rotary_emb_base": -1,
  "local_attn_rotary_emb_dim": null,
  "loss_function": "fa_cross_entropy",
  "loss_kwargs": {
    "reduction": "mean"
  },
  "masked_prediction": false,
  "causal_mask": true,
  "max_position_embeddings": 1024,
  "mlp_dropout_prob": 0.0,
  "mlp_in_bias": false,
  "mlp_layer": "glu",
  "mlp_out_bias": false,
  "model_type": "flex_bert",
  "norm_kwargs": {
    "bias": false,
    "eps": 1e-05
  },
  "normalization": "layernorm",
  "num_attention_heads": 12,
  "num_hidden_layers": 22,
  "num_initial_layers": 1,
  "pad_logits": true,
  "pad_token_id": 0,
  "padding": "unpadded",
  "pooling_type": "cls",
  "position_embedding_type": "absolute",
  "rotary_emb_base": 10000.0,
  "rotary_emb_dim": 64,
  "rotary_emb_interleaved": false,
  "rotary_emb_scale_base": null,
  "skip_first_prenorm": true,
  "sliding_window": 128,
  "transformers_version": "4.44.1",
  "type_vocab_size": 2,
  "unpad_embeddings": true,
  "use_cache": true,
  "use_fa2": true,
  "use_sdpa_attn_mask": false,
  "vocab_size": 50368,
  "is_causal": true
}