VISOR-GPT/train/models/albert/xxlarge_config.json
{
  "emb_size": 128,
  "feedforward_size": 16384,
  "hidden_size": 4096,
  "hidden_act": "relu",
  "heads_num": 16,
  "layers_num": 12,
  "max_seq_length": 512,
  "dropout": 0.0,
  "data_processor": "albert",
  "embedding": ["word", "pos", "seg"],
  "encoder": "transformer",
  "mask": "fully_visible",
  "factorized_embedding_parameterization": true,
  "parameter_sharing": true,
  "target": ["mlm", "sp"]
}
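
The two flags at the bottom are what make this an ALBERT-style config rather than a plain BERT encoder: "factorized_embedding_parameterization" decouples the 128-dimensional token embedding from the 4096-dimensional hidden size, and "parameter_sharing" reuses the same Transformer layer weights across all layers. As a minimal sketch of how a loader in the UER-py style might consume this file (using only the Python standard library; the ModelConfig class and its field comments are illustrative assumptions, not the project's actual loader):

```python
# Minimal sketch: load xxlarge_config.json and expose its hyperparameters.
# The ModelConfig dataclass is for illustration only; VisorGPT/UER-py may
# read these keys differently.
import json
from dataclasses import dataclass
from typing import List


@dataclass
class ModelConfig:
    emb_size: int                 # factorized embedding dimension (128)
    feedforward_size: int         # FFN inner dimension (16384)
    hidden_size: int              # Transformer hidden dimension (4096)
    hidden_act: str               # FFN activation function
    heads_num: int                # attention heads per layer
    layers_num: int               # number of Transformer layers
    max_seq_length: int           # maximum input sequence length
    dropout: float
    data_processor: str
    embedding: List[str]          # embedding components, e.g. ["word", "pos", "seg"]
    encoder: str
    mask: str                     # "fully_visible" = bidirectional attention
    factorized_embedding_parameterization: bool
    parameter_sharing: bool       # ALBERT shares weights across layers
    target: List[str]             # pre-training objectives, e.g. ["mlm", "sp"]


def load_config(path: str) -> ModelConfig:
    """Read a JSON config and map its keys onto the dataclass fields."""
    with open(path, "r", encoding="utf-8") as f:
        return ModelConfig(**json.load(f))


if __name__ == "__main__":
    cfg = load_config("models/albert/xxlarge_config.json")
    print(cfg.hidden_size, cfg.heads_num, cfg.parameter_sharing)
```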