DNAPerceiver1_2epochs / config.json
Commit 5a585dd: Training in progress, step 6000
{
  "architectures": [
    "PerceiverForMaskedLM"
  ],
  "attention_probs_dropout_prob": 0.1,
  "audio_samples_per_frame": 1920,
  "cross_attention_shape_for_attention": "kv",
  "cross_attention_widening_factor": 1,
  "d_latents": 1280,
  "d_model": 768,
  "hidden_act": "gelu",
  "image_size": 56,
  "initializer_range": 0.02,
  "layer_norm_eps": 1e-12,
  "max_position_embeddings": 2048,
  "model_type": "perceiver",
  "num_blocks": 1,
  "num_cross_attention_heads": 8,
  "num_frames": 16,
  "num_hidden_layers": 6,
  "num_latents": 256,
  "num_self_attends_per_block": 26,
  "num_self_attention_heads": 8,
  "output_shape": [
    1,
    16,
    224,
    224
  ],
  "qk_channels": null,
  "samples_per_patch": 16,
  "self_attention_widening_factor": 1,
  "torch_dtype": "float32",
  "train_size": [
    368,
    496
  ],
  "transformers_version": "4.19.4",
  "use_query_residual": true,
  "v_channels": null,
  "vocab_size": 262
}
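
Since this is a standard Hugging Face PerceiverConfig, it can be loaded directly with the transformers library. Below is a minimal sketch; the repo id "simecek/DNAPerceiver1_2epochs" is an assumption inferred from this page, so substitute the actual repo path or a local directory containing this config.json.

import torch
from transformers import PerceiverConfig, PerceiverForMaskedLM

# Load the config shown above; the repo id is an assumption inferred from this page.
config = PerceiverConfig.from_pretrained("simecek/DNAPerceiver1_2epochs")

# Instantiate the class named in "architectures" with randomly initialized weights.
# Use PerceiverForMaskedLM.from_pretrained(...) instead to load the step-6000 checkpoint.
model = PerceiverForMaskedLM(config)

print(config.num_latents, config.d_latents)  # 256 latents of width 1280
print(config.max_position_embeddings)        # up to 2048 input positions
print(config.vocab_size)                     # 262 (Perceiver's byte-level vocabulary)

# Perceiver models take raw token ids via the `inputs` argument.
dummy = torch.randint(0, config.vocab_size, (1, config.max_position_embeddings))
logits = model(inputs=dummy).logits          # shape: (1, 2048, 262)

Note the Perceiver design reflected in the config: the cross-attention reads the 2048-token input into only 256 latents ("num_latents"), and the 26 self-attention layers per block ("num_self_attends_per_block") operate on that fixed-size latent array, so compute in the self-attention stack is independent of input length. The image/audio/video fields ("image_size", "audio_samples_per_frame", "num_frames", "output_shape", "train_size") are PerceiverConfig defaults for other modalities and are unused by the masked-LM head.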