{
  "architectures": [
    "Blip2ForConditionalGeneration"
  ],
  "image_text_hidden_size": 256,
  "image_token_index": 50265,
  "initializer_factor": 1.0,
  "initializer_range": 0.02,
  "model_type": "blip-2",
  "num_query_tokens": 32,
  "qformer_config": {
    "classifier_dropout": null,
    "model_type": "blip_2_qformer"
  },
  "text_config": {
    "activation_dropout": 0.0,
    "architectures": [
      "OPTForCausalLM"
    ],
    "eos_token_id": 50118,
    "ffn_dim": 16384,
    "hidden_size": 4096,
    "model_type": "opt",
    "num_attention_heads": 32,
    "num_hidden_layers": 32,
    "prefix": "</s>",
    "torch_dtype": "float16",
    "vocab_size": 50304,
    "word_embed_proj_dim": 4096
  },
  "torch_dtype": "float32",
  "transformers_version": "4.47.0.dev0",
  "use_decoder_only_language_model": true,
  "vision_config": {
    "dropout": 0.0,
    "image_size": 364,
    "initializer_factor": 1.0,
    "model_type": "blip_2_vision_model",
    "num_channels": 3,
    "projection_dim": 512
  }
}
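A minimal sketch of consuming this config with the `transformers` library. The dimensions in `text_config` (32 layers, hidden size 4096, FFN size 16384) correspond to an OPT-6.7b-class language model; the checkpoint name in the commented-out line is an assumption for illustration, not something stated by this file.

```python
# Sketch: rebuild the Blip2Config object from the config.json above.
import json

from transformers import Blip2Config, Blip2ForConditionalGeneration

# Assumes the JSON above is saved locally as "config.json".
with open("config.json") as f:
    config = Blip2Config.from_dict(json.load(f))

# A few fields surfaced as typed attributes on the nested configs:
print(config.num_query_tokens)         # 32 learned Q-Former query tokens
print(config.text_config.hidden_size)  # 4096 (OPT-6.7b-class decoder)
print(config.vision_config.image_size) # 364-pixel input resolution

# Loading weights from a hub checkpoint would look like this; the repo id
# below is a hypothetical placeholder, substitute the repo this config ships in:
# model = Blip2ForConditionalGeneration.from_pretrained("Salesforce/blip2-opt-6.7b-coco")
```

Note that `Blip2Config.from_dict` resolves the nested `vision_config`, `qformer_config`, and `text_config` dictionaries into their own config classes (using each sub-dict's `model_type`), which is why `config.text_config.hidden_size` is available as an attribute rather than a raw dict lookup.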