{ "_name_or_path": "sentence-transformers/all-MiniLM-L6-v2", "architectures": [ "BertModel" ], "attention_probs_dropout_prob": 0.1, "auto_cast": null, "auto_cast_type": null, "classifier_dropout": null, "dynamic_batch_size": true, "gradient_checkpointing": false, "hidden_act": "gelu", "hidden_dropout_prob": 0.1, "hidden_size": 384, "initializer_range": 0.02, "input_names": [ "input_ids", "attention_mask", "token_type_ids" ], "intermediate_size": 1536, "layer_norm_eps": 1e-12, "max_position_embeddings": 512, "model_type": "bert", "neuron_batch_size": 1, "neuron_sequence_length": 128, "num_attention_heads": 12, "num_hidden_layers": 6, "output_names": [ "last_hidden_state", "pooler_output" ], "pad_token_id": 0, "position_embedding_type": "absolute", "torchscript": true, "transformers_version": "4.29.2", "type_vocab_size": 2, "use_cache": true, "vocab_size": 30522 }