{ "architectures": [ "EfficientVitForSemanticSegmentation" ], "attention_probs_dropout_prob": 0.0, "classifier_dropout_prob": 0.0, "decoder_hidden_size": 512, "decoder_layer_hidden_size": 128, "depths": [ 1, 1, 1, 6, 6 ], "head_dim": 32, "hidden_dropout_prob": 0.0, "hidden_sizes": [ 32, 64, 160, 256 ], "initializer_range": 0.02, "layer_norm_eps": 1e-06, "model_type": "efficientvit", "num_channels": 3, "num_classes": 2, "num_stages": 4, "patch_size": [ 7, 7 ], "semantic_loss_ignore_index": -1, "torch_dtype": "float32", "transformers_version": "4.42.3", "widths": [ 32, 64, 128, 256, 512 ] }