{ "model_type": "CustomModel", "architecture": "CustomModel", "input_size": 512, "hidden_size": 128, "output_size": 768, "vocab_size": 30522, // Example vocab size, adjust according to your tokenizer's vocabulary "layer_norm_epsilon": 1e-12, "hidden_dropout_prob": 0.1, "num_attention_heads": 12, // Adjust if your model uses attention mechanisms "attention_probs_dropout_prob": 0.1, "intermediate_size": 3072, // Example size, adjust based on your model's architecture "num_hidden_layers": 12, // Adjust based on your model's depth "initializer_range": 0.02 }