x-lora-gemma-7b / xlora_config.json
{"base_model_id":"google/gemma-7b-it", "hidden_size": 3072, "adapters": {"adapter_1": "lamm-mit/x-lora-gemma-7b/adapter_1", "adapter_2": "lamm-mit/x-lora-gemma-7b/adapter_2", "adapter_3": "lamm-mit/x-lora-gemma-7b/adapter_3", "adapter_4": "lamm-mit/x-lora-gemma-7b/adapter_4"}, "enable_softmax": true, "enable_softmax_topk": false, "layerwise_scalings": true, "xlora_depth": 2, "xlora_size": 2048, "enable_relu_and_dropout": true, "use_bias": true, "xlora_dropout_p": 0.2, "use_trainable_adapters": false, "softmax_temperature": 1.0, "top_k_lora": null, "scaling_pass_value": 0, "global_scaling_weight": 1.0}