Update xlora_config.json
xlora_config.json  CHANGED  +23 -1
@@ -1 +1,23 @@
-{
+{
+    "base_model_id": "google/gemma-7b-it",
+    "hidden_size": 3072,
+    "adapters": {
+        "adapter_1": "lamm-mit/x-lora-gemma-7b/adapter_1",
+        "adapter_2": "lamm-mit/x-lora-gemma-7b/adapter_2",
+        "adapter_3": "lamm-mit/x-lora-gemma-7b/adapter_3",
+        "adapter_4": "lamm-mit/x-lora-gemma-7b/adapter_4"
+    },
+    "enable_softmax": true,
+    "enable_softmax_topk": false,
+    "layerwise_scalings": true,
+    "xlora_depth": 2,
+    "xlora_size": 2048,
+    "enable_relu_and_dropout": true,
+    "use_bias": true,
+    "xlora_dropout_p": 0.2,
+    "use_trainable_adapters": false,
+    "softmax_temperature": 1,
+    "top_k_lora": null,
+    "scaling_pass_value": 0,
+    "global_scaling_weight": 1
+}
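For reference, the updated config can be read and sanity-checked with a short Python snippet. This is only a minimal sketch using the standard library; load_xlora_config is a hypothetical helper for illustration and is not part of the X-LoRA or PEFT APIs.

import json

def load_xlora_config(path: str) -> dict:
    """Hypothetical helper: read xlora_config.json and check a few expected fields."""
    with open(path) as f:
        cfg = json.load(f)

    # The scaling classifier maps hidden states of size `hidden_size` (3072 for
    # google/gemma-7b-it) to one scaling per adapter; this config lists 4 adapters.
    assert cfg["hidden_size"] == 3072
    assert len(cfg["adapters"]) == 4

    # Softmax over adapter scalings is enabled, top-k selection is not.
    assert cfg["enable_softmax"] and not cfg["enable_softmax_topk"]
    return cfg

if __name__ == "__main__":
    cfg = load_xlora_config("xlora_config.json")
    for name, repo_path in cfg["adapters"].items():
        print(f"{name}: {repo_path}")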