llama2-13b-qlora-openassistant / adapter_config.json
{
"config": {
"alpha": 16,
"architecture": "lora",
"attn_matrices": [
"q",
"k",
"v"
],
"composition_mode": "add",
"dropout": 0.1,
"init_weights": "lora",
"intermediate_lora": true,
"leave_out": [],
"output_lora": true,
"r": 64,
"selfattn_lora": true,
"use_gating": false
},
"hidden_size": 5120,
"model_class": "LlamaForCausalLM",
"model_name": "meta-llama/Llama-2-13b-hf",
"model_type": "llama",
"name": "assistant_adapter",
"version": "0.1.2"
}
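For reference, the "config" block above maps directly onto a LoRAConfig from the adapters library (the file was written by adapters version 0.1.2, as the "version" field indicates). Below is a minimal sketch of recreating the same adapter setup on the base model; the field values are copied from adapter_config.json, while the surrounding loading and training calls are an illustration of standard adapters usage, not the exact script used to produce this checkpoint. Quantized (QLoRA) loading of the 13B base model is omitted for brevity.

# Sketch: rebuilding the adapter configuration above with the adapters library.
from transformers import AutoModelForCausalLM
import adapters
from adapters import LoRAConfig

model = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-2-13b-hf")
adapters.init(model)  # enable adapter support on the plain transformers model

lora_config = LoRAConfig(
    r=64,                           # "r": 64
    alpha=16,                       # "alpha": 16
    dropout=0.1,                    # "dropout": 0.1
    attn_matrices=["q", "k", "v"],  # LoRA on query, key and value projections
    selfattn_lora=True,             # "selfattn_lora": true
    intermediate_lora=True,         # "intermediate_lora": true
    output_lora=True,               # "output_lora": true
    composition_mode="add",         # "composition_mode": "add"
    init_weights="lora",            # "init_weights": "lora"
    use_gating=False,               # "use_gating": false
)

model.add_adapter("assistant_adapter", config=lora_config)
model.train_adapter("assistant_adapter")  # freeze base weights, train only the LoRA modules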