Upload LlamaForCausalLM
- README.md +42 -0
- adapter_config.json +26 -0
- head_config.json +14 -0
- pytorch_adapter.bin +3 -0
- pytorch_model_head.bin +3 -0
README.md
ADDED
@@ -0,0 +1,42 @@
---
tags:
- adapter-transformers
- llama
datasets:
- timdettmers/openassistant-guanaco
---

# Adapter `AdapterHub/llama2-13b-qlora-openassistant` for meta-llama/Llama-2-13b-hf

An [adapter](https://adapterhub.ml) for the `meta-llama/Llama-2-13b-hf` model that was trained on the [timdettmers/openassistant-guanaco](https://huggingface.co/datasets/timdettmers/openassistant-guanaco/) dataset.

This adapter was created for use with the **[Adapters](https://github.com/Adapter-Hub/adapters)** library.

## Usage

First, install `adapters`:

```
pip install -U adapters
```

Now, the adapter can be loaded and activated like this:

```python
from adapters import AutoAdapterModel

model = AutoAdapterModel.from_pretrained("meta-llama/Llama-2-13b-hf")
adapter_name = model.load_adapter("AdapterHub/llama2-13b-qlora-openassistant", source="hf", set_active=True)
```
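
Once loaded with `set_active=True`, the model can generate text like any other causal LM. Below is a minimal sketch, assuming the causal LM head stored in this repo is loaded together with the adapter; the prompt format is an illustrative guess based on the openassistant-guanaco chat style, not part of the original card:

```python
from transformers import AutoTokenizer

# The tokenizer comes from the base model; the adapter only adds LoRA weights.
tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-13b-hf")

# Illustrative prompt (assumption, not from the original card).
prompt = "### Human: What is a language model?### Assistant:"
inputs = tokenizer(prompt, return_tensors="pt")

# With the adapter active, generation runs through the LoRA-adapted weights.
outputs = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```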

## Architecture & Training

<!-- Add some description here -->

## Evaluation results

<!-- Add some description here -->

## Citation

<!-- Add some description here -->
adapter_config.json
ADDED
@@ -0,0 +1,26 @@
{
  "config": {
    "alpha": 16,
    "architecture": "lora",
    "attn_matrices": [
      "q",
      "k",
      "v"
    ],
    "composition_mode": "add",
    "dropout": 0.1,
    "init_weights": "lora",
    "intermediate_lora": true,
    "leave_out": [],
    "output_lora": true,
    "r": 64,
    "selfattn_lora": true,
    "use_gating": false
  },
  "hidden_size": 5120,
  "model_class": "LlamaForCausalLM",
  "model_name": "meta-llama/Llama-2-13b-hf",
  "model_type": "llama",
  "name": "assistant_adapter",
  "version": "0.1.2"
}
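The `"config"` block above describes a rank-64 LoRA (alpha 16, dropout 0.1) applied to the q/k/v self-attention projections as well as the intermediate and output layers. As a hedged sketch, the same configuration could be rebuilt in the `adapters` library roughly like this (field names mirror the JSON; verify against your installed version):

```python
from adapters import LoRAConfig

# Mirrors adapter_config.json: rank-64 LoRA with alpha 16 on the q/k/v
# attention matrices plus the intermediate and output linear layers.
config = LoRAConfig(
    r=64,
    alpha=16,
    dropout=0.1,
    attn_matrices=["q", "k", "v"],
    selfattn_lora=True,
    intermediate_lora=True,
    output_lora=True,
    composition_mode="add",
    init_weights="lora",
    use_gating=False,
)
model.add_adapter("assistant_adapter", config=config)
```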
head_config.json
ADDED
@@ -0,0 +1,14 @@
{
  "config": null,
  "hidden_size": 5120,
  "label2id": {
    "LABEL_0": 0,
    "LABEL_1": 1
  },
  "model_class": "LlamaForCausalLM",
  "model_name": "meta-llama/Llama-2-13b-hf",
  "model_type": "llama",
  "name": null,
  "num_labels": 2,
  "version": "0.1.2"
}
pytorch_adapter.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:5a135bcbac28185700669281ccc99b4a67d594bf3f0bc512edd9feac81b2dce5
size 702691378
pytorch_model_head.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:d64423b83dcacfa1d4449a34c264a1eaaf846946f2612483e8797a85758e951f
size 327681299
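Both `.bin` entries are Git LFS pointer files: the repository itself records only the SHA-256 `oid` and byte `size`, while the actual tensors live in LFS storage. A downloaded blob can be checked against its pointer with a short script (a sketch; the local path is an assumption):

```python
import hashlib
import os

path = "pytorch_adapter.bin"  # hypothetical local path to the fetched blob
expected_oid = "5a135bcbac28185700669281ccc99b4a67d594bf3f0bc512edd9feac81b2dce5"
expected_size = 702691378

# Hash the file in 1 MiB chunks to avoid loading ~700 MB into memory.
h = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        h.update(chunk)

assert os.path.getsize(path) == expected_size, "size mismatch"
assert h.hexdigest() == expected_oid, "checksum mismatch"
print("blob matches its LFS pointer")
```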