atsuki-yamaguchi committed
Commit 4df801c • 1 Parent(s): 5f9a781
Upload folder using huggingface_hub

Browse files:
- README.md +16 -24
- adapter_config.json +1 -23
- optimizer.pt +3 -0
- rng_state.pth +3 -0
- scheduler.pt +3 -0
- trainer_state.json +0 -0
- training_args.bin +3 -0
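The commit message "Upload folder using huggingface_hub" is the default message the `huggingface_hub` client writes when a local folder is pushed with its upload API. A minimal sketch of how such a commit is typically produced; the folder path and repo id below are placeholders, not values taken from this commit:

```python
# Minimal sketch of pushing a local checkpoint folder with huggingface_hub.
# folder_path and repo_id are placeholders, not taken from this commit.
from huggingface_hub import HfApi

api = HfApi()
api.upload_folder(
    folder_path="./checkpoint",               # local dir with README.md, adapter files, optimizer state, etc.
    repo_id="your-username/your-model-repo",  # target model repository on the Hub
    commit_message="Upload folder using huggingface_hub",
)
```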
README.md
CHANGED
@@ -1,29 +1,21 @@
 ---
-language:
-- sw
+library_name: peft
 ---
-
-===
-
-## How to use
-```python
-from transformers import AutoModelForCausalLM
-
-@article{yamaguchi2024empirical,
-  title={An Empirical Study on Cross-lingual Vocabulary Adaptation for Efficient Generative {LLM} Inference},
-  author={Atsuki Yamaguchi and Aline Villavicencio and Nikolaos Aletras},
-  journal={ArXiv},
-  year={2024},
-  volume={abs/2402.10712},
-  url={https://arxiv.org/abs/2402.10712}
-}
-```
+
+## Training procedure
+
+The following `bitsandbytes` quantization config was used during training:
+- quant_method: bitsandbytes
+- load_in_8bit: True
+- load_in_4bit: False
+- llm_int8_threshold: 6.0
+- llm_int8_skip_modules: None
+- llm_int8_enable_fp32_cpu_offload: False
+- llm_int8_has_fp16_weight: False
+- bnb_4bit_quant_type: fp4
+- bnb_4bit_use_double_quant: False
+- bnb_4bit_compute_dtype: float32
+
+### Framework versions
+
+- PEFT 0.5.0
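The quantization settings recorded in the new README correspond to `transformers`' `BitsAndBytesConfig`. A minimal sketch of reproducing the same 8-bit setup at load time; the base model id is the one recorded in adapter_config.json below, and none of this is code shipped with the repository:

```python
# Minimal sketch reproducing the 8-bit bitsandbytes setup listed in the README.
# Illustrative only; not code from the repository.
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

bnb_config = BitsAndBytesConfig(
    load_in_8bit=True,                       # load_in_8bit: True
    llm_int8_threshold=6.0,                  # llm_int8_threshold: 6.0
    llm_int8_has_fp16_weight=False,          # llm_int8_has_fp16_weight: False
    llm_int8_enable_fp32_cpu_offload=False,  # llm_int8_enable_fp32_cpu_offload: False
)

# "bigscience/bloom-1b1" is the base model recorded in adapter_config.json.
base_model = AutoModelForCausalLM.from_pretrained(
    "bigscience/bloom-1b1",
    quantization_config=bnb_config,
    device_map="auto",
)
```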
adapter_config.json
CHANGED
@@ -1,23 +1 @@
-{
-  "auto_mapping": null,
-  "base_model_name_or_path": "/mnt/parscratch/users/acp23ay/private/models/bloom-1b1-sw-pruned",
-  "bias": "none",
-  "fan_in_fan_out": false,
-  "inference_mode": true,
-  "init_lora_weights": true,
-  "layers_pattern": null,
-  "layers_to_transform": null,
-  "lora_alpha": 32,
-  "lora_dropout": 0.05,
-  "modules_to_save": null,
-  "peft_type": "LORA",
-  "r": 8,
-  "revision": null,
-  "target_modules": [
-    "query_key_value",
-    "dense",
-    "dense_h_to_4h",
-    "dense_4h_to_h"
-  ],
-  "task_type": "CAUSAL_LM"
-}
+{"auto_mapping": null, "base_model_name_or_path": "bigscience/bloom-1b1", "bias": "none", "fan_in_fan_out": false, "inference_mode": true, "init_lora_weights": true, "layers_pattern": null, "layers_to_transform": null, "lora_alpha": 32, "lora_dropout": 0.05, "modules_to_save": null, "peft_type": "LORA", "r": 8, "revision": null, "target_modules": ["query_key_value", "dense", "dense_h_to_4h", "dense_4h_to_h"], "task_type": "CAUSAL_LM"}
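This diff retargets the adapter from a local pruned checkpoint to the public bigscience/bloom-1b1 base model while keeping the LoRA hyperparameters (r=8, lora_alpha=32, dropout 0.05) and target modules unchanged. A minimal sketch of attaching such an adapter with PEFT; the adapter path is a placeholder for wherever this repository lives locally or on the Hub:

```python
# Minimal sketch of loading the LoRA adapter described in adapter_config.json.
# "path/or/repo-id/of-this-adapter" is a placeholder, not a real identifier.
from peft import PeftModel
from transformers import AutoModelForCausalLM

base_model = AutoModelForCausalLM.from_pretrained("bigscience/bloom-1b1")

# Attaches the r=8, alpha=32 LoRA weights targeting query_key_value, dense,
# dense_h_to_4h and dense_4h_to_h on top of the frozen base model.
model = PeftModel.from_pretrained(base_model, "path/or/repo-id/of-this-adapter")
model.eval()
```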
optimizer.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ccb5bd0294e777f087e727fbb22312ec569ccd6d91fd79f086cc659c6206f3af
+size 9671738
rng_state.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:14dc63b8088e5f154919e2f30dfb59a9c87431aa177fd55c6cbb110da46ba761
+size 14244
scheduler.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e67065b773fbfa9ad2f6876a2d097ec78b70a850ce3ddb1c1d5f72245a3906b6
+size 1064
trainer_state.json
ADDED
The diff for this file is too large to render.
See raw diff
training_args.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:02cb20098a74f6bcdc58b919ae4d85e1127d3e50cf5f4de81b9f25426a213740
+size 4664