nielsbantilan committed
Commit 896ccbc
1 Parent(s): 7fd7ee6

Upload folder using huggingface_hub

Files changed:
- README.md +18 -13
- adapter_config.json +22 -0
- adapter_model.bin +3 -0
- checkpoint-1000/README.md +21 -0
- checkpoint-1000/adapter_config.json +22 -0
- checkpoint-1000/adapter_model.bin +3 -0
- checkpoint-1000/optimizer.pt +3 -0
- checkpoint-1000/rng_state.pth +3 -0
- checkpoint-1000/scheduler.pt +3 -0
- checkpoint-1000/trainer_state.json +0 -0
- checkpoint-1000/training_args.bin +3 -0
- training_args.bin +3 -0
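The commit message says the folder was pushed with `huggingface_hub`. A minimal sketch of that kind of upload, assuming the standard `HfApi.upload_folder` call; the local path and target repo id below are placeholders, not taken from this commit:

```python
# Sketch: uploading a local training output folder to the Hub.
# folder_path and repo_id are placeholders, not taken from this commit.
from huggingface_hub import HfApi

api = HfApi()  # picks up the token from `huggingface-cli login` by default
api.upload_folder(
    folder_path="./output",              # local directory with adapter + checkpoint files
    repo_id="your-username/your-model",  # hypothetical target repo
    commit_message="Upload folder using huggingface_hub",
)
```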
README.md
CHANGED
@@ -1,16 +1,21 @@
 ---
-language:
-- en
-license: apache-2.0
-tags:
-- pytorch
-- causal-lm
-- llama2
-- code llama
-- fine-tuning
-- flyte llama
-- flyte repo dataset
-
+library_name: peft
 ---
+## Training procedure
+
+
+The following `bitsandbytes` quantization config was used during training:
+- quant_method: bitsandbytes
+- load_in_8bit: False
+- load_in_4bit: True
+- llm_int8_threshold: 6.0
+- llm_int8_skip_modules: None
+- llm_int8_enable_fp32_cpu_offload: True
+- llm_int8_has_fp16_weight: False
+- bnb_4bit_quant_type: nf4
+- bnb_4bit_use_double_quant: True
+- bnb_4bit_compute_dtype: bfloat16
+### Framework versions
+
 
-
+- PEFT 0.6.0.dev0
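The quantization settings added to the README map one-to-one onto a `transformers.BitsAndBytesConfig`. A minimal sketch of building an equivalent config and loading the base model with it, assuming the standard `transformers` + `bitsandbytes` stack (the original training script is not part of this commit):

```python
# Sketch: a BitsAndBytesConfig matching the values listed in the README diff.
# How the original training script constructed it is an assumption.
import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_use_double_quant=True,
    bnb_4bit_compute_dtype=torch.bfloat16,
    llm_int8_threshold=6.0,
    llm_int8_enable_fp32_cpu_offload=True,
)

model = AutoModelForCausalLM.from_pretrained(
    "codellama/CodeLlama-7b-hf",
    quantization_config=bnb_config,
    device_map="auto",
)
```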
adapter_config.json
ADDED
@@ -0,0 +1,22 @@
+{
+  "auto_mapping": null,
+  "base_model_name_or_path": "codellama/CodeLlama-7b-hf",
+  "bias": "none",
+  "fan_in_fan_out": false,
+  "inference_mode": true,
+  "init_lora_weights": true,
+  "layers_pattern": null,
+  "layers_to_transform": null,
+  "lora_alpha": 16,
+  "lora_dropout": 0.05,
+  "modules_to_save": null,
+  "peft_type": "LORA",
+  "r": 8,
+  "revision": null,
+  "target_modules": [
+    "q_proj",
+    "k_proj",
+    "v_proj"
+  ],
+  "task_type": "CAUSAL_LM"
+}
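`adapter_config.json` describes a standard PEFT LoRA adapter: rank 8, alpha 16, dropout 0.05, applied to the `q_proj`, `k_proj`, and `v_proj` attention projections of `codellama/CodeLlama-7b-hf`. A minimal sketch of loading the adapter for inference; the adapter repo id is a placeholder:

```python
# Sketch: loading this LoRA adapter on top of the base model with PEFT.
# "your-username/flyte-llama" is a placeholder repo id, not taken from this commit.
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

base = AutoModelForCausalLM.from_pretrained("codellama/CodeLlama-7b-hf")
tokenizer = AutoTokenizer.from_pretrained("codellama/CodeLlama-7b-hf")

# Applies the r=8 LoRA weights to q_proj/k_proj/v_proj as described in adapter_config.json.
model = PeftModel.from_pretrained(base, "your-username/flyte-llama")
model.eval()
```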
adapter_model.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:65fba583ea021926e6166454a52242012af6b738e559ebeea7ea700580afeda2
+size 25234701
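`adapter_model.bin` is tracked with Git LFS, so only the pointer file appears in the diff: the spec version, the SHA-256 of the actual blob, and its size in bytes. A small sketch of verifying a downloaded copy against that pointer, using only the standard library; the file path is assumed:

```python
# Sketch: verifying a downloaded file against its Git LFS pointer (oid + size).
# The file path is a placeholder.
import hashlib
from pathlib import Path

pointer_oid = "65fba583ea021926e6166454a52242012af6b738e559ebeea7ea700580afeda2"
pointer_size = 25234701

blob = Path("adapter_model.bin").read_bytes()
assert len(blob) == pointer_size, "size mismatch"
assert hashlib.sha256(blob).hexdigest() == pointer_oid, "sha256 mismatch"
print("adapter_model.bin matches its LFS pointer")
```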
checkpoint-1000/README.md
ADDED
@@ -0,0 +1,21 @@
+---
+library_name: peft
+---
+## Training procedure
+
+
+The following `bitsandbytes` quantization config was used during training:
+- quant_method: bitsandbytes
+- load_in_8bit: False
+- load_in_4bit: True
+- llm_int8_threshold: 6.0
+- llm_int8_skip_modules: None
+- llm_int8_enable_fp32_cpu_offload: True
+- llm_int8_has_fp16_weight: False
+- bnb_4bit_quant_type: nf4
+- bnb_4bit_use_double_quant: True
+- bnb_4bit_compute_dtype: bfloat16
+### Framework versions
+
+
+- PEFT 0.6.0.dev0
checkpoint-1000/adapter_config.json
ADDED
@@ -0,0 +1,22 @@
+{
+  "auto_mapping": null,
+  "base_model_name_or_path": "codellama/CodeLlama-7b-hf",
+  "bias": "none",
+  "fan_in_fan_out": false,
+  "inference_mode": true,
+  "init_lora_weights": true,
+  "layers_pattern": null,
+  "layers_to_transform": null,
+  "lora_alpha": 16,
+  "lora_dropout": 0.05,
+  "modules_to_save": null,
+  "peft_type": "LORA",
+  "r": 8,
+  "revision": null,
+  "target_modules": [
+    "q_proj",
+    "k_proj",
+    "v_proj"
+  ],
+  "task_type": "CAUSAL_LM"
+}
checkpoint-1000/adapter_model.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a48c0c39953e7da1c781a020ac877737ef94f7d5097e8193ad1dd7834d276884
+size 25234701
checkpoint-1000/optimizer.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8569b0428ca9a60ad255609bd91e1860cbf2bf63c029e11719667b4b2bb8d7b4
+size 12851653
checkpoint-1000/rng_state.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:eb6945f991f0d8960ba634533dddffce6efa2657ea95f847cfeeac8d3545ae2b
+size 14575
checkpoint-1000/scheduler.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:df8fa21f08a145b2f0826ed3aa74cbe464877da0bc7cb032b6fb04e5bb40df1c
+size 627
checkpoint-1000/trainer_state.json
ADDED
The diff for this file is too large to render.
checkpoint-1000/training_args.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c09776b37bac13462722ac2da0b8b15edb74f5adf047d34dceefe023d86394f8
+size 4027
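`checkpoint-1000/` is a standard Hugging Face `Trainer` checkpoint: the adapter weights, optimizer and LR scheduler state, RNG state, the trainer state log, and the pickled training arguments. A small sketch of inspecting those artifacts locally, assuming the repo has been downloaded with its LFS files resolved:

```python
# Sketch: inspecting the uploaded checkpoint artifacts locally.
# Paths assume the repository has been downloaded with LFS files resolved.
import json
import torch

# training_args.bin is a pickled transformers.TrainingArguments object.
training_args = torch.load("checkpoint-1000/training_args.bin", weights_only=False)
print(training_args)

# trainer_state.json records the global step and the logged training history.
with open("checkpoint-1000/trainer_state.json") as f:
    state = json.load(f)
print(state["global_step"], state["log_history"][-1])
```

Given a rebuilt training setup, the same directory can also be passed to `Trainer.train(resume_from_checkpoint=...)` to continue the run from this checkpoint.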
training_args.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c09776b37bac13462722ac2da0b8b15edb74f5adf047d34dceefe023d86394f8
+size 4027