ZeroUniqueness committed
Commit dca7161 · Parent: bf42176
Training in progress, step 200

Files changed:
- adapter_config.json +5 -5
- adapter_model.bin +3 -0
- checkpoint-200/README.md +20 -0
- checkpoint-200/adapter_config.json +26 -0
- checkpoint-200/adapter_model.bin +3 -0
- checkpoint-200/optimizer.pt +3 -0
- checkpoint-200/rng_state_0.pth +3 -0
- checkpoint-200/rng_state_1.pth +3 -0
- checkpoint-200/rng_state_10.pth +3 -0
- checkpoint-200/rng_state_11.pth +3 -0
- checkpoint-200/rng_state_12.pth +3 -0
- checkpoint-200/rng_state_13.pth +3 -0
- checkpoint-200/rng_state_2.pth +3 -0
- checkpoint-200/rng_state_3.pth +3 -0
- checkpoint-200/rng_state_4.pth +3 -0
- checkpoint-200/rng_state_5.pth +3 -0
- checkpoint-200/rng_state_6.pth +3 -0
- checkpoint-200/rng_state_7.pth +3 -0
- checkpoint-200/rng_state_8.pth +3 -0
- checkpoint-200/rng_state_9.pth +3 -0
- checkpoint-200/scheduler.pt +3 -0
- checkpoint-200/trainer_state.json +40 -0
- checkpoint-200/training_args.bin +3 -0
- training_args.bin +3 -0

adapter_config.json CHANGED
@@ -3,7 +3,7 @@
   "base_model_name_or_path": "/workspace/webui/models/TheBloke_Llama-2-13B-fp16",
   "bias": "none",
   "fan_in_fan_out": null,
-  "inference_mode": false,
+  "inference_mode": true,
   "init_lora_weights": true,
   "layers_pattern": null,
   "layers_to_transform": null,
@@ -14,13 +14,13 @@
   "r": 32,
   "revision": null,
   "target_modules": [
+    "q_proj",
+    "up_proj",
+    "gate_proj",
     "k_proj",
     "down_proj",
     "o_proj",
-    "v_proj",
-    "gate_proj",
-    "up_proj",
-    "q_proj"
+    "v_proj"
   ],
   "task_type": "CAUSAL_LM"
 }
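
The diff above flips `inference_mode` to `true` and reorders `target_modules`; the set of targeted projection layers is unchanged, so the adapter weights are compatible either way. A minimal sketch of applying an adapter saved with this config, assuming `transformers` and `peft` are installed; the adapter directory path is a hypothetical stand-in, not taken from this repo:

```python
# Minimal sketch, not from this repo: load the base model named in
# adapter_config.json and attach the LoRA adapter with peft.
# Assumption: "adapter_dir" is a hypothetical path to a directory holding
# adapter_config.json and adapter_model.bin.
from transformers import AutoModelForCausalLM
from peft import PeftModel

base = AutoModelForCausalLM.from_pretrained(
    "/workspace/webui/models/TheBloke_Llama-2-13B-fp16"  # base_model_name_or_path above
)
model = PeftModel.from_pretrained(base, "adapter_dir")  # hypothetical adapter directory
model.eval()  # "inference_mode": true marks the adapter as saved for inference
```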

adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:15e3346b176d6fcd29f1030224d15a9659a7886dfde431254b0c2c482f7f12ee
+size 500897101
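
The three lines above are not the adapter weights themselves but a Git LFS pointer: a spec version line, the SHA-256 of the real payload, and its size in bytes (here about 500 MB). A small sketch that parses this fixed three-line layout:

```python
# Parse a Git LFS pointer file of the three-line form shown above
# (version / oid / size), returning its fields as a dict.
def parse_lfs_pointer(text: str) -> dict:
    fields = {}
    for line in text.strip().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    return fields

pointer = (
    "version https://git-lfs.github.com/spec/v1\n"
    "oid sha256:15e3346b176d6fcd29f1030224d15a9659a7886dfde431254b0c2c482f7f12ee\n"
    "size 500897101\n"
)
info = parse_lfs_pointer(pointer)
print(info["oid"], int(info["size"]))  # sha256:15e3... 500897101
```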

checkpoint-200/README.md ADDED
@@ -0,0 +1,20 @@
+---
+library_name: peft
+---
+## Training procedure
+
+
+The following `bitsandbytes` quantization config was used during training:
+- load_in_8bit: False
+- load_in_4bit: True
+- llm_int8_threshold: 6.0
+- llm_int8_skip_modules: None
+- llm_int8_enable_fp32_cpu_offload: False
+- llm_int8_has_fp16_weight: False
+- bnb_4bit_quant_type: nf4
+- bnb_4bit_use_double_quant: True
+- bnb_4bit_compute_dtype: bfloat16
+### Framework versions
+
+
+- PEFT 0.5.0.dev0
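
The quantization settings recorded in this README map one-to-one onto a `transformers` `BitsAndBytesConfig`. A sketch of the equivalent object, assuming the training script built the config this way (the script itself is not part of this commit):

```python
import torch
from transformers import BitsAndBytesConfig

# Mirrors the values listed in checkpoint-200/README.md: 4-bit NF4
# quantization with double quantization and bfloat16 compute (QLoRA-style).
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,                      # load_in_4bit: True
    bnb_4bit_quant_type="nf4",              # bnb_4bit_quant_type: nf4
    bnb_4bit_use_double_quant=True,         # bnb_4bit_use_double_quant: True
    bnb_4bit_compute_dtype=torch.bfloat16,  # bnb_4bit_compute_dtype: bfloat16
    llm_int8_threshold=6.0,                 # llm_int8_threshold: 6.0
)
```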

checkpoint-200/adapter_config.json ADDED
@@ -0,0 +1,26 @@
+{
+  "auto_mapping": null,
+  "base_model_name_or_path": "/workspace/webui/models/TheBloke_Llama-2-13B-fp16",
+  "bias": "none",
+  "fan_in_fan_out": null,
+  "inference_mode": true,
+  "init_lora_weights": true,
+  "layers_pattern": null,
+  "layers_to_transform": null,
+  "lora_alpha": 16,
+  "lora_dropout": 0.05,
+  "modules_to_save": null,
+  "peft_type": "LORA",
+  "r": 32,
+  "revision": null,
+  "target_modules": [
+    "q_proj",
+    "up_proj",
+    "gate_proj",
+    "k_proj",
+    "down_proj",
+    "o_proj",
+    "v_proj"
+  ],
+  "task_type": "CAUSAL_LM"
+}
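
This snapshot of the adapter config corresponds directly to a `peft.LoraConfig`: rank-32 LoRA with alpha 16 and 5% dropout on all seven Llama attention and MLP projections. A sketch of the equivalent constructor call (an assumption about how the run was configured, not code taken from this repo):

```python
from peft import LoraConfig

# Equivalent of checkpoint-200/adapter_config.json.
lora_config = LoraConfig(
    r=32,                # LoRA rank
    lora_alpha=16,       # scaling numerator (effective scale alpha/r = 0.5)
    lora_dropout=0.05,
    bias="none",
    task_type="CAUSAL_LM",
    target_modules=[
        "q_proj", "k_proj", "v_proj", "o_proj",  # attention projections
        "gate_proj", "up_proj", "down_proj",     # MLP projections
    ],
)
```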

checkpoint-200/adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:15e3346b176d6fcd29f1030224d15a9659a7886dfde431254b0c2c482f7f12ee
+size 500897101

checkpoint-200/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c7279df0ba6a87a3f75b7d314713b3e2ad235ea556deace54c5ed88731053c8e
+size 1001736445

checkpoint-200/rng_state_0.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:da64efc9dde53926f928c894617fbf0d3cb2ae763b94af40d50272f40390946d
+size 27772

checkpoint-200/rng_state_1.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8db417e21e6b6d1ae789628c94b5f1ed08d62047fc09f30b693fcd005bebda36
+size 27772

checkpoint-200/rng_state_10.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cf26229b77cf29a765bf52ae8a7c1da17f8f3f15fc5f78763162e594fa63670b
+size 27789

checkpoint-200/rng_state_11.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:db97f6b3f80c0bc222d155530394c9cde17cdd394f4271b4f329eae8a9fce649
+size 27789

checkpoint-200/rng_state_12.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a8136f8788ad62a1b95fc461ab5c54ebfc79c5c3147e5496579a03d3b2c4a073
+size 27789

checkpoint-200/rng_state_13.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:46ee750ae3cf6051a5897d7940e410cb3e8326ebf95634e2106ede6d0f1a164f
+size 27789

checkpoint-200/rng_state_2.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9ff35143fc37295c6bb931e10ada59cb846c3becb29a8c330c4bcb2e2d7dbf1c
+size 27772

checkpoint-200/rng_state_3.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e4618a675e687e5c0fb8f6418051239a91b2cdaf24246d5cedb509956f23ec5f
+size 27772

checkpoint-200/rng_state_4.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7f92bb724819a9b37f50673a2ef764c216893ce38d47800c4d1247ca27567b0e
+size 27772

checkpoint-200/rng_state_5.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5f65da5c3fd524d57e1e393855ca2e460e1ca1e0e63675872313d79b6fda74be
+size 27772

checkpoint-200/rng_state_6.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ab194c2a0e0b5f9bfccfa318cc6a981f235da2b7d08e65be576e723aac19c318
+size 27772

checkpoint-200/rng_state_7.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4046dacc20fadf115d03161f72d3531c63569d4b39f03bfc1090911d6e1e9f93
+size 27772

checkpoint-200/rng_state_8.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:21fea850cb0c10a02794ed024b9d4248ecf9bd626138e47995dba6aa8502927a
+size 27772

checkpoint-200/rng_state_9.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:74a308f6513afeac6867267883a39bd0a3c95a2bea71b416b4390e0023b46dae
+size 27772

checkpoint-200/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:024d4cf2a426f07c2e2ce03edd6538c1ac6fcaded49d4ec29b7b8a390c1fd54c
+size 627

checkpoint-200/trainer_state.json ADDED
@@ -0,0 +1,40 @@
+{
+  "best_metric": null,
+  "best_model_checkpoint": null,
+  "epoch": 0.07754943776657619,
+  "global_step": 200,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 0.02,
+      "learning_rate": 0.0001999867761371633,
+      "loss": 1.0435,
+      "step": 50
+    },
+    {
+      "epoch": 0.04,
+      "learning_rate": 0.00019993306018843102,
+      "loss": 0.8918,
+      "step": 100
+    },
+    {
+      "epoch": 0.06,
+      "learning_rate": 0.00019983804784290833,
+      "loss": 0.8874,
+      "step": 150
+    },
+    {
+      "epoch": 0.08,
+      "learning_rate": 0.00019970177836355307,
+      "loss": 0.8839,
+      "step": 200
+    }
+  ],
+  "max_steps": 7737,
+  "num_train_epochs": 3,
+  "total_flos": 8.600712554731274e+17,
+  "trial_name": null,
+  "trial_params": null
+}
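
The counters above are internally consistent: 7737 max steps over 3 epochs gives 2579 steps per epoch, and 200 / 2579 reproduces the fractional `epoch` value. A quick check:

```python
# Sanity-check trainer_state.json:
# epoch == global_step / (max_steps / num_train_epochs).
max_steps, num_train_epochs, global_step = 7737, 3, 200
steps_per_epoch = max_steps / num_train_epochs  # 2579.0
print(global_step / steps_per_epoch)            # ~0.0775494..., matching the "epoch" field
```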

checkpoint-200/training_args.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e7d8e6ca3ee52d610ea6af580762703958acb68894884ff8627b9a467240686f
+size 4027
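
Together with the fourteen per-rank RNG states (suggesting a 14-process run), optimizer.pt, scheduler.pt, and trainer_state.json, this directory is a complete `transformers` `Trainer` checkpoint, so the run can be resumed exactly where step 200 left off. A sketch, assuming the trainer is rebuilt with the same setup as the original run (none of that setup is part of this commit):

```python
# Resume training from this commit's checkpoint.
# Assumption: `trainer` is a transformers.Trainer reconstructed with the
# same model, dataset, and TrainingArguments that produced checkpoint-200.
trainer.train(resume_from_checkpoint="checkpoint-200")
```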

training_args.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e7d8e6ca3ee52d610ea6af580762703958acb68894884ff8627b9a467240686f
+size 4027