Training in progress, step 10
Files changed:
- .gitattributes +1 -0
- adapter_config.json +34 -0
- adapter_model.safetensors +3 -0
- llama3_lora_sft.yaml +46 -0
- special_tokens_map.json +24 -0
- tokenizer.json +3 -0
- tokenizer_config.json +0 -0
- trainer_log.jsonl +11 -0
- training_args.bin +3 -0
.gitattributes
CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+tokenizer.json filter=lfs diff=lfs merge=lfs -text
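This one-line change tells Git LFS to store tokenizer.json (about 17 MB, per the pointer file below) as an LFS object rather than a regular Git blob; it is the line that `git lfs track "tokenizer.json"` would append to .gitattributes.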
adapter_config.json
ADDED
@@ -0,0 +1,34 @@
+{
+  "alpha_pattern": {},
+  "auto_mapping": null,
+  "base_model_name_or_path": "mistralai/Ministral-8B-Instruct-2410",
+  "bias": "none",
+  "fan_in_fan_out": false,
+  "inference_mode": true,
+  "init_lora_weights": true,
+  "layer_replication": null,
+  "layers_pattern": null,
+  "layers_to_transform": null,
+  "loftq_config": {},
+  "lora_alpha": 16,
+  "lora_dropout": 0.0,
+  "megatron_config": null,
+  "megatron_core": "megatron.core",
+  "modules_to_save": null,
+  "peft_type": "LORA",
+  "r": 8,
+  "rank_pattern": {},
+  "revision": null,
+  "target_modules": [
+    "q_proj",
+    "gate_proj",
+    "k_proj",
+    "up_proj",
+    "down_proj",
+    "v_proj",
+    "o_proj"
+  ],
+  "task_type": "CAUSAL_LM",
+  "use_dora": false,
+  "use_rslora": false
+}
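This is a standard PEFT LoRA configuration: rank 8 with alpha 16 and no dropout, applied to all seven attention and MLP projection matrices of the base model. A minimal Python sketch for loading the adapter once training finishes, assuming `transformers` and `peft` are installed and using the hub_model_id from the YAML below:

# Minimal sketch: load the fold-10 adapter on top of the base model.
# Repo ids come from adapter_config.json and the hub_model_id in
# llama3_lora_sft.yaml; both must be downloadable for this to run.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base = AutoModelForCausalLM.from_pretrained(
    "mistralai/Ministral-8B-Instruct-2410", torch_dtype=torch.bfloat16
)
model = PeftModel.from_pretrained(
    base, "chchen/Ministral-8B-Instruct-2410-PsyCourse-doc-fold10"
)
tokenizer = AutoTokenizer.from_pretrained("mistralai/Ministral-8B-Instruct-2410")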
adapter_model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cba75385b157f18b5d64118f11cd95f228998ec1a8a92bd9bff91d5b858f87da
+size 87360584
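The adapter weights themselves live in LFS storage; what is committed is this three-line pointer (spec version, sha256 oid, byte size). A sketch of an integrity check against the pointer's oid, assuming the downloaded file sits in the working directory:

# Sketch: verify a downloaded file against the LFS pointer's sha256 oid.
import hashlib

def sha256sum(path: str) -> str:
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):  # 1 MiB chunks
            h.update(chunk)
    return h.hexdigest()

assert sha256sum("adapter_model.safetensors") == \
    "cba75385b157f18b5d64118f11cd95f228998ec1a8a92bd9bff91d5b858f87da"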
llama3_lora_sft.yaml
ADDED
@@ -0,0 +1,46 @@
+### model
+model_name_or_path: mistralai/Ministral-8B-Instruct-2410
+trust_remote_code: true
+
+### method
+stage: sft
+do_train: true
+finetuning_type: lora
+lora_target: all
+
+### dataset
+dataset: course-doc-train-fold10
+dataset_dir: data_private
+template: mistral
+cutoff_len: 2048
+# max_samples: 1000
+overwrite_cache: true
+preprocessing_num_workers: 16
+
+### output
+output_dir: saves/psy-course-doc/Ministral-8B-Instruct-2410/train/fold10
+logging_steps: 1
+save_steps: 10
+plot_loss: true
+overwrite_output_dir: true
+save_total_limit: 3
+push_to_hub: true
+hub_model_id: chchen/Ministral-8B-Instruct-2410-PsyCourse-doc-fold10
+load_best_model_at_end: true
+
+### train
+per_device_train_batch_size: 1
+gradient_accumulation_steps: 16
+learning_rate: 1.0e-4
+num_train_epochs: 5.0
+lr_scheduler_type: cosine
+warmup_ratio: 0.1
+bf16: true
+ddp_timeout: 180000000
+enable_liger_kernel: true
+
+### eval
+val_size: 0.1
+per_device_eval_batch_size: 1
+eval_strategy: steps
+eval_steps: 10
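This is a LLaMA-Factory SFT recipe (the filename keeps the framework's llama3 example name, but the model and chat template are Mistral). With LLaMA-Factory installed it would typically be launched as `llamafactory-cli train llama3_lora_sft.yaml`. The effective batch size is 1 × 16 = 16 sequences per optimizer step; the learning rate warms up over the first 10% of steps and then decays on a cosine schedule from its 1.0e-4 peak; and every 10 steps a checkpoint is evaluated, saved, and pushed to the Hub, which is what produced this step-10 commit.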
special_tokens_map.json
ADDED
@@ -0,0 +1,24 @@
+{
+  "bos_token": {
+    "content": "<s>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "eos_token": {
+    "content": "</s>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "pad_token": "</s>",
+  "unk_token": {
+    "content": "<unk>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  }
+}
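Note that pad_token is the plain string "</s>": the tokenizer ships no dedicated padding token, so padding reuses EOS. A one-line sanity check, assuming the pushed repo id from the YAML above resolves:

# Sketch: confirm padding falls back to EOS for this checkpoint.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("chchen/Ministral-8B-Instruct-2410-PsyCourse-doc-fold10")
assert tok.pad_token == tok.eos_token == "</s>"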
tokenizer.json
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d7edbeaf20dd7f571b5dd1c54d9ace4f9b6299127cc7ba2afb14a6d51a4a79a4
+size 17078136
tokenizer_config.json
ADDED
The diff for this file is too large to render.
trainer_log.jsonl
ADDED
@@ -0,0 +1,11 @@
+{"current_steps": 1, "total_steps": 125, "loss": 0.2122, "lr": 7.692307692307694e-06, "epoch": 0.03950617283950617, "percentage": 0.8, "elapsed_time": "0:00:29", "remaining_time": "1:01:17"}
+{"current_steps": 2, "total_steps": 125, "loss": 0.2224, "lr": 1.5384615384615387e-05, "epoch": 0.07901234567901234, "percentage": 1.6, "elapsed_time": "0:01:00", "remaining_time": "1:01:49"}
+{"current_steps": 3, "total_steps": 125, "loss": 0.1749, "lr": 2.307692307692308e-05, "epoch": 0.11851851851851852, "percentage": 2.4, "elapsed_time": "0:01:30", "remaining_time": "1:01:38"}
+{"current_steps": 4, "total_steps": 125, "loss": 0.2466, "lr": 3.0769230769230774e-05, "epoch": 0.1580246913580247, "percentage": 3.2, "elapsed_time": "0:01:59", "remaining_time": "1:00:26"}
+{"current_steps": 5, "total_steps": 125, "loss": 0.2216, "lr": 3.846153846153846e-05, "epoch": 0.19753086419753085, "percentage": 4.0, "elapsed_time": "0:02:26", "remaining_time": "0:58:28"}
+{"current_steps": 6, "total_steps": 125, "loss": 0.2093, "lr": 4.615384615384616e-05, "epoch": 0.23703703703703705, "percentage": 4.8, "elapsed_time": "0:02:57", "remaining_time": "0:58:35"}
+{"current_steps": 7, "total_steps": 125, "loss": 0.2611, "lr": 5.384615384615385e-05, "epoch": 0.2765432098765432, "percentage": 5.6, "elapsed_time": "0:03:25", "remaining_time": "0:57:44"}
+{"current_steps": 8, "total_steps": 125, "loss": 0.1887, "lr": 6.153846153846155e-05, "epoch": 0.3160493827160494, "percentage": 6.4, "elapsed_time": "0:03:52", "remaining_time": "0:56:34"}
+{"current_steps": 9, "total_steps": 125, "loss": 0.1552, "lr": 6.923076923076924e-05, "epoch": 0.35555555555555557, "percentage": 7.2, "elapsed_time": "0:04:20", "remaining_time": "0:55:51"}
+{"current_steps": 10, "total_steps": 125, "loss": 0.1441, "lr": 7.692307692307693e-05, "epoch": 0.3950617283950617, "percentage": 8.0, "elapsed_time": "0:04:50", "remaining_time": "0:55:42"}
+{"current_steps": 10, "total_steps": 125, "eval_loss": 0.12334010004997253, "epoch": 0.3950617283950617, "percentage": 8.0, "elapsed_time": "0:05:15", "remaining_time": "1:00:27"}
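The log covers the first checkpoint interval: 10 of 125 optimizer steps. Each step advances the epoch counter by about 0.0395 (≈ 16/405), so one epoch is roughly 25 steps, consistent with 125 total steps over 5 epochs and an inferred training split of about 405 examples after the 0.1 validation holdout. The lr column is still in the linear warmup phase (0.1 × 125 ≈ 13 steps toward the 1.0e-4 peak), training loss drifts from ~0.21 down to 0.14, and the first eval at step 10 reports eval_loss ≈ 0.1233. A small sketch for summarizing the log, with the path assumed relative to the checkout:

# Sketch: summarize trainer_log.jsonl from this checkpoint.
import json

with open("trainer_log.jsonl") as f:
    records = [json.loads(line) for line in f]

train = [r for r in records if "loss" in r]       # per-step training entries
evals = [r for r in records if "eval_loss" in r]  # periodic eval entries
print(f"train loss: {train[0]['loss']:.4f} -> {train[-1]['loss']:.4f}")
print(f"eval loss at step {evals[-1]['current_steps']}: {evals[-1]['eval_loss']:.4f}")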
training_args.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c092341f3caa37fbea739d2d54455de69f32912a6b8f6601d520b1caf87a6434
+size 5624
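training_args.bin is the pickled TrainingArguments object that transformers writes alongside each checkpoint; the ~5.6 KB size is what a plain serialized dataclass looks like. A sketch for inspecting it (recent PyTorch defaults torch.load to weights_only=True, which rejects pickled objects, hence the explicit flag):

# Sketch: inspect the serialized TrainingArguments from this checkpoint.
import torch

args = torch.load("training_args.bin", weights_only=False)  # pickled object, not tensors
print(args.learning_rate, args.num_train_epochs, args.lr_scheduler_type)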