Commit aefb0c8 by lewtun (HF staff)
1 parent: 14cbaf2

End of training
README.md ADDED
@@ -0,0 +1,82 @@
+ ---
+ base_model: Qwen/Qwen2-0.5B-Instruct
+ library_name: peft
+ license: apache-2.0
+ tags:
+ - trl
+ - dpo
+ - generated_from_trainer
+ model-index:
+ - name: dpo-model-lora
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # dpo-model-lora
+
+ This model is a fine-tuned version of [Qwen/Qwen2-0.5B-Instruct](https://huggingface.co/Qwen/Qwen2-0.5B-Instruct) on an unknown dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 0.6534
+ - Rewards/chosen: -0.7320
+ - Rewards/rejected: -0.8303
+ - Rewards/accuracies: 0.6172
+ - Rewards/margins: 0.0983
+ - Logps/rejected: -359.0921
+ - Logps/chosen: -378.4928
+ - Logits/rejected: -2.2715
+ - Logits/chosen: -2.3471
+
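A note on these metrics: under TRL's standard DPO definitions (assumed here, since the card is auto-generated), the reward for a completion $y$ given prompt $x$ is the $\beta$-scaled log-probability ratio between the policy and the frozen reference model,

$$r(x, y) = \beta \bigl(\log \pi_\theta(y \mid x) - \log \pi_{\mathrm{ref}}(y \mid x)\bigr),$$

so `Rewards/margins` is the mean of `Rewards/chosen` minus `Rewards/rejected`, and `Rewards/accuracies` is the fraction of evaluation pairs whose chosen completion receives the higher reward; `Logps/*` are the summed log-probabilities under the policy. The $\beta$ used for this run is not recorded in the card (TRL's default is 0.1).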
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 5e-06
+ - train_batch_size: 4
+ - eval_batch_size: 8
+ - seed: 42
+ - distributed_type: multi-GPU
+ - num_devices: 8
+ - gradient_accumulation_steps: 4
+ - total_train_batch_size: 128
+ - total_eval_batch_size: 64
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: linear
+ - num_epochs: 1.0
+
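For orientation, these settings map onto a TRL training script roughly as follows. This is a minimal sketch, not the author's actual code: it assumes a TRL release contemporary with the framework versions listed below, and the dataset, `output_dir`, and `beta` value are assumptions the card does not record.

```python
# Minimal sketch of a DPO + LoRA run matching the hyperparameters above.
# Dataset, output_dir, and beta are assumptions; the card does not record them.
from datasets import load_dataset
from peft import LoraConfig
from transformers import AutoModelForCausalLM, AutoTokenizer
from trl import DPOConfig, DPOTrainer

model_name = "Qwen/Qwen2-0.5B-Instruct"
model = AutoModelForCausalLM.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)

# A preference dataset with "prompt"/"chosen"/"rejected" columns (illustrative choice).
dataset = load_dataset("trl-lib/ultrafeedback_binarized")

peft_config = LoraConfig(            # mirrors adapter_config.json below
    r=32,
    lora_alpha=16,
    lora_dropout=0.05,
    target_modules=["q_proj", "v_proj"],
    task_type="CAUSAL_LM",
)

training_args = DPOConfig(
    output_dir="dpo-model-lora",
    beta=0.1,                        # TRL default; actual value not recorded
    learning_rate=5e-6,
    per_device_train_batch_size=4,   # x 8 GPUs x 4 accumulation steps = 128 total
    per_device_eval_batch_size=8,
    gradient_accumulation_steps=4,
    num_train_epochs=1.0,
    lr_scheduler_type="linear",
    seed=42,
)

trainer = DPOTrainer(
    model,
    args=training_args,
    train_dataset=dataset["train"],
    eval_dataset=dataset["test"],
    tokenizer=tokenizer,             # newer TRL versions use processing_class= instead
    peft_config=peft_config,
)
trainer.train()
```

With a `peft_config` supplied, this TRL setup trains only the LoRA adapter and uses the frozen base model as the implicit DPO reference, which is why no separate `ref_model` appears.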
+ ### Training results
+
+ | Training Loss | Epoch | Step | Validation Loss | Rewards/chosen | Rewards/rejected | Rewards/accuracies | Rewards/margins | Logps/rejected | Logps/chosen | Logits/rejected | Logits/chosen |
+ |:-------------:|:------:|:----:|:---------------:|:--------------:|:----------------:|:------------------:|:---------------:|:--------------:|:------------:|:---------------:|:-------------:|
+ | 0.6884 | 0.1030 | 50 | 0.6879 | -0.0543 | -0.0734 | 0.6484 | 0.0191 | -351.5229 | -371.7161 | -2.2877 | -2.3628 |
+ | 0.6787 | 0.2060 | 100 | 0.6770 | -0.1811 | -0.2114 | 0.6016 | 0.0303 | -352.9030 | -372.9836 | -2.2815 | -2.3565 |
+ | 0.6721 | 0.3090 | 150 | 0.6721 | -0.2679 | -0.3094 | 0.6562 | 0.0415 | -353.8831 | -373.8524 | -2.2782 | -2.3536 |
+ | 0.6668 | 0.4119 | 200 | 0.6665 | -0.4037 | -0.4625 | 0.6016 | 0.0588 | -355.4139 | -375.2100 | -2.2758 | -2.3515 |
+ | 0.6597 | 0.5149 | 250 | 0.6612 | -0.4907 | -0.5505 | 0.6172 | 0.0598 | -356.2946 | -376.0805 | -2.2757 | -2.3510 |
+ | 0.6581 | 0.6179 | 300 | 0.6578 | -0.6137 | -0.6975 | 0.6250 | 0.0838 | -357.7639 | -377.3098 | -2.2736 | -2.3491 |
+ | 0.6536 | 0.7209 | 350 | 0.6556 | -0.6458 | -0.7367 | 0.6328 | 0.0909 | -358.1565 | -377.6311 | -2.2732 | -2.3489 |
+ | 0.6486 | 0.8239 | 400 | 0.6556 | -0.7025 | -0.7958 | 0.6328 | 0.0933 | -358.7473 | -378.1981 | -2.2737 | -2.3493 |
+ | 0.6490 | 0.9269 | 450 | 0.6556 | -0.7432 | -0.8327 | 0.6484 | 0.0896 | -359.1166 | -378.6048 | -2.2726 | -2.3482 |
+
+
+ ### Framework versions
+
+ - PEFT 0.12.0
+ - Transformers 4.44.2
+ - Pytorch 2.4.0+cu121
+ - Datasets 2.21.0
+ - Tokenizers 0.19.1
adapter_config.json ADDED
@@ -0,0 +1,29 @@
+ {
+   "alpha_pattern": {},
+   "auto_mapping": null,
+   "base_model_name_or_path": "Qwen/Qwen2-0.5B-Instruct",
+   "bias": "none",
+   "fan_in_fan_out": false,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layer_replication": null,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "loftq_config": {},
+   "lora_alpha": 16,
+   "lora_dropout": 0.05,
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "modules_to_save": null,
+   "peft_type": "LORA",
+   "r": 32,
+   "rank_pattern": {},
+   "revision": null,
+   "target_modules": [
+     "v_proj",
+     "q_proj"
+   ],
+   "task_type": "CAUSAL_LM",
+   "use_dora": false,
+   "use_rslora": false
+ }
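A quick sanity check on this config (a sketch: the Qwen2-0.5B dimensions below are taken from memory of the base model's config.json and should be verified there): rank-32 adapters on `q_proj` and `v_proj` account for essentially all of the 8,663,400-byte safetensors file that follows.

```python
# Back-of-the-envelope LoRA parameter count for this adapter_config.
# Assumed Qwen2-0.5B-Instruct dims: hidden_size=896, num_hidden_layers=24,
# num_key_value_heads=2, head_dim=64 (so the v_proj output width is 128).
r, layers = 32, 24
q_params = r * (896 + 896)     # lora_A (r x in) + lora_B (out x r) for q_proj
v_params = r * (896 + 2 * 64)  # same for v_proj, whose output is the KV width
total = layers * (q_params + v_params)
print(total, total * 4)        # 2,162,688 params; ~8.65 MB stored as fp32
# The remaining ~12 kB of the 8,663,400-byte file is the safetensors header.
```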
adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:79b8f24518a887f72a89f523c1d25e75dd3eb31abb012ab29d9d71197edaa659
+ size 8663400
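Using the adapter then amounts to loading it on top of the base model with PEFT. A minimal sketch, where the Hub repo id is an assumption:

```python
# Sketch: attach the trained LoRA adapter to the base model for inference.
# "lewtun/dpo-model-lora" is an assumed repo id; substitute the actual one.
import torch
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

base = AutoModelForCausalLM.from_pretrained(
    "Qwen/Qwen2-0.5B-Instruct", torch_dtype=torch.bfloat16
)
model = PeftModel.from_pretrained(base, "lewtun/dpo-model-lora")
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2-0.5B-Instruct")

# Optionally fold the adapter into the base weights for faster inference.
model = model.merge_and_unload()
```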
added_tokens.json ADDED
@@ -0,0 +1,5 @@
+ {
+   "<|endoftext|>": 151643,
+   "<|im_end|>": 151645,
+   "<|im_start|>": 151644
+ }
all_results.json ADDED
@@ -0,0 +1,15 @@
+ {
+   "epoch": 0.9989701338825953,
+   "eval_logits/chosen": -2.3471405506134033,
+   "eval_logits/rejected": -2.2715110778808594,
+   "eval_logps/chosen": -378.4928283691406,
+   "eval_logps/rejected": -359.0921325683594,
+   "eval_loss": 0.6533845067024231,
+   "eval_rewards/accuracies": 0.6171875,
+   "eval_rewards/chosen": -0.7319744229316711,
+   "eval_rewards/margins": 0.09831348061561584,
+   "eval_rewards/rejected": -0.8302878737449646,
+   "eval_runtime": 2.1287,
+   "eval_samples_per_second": 469.776,
+   "eval_steps_per_second": 7.516
+ }
eval_results.json ADDED
@@ -0,0 +1,15 @@
+ {
+   "epoch": 0.9989701338825953,
+   "eval_logits/chosen": -2.3471405506134033,
+   "eval_logits/rejected": -2.2715110778808594,
+   "eval_logps/chosen": -378.4928283691406,
+   "eval_logps/rejected": -359.0921325683594,
+   "eval_loss": 0.6533845067024231,
+   "eval_rewards/accuracies": 0.6171875,
+   "eval_rewards/chosen": -0.7319744229316711,
+   "eval_rewards/margins": 0.09831348061561584,
+   "eval_rewards/rejected": -0.8302878737449646,
+   "eval_runtime": 2.1287,
+   "eval_samples_per_second": 469.776,
+   "eval_steps_per_second": 7.516
+ }
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
special_tokens_map.json ADDED
@@ -0,0 +1,20 @@
+ {
+   "additional_special_tokens": [
+     "<|im_start|>",
+     "<|im_end|>"
+   ],
+   "eos_token": {
+     "content": "<|im_end|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,43 @@
+ {
+   "add_prefix_space": false,
+   "added_tokens_decoder": {
+     "151643": {
+       "content": "<|endoftext|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151644": {
+       "content": "<|im_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151645": {
+       "content": "<|im_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "additional_special_tokens": [
+     "<|im_start|>",
+     "<|im_end|>"
+   ],
+   "bos_token": null,
+   "chat_template": "{% for message in messages %}{% if loop.first and messages[0]['role'] != 'system' %}{{ '<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n' }}{% endif %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "<|im_end|>",
+   "errors": "replace",
+   "model_max_length": 32768,
+   "pad_token": "<|endoftext|>",
+   "split_special_tokens": false,
+   "tokenizer_class": "Qwen2Tokenizer",
+   "unk_token": null
+ }
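The `chat_template` above renders conversations in Qwen's ChatML format, injecting a default system message when none is given. Applying it looks roughly like this (a sketch; the message content is illustrative):

```python
# Sketch: render a conversation with the ChatML template defined above.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2-0.5B-Instruct")
messages = [{"role": "user", "content": "Hello!"}]
text = tokenizer.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True
)
print(text)
# <|im_start|>system
# You are a helpful assistant.<|im_end|>
# <|im_start|>user
# Hello!<|im_end|>
# <|im_start|>assistant
```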
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b0dbfc76c19aa63e0245b5d5fb04969b3bd90615f0365aefa098d375c76d927f
+ size 5944
vocab.json ADDED
The diff for this file is too large to render. See raw diff