guoyu-zhang committed 721f2e8 (verified) · Parent(s): 8a24890

model_shp1_dpo1

README.md CHANGED
@@ -18,15 +18,15 @@ should probably proofread and complete it, then remove this comment. -->
 
  This model is a fine-tuned version of [meta-llama/Llama-2-7b-chat-hf](https://huggingface.co/meta-llama/Llama-2-7b-chat-hf) on an unknown dataset.
  It achieves the following results on the evaluation set:
- - Loss: 1.5212
- - Rewards/chosen: -8.7548
- - Rewards/rejected: -9.0826
- - Rewards/accuracies: 0.5200
- - Rewards/margins: 0.3277
- - Logps/rejected: -311.0196
- - Logps/chosen: -308.8270
- - Logits/rejected: -0.8356
- - Logits/chosen: -0.7989
+ - Loss: 2.0112
+ - Rewards/chosen: -9.7625
+ - Rewards/rejected: -9.2926
+ - Rewards/accuracies: 0.4700
+ - Rewards/margins: -0.4699
+ - Logps/rejected: -307.4124
+ - Logps/chosen: -345.0927
+ - Logits/rejected: -1.0692
+ - Logits/chosen: -1.0975
 
  ## Model description
 
@@ -60,22 +60,22 @@ The following hyperparameters were used during training:
 
  | Training Loss | Epoch | Step | Validation Loss | Rewards/chosen | Rewards/rejected | Rewards/accuracies | Rewards/margins | Logps/rejected | Logps/chosen | Logits/rejected | Logits/chosen |
  |:-------------:|:-----:|:----:|:---------------:|:--------------:|:----------------:|:------------------:|:---------------:|:--------------:|:------------:|:---------------:|:-------------:|
- | 0.1427 | 2.67 | 100 | 0.8106 | -0.8889 | -0.9278 | 0.5100 | 0.0389 | -229.4721 | -230.1675 | -1.1688 | -1.1678 |
- | 0.0025 | 5.33 | 200 | 1.6177 | -6.5662 | -6.5650 | 0.5200 | -0.0012 | -285.8441 | -286.9406 | -1.1309 | -1.1114 |
- | 0.0001 | 8.0 | 300 | 1.4671 | -8.3255 | -8.6477 | 0.5400 | 0.3222 | -306.6713 | -304.5340 | -0.8483 | -0.8117 |
- | 0.0001 | 10.67 | 400 | 1.4895 | -8.5230 | -8.8582 | 0.5200 | 0.3353 | -308.7761 | -306.5081 | -0.8422 | -0.8057 |
- | 0.0001 | 13.33 | 500 | 1.5102 | -8.6434 | -8.9737 | 0.5300 | 0.3303 | -309.9312 | -307.7126 | -0.8395 | -0.8031 |
- | 0.0001 | 16.0 | 600 | 1.5145 | -8.7072 | -9.0466 | 0.5400 | 0.3394 | -310.6606 | -308.3510 | -0.8372 | -0.8007 |
- | 0.0001 | 18.67 | 700 | 1.5192 | -8.7369 | -9.0738 | 0.5200 | 0.3369 | -310.9317 | -308.6474 | -0.8362 | -0.7996 |
- | 0.0001 | 21.33 | 800 | 1.5269 | -8.7530 | -9.0797 | 0.5400 | 0.3267 | -310.9907 | -308.8084 | -0.8355 | -0.7993 |
- | 0.0001 | 24.0 | 900 | 1.5211 | -8.7521 | -9.0876 | 0.5400 | 0.3355 | -311.0706 | -308.7999 | -0.8360 | -0.7990 |
- | 0.0001 | 26.67 | 1000 | 1.5212 | -8.7548 | -9.0826 | 0.5200 | 0.3277 | -311.0196 | -308.8270 | -0.8356 | -0.7989 |
+ | 0.0741 | 2.67 | 100 | 1.1825 | -3.1899 | -3.0393 | 0.4800 | -0.1506 | -244.8796 | -279.3668 | -1.2034 | -1.2620 |
+ | 0.0016 | 5.33 | 200 | 2.1179 | -8.9597 | -8.2224 | 0.4100 | -0.7372 | -296.7111 | -337.0645 | -1.1154 | -1.1503 |
+ | 0.0001 | 8.0 | 300 | 1.9624 | -9.5308 | -9.0562 | 0.4500 | -0.4746 | -305.0487 | -342.7763 | -1.0878 | -1.1168 |
+ | 0.0001 | 10.67 | 400 | 1.9799 | -9.6041 | -9.1296 | 0.4500 | -0.4745 | -305.7831 | -343.5089 | -1.0797 | -1.1079 |
+ | 0.0001 | 13.33 | 500 | 1.9938 | -9.6787 | -9.2063 | 0.4500 | -0.4724 | -306.5495 | -344.2545 | -1.0746 | -1.1031 |
+ | 0.0001 | 16.0 | 600 | 2.0046 | -9.7222 | -9.2446 | 0.4600 | -0.4776 | -306.9330 | -344.6898 | -1.0722 | -1.0999 |
+ | 0.0001 | 18.67 | 700 | 2.0079 | -9.7525 | -9.2749 | 0.4500 | -0.4776 | -307.2361 | -344.9933 | -1.0706 | -1.0984 |
+ | 0.0001 | 21.33 | 800 | 2.0091 | -9.7588 | -9.2867 | 0.4600 | -0.4721 | -307.3541 | -345.0561 | -1.0699 | -1.0978 |
+ | 0.0001 | 24.0 | 900 | 2.0158 | -9.7704 | -9.2915 | 0.4500 | -0.4789 | -307.4015 | -345.1719 | -1.0694 | -1.0975 |
+ | 0.0001 | 26.67 | 1000 | 2.0112 | -9.7625 | -9.2926 | 0.4700 | -0.4699 | -307.4124 | -345.0927 | -1.0692 | -1.0975 |
 
 
  ### Framework versions
 
  - PEFT 0.10.0
- - Transformers 4.39.3
- - Pytorch 2.2.2+cu121
+ - Transformers 4.39.1
+ - Pytorch 2.2.1+cu121
  - Datasets 2.18.0
  - Tokenizers 0.15.2
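
The updated metrics are internally consistent: Rewards/margins is Rewards/chosen minus Rewards/rejected, and -9.7625 - (-9.2926) = -0.4699 matches the reported margin (the previous header's -8.7548 - (-9.0826) ≈ 0.3277 checks out the same way, up to rounding). For trying out the adapter this commit updates, a minimal loading sketch follows; the repo id `guoyu-zhang/model_shp1_dpo1` is inferred from the committer and commit message, not stated in the diff.

```python
# Minimal sketch: attach this DPO-trained LoRA adapter to its base model.
# Assumptions: the adapter repo is "guoyu-zhang/model_shp1_dpo1" (inferred,
# not confirmed by the diff) and you have access to the gated Llama-2 weights.
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base_id = "meta-llama/Llama-2-7b-chat-hf"
tokenizer = AutoTokenizer.from_pretrained(base_id)
base = AutoModelForCausalLM.from_pretrained(base_id)

# Wrap the frozen base model with the LoRA weights from this repo.
model = PeftModel.from_pretrained(base, "guoyu-zhang/model_shp1_dpo1")
model.eval()
```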
adapter_config.json CHANGED
@@ -20,13 +20,13 @@
  "rank_pattern": {},
  "revision": null,
  "target_modules": [
-   "fc_in",
-   "k_proj",
-   "out_proj",
-   "q_proj",
+   "v_proj",
    "wte",
+   "fc_in",
    "fc_out",
-   "v_proj"
+   "out_proj",
+   "k_proj",
+   "q_proj"
  ],
  "task_type": "CAUSAL_LM",
  "use_dora": false,
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
- oid sha256:f47bcd4ab0874947c711f3708a6f67e7f58b3c4e2c54869e32319a12af323e2a
+ oid sha256:710c08c13c487bab09a782e9ba29fb15c79c7236fb94d0b715d2601fe87c5e75
 size 12608472
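
Only the LFS pointer's `oid` changes; the payload size stays 12608472 bytes, as expected when retrained weights keep the same shapes. A sketch for verifying a local download against the new pointer, assuming the file has already been fetched (e.g. via `git lfs pull`):

```python
# Verify a downloaded adapter_model.safetensors against its LFS pointer:
# the Git LFS oid is the SHA-256 of the file contents.
import hashlib

expected = "710c08c13c487bab09a782e9ba29fb15c79c7236fb94d0b715d2601fe87c5e75"

h = hashlib.sha256()
with open("adapter_model.safetensors", "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        h.update(chunk)

assert h.hexdigest() == expected, "sha256 mismatch: wrong or partial download"
```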
final_checkpoint/adapter_config.json CHANGED
@@ -20,13 +20,13 @@
  "rank_pattern": {},
  "revision": null,
  "target_modules": [
-   "fc_in",
-   "k_proj",
-   "out_proj",
-   "q_proj",
+   "v_proj",
    "wte",
+   "fc_in",
    "fc_out",
-   "v_proj"
+   "out_proj",
+   "k_proj",
+   "q_proj"
  ],
  "task_type": "CAUSAL_LM",
  "use_dora": false,
final_checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
- oid sha256:f47bcd4ab0874947c711f3708a6f67e7f58b3c4e2c54869e32319a12af323e2a
+ oid sha256:710c08c13c487bab09a782e9ba29fb15c79c7236fb94d0b715d2601fe87c5e75
 size 12608472
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
- oid sha256:cceba7c9ae20269a1e6b8e8840b681653780e6490a07f08de441e0f8ab1ec90d
+ oid sha256:c25d4b1a8aff01241271b4db5557cd1f5c24059164325967c7c0ebe5b3c219d0
 size 4984
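
`training_args.bin` is the pickled `TrainingArguments` object that the `Trainer` saves alongside checkpoints; only its hash changes here. A sketch for inspecting a trusted local copy (it is a pickle, so loading can execute arbitrary code; never unpickle files from sources you don't trust):

```python
# Inspect the pickled TrainingArguments saved with this run.
# Assumption: a trusted local copy of training_args.bin.
import torch

args = torch.load("training_args.bin", weights_only=False)
print(type(args).__name__)                        # typically TrainingArguments
print(args.learning_rate, args.num_train_epochs)
```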