AlbelTec/dpo_mistral_7B_v_0_1
- README.md +95 -0
- adapter_config.json +26 -0
- adapter_model.safetensors +3 -0
- special_tokens_map.json +24 -0
- tokenizer.json +0 -0
- tokenizer.model +3 -0
- tokenizer_config.json +42 -0
- training_args.bin +3 -0
README.md
ADDED
@@ -0,0 +1,95 @@
---
license: apache-2.0
library_name: peft
tags:
- trl
- dpo
- generated_from_trainer
base_model: TheBloke/Mistral-7B-v0.1-GPTQ
model-index:
- name: mistral-dpo
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->

# mistral-dpo

This model is a fine-tuned version of [TheBloke/Mistral-7B-v0.1-GPTQ](https://huggingface.co/TheBloke/Mistral-7B-v0.1-GPTQ) on an unspecified dataset.
It achieves the following results on the evaluation set:
- Loss: 0.0000
- Rewards/chosen: -2.0502
- Rewards/rejected: -28.3632
- Rewards/accuracies: 1.0
- Rewards/margins: 26.3129
- Logps/rejected: -399.8283
- Logps/chosen: -35.7179
- Logits/rejected: -2.1171
- Logits/chosen: -1.8480
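For context, the `Rewards/*` metrics above are the standard diagnostics TRL's `DPOTrainer` logs. As a hedged refresher (this is the generic DPO objective, not something recorded in this card; the β used for this run is not logged):

$$
\mathcal{L}_\mathrm{DPO}(\theta) = -\,\mathbb{E}_{(x,\,y_w,\,y_l)}\left[\log \sigma\!\left(\beta \log \frac{\pi_\theta(y_w \mid x)}{\pi_\mathrm{ref}(y_w \mid x)} - \beta \log \frac{\pi_\theta(y_l \mid x)}{\pi_\mathrm{ref}(y_l \mid x)}\right)\right]
$$

`Rewards/chosen` and `Rewards/rejected` are the β-scaled log-probability ratios of the policy against the reference model for the preferred and rejected completions, `Rewards/margins` is their difference, and `Rewards/accuracies` is the fraction of pairs where the chosen reward exceeds the rejected one.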
## Model description

More information needed

## Intended uses & limitations

More information needed
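Pending fuller documentation, here is a minimal, untested sketch of loading the adapter for inference with PEFT. The repo id `AlbelTec/dpo_mistral_7B_v_0_1` is assumed from this card's location, and the GPTQ base additionally requires `optimum` and `auto-gptq` to be installed:

```python
from peft import AutoPeftModelForCausalLM
from transformers import AutoTokenizer

# Assumption: adapter repo id matches this card. PEFT resolves the GPTQ base
# model from base_model_name_or_path in adapter_config.json; device_map="auto"
# needs accelerate.
model = AutoPeftModelForCausalLM.from_pretrained(
    "AlbelTec/dpo_mistral_7B_v_0_1",
    device_map="auto",
)
tokenizer = AutoTokenizer.from_pretrained("AlbelTec/dpo_mistral_7B_v_0_1")

inputs = tokenizer("Hello, ", return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```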
## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training (a hedged training sketch follows this list):
- learning_rate: 0.0002
- train_batch_size: 1
- eval_batch_size: 8
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_steps: 2
- training_steps: 250
- mixed_precision_training: Native AMP
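A sketch of how these settings might map onto TRL's `DPOTrainer` (TRL 0.7.x-era API, consistent with the Transformers 4.36.2 pin below; `model`, `tokenizer`, the datasets, and `beta=0.1` are assumptions not recorded in this card):

```python
from transformers import TrainingArguments
from trl import DPOTrainer

training_args = TrainingArguments(
    output_dir="mistral-dpo",
    per_device_train_batch_size=1,
    per_device_eval_batch_size=8,
    learning_rate=2e-4,
    lr_scheduler_type="linear",
    warmup_steps=2,
    max_steps=250,
    fp16=True,  # "Native AMP" mixed precision
    seed=42,    # default AdamW betas/epsilon match the values listed above
)

trainer = DPOTrainer(
    model=model,                 # assumption: the PEFT-wrapped GPTQ base model
    args=training_args,
    beta=0.1,                    # assumption: beta is not recorded in this card
    train_dataset=train_dataset, # assumption: preference data with
    eval_dataset=eval_dataset,   # prompt/chosen/rejected columns
    tokenizer=tokenizer,
)
trainer.train()
```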
### Training results

| Training Loss | Epoch | Step | Validation Loss | Rewards/chosen | Rewards/rejected | Rewards/accuracies | Rewards/margins | Logps/rejected | Logps/chosen | Logits/rejected | Logits/chosen |
|:-------------:|:-----:|:----:|:---------------:|:--------------:|:----------------:|:------------------:|:---------------:|:--------------:|:------------:|:---------------:|:-------------:|
| 0.6453 | 0.2 | 10 | 0.4086 | 0.1393 | -0.7001 | 1.0 | 0.8394 | -123.1976 | -13.8225 | -2.5461 | -2.5162 |
| 0.1759 | 0.4 | 20 | 0.0051 | 0.3963 | -6.4413 | 1.0 | 6.8376 | -180.6101 | -11.2527 | -2.5253 | -2.4045 |
| 0.0015 | 0.6 | 30 | 0.0000 | 0.2885 | -20.7441 | 1.0 | 21.0326 | -323.6376 | -12.3309 | -2.2440 | -1.8851 |
| 0.0 | 0.8 | 40 | 0.0000 | -0.6913 | -26.5964 | 1.0 | 25.9051 | -382.1607 | -22.1282 | -1.9054 | -1.5507 |
| 0.0 | 1.0 | 50 | 0.0000 | -1.6661 | -28.8376 | 1.0 | 27.1715 | -404.5731 | -31.8766 | -1.7581 | -1.4145 |
| 0.0 | 1.2 | 60 | 0.0000 | -2.1659 | -29.6823 | 1.0 | 27.5164 | -413.0200 | -36.8745 | -1.7071 | -1.3649 |
| 0.0 | 1.4 | 70 | 0.0000 | -2.0973 | -30.0476 | 1.0 | 27.9503 | -416.6729 | -36.1886 | -1.6955 | -1.3541 |
| 0.0 | 1.6 | 80 | 0.0000 | -2.0065 | -30.1726 | 1.0 | 28.1661 | -417.9230 | -35.2805 | -1.6941 | -1.3519 |
| 0.0 | 1.8 | 90 | 0.0000 | -1.9541 | -30.2266 | 1.0 | 28.2724 | -418.4622 | -34.7568 | -1.6935 | -1.3518 |
| 0.0023 | 2.0 | 100 | 0.0000 | -0.7061 | -30.2814 | 1.0 | 29.5753 | -419.0107 | -22.2763 | -1.7664 | -1.4215 |
| 0.0 | 2.2 | 110 | 0.0000 | -1.6234 | -29.4682 | 1.0 | 27.8448 | -410.8783 | -31.4494 | -2.0371 | -1.7164 |
| 0.0 | 2.4 | 120 | 0.0000 | -1.9528 | -28.6154 | 1.0 | 26.6626 | -402.3507 | -34.7431 | -2.0991 | -1.8126 |
| 0.0 | 2.6 | 130 | 0.0000 | -2.0210 | -28.3739 | 1.0 | 26.3529 | -399.9358 | -35.4253 | -2.1141 | -1.8394 |
| 0.0 | 2.8 | 140 | 0.0000 | -2.0443 | -28.2878 | 1.0 | 26.2435 | -399.0752 | -35.6588 | -2.1185 | -1.8487 |
| 0.0 | 3.0 | 150 | 0.0000 | -2.0504 | -28.2651 | 1.0 | 26.2147 | -398.8474 | -35.7192 | -2.1201 | -1.8510 |
| 0.0 | 3.2 | 160 | 0.0000 | -2.0500 | -28.2657 | 1.0 | 26.2157 | -398.8541 | -35.7157 | -2.1202 | -1.8519 |
| 0.0 | 3.4 | 170 | 0.0000 | -2.0530 | -28.2687 | 1.0 | 26.2157 | -398.8837 | -35.7460 | -2.1205 | -1.8521 |
| 0.0 | 3.6 | 180 | 0.0000 | -2.0529 | -28.2660 | 1.0 | 26.2131 | -398.8570 | -35.7444 | -2.1202 | -1.8515 |
| 0.0 | 3.8 | 190 | 0.0000 | -2.0531 | -28.2649 | 1.0 | 26.2119 | -398.8461 | -35.7464 | -2.1202 | -1.8519 |
| 0.0 | 4.0 | 200 | 0.0000 | -2.0579 | -28.3150 | 1.0 | 26.2571 | -399.3466 | -35.7943 | -2.1191 | -1.8507 |
| 0.0 | 4.2 | 210 | 0.0000 | -2.0509 | -28.3341 | 1.0 | 26.2832 | -399.5381 | -35.7246 | -2.1178 | -1.8487 |
| 0.0 | 4.4 | 220 | 0.0000 | -2.0516 | -28.3405 | 1.0 | 26.2889 | -399.6018 | -35.7316 | -2.1178 | -1.8490 |
| 0.0 | 4.6 | 230 | 0.0000 | -2.0516 | -28.3495 | 1.0 | 26.2979 | -399.6917 | -35.7317 | -2.1176 | -1.8489 |
| 0.0 | 4.8 | 240 | 0.0000 | -2.0508 | -28.3684 | 1.0 | 26.3176 | -399.8806 | -35.7236 | -2.1173 | -1.8488 |
| 0.0 | 5.0 | 250 | 0.0000 | -2.0502 | -28.3632 | 1.0 | 26.3129 | -399.8283 | -35.7179 | -2.1171 | -1.8480 |


### Framework versions

- PEFT 0.7.1
- Transformers 4.36.2
- Pytorch 2.0.1+cu118
- Datasets 2.15.0
- Tokenizers 0.15.0
adapter_config.json
ADDED
@@ -0,0 +1,26 @@
{
  "alpha_pattern": {},
  "auto_mapping": null,
  "base_model_name_or_path": "TheBloke/Mistral-7B-v0.1-GPTQ",
  "bias": "none",
  "fan_in_fan_out": false,
  "inference_mode": true,
  "init_lora_weights": true,
  "layers_pattern": null,
  "layers_to_transform": null,
  "loftq_config": {},
  "lora_alpha": 6,
  "lora_dropout": 0.1,
  "megatron_config": null,
  "megatron_core": "megatron.core",
  "modules_to_save": null,
  "peft_type": "LORA",
  "r": 4,
  "rank_pattern": {},
  "revision": null,
  "target_modules": [
    "v_proj",
    "q_proj"
  ],
  "task_type": "CAUSAL_LM"
}
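This describes a small LoRA (rank 4, alpha 6, scaling alpha/r = 1.5) over only the attention q/v projections, which is why the adapter weights below are only about 6.8 MB. A hedged sketch of the `peft.LoraConfig` that would roughly reproduce it (field names per PEFT 0.7.x; `inference_mode` is left at its training-time default of False):

```python
from peft import LoraConfig

# Sketch mirroring adapter_config.json above, not an authoritative
# reconstruction of the author's training script.
lora_config = LoraConfig(
    r=4,
    lora_alpha=6,
    lora_dropout=0.1,
    bias="none",
    target_modules=["v_proj", "q_proj"],
    task_type="CAUSAL_LM",
)
```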
adapter_model.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:7f2938efd7419eaf8c1f9d06c878141649471a41c08f6698cff91d23c7d7e9a1
size 6832600
special_tokens_map.json
ADDED
@@ -0,0 +1,24 @@
{
  "bos_token": {
    "content": "<s>",
    "lstrip": false,
    "normalized": true,
    "rstrip": false,
    "single_word": false
  },
  "eos_token": {
    "content": "</s>",
    "lstrip": false,
    "normalized": true,
    "rstrip": false,
    "single_word": false
  },
  "pad_token": "</s>",
  "unk_token": {
    "content": "<unk>",
    "lstrip": false,
    "normalized": true,
    "rstrip": false,
    "single_word": false
  }
}
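Note that `pad_token` is mapped onto the EOS string `</s>`: the Mistral/Llama tokenizer ships without a dedicated padding token, so reusing EOS is the usual workaround. A minimal sketch of the equivalent runtime assignment:

```python
from transformers import AutoTokenizer

# Assumption: starting from the base tokenizer; this mirrors the
# "pad_token": "</s>" entry in the map above.
tokenizer = AutoTokenizer.from_pretrained("TheBloke/Mistral-7B-v0.1-GPTQ")
tokenizer.pad_token = tokenizer.eos_token
```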
tokenizer.json
ADDED
The diff for this file is too large to render.
tokenizer.model
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:dadfd56d766715c61d2ef780a525ab43b8e6da4de6865bda3d95fdef5e134055
size 493443
tokenizer_config.json
ADDED
@@ -0,0 +1,42 @@
{
  "add_bos_token": true,
  "add_eos_token": false,
  "added_tokens_decoder": {
    "0": {
      "content": "<unk>",
      "lstrip": false,
      "normalized": true,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "1": {
      "content": "<s>",
      "lstrip": false,
      "normalized": true,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "2": {
      "content": "</s>",
      "lstrip": false,
      "normalized": true,
      "rstrip": false,
      "single_word": false,
      "special": true
    }
  },
  "additional_special_tokens": [],
  "bos_token": "<s>",
  "clean_up_tokenization_spaces": false,
  "eos_token": "</s>",
  "legacy": true,
  "model_max_length": 1000000000000000019884624838656,
  "pad_token": "</s>",
  "sp_model_kwargs": {},
  "spaces_between_special_tokens": false,
  "tokenizer_class": "LlamaTokenizer",
  "unk_token": "<unk>",
  "use_default_system_prompt": true
}
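Two details worth noting: the huge `model_max_length` is Transformers' "unset" sentinel (`VERY_LARGE_INTEGER`), not a real context limit, and `add_bos_token: true` / `add_eos_token: false` means encoding prepends `<s>` but does not append `</s>`. A small sketch of the effect (repo id assumed as before):

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("AlbelTec/dpo_mistral_7B_v_0_1")

ids = tok("hello").input_ids
# BOS is prepended; EOS is not appended.
assert ids[0] == tok.bos_token_id
assert ids[-1] != tok.eos_token_id
```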
training_args.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:dfccc0fe051dacb57a127d5cddec12bcaa5a126c50cbaf4655f1d80b9c78ceb7
size 4219
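This file is a pickled `TrainingArguments` object (stored via Git LFS). If you want to inspect the exact run configuration, a hedged sketch (unpickling needs a compatible transformers version, ideally the 4.36.2 pinned above):

```python
import torch

# training_args.bin is a pickle of the trainer's TrainingArguments.
args = torch.load("training_args.bin")
print(args.learning_rate, args.max_steps, args.lr_scheduler_type)
```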