Quuinn committed on
Commit
4be6c41
1 Parent(s): e6a80fb

Model save

Browse files
README.md ADDED
@@ -0,0 +1,73 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ license: apache-2.0
3
+ base_model: mistralai/Mistral-7B-v0.1
4
+ tags:
5
+ - generated_from_trainer
6
+ model-index:
7
+ - name: zephyr-7b-dpo-lora
8
+ results: []
9
+ ---
10
+
11
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
12
+ should probably proofread and complete it, then remove this comment. -->
13
+
14
+ # zephyr-7b-dpo-lora
15
+
16
+ This model is a fine-tuned version of [mistralai/Mistral-7B-v0.1](https://huggingface.co/mistralai/Mistral-7B-v0.1) on an unspecified dataset.
17
+ It achieves the following results on the evaluation set:
18
+ - Loss: 0.5655
19
+ - Rewards/chosen: -0.0745
20
+ - Rewards/rejected: -0.5329
21
+ - Rewards/accuracies: 0.7000
22
+ - Rewards/margins: 0.4583
23
+ - Logps/rejected: -224.6270
24
+ - Logps/chosen: -265.4236
25
+ - Logits/rejected: -2.0002
26
+ - Logits/chosen: -2.1215
27
+
28
+ ## Model description
29
+
30
+ More information needed
31
+
32
+ ## Intended uses & limitations
33
+
34
+ More information needed
35
+
36
+ ## Training and evaluation data
37
+
38
+ More information needed
39
+
40
+ ## Training procedure
41
+
42
+ ### Training hyperparameters
43
+
44
+ The following hyperparameters were used during training:
45
+ - learning_rate: 5e-07
46
+ - train_batch_size: 2
47
+ - eval_batch_size: 4
48
+ - seed: 42
49
+ - distributed_type: multi-GPU
50
+ - num_devices: 4
51
+ - gradient_accumulation_steps: 32
52
+ - total_train_batch_size: 256
53
+ - total_eval_batch_size: 16
54
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
55
+ - lr_scheduler_type: linear
56
+ - lr_scheduler_warmup_ratio: 0.1
57
+ - num_epochs: 3
58
+
59
+ ### Training results
60
+
61
+ | Training Loss | Epoch | Step | Validation Loss | Rewards/chosen | Rewards/rejected | Rewards/accuracies | Rewards/margins | Logps/rejected | Logps/chosen | Logits/rejected | Logits/chosen |
62
+ |:-------------:|:-----:|:----:|:---------------:|:--------------:|:----------------:|:------------------:|:---------------:|:--------------:|:------------:|:---------------:|:-------------:|
63
+ | 0.6125 | 1.0 | 242 | 0.6057 | 0.0079 | -0.2400 | 0.6760 | 0.2479 | -221.6983 | -264.5998 | -2.0242 | -2.1445 |
64
+ | 0.5849 | 2.0 | 484 | 0.5731 | -0.0578 | -0.4714 | 0.6900 | 0.4136 | -224.0123 | -265.2563 | -2.0071 | -2.1279 |
65
+ | 0.5671 | 3.0 | 726 | 0.5655 | -0.0745 | -0.5329 | 0.7000 | 0.4583 | -224.6270 | -265.4236 | -2.0002 | -2.1215 |
66
+
67
+
68
+ ### Framework versions
69
+
70
+ - Transformers 4.35.0
71
+ - Pytorch 2.1.0+cu121
72
+ - Datasets 2.14.6
73
+ - Tokenizers 0.14.1
adapter_config.json ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "alpha_pattern": {},
3
+ "auto_mapping": null,
4
+ "base_model_name_or_path": "mistralai/Mistral-7B-v0.1",
5
+ "bias": "none",
6
+ "fan_in_fan_out": false,
7
+ "inference_mode": true,
8
+ "init_lora_weights": true,
9
+ "layers_pattern": null,
10
+ "layers_to_transform": null,
11
+ "lora_alpha": 16,
12
+ "lora_dropout": 0.1,
13
+ "modules_to_save": null,
14
+ "peft_type": "LORA",
15
+ "r": 64,
16
+ "rank_pattern": {},
17
+ "revision": null,
18
+ "target_modules": [
19
+ "k_proj",
20
+ "q_proj",
21
+ "o_proj",
22
+ "v_proj"
23
+ ],
24
+ "task_type": "CAUSAL_LM"
25
+ }
adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1a12170be8284c32127d345d0aeb5e678dc64dd7d96ffcc57f28c5426a4bde28
3
+ size 218138576
all_results.json ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "epoch": 3.0,
3
+ "eval_logits/chosen": -2.1214964389801025,
4
+ "eval_logits/rejected": -2.000164031982422,
5
+ "eval_logps/chosen": -265.42364501953125,
6
+ "eval_logps/rejected": -224.6269989013672,
7
+ "eval_loss": 0.5655443072319031,
8
+ "eval_rewards/accuracies": 0.699999988079071,
9
+ "eval_rewards/chosen": -0.07451467216014862,
10
+ "eval_rewards/margins": 0.4583480656147003,
11
+ "eval_rewards/rejected": -0.5328627228736877,
12
+ "eval_runtime": 278.2904,
13
+ "eval_samples": 2000,
14
+ "eval_samples_per_second": 7.187,
15
+ "eval_steps_per_second": 0.449,
16
+ "train_loss": 0.6037390495627379,
17
+ "train_runtime": 36278.6969,
18
+ "train_samples": 61966,
19
+ "train_samples_per_second": 5.124,
20
+ "train_steps_per_second": 0.02
21
+ }
eval_results.json ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "epoch": 3.0,
3
+ "eval_logits/chosen": -2.1214964389801025,
4
+ "eval_logits/rejected": -2.000164031982422,
5
+ "eval_logps/chosen": -265.42364501953125,
6
+ "eval_logps/rejected": -224.6269989013672,
7
+ "eval_loss": 0.5655443072319031,
8
+ "eval_rewards/accuracies": 0.699999988079071,
9
+ "eval_rewards/chosen": -0.07451467216014862,
10
+ "eval_rewards/margins": 0.4583480656147003,
11
+ "eval_rewards/rejected": -0.5328627228736877,
12
+ "eval_runtime": 278.2904,
13
+ "eval_samples": 2000,
14
+ "eval_samples_per_second": 7.187,
15
+ "eval_steps_per_second": 0.449
16
+ }
runs/Nov30_05-13-52_0917-210255-mz7ml79v-10-139-176-13/events.out.tfevents.1701321307.0917-210255-mz7ml79v-10-139-176-13.48027.0 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:765ff7322ab2389b89c35d189b49c822f9f1a40b3e3fdb0317bef159aa7a23b1
3
+ size 4136
runs/Nov30_11-44-37_0917-210255-mz7ml79v-10-139-176-13/events.out.tfevents.1701344763.0917-210255-mz7ml79v-10-139-176-13.805961.0 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:dda10e4a055e74a72ed2081cc512ce50d2b2e35b42494c736c5143ae874cff33
3
+ size 53015
runs/Nov30_11-44-37_0917-210255-mz7ml79v-10-139-176-13/events.out.tfevents.1701381319.0917-210255-mz7ml79v-10-139-176-13.805961.1 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:36c843174df406938a3a31fb859c99a7d45f09a438d75970aeea02be33989ff6
3
+ size 780
special_tokens_map.json ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "bos_token": {
3
+ "content": "<s>",
4
+ "lstrip": false,
5
+ "normalized": false,
6
+ "rstrip": false,
7
+ "single_word": false
8
+ },
9
+ "eos_token": {
10
+ "content": "</s>",
11
+ "lstrip": false,
12
+ "normalized": false,
13
+ "rstrip": false,
14
+ "single_word": false
15
+ },
16
+ "pad_token": {
17
+ "content": "</s>",
18
+ "lstrip": false,
19
+ "normalized": false,
20
+ "rstrip": false,
21
+ "single_word": false
22
+ },
23
+ "unk_token": {
24
+ "content": "<unk>",
25
+ "lstrip": false,
26
+ "normalized": false,
27
+ "rstrip": false,
28
+ "single_word": false
29
+ }
30
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,41 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "added_tokens_decoder": {
3
+ "0": {
4
+ "content": "<unk>",
5
+ "lstrip": false,
6
+ "normalized": false,
7
+ "rstrip": false,
8
+ "single_word": false,
9
+ "special": true
10
+ },
11
+ "1": {
12
+ "content": "<s>",
13
+ "lstrip": false,
14
+ "normalized": false,
15
+ "rstrip": false,
16
+ "single_word": false,
17
+ "special": true
18
+ },
19
+ "2": {
20
+ "content": "</s>",
21
+ "lstrip": false,
22
+ "normalized": false,
23
+ "rstrip": false,
24
+ "single_word": false,
25
+ "special": true
26
+ }
27
+ },
28
+ "additional_special_tokens": [],
29
+ "bos_token": "<s>",
30
+ "chat_template": "{% for message in messages %}\n{% if message['role'] == 'user' %}\n{{ '<|user|>\n' + message['content'] + eos_token }}\n{% elif message['role'] == 'system' %}\n{{ '<|system|>\n' + message['content'] + eos_token }}\n{% elif message['role'] == 'assistant' %}\n{{ '<|assistant|>\n' + message['content'] + eos_token }}\n{% endif %}\n{% if loop.last and add_generation_prompt %}\n{{ '<|assistant|>' }}\n{% endif %}\n{% endfor %}",
31
+ "clean_up_tokenization_spaces": false,
32
+ "eos_token": "</s>",
33
+ "legacy": true,
34
+ "model_max_length": 2048,
35
+ "pad_token": "</s>",
36
+ "sp_model_kwargs": {},
37
+ "spaces_between_special_tokens": false,
38
+ "tokenizer_class": "LlamaTokenizer",
39
+ "unk_token": "<unk>",
40
+ "use_default_system_prompt": true
41
+ }
train_results.json ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "epoch": 3.0,
3
+ "train_loss": 0.6037390495627379,
4
+ "train_runtime": 36278.6969,
5
+ "train_samples": 61966,
6
+ "train_samples_per_second": 5.124,
7
+ "train_steps_per_second": 0.02
8
+ }
trainer_state.json ADDED
@@ -0,0 +1,1098 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "best_metric": null,
3
+ "best_model_checkpoint": null,
4
+ "epoch": 2.9992254066615027,
5
+ "eval_steps": 100,
6
+ "global_step": 726,
7
+ "is_hyper_param_search": false,
8
+ "is_local_process_zero": true,
9
+ "is_world_process_zero": true,
10
+ "log_history": [
11
+ {
12
+ "epoch": 0.0,
13
+ "learning_rate": 6.84931506849315e-09,
14
+ "logits/chosen": -2.3491616249084473,
15
+ "logits/rejected": -2.418564796447754,
16
+ "logps/chosen": -271.3881530761719,
17
+ "logps/rejected": -208.9749298095703,
18
+ "loss": 0.6931,
19
+ "rewards/accuracies": 0.0,
20
+ "rewards/chosen": 0.0,
21
+ "rewards/margins": 0.0,
22
+ "rewards/rejected": 0.0,
23
+ "step": 1
24
+ },
25
+ {
26
+ "epoch": 0.04,
27
+ "learning_rate": 6.84931506849315e-08,
28
+ "logits/chosen": -2.4231245517730713,
29
+ "logits/rejected": -2.3566601276397705,
30
+ "logps/chosen": -293.38800048828125,
31
+ "logps/rejected": -226.29283142089844,
32
+ "loss": 0.6933,
33
+ "rewards/accuracies": 0.4548611044883728,
34
+ "rewards/chosen": 0.003188559552654624,
35
+ "rewards/margins": 0.0021638227626681328,
36
+ "rewards/rejected": 0.0010247372556477785,
37
+ "step": 10
38
+ },
39
+ {
40
+ "epoch": 0.08,
41
+ "learning_rate": 1.36986301369863e-07,
42
+ "logits/chosen": -2.431933879852295,
43
+ "logits/rejected": -2.405198574066162,
44
+ "logps/chosen": -278.5166931152344,
45
+ "logps/rejected": -216.7791290283203,
46
+ "loss": 0.6942,
47
+ "rewards/accuracies": 0.504687488079071,
48
+ "rewards/chosen": -0.000816329091321677,
49
+ "rewards/margins": 0.0019420869648456573,
50
+ "rewards/rejected": -0.0027584161143749952,
51
+ "step": 20
52
+ },
53
+ {
54
+ "epoch": 0.12,
55
+ "learning_rate": 2.054794520547945e-07,
56
+ "logits/chosen": -2.389657497406006,
57
+ "logits/rejected": -2.348972797393799,
58
+ "logps/chosen": -252.9993438720703,
59
+ "logps/rejected": -207.1633758544922,
60
+ "loss": 0.6935,
61
+ "rewards/accuracies": 0.53125,
62
+ "rewards/chosen": -0.0014556010719388723,
63
+ "rewards/margins": 0.0006705918349325657,
64
+ "rewards/rejected": -0.0021261931397020817,
65
+ "step": 30
66
+ },
67
+ {
68
+ "epoch": 0.17,
69
+ "learning_rate": 2.73972602739726e-07,
70
+ "logits/chosen": -2.460561752319336,
71
+ "logits/rejected": -2.414844036102295,
72
+ "logps/chosen": -283.7592468261719,
73
+ "logps/rejected": -216.4773712158203,
74
+ "loss": 0.6909,
75
+ "rewards/accuracies": 0.5484374761581421,
76
+ "rewards/chosen": 0.004375931341201067,
77
+ "rewards/margins": 0.00752140861004591,
78
+ "rewards/rejected": -0.0031454775016754866,
79
+ "step": 40
80
+ },
81
+ {
82
+ "epoch": 0.21,
83
+ "learning_rate": 3.424657534246575e-07,
84
+ "logits/chosen": -2.4366953372955322,
85
+ "logits/rejected": -2.3972277641296387,
86
+ "logps/chosen": -267.2607727050781,
87
+ "logps/rejected": -223.6705780029297,
88
+ "loss": 0.6897,
89
+ "rewards/accuracies": 0.5406249761581421,
90
+ "rewards/chosen": 0.004865794442594051,
91
+ "rewards/margins": 0.007948420941829681,
92
+ "rewards/rejected": -0.0030826255679130554,
93
+ "step": 50
94
+ },
95
+ {
96
+ "epoch": 0.25,
97
+ "learning_rate": 4.10958904109589e-07,
98
+ "logits/chosen": -2.412304639816284,
99
+ "logits/rejected": -2.3892178535461426,
100
+ "logps/chosen": -266.85028076171875,
101
+ "logps/rejected": -214.7494659423828,
102
+ "loss": 0.6873,
103
+ "rewards/accuracies": 0.574999988079071,
104
+ "rewards/chosen": 0.00616841483861208,
105
+ "rewards/margins": 0.012422902509570122,
106
+ "rewards/rejected": -0.006254489067941904,
107
+ "step": 60
108
+ },
109
+ {
110
+ "epoch": 0.29,
111
+ "learning_rate": 4.794520547945205e-07,
112
+ "logits/chosen": -2.390881299972534,
113
+ "logits/rejected": -2.3977627754211426,
114
+ "logps/chosen": -254.04043579101562,
115
+ "logps/rejected": -214.6400909423828,
116
+ "loss": 0.6847,
117
+ "rewards/accuracies": 0.581250011920929,
118
+ "rewards/chosen": 0.009625923819839954,
119
+ "rewards/margins": 0.01777799427509308,
120
+ "rewards/rejected": -0.008152070455253124,
121
+ "step": 70
122
+ },
123
+ {
124
+ "epoch": 0.33,
125
+ "learning_rate": 4.946401225114854e-07,
126
+ "logits/chosen": -2.4295055866241455,
127
+ "logits/rejected": -2.37807035446167,
128
+ "logps/chosen": -265.05718994140625,
129
+ "logps/rejected": -218.77059936523438,
130
+ "loss": 0.6795,
131
+ "rewards/accuracies": 0.6015625,
132
+ "rewards/chosen": 0.013735203072428703,
133
+ "rewards/margins": 0.028081998229026794,
134
+ "rewards/rejected": -0.014346795156598091,
135
+ "step": 80
136
+ },
137
+ {
138
+ "epoch": 0.37,
139
+ "learning_rate": 4.869831546707504e-07,
140
+ "logits/chosen": -2.4787497520446777,
141
+ "logits/rejected": -2.422356128692627,
142
+ "logps/chosen": -271.730224609375,
143
+ "logps/rejected": -224.4839324951172,
144
+ "loss": 0.6734,
145
+ "rewards/accuracies": 0.640625,
146
+ "rewards/chosen": 0.022287212312221527,
147
+ "rewards/margins": 0.04716240242123604,
148
+ "rewards/rejected": -0.02487519010901451,
149
+ "step": 90
150
+ },
151
+ {
152
+ "epoch": 0.41,
153
+ "learning_rate": 4.793261868300153e-07,
154
+ "logits/chosen": -2.417426586151123,
155
+ "logits/rejected": -2.4039013385772705,
156
+ "logps/chosen": -273.8762512207031,
157
+ "logps/rejected": -227.797607421875,
158
+ "loss": 0.6692,
159
+ "rewards/accuracies": 0.659375011920929,
160
+ "rewards/chosen": 0.025275733321905136,
161
+ "rewards/margins": 0.057271964848041534,
162
+ "rewards/rejected": -0.0319962315261364,
163
+ "step": 100
164
+ },
165
+ {
166
+ "epoch": 0.45,
167
+ "learning_rate": 4.7166921898928023e-07,
168
+ "logits/chosen": -2.4509871006011963,
169
+ "logits/rejected": -2.3908462524414062,
170
+ "logps/chosen": -252.6064453125,
171
+ "logps/rejected": -222.64639282226562,
172
+ "loss": 0.6628,
173
+ "rewards/accuracies": 0.667187511920929,
174
+ "rewards/chosen": 0.024279529228806496,
175
+ "rewards/margins": 0.06397499889135361,
176
+ "rewards/rejected": -0.03969546779990196,
177
+ "step": 110
178
+ },
179
+ {
180
+ "epoch": 0.5,
181
+ "learning_rate": 4.640122511485451e-07,
182
+ "logits/chosen": -2.412073850631714,
183
+ "logits/rejected": -2.4017536640167236,
184
+ "logps/chosen": -256.89813232421875,
185
+ "logps/rejected": -219.8057098388672,
186
+ "loss": 0.6581,
187
+ "rewards/accuracies": 0.625,
188
+ "rewards/chosen": 0.029207896441221237,
189
+ "rewards/margins": 0.07588861137628555,
190
+ "rewards/rejected": -0.04668071120977402,
191
+ "step": 120
192
+ },
193
+ {
194
+ "epoch": 0.54,
195
+ "learning_rate": 4.563552833078101e-07,
196
+ "logits/chosen": -2.4562458992004395,
197
+ "logits/rejected": -2.3951973915100098,
198
+ "logps/chosen": -265.9952697753906,
199
+ "logps/rejected": -225.94125366210938,
200
+ "loss": 0.6504,
201
+ "rewards/accuracies": 0.65625,
202
+ "rewards/chosen": 0.025836100801825523,
203
+ "rewards/margins": 0.09206128865480423,
204
+ "rewards/rejected": -0.06622518599033356,
205
+ "step": 130
206
+ },
207
+ {
208
+ "epoch": 0.58,
209
+ "learning_rate": 4.4869831546707505e-07,
210
+ "logits/chosen": -2.460266590118408,
211
+ "logits/rejected": -2.401520013809204,
212
+ "logps/chosen": -270.29888916015625,
213
+ "logps/rejected": -230.37539672851562,
214
+ "loss": 0.6495,
215
+ "rewards/accuracies": 0.671875,
216
+ "rewards/chosen": 0.03395112603902817,
217
+ "rewards/margins": 0.11508414894342422,
218
+ "rewards/rejected": -0.08113302290439606,
219
+ "step": 140
220
+ },
221
+ {
222
+ "epoch": 0.62,
223
+ "learning_rate": 4.4104134762633994e-07,
224
+ "logits/chosen": -2.4852752685546875,
225
+ "logits/rejected": -2.4318604469299316,
226
+ "logps/chosen": -265.973388671875,
227
+ "logps/rejected": -226.55484008789062,
228
+ "loss": 0.6433,
229
+ "rewards/accuracies": 0.692187488079071,
230
+ "rewards/chosen": 0.03317371755838394,
231
+ "rewards/margins": 0.12171275913715363,
232
+ "rewards/rejected": -0.08853904157876968,
233
+ "step": 150
234
+ },
235
+ {
236
+ "epoch": 0.66,
237
+ "learning_rate": 4.333843797856049e-07,
238
+ "logits/chosen": -2.4467227458953857,
239
+ "logits/rejected": -2.4029695987701416,
240
+ "logps/chosen": -279.33648681640625,
241
+ "logps/rejected": -239.00009155273438,
242
+ "loss": 0.6343,
243
+ "rewards/accuracies": 0.6796875,
244
+ "rewards/chosen": 0.04138711839914322,
245
+ "rewards/margins": 0.1465190351009369,
246
+ "rewards/rejected": -0.10513193905353546,
247
+ "step": 160
248
+ },
249
+ {
250
+ "epoch": 0.7,
251
+ "learning_rate": 4.257274119448698e-07,
252
+ "logits/chosen": -2.4798355102539062,
253
+ "logits/rejected": -2.452397108078003,
254
+ "logps/chosen": -270.727783203125,
255
+ "logps/rejected": -239.09780883789062,
256
+ "loss": 0.6335,
257
+ "rewards/accuracies": 0.653124988079071,
258
+ "rewards/chosen": 0.024355659261345863,
259
+ "rewards/margins": 0.1352422684431076,
260
+ "rewards/rejected": -0.11088661849498749,
261
+ "step": 170
262
+ },
263
+ {
264
+ "epoch": 0.74,
265
+ "learning_rate": 4.180704441041347e-07,
266
+ "logits/chosen": -2.4359121322631836,
267
+ "logits/rejected": -2.388683795928955,
268
+ "logps/chosen": -256.79022216796875,
269
+ "logps/rejected": -226.1436767578125,
270
+ "loss": 0.6367,
271
+ "rewards/accuracies": 0.6640625,
272
+ "rewards/chosen": 0.016556020826101303,
273
+ "rewards/margins": 0.147763192653656,
274
+ "rewards/rejected": -0.1312071532011032,
275
+ "step": 180
276
+ },
277
+ {
278
+ "epoch": 0.78,
279
+ "learning_rate": 4.1041347626339966e-07,
280
+ "logits/chosen": -2.4478251934051514,
281
+ "logits/rejected": -2.4065427780151367,
282
+ "logps/chosen": -261.63702392578125,
283
+ "logps/rejected": -213.1779327392578,
284
+ "loss": 0.6269,
285
+ "rewards/accuracies": 0.668749988079071,
286
+ "rewards/chosen": 0.02061801217496395,
287
+ "rewards/margins": 0.17272573709487915,
288
+ "rewards/rejected": -0.15210774540901184,
289
+ "step": 190
290
+ },
291
+ {
292
+ "epoch": 0.83,
293
+ "learning_rate": 4.027565084226646e-07,
294
+ "logits/chosen": -2.4714255332946777,
295
+ "logits/rejected": -2.414602279663086,
296
+ "logps/chosen": -262.29486083984375,
297
+ "logps/rejected": -218.0116424560547,
298
+ "loss": 0.6175,
299
+ "rewards/accuracies": 0.6890624761581421,
300
+ "rewards/chosen": 0.027450546622276306,
301
+ "rewards/margins": 0.19447624683380127,
302
+ "rewards/rejected": -0.16702571511268616,
303
+ "step": 200
304
+ },
305
+ {
306
+ "epoch": 0.87,
307
+ "learning_rate": 3.9509954058192954e-07,
308
+ "logits/chosen": -2.4752840995788574,
309
+ "logits/rejected": -2.4354655742645264,
310
+ "logps/chosen": -283.89959716796875,
311
+ "logps/rejected": -231.7078399658203,
312
+ "loss": 0.6161,
313
+ "rewards/accuracies": 0.653124988079071,
314
+ "rewards/chosen": 0.024292152374982834,
315
+ "rewards/margins": 0.21237091720104218,
316
+ "rewards/rejected": -0.18807876110076904,
317
+ "step": 210
318
+ },
319
+ {
320
+ "epoch": 0.91,
321
+ "learning_rate": 3.874425727411945e-07,
322
+ "logits/chosen": -2.422091007232666,
323
+ "logits/rejected": -2.40881609916687,
324
+ "logps/chosen": -276.7785339355469,
325
+ "logps/rejected": -229.2734832763672,
326
+ "loss": 0.6144,
327
+ "rewards/accuracies": 0.660937488079071,
328
+ "rewards/chosen": 0.017709506675601006,
329
+ "rewards/margins": 0.21948948502540588,
330
+ "rewards/rejected": -0.20178000628948212,
331
+ "step": 220
332
+ },
333
+ {
334
+ "epoch": 0.95,
335
+ "learning_rate": 3.797856049004594e-07,
336
+ "logits/chosen": -2.4441866874694824,
337
+ "logits/rejected": -2.38869571685791,
338
+ "logps/chosen": -264.10430908203125,
339
+ "logps/rejected": -228.3271484375,
340
+ "loss": 0.6117,
341
+ "rewards/accuracies": 0.6875,
342
+ "rewards/chosen": 0.029474353417754173,
343
+ "rewards/margins": 0.22035422921180725,
344
+ "rewards/rejected": -0.19087985157966614,
345
+ "step": 230
346
+ },
347
+ {
348
+ "epoch": 0.99,
349
+ "learning_rate": 3.7212863705972436e-07,
350
+ "logits/chosen": -2.4633097648620605,
351
+ "logits/rejected": -2.4186224937438965,
352
+ "logps/chosen": -271.4654235839844,
353
+ "logps/rejected": -222.46841430664062,
354
+ "loss": 0.6125,
355
+ "rewards/accuracies": 0.6937500238418579,
356
+ "rewards/chosen": 0.024074096232652664,
357
+ "rewards/margins": 0.2165375053882599,
358
+ "rewards/rejected": -0.19246339797973633,
359
+ "step": 240
360
+ },
361
+ {
362
+ "epoch": 1.0,
363
+ "eval_logits/chosen": -2.144517421722412,
364
+ "eval_logits/rejected": -2.0242087841033936,
365
+ "eval_logps/chosen": -264.5997619628906,
366
+ "eval_logps/rejected": -221.6983184814453,
367
+ "eval_loss": 0.6057174205780029,
368
+ "eval_rewards/accuracies": 0.6759999990463257,
369
+ "eval_rewards/chosen": 0.007874858565628529,
370
+ "eval_rewards/margins": 0.24786852300167084,
371
+ "eval_rewards/rejected": -0.23999367654323578,
372
+ "eval_runtime": 278.9133,
373
+ "eval_samples_per_second": 7.171,
374
+ "eval_steps_per_second": 0.448,
375
+ "step": 242
376
+ },
377
+ {
378
+ "epoch": 1.03,
379
+ "learning_rate": 3.6447166921898925e-07,
380
+ "logits/chosen": -2.400252103805542,
381
+ "logits/rejected": -2.3472890853881836,
382
+ "logps/chosen": -257.4571838378906,
383
+ "logps/rejected": -210.4391326904297,
384
+ "loss": 0.6164,
385
+ "rewards/accuracies": 0.6734374761581421,
386
+ "rewards/chosen": 0.004229591693729162,
387
+ "rewards/margins": 0.22420725226402283,
388
+ "rewards/rejected": -0.21997769176959991,
389
+ "step": 250
390
+ },
391
+ {
392
+ "epoch": 1.07,
393
+ "learning_rate": 3.568147013782542e-07,
394
+ "logits/chosen": -2.415523052215576,
395
+ "logits/rejected": -2.3758111000061035,
396
+ "logps/chosen": -261.9351501464844,
397
+ "logps/rejected": -226.16259765625,
398
+ "loss": 0.6006,
399
+ "rewards/accuracies": 0.7265625,
400
+ "rewards/chosen": 0.023142099380493164,
401
+ "rewards/margins": 0.2655286490917206,
402
+ "rewards/rejected": -0.24238653481006622,
403
+ "step": 260
404
+ },
405
+ {
406
+ "epoch": 1.12,
407
+ "learning_rate": 3.4915773353751913e-07,
408
+ "logits/chosen": -2.429934024810791,
409
+ "logits/rejected": -2.365861415863037,
410
+ "logps/chosen": -278.4029846191406,
411
+ "logps/rejected": -236.08688354492188,
412
+ "loss": 0.5925,
413
+ "rewards/accuracies": 0.7406250238418579,
414
+ "rewards/chosen": 0.02966948226094246,
415
+ "rewards/margins": 0.33913469314575195,
416
+ "rewards/rejected": -0.3094651699066162,
417
+ "step": 270
418
+ },
419
+ {
420
+ "epoch": 1.16,
421
+ "learning_rate": 3.41500765696784e-07,
422
+ "logits/chosen": -2.4358582496643066,
423
+ "logits/rejected": -2.396267890930176,
424
+ "logps/chosen": -251.093017578125,
425
+ "logps/rejected": -225.80685424804688,
426
+ "loss": 0.6036,
427
+ "rewards/accuracies": 0.692187488079071,
428
+ "rewards/chosen": 0.010072538629174232,
429
+ "rewards/margins": 0.24589493870735168,
430
+ "rewards/rejected": -0.2358224093914032,
431
+ "step": 280
432
+ },
433
+ {
434
+ "epoch": 1.2,
435
+ "learning_rate": 3.33843797856049e-07,
436
+ "logits/chosen": -2.408804416656494,
437
+ "logits/rejected": -2.394888401031494,
438
+ "logps/chosen": -283.15380859375,
439
+ "logps/rejected": -228.33767700195312,
440
+ "loss": 0.5915,
441
+ "rewards/accuracies": 0.7124999761581421,
442
+ "rewards/chosen": 0.008710218593478203,
443
+ "rewards/margins": 0.3084966242313385,
444
+ "rewards/rejected": -0.29978638887405396,
445
+ "step": 290
446
+ },
447
+ {
448
+ "epoch": 1.24,
449
+ "learning_rate": 3.2618683001531396e-07,
450
+ "logits/chosen": -2.4084572792053223,
451
+ "logits/rejected": -2.337435722351074,
452
+ "logps/chosen": -261.3924865722656,
453
+ "logps/rejected": -227.77651977539062,
454
+ "loss": 0.588,
455
+ "rewards/accuracies": 0.698437511920929,
456
+ "rewards/chosen": 0.008477389812469482,
457
+ "rewards/margins": 0.298746258020401,
458
+ "rewards/rejected": -0.29026883840560913,
459
+ "step": 300
460
+ },
461
+ {
462
+ "epoch": 1.28,
463
+ "learning_rate": 3.1852986217457885e-07,
464
+ "logits/chosen": -2.4575297832489014,
465
+ "logits/rejected": -2.373924493789673,
466
+ "logps/chosen": -261.287109375,
467
+ "logps/rejected": -228.5553741455078,
468
+ "loss": 0.5972,
469
+ "rewards/accuracies": 0.6953125,
470
+ "rewards/chosen": 0.002108477521687746,
471
+ "rewards/margins": 0.2948620915412903,
472
+ "rewards/rejected": -0.2927536368370056,
473
+ "step": 310
474
+ },
475
+ {
476
+ "epoch": 1.32,
477
+ "learning_rate": 3.108728943338438e-07,
478
+ "logits/chosen": -2.4443328380584717,
479
+ "logits/rejected": -2.4351658821105957,
480
+ "logps/chosen": -252.80996704101562,
481
+ "logps/rejected": -237.87631225585938,
482
+ "loss": 0.5943,
483
+ "rewards/accuracies": 0.684374988079071,
484
+ "rewards/chosen": -0.015406561084091663,
485
+ "rewards/margins": 0.2515925168991089,
486
+ "rewards/rejected": -0.2669990658760071,
487
+ "step": 320
488
+ },
489
+ {
490
+ "epoch": 1.36,
491
+ "learning_rate": 3.0321592649310873e-07,
492
+ "logits/chosen": -2.424647092819214,
493
+ "logits/rejected": -2.357273578643799,
494
+ "logps/chosen": -253.7325897216797,
495
+ "logps/rejected": -224.3144073486328,
496
+ "loss": 0.587,
497
+ "rewards/accuracies": 0.6859375238418579,
498
+ "rewards/chosen": -0.0076437839306890965,
499
+ "rewards/margins": 0.2989902198314667,
500
+ "rewards/rejected": -0.30663400888442993,
501
+ "step": 330
502
+ },
503
+ {
504
+ "epoch": 1.4,
505
+ "learning_rate": 2.955589586523736e-07,
506
+ "logits/chosen": -2.4427545070648193,
507
+ "logits/rejected": -2.3824856281280518,
508
+ "logps/chosen": -265.68939208984375,
509
+ "logps/rejected": -226.4335174560547,
510
+ "loss": 0.592,
511
+ "rewards/accuracies": 0.7015625238418579,
512
+ "rewards/chosen": -0.015530401840806007,
513
+ "rewards/margins": 0.3260301351547241,
514
+ "rewards/rejected": -0.3415605425834656,
515
+ "step": 340
516
+ },
517
+ {
518
+ "epoch": 1.45,
519
+ "learning_rate": 2.8790199081163856e-07,
520
+ "logits/chosen": -2.439944267272949,
521
+ "logits/rejected": -2.3695976734161377,
522
+ "logps/chosen": -266.065673828125,
523
+ "logps/rejected": -225.2880859375,
524
+ "loss": 0.5939,
525
+ "rewards/accuracies": 0.6656249761581421,
526
+ "rewards/chosen": -0.026788845658302307,
527
+ "rewards/margins": 0.28384846448898315,
528
+ "rewards/rejected": -0.31063732504844666,
529
+ "step": 350
530
+ },
531
+ {
532
+ "epoch": 1.49,
533
+ "learning_rate": 2.802450229709035e-07,
534
+ "logits/chosen": -2.399728298187256,
535
+ "logits/rejected": -2.3489761352539062,
536
+ "logps/chosen": -254.9022216796875,
537
+ "logps/rejected": -213.33193969726562,
538
+ "loss": 0.5847,
539
+ "rewards/accuracies": 0.667187511920929,
540
+ "rewards/chosen": -0.04024948924779892,
541
+ "rewards/margins": 0.29891303181648254,
542
+ "rewards/rejected": -0.33916252851486206,
543
+ "step": 360
544
+ },
545
+ {
546
+ "epoch": 1.53,
547
+ "learning_rate": 2.725880551301684e-07,
548
+ "logits/chosen": -2.462254047393799,
549
+ "logits/rejected": -2.406602621078491,
550
+ "logps/chosen": -274.6975402832031,
551
+ "logps/rejected": -232.84591674804688,
552
+ "loss": 0.5929,
553
+ "rewards/accuracies": 0.6875,
554
+ "rewards/chosen": -0.02199350856244564,
555
+ "rewards/margins": 0.31067317724227905,
556
+ "rewards/rejected": -0.33266669511795044,
557
+ "step": 370
558
+ },
559
+ {
560
+ "epoch": 1.57,
561
+ "learning_rate": 2.649310872894334e-07,
562
+ "logits/chosen": -2.4482955932617188,
563
+ "logits/rejected": -2.4154446125030518,
564
+ "logps/chosen": -275.00775146484375,
565
+ "logps/rejected": -223.1331787109375,
566
+ "loss": 0.5816,
567
+ "rewards/accuracies": 0.7124999761581421,
568
+ "rewards/chosen": -0.020305102691054344,
569
+ "rewards/margins": 0.37037259340286255,
570
+ "rewards/rejected": -0.3906777501106262,
571
+ "step": 380
572
+ },
573
+ {
574
+ "epoch": 1.61,
575
+ "learning_rate": 2.572741194486983e-07,
576
+ "logits/chosen": -2.448878765106201,
577
+ "logits/rejected": -2.393206834793091,
578
+ "logps/chosen": -273.81109619140625,
579
+ "logps/rejected": -208.37985229492188,
580
+ "loss": 0.5799,
581
+ "rewards/accuracies": 0.706250011920929,
582
+ "rewards/chosen": -0.029856573790311813,
583
+ "rewards/margins": 0.3645634055137634,
584
+ "rewards/rejected": -0.3944200277328491,
585
+ "step": 390
586
+ },
587
+ {
588
+ "epoch": 1.65,
589
+ "learning_rate": 2.496171516079632e-07,
590
+ "logits/chosen": -2.4658501148223877,
591
+ "logits/rejected": -2.399857521057129,
592
+ "logps/chosen": -293.2225341796875,
593
+ "logps/rejected": -239.4982452392578,
594
+ "loss": 0.5813,
595
+ "rewards/accuracies": 0.734375,
596
+ "rewards/chosen": 0.0061371102929115295,
597
+ "rewards/margins": 0.399463027715683,
598
+ "rewards/rejected": -0.39332595467567444,
599
+ "step": 400
600
+ },
601
+ {
602
+ "epoch": 1.69,
603
+ "learning_rate": 2.4196018376722816e-07,
604
+ "logits/chosen": -2.429685115814209,
605
+ "logits/rejected": -2.4006247520446777,
606
+ "logps/chosen": -278.5813903808594,
607
+ "logps/rejected": -228.4702911376953,
608
+ "loss": 0.5864,
609
+ "rewards/accuracies": 0.6859375238418579,
610
+ "rewards/chosen": -0.02374974638223648,
611
+ "rewards/margins": 0.37017589807510376,
612
+ "rewards/rejected": -0.39392566680908203,
613
+ "step": 410
614
+ },
615
+ {
616
+ "epoch": 1.74,
617
+ "learning_rate": 2.343032159264931e-07,
618
+ "logits/chosen": -2.403900146484375,
619
+ "logits/rejected": -2.3333194255828857,
620
+ "logps/chosen": -268.872802734375,
621
+ "logps/rejected": -224.37728881835938,
622
+ "loss": 0.579,
623
+ "rewards/accuracies": 0.706250011920929,
624
+ "rewards/chosen": -0.020599449053406715,
625
+ "rewards/margins": 0.431951105594635,
626
+ "rewards/rejected": -0.4525505602359772,
627
+ "step": 420
628
+ },
629
+ {
630
+ "epoch": 1.78,
631
+ "learning_rate": 2.26646248085758e-07,
632
+ "logits/chosen": -2.383470058441162,
633
+ "logits/rejected": -2.3353710174560547,
634
+ "logps/chosen": -259.7237854003906,
635
+ "logps/rejected": -217.79946899414062,
636
+ "loss": 0.573,
637
+ "rewards/accuracies": 0.723437488079071,
638
+ "rewards/chosen": -0.0417955107986927,
639
+ "rewards/margins": 0.39140504598617554,
640
+ "rewards/rejected": -0.43320053815841675,
641
+ "step": 430
642
+ },
643
+ {
644
+ "epoch": 1.82,
645
+ "learning_rate": 2.1898928024502298e-07,
646
+ "logits/chosen": -2.4446728229522705,
647
+ "logits/rejected": -2.3874154090881348,
648
+ "logps/chosen": -263.4950256347656,
649
+ "logps/rejected": -221.4724578857422,
650
+ "loss": 0.5753,
651
+ "rewards/accuracies": 0.7015625238418579,
652
+ "rewards/chosen": -0.02173582836985588,
653
+ "rewards/margins": 0.39518997073173523,
654
+ "rewards/rejected": -0.4169258177280426,
655
+ "step": 440
656
+ },
657
+ {
658
+ "epoch": 1.86,
659
+ "learning_rate": 2.113323124042879e-07,
660
+ "logits/chosen": -2.4275262355804443,
661
+ "logits/rejected": -2.3907971382141113,
662
+ "logps/chosen": -271.2684326171875,
663
+ "logps/rejected": -231.44381713867188,
664
+ "loss": 0.5744,
665
+ "rewards/accuracies": 0.721875011920929,
666
+ "rewards/chosen": -0.020908143371343613,
667
+ "rewards/margins": 0.41243448853492737,
668
+ "rewards/rejected": -0.4333426058292389,
669
+ "step": 450
670
+ },
671
+ {
672
+ "epoch": 1.9,
673
+ "learning_rate": 2.036753445635528e-07,
674
+ "logits/chosen": -2.4363036155700684,
675
+ "logits/rejected": -2.4147400856018066,
676
+ "logps/chosen": -284.01824951171875,
677
+ "logps/rejected": -238.273681640625,
678
+ "loss": 0.569,
679
+ "rewards/accuracies": 0.706250011920929,
680
+ "rewards/chosen": -0.018130071461200714,
681
+ "rewards/margins": 0.4541456699371338,
682
+ "rewards/rejected": -0.4722757339477539,
683
+ "step": 460
684
+ },
685
+ {
686
+ "epoch": 1.94,
687
+ "learning_rate": 1.9601837672281775e-07,
688
+ "logits/chosen": -2.4180634021759033,
689
+ "logits/rejected": -2.3854622840881348,
690
+ "logps/chosen": -270.1515808105469,
691
+ "logps/rejected": -236.3723907470703,
692
+ "loss": 0.576,
693
+ "rewards/accuracies": 0.7046874761581421,
694
+ "rewards/chosen": -0.05924994498491287,
695
+ "rewards/margins": 0.37609511613845825,
696
+ "rewards/rejected": -0.4353450834751129,
697
+ "step": 470
698
+ },
699
+ {
700
+ "epoch": 1.98,
701
+ "learning_rate": 1.883614088820827e-07,
702
+ "logits/chosen": -2.4381699562072754,
703
+ "logits/rejected": -2.391515016555786,
704
+ "logps/chosen": -268.735595703125,
705
+ "logps/rejected": -224.8667755126953,
706
+ "loss": 0.5849,
707
+ "rewards/accuracies": 0.723437488079071,
708
+ "rewards/chosen": -0.04678649455308914,
709
+ "rewards/margins": 0.3817201852798462,
710
+ "rewards/rejected": -0.42850667238235474,
711
+ "step": 480
712
+ },
713
+ {
714
+ "epoch": 2.0,
715
+ "eval_logits/chosen": -2.127939224243164,
716
+ "eval_logits/rejected": -2.007131576538086,
717
+ "eval_logps/chosen": -265.25634765625,
718
+ "eval_logps/rejected": -224.01229858398438,
719
+ "eval_loss": 0.5730655789375305,
720
+ "eval_rewards/accuracies": 0.6899999976158142,
721
+ "eval_rewards/chosen": -0.05778134614229202,
722
+ "eval_rewards/margins": 0.4136123061180115,
723
+ "eval_rewards/rejected": -0.4713936746120453,
724
+ "eval_runtime": 277.7189,
725
+ "eval_samples_per_second": 7.202,
726
+ "eval_steps_per_second": 0.45,
727
+ "step": 484
728
+ },
729
+ {
730
+ "epoch": 2.02,
731
+ "learning_rate": 1.807044410413476e-07,
732
+ "logits/chosen": -2.4097964763641357,
733
+ "logits/rejected": -2.3763108253479004,
734
+ "logps/chosen": -257.9292297363281,
735
+ "logps/rejected": -236.3641815185547,
736
+ "loss": 0.5772,
737
+ "rewards/accuracies": 0.703125,
738
+ "rewards/chosen": -0.05177872255444527,
739
+ "rewards/margins": 0.39789050817489624,
740
+ "rewards/rejected": -0.4496693015098572,
741
+ "step": 490
742
+ },
743
+ {
744
+ "epoch": 2.07,
745
+ "learning_rate": 1.7304747320061255e-07,
746
+ "logits/chosen": -2.4072229862213135,
747
+ "logits/rejected": -2.4033942222595215,
748
+ "logps/chosen": -263.5710754394531,
749
+ "logps/rejected": -230.6610107421875,
750
+ "loss": 0.5772,
751
+ "rewards/accuracies": 0.707812488079071,
752
+ "rewards/chosen": -0.019381705671548843,
753
+ "rewards/margins": 0.4000469744205475,
754
+ "rewards/rejected": -0.41942867636680603,
755
+ "step": 500
756
+ },
757
+ {
758
+ "epoch": 2.11,
759
+ "learning_rate": 1.6539050535987747e-07,
760
+ "logits/chosen": -2.4798319339752197,
761
+ "logits/rejected": -2.370913028717041,
762
+ "logps/chosen": -270.12432861328125,
763
+ "logps/rejected": -225.058349609375,
764
+ "loss": 0.5712,
765
+ "rewards/accuracies": 0.7093750238418579,
766
+ "rewards/chosen": -0.039138875901699066,
767
+ "rewards/margins": 0.4365014135837555,
768
+ "rewards/rejected": -0.47564029693603516,
769
+ "step": 510
770
+ },
771
+ {
772
+ "epoch": 2.15,
773
+ "learning_rate": 1.5773353751914243e-07,
774
+ "logits/chosen": -2.4861385822296143,
775
+ "logits/rejected": -2.425265312194824,
776
+ "logps/chosen": -284.8677673339844,
777
+ "logps/rejected": -229.98681640625,
778
+ "loss": 0.5715,
779
+ "rewards/accuracies": 0.7359374761581421,
780
+ "rewards/chosen": -0.024127285927534103,
781
+ "rewards/margins": 0.4679562449455261,
782
+ "rewards/rejected": -0.4920835494995117,
783
+ "step": 520
784
+ },
785
+ {
786
+ "epoch": 2.19,
787
+ "learning_rate": 1.5007656967840735e-07,
788
+ "logits/chosen": -2.383533000946045,
789
+ "logits/rejected": -2.3430206775665283,
790
+ "logps/chosen": -254.0509796142578,
791
+ "logps/rejected": -230.5810089111328,
792
+ "loss": 0.5677,
793
+ "rewards/accuracies": 0.721875011920929,
794
+ "rewards/chosen": -0.039885733276605606,
795
+ "rewards/margins": 0.4442899823188782,
796
+ "rewards/rejected": -0.4841756820678711,
797
+ "step": 530
798
+ },
799
+ {
800
+ "epoch": 2.23,
801
+ "learning_rate": 1.4241960183767226e-07,
802
+ "logits/chosen": -2.4291586875915527,
803
+ "logits/rejected": -2.372559070587158,
804
+ "logps/chosen": -282.87982177734375,
805
+ "logps/rejected": -235.8987274169922,
806
+ "loss": 0.573,
807
+ "rewards/accuracies": 0.692187488079071,
808
+ "rewards/chosen": -0.05947133153676987,
809
+ "rewards/margins": 0.41908422112464905,
810
+ "rewards/rejected": -0.4785555303096771,
811
+ "step": 540
812
+ },
813
+ {
814
+ "epoch": 2.27,
815
+ "learning_rate": 1.347626339969372e-07,
816
+ "logits/chosen": -2.423152208328247,
817
+ "logits/rejected": -2.3877062797546387,
818
+ "logps/chosen": -270.82269287109375,
819
+ "logps/rejected": -242.1062469482422,
820
+ "loss": 0.5759,
821
+ "rewards/accuracies": 0.706250011920929,
822
+ "rewards/chosen": -0.01524378638714552,
823
+ "rewards/margins": 0.40917444229125977,
824
+ "rewards/rejected": -0.42441821098327637,
825
+ "step": 550
826
+ },
827
+ {
828
+ "epoch": 2.31,
829
+ "learning_rate": 1.2710566615620215e-07,
830
+ "logits/chosen": -2.3735625743865967,
831
+ "logits/rejected": -2.327951431274414,
832
+ "logps/chosen": -274.332763671875,
833
+ "logps/rejected": -225.1637420654297,
834
+ "loss": 0.5594,
835
+ "rewards/accuracies": 0.707812488079071,
836
+ "rewards/chosen": -0.03534569963812828,
837
+ "rewards/margins": 0.4445571005344391,
838
+ "rewards/rejected": -0.47990283370018005,
839
+ "step": 560
840
+ },
841
+ {
842
+ "epoch": 2.35,
843
+ "learning_rate": 1.1944869831546706e-07,
844
+ "logits/chosen": -2.3997702598571777,
845
+ "logits/rejected": -2.3793346881866455,
846
+ "logps/chosen": -267.025390625,
847
+ "logps/rejected": -238.75692749023438,
848
+ "loss": 0.5724,
849
+ "rewards/accuracies": 0.6968749761581421,
850
+ "rewards/chosen": -0.07640588283538818,
851
+ "rewards/margins": 0.4082149565219879,
852
+ "rewards/rejected": -0.4846208095550537,
853
+ "step": 570
854
+ },
855
+ {
856
+ "epoch": 2.4,
857
+ "learning_rate": 1.11791730474732e-07,
858
+ "logits/chosen": -2.4132089614868164,
859
+ "logits/rejected": -2.3745548725128174,
860
+ "logps/chosen": -262.74658203125,
861
+ "logps/rejected": -226.48898315429688,
862
+ "loss": 0.5658,
863
+ "rewards/accuracies": 0.7359374761581421,
864
+ "rewards/chosen": -0.07082664221525192,
865
+ "rewards/margins": 0.48243194818496704,
866
+ "rewards/rejected": -0.5532585382461548,
867
+ "step": 580
868
+ },
869
+ {
870
+ "epoch": 2.44,
871
+ "learning_rate": 1.0413476263399694e-07,
872
+ "logits/chosen": -2.451371669769287,
873
+ "logits/rejected": -2.407169818878174,
874
+ "logps/chosen": -269.4725646972656,
875
+ "logps/rejected": -219.11929321289062,
876
+ "loss": 0.5664,
877
+ "rewards/accuracies": 0.729687511920929,
878
+ "rewards/chosen": -0.07148631662130356,
879
+ "rewards/margins": 0.4374977946281433,
880
+ "rewards/rejected": -0.5089840888977051,
881
+ "step": 590
882
+ },
883
+ {
884
+ "epoch": 2.48,
885
+ "learning_rate": 9.647779479326186e-08,
886
+ "logits/chosen": -2.3929615020751953,
887
+ "logits/rejected": -2.3882527351379395,
888
+ "logps/chosen": -251.06576538085938,
889
+ "logps/rejected": -224.4808807373047,
890
+ "loss": 0.5768,
891
+ "rewards/accuracies": 0.667187511920929,
892
+ "rewards/chosen": -0.06799022853374481,
893
+ "rewards/margins": 0.3539872467517853,
894
+ "rewards/rejected": -0.42197751998901367,
895
+ "step": 600
896
+ },
897
+ {
898
+ "epoch": 2.52,
899
+ "learning_rate": 8.88208269525268e-08,
900
+ "logits/chosen": -2.3815102577209473,
901
+ "logits/rejected": -2.3912739753723145,
902
+ "logps/chosen": -260.7129821777344,
903
+ "logps/rejected": -223.4461212158203,
904
+ "loss": 0.566,
905
+ "rewards/accuracies": 0.71875,
906
+ "rewards/chosen": -0.06933742761611938,
907
+ "rewards/margins": 0.41847410798072815,
908
+ "rewards/rejected": -0.48781150579452515,
909
+ "step": 610
910
+ },
911
+ {
912
+ "epoch": 2.56,
913
+ "learning_rate": 8.116385911179173e-08,
914
+ "logits/chosen": -2.3711135387420654,
915
+ "logits/rejected": -2.3626675605773926,
916
+ "logps/chosen": -279.54461669921875,
917
+ "logps/rejected": -219.48974609375,
918
+ "loss": 0.5701,
919
+ "rewards/accuracies": 0.734375,
920
+ "rewards/chosen": -0.023505648598074913,
921
+ "rewards/margins": 0.4627605378627777,
922
+ "rewards/rejected": -0.4862661361694336,
923
+ "step": 620
924
+ },
925
+ {
926
+ "epoch": 2.6,
927
+ "learning_rate": 7.350689127105667e-08,
928
+ "logits/chosen": -2.4528985023498535,
929
+ "logits/rejected": -2.3787388801574707,
930
+ "logps/chosen": -272.67572021484375,
931
+ "logps/rejected": -232.7178192138672,
932
+ "loss": 0.5603,
933
+ "rewards/accuracies": 0.706250011920929,
934
+ "rewards/chosen": -0.0497988685965538,
935
+ "rewards/margins": 0.46719294786453247,
936
+ "rewards/rejected": -0.5169917941093445,
937
+ "step": 630
938
+ },
939
+ {
940
+ "epoch": 2.64,
941
+ "learning_rate": 6.584992343032159e-08,
942
+ "logits/chosen": -2.355626106262207,
943
+ "logits/rejected": -2.342153549194336,
944
+ "logps/chosen": -263.1979675292969,
945
+ "logps/rejected": -229.1007843017578,
946
+ "loss": 0.5752,
947
+ "rewards/accuracies": 0.690625011920929,
948
+ "rewards/chosen": -0.04885732755064964,
949
+ "rewards/margins": 0.42579683661460876,
950
+ "rewards/rejected": -0.4746541380882263,
951
+ "step": 640
952
+ },
953
+ {
954
+ "epoch": 2.69,
955
+ "learning_rate": 5.819295558958652e-08,
956
+ "logits/chosen": -2.3994874954223633,
957
+ "logits/rejected": -2.34912109375,
958
+ "logps/chosen": -288.324462890625,
959
+ "logps/rejected": -222.3997344970703,
960
+ "loss": 0.5646,
961
+ "rewards/accuracies": 0.7484375238418579,
962
+ "rewards/chosen": -0.021456807851791382,
963
+ "rewards/margins": 0.5244570374488831,
964
+ "rewards/rejected": -0.545913815498352,
965
+ "step": 650
966
+ },
967
+ {
968
+ "epoch": 2.73,
969
+ "learning_rate": 5.0535987748851455e-08,
970
+ "logits/chosen": -2.4647653102874756,
971
+ "logits/rejected": -2.4115538597106934,
972
+ "logps/chosen": -275.77947998046875,
973
+ "logps/rejected": -231.9734649658203,
974
+ "loss": 0.5621,
975
+ "rewards/accuracies": 0.7015625238418579,
976
+ "rewards/chosen": -0.07127931714057922,
977
+ "rewards/margins": 0.41370710730552673,
978
+ "rewards/rejected": -0.48498645424842834,
979
+ "step": 660
980
+ },
981
+ {
982
+ "epoch": 2.77,
983
+ "learning_rate": 4.287901990811638e-08,
984
+ "logits/chosen": -2.4428927898406982,
985
+ "logits/rejected": -2.3569588661193848,
986
+ "logps/chosen": -268.5105285644531,
987
+ "logps/rejected": -244.9532928466797,
988
+ "loss": 0.5636,
989
+ "rewards/accuracies": 0.7109375,
990
+ "rewards/chosen": -0.048072461038827896,
991
+ "rewards/margins": 0.46722808480262756,
992
+ "rewards/rejected": -0.5153006315231323,
993
+ "step": 670
994
+ },
995
+ {
996
+ "epoch": 2.81,
997
+ "learning_rate": 3.522205206738132e-08,
998
+ "logits/chosen": -2.4071362018585205,
999
+ "logits/rejected": -2.3730602264404297,
1000
+ "logps/chosen": -275.3606872558594,
1001
+ "logps/rejected": -230.1616668701172,
1002
+ "loss": 0.5682,
1003
+ "rewards/accuracies": 0.71875,
1004
+ "rewards/chosen": -0.076107919216156,
1005
+ "rewards/margins": 0.44682103395462036,
1006
+ "rewards/rejected": -0.5229289531707764,
1007
+ "step": 680
1008
+ },
1009
+ {
1010
+ "epoch": 2.85,
1011
+ "learning_rate": 2.7565084226646246e-08,
1012
+ "logits/chosen": -2.3889846801757812,
1013
+ "logits/rejected": -2.376112937927246,
1014
+ "logps/chosen": -264.30804443359375,
1015
+ "logps/rejected": -236.71640014648438,
1016
+ "loss": 0.5747,
1017
+ "rewards/accuracies": 0.7109375,
1018
+ "rewards/chosen": -0.04645932838320732,
1019
+ "rewards/margins": 0.4625559449195862,
1020
+ "rewards/rejected": -0.5090152621269226,
1021
+ "step": 690
1022
+ },
1023
+ {
1024
+ "epoch": 2.89,
1025
+ "learning_rate": 1.9908116385911178e-08,
1026
+ "logits/chosen": -2.4064643383026123,
1027
+ "logits/rejected": -2.388768434524536,
1028
+ "logps/chosen": -260.59393310546875,
1029
+ "logps/rejected": -227.6177215576172,
1030
+ "loss": 0.5718,
1031
+ "rewards/accuracies": 0.671875,
1032
+ "rewards/chosen": -0.09053535759449005,
1033
+ "rewards/margins": 0.3823908865451813,
1034
+ "rewards/rejected": -0.47292619943618774,
1035
+ "step": 700
1036
+ },
1037
+ {
1038
+ "epoch": 2.93,
1039
+ "learning_rate": 1.225114854517611e-08,
1040
+ "logits/chosen": -2.436859130859375,
1041
+ "logits/rejected": -2.3719522953033447,
1042
+ "logps/chosen": -280.87774658203125,
1043
+ "logps/rejected": -221.83944702148438,
1044
+ "loss": 0.5696,
1045
+ "rewards/accuracies": 0.7093750238418579,
1046
+ "rewards/chosen": -0.06765580177307129,
1047
+ "rewards/margins": 0.4343256950378418,
1048
+ "rewards/rejected": -0.5019814968109131,
1049
+ "step": 710
1050
+ },
1051
+ {
1052
+ "epoch": 2.97,
1053
+ "learning_rate": 4.594180704441042e-09,
1054
+ "logits/chosen": -2.4007372856140137,
1055
+ "logits/rejected": -2.378627300262451,
1056
+ "logps/chosen": -257.9851989746094,
1057
+ "logps/rejected": -225.0294189453125,
1058
+ "loss": 0.5671,
1059
+ "rewards/accuracies": 0.699999988079071,
1060
+ "rewards/chosen": -0.053695209324359894,
1061
+ "rewards/margins": 0.4172247052192688,
1062
+ "rewards/rejected": -0.4709199070930481,
1063
+ "step": 720
1064
+ },
1065
+ {
1066
+ "epoch": 3.0,
1067
+ "eval_logits/chosen": -2.1214964389801025,
1068
+ "eval_logits/rejected": -2.000164031982422,
1069
+ "eval_logps/chosen": -265.42364501953125,
1070
+ "eval_logps/rejected": -224.6269989013672,
1071
+ "eval_loss": 0.5655443072319031,
1072
+ "eval_rewards/accuracies": 0.699999988079071,
1073
+ "eval_rewards/chosen": -0.07451467216014862,
1074
+ "eval_rewards/margins": 0.4583480656147003,
1075
+ "eval_rewards/rejected": -0.5328627228736877,
1076
+ "eval_runtime": 278.3863,
1077
+ "eval_samples_per_second": 7.184,
1078
+ "eval_steps_per_second": 0.449,
1079
+ "step": 726
1080
+ },
1081
+ {
1082
+ "epoch": 3.0,
1083
+ "step": 726,
1084
+ "total_flos": 0.0,
1085
+ "train_loss": 0.6037390495627379,
1086
+ "train_runtime": 36278.6969,
1087
+ "train_samples_per_second": 5.124,
1088
+ "train_steps_per_second": 0.02
1089
+ }
1090
+ ],
1091
+ "logging_steps": 10,
1092
+ "max_steps": 726,
1093
+ "num_train_epochs": 3,
1094
+ "save_steps": 500,
1095
+ "total_flos": 0.0,
1096
+ "trial_name": null,
1097
+ "trial_params": null
1098
+ }
training_args.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:770ceb7f475a80fbf8cdb9216d30e9881423a9197d8cb880f7ee45cd0ec56959
3
+ size 4728