ptrdvn committed
Commit 679732b
Parent: 034f9a4

Upload folder using huggingface_hub
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ tokenizer.json filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,61 @@
+ ---
+ library_name: transformers
+ license: other
+ base_model: Qwen/Qwen2.5-7B-Instruct
+ tags:
+ - llama-factory
+ - full
+ - generated_from_trainer
+ model-index:
+ - name: simpo_trained_1
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # simpo_trained_1
+
+ This model is a fine-tuned version of [Qwen/Qwen2.5-7B-Instruct](https://huggingface.co/Qwen/Qwen2.5-7B-Instruct) on the lightblue_orpo_data dataset.
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 1e-06
+ - train_batch_size: 1
+ - eval_batch_size: 1
+ - seed: 42
+ - distributed_type: multi-GPU
+ - num_devices: 8
+ - gradient_accumulation_steps: 16
+ - total_train_batch_size: 128
+ - total_eval_batch_size: 8
+ - optimizer: adamw_torch with betas=(0.9, 0.999), epsilon=1e-08, and no additional optimizer arguments
+ - lr_scheduler_type: cosine
+ - lr_scheduler_warmup_ratio: 0.1
+ - num_epochs: 1.0
+
+ ### Training results
+
+
+
+ ### Framework versions
+
+ - Transformers 4.46.1
+ - Pytorch 2.4.0+cu121
+ - Datasets 3.1.0
+ - Tokenizers 0.20.3
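
One quick cross-check on the hyperparameters above (an editor's sketch, not part of the generated card): the reported total_train_batch_size is exactly the product of the per-device batch size, the device count, and the gradient accumulation steps.

```python
# Editor's sketch: verify the effective batch size implied by the
# hyperparameters listed in the model card above.
train_batch_size = 1
num_devices = 8
gradient_accumulation_steps = 16

total_train_batch_size = train_batch_size * num_devices * gradient_accumulation_steps
assert total_train_batch_size == 128  # matches total_train_batch_size in the card
```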
added_tokens.json ADDED
@@ -0,0 +1,24 @@
+ {
+   "</tool_call>": 151658,
+   "<tool_call>": 151657,
+   "<|box_end|>": 151649,
+   "<|box_start|>": 151648,
+   "<|endoftext|>": 151643,
+   "<|file_sep|>": 151664,
+   "<|fim_middle|>": 151660,
+   "<|fim_pad|>": 151662,
+   "<|fim_prefix|>": 151659,
+   "<|fim_suffix|>": 151661,
+   "<|im_end|>": 151645,
+   "<|im_start|>": 151644,
+   "<|image_pad|>": 151655,
+   "<|object_ref_end|>": 151647,
+   "<|object_ref_start|>": 151646,
+   "<|quad_end|>": 151651,
+   "<|quad_start|>": 151650,
+   "<|repo_name|>": 151663,
+   "<|video_pad|>": 151656,
+   "<|vision_end|>": 151653,
+   "<|vision_pad|>": 151654,
+   "<|vision_start|>": 151652
+ }
all_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+   "epoch": 0.9982859101816935,
+   "total_flos": 58779245903872.0,
+   "train_loss": 1.1193010831599708,
+   "train_runtime": 13670.339,
+   "train_samples_per_second": 1.707,
+   "train_steps_per_second": 0.013
+ }
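
The throughput figures above are mutually consistent; a short sketch (editor's addition, with the caveat that the reported rates are rounded) shows that samples per optimizer step roughly matches the effective batch size of 128 and that the implied step count roughly matches the 182 total steps in trainer_log.jsonl below.

```python
import json

# Editor's sketch: sanity-check the reported throughput numbers against
# each other. The ratios are approximate because the rates are rounded.
results = json.load(open("all_results.json"))

samples_per_step = results["train_samples_per_second"] / results["train_steps_per_second"]
approx_steps = results["train_runtime"] * results["train_steps_per_second"]

print(f"~{samples_per_step:.0f} samples/step")  # ~131, close to the batch size of 128
print(f"~{approx_steps:.0f} optimizer steps")   # ~178, close to the 182 steps in trainer_log.jsonl
```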
config.json ADDED
@@ -0,0 +1,29 @@
+ {
+   "_name_or_path": "Qwen/Qwen2.5-7B-Instruct",
+   "architectures": [
+     "Qwen2ForCausalLM"
+   ],
+   "attention_dropout": 0.0,
+   "bos_token_id": 151643,
+   "eos_token_id": 151645,
+   "hidden_act": "silu",
+   "hidden_size": 3584,
+   "initializer_range": 0.02,
+   "intermediate_size": 18944,
+   "max_position_embeddings": 32768,
+   "max_window_layers": 28,
+   "model_type": "qwen2",
+   "num_attention_heads": 28,
+   "num_hidden_layers": 28,
+   "num_key_value_heads": 4,
+   "rms_norm_eps": 1e-06,
+   "rope_scaling": null,
+   "rope_theta": 1000000.0,
+   "sliding_window": null,
+   "tie_word_embeddings": false,
+   "torch_dtype": "bfloat16",
+   "transformers_version": "4.46.1",
+   "use_cache": false,
+   "use_sliding_window": false,
+   "vocab_size": 152064
+ }
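
A few useful quantities follow directly from this config. The sketch below (editor's addition) derives the per-head dimension and, assuming a bf16 KV cache, the cache footprint per token of context, which the 4-way grouped-query attention (num_key_value_heads=4 vs. num_attention_heads=28) keeps small.

```python
# Editor's sketch: quantities implied by the config.json values above.
hidden_size = 3584
num_attention_heads = 28
num_key_value_heads = 4
num_hidden_layers = 28

head_dim = hidden_size // num_attention_heads                 # 128
gqa_group_size = num_attention_heads // num_key_value_heads   # 7 query heads per KV head

# KV-cache footprint per token in bf16 (2 bytes), keys and values:
kv_bytes_per_token = 2 * num_hidden_layers * num_key_value_heads * head_dim * 2
print(kv_bytes_per_token)  # 57344 bytes, i.e. 56 KiB per token of context
```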
generation_config.json ADDED
@@ -0,0 +1,14 @@
+ {
+   "bos_token_id": 151643,
+   "do_sample": true,
+   "eos_token_id": [
+     151645,
+     151643
+   ],
+   "pad_token_id": 151643,
+   "repetition_penalty": 1.05,
+   "temperature": 0.7,
+   "top_k": 20,
+   "top_p": 0.8,
+   "transformers_version": "4.46.1"
+ }
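
These sampling defaults are loaded alongside the model and used by generate() unless overridden per call. A minimal sketch (editor's addition; the repo id is hypothetical, standing in for wherever this commit lives):

```python
from transformers import GenerationConfig

# Editor's sketch: load the generation defaults shipped with this commit.
gen_cfg = GenerationConfig.from_pretrained("user/simpo_trained_1")  # hypothetical repo id

print(gen_cfg.do_sample, gen_cfg.temperature, gen_cfg.top_p)  # True 0.7 0.8
# Individual fields can still be overridden per call, e.g.:
# model.generate(**inputs, generation_config=gen_cfg, max_new_tokens=128)
```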
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
model-00001-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c54c66db42fa82fe5847dbacc47dd223170339572089ebc336a195fc0a18fd99
+ size 4877660776
model-00002-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a8ff40a68ab8f646969d08226933892a38b53c0260587a95b65e69d4ad4e1995
+ size 4932751008
model-00003-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c768cda91e51eccd3e57063a23579529958a3a430c4a40c08db0c72d1457d975
+ size 4330865200
model-00004-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:61651c2ce64e5cd654f636513187540b2f5f8dd77b1ba130e0da3b3bba5a0f67
+ size 1089994880
model.safetensors.index.json ADDED
@@ -0,0 +1,346 @@
+ {
+   "metadata": {
+     "total_size": 15231233024
+   },
+   "weight_map": {
+     "lm_head.weight": "model-00004-of-00004.safetensors",
+     "model.embed_tokens.weight": "model-00001-of-00004.safetensors",
+     "model.layers.0.input_layernorm.weight": "model-00001-of-00004.safetensors",
+     "model.layers.0.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.0.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+     "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
+     "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
+     "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
+     "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.1.input_layernorm.weight": "model-00001-of-00004.safetensors",
+     "model.layers.1.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.1.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+     "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
+     "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
+     "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
+     "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.10.input_layernorm.weight": "model-00002-of-00004.safetensors",
+     "model.layers.10.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.10.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.10.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.10.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+     "model.layers.10.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
+     "model.layers.10.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.10.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.10.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
+     "model.layers.10.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.10.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
+     "model.layers.10.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.11.input_layernorm.weight": "model-00002-of-00004.safetensors",
+     "model.layers.11.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.11.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.11.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.11.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+     "model.layers.11.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
+     "model.layers.11.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.11.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.11.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
+     "model.layers.11.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.11.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
+     "model.layers.11.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.12.input_layernorm.weight": "model-00002-of-00004.safetensors",
+     "model.layers.12.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.12.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.12.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.12.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+     "model.layers.12.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
+     "model.layers.12.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.12.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.12.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
+     "model.layers.12.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.12.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
+     "model.layers.12.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.13.input_layernorm.weight": "model-00002-of-00004.safetensors",
+     "model.layers.13.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.13.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.13.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.13.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+     "model.layers.13.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
+     "model.layers.13.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.13.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.13.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
+     "model.layers.13.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.13.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
+     "model.layers.13.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.14.input_layernorm.weight": "model-00002-of-00004.safetensors",
+     "model.layers.14.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.14.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.14.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.14.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+     "model.layers.14.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
+     "model.layers.14.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.14.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.14.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
+     "model.layers.14.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.14.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
+     "model.layers.14.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.15.input_layernorm.weight": "model-00002-of-00004.safetensors",
+     "model.layers.15.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.15.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.15.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.15.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+     "model.layers.15.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
+     "model.layers.15.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.15.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.15.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
+     "model.layers.15.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.15.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
+     "model.layers.15.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.16.input_layernorm.weight": "model-00002-of-00004.safetensors",
+     "model.layers.16.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.16.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.16.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.16.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+     "model.layers.16.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
+     "model.layers.16.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.16.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.16.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
+     "model.layers.16.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.16.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
+     "model.layers.16.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.17.input_layernorm.weight": "model-00002-of-00004.safetensors",
+     "model.layers.17.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.17.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.17.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.17.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+     "model.layers.17.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
+     "model.layers.17.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.17.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.17.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
+     "model.layers.17.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.17.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
+     "model.layers.17.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.18.input_layernorm.weight": "model-00003-of-00004.safetensors",
+     "model.layers.18.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.18.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.18.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.18.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+     "model.layers.18.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
+     "model.layers.18.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.18.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.18.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
+     "model.layers.18.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.18.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
+     "model.layers.18.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.19.input_layernorm.weight": "model-00003-of-00004.safetensors",
+     "model.layers.19.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.19.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.19.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.19.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+     "model.layers.19.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+     "model.layers.19.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.19.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.19.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+     "model.layers.19.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.19.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+     "model.layers.19.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.2.input_layernorm.weight": "model-00001-of-00004.safetensors",
+     "model.layers.2.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.2.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+     "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
+     "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
+     "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
+     "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.20.input_layernorm.weight": "model-00003-of-00004.safetensors",
+     "model.layers.20.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.20.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.20.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.20.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+     "model.layers.20.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+     "model.layers.20.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.20.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.20.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+     "model.layers.20.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.20.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+     "model.layers.20.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.21.input_layernorm.weight": "model-00003-of-00004.safetensors",
+     "model.layers.21.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.21.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.21.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.21.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+     "model.layers.21.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+     "model.layers.21.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.21.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.21.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+     "model.layers.21.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.21.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+     "model.layers.21.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.22.input_layernorm.weight": "model-00003-of-00004.safetensors",
+     "model.layers.22.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.22.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.22.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.22.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+     "model.layers.22.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+     "model.layers.22.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.22.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.22.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+     "model.layers.22.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.22.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+     "model.layers.22.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.23.input_layernorm.weight": "model-00003-of-00004.safetensors",
+     "model.layers.23.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.23.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.23.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.23.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+     "model.layers.23.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+     "model.layers.23.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.23.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.23.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+     "model.layers.23.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.23.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+     "model.layers.23.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.24.input_layernorm.weight": "model-00003-of-00004.safetensors",
+     "model.layers.24.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.24.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.24.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.24.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+     "model.layers.24.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+     "model.layers.24.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.24.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.24.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+     "model.layers.24.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.24.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+     "model.layers.24.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.25.input_layernorm.weight": "model-00003-of-00004.safetensors",
+     "model.layers.25.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.25.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.25.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.25.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+     "model.layers.25.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+     "model.layers.25.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.25.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.25.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+     "model.layers.25.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.25.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+     "model.layers.25.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.26.input_layernorm.weight": "model-00003-of-00004.safetensors",
+     "model.layers.26.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.26.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.26.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.26.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+     "model.layers.26.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+     "model.layers.26.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.26.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.26.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+     "model.layers.26.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.26.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+     "model.layers.26.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.27.input_layernorm.weight": "model-00003-of-00004.safetensors",
+     "model.layers.27.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.27.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.27.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.27.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+     "model.layers.27.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+     "model.layers.27.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.27.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.27.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+     "model.layers.27.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.27.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+     "model.layers.27.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.3.input_layernorm.weight": "model-00001-of-00004.safetensors",
+     "model.layers.3.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.3.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+     "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
+     "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
+     "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
+     "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.4.input_layernorm.weight": "model-00001-of-00004.safetensors",
+     "model.layers.4.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.4.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+     "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
+     "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
+     "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
+     "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.5.input_layernorm.weight": "model-00001-of-00004.safetensors",
+     "model.layers.5.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.5.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+     "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
+     "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
+     "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
+     "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.6.input_layernorm.weight": "model-00001-of-00004.safetensors",
+     "model.layers.6.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.6.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+     "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
+     "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
+     "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
+     "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.7.input_layernorm.weight": "model-00001-of-00004.safetensors",
+     "model.layers.7.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.7.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+     "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
+     "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
+     "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
+     "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.8.input_layernorm.weight": "model-00002-of-00004.safetensors",
+     "model.layers.8.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.8.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.8.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.8.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+     "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
+     "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
+     "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
+     "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.9.input_layernorm.weight": "model-00002-of-00004.safetensors",
+     "model.layers.9.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.9.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.9.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.9.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+     "model.layers.9.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
+     "model.layers.9.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.9.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.9.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
+     "model.layers.9.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.9.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
+     "model.layers.9.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+     "model.norm.weight": "model-00003-of-00004.safetensors"
+   }
+ }
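
The weight_map above is how sharded loaders resolve each tensor to its file. A short sketch (editor's addition, assuming the repo files sit in the current directory):

```python
import json
from collections import Counter

# Editor's sketch: resolve a tensor name to its shard and count tensors per shard.
index = json.load(open("model.safetensors.index.json"))

print(index["weight_map"]["model.norm.weight"])  # model-00003-of-00004.safetensors
print(Counter(index["weight_map"].values()))     # tensors per shard file

# Note: the four shard files listed earlier total 15,231,271,864 bytes, slightly
# more than metadata.total_size (15,231,233,024), because each .safetensors
# file carries its own small header in addition to the raw tensor bytes.
```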
special_tokens_map.json ADDED
@@ -0,0 +1,31 @@
+ {
+   "additional_special_tokens": [
+     "<|im_start|>",
+     "<|im_end|>",
+     "<|object_ref_start|>",
+     "<|object_ref_end|>",
+     "<|box_start|>",
+     "<|box_end|>",
+     "<|quad_start|>",
+     "<|quad_end|>",
+     "<|vision_start|>",
+     "<|vision_end|>",
+     "<|vision_pad|>",
+     "<|image_pad|>",
+     "<|video_pad|>"
+   ],
+   "eos_token": {
+     "content": "<|im_end|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
tokenizer.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9c5ae00e602b8860cbd784ba82a8aa14e8feecec692e7076590d014d7b7fdafa
+ size 11421896
tokenizer_config.json ADDED
@@ -0,0 +1,208 @@
+ {
+   "add_bos_token": false,
+   "add_prefix_space": false,
+   "added_tokens_decoder": {
+     "151643": {
+       "content": "<|endoftext|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151644": {
+       "content": "<|im_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151645": {
+       "content": "<|im_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151646": {
+       "content": "<|object_ref_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151647": {
+       "content": "<|object_ref_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151648": {
+       "content": "<|box_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151649": {
+       "content": "<|box_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151650": {
+       "content": "<|quad_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151651": {
+       "content": "<|quad_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151652": {
+       "content": "<|vision_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151653": {
+       "content": "<|vision_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151654": {
+       "content": "<|vision_pad|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151655": {
+       "content": "<|image_pad|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151656": {
+       "content": "<|video_pad|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151657": {
+       "content": "<tool_call>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151658": {
+       "content": "</tool_call>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151659": {
+       "content": "<|fim_prefix|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151660": {
+       "content": "<|fim_middle|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151661": {
+       "content": "<|fim_suffix|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151662": {
+       "content": "<|fim_pad|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151663": {
+       "content": "<|repo_name|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151664": {
+       "content": "<|file_sep|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     }
+   },
+   "additional_special_tokens": [
+     "<|im_start|>",
+     "<|im_end|>",
+     "<|object_ref_start|>",
+     "<|object_ref_end|>",
+     "<|box_start|>",
+     "<|box_end|>",
+     "<|quad_start|>",
+     "<|quad_end|>",
+     "<|vision_start|>",
+     "<|vision_end|>",
+     "<|vision_pad|>",
+     "<|image_pad|>",
+     "<|video_pad|>"
+   ],
+   "bos_token": null,
+   "chat_template": "{%- if tools %}\n    {{- '<|im_start|>system\\n' }}\n    {%- if messages[0]['role'] == 'system' %}\n        {{- messages[0]['content'] }}\n    {%- else %}\n        {{- 'You are Qwen, created by Alibaba Cloud. You are a helpful assistant.' }}\n    {%- endif %}\n    {{- \"\\n\\n# Tools\\n\\nYou may call one or more functions to assist with the user query.\\n\\nYou are provided with function signatures within <tools></tools> XML tags:\\n<tools>\" }}\n    {%- for tool in tools %}\n        {{- \"\\n\" }}\n        {{- tool | tojson }}\n    {%- endfor %}\n    {{- \"\\n</tools>\\n\\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\\n<tool_call>\\n{\\\"name\\\": <function-name>, \\\"arguments\\\": <args-json-object>}\\n</tool_call><|im_end|>\\n\" }}\n{%- else %}\n    {%- if messages[0]['role'] == 'system' %}\n        {{- '<|im_start|>system\\n' + messages[0]['content'] + '<|im_end|>\\n' }}\n    {%- else %}\n        {{- '<|im_start|>system\\nYou are Qwen, created by Alibaba Cloud. You are a helpful assistant.<|im_end|>\\n' }}\n    {%- endif %}\n{%- endif %}\n{%- for message in messages %}\n    {%- if (message.role == \"user\") or (message.role == \"system\" and not loop.first) or (message.role == \"assistant\" and not message.tool_calls) %}\n        {{- '<|im_start|>' + message.role + '\\n' + message.content + '<|im_end|>' + '\\n' }}\n    {%- elif message.role == \"assistant\" %}\n        {{- '<|im_start|>' + message.role }}\n        {%- if message.content %}\n            {{- '\\n' + message.content }}\n        {%- endif %}\n        {%- for tool_call in message.tool_calls %}\n            {%- if tool_call.function is defined %}\n                {%- set tool_call = tool_call.function %}\n            {%- endif %}\n            {{- '\\n<tool_call>\\n{\"name\": \"' }}\n            {{- tool_call.name }}\n            {{- '\", \"arguments\": ' }}\n            {{- tool_call.arguments | tojson }}\n            {{- '}\\n</tool_call>' }}\n        {%- endfor %}\n        {{- '<|im_end|>\\n' }}\n    {%- elif message.role == \"tool\" %}\n        {%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != \"tool\") %}\n            {{- '<|im_start|>user' }}\n        {%- endif %}\n        {{- '\\n<tool_response>\\n' }}\n        {{- message.content }}\n        {{- '\\n</tool_response>' }}\n        {%- if loop.last or (messages[loop.index0 + 1].role != \"tool\") %}\n            {{- '<|im_end|>\\n' }}\n        {%- endif %}\n    {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n    {{- '<|im_start|>assistant\\n' }}\n{%- endif %}\n",
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "<|im_end|>",
+   "errors": "replace",
+   "model_max_length": 131072,
+   "pad_token": "<|endoftext|>",
+   "padding_side": "right",
+   "split_special_tokens": false,
+   "tokenizer_class": "Qwen2Tokenizer",
+   "unk_token": null
+ }
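
The long chat_template string above is a Jinja template that apply_chat_template renders into ChatML-style <|im_start|>/<|im_end|> turns, inserting the default Qwen system prompt when none is supplied. A minimal sketch (editor's addition; the repo id is hypothetical):

```python
from transformers import AutoTokenizer

# Editor's sketch: render the chat template shipped with this commit.
tokenizer = AutoTokenizer.from_pretrained("user/simpo_trained_1")  # hypothetical repo id

messages = [{"role": "user", "content": "Hello!"}]
print(tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True))
# <|im_start|>system
# You are Qwen, created by Alibaba Cloud. You are a helpful assistant.<|im_end|>
# <|im_start|>user
# Hello!<|im_end|>
# <|im_start|>assistant
```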
train_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+   "epoch": 0.9982859101816935,
+   "total_flos": 58779245903872.0,
+   "train_loss": 1.1193010831599708,
+   "train_runtime": 13670.339,
+   "train_samples_per_second": 1.707,
+   "train_steps_per_second": 0.013
+ }
trainer_log.jsonl ADDED
@@ -0,0 +1,183 @@
+ {"current_steps": 1, "total_steps": 182, "loss": 2.2106, "accuracy": 0.328125, "lr": 5.2631578947368416e-08, "epoch": 0.005485087418580734, "percentage": 0.55, "elapsed_time": "0:01:19", "remaining_time": "3:59:54"}
+ {"current_steps": 2, "total_steps": 182, "loss": 2.1187, "accuracy": 0.3671875, "lr": 1.0526315789473683e-07, "epoch": 0.010970174837161468, "percentage": 1.1, "elapsed_time": "0:02:33", "remaining_time": "3:49:39"}
+ {"current_steps": 3, "total_steps": 182, "loss": 2.0561, "accuracy": 0.375, "lr": 1.5789473684210525e-07, "epoch": 0.0164552622557422, "percentage": 1.65, "elapsed_time": "0:03:52", "remaining_time": "3:51:05"}
+ {"current_steps": 4, "total_steps": 182, "loss": 2.1332, "accuracy": 0.4140625, "lr": 2.1052631578947366e-07, "epoch": 0.021940349674322936, "percentage": 2.2, "elapsed_time": "0:05:08", "remaining_time": "3:48:30"}
+ {"current_steps": 5, "total_steps": 182, "loss": 2.0846, "accuracy": 0.3359375, "lr": 2.631578947368421e-07, "epoch": 0.027425437092903668, "percentage": 2.75, "elapsed_time": "0:06:25", "remaining_time": "3:47:23"}
+ {"current_steps": 6, "total_steps": 182, "loss": 2.1337, "accuracy": 0.375, "lr": 3.157894736842105e-07, "epoch": 0.0329105245114844, "percentage": 3.3, "elapsed_time": "0:07:40", "remaining_time": "3:44:59"}
+ {"current_steps": 7, "total_steps": 182, "loss": 2.0016, "accuracy": 0.4140625, "lr": 3.684210526315789e-07, "epoch": 0.03839561193006513, "percentage": 3.85, "elapsed_time": "0:08:55", "remaining_time": "3:43:05"}
+ {"current_steps": 8, "total_steps": 182, "loss": 2.1026, "accuracy": 0.390625, "lr": 4.2105263157894733e-07, "epoch": 0.04388069934864587, "percentage": 4.4, "elapsed_time": "0:10:13", "remaining_time": "3:42:32"}
+ {"current_steps": 9, "total_steps": 182, "loss": 2.425, "accuracy": 0.265625, "lr": 4.7368421052631574e-07, "epoch": 0.049365786767226603, "percentage": 4.95, "elapsed_time": "0:11:31", "remaining_time": "3:41:37"}
+ {"current_steps": 10, "total_steps": 182, "loss": 2.435, "accuracy": 0.296875, "lr": 5.263157894736842e-07, "epoch": 0.054850874185807336, "percentage": 5.49, "elapsed_time": "0:12:47", "remaining_time": "3:39:54"}
+ {"current_steps": 11, "total_steps": 182, "loss": 2.3341, "accuracy": 0.3359375, "lr": 5.789473684210526e-07, "epoch": 0.06033596160438807, "percentage": 6.04, "elapsed_time": "0:14:02", "remaining_time": "3:38:18"}
+ {"current_steps": 12, "total_steps": 182, "loss": 2.2512, "accuracy": 0.296875, "lr": 6.31578947368421e-07, "epoch": 0.0658210490229688, "percentage": 6.59, "elapsed_time": "0:15:17", "remaining_time": "3:36:40"}
+ {"current_steps": 13, "total_steps": 182, "loss": 2.1644, "accuracy": 0.328125, "lr": 6.842105263157895e-07, "epoch": 0.07130613644154954, "percentage": 7.14, "elapsed_time": "0:16:32", "remaining_time": "3:35:07"}
+ {"current_steps": 14, "total_steps": 182, "loss": 2.3949, "accuracy": 0.3203125, "lr": 7.368421052631578e-07, "epoch": 0.07679122386013026, "percentage": 7.69, "elapsed_time": "0:17:51", "remaining_time": "3:34:12"}
+ {"current_steps": 15, "total_steps": 182, "loss": 2.2735, "accuracy": 0.328125, "lr": 7.894736842105263e-07, "epoch": 0.082276311278711, "percentage": 8.24, "elapsed_time": "0:19:04", "remaining_time": "3:32:20"}
+ {"current_steps": 16, "total_steps": 182, "loss": 2.2617, "accuracy": 0.34375, "lr": 8.421052631578947e-07, "epoch": 0.08776139869729174, "percentage": 8.79, "elapsed_time": "0:20:21", "remaining_time": "3:31:17"}
+ {"current_steps": 17, "total_steps": 182, "loss": 2.3686, "accuracy": 0.265625, "lr": 8.947368421052631e-07, "epoch": 0.09324648611587247, "percentage": 9.34, "elapsed_time": "0:21:38", "remaining_time": "3:30:02"}
+ {"current_steps": 18, "total_steps": 182, "loss": 2.0268, "accuracy": 0.3984375, "lr": 9.473684210526315e-07, "epoch": 0.09873157353445321, "percentage": 9.89, "elapsed_time": "0:22:53", "remaining_time": "3:28:30"}
+ {"current_steps": 19, "total_steps": 182, "loss": 2.0874, "accuracy": 0.3046875, "lr": 1e-06, "epoch": 0.10421666095303393, "percentage": 10.44, "elapsed_time": "0:24:03", "remaining_time": "3:26:23"}
+ {"current_steps": 20, "total_steps": 182, "loss": 2.0832, "accuracy": 0.34375, "lr": 9.999071352056673e-07, "epoch": 0.10970174837161467, "percentage": 10.99, "elapsed_time": "0:25:18", "remaining_time": "3:25:00"}
+ {"current_steps": 21, "total_steps": 182, "loss": 2.1691, "accuracy": 0.3046875, "lr": 9.996285753181497e-07, "epoch": 0.11518683579019541, "percentage": 11.54, "elapsed_time": "0:26:31", "remaining_time": "3:23:23"}
+ {"current_steps": 22, "total_steps": 182, "loss": 1.9912, "accuracy": 0.34375, "lr": 9.99164423811074e-07, "epoch": 0.12067192320877614, "percentage": 12.09, "elapsed_time": "0:27:46", "remaining_time": "3:22:01"}
+ {"current_steps": 23, "total_steps": 182, "loss": 1.9723, "accuracy": 0.3203125, "lr": 9.985148530977764e-07, "epoch": 0.12615701062735687, "percentage": 12.64, "elapsed_time": "0:28:59", "remaining_time": "3:20:26"}
+ {"current_steps": 24, "total_steps": 182, "loss": 1.8305, "accuracy": 0.4140625, "lr": 9.976801044672607e-07, "epoch": 0.1316420980459376, "percentage": 13.19, "elapsed_time": "0:30:12", "remaining_time": "3:18:54"}
+ {"current_steps": 25, "total_steps": 182, "loss": 1.9472, "accuracy": 0.3828125, "lr": 9.966604879945656e-07, "epoch": 0.13712718546451835, "percentage": 13.74, "elapsed_time": "0:31:26", "remaining_time": "3:17:28"}
+ {"current_steps": 26, "total_steps": 182, "loss": 1.8902, "accuracy": 0.4140625, "lr": 9.954563824255877e-07, "epoch": 0.14261227288309908, "percentage": 14.29, "elapsed_time": "0:32:41", "remaining_time": "3:16:10"}
+ {"current_steps": 27, "total_steps": 182, "loss": 1.9858, "accuracy": 0.3046875, "lr": 9.94068235036391e-07, "epoch": 0.1480973603016798, "percentage": 14.84, "elapsed_time": "0:33:57", "remaining_time": "3:14:54"}
+ {"current_steps": 28, "total_steps": 182, "loss": 1.9039, "accuracy": 0.3828125, "lr": 9.924965614670628e-07, "epoch": 0.15358244772026053, "percentage": 15.38, "elapsed_time": "0:35:12", "remaining_time": "3:13:38"}
+ {"current_steps": 29, "total_steps": 182, "loss": 2.0302, "accuracy": 0.28125, "lr": 9.90741945530174e-07, "epoch": 0.15906753513884128, "percentage": 15.93, "elapsed_time": "0:36:24", "remaining_time": "3:12:06"}
+ {"current_steps": 30, "total_steps": 182, "loss": 1.9332, "accuracy": 0.421875, "lr": 9.888050389939172e-07, "epoch": 0.164552622557422, "percentage": 16.48, "elapsed_time": "0:37:39", "remaining_time": "3:10:47"}
+ {"current_steps": 31, "total_steps": 182, "loss": 1.9123, "accuracy": 0.3671875, "lr": 9.866865613400006e-07, "epoch": 0.17003770997600273, "percentage": 17.03, "elapsed_time": "0:38:58", "remaining_time": "3:09:50"}
+ {"current_steps": 32, "total_steps": 182, "loss": 1.9177, "accuracy": 0.3515625, "lr": 9.843872994963912e-07, "epoch": 0.17552279739458349, "percentage": 17.58, "elapsed_time": "0:40:15", "remaining_time": "3:08:40"}
+ {"current_steps": 33, "total_steps": 182, "loss": 1.7645, "accuracy": 0.4921875, "lr": 9.819081075450013e-07, "epoch": 0.1810078848131642, "percentage": 18.13, "elapsed_time": "0:41:27", "remaining_time": "3:07:09"}
+ {"current_steps": 34, "total_steps": 182, "loss": 1.8363, "accuracy": 0.375, "lr": 9.792499064044342e-07, "epoch": 0.18649297223174494, "percentage": 18.68, "elapsed_time": "0:42:43", "remaining_time": "3:06:00"}
+ {"current_steps": 35, "total_steps": 182, "loss": 1.8242, "accuracy": 0.40625, "lr": 9.764136834878985e-07, "epoch": 0.1919780596503257, "percentage": 19.23, "elapsed_time": "0:44:00", "remaining_time": "3:04:48"}
+ {"current_steps": 36, "total_steps": 182, "loss": 1.8998, "accuracy": 0.390625, "lr": 9.734004923364256e-07, "epoch": 0.19746314706890641, "percentage": 19.78, "elapsed_time": "0:45:20", "remaining_time": "3:03:53"}
+ {"current_steps": 37, "total_steps": 182, "loss": 1.8343, "accuracy": 0.40625, "lr": 9.702114522275216e-07, "epoch": 0.20294823448748714, "percentage": 20.33, "elapsed_time": "0:46:35", "remaining_time": "3:02:34"}
+ {"current_steps": 38, "total_steps": 182, "loss": 1.6697, "accuracy": 0.4921875, "lr": 9.66847747759402e-07, "epoch": 0.20843332190606786, "percentage": 20.88, "elapsed_time": "0:47:47", "remaining_time": "3:01:04"}
+ {"current_steps": 39, "total_steps": 182, "loss": 1.8336, "accuracy": 0.359375, "lr": 9.63310628410961e-07, "epoch": 0.21391840932464862, "percentage": 21.43, "elapsed_time": "0:49:05", "remaining_time": "2:59:59"}
+ {"current_steps": 40, "total_steps": 182, "loss": 1.8315, "accuracy": 0.40625, "lr": 9.596014080776421e-07, "epoch": 0.21940349674322934, "percentage": 21.98, "elapsed_time": "0:50:21", "remaining_time": "2:58:46"}
+ {"current_steps": 41, "total_steps": 182, "loss": 1.7842, "accuracy": 0.4140625, "lr": 9.55721464583379e-07, "epoch": 0.22488858416181007, "percentage": 22.53, "elapsed_time": "0:51:36", "remaining_time": "2:57:29"}
+ {"current_steps": 42, "total_steps": 182, "loss": 1.9825, "accuracy": 0.3203125, "lr": 9.516722391687902e-07, "epoch": 0.23037367158039082, "percentage": 23.08, "elapsed_time": "0:52:54", "remaining_time": "2:56:20"}
+ {"current_steps": 43, "total_steps": 182, "loss": 1.6144, "accuracy": 0.4453125, "lr": 9.474552359558165e-07, "epoch": 0.23585875899897155, "percentage": 23.63, "elapsed_time": "0:54:07", "remaining_time": "2:54:56"}
+ {"current_steps": 44, "total_steps": 182, "loss": 1.648, "accuracy": 0.4453125, "lr": 9.430720213890029e-07, "epoch": 0.24134384641755227, "percentage": 24.18, "elapsed_time": "0:55:19", "remaining_time": "2:53:31"}
+ {"current_steps": 45, "total_steps": 182, "loss": 1.9336, "accuracy": 0.46875, "lr": 9.385242236536259e-07, "epoch": 0.24682893383613302, "percentage": 24.73, "elapsed_time": "0:56:35", "remaining_time": "2:52:16"}
+ {"current_steps": 46, "total_steps": 182, "loss": 1.4913, "accuracy": 0.5234375, "lr": 9.338135320708911e-07, "epoch": 0.25231402125471375, "percentage": 25.27, "elapsed_time": "0:57:47", "remaining_time": "2:50:51"}
+ {"current_steps": 47, "total_steps": 182, "loss": 1.4505, "accuracy": 0.6171875, "lr": 9.289416964704185e-07, "epoch": 0.2577991086732945, "percentage": 25.82, "elapsed_time": "0:59:11", "remaining_time": "2:50:00"}
+ {"current_steps": 48, "total_steps": 182, "loss": 1.5693, "accuracy": 0.671875, "lr": 9.239105265402525e-07, "epoch": 0.2632841960918752, "percentage": 26.37, "elapsed_time": "1:00:24", "remaining_time": "2:48:39"}
+ {"current_steps": 49, "total_steps": 182, "loss": 1.4994, "accuracy": 0.75, "lr": 9.187218911546361e-07, "epoch": 0.2687692835104559, "percentage": 26.92, "elapsed_time": "1:01:38", "remaining_time": "2:47:18"}
+ {"current_steps": 50, "total_steps": 182, "loss": 1.686, "accuracy": 0.640625, "lr": 9.133777176798012e-07, "epoch": 0.2742543709290367, "percentage": 27.47, "elapsed_time": "1:02:52", "remaining_time": "2:45:59"}
+ {"current_steps": 51, "total_steps": 182, "loss": 1.1265, "accuracy": 0.7265625, "lr": 9.078799912580303e-07, "epoch": 0.27973945834761743, "percentage": 28.02, "elapsed_time": "1:04:07", "remaining_time": "2:44:43"}
+ {"current_steps": 52, "total_steps": 182, "loss": 1.0881, "accuracy": 0.7578125, "lr": 9.022307540702576e-07, "epoch": 0.28522454576619816, "percentage": 28.57, "elapsed_time": "1:05:19", "remaining_time": "2:43:17"}
+ {"current_steps": 53, "total_steps": 182, "loss": 1.2003, "accuracy": 0.734375, "lr": 8.964321045774806e-07, "epoch": 0.2907096331847789, "percentage": 29.12, "elapsed_time": "1:06:31", "remaining_time": "2:41:54"}
+ {"current_steps": 54, "total_steps": 182, "loss": 1.4033, "accuracy": 0.6640625, "lr": 8.904861967412701e-07, "epoch": 0.2961947206033596, "percentage": 29.67, "elapsed_time": "1:07:45", "remaining_time": "2:40:36"}
+ {"current_steps": 55, "total_steps": 182, "loss": 1.3969, "accuracy": 0.8125, "lr": 8.843952392236593e-07, "epoch": 0.30167980802194033, "percentage": 30.22, "elapsed_time": "1:08:59", "remaining_time": "2:39:18"}
+ {"current_steps": 56, "total_steps": 182, "loss": 1.2378, "accuracy": 0.765625, "lr": 8.781614945667168e-07, "epoch": 0.30716489544052106, "percentage": 30.77, "elapsed_time": "1:10:15", "remaining_time": "2:38:04"}
+ {"current_steps": 57, "total_steps": 182, "loss": 1.0947, "accuracy": 0.8125, "lr": 8.717872783521047e-07, "epoch": 0.31264998285910184, "percentage": 31.32, "elapsed_time": "1:11:32", "remaining_time": "2:36:54"}
+ {"current_steps": 58, "total_steps": 182, "loss": 1.0673, "accuracy": 0.78125, "lr": 8.652749583409339e-07, "epoch": 0.31813507027768256, "percentage": 31.87, "elapsed_time": "1:12:45", "remaining_time": "2:35:33"}
+ {"current_steps": 59, "total_steps": 182, "loss": 1.0082, "accuracy": 0.8359375, "lr": 8.586269535942385e-07, "epoch": 0.3236201576962633, "percentage": 32.42, "elapsed_time": "1:13:57", "remaining_time": "2:34:11"}
+ {"current_steps": 60, "total_steps": 182, "loss": 1.2004, "accuracy": 0.7734375, "lr": 8.518457335743924e-07, "epoch": 0.329105245114844, "percentage": 32.97, "elapsed_time": "1:15:16", "remaining_time": "2:33:04"}
+ {"current_steps": 61, "total_steps": 182, "loss": 1.07, "accuracy": 0.78125, "lr": 8.449338172278058e-07, "epoch": 0.33459033253342474, "percentage": 33.52, "elapsed_time": "1:16:30", "remaining_time": "2:31:46"}
+ {"current_steps": 62, "total_steps": 182, "loss": 1.1342, "accuracy": 0.7578125, "lr": 8.378937720492383e-07, "epoch": 0.34007541995200546, "percentage": 34.07, "elapsed_time": "1:17:48", "remaining_time": "2:30:35"}
+ {"current_steps": 63, "total_steps": 182, "loss": 0.9218, "accuracy": 0.8203125, "lr": 8.307282131280804e-07, "epoch": 0.34556050737058625, "percentage": 34.62, "elapsed_time": "1:19:03", "remaining_time": "2:29:20"}
+ {"current_steps": 64, "total_steps": 182, "loss": 1.0545, "accuracy": 0.796875, "lr": 8.23439802176954e-07, "epoch": 0.35104559478916697, "percentage": 35.16, "elapsed_time": "1:20:20", "remaining_time": "2:28:07"}
+ {"current_steps": 65, "total_steps": 182, "loss": 0.8092, "accuracy": 0.828125, "lr": 8.160312465429952e-07, "epoch": 0.3565306822077477, "percentage": 35.71, "elapsed_time": "1:21:41", "remaining_time": "2:27:03"}
+ {"current_steps": 66, "total_steps": 182, "loss": 1.0525, "accuracy": 0.7734375, "lr": 8.085052982021847e-07, "epoch": 0.3620157696263284, "percentage": 36.26, "elapsed_time": "1:22:55", "remaining_time": "2:25:44"}
+ {"current_steps": 67, "total_steps": 182, "loss": 0.8161, "accuracy": 0.84375, "lr": 8.008647527371022e-07, "epoch": 0.36750085704490915, "percentage": 36.81, "elapsed_time": "1:24:06", "remaining_time": "2:24:22"}
+ {"current_steps": 68, "total_steps": 182, "loss": 1.0579, "accuracy": 0.765625, "lr": 7.931124482984801e-07, "epoch": 0.37298594446348987, "percentage": 37.36, "elapsed_time": "1:25:22", "remaining_time": "2:23:06"}
+ {"current_steps": 69, "total_steps": 182, "loss": 1.3453, "accuracy": 0.7265625, "lr": 7.85251264550948e-07, "epoch": 0.3784710318820706, "percentage": 37.91, "elapsed_time": "1:26:36", "remaining_time": "2:21:50"}
+ {"current_steps": 70, "total_steps": 182, "loss": 1.2675, "accuracy": 0.7890625, "lr": 7.772841216033532e-07, "epoch": 0.3839561193006514, "percentage": 38.46, "elapsed_time": "1:27:50", "remaining_time": "2:20:33"}
+ {"current_steps": 71, "total_steps": 182, "loss": 1.096, "accuracy": 0.828125, "lr": 7.69213978924061e-07, "epoch": 0.3894412067192321, "percentage": 39.01, "elapsed_time": "1:29:01", "remaining_time": "2:19:10"}
+ {"current_steps": 72, "total_steps": 182, "loss": 1.0171, "accuracy": 0.8046875, "lr": 7.610438342416319e-07, "epoch": 0.39492629413781283, "percentage": 39.56, "elapsed_time": "1:30:14", "remaining_time": "2:17:52"}
+ {"current_steps": 73, "total_steps": 182, "loss": 0.6614, "accuracy": 0.8515625, "lr": 7.527767224312882e-07, "epoch": 0.40041138155639355, "percentage": 40.11, "elapsed_time": "1:31:28", "remaining_time": "2:16:35"}
+ {"current_steps": 74, "total_steps": 182, "loss": 0.9105, "accuracy": 0.8359375, "lr": 7.444157143875819e-07, "epoch": 0.4058964689749743, "percentage": 40.66, "elapsed_time": "1:32:41", "remaining_time": "2:15:17"}
+ {"current_steps": 75, "total_steps": 182, "loss": 1.0358, "accuracy": 0.75, "lr": 7.359639158836827e-07, "epoch": 0.411381556393555, "percentage": 41.21, "elapsed_time": "1:34:00", "remaining_time": "2:14:07"}
+ {"current_steps": 76, "total_steps": 182, "loss": 0.9974, "accuracy": 0.8046875, "lr": 7.274244664177097e-07, "epoch": 0.41686664381213573, "percentage": 41.76, "elapsed_time": "1:35:16", "remaining_time": "2:12:52"}
+ {"current_steps": 77, "total_steps": 182, "loss": 0.8174, "accuracy": 0.8046875, "lr": 7.188005380465364e-07, "epoch": 0.4223517312307165, "percentage": 42.31, "elapsed_time": "1:36:32", "remaining_time": "2:11:39"}
78
+ {"current_steps": 78, "total_steps": 182, "loss": 0.8695, "accuracy": 0.8203125, "lr": 7.100953342075009e-07, "epoch": 0.42783681864929723, "percentage": 42.86, "elapsed_time": "1:37:48", "remaining_time": "2:10:24"}
79
+ {"current_steps": 79, "total_steps": 182, "loss": 0.7876, "accuracy": 0.8359375, "lr": 7.013120885284598e-07, "epoch": 0.43332190606787796, "percentage": 43.41, "elapsed_time": "1:39:02", "remaining_time": "2:09:07"}
80
+ {"current_steps": 80, "total_steps": 182, "loss": 0.9434, "accuracy": 0.859375, "lr": 6.924540636266272e-07, "epoch": 0.4388069934864587, "percentage": 43.96, "elapsed_time": "1:40:16", "remaining_time": "2:07:50"}
81
+ {"current_steps": 81, "total_steps": 182, "loss": 0.8759, "accuracy": 0.8046875, "lr": 6.83524549896646e-07, "epoch": 0.4442920809050394, "percentage": 44.51, "elapsed_time": "1:41:37", "remaining_time": "2:06:43"}
82
+ {"current_steps": 82, "total_steps": 182, "loss": 0.6727, "accuracy": 0.875, "lr": 6.745268642883404e-07, "epoch": 0.44977716832362014, "percentage": 45.05, "elapsed_time": "1:42:52", "remaining_time": "2:05:26"}
83
+ {"current_steps": 83, "total_steps": 182, "loss": 0.8406, "accuracy": 0.875, "lr": 6.654643490746041e-07, "epoch": 0.4552622557422009, "percentage": 45.6, "elapsed_time": "1:44:07", "remaining_time": "2:04:12"}
84
+ {"current_steps": 84, "total_steps": 182, "loss": 0.844, "accuracy": 0.78125, "lr": 6.563403706098832e-07, "epoch": 0.46074734316078164, "percentage": 46.15, "elapsed_time": "1:45:21", "remaining_time": "2:02:54"}
85
+ {"current_steps": 85, "total_steps": 182, "loss": 0.9392, "accuracy": 0.8046875, "lr": 6.47158318079712e-07, "epoch": 0.46623243057936237, "percentage": 46.7, "elapsed_time": "1:46:37", "remaining_time": "2:01:40"}
86
+ {"current_steps": 86, "total_steps": 182, "loss": 0.6221, "accuracy": 0.9140625, "lr": 6.379216022417695e-07, "epoch": 0.4717175179979431, "percentage": 47.25, "elapsed_time": "1:47:56", "remaining_time": "2:00:29"}
87
+ {"current_steps": 87, "total_steps": 182, "loss": 0.9602, "accuracy": 0.8125, "lr": 6.286336541589223e-07, "epoch": 0.4772026054165238, "percentage": 47.8, "elapsed_time": "1:49:10", "remaining_time": "1:59:13"}
88
+ {"current_steps": 88, "total_steps": 182, "loss": 0.8718, "accuracy": 0.796875, "lr": 6.192979239247242e-07, "epoch": 0.48268769283510454, "percentage": 48.35, "elapsed_time": "1:50:24", "remaining_time": "1:57:56"}
89
+ {"current_steps": 89, "total_steps": 182, "loss": 0.8724, "accuracy": 0.8359375, "lr": 6.099178793818478e-07, "epoch": 0.48817278025368527, "percentage": 48.9, "elapsed_time": "1:51:39", "remaining_time": "1:56:40"}
90
+ {"current_steps": 90, "total_steps": 182, "loss": 0.9605, "accuracy": 0.796875, "lr": 6.004970048339225e-07, "epoch": 0.49365786767226605, "percentage": 49.45, "elapsed_time": "1:52:51", "remaining_time": "1:55:21"}
91
+ {"current_steps": 91, "total_steps": 182, "loss": 0.7882, "accuracy": 0.859375, "lr": 5.910387997512573e-07, "epoch": 0.4991429550908468, "percentage": 50.0, "elapsed_time": "1:54:03", "remaining_time": "1:54:03"}
92
+ {"current_steps": 92, "total_steps": 182, "loss": 0.8483, "accuracy": 0.8359375, "lr": 5.815467774709313e-07, "epoch": 0.5046280425094275, "percentage": 50.55, "elapsed_time": "1:55:20", "remaining_time": "1:52:49"}
93
+ {"current_steps": 93, "total_steps": 182, "loss": 0.8819, "accuracy": 0.8203125, "lr": 5.720244638917323e-07, "epoch": 0.5101131299280083, "percentage": 51.1, "elapsed_time": "1:56:35", "remaining_time": "1:51:34"}
94
+ {"current_steps": 94, "total_steps": 182, "loss": 1.0174, "accuracy": 0.796875, "lr": 5.624753961644281e-07, "epoch": 0.515598217346589, "percentage": 51.65, "elapsed_time": "1:57:43", "remaining_time": "1:50:12"}
95
+ {"current_steps": 95, "total_steps": 182, "loss": 0.98, "accuracy": 0.7890625, "lr": 5.529031213778614e-07, "epoch": 0.5210833047651697, "percentage": 52.2, "elapsed_time": "1:58:57", "remaining_time": "1:48:56"}
96
+ {"current_steps": 96, "total_steps": 182, "loss": 0.8177, "accuracy": 0.8125, "lr": 5.433111952413494e-07, "epoch": 0.5265683921837504, "percentage": 52.75, "elapsed_time": "2:00:13", "remaining_time": "1:47:41"}
97
+ {"current_steps": 97, "total_steps": 182, "loss": 0.7902, "accuracy": 0.8671875, "lr": 5.33703180763884e-07, "epoch": 0.5320534796023312, "percentage": 53.3, "elapsed_time": "2:01:27", "remaining_time": "1:46:25"}
98
+ {"current_steps": 98, "total_steps": 182, "loss": 0.5899, "accuracy": 0.9140625, "lr": 5.240826469306186e-07, "epoch": 0.5375385670209119, "percentage": 53.85, "elapsed_time": "2:02:46", "remaining_time": "1:45:14"}
99
+ {"current_steps": 99, "total_steps": 182, "loss": 0.8516, "accuracy": 0.796875, "lr": 5.144531673771363e-07, "epoch": 0.5430236544394926, "percentage": 54.4, "elapsed_time": "2:04:05", "remaining_time": "1:44:02"}
100
+ {"current_steps": 100, "total_steps": 182, "loss": 0.8378, "accuracy": 0.828125, "lr": 5.048183190619903e-07, "epoch": 0.5485087418580734, "percentage": 54.95, "elapsed_time": "2:05:23", "remaining_time": "1:42:49"}
101
+ {"current_steps": 101, "total_steps": 182, "loss": 0.669, "accuracy": 0.8828125, "lr": 4.951816809380097e-07, "epoch": 0.5539938292766541, "percentage": 55.49, "elapsed_time": "2:06:36", "remaining_time": "1:41:32"}
102
+ {"current_steps": 102, "total_steps": 182, "loss": 0.8745, "accuracy": 0.8046875, "lr": 4.855468326228638e-07, "epoch": 0.5594789166952349, "percentage": 56.04, "elapsed_time": "2:07:51", "remaining_time": "1:40:16"}
103
+ {"current_steps": 103, "total_steps": 182, "loss": 0.7655, "accuracy": 0.8515625, "lr": 4.7591735306938134e-07, "epoch": 0.5649640041138155, "percentage": 56.59, "elapsed_time": "2:09:03", "remaining_time": "1:38:59"}
104
+ {"current_steps": 104, "total_steps": 182, "loss": 0.9818, "accuracy": 0.796875, "lr": 4.6629681923611603e-07, "epoch": 0.5704490915323963, "percentage": 57.14, "elapsed_time": "2:10:18", "remaining_time": "1:37:43"}
105
+ {"current_steps": 105, "total_steps": 182, "loss": 0.7142, "accuracy": 0.8515625, "lr": 4.5668880475865067e-07, "epoch": 0.575934178950977, "percentage": 57.69, "elapsed_time": "2:11:28", "remaining_time": "1:36:24"}
106
+ {"current_steps": 106, "total_steps": 182, "loss": 0.624, "accuracy": 0.8671875, "lr": 4.4709687862213864e-07, "epoch": 0.5814192663695578, "percentage": 58.24, "elapsed_time": "2:12:44", "remaining_time": "1:35:10"}
107
+ {"current_steps": 107, "total_steps": 182, "loss": 0.779, "accuracy": 0.8515625, "lr": 4.3752460383557194e-07, "epoch": 0.5869043537881385, "percentage": 58.79, "elapsed_time": "2:13:58", "remaining_time": "1:33:54"}
108
+ {"current_steps": 108, "total_steps": 182, "loss": 0.8094, "accuracy": 0.8359375, "lr": 4.2797553610826797e-07, "epoch": 0.5923894412067192, "percentage": 59.34, "elapsed_time": "2:15:11", "remaining_time": "1:32:37"}
109
+ {"current_steps": 109, "total_steps": 182, "loss": 0.6594, "accuracy": 0.84375, "lr": 4.184532225290686e-07, "epoch": 0.5978745286253, "percentage": 59.89, "elapsed_time": "2:16:26", "remaining_time": "1:31:22"}
110
+ {"current_steps": 110, "total_steps": 182, "loss": 0.9853, "accuracy": 0.7734375, "lr": 4.089612002487428e-07, "epoch": 0.6033596160438807, "percentage": 60.44, "elapsed_time": "2:17:40", "remaining_time": "1:30:06"}
111
+ {"current_steps": 111, "total_steps": 182, "loss": 0.6273, "accuracy": 0.875, "lr": 3.995029951660776e-07, "epoch": 0.6088447034624614, "percentage": 60.99, "elapsed_time": "2:18:54", "remaining_time": "1:28:50"}
112
+ {"current_steps": 112, "total_steps": 182, "loss": 0.5671, "accuracy": 0.875, "lr": 3.9008212061815207e-07, "epoch": 0.6143297908810421, "percentage": 61.54, "elapsed_time": "2:20:07", "remaining_time": "1:27:34"}
113
+ {"current_steps": 113, "total_steps": 182, "loss": 1.0423, "accuracy": 0.8203125, "lr": 3.8070207607527585e-07, "epoch": 0.6198148782996229, "percentage": 62.09, "elapsed_time": "2:21:18", "remaining_time": "1:26:17"}
114
+ {"current_steps": 114, "total_steps": 182, "loss": 0.7409, "accuracy": 0.84375, "lr": 3.7136634584107783e-07, "epoch": 0.6252999657182037, "percentage": 62.64, "elapsed_time": "2:22:28", "remaining_time": "1:24:59"}
115
+ {"current_steps": 115, "total_steps": 182, "loss": 0.8674, "accuracy": 0.8359375, "lr": 3.6207839775823047e-07, "epoch": 0.6307850531367843, "percentage": 63.19, "elapsed_time": "2:23:44", "remaining_time": "1:23:44"}
116
+ {"current_steps": 116, "total_steps": 182, "loss": 0.6846, "accuracy": 0.890625, "lr": 3.5284168192028805e-07, "epoch": 0.6362701405553651, "percentage": 63.74, "elapsed_time": "2:24:57", "remaining_time": "1:22:28"}
117
+ {"current_steps": 117, "total_steps": 182, "loss": 0.9343, "accuracy": 0.8046875, "lr": 3.4365962939011693e-07, "epoch": 0.6417552279739458, "percentage": 64.29, "elapsed_time": "2:26:12", "remaining_time": "1:21:13"}
118
+ {"current_steps": 118, "total_steps": 182, "loss": 0.6338, "accuracy": 0.90625, "lr": 3.345356509253958e-07, "epoch": 0.6472403153925266, "percentage": 64.84, "elapsed_time": "2:27:29", "remaining_time": "1:19:59"}
119
+ {"current_steps": 119, "total_steps": 182, "loss": 0.7826, "accuracy": 0.859375, "lr": 3.2547313571165967e-07, "epoch": 0.6527254028111072, "percentage": 65.38, "elapsed_time": "2:28:48", "remaining_time": "1:18:46"}
120
+ {"current_steps": 120, "total_steps": 182, "loss": 0.6904, "accuracy": 0.84375, "lr": 3.1647545010335395e-07, "epoch": 0.658210490229688, "percentage": 65.93, "elapsed_time": "2:30:02", "remaining_time": "1:17:31"}
121
+ {"current_steps": 121, "total_steps": 182, "loss": 0.9172, "accuracy": 0.7578125, "lr": 3.075459363733727e-07, "epoch": 0.6636955776482688, "percentage": 66.48, "elapsed_time": "2:31:14", "remaining_time": "1:16:14"}
122
+ {"current_steps": 122, "total_steps": 182, "loss": 0.868, "accuracy": 0.8125, "lr": 2.9868791147154025e-07, "epoch": 0.6691806650668495, "percentage": 67.03, "elapsed_time": "2:32:26", "remaining_time": "1:14:58"}
123
+ {"current_steps": 123, "total_steps": 182, "loss": 0.659, "accuracy": 0.84375, "lr": 2.8990466579249917e-07, "epoch": 0.6746657524854303, "percentage": 67.58, "elapsed_time": "2:33:43", "remaining_time": "1:13:44"}
124
+ {"current_steps": 124, "total_steps": 182, "loss": 0.6562, "accuracy": 0.875, "lr": 2.811994619534637e-07, "epoch": 0.6801508399040109, "percentage": 68.13, "elapsed_time": "2:34:58", "remaining_time": "1:12:29"}
125
+ {"current_steps": 125, "total_steps": 182, "loss": 0.5385, "accuracy": 0.921875, "lr": 2.725755335822903e-07, "epoch": 0.6856359273225917, "percentage": 68.68, "elapsed_time": "2:36:15", "remaining_time": "1:11:15"}
126
+ {"current_steps": 126, "total_steps": 182, "loss": 0.6031, "accuracy": 0.875, "lr": 2.640360841163174e-07, "epoch": 0.6911210147411725, "percentage": 69.23, "elapsed_time": "2:37:28", "remaining_time": "1:09:59"}
127
+ {"current_steps": 127, "total_steps": 182, "loss": 0.6676, "accuracy": 0.859375, "lr": 2.5558428561241816e-07, "epoch": 0.6966061021597532, "percentage": 69.78, "elapsed_time": "2:38:42", "remaining_time": "1:08:43"}
128
+ {"current_steps": 128, "total_steps": 182, "loss": 0.7803, "accuracy": 0.828125, "lr": 2.472232775687119e-07, "epoch": 0.7020911895783339, "percentage": 70.33, "elapsed_time": "2:40:00", "remaining_time": "1:07:30"}
129
+ {"current_steps": 129, "total_steps": 182, "loss": 0.5901, "accuracy": 0.875, "lr": 2.3895616575836806e-07, "epoch": 0.7075762769969146, "percentage": 70.88, "elapsed_time": "2:41:18", "remaining_time": "1:06:16"}
130
+ {"current_steps": 130, "total_steps": 182, "loss": 0.7432, "accuracy": 0.8515625, "lr": 2.3078602107593897e-07, "epoch": 0.7130613644154954, "percentage": 71.43, "elapsed_time": "2:42:31", "remaining_time": "1:05:00"}
131
+ {"current_steps": 131, "total_steps": 182, "loss": 0.7962, "accuracy": 0.828125, "lr": 2.2271587839664668e-07, "epoch": 0.7185464518340761, "percentage": 71.98, "elapsed_time": "2:43:47", "remaining_time": "1:03:45"}
132
+ {"current_steps": 132, "total_steps": 182, "loss": 0.7871, "accuracy": 0.859375, "lr": 2.1474873544905203e-07, "epoch": 0.7240315392526568, "percentage": 72.53, "elapsed_time": "2:45:01", "remaining_time": "1:02:30"}
133
+ {"current_steps": 133, "total_steps": 182, "loss": 0.7131, "accuracy": 0.8671875, "lr": 2.0688755170151994e-07, "epoch": 0.7295166266712376, "percentage": 73.08, "elapsed_time": "2:46:12", "remaining_time": "1:01:13"}
134
+ {"current_steps": 134, "total_steps": 182, "loss": 0.5673, "accuracy": 0.8828125, "lr": 1.991352472628978e-07, "epoch": 0.7350017140898183, "percentage": 73.63, "elapsed_time": "2:47:23", "remaining_time": "0:59:57"}
135
+ {"current_steps": 135, "total_steps": 182, "loss": 0.9743, "accuracy": 0.8125, "lr": 1.9149470179781529e-07, "epoch": 0.7404868015083991, "percentage": 74.18, "elapsed_time": "2:48:39", "remaining_time": "0:58:43"}
136
+ {"current_steps": 136, "total_steps": 182, "loss": 0.5596, "accuracy": 0.8984375, "lr": 1.8396875345700496e-07, "epoch": 0.7459718889269797, "percentage": 74.73, "elapsed_time": "2:49:55", "remaining_time": "0:57:28"}
137
+ {"current_steps": 137, "total_steps": 182, "loss": 0.7156, "accuracy": 0.875, "lr": 1.76560197823046e-07, "epoch": 0.7514569763455605, "percentage": 75.27, "elapsed_time": "2:51:09", "remaining_time": "0:56:13"}
138
+ {"current_steps": 138, "total_steps": 182, "loss": 0.5443, "accuracy": 0.875, "lr": 1.6927178687191952e-07, "epoch": 0.7569420637641412, "percentage": 75.82, "elapsed_time": "2:52:20", "remaining_time": "0:54:56"}
139
+ {"current_steps": 139, "total_steps": 182, "loss": 0.7399, "accuracy": 0.8359375, "lr": 1.6210622795076167e-07, "epoch": 0.762427151182722, "percentage": 76.37, "elapsed_time": "2:53:33", "remaining_time": "0:53:41"}
140
+ {"current_steps": 140, "total_steps": 182, "loss": 0.6289, "accuracy": 0.859375, "lr": 1.5506618277219408e-07, "epoch": 0.7679122386013028, "percentage": 76.92, "elapsed_time": "2:54:49", "remaining_time": "0:52:26"}
141
+ {"current_steps": 141, "total_steps": 182, "loss": 0.623, "accuracy": 0.8671875, "lr": 1.481542664256075e-07, "epoch": 0.7733973260198834, "percentage": 77.47, "elapsed_time": "2:56:05", "remaining_time": "0:51:12"}
142
+ {"current_steps": 142, "total_steps": 182, "loss": 0.6692, "accuracy": 0.8359375, "lr": 1.413730464057616e-07, "epoch": 0.7788824134384642, "percentage": 78.02, "elapsed_time": "2:57:18", "remaining_time": "0:49:56"}
143
+ {"current_steps": 143, "total_steps": 182, "loss": 0.6601, "accuracy": 0.890625, "lr": 1.3472504165906612e-07, "epoch": 0.7843675008570449, "percentage": 78.57, "elapsed_time": "2:58:33", "remaining_time": "0:48:41"}
144
+ {"current_steps": 144, "total_steps": 182, "loss": 0.564, "accuracy": 0.890625, "lr": 1.2821272164789543e-07, "epoch": 0.7898525882756257, "percentage": 79.12, "elapsed_time": "2:59:46", "remaining_time": "0:47:26"}
145
+ {"current_steps": 145, "total_steps": 182, "loss": 0.7226, "accuracy": 0.8515625, "lr": 1.2183850543328312e-07, "epoch": 0.7953376756942063, "percentage": 79.67, "elapsed_time": "3:01:00", "remaining_time": "0:46:11"}
146
+ {"current_steps": 146, "total_steps": 182, "loss": 0.544, "accuracy": 0.875, "lr": 1.1560476077634069e-07, "epoch": 0.8008227631127871, "percentage": 80.22, "elapsed_time": "3:02:16", "remaining_time": "0:44:56"}
147
+ {"current_steps": 147, "total_steps": 182, "loss": 0.7967, "accuracy": 0.828125, "lr": 1.0951380325872977e-07, "epoch": 0.8063078505313679, "percentage": 80.77, "elapsed_time": "3:03:31", "remaining_time": "0:43:41"}
148
+ {"current_steps": 148, "total_steps": 182, "loss": 0.8025, "accuracy": 0.84375, "lr": 1.0356789542251936e-07, "epoch": 0.8117929379499486, "percentage": 81.32, "elapsed_time": "3:04:48", "remaining_time": "0:42:27"}
149
+ {"current_steps": 149, "total_steps": 182, "loss": 0.7674, "accuracy": 0.8515625, "lr": 9.776924592974256e-08, "epoch": 0.8172780253685293, "percentage": 81.87, "elapsed_time": "3:06:02", "remaining_time": "0:41:12"}
150
+ {"current_steps": 150, "total_steps": 182, "loss": 0.6909, "accuracy": 0.8671875, "lr": 9.212000874196952e-08, "epoch": 0.82276311278711, "percentage": 82.42, "elapsed_time": "3:07:19", "remaining_time": "0:39:57"}
151
+ {"current_steps": 151, "total_steps": 182, "loss": 0.7332, "accuracy": 0.8359375, "lr": 8.662228232019875e-08, "epoch": 0.8282482002056908, "percentage": 82.97, "elapsed_time": "3:08:41", "remaining_time": "0:38:44"}
152
+ {"current_steps": 152, "total_steps": 182, "loss": 0.6407, "accuracy": 0.875, "lr": 8.127810884536402e-08, "epoch": 0.8337332876242715, "percentage": 83.52, "elapsed_time": "3:09:55", "remaining_time": "0:37:29"}
153
+ {"current_steps": 153, "total_steps": 182, "loss": 0.8117, "accuracy": 0.8359375, "lr": 7.608947345974759e-08, "epoch": 0.8392183750428522, "percentage": 84.07, "elapsed_time": "3:11:10", "remaining_time": "0:36:14"}
154
+ {"current_steps": 154, "total_steps": 182, "loss": 0.6658, "accuracy": 0.8671875, "lr": 7.105830352958142e-08, "epoch": 0.844703462461433, "percentage": 84.62, "elapsed_time": "3:12:24", "remaining_time": "0:34:59"}
155
+ {"current_steps": 155, "total_steps": 182, "loss": 0.5643, "accuracy": 0.8984375, "lr": 6.618646792910893e-08, "epoch": 0.8501885498800137, "percentage": 85.16, "elapsed_time": "3:13:37", "remaining_time": "0:33:43"}
156
+ {"current_steps": 156, "total_steps": 182, "loss": 0.7727, "accuracy": 0.8359375, "lr": 6.147577634637413e-08, "epoch": 0.8556736372985945, "percentage": 85.71, "elapsed_time": "3:14:51", "remaining_time": "0:32:28"}
157
+ {"current_steps": 157, "total_steps": 182, "loss": 0.5738, "accuracy": 0.875, "lr": 5.692797861099718e-08, "epoch": 0.8611587247171751, "percentage": 86.26, "elapsed_time": "3:16:08", "remaining_time": "0:31:13"}
158
+ {"current_steps": 158, "total_steps": 182, "loss": 0.6284, "accuracy": 0.8671875, "lr": 5.25447640441834e-08, "epoch": 0.8666438121357559, "percentage": 86.81, "elapsed_time": "3:17:19", "remaining_time": "0:29:58"}
159
+ {"current_steps": 159, "total_steps": 182, "loss": 0.6382, "accuracy": 0.8828125, "lr": 4.832776083120982e-08, "epoch": 0.8721288995543367, "percentage": 87.36, "elapsed_time": "3:18:33", "remaining_time": "0:28:43"}
160
+ {"current_steps": 160, "total_steps": 182, "loss": 0.6011, "accuracy": 0.875, "lr": 4.427853541662091e-08, "epoch": 0.8776139869729174, "percentage": 87.91, "elapsed_time": "3:19:44", "remaining_time": "0:27:27"}
161
+ {"current_steps": 161, "total_steps": 182, "loss": 0.8583, "accuracy": 0.8203125, "lr": 4.039859192235778e-08, "epoch": 0.8830990743914982, "percentage": 88.46, "elapsed_time": "3:20:56", "remaining_time": "0:26:12"}
162
+ {"current_steps": 162, "total_steps": 182, "loss": 0.5664, "accuracy": 0.8984375, "lr": 3.668937158903901e-08, "epoch": 0.8885841618100788, "percentage": 89.01, "elapsed_time": "3:22:08", "remaining_time": "0:24:57"}
163
+ {"current_steps": 163, "total_steps": 182, "loss": 0.5001, "accuracy": 0.90625, "lr": 3.3152252240598086e-08, "epoch": 0.8940692492286596, "percentage": 89.56, "elapsed_time": "3:23:20", "remaining_time": "0:23:42"}
164
+ {"current_steps": 164, "total_steps": 182, "loss": 0.6246, "accuracy": 0.875, "lr": 2.978854777247841e-08, "epoch": 0.8995543366472403, "percentage": 90.11, "elapsed_time": "3:24:31", "remaining_time": "0:22:26"}
165
+ {"current_steps": 165, "total_steps": 182, "loss": 0.5817, "accuracy": 0.890625, "lr": 2.6599507663574384e-08, "epoch": 0.905039424065821, "percentage": 90.66, "elapsed_time": "3:25:45", "remaining_time": "0:21:11"}
166
+ {"current_steps": 166, "total_steps": 182, "loss": 0.5111, "accuracy": 0.8828125, "lr": 2.358631651210141e-08, "epoch": 0.9105245114844018, "percentage": 91.21, "elapsed_time": "3:27:01", "remaining_time": "0:19:57"}
167
+ {"current_steps": 167, "total_steps": 182, "loss": 0.6247, "accuracy": 0.8671875, "lr": 2.0750093595565733e-08, "epoch": 0.9160095989029825, "percentage": 91.76, "elapsed_time": "3:28:16", "remaining_time": "0:18:42"}
168
+ {"current_steps": 168, "total_steps": 182, "loss": 0.6335, "accuracy": 0.8828125, "lr": 1.8091892454998593e-08, "epoch": 0.9214946863215633, "percentage": 92.31, "elapsed_time": "3:29:32", "remaining_time": "0:17:27"}
169
+ {"current_steps": 169, "total_steps": 182, "loss": 0.79, "accuracy": 0.8359375, "lr": 1.5612700503608967e-08, "epoch": 0.926979773740144, "percentage": 92.86, "elapsed_time": "3:30:46", "remaining_time": "0:16:12"}
170
+ {"current_steps": 170, "total_steps": 182, "loss": 0.6073, "accuracy": 0.8671875, "lr": 1.3313438659999399e-08, "epoch": 0.9324648611587247, "percentage": 93.41, "elapsed_time": "3:32:02", "remaining_time": "0:14:58"}
171
+ {"current_steps": 171, "total_steps": 182, "loss": 0.6478, "accuracy": 0.8984375, "lr": 1.119496100608297e-08, "epoch": 0.9379499485773054, "percentage": 93.96, "elapsed_time": "3:33:17", "remaining_time": "0:13:43"}
172
+ {"current_steps": 172, "total_steps": 182, "loss": 0.4678, "accuracy": 0.921875, "lr": 9.258054469825972e-09, "epoch": 0.9434350359958862, "percentage": 94.51, "elapsed_time": "3:34:27", "remaining_time": "0:12:28"}
173
+ {"current_steps": 173, "total_steps": 182, "loss": 0.8124, "accuracy": 0.8515625, "lr": 7.503438532937168e-09, "epoch": 0.948920123414467, "percentage": 95.05, "elapsed_time": "3:35:39", "remaining_time": "0:11:13"}
174
+ {"current_steps": 174, "total_steps": 182, "loss": 0.5972, "accuracy": 0.875, "lr": 5.931764963608865e-09, "epoch": 0.9544052108330476, "percentage": 95.6, "elapsed_time": "3:36:57", "remaining_time": "0:09:58"}
175
+ {"current_steps": 175, "total_steps": 182, "loss": 0.8845, "accuracy": 0.8046875, "lr": 4.543617574412184e-09, "epoch": 0.9598902982516284, "percentage": 96.15, "elapsed_time": "3:38:12", "remaining_time": "0:08:43"}
176
+ {"current_steps": 176, "total_steps": 182, "loss": 0.7588, "accuracy": 0.8203125, "lr": 3.3395120054343086e-09, "epoch": 0.9653753856702091, "percentage": 96.7, "elapsed_time": "3:39:26", "remaining_time": "0:07:28"}
177
+ {"current_steps": 177, "total_steps": 182, "loss": 0.603, "accuracy": 0.875, "lr": 2.3198955327393686e-09, "epoch": 0.9708604730887899, "percentage": 97.25, "elapsed_time": "3:40:43", "remaining_time": "0:06:14"}
178
+ {"current_steps": 178, "total_steps": 182, "loss": 0.478, "accuracy": 0.921875, "lr": 1.4851469022233997e-09, "epoch": 0.9763455605073705, "percentage": 97.8, "elapsed_time": "3:41:55", "remaining_time": "0:04:59"}
179
+ {"current_steps": 179, "total_steps": 182, "loss": 0.5532, "accuracy": 0.8984375, "lr": 8.35576188926046e-10, "epoch": 0.9818306479259513, "percentage": 98.35, "elapsed_time": "3:43:14", "remaining_time": "0:03:44"}
180
+ {"current_steps": 180, "total_steps": 182, "loss": 0.692, "accuracy": 0.84375, "lr": 3.71424681850141e-10, "epoch": 0.9873157353445321, "percentage": 98.9, "elapsed_time": "3:44:33", "remaining_time": "0:02:29"}
181
+ {"current_steps": 181, "total_steps": 182, "loss": 0.6545, "accuracy": 0.84375, "lr": 9.286479433257e-11, "epoch": 0.9928008227631128, "percentage": 99.45, "elapsed_time": "3:45:49", "remaining_time": "0:01:14"}
182
+ {"current_steps": 182, "total_steps": 182, "loss": 0.3779, "accuracy": 0.953125, "lr": 0.0, "epoch": 0.9982859101816935, "percentage": 100.0, "elapsed_time": "3:47:10", "remaining_time": "0:00:00"}
183
+ {"current_steps": 182, "total_steps": 182, "epoch": 0.9982859101816935, "percentage": 100.0, "elapsed_time": "3:47:48", "remaining_time": "0:00:00"}
trainer_state.json ADDED
@@ -0,0 +1,2772 @@
1
+ {
2
+ "best_metric": null,
3
+ "best_model_checkpoint": null,
4
+ "epoch": 0.9982859101816935,
5
+ "eval_steps": 0,
6
+ "global_step": 182,
7
+ "is_hyper_param_search": false,
8
+ "is_local_process_zero": true,
9
+ "is_world_process_zero": true,
10
+ "log_history": [
11
+ {
12
+ "epoch": 0.005485087418580734,
13
+ "grad_norm": 12.289390049252361,
14
+ "learning_rate": 5.2631578947368416e-08,
15
+ "logits/chosen": -0.3854110836982727,
16
+ "logits/rejected": -0.38843637704849243,
17
+ "logps/chosen": -0.5867404937744141,
18
+ "logps/rejected": -0.7349259853363037,
19
+ "loss": 2.2106,
20
+ "rewards/accuracies": 0.328125,
21
+ "rewards/chosen": -1.8373150825500488,
22
+ "rewards/margins": -0.37046387791633606,
23
+ "rewards/rejected": -1.4668511152267456,
24
+ "step": 1
25
+ },
26
+ {
27
+ "epoch": 0.010970174837161468,
28
+ "grad_norm": 9.808910529487358,
29
+ "learning_rate": 1.0526315789473683e-07,
30
+ "logits/chosen": -0.4200110137462616,
31
+ "logits/rejected": -0.4337027370929718,
32
+ "logps/chosen": -0.5888247489929199,
33
+ "logps/rejected": -0.7141146659851074,
34
+ "loss": 2.1187,
35
+ "rewards/accuracies": 0.3671875,
36
+ "rewards/chosen": -1.7852866649627686,
37
+ "rewards/margins": -0.31322479248046875,
38
+ "rewards/rejected": -1.4720618724822998,
39
+ "step": 2
40
+ },
41
+ {
42
+ "epoch": 0.0164552622557422,
43
+ "grad_norm": 7.82623444940022,
44
+ "learning_rate": 1.5789473684210525e-07,
45
+ "logits/chosen": -0.3889790177345276,
46
+ "logits/rejected": -0.3634672164916992,
47
+ "logps/chosen": -0.6838980317115784,
48
+ "logps/rejected": -0.6908231973648071,
49
+ "loss": 2.0561,
50
+ "rewards/accuracies": 0.375,
51
+ "rewards/chosen": -1.727057933807373,
52
+ "rewards/margins": -0.01731281727552414,
53
+ "rewards/rejected": -1.709745168685913,
54
+ "step": 3
55
+ },
56
+ {
57
+ "epoch": 0.021940349674322936,
58
+ "grad_norm": 11.908990882185469,
59
+ "learning_rate": 2.1052631578947366e-07,
60
+ "logits/chosen": -0.41128796339035034,
61
+ "logits/rejected": -0.44201532006263733,
62
+ "logps/chosen": -0.6115437150001526,
63
+ "logps/rejected": -0.7170974612236023,
64
+ "loss": 2.1332,
65
+ "rewards/accuracies": 0.4140625,
66
+ "rewards/chosen": -1.7927436828613281,
67
+ "rewards/margins": -0.2638842463493347,
68
+ "rewards/rejected": -1.5288593769073486,
69
+ "step": 4
70
+ },
71
+ {
72
+ "epoch": 0.027425437092903668,
73
+ "grad_norm": 9.763139044795885,
74
+ "learning_rate": 2.631578947368421e-07,
75
+ "logits/chosen": -0.4671156406402588,
76
+ "logits/rejected": -0.4450330138206482,
77
+ "logps/chosen": -0.5787723064422607,
78
+ "logps/rejected": -0.6804812550544739,
79
+ "loss": 2.0846,
80
+ "rewards/accuracies": 0.3359375,
81
+ "rewards/chosen": -1.7012031078338623,
82
+ "rewards/margins": -0.2542722821235657,
83
+ "rewards/rejected": -1.4469308853149414,
84
+ "step": 5
85
+ },
86
+ {
87
+ "epoch": 0.0329105245114844,
88
+ "grad_norm": 10.006426324247332,
89
+ "learning_rate": 3.157894736842105e-07,
90
+ "logits/chosen": -0.3141833543777466,
91
+ "logits/rejected": -0.3982672095298767,
92
+ "logps/chosen": -0.6082320213317871,
93
+ "logps/rejected": -0.7216463088989258,
94
+ "loss": 2.1337,
95
+ "rewards/accuracies": 0.375,
96
+ "rewards/chosen": -1.8041157722473145,
97
+ "rewards/margins": -0.2835356295108795,
98
+ "rewards/rejected": -1.5205800533294678,
99
+ "step": 6
100
+ },
101
+ {
102
+ "epoch": 0.03839561193006513,
103
+ "grad_norm": 11.019623612251491,
104
+ "learning_rate": 3.684210526315789e-07,
105
+ "logits/chosen": -0.41403576731681824,
106
+ "logits/rejected": -0.4747769236564636,
107
+ "logps/chosen": -0.6087530851364136,
108
+ "logps/rejected": -0.6581674814224243,
109
+ "loss": 2.0016,
110
+ "rewards/accuracies": 0.4140625,
111
+ "rewards/chosen": -1.6454188823699951,
112
+ "rewards/margins": -0.12353596091270447,
113
+ "rewards/rejected": -1.5218827724456787,
114
+ "step": 7
115
+ },
116
+ {
117
+ "epoch": 0.04388069934864587,
118
+ "grad_norm": 13.106807696358263,
119
+ "learning_rate": 4.2105263157894733e-07,
120
+ "logits/chosen": -0.3825553059577942,
121
+ "logits/rejected": -0.4531961679458618,
122
+ "logps/chosen": -0.6047242879867554,
123
+ "logps/rejected": -0.7030869722366333,
124
+ "loss": 2.1026,
125
+ "rewards/accuracies": 0.390625,
126
+ "rewards/chosen": -1.7577173709869385,
127
+ "rewards/margins": -0.24590645730495453,
128
+ "rewards/rejected": -1.5118108987808228,
129
+ "step": 8
130
+ },
131
+ {
132
+ "epoch": 0.049365786767226603,
133
+ "grad_norm": 11.770594063909739,
134
+ "learning_rate": 4.7368421052631574e-07,
135
+ "logits/chosen": -0.3712048828601837,
136
+ "logits/rejected": -0.40800797939300537,
137
+ "logps/chosen": -0.5091754794120789,
138
+ "logps/rejected": -0.7823854684829712,
139
+ "loss": 2.425,
140
+ "rewards/accuracies": 0.265625,
141
+ "rewards/chosen": -1.9559637308120728,
142
+ "rewards/margins": -0.683025062084198,
143
+ "rewards/rejected": -1.27293860912323,
144
+ "step": 9
145
+ },
146
+ {
147
+ "epoch": 0.054850874185807336,
148
+ "grad_norm": 12.669444366856121,
149
+ "learning_rate": 5.263157894736842e-07,
150
+ "logits/chosen": -0.4165411591529846,
151
+ "logits/rejected": -0.3989042639732361,
152
+ "logps/chosen": -0.5609541535377502,
153
+ "logps/rejected": -0.8160127997398376,
154
+ "loss": 2.435,
155
+ "rewards/accuracies": 0.296875,
156
+ "rewards/chosen": -2.040031909942627,
157
+ "rewards/margins": -0.637646496295929,
158
+ "rewards/rejected": -1.4023855924606323,
159
+ "step": 10
160
+ },
161
+ {
162
+ "epoch": 0.06033596160438807,
163
+ "grad_norm": 18.900235847859772,
164
+ "learning_rate": 5.789473684210526e-07,
165
+ "logits/chosen": -0.39879918098449707,
166
+ "logits/rejected": -0.4029804468154907,
167
+ "logps/chosen": -0.5685741901397705,
168
+ "logps/rejected": -0.7790898084640503,
169
+ "loss": 2.3341,
170
+ "rewards/accuracies": 0.3359375,
171
+ "rewards/chosen": -1.9477243423461914,
172
+ "rewards/margins": -0.5262887477874756,
173
+ "rewards/rejected": -1.4214354753494263,
174
+ "step": 11
175
+ },
176
+ {
177
+ "epoch": 0.0658210490229688,
178
+ "grad_norm": 11.34517381440851,
179
+ "learning_rate": 6.31578947368421e-07,
180
+ "logits/chosen": -0.43950849771499634,
181
+ "logits/rejected": -0.4288792312145233,
182
+ "logps/chosen": -0.6902109980583191,
183
+ "logps/rejected": -0.7512154579162598,
184
+ "loss": 2.2512,
185
+ "rewards/accuracies": 0.296875,
186
+ "rewards/chosen": -1.8780386447906494,
187
+ "rewards/margins": -0.15251119434833527,
188
+ "rewards/rejected": -1.7255275249481201,
189
+ "step": 12
190
+ },
191
+ {
192
+ "epoch": 0.07130613644154954,
193
+ "grad_norm": 12.242091991409806,
194
+ "learning_rate": 6.842105263157895e-07,
195
+ "logits/chosen": -0.3881993591785431,
196
+ "logits/rejected": -0.38021671772003174,
197
+ "logps/chosen": -0.5799139738082886,
198
+ "logps/rejected": -0.714693546295166,
199
+ "loss": 2.1644,
200
+ "rewards/accuracies": 0.328125,
201
+ "rewards/chosen": -1.7867339849472046,
202
+ "rewards/margins": -0.3369489908218384,
203
+ "rewards/rejected": -1.4497849941253662,
204
+ "step": 13
205
+ },
206
+ {
207
+ "epoch": 0.07679122386013026,
208
+ "grad_norm": 15.332493592901395,
209
+ "learning_rate": 7.368421052631578e-07,
210
+ "logits/chosen": -0.4336930215358734,
211
+ "logits/rejected": -0.4184849262237549,
212
+ "logps/chosen": -0.6871960163116455,
213
+ "logps/rejected": -0.8031256794929504,
214
+ "loss": 2.3949,
215
+ "rewards/accuracies": 0.3203125,
216
+ "rewards/chosen": -2.0078141689300537,
217
+ "rewards/margins": -0.28982412815093994,
218
+ "rewards/rejected": -1.7179901599884033,
219
+ "step": 14
220
+ },
221
+ {
222
+ "epoch": 0.082276311278711,
223
+ "grad_norm": 14.281514936865825,
224
+ "learning_rate": 7.894736842105263e-07,
225
+ "logits/chosen": -0.420330286026001,
226
+ "logits/rejected": -0.39623183012008667,
227
+ "logps/chosen": -0.6022149324417114,
228
+ "logps/rejected": -0.7605262398719788,
229
+ "loss": 2.2735,
230
+ "rewards/accuracies": 0.328125,
231
+ "rewards/chosen": -1.901315450668335,
232
+ "rewards/margins": -0.39577823877334595,
233
+ "rewards/rejected": -1.5055372714996338,
234
+ "step": 15
235
+ },
236
+ {
237
+ "epoch": 0.08776139869729174,
238
+ "grad_norm": 17.350845573103634,
239
+ "learning_rate": 8.421052631578947e-07,
240
+ "logits/chosen": -0.4929282069206238,
241
+ "logits/rejected": -0.4550182521343231,
242
+ "logps/chosen": -0.5783462524414062,
243
+ "logps/rejected": -0.7597732543945312,
244
+ "loss": 2.2617,
245
+ "rewards/accuracies": 0.34375,
246
+ "rewards/chosen": -1.8994331359863281,
247
+ "rewards/margins": -0.45356735587120056,
248
+ "rewards/rejected": -1.4458656311035156,
249
+ "step": 16
250
+ },
251
+ {
252
+ "epoch": 0.09324648611587247,
253
+ "grad_norm": 13.387104362920853,
254
+ "learning_rate": 8.947368421052631e-07,
255
+ "logits/chosen": -0.4294038414955139,
256
+ "logits/rejected": -0.4245000183582306,
257
+ "logps/chosen": -0.6331275105476379,
258
+ "logps/rejected": -0.7878226041793823,
259
+ "loss": 2.3686,
260
+ "rewards/accuracies": 0.265625,
261
+ "rewards/chosen": -1.9695566892623901,
262
+ "rewards/margins": -0.3867378830909729,
263
+ "rewards/rejected": -1.582818865776062,
264
+ "step": 17
265
+ },
266
+ {
267
+ "epoch": 0.09873157353445321,
268
+ "grad_norm": 11.747460293777701,
269
+ "learning_rate": 9.473684210526315e-07,
270
+ "logits/chosen": -0.43416640162467957,
271
+ "logits/rejected": -0.41705217957496643,
272
+ "logps/chosen": -0.6964335441589355,
273
+ "logps/rejected": -0.7354090213775635,
274
+ "loss": 2.0268,
275
+ "rewards/accuracies": 0.3984375,
276
+ "rewards/chosen": -1.8385226726531982,
277
+ "rewards/margins": -0.09743872284889221,
278
+ "rewards/rejected": -1.741084098815918,
279
+ "step": 18
280
+ },
281
+ {
282
+ "epoch": 0.10421666095303393,
283
+ "grad_norm": 16.166401888969105,
284
+ "learning_rate": 1e-06,
285
+ "logits/chosen": -0.42683446407318115,
286
+ "logits/rejected": -0.49105304479599,
287
+ "logps/chosen": -0.5967155694961548,
288
+ "logps/rejected": -0.6899043917655945,
289
+ "loss": 2.0874,
290
+ "rewards/accuracies": 0.3046875,
291
+ "rewards/chosen": -1.724760890007019,
292
+ "rewards/margins": -0.2329719066619873,
293
+ "rewards/rejected": -1.4917889833450317,
294
+ "step": 19
295
+ },
296
+ {
297
+ "epoch": 0.10970174837161467,
298
+ "grad_norm": 8.619811148509852,
299
+ "learning_rate": 9.999071352056673e-07,
300
+ "logits/chosen": -0.3747601807117462,
301
+ "logits/rejected": -0.41268259286880493,
302
+ "logps/chosen": -0.5958091616630554,
303
+ "logps/rejected": -0.7304958701133728,
304
+ "loss": 2.0832,
305
+ "rewards/accuracies": 0.34375,
306
+ "rewards/chosen": -1.8262397050857544,
307
+ "rewards/margins": -0.33671680092811584,
308
+ "rewards/rejected": -1.489522933959961,
309
+ "step": 20
310
+ },
311
+ {
312
+ "epoch": 0.11518683579019541,
313
+ "grad_norm": 8.74498909781148,
314
+ "learning_rate": 9.996285753181497e-07,
315
+ "logits/chosen": -0.4670315086841583,
316
+ "logits/rejected": -0.4540305733680725,
317
+ "logps/chosen": -0.6016995906829834,
318
+ "logps/rejected": -0.7677630186080933,
319
+ "loss": 2.1691,
320
+ "rewards/accuracies": 0.3046875,
321
+ "rewards/chosen": -1.9194074869155884,
322
+ "rewards/margins": -0.4151587188243866,
323
+ "rewards/rejected": -1.504248857498169,
324
+ "step": 21
325
+ },
326
+ {
327
+ "epoch": 0.12067192320877614,
328
+ "grad_norm": 12.710357284461692,
329
+ "learning_rate": 9.99164423811074e-07,
330
+ "logits/chosen": -0.4635859429836273,
331
+ "logits/rejected": -0.49371862411499023,
332
+ "logps/chosen": -0.9511612057685852,
333
+ "logps/rejected": -0.7291851043701172,
334
+ "loss": 1.9912,
335
+ "rewards/accuracies": 0.34375,
336
+ "rewards/chosen": -1.8229626417160034,
337
+ "rewards/margins": 0.5549403429031372,
338
+ "rewards/rejected": -2.3779029846191406,
339
+ "step": 22
340
+ },
341
+ {
342
+ "epoch": 0.12615701062735687,
343
+ "grad_norm": 7.521146349471263,
344
+ "learning_rate": 9.985148530977764e-07,
345
+ "logits/chosen": -0.44370928406715393,
346
+ "logits/rejected": -0.4425956606864929,
347
+ "logps/chosen": -0.6404778361320496,
348
+ "logps/rejected": -0.7111139297485352,
349
+ "loss": 1.9723,
350
+ "rewards/accuracies": 0.3203125,
351
+ "rewards/chosen": -1.777784824371338,
352
+ "rewards/margins": -0.17659035325050354,
353
+ "rewards/rejected": -1.6011945009231567,
354
+ "step": 23
355
+ },
356
+ {
357
+ "epoch": 0.1316420980459376,
358
+ "grad_norm": 9.114160770658133,
359
+ "learning_rate": 9.976801044672607e-07,
360
+ "logits/chosen": -0.4481334686279297,
361
+ "logits/rejected": -0.45388296246528625,
362
+ "logps/chosen": -0.7301231622695923,
363
+ "logps/rejected": -0.6901272535324097,
364
+ "loss": 1.8305,
365
+ "rewards/accuracies": 0.4140625,
366
+ "rewards/chosen": -1.7253180742263794,
367
+ "rewards/margins": 0.09998967498540878,
368
+ "rewards/rejected": -1.825307846069336,
369
+ "step": 24
370
+ },
371
+ {
372
+ "epoch": 0.13712718546451835,
373
+ "grad_norm": 16.624964926964527,
374
+ "learning_rate": 9.966604879945656e-07,
375
+ "logits/chosen": -0.5069385170936584,
376
+ "logits/rejected": -0.5200111865997314,
377
+ "logps/chosen": -0.721697211265564,
378
+ "logps/rejected": -0.7383979558944702,
379
+ "loss": 1.9472,
380
+ "rewards/accuracies": 0.3828125,
381
+ "rewards/chosen": -1.8459948301315308,
382
+ "rewards/margins": -0.04175184667110443,
383
+ "rewards/rejected": -1.8042429685592651,
384
+ "step": 25
385
+ },
386
+ {
387
+ "epoch": 0.14261227288309908,
388
+ "grad_norm": 8.144004658411824,
389
+ "learning_rate": 9.954563824255877e-07,
390
+ "logits/chosen": -0.4636583924293518,
391
+ "logits/rejected": -0.4762532711029053,
392
+ "logps/chosen": -0.6404789686203003,
393
+ "logps/rejected": -0.7015948295593262,
394
+ "loss": 1.8902,
395
+ "rewards/accuracies": 0.4140625,
396
+ "rewards/chosen": -1.7539873123168945,
397
+ "rewards/margins": -0.15278980135917664,
398
+ "rewards/rejected": -1.601197361946106,
399
+ "step": 26
400
+ },
401
+ {
402
+ "epoch": 0.1480973603016798,
403
+ "grad_norm": 9.860499037174334,
404
+ "learning_rate": 9.94068235036391e-07,
405
+ "logits/chosen": -0.4618387818336487,
406
+ "logits/rejected": -0.47574925422668457,
407
+ "logps/chosen": -0.6812009811401367,
408
+ "logps/rejected": -0.7617368698120117,
409
+ "loss": 1.9858,
410
+ "rewards/accuracies": 0.3046875,
411
+ "rewards/chosen": -1.9043422937393188,
412
+ "rewards/margins": -0.20133966207504272,
413
+ "rewards/rejected": -1.7030025720596313,
414
+ "step": 27
415
+ },
416
+ {
417
+ "epoch": 0.15358244772026053,
418
+ "grad_norm": 10.039428944668376,
419
+ "learning_rate": 9.924965614670628e-07,
420
+ "logits/chosen": -0.5124188661575317,
421
+ "logits/rejected": -0.4997189939022064,
422
+ "logps/chosen": -0.755838930606842,
423
+ "logps/rejected": -0.7807177901268005,
424
+ "loss": 1.9039,
425
+ "rewards/accuracies": 0.3828125,
426
+ "rewards/chosen": -1.9517943859100342,
427
+ "rewards/margins": -0.06219691038131714,
428
+ "rewards/rejected": -1.8895972967147827,
429
+ "step": 28
430
+ },
431
+ {
432
+ "epoch": 0.15906753513884128,
433
+ "grad_norm": 10.666772071989447,
434
+ "learning_rate": 9.90741945530174e-07,
435
+ "logits/chosen": -0.5431128740310669,
436
+ "logits/rejected": -0.5210611820220947,
437
+ "logps/chosen": -0.9286273121833801,
438
+ "logps/rejected": -0.9671891927719116,
439
+ "loss": 2.0302,
440
+ "rewards/accuracies": 0.28125,
441
+ "rewards/chosen": -2.417973041534424,
442
+ "rewards/margins": -0.0964045524597168,
443
+ "rewards/rejected": -2.321568250656128,
444
+ "step": 29
445
+ },
446
+ {
447
+ "epoch": 0.164552622557422,
448
+ "grad_norm": 8.160421166678184,
449
+ "learning_rate": 9.888050389939172e-07,
450
+ "logits/chosen": -0.5067495107650757,
451
+ "logits/rejected": -0.4774128198623657,
452
+ "logps/chosen": -0.7992498874664307,
453
+ "logps/rejected": -0.8549879789352417,
454
+ "loss": 1.9332,
455
+ "rewards/accuracies": 0.421875,
456
+ "rewards/chosen": -2.13746976852417,
457
+ "rewards/margins": -0.13934528827667236,
458
+ "rewards/rejected": -1.9981244802474976,
459
+ "step": 30
460
+ },
461
+ {
462
+ "epoch": 0.17003770997600273,
463
+ "grad_norm": 10.514382125623976,
464
+ "learning_rate": 9.866865613400006e-07,
465
+ "logits/chosen": -0.5173575282096863,
466
+ "logits/rejected": -0.4771508574485779,
467
+ "logps/chosen": -0.8561153411865234,
468
+ "logps/rejected": -0.9025238156318665,
469
+ "loss": 1.9123,
470
+ "rewards/accuracies": 0.3671875,
471
+ "rewards/chosen": -2.256309747695923,
472
+ "rewards/margins": -0.11602123826742172,
473
+ "rewards/rejected": -2.1402883529663086,
474
+ "step": 31
475
+ },
476
+ {
477
+ "epoch": 0.17552279739458349,
478
+ "grad_norm": 6.797061723595508,
479
+ "learning_rate": 9.843872994963912e-07,
480
+ "logits/chosen": -0.6115865707397461,
481
+ "logits/rejected": -0.5253005027770996,
482
+ "logps/chosen": -0.8001683950424194,
483
+ "logps/rejected": -0.8365844488143921,
484
+ "loss": 1.9177,
485
+ "rewards/accuracies": 0.3515625,
486
+ "rewards/chosen": -2.091461181640625,
487
+ "rewards/margins": -0.09104003012180328,
488
+ "rewards/rejected": -2.0004210472106934,
489
+ "step": 32
490
+ },
491
+ {
492
+ "epoch": 0.1810078848131642,
493
+ "grad_norm": 10.986752360147651,
494
+ "learning_rate": 9.819081075450013e-07,
495
+ "logits/chosen": -0.5589928030967712,
496
+ "logits/rejected": -0.5537349581718445,
497
+ "logps/chosen": -0.9822956323623657,
498
+ "logps/rejected": -0.8852106332778931,
499
+ "loss": 1.7645,
500
+ "rewards/accuracies": 0.4921875,
501
+ "rewards/chosen": -2.213026523590088,
502
+ "rewards/margins": 0.2427126169204712,
503
+ "rewards/rejected": -2.4557392597198486,
504
+ "step": 33
505
+ },
506
+ {
507
+ "epoch": 0.18649297223174494,
508
+ "grad_norm": 7.9793683352025395,
509
+ "learning_rate": 9.792499064044342e-07,
510
+ "logits/chosen": -0.6396060585975647,
511
+ "logits/rejected": -0.557750940322876,
512
+ "logps/chosen": -0.8982308506965637,
513
+ "logps/rejected": -0.9113630056381226,
514
+ "loss": 1.8363,
515
+ "rewards/accuracies": 0.375,
516
+ "rewards/chosen": -2.2784078121185303,
517
+ "rewards/margins": -0.03283056244254112,
518
+ "rewards/rejected": -2.245576858520508,
519
+ "step": 34
520
+ },
521
+ {
522
+ "epoch": 0.1919780596503257,
523
+ "grad_norm": 6.791520764999855,
524
+ "learning_rate": 9.764136834878985e-07,
525
+ "logits/chosen": -0.6534283757209778,
526
+ "logits/rejected": -0.6032913327217102,
527
+ "logps/chosen": -0.8987076282501221,
528
+ "logps/rejected": -0.9370582699775696,
529
+ "loss": 1.8242,
530
+ "rewards/accuracies": 0.40625,
531
+ "rewards/chosen": -2.3426456451416016,
532
+ "rewards/margins": -0.095876544713974,
533
+ "rewards/rejected": -2.2467689514160156,
534
+ "step": 35
535
+ },
536
+ {
537
+ "epoch": 0.19746314706890641,
538
+ "grad_norm": 7.022459099441044,
539
+ "learning_rate": 9.734004923364256e-07,
540
+ "logits/chosen": -0.6581586003303528,
541
+ "logits/rejected": -0.6092681884765625,
542
+ "logps/chosen": -0.960416316986084,
543
+ "logps/rejected": -0.9933475852012634,
544
+ "loss": 1.8998,
545
+ "rewards/accuracies": 0.390625,
546
+ "rewards/chosen": -2.4833688735961914,
547
+ "rewards/margins": -0.08232799917459488,
548
+ "rewards/rejected": -2.401041030883789,
549
+ "step": 36
550
+ },
551
+ {
552
+ "epoch": 0.20294823448748714,
553
+ "grad_norm": 7.996309219752066,
554
+ "learning_rate": 9.702114522275216e-07,
555
+ "logits/chosen": -0.6663313508033752,
556
+ "logits/rejected": -0.6023609638214111,
557
+ "logps/chosen": -0.9479801058769226,
558
+ "logps/rejected": -0.998908281326294,
559
+ "loss": 1.8343,
560
+ "rewards/accuracies": 0.40625,
561
+ "rewards/chosen": -2.4972708225250244,
562
+ "rewards/margins": -0.1273205429315567,
563
+ "rewards/rejected": -2.369950294494629,
564
+ "step": 37
565
+ },
566
+ {
567
+ "epoch": 0.20843332190606786,
568
+ "grad_norm": 6.699323358307292,
569
+ "learning_rate": 9.66847747759402e-07,
570
+ "logits/chosen": -0.622665286064148,
571
+ "logits/rejected": -0.5349312424659729,
572
+ "logps/chosen": -1.041911244392395,
573
+ "logps/rejected": -0.9843886494636536,
574
+ "loss": 1.6697,
575
+ "rewards/accuracies": 0.4921875,
576
+ "rewards/chosen": -2.4609715938568115,
577
+ "rewards/margins": 0.14380690455436707,
578
+ "rewards/rejected": -2.604778289794922,
579
+ "step": 38
580
+ },
581
+ {
582
+ "epoch": 0.21391840932464862,
583
+ "grad_norm": 6.682266326866778,
584
+ "learning_rate": 9.63310628410961e-07,
585
+ "logits/chosen": -0.6338837742805481,
586
+ "logits/rejected": -0.5423075556755066,
587
+ "logps/chosen": -1.0179288387298584,
588
+ "logps/rejected": -0.967695951461792,
589
+ "loss": 1.8336,
590
+ "rewards/accuracies": 0.359375,
591
+ "rewards/chosen": -2.4192402362823486,
592
+ "rewards/margins": 0.1255817860364914,
593
+ "rewards/rejected": -2.5448219776153564,
594
+ "step": 39
595
+ },
596
+ {
+ "epoch": 0.21940349674322934,
+ "grad_norm": 8.105951134201701,
+ "learning_rate": 9.596014080776421e-07,
+ "logits/chosen": -0.6672332286834717,
+ "logits/rejected": -0.6281388998031616,
+ "logps/chosen": -1.0504218339920044,
+ "logps/rejected": -1.0622175931930542,
+ "loss": 1.8315,
+ "rewards/accuracies": 0.40625,
+ "rewards/chosen": -2.6555442810058594,
+ "rewards/margins": -0.029489843174815178,
+ "rewards/rejected": -2.626054525375366,
+ "step": 40
+ },
+ {
+ "epoch": 0.22488858416181007,
+ "grad_norm": 13.378049805845984,
+ "learning_rate": 9.55721464583379e-07,
+ "logits/chosen": -0.7849185466766357,
+ "logits/rejected": -0.6884415745735168,
+ "logps/chosen": -1.0642319917678833,
+ "logps/rejected": -1.05228853225708,
+ "loss": 1.7842,
+ "rewards/accuracies": 0.4140625,
+ "rewards/chosen": -2.6307215690612793,
+ "rewards/margins": 0.02985840104520321,
+ "rewards/rejected": -2.6605796813964844,
+ "step": 41
+ },
+ {
+ "epoch": 0.23037367158039082,
+ "grad_norm": 9.064118287711297,
+ "learning_rate": 9.516722391687902e-07,
+ "logits/chosen": -0.6929774284362793,
+ "logits/rejected": -0.6579635739326477,
+ "logps/chosen": -1.1520164012908936,
+ "logps/rejected": -1.1617083549499512,
+ "loss": 1.9825,
+ "rewards/accuracies": 0.3203125,
+ "rewards/chosen": -2.904270887374878,
+ "rewards/margins": -0.02423013746738434,
+ "rewards/rejected": -2.8800406455993652,
+ "step": 42
+ },
+ {
+ "epoch": 0.23585875899897155,
+ "grad_norm": 7.541139303599616,
+ "learning_rate": 9.474552359558165e-07,
+ "logits/chosen": -0.7736871242523193,
+ "logits/rejected": -0.6667463183403015,
+ "logps/chosen": -1.1246612071990967,
+ "logps/rejected": -1.06680428981781,
+ "loss": 1.6144,
+ "rewards/accuracies": 0.4453125,
+ "rewards/chosen": -2.667010545730591,
+ "rewards/margins": 0.1446424126625061,
+ "rewards/rejected": -2.811652898788452,
+ "step": 43
+ },
+ {
+ "epoch": 0.24134384641755227,
+ "grad_norm": 9.736710769671427,
+ "learning_rate": 9.430720213890029e-07,
+ "logits/chosen": -0.7818886041641235,
+ "logits/rejected": -0.7196276187896729,
+ "logps/chosen": -1.2841920852661133,
+ "logps/rejected": -1.2012193202972412,
+ "loss": 1.648,
+ "rewards/accuracies": 0.4453125,
+ "rewards/chosen": -3.0030481815338135,
+ "rewards/margins": 0.2074318528175354,
+ "rewards/rejected": -3.210480213165283,
+ "step": 44
+ },
+ {
+ "epoch": 0.24682893383613302,
+ "grad_norm": 21.756241022877973,
+ "learning_rate": 9.385242236536259e-07,
+ "logits/chosen": -0.8642858266830444,
+ "logits/rejected": -0.816374659538269,
+ "logps/chosen": -1.3722171783447266,
+ "logps/rejected": -1.34407639503479,
+ "loss": 1.9336,
+ "rewards/accuracies": 0.46875,
+ "rewards/chosen": -3.3601903915405273,
+ "rewards/margins": 0.07035252451896667,
+ "rewards/rejected": -3.4305431842803955,
+ "step": 45
+ },
+ {
+ "epoch": 0.25231402125471375,
+ "grad_norm": 32.5907546315537,
+ "learning_rate": 9.338135320708911e-07,
+ "logits/chosen": -0.7350670695304871,
+ "logits/rejected": -0.6843174695968628,
+ "logps/chosen": -1.6099109649658203,
+ "logps/rejected": -1.380704641342163,
+ "loss": 1.4913,
+ "rewards/accuracies": 0.5234375,
+ "rewards/chosen": -3.4517619609832764,
+ "rewards/margins": 0.5730158686637878,
+ "rewards/rejected": -4.024777889251709,
+ "step": 46
+ },
+ {
+ "epoch": 0.2577991086732945,
+ "grad_norm": 69.74307826075179,
+ "learning_rate": 9.289416964704185e-07,
+ "logits/chosen": -0.6261876821517944,
+ "logits/rejected": -0.5953123569488525,
+ "logps/chosen": -2.0205883979797363,
+ "logps/rejected": -1.7966296672821045,
+ "loss": 1.4505,
+ "rewards/accuracies": 0.6171875,
+ "rewards/chosen": -4.491574287414551,
+ "rewards/margins": 0.5598966479301453,
+ "rewards/rejected": -5.05147123336792,
+ "step": 47
+ },
+ {
+ "epoch": 0.2632841960918752,
+ "grad_norm": 111.71156009374812,
+ "learning_rate": 9.239105265402525e-07,
+ "logits/chosen": -0.6214447021484375,
+ "logits/rejected": -0.6121379137039185,
+ "logps/chosen": -4.529265880584717,
+ "logps/rejected": -4.061288356781006,
+ "loss": 1.5693,
+ "rewards/accuracies": 0.671875,
+ "rewards/chosen": -10.15322208404541,
+ "rewards/margins": 1.1699434518814087,
+ "rewards/rejected": -11.323163032531738,
+ "step": 48
+ },
+ {
+ "epoch": 0.2687692835104559,
+ "grad_norm": 106.00540500477615,
+ "learning_rate": 9.187218911546361e-07,
+ "logits/chosen": -0.6318798661231995,
+ "logits/rejected": -0.6299155950546265,
+ "logps/chosen": -6.402857780456543,
+ "logps/rejected": -5.515579700469971,
+ "loss": 1.4994,
+ "rewards/accuracies": 0.75,
+ "rewards/chosen": -13.788949966430664,
+ "rewards/margins": 2.218195676803589,
+ "rewards/rejected": -16.007144927978516,
+ "step": 49
+ },
+ {
+ "epoch": 0.2742543709290367,
+ "grad_norm": 100.18405633526882,
+ "learning_rate": 9.133777176798012e-07,
+ "logits/chosen": -0.5955071449279785,
+ "logits/rejected": -0.587684690952301,
+ "logps/chosen": -6.099169731140137,
+ "logps/rejected": -5.420334339141846,
+ "loss": 1.686,
+ "rewards/accuracies": 0.640625,
+ "rewards/chosen": -13.550837516784668,
+ "rewards/margins": 1.6970889568328857,
+ "rewards/rejected": -15.2479248046875,
+ "step": 50
+ },
+ {
+ "epoch": 0.27973945834761743,
+ "grad_norm": 58.20603276880462,
+ "learning_rate": 9.078799912580303e-07,
+ "logits/chosen": -0.4950883388519287,
+ "logits/rejected": -0.47393253445625305,
+ "logps/chosen": -3.7327165603637695,
+ "logps/rejected": -2.9755775928497314,
+ "loss": 1.1265,
+ "rewards/accuracies": 0.7265625,
+ "rewards/chosen": -7.438943386077881,
+ "rewards/margins": 1.892848014831543,
+ "rewards/rejected": -9.331791877746582,
+ "step": 51
+ },
+ {
+ "epoch": 0.28522454576619816,
+ "grad_norm": 34.976161133474584,
+ "learning_rate": 9.022307540702576e-07,
+ "logits/chosen": -0.595772385597229,
+ "logits/rejected": -0.5804386138916016,
+ "logps/chosen": -3.406771659851074,
+ "logps/rejected": -2.477839469909668,
+ "loss": 1.0881,
+ "rewards/accuracies": 0.7578125,
+ "rewards/chosen": -6.1945977210998535,
+ "rewards/margins": 2.322330951690674,
+ "rewards/rejected": -8.516929626464844,
+ "step": 52
+ },
+ {
+ "epoch": 0.2907096331847789,
+ "grad_norm": 51.69206884243948,
+ "learning_rate": 8.964321045774806e-07,
+ "logits/chosen": -0.5769085884094238,
+ "logits/rejected": -0.5628898739814758,
+ "logps/chosen": -3.4867472648620605,
+ "logps/rejected": -2.7880890369415283,
+ "loss": 1.2003,
+ "rewards/accuracies": 0.734375,
+ "rewards/chosen": -6.970221996307373,
+ "rewards/margins": 1.7466471195220947,
+ "rewards/rejected": -8.716869354248047,
+ "step": 53
+ },
+ {
+ "epoch": 0.2961947206033596,
+ "grad_norm": 71.37167688264066,
+ "learning_rate": 8.904861967412701e-07,
+ "logits/chosen": -0.6889777183532715,
+ "logits/rejected": -0.6549051403999329,
+ "logps/chosen": -2.727174997329712,
+ "logps/rejected": -2.3221614360809326,
+ "loss": 1.4033,
+ "rewards/accuracies": 0.6640625,
+ "rewards/chosen": -5.805403709411621,
+ "rewards/margins": 1.0125339031219482,
+ "rewards/rejected": -6.817937850952148,
+ "step": 54
+ },
+ {
+ "epoch": 0.30167980802194033,
+ "grad_norm": 47.86988316740339,
+ "learning_rate": 8.843952392236593e-07,
+ "logits/chosen": -0.7492246031761169,
+ "logits/rejected": -0.6207780838012695,
+ "logps/chosen": -3.1855251789093018,
+ "logps/rejected": -2.70566987991333,
+ "loss": 1.3969,
+ "rewards/accuracies": 0.8125,
+ "rewards/chosen": -6.764174461364746,
+ "rewards/margins": 1.1996381282806396,
+ "rewards/rejected": -7.963812351226807,
+ "step": 55
+ },
+ {
+ "epoch": 0.30716489544052106,
+ "grad_norm": 78.52357858963444,
+ "learning_rate": 8.781614945667168e-07,
+ "logits/chosen": -0.7661877274513245,
+ "logits/rejected": -0.6237936615943909,
+ "logps/chosen": -3.6718194484710693,
+ "logps/rejected": -2.841695547103882,
+ "loss": 1.2378,
+ "rewards/accuracies": 0.765625,
+ "rewards/chosen": -7.104238033294678,
+ "rewards/margins": 2.075310230255127,
+ "rewards/rejected": -9.179548263549805,
+ "step": 56
+ },
+ {
+ "epoch": 0.31264998285910184,
+ "grad_norm": 71.4152133884341,
+ "learning_rate": 8.717872783521047e-07,
+ "logits/chosen": -0.800015389919281,
+ "logits/rejected": -0.7331135272979736,
+ "logps/chosen": -3.140065908432007,
+ "logps/rejected": -2.2423806190490723,
+ "loss": 1.0947,
+ "rewards/accuracies": 0.8125,
+ "rewards/chosen": -5.605951309204102,
+ "rewards/margins": 2.244213104248047,
+ "rewards/rejected": -7.850164890289307,
+ "step": 57
+ },
+ {
+ "epoch": 0.31813507027768256,
+ "grad_norm": 42.30260390013795,
+ "learning_rate": 8.652749583409339e-07,
+ "logits/chosen": -0.9033212661743164,
+ "logits/rejected": -0.7837256193161011,
+ "logps/chosen": -3.608813524246216,
+ "logps/rejected": -2.8119547367095947,
+ "loss": 1.0673,
+ "rewards/accuracies": 0.78125,
+ "rewards/chosen": -7.0298871994018555,
+ "rewards/margins": 1.99214768409729,
+ "rewards/rejected": -9.02203369140625,
+ "step": 58
+ },
+ {
+ "epoch": 0.3236201576962633,
+ "grad_norm": 72.45091367067401,
+ "learning_rate": 8.586269535942385e-07,
+ "logits/chosen": -0.9157741069793701,
+ "logits/rejected": -0.8138267993927002,
+ "logps/chosen": -4.422084808349609,
+ "logps/rejected": -3.3933629989624023,
+ "loss": 1.0082,
+ "rewards/accuracies": 0.8359375,
+ "rewards/chosen": -8.483407974243164,
+ "rewards/margins": 2.5718040466308594,
+ "rewards/rejected": -11.055212020874023,
+ "step": 59
+ },
+ {
+ "epoch": 0.329105245114844,
+ "grad_norm": 67.2038247219744,
+ "learning_rate": 8.518457335743924e-07,
+ "logits/chosen": -1.0231534242630005,
+ "logits/rejected": -0.8920707106590271,
+ "logps/chosen": -6.588432312011719,
+ "logps/rejected": -4.968594551086426,
+ "loss": 1.2004,
+ "rewards/accuracies": 0.7734375,
+ "rewards/chosen": -12.42148494720459,
+ "rewards/margins": 4.049595832824707,
+ "rewards/rejected": -16.471080780029297,
+ "step": 60
+ },
+ {
+ "epoch": 0.33459033253342474,
+ "grad_norm": 71.2725286215776,
+ "learning_rate": 8.449338172278058e-07,
+ "logits/chosen": -1.0856202840805054,
+ "logits/rejected": -1.001308798789978,
+ "logps/chosen": -7.01667594909668,
+ "logps/rejected": -5.5445075035095215,
+ "loss": 1.07,
+ "rewards/accuracies": 0.78125,
+ "rewards/chosen": -13.861268043518066,
+ "rewards/margins": 3.680420398712158,
+ "rewards/rejected": -17.541690826416016,
+ "step": 61
+ },
+ {
+ "epoch": 0.34007541995200546,
+ "grad_norm": 83.45605345714169,
+ "learning_rate": 8.378937720492383e-07,
+ "logits/chosen": -0.9825168251991272,
+ "logits/rejected": -0.8393011093139648,
+ "logps/chosen": -6.87870979309082,
+ "logps/rejected": -5.226398468017578,
+ "loss": 1.1342,
+ "rewards/accuracies": 0.7578125,
+ "rewards/chosen": -13.065997123718262,
+ "rewards/margins": 4.130776882171631,
+ "rewards/rejected": -17.196773529052734,
+ "step": 62
+ },
+ {
+ "epoch": 0.34556050737058625,
+ "grad_norm": 47.538952621401116,
+ "learning_rate": 8.307282131280804e-07,
+ "logits/chosen": -1.073388695716858,
+ "logits/rejected": -0.8648728728294373,
+ "logps/chosen": -5.328536033630371,
+ "logps/rejected": -3.9907565116882324,
+ "loss": 0.9218,
+ "rewards/accuracies": 0.8203125,
+ "rewards/chosen": -9.976890563964844,
+ "rewards/margins": 3.344449520111084,
+ "rewards/rejected": -13.321340560913086,
+ "step": 63
+ },
+ {
+ "epoch": 0.35104559478916697,
+ "grad_norm": 69.97786008191355,
+ "learning_rate": 8.23439802176954e-07,
+ "logits/chosen": -1.045760989189148,
+ "logits/rejected": -0.8985159993171692,
+ "logps/chosen": -4.391729831695557,
+ "logps/rejected": -2.979081869125366,
+ "loss": 1.0545,
+ "rewards/accuracies": 0.796875,
+ "rewards/chosen": -7.447704315185547,
+ "rewards/margins": 3.531620502471924,
+ "rewards/rejected": -10.979324340820312,
+ "step": 64
+ },
+ {
+ "epoch": 0.3565306822077477,
+ "grad_norm": 57.99100276656143,
+ "learning_rate": 8.160312465429952e-07,
+ "logits/chosen": -1.059841513633728,
+ "logits/rejected": -0.9158331155776978,
+ "logps/chosen": -4.3702073097229,
+ "logps/rejected": -2.9328854084014893,
+ "loss": 0.8092,
+ "rewards/accuracies": 0.828125,
+ "rewards/chosen": -7.332213401794434,
+ "rewards/margins": 3.5933048725128174,
+ "rewards/rejected": -10.925518035888672,
+ "step": 65
+ },
+ {
+ "epoch": 0.3620157696263284,
+ "grad_norm": 32.78666266298242,
+ "learning_rate": 8.085052982021847e-07,
+ "logits/chosen": -1.128019094467163,
+ "logits/rejected": -0.9226801991462708,
+ "logps/chosen": -3.9989237785339355,
+ "logps/rejected": -2.929816246032715,
+ "loss": 1.0525,
+ "rewards/accuracies": 0.7734375,
+ "rewards/chosen": -7.324541091918945,
+ "rewards/margins": 2.6727685928344727,
+ "rewards/rejected": -9.997309684753418,
+ "step": 66
+ },
+ {
+ "epoch": 0.36750085704490915,
+ "grad_norm": 50.293055849490955,
+ "learning_rate": 8.008647527371022e-07,
+ "logits/chosen": -1.4013196229934692,
+ "logits/rejected": -1.138377070426941,
+ "logps/chosen": -4.476810932159424,
+ "logps/rejected": -3.069303512573242,
+ "loss": 0.8161,
+ "rewards/accuracies": 0.84375,
+ "rewards/chosen": -7.6732587814331055,
+ "rewards/margins": 3.518767833709717,
+ "rewards/rejected": -11.19202709197998,
+ "step": 67
+ },
+ {
+ "epoch": 0.37298594446348987,
+ "grad_norm": 50.48347857053289,
+ "learning_rate": 7.931124482984801e-07,
+ "logits/chosen": -1.4336833953857422,
+ "logits/rejected": -1.2769416570663452,
+ "logps/chosen": -4.982694625854492,
+ "logps/rejected": -3.5518202781677246,
+ "loss": 1.0579,
+ "rewards/accuracies": 0.765625,
+ "rewards/chosen": -8.87955093383789,
+ "rewards/margins": 3.5771865844726562,
+ "rewards/rejected": -12.45673656463623,
+ "step": 68
+ },
+ {
+ "epoch": 0.3784710318820706,
+ "grad_norm": 41.95073141945747,
+ "learning_rate": 7.85251264550948e-07,
+ "logits/chosen": -1.5961790084838867,
+ "logits/rejected": -1.3946001529693604,
+ "logps/chosen": -5.758039951324463,
+ "logps/rejected": -4.470717906951904,
+ "loss": 1.3453,
+ "rewards/accuracies": 0.7265625,
+ "rewards/chosen": -11.176795959472656,
+ "rewards/margins": 3.218303680419922,
+ "rewards/rejected": -14.395099639892578,
+ "step": 69
+ },
+ {
+ "epoch": 0.3839561193006514,
+ "grad_norm": 52.202423339982246,
+ "learning_rate": 7.772841216033532e-07,
+ "logits/chosen": -1.6466355323791504,
+ "logits/rejected": -1.4171117544174194,
+ "logps/chosen": -6.761007308959961,
+ "logps/rejected": -5.078183650970459,
+ "loss": 1.2675,
+ "rewards/accuracies": 0.7890625,
+ "rewards/chosen": -12.69545841217041,
+ "rewards/margins": 4.207059860229492,
+ "rewards/rejected": -16.90251922607422,
+ "step": 70
+ },
+ {
+ "epoch": 0.3894412067192321,
+ "grad_norm": 33.978850229748005,
+ "learning_rate": 7.69213978924061e-07,
+ "logits/chosen": -1.544925332069397,
+ "logits/rejected": -1.2573606967926025,
+ "logps/chosen": -6.336057662963867,
+ "logps/rejected": -4.741216659545898,
+ "loss": 1.096,
+ "rewards/accuracies": 0.828125,
+ "rewards/chosen": -11.85304069519043,
+ "rewards/margins": 3.987103223800659,
+ "rewards/rejected": -15.840145111083984,
+ "step": 71
+ },
+ {
+ "epoch": 0.39492629413781283,
+ "grad_norm": 96.09934849635745,
+ "learning_rate": 7.610438342416319e-07,
+ "logits/chosen": -1.4953880310058594,
+ "logits/rejected": -1.2785418033599854,
+ "logps/chosen": -6.89131498336792,
+ "logps/rejected": -5.383131980895996,
+ "loss": 1.0171,
+ "rewards/accuracies": 0.8046875,
+ "rewards/chosen": -13.457829475402832,
+ "rewards/margins": 3.7704575061798096,
+ "rewards/rejected": -17.228288650512695,
+ "step": 72
+ },
+ {
+ "epoch": 0.40041138155639355,
+ "grad_norm": 27.422261234951808,
+ "learning_rate": 7.527767224312882e-07,
+ "logits/chosen": -1.322948932647705,
+ "logits/rejected": -1.1441978216171265,
+ "logps/chosen": -6.809509754180908,
+ "logps/rejected": -4.989666938781738,
+ "loss": 0.6614,
+ "rewards/accuracies": 0.8515625,
+ "rewards/chosen": -12.47416877746582,
+ "rewards/margins": 4.549604415893555,
+ "rewards/rejected": -17.023773193359375,
+ "step": 73
+ },
+ {
+ "epoch": 0.4058964689749743,
+ "grad_norm": 80.95278759548928,
+ "learning_rate": 7.444157143875819e-07,
+ "logits/chosen": -1.216729760169983,
+ "logits/rejected": -1.057979941368103,
+ "logps/chosen": -6.466203689575195,
+ "logps/rejected": -5.076437950134277,
+ "loss": 0.9105,
+ "rewards/accuracies": 0.8359375,
+ "rewards/chosen": -12.691095352172852,
+ "rewards/margins": 3.474414110183716,
+ "rewards/rejected": -16.165510177612305,
+ "step": 74
+ },
+ {
+ "epoch": 0.411381556393555,
+ "grad_norm": 72.36852993655451,
+ "learning_rate": 7.359639158836827e-07,
+ "logits/chosen": -1.1118669509887695,
+ "logits/rejected": -1.0416420698165894,
+ "logps/chosen": -7.078163146972656,
+ "logps/rejected": -5.48430871963501,
+ "loss": 1.0358,
+ "rewards/accuracies": 0.75,
+ "rewards/chosen": -13.710769653320312,
+ "rewards/margins": 3.9846386909484863,
+ "rewards/rejected": -17.695409774780273,
+ "step": 75
+ },
+ {
+ "epoch": 0.41686664381213573,
+ "grad_norm": 66.4908598237263,
+ "learning_rate": 7.274244664177097e-07,
+ "logits/chosen": -1.041873812675476,
+ "logits/rejected": -0.9867510199546814,
+ "logps/chosen": -6.261934280395508,
+ "logps/rejected": -4.695401668548584,
+ "loss": 0.9974,
+ "rewards/accuracies": 0.8046875,
+ "rewards/chosen": -11.738503456115723,
+ "rewards/margins": 3.9163331985473633,
+ "rewards/rejected": -15.654836654663086,
+ "step": 76
+ },
+ {
+ "epoch": 0.4223517312307165,
+ "grad_norm": 49.259846719809424,
+ "learning_rate": 7.188005380465364e-07,
+ "logits/chosen": -1.1777944564819336,
+ "logits/rejected": -1.0354324579238892,
+ "logps/chosen": -6.061973571777344,
+ "logps/rejected": -4.546577453613281,
+ "loss": 0.8174,
+ "rewards/accuracies": 0.8046875,
+ "rewards/chosen": -11.36644458770752,
+ "rewards/margins": 3.7884879112243652,
+ "rewards/rejected": -15.154932975769043,
+ "step": 77
+ },
+ {
+ "epoch": 0.42783681864929723,
+ "grad_norm": 29.6260649209974,
+ "learning_rate": 7.100953342075009e-07,
+ "logits/chosen": -1.2290102243423462,
+ "logits/rejected": -1.110871434211731,
+ "logps/chosen": -5.283913612365723,
+ "logps/rejected": -3.9515509605407715,
+ "loss": 0.8695,
+ "rewards/accuracies": 0.8203125,
+ "rewards/chosen": -9.878876686096191,
+ "rewards/margins": 3.33090877532959,
+ "rewards/rejected": -13.209785461425781,
+ "step": 78
+ },
+ {
+ "epoch": 0.43332190606787796,
+ "grad_norm": 32.05872824883326,
+ "learning_rate": 7.013120885284598e-07,
+ "logits/chosen": -1.3086589574813843,
+ "logits/rejected": -1.1846544742584229,
+ "logps/chosen": -5.159869194030762,
+ "logps/rejected": -3.5984580516815186,
+ "loss": 0.7876,
+ "rewards/accuracies": 0.8359375,
+ "rewards/chosen": -8.996145248413086,
+ "rewards/margins": 3.9035279750823975,
+ "rewards/rejected": -12.899672508239746,
+ "step": 79
+ },
+ {
+ "epoch": 0.4388069934864587,
+ "grad_norm": 40.42205251951496,
+ "learning_rate": 6.924540636266272e-07,
+ "logits/chosen": -1.3288094997406006,
+ "logits/rejected": -1.2276866436004639,
+ "logps/chosen": -5.131710529327393,
+ "logps/rejected": -3.831780433654785,
+ "loss": 0.9434,
+ "rewards/accuracies": 0.859375,
+ "rewards/chosen": -9.579451560974121,
+ "rewards/margins": 3.2498245239257812,
+ "rewards/rejected": -12.829277038574219,
+ "step": 80
+ },
+ {
+ "epoch": 0.4442920809050394,
+ "grad_norm": 36.25388918144398,
+ "learning_rate": 6.83524549896646e-07,
+ "logits/chosen": -1.163621187210083,
+ "logits/rejected": -1.1168286800384521,
+ "logps/chosen": -5.016862392425537,
+ "logps/rejected": -3.682563066482544,
+ "loss": 0.8759,
+ "rewards/accuracies": 0.8046875,
+ "rewards/chosen": -9.206408500671387,
+ "rewards/margins": 3.3357465267181396,
+ "rewards/rejected": -12.542155265808105,
+ "step": 81
+ },
+ {
+ "epoch": 0.44977716832362014,
+ "grad_norm": 25.153488392194294,
+ "learning_rate": 6.745268642883404e-07,
+ "logits/chosen": -1.2277235984802246,
+ "logits/rejected": -1.0471045970916748,
+ "logps/chosen": -5.926680088043213,
+ "logps/rejected": -4.389726161956787,
+ "loss": 0.6727,
+ "rewards/accuracies": 0.875,
+ "rewards/chosen": -10.974315643310547,
+ "rewards/margins": 3.8423848152160645,
+ "rewards/rejected": -14.816699981689453,
+ "step": 82
+ },
+ {
+ "epoch": 0.4552622557422009,
+ "grad_norm": 22.152291434451502,
+ "learning_rate": 6.654643490746041e-07,
+ "logits/chosen": -1.2063225507736206,
+ "logits/rejected": -1.0923081636428833,
+ "logps/chosen": -6.3293843269348145,
+ "logps/rejected": -4.906074047088623,
+ "loss": 0.8406,
+ "rewards/accuracies": 0.875,
+ "rewards/chosen": -12.26518440246582,
+ "rewards/margins": 3.558277130126953,
+ "rewards/rejected": -15.823461532592773,
+ "step": 83
+ },
+ {
+ "epoch": 0.46074734316078164,
+ "grad_norm": 29.47429990683989,
+ "learning_rate": 6.563403706098832e-07,
+ "logits/chosen": -1.2531236410140991,
+ "logits/rejected": -1.1536014080047607,
+ "logps/chosen": -7.200500965118408,
+ "logps/rejected": -5.6704511642456055,
+ "loss": 0.844,
+ "rewards/accuracies": 0.78125,
+ "rewards/chosen": -14.176128387451172,
+ "rewards/margins": 3.8251240253448486,
+ "rewards/rejected": -18.001251220703125,
+ "step": 84
+ },
+ {
+ "epoch": 0.46623243057936237,
+ "grad_norm": 39.720494087331325,
+ "learning_rate": 6.47158318079712e-07,
+ "logits/chosen": -1.2474052906036377,
+ "logits/rejected": -1.1939733028411865,
+ "logps/chosen": -7.995599746704102,
+ "logps/rejected": -6.105539321899414,
+ "loss": 0.9392,
+ "rewards/accuracies": 0.8046875,
+ "rewards/chosen": -15.263847351074219,
+ "rewards/margins": 4.725150108337402,
+ "rewards/rejected": -19.988998413085938,
+ "step": 85
+ },
+ {
+ "epoch": 0.4717175179979431,
+ "grad_norm": 26.901425160757288,
+ "learning_rate": 6.379216022417695e-07,
+ "logits/chosen": -1.2418212890625,
+ "logits/rejected": -1.1997092962265015,
+ "logps/chosen": -8.617918014526367,
+ "logps/rejected": -6.57258415222168,
+ "loss": 0.6221,
+ "rewards/accuracies": 0.9140625,
+ "rewards/chosen": -16.431461334228516,
+ "rewards/margins": 5.113334655761719,
+ "rewards/rejected": -21.544795989990234,
+ "step": 86
+ },
+ {
+ "epoch": 0.4772026054165238,
+ "grad_norm": 58.114713536632436,
+ "learning_rate": 6.286336541589223e-07,
+ "logits/chosen": -1.2740073204040527,
+ "logits/rejected": -1.2163861989974976,
+ "logps/chosen": -8.867392539978027,
+ "logps/rejected": -7.1280741691589355,
+ "loss": 0.9602,
+ "rewards/accuracies": 0.8125,
+ "rewards/chosen": -17.820186614990234,
+ "rewards/margins": 4.348294734954834,
+ "rewards/rejected": -22.168481826782227,
+ "step": 87
+ },
+ {
+ "epoch": 0.48268769283510454,
+ "grad_norm": 33.361524717086475,
+ "learning_rate": 6.192979239247242e-07,
+ "logits/chosen": -1.1057997941970825,
+ "logits/rejected": -1.0285227298736572,
+ "logps/chosen": -8.445158004760742,
+ "logps/rejected": -6.86185359954834,
+ "loss": 0.8718,
+ "rewards/accuracies": 0.796875,
+ "rewards/chosen": -17.154632568359375,
+ "rewards/margins": 3.9582619667053223,
+ "rewards/rejected": -21.112895965576172,
+ "step": 88
+ },
+ {
+ "epoch": 0.48817278025368527,
+ "grad_norm": 51.84635707806423,
+ "learning_rate": 6.099178793818478e-07,
+ "logits/chosen": -1.1163854598999023,
+ "logits/rejected": -1.0613051652908325,
+ "logps/chosen": -9.026713371276855,
+ "logps/rejected": -7.194836616516113,
+ "loss": 0.8724,
+ "rewards/accuracies": 0.8359375,
+ "rewards/chosen": -17.987092971801758,
+ "rewards/margins": 4.5796895027160645,
+ "rewards/rejected": -22.566783905029297,
+ "step": 89
+ },
+ {
+ "epoch": 0.49365786767226605,
+ "grad_norm": 35.13623148031408,
+ "learning_rate": 6.004970048339225e-07,
+ "logits/chosen": -0.9862219095230103,
+ "logits/rejected": -0.868794858455658,
+ "logps/chosen": -7.408356666564941,
+ "logps/rejected": -5.994558334350586,
+ "loss": 0.9605,
+ "rewards/accuracies": 0.796875,
+ "rewards/chosen": -14.986395835876465,
+ "rewards/margins": 3.5344960689544678,
+ "rewards/rejected": -18.520891189575195,
+ "step": 90
+ },
+ {
+ "epoch": 0.4991429550908468,
+ "grad_norm": 30.15074112906336,
+ "learning_rate": 5.910387997512573e-07,
+ "logits/chosen": -0.9399983286857605,
+ "logits/rejected": -0.8564634919166565,
+ "logps/chosen": -7.057155609130859,
+ "logps/rejected": -5.3340325355529785,
+ "loss": 0.7882,
+ "rewards/accuracies": 0.859375,
+ "rewards/chosen": -13.3350830078125,
+ "rewards/margins": 4.307806968688965,
+ "rewards/rejected": -17.64288902282715,
+ "step": 91
+ },
+ {
+ "epoch": 0.5046280425094275,
+ "grad_norm": 38.88195575984379,
+ "learning_rate": 5.815467774709313e-07,
+ "logits/chosen": -0.9454355835914612,
+ "logits/rejected": -0.9133027791976929,
+ "logps/chosen": -6.99016809463501,
+ "logps/rejected": -5.265021324157715,
+ "loss": 0.8483,
+ "rewards/accuracies": 0.8359375,
+ "rewards/chosen": -13.162553787231445,
+ "rewards/margins": 4.3128662109375,
+ "rewards/rejected": -17.475419998168945,
+ "step": 92
+ },
+ {
+ "epoch": 0.5101131299280083,
+ "grad_norm": 30.158868667054477,
+ "learning_rate": 5.720244638917323e-07,
+ "logits/chosen": -0.965910792350769,
+ "logits/rejected": -0.8706585168838501,
+ "logps/chosen": -6.497648239135742,
+ "logps/rejected": -4.859541416168213,
+ "loss": 0.8819,
+ "rewards/accuracies": 0.8203125,
+ "rewards/chosen": -12.148852348327637,
+ "rewards/margins": 4.095267295837402,
+ "rewards/rejected": -16.24411964416504,
+ "step": 93
+ },
+ {
+ "epoch": 0.515598217346589,
+ "grad_norm": 53.363507967860116,
+ "learning_rate": 5.624753961644281e-07,
+ "logits/chosen": -1.0084278583526611,
+ "logits/rejected": -0.9723138213157654,
+ "logps/chosen": -5.623072624206543,
+ "logps/rejected": -4.460352420806885,
+ "loss": 1.0174,
+ "rewards/accuracies": 0.796875,
+ "rewards/chosen": -11.150880813598633,
+ "rewards/margins": 2.9068009853363037,
+ "rewards/rejected": -14.057682037353516,
+ "step": 94
+ },
+ {
+ "epoch": 0.5210833047651697,
+ "grad_norm": 23.23631443491041,
+ "learning_rate": 5.529031213778614e-07,
+ "logits/chosen": -1.0280265808105469,
+ "logits/rejected": -0.9905204772949219,
+ "logps/chosen": -5.483713150024414,
+ "logps/rejected": -4.167681694030762,
+ "loss": 0.98,
+ "rewards/accuracies": 0.7890625,
+ "rewards/chosen": -10.419203758239746,
+ "rewards/margins": 3.290079355239868,
+ "rewards/rejected": -13.709283828735352,
+ "step": 95
+ },
+ {
+ "epoch": 0.5265683921837504,
+ "grad_norm": 37.123060878167124,
+ "learning_rate": 5.433111952413494e-07,
+ "logits/chosen": -1.088523507118225,
+ "logits/rejected": -0.998890221118927,
+ "logps/chosen": -5.065528869628906,
+ "logps/rejected": -3.6324613094329834,
+ "loss": 0.8177,
+ "rewards/accuracies": 0.8125,
+ "rewards/chosen": -9.081153869628906,
+ "rewards/margins": 3.582667112350464,
+ "rewards/rejected": -12.663820266723633,
+ "step": 96
+ },
+ {
+ "epoch": 0.5320534796023312,
+ "grad_norm": 25.260662494721746,
+ "learning_rate": 5.33703180763884e-07,
+ "logits/chosen": -1.0532890558242798,
+ "logits/rejected": -0.9741649031639099,
+ "logps/chosen": -5.729362964630127,
+ "logps/rejected": -4.276680946350098,
+ "loss": 0.7902,
+ "rewards/accuracies": 0.8671875,
+ "rewards/chosen": -10.691701889038086,
+ "rewards/margins": 3.631704568862915,
+ "rewards/rejected": -14.323406219482422,
+ "step": 97
+ },
+ {
+ "epoch": 0.5375385670209119,
+ "grad_norm": 46.96307372174284,
+ "learning_rate": 5.240826469306186e-07,
+ "logits/chosen": -1.0120959281921387,
+ "logits/rejected": -0.9785177707672119,
+ "logps/chosen": -6.013980865478516,
+ "logps/rejected": -4.12684965133667,
+ "loss": 0.5899,
+ "rewards/accuracies": 0.9140625,
+ "rewards/chosen": -10.317124366760254,
+ "rewards/margins": 4.71782922744751,
+ "rewards/rejected": -15.034952163696289,
+ "step": 98
+ },
+ {
+ "epoch": 0.5430236544394926,
+ "grad_norm": 24.414358778835954,
+ "learning_rate": 5.144531673771363e-07,
+ "logits/chosen": -1.002170205116272,
+ "logits/rejected": -0.9993859529495239,
+ "logps/chosen": -6.949717044830322,
+ "logps/rejected": -5.171581745147705,
+ "loss": 0.8516,
+ "rewards/accuracies": 0.796875,
+ "rewards/chosen": -12.928955078125,
+ "rewards/margins": 4.445338726043701,
+ "rewards/rejected": -17.37429428100586,
+ "step": 99
+ },
+ {
+ "epoch": 0.5485087418580734,
+ "grad_norm": 25.008431407505398,
+ "learning_rate": 5.048183190619903e-07,
+ "logits/chosen": -0.9874565005302429,
+ "logits/rejected": -0.9811626672744751,
+ "logps/chosen": -6.921389102935791,
+ "logps/rejected": -5.277797698974609,
+ "loss": 0.8378,
+ "rewards/accuracies": 0.828125,
+ "rewards/chosen": -13.19449520111084,
+ "rewards/margins": 4.108977317810059,
+ "rewards/rejected": -17.3034725189209,
+ "step": 100
+ },
+ {
+ "epoch": 0.5539938292766541,
+ "grad_norm": 27.960086992805927,
+ "learning_rate": 4.951816809380097e-07,
+ "logits/chosen": -1.0021039247512817,
+ "logits/rejected": -0.9522125124931335,
+ "logps/chosen": -7.083625793457031,
+ "logps/rejected": -5.62723445892334,
+ "loss": 0.669,
+ "rewards/accuracies": 0.8828125,
+ "rewards/chosen": -14.068085670471191,
+ "rewards/margins": 3.6409800052642822,
+ "rewards/rejected": -17.709064483642578,
+ "step": 101
+ },
+ {
+ "epoch": 0.5594789166952349,
+ "grad_norm": 30.679626728308502,
+ "learning_rate": 4.855468326228638e-07,
+ "logits/chosen": -1.0606987476348877,
+ "logits/rejected": -1.041282296180725,
+ "logps/chosen": -7.568184852600098,
+ "logps/rejected": -6.082253456115723,
+ "loss": 0.8745,
+ "rewards/accuracies": 0.8046875,
+ "rewards/chosen": -15.205633163452148,
+ "rewards/margins": 3.714829206466675,
+ "rewards/rejected": -18.92046356201172,
+ "step": 102
+ },
+ {
+ "epoch": 0.5649640041138155,
+ "grad_norm": 27.494401307007365,
+ "learning_rate": 4.7591735306938134e-07,
+ "logits/chosen": -1.0469098091125488,
+ "logits/rejected": -0.9781535267829895,
+ "logps/chosen": -7.355569839477539,
+ "logps/rejected": -6.020066261291504,
+ "loss": 0.7655,
+ "rewards/accuracies": 0.8515625,
+ "rewards/chosen": -15.050165176391602,
+ "rewards/margins": 3.3387598991394043,
+ "rewards/rejected": -18.388925552368164,
+ "step": 103
+ },
+ {
+ "epoch": 0.5704490915323963,
+ "grad_norm": 37.8050143114576,
+ "learning_rate": 4.6629681923611603e-07,
+ "logits/chosen": -1.049713373184204,
+ "logits/rejected": -1.0141334533691406,
+ "logps/chosen": -7.472883224487305,
+ "logps/rejected": -6.059725284576416,
+ "loss": 0.9818,
+ "rewards/accuracies": 0.796875,
+ "rewards/chosen": -15.149312973022461,
+ "rewards/margins": 3.5328941345214844,
+ "rewards/rejected": -18.682207107543945,
+ "step": 104
+ },
+ {
+ "epoch": 0.575934178950977,
+ "grad_norm": 24.845125832684612,
+ "learning_rate": 4.5668880475865067e-07,
+ "logits/chosen": -1.0235170125961304,
+ "logits/rejected": -0.9582427144050598,
+ "logps/chosen": -7.626412391662598,
+ "logps/rejected": -6.162938594818115,
+ "loss": 0.7142,
+ "rewards/accuracies": 0.8515625,
+ "rewards/chosen": -15.407346725463867,
+ "rewards/margins": 3.658684730529785,
+ "rewards/rejected": -19.066030502319336,
+ "step": 105
+ },
+ {
+ "epoch": 0.5814192663695578,
+ "grad_norm": 24.20853718541258,
+ "learning_rate": 4.4709687862213864e-07,
+ "logits/chosen": -0.9750124216079712,
+ "logits/rejected": -0.9425258636474609,
+ "logps/chosen": -7.844966411590576,
+ "logps/rejected": -6.051673412322998,
+ "loss": 0.624,
+ "rewards/accuracies": 0.8671875,
+ "rewards/chosen": -15.129182815551758,
+ "rewards/margins": 4.4832329750061035,
+ "rewards/rejected": -19.612417221069336,
+ "step": 106
+ },
+ {
+ "epoch": 0.5869043537881385,
+ "grad_norm": 27.32408594646776,
+ "learning_rate": 4.3752460383557194e-07,
+ "logits/chosen": -0.9948883056640625,
+ "logits/rejected": -0.8997665643692017,
+ "logps/chosen": -7.177610397338867,
+ "logps/rejected": -5.7914228439331055,
+ "loss": 0.779,
+ "rewards/accuracies": 0.8515625,
+ "rewards/chosen": -14.478557586669922,
+ "rewards/margins": 3.465468406677246,
+ "rewards/rejected": -17.94402503967285,
+ "step": 107
+ },
+ {
+ "epoch": 0.5923894412067192,
+ "grad_norm": 25.295159101372597,
+ "learning_rate": 4.2797553610826797e-07,
+ "logits/chosen": -0.9283576011657715,
+ "logits/rejected": -0.8969117403030396,
+ "logps/chosen": -7.38961935043335,
+ "logps/rejected": -6.016010284423828,
+ "loss": 0.8094,
+ "rewards/accuracies": 0.8359375,
+ "rewards/chosen": -15.04002571105957,
+ "rewards/margins": 3.434022903442383,
+ "rewards/rejected": -18.474048614501953,
+ "step": 108
+ },
+ {
+ "epoch": 0.5978745286253,
+ "grad_norm": 34.809554467100526,
+ "learning_rate": 4.184532225290686e-07,
+ "logits/chosen": -0.8853582739830017,
+ "logits/rejected": -0.8778493404388428,
+ "logps/chosen": -7.672779560089111,
+ "logps/rejected": -5.923637390136719,
+ "loss": 0.6594,
+ "rewards/accuracies": 0.84375,
+ "rewards/chosen": -14.809093475341797,
+ "rewards/margins": 4.372855186462402,
+ "rewards/rejected": -19.181949615478516,
+ "step": 109
+ },
+ {
+ "epoch": 0.6033596160438807,
+ "grad_norm": 37.595761539961494,
+ "learning_rate": 4.089612002487428e-07,
+ "logits/chosen": -0.9878619909286499,
+ "logits/rejected": -0.9121577739715576,
+ "logps/chosen": -7.86918830871582,
+ "logps/rejected": -6.307096481323242,
+ "loss": 0.9853,
+ "rewards/accuracies": 0.7734375,
+ "rewards/chosen": -15.767744064331055,
+ "rewards/margins": 3.905228614807129,
+ "rewards/rejected": -19.672971725463867,
+ "step": 110
+ },
+ {
+ "epoch": 0.6088447034624614,
+ "grad_norm": 19.136373017418645,
+ "learning_rate": 3.995029951660776e-07,
+ "logits/chosen": -0.938258945941925,
+ "logits/rejected": -0.9154999256134033,
+ "logps/chosen": -7.287668704986572,
+ "logps/rejected": -5.683687210083008,
+ "loss": 0.6273,
+ "rewards/accuracies": 0.875,
+ "rewards/chosen": -14.20921802520752,
+ "rewards/margins": 4.009955406188965,
+ "rewards/rejected": -18.21917152404785,
+ "step": 111
+ },
+ {
+ "epoch": 0.6143297908810421,
+ "grad_norm": 41.38729707458279,
+ "learning_rate": 3.9008212061815207e-07,
+ "logits/chosen": -0.9403737783432007,
+ "logits/rejected": -0.8944230079650879,
+ "logps/chosen": -7.414663314819336,
+ "logps/rejected": -5.728695869445801,
+ "loss": 0.5671,
+ "rewards/accuracies": 0.875,
+ "rewards/chosen": -14.32174015045166,
+ "rewards/margins": 4.214918613433838,
+ "rewards/rejected": -18.536659240722656,
+ "step": 112
+ },
+ {
+ "epoch": 0.6198148782996229,
+ "grad_norm": 24.324325222505287,
+ "learning_rate": 3.8070207607527585e-07,
+ "logits/chosen": -0.9715641736984253,
+ "logits/rejected": -0.9244170784950256,
+ "logps/chosen": -6.609511852264404,
+ "logps/rejected": -5.317971229553223,
+ "loss": 1.0423,
+ "rewards/accuracies": 0.8203125,
+ "rewards/chosen": -13.294927597045898,
+ "rewards/margins": 3.228851795196533,
+ "rewards/rejected": -16.523780822753906,
+ "step": 113
+ },
+ {
+ "epoch": 0.6252999657182037,
+ "grad_norm": 25.97873694518042,
+ "learning_rate": 3.7136634584107783e-07,
+ "logits/chosen": -1.0553674697875977,
+ "logits/rejected": -0.9997435808181763,
+ "logps/chosen": -7.505800247192383,
+ "logps/rejected": -5.754428863525391,
+ "loss": 0.7409,
+ "rewards/accuracies": 0.84375,
+ "rewards/chosen": -14.386072158813477,
+ "rewards/margins": 4.378428936004639,
+ "rewards/rejected": -18.764501571655273,
+ "step": 114
+ },
+ {
+ "epoch": 0.6307850531367843,
+ "grad_norm": 33.98592167333287,
+ "learning_rate": 3.6207839775823047e-07,
+ "logits/chosen": -0.9378336071968079,
+ "logits/rejected": -0.9261949062347412,
+ "logps/chosen": -6.874807357788086,
+ "logps/rejected": -5.083373546600342,
+ "loss": 0.8674,
+ "rewards/accuracies": 0.8359375,
+ "rewards/chosen": -12.708434104919434,
+ "rewards/margins": 4.478583335876465,
+ "rewards/rejected": -17.1870174407959,
+ "step": 115
+ },
+ {
+ "epoch": 0.6362701405553651,
+ "grad_norm": 27.335955275549072,
+ "learning_rate": 3.5284168192028805e-07,
+ "logits/chosen": -0.9258574843406677,
+ "logits/rejected": -0.8993632793426514,
+ "logps/chosen": -6.7075514793396,
+ "logps/rejected": -5.005417346954346,
+ "loss": 0.6846,
+ "rewards/accuracies": 0.890625,
+ "rewards/chosen": -12.513543128967285,
+ "rewards/margins": 4.255335330963135,
+ "rewards/rejected": -16.768878936767578,
+ "step": 116
+ },
+ {
+ "epoch": 0.6417552279739458,
+ "grad_norm": 36.43999913709104,
+ "learning_rate": 3.4365962939011693e-07,
+ "logits/chosen": -0.9867472648620605,
+ "logits/rejected": -0.9383954405784607,
+ "logps/chosen": -6.8381452560424805,
+ "logps/rejected": -5.291529178619385,
+ "loss": 0.9343,
+ "rewards/accuracies": 0.8046875,
+ "rewards/chosen": -13.228822708129883,
+ "rewards/margins": 3.86653995513916,
+ "rewards/rejected": -17.09536361694336,
+ "step": 117
+ },
+ {
+ "epoch": 0.6472403153925266,
+ "grad_norm": 43.410283610352316,
+ "learning_rate": 3.345356509253958e-07,
+ "logits/chosen": -0.9349948167800903,
+ "logits/rejected": -0.8691989183425903,
+ "logps/chosen": -6.570774555206299,
+ "logps/rejected": -4.859616279602051,
+ "loss": 0.6338,
+ "rewards/accuracies": 0.90625,
+ "rewards/chosen": -12.149040222167969,
+ "rewards/margins": 4.277895927429199,
+ "rewards/rejected": -16.42693519592285,
+ "step": 118
+ },
+ {
+ "epoch": 0.6527254028111072,
+ "grad_norm": 30.13014793522752,
+ "learning_rate": 3.2547313571165967e-07,
+ "logits/chosen": -0.9361096024513245,
+ "logits/rejected": -0.9115648865699768,
+ "logps/chosen": -6.824566841125488,
+ "logps/rejected": -5.03220272064209,
+ "loss": 0.7826,
+ "rewards/accuracies": 0.859375,
+ "rewards/chosen": -12.580507278442383,
+ "rewards/margins": 4.480910778045654,
+ "rewards/rejected": -17.061420440673828,
+ "step": 119
+ },
+ {
+ "epoch": 0.658210490229688,
+ "grad_norm": 33.220837044211905,
+ "learning_rate": 3.1647545010335395e-07,
+ "logits/chosen": -0.9235398173332214,
+ "logits/rejected": -0.8107198476791382,
+ "logps/chosen": -6.378120422363281,
+ "logps/rejected": -4.839158058166504,
+ "loss": 0.6904,
+ "rewards/accuracies": 0.84375,
+ "rewards/chosen": -12.097895622253418,
+ "rewards/margins": 3.847404956817627,
+ "rewards/rejected": -15.945301055908203,
+ "step": 120
+ },
+ {
+ "epoch": 0.6636955776482688,
+ "grad_norm": 50.56573612879948,
+ "learning_rate": 3.075459363733727e-07,
+ "logits/chosen": -0.8829526901245117,
+ "logits/rejected": -0.8535292744636536,
+ "logps/chosen": -6.172534465789795,
+ "logps/rejected": -4.939080715179443,
+ "loss": 0.9172,
+ "rewards/accuracies": 0.7578125,
+ "rewards/chosen": -12.347702026367188,
+ "rewards/margins": 3.0836341381073,
+ "rewards/rejected": -15.431337356567383,
+ "step": 121
+ },
+ {
+ "epoch": 0.6691806650668495,
+ "grad_norm": 31.870637933820483,
+ "learning_rate": 2.9868791147154025e-07,
+ "logits/chosen": -0.9092215895652771,
+ "logits/rejected": -0.8585975170135498,
+ "logps/chosen": -6.820605278015137,
+ "logps/rejected": -5.349386215209961,
+ "loss": 0.868,
+ "rewards/accuracies": 0.8125,
+ "rewards/chosen": -13.373466491699219,
+ "rewards/margins": 3.678046226501465,
+ "rewards/rejected": -17.051511764526367,
+ "step": 122
+ },
+ {
+ "epoch": 0.6746657524854303,
+ "grad_norm": 28.176132333643206,
+ "learning_rate": 2.8990466579249917e-07,
+ "logits/chosen": -0.8528233766555786,
+ "logits/rejected": -0.7868634462356567,
+ "logps/chosen": -6.514227867126465,
+ "logps/rejected": -4.8763251304626465,
+ "loss": 0.659,
+ "rewards/accuracies": 0.84375,
+ "rewards/chosen": -12.190811157226562,
+ "rewards/margins": 4.094757556915283,
+ "rewards/rejected": -16.28557014465332,
+ "step": 123
+ },
+ {
+ "epoch": 0.6801508399040109,
+ "grad_norm": 38.990501071087174,
+ "learning_rate": 2.811994619534637e-07,
+ "logits/chosen": -0.9431190490722656,
+ "logits/rejected": -0.9019297957420349,
+ "logps/chosen": -7.381836891174316,
+ "logps/rejected": -5.60933780670166,
+ "loss": 0.6562,
+ "rewards/accuracies": 0.875,
+ "rewards/chosen": -14.023344993591309,
+ "rewards/margins": 4.431247711181641,
+ "rewards/rejected": -18.454591751098633,
+ "step": 124
+ },
+ {
+ "epoch": 0.6856359273225917,
+ "grad_norm": 23.287789798850373,
+ "learning_rate": 2.725755335822903e-07,
+ "logits/chosen": -0.9163570404052734,
+ "logits/rejected": -0.8643731474876404,
+ "logps/chosen": -7.203619003295898,
+ "logps/rejected": -5.303135395050049,
+ "loss": 0.5385,
+ "rewards/accuracies": 0.921875,
+ "rewards/chosen": -13.25783920288086,
+ "rewards/margins": 4.7512078285217285,
+ "rewards/rejected": -18.00904655456543,
+ "step": 125
+ },
+ {
+ "epoch": 0.6911210147411725,
+ "grad_norm": 29.532793849548835,
+ "learning_rate": 2.640360841163174e-07,
+ "logits/chosen": -0.87614506483078,
+ "logits/rejected": -0.8524197340011597,
+ "logps/chosen": -6.783047676086426,
+ "logps/rejected": -5.092715263366699,
+ "loss": 0.6031,
+ "rewards/accuracies": 0.875,
+ "rewards/chosen": -12.731788635253906,
+ "rewards/margins": 4.225830078125,
+ "rewards/rejected": -16.957618713378906,
+ "step": 126
+ },
+ {
+ "epoch": 0.6966061021597532,
+ "grad_norm": 30.505873793824883,
+ "learning_rate": 2.5558428561241816e-07,
+ "logits/chosen": -0.947504997253418,
+ "logits/rejected": -0.8782521486282349,
+ "logps/chosen": -6.791367530822754,
+ "logps/rejected": -5.176680564880371,
+ "loss": 0.6676,
+ "rewards/accuracies": 0.859375,
+ "rewards/chosen": -12.941701889038086,
+ "rewards/margins": 4.036717414855957,
+ "rewards/rejected": -16.97842025756836,
+ "step": 127
+ },
+ {
+ "epoch": 0.7020911895783339,
+ "grad_norm": 39.92525272801302,
+ "learning_rate": 2.472232775687119e-07,
+ "logits/chosen": -0.8722752332687378,
+ "logits/rejected": -0.856322705745697,
+ "logps/chosen": -7.144659042358398,
+ "logps/rejected": -5.182129859924316,
+ "loss": 0.7803,
+ "rewards/accuracies": 0.828125,
+ "rewards/chosen": -12.955324172973633,
+ "rewards/margins": 4.906323432922363,
+ "rewards/rejected": -17.86164665222168,
+ "step": 128
+ },
+ {
+ "epoch": 0.7075762769969146,
+ "grad_norm": 35.65648286534799,
+ "learning_rate": 2.3895616575836806e-07,
+ "logits/chosen": -0.8648374676704407,
+ "logits/rejected": -0.8587543964385986,
+ "logps/chosen": -7.462764263153076,
+ "logps/rejected": -5.415197372436523,
+ "loss": 0.5901,
+ "rewards/accuracies": 0.875,
+ "rewards/chosen": -13.537995338439941,
+ "rewards/margins": 5.1189165115356445,
+ "rewards/rejected": -18.656909942626953,
+ "step": 129
+ },
+ {
+ "epoch": 0.7130613644154954,
+ "grad_norm": 36.897396094592246,
+ "learning_rate": 2.3078602107593897e-07,
+ "logits/chosen": -0.9551251530647278,
+ "logits/rejected": -0.9301334619522095,
+ "logps/chosen": -7.187896251678467,
+ "logps/rejected": -5.676419258117676,
+ "loss": 0.7432,
+ "rewards/accuracies": 0.8515625,
+ "rewards/chosen": -14.191046714782715,
+ "rewards/margins": 3.7786920070648193,
+ "rewards/rejected": -17.96973991394043,
+ "step": 130
+ },
+ {
+ "epoch": 0.7185464518340761,
+ "grad_norm": 23.791455410978802,
+ "learning_rate": 2.2271587839664668e-07,
+ "logits/chosen": -0.8816163539886475,
+ "logits/rejected": -0.8654621839523315,
+ "logps/chosen": -7.688798427581787,
+ "logps/rejected": -5.900554180145264,
+ "loss": 0.7962,
+ "rewards/accuracies": 0.828125,
+ "rewards/chosen": -14.751386642456055,
+ "rewards/margins": 4.470608711242676,
+ "rewards/rejected": -19.221996307373047,
+ "step": 131
+ },
+ {
+ "epoch": 0.7240315392526568,
+ "grad_norm": 25.229342149520836,
+ "learning_rate": 2.1474873544905203e-07,
+ "logits/chosen": -0.9233815670013428,
+ "logits/rejected": -0.8769809007644653,
+ "logps/chosen": -7.945870876312256,
+ "logps/rejected": -6.068084239959717,
+ "loss": 0.7871,
+ "rewards/accuracies": 0.859375,
+ "rewards/chosen": -15.170208930969238,
+ "rewards/margins": 4.694468975067139,
+ "rewards/rejected": -19.86467933654785,
+ "step": 132
+ },
+ {
+ "epoch": 0.7295166266712376,
+ "grad_norm": 24.072357862431296,
+ "learning_rate": 2.0688755170151994e-07,
+ "logits/chosen": -0.9093427062034607,
+ "logits/rejected": -0.8254431486129761,
+ "logps/chosen": -7.6159138679504395,
+ "logps/rejected": -6.035447597503662,
+ "loss": 0.7131,
+ "rewards/accuracies": 0.8671875,
+ "rewards/chosen": -15.08862018585205,
+ "rewards/margins": 3.951165199279785,
+ "rewards/rejected": -19.039783477783203,
+ "step": 133
+ },
+ {
+ "epoch": 0.7350017140898183,
+ "grad_norm": 27.574889812468303,
+ "learning_rate": 1.991352472628978e-07,
+ "logits/chosen": -0.9855005741119385,
+ "logits/rejected": -0.8852315545082092,
+ "logps/chosen": -8.272013664245605,
+ "logps/rejected": -6.531748294830322,
+ "loss": 0.5673,
+ "rewards/accuracies": 0.8828125,
+ "rewards/chosen": -16.329370498657227,
+ "rewards/margins": 4.350663661956787,
+ "rewards/rejected": -20.68003273010254,
+ "step": 134
+ },
+ {
+ "epoch": 0.7404868015083991,
+ "grad_norm": 38.0684454587543,
+ "learning_rate": 1.9149470179781529e-07,
+ "logits/chosen": -0.8634744882583618,
+ "logits/rejected": -0.8510404825210571,
+ "logps/chosen": -8.125703811645508,
+ "logps/rejected": -6.487473011016846,
+ "loss": 0.9743,
+ "rewards/accuracies": 0.8125,
+ "rewards/chosen": -16.21868324279785,
+ "rewards/margins": 4.095577239990234,
+ "rewards/rejected": -20.314258575439453,
+ "step": 135
+ },
+ {
+ "epoch": 0.7459718889269797,
+ "grad_norm": 48.36799803983576,
+ "learning_rate": 1.8396875345700496e-07,
+ "logits/chosen": -0.9214343428611755,
+ "logits/rejected": -0.8962255120277405,
+ "logps/chosen": -8.190770149230957,
+ "logps/rejected": -6.314955711364746,
+ "loss": 0.5596,
+ "rewards/accuracies": 0.8984375,
+ "rewards/chosen": -15.78738784790039,
+ "rewards/margins": 4.689537048339844,
+ "rewards/rejected": -20.476924896240234,
+ "step": 136
+ },
+ {
+ "epoch": 0.7514569763455605,
+ "grad_norm": 44.16548053204111,
+ "learning_rate": 1.76560197823046e-07,
+ "logits/chosen": -0.919052004814148,
+ "logits/rejected": -0.8849231600761414,
+ "logps/chosen": -8.452275276184082,
+ "logps/rejected": -6.598462104797363,
+ "loss": 0.7156,
+ "rewards/accuracies": 0.875,
+ "rewards/chosen": -16.496156692504883,
+ "rewards/margins": 4.634530544281006,
+ "rewards/rejected": -21.130685806274414,
+ "step": 137
+ },
+ {
+ "epoch": 0.7569420637641412,
+ "grad_norm": 21.109729731146004,
+ "learning_rate": 1.6927178687191952e-07,
+ "logits/chosen": -0.9427747130393982,
+ "logits/rejected": -0.8983960151672363,
+ "logps/chosen": -8.133773803710938,
+ "logps/rejected": -6.293461322784424,
+ "loss": 0.5443,
+ "rewards/accuracies": 0.875,
+ "rewards/chosen": -15.733654022216797,
+ "rewards/margins": 4.600779056549072,
+ "rewards/rejected": -20.334434509277344,
+ "step": 138
+ },
+ {
+ "epoch": 0.762427151182722,
+ "grad_norm": 24.71923168446529,
+ "learning_rate": 1.6210622795076167e-07,
+ "logits/chosen": -0.9024847149848938,
+ "logits/rejected": -0.8320090770721436,
+ "logps/chosen": -7.866386413574219,
+ "logps/rejected": -5.999449729919434,
+ "loss": 0.7399,
+ "rewards/accuracies": 0.8359375,
+ "rewards/chosen": -14.998624801635742,
+ "rewards/margins": 4.6673407554626465,
+ "rewards/rejected": -19.665966033935547,
+ "step": 139
+ },
+ {
+ "epoch": 0.7679122386013028,
+ "grad_norm": 28.05368903550952,
+ "learning_rate": 1.5506618277219408e-07,
+ "logits/chosen": -0.9084888100624084,
+ "logits/rejected": -0.8153257966041565,
+ "logps/chosen": -8.255277633666992,
+ "logps/rejected": -6.376511096954346,
+ "loss": 0.6289,
+ "rewards/accuracies": 0.859375,
+ "rewards/chosen": -15.941277503967285,
+ "rewards/margins": 4.696916580200195,
+ "rewards/rejected": -20.638193130493164,
+ "step": 140
+ },
+ {
+ "epoch": 0.7733973260198834,
+ "grad_norm": 25.708532161588263,
+ "learning_rate": 1.481542664256075e-07,
+ "logits/chosen": -0.8618345856666565,
+ "logits/rejected": -0.7895917892456055,
+ "logps/chosen": -7.592902660369873,
+ "logps/rejected": -5.800815582275391,
+ "loss": 0.623,
+ "rewards/accuracies": 0.8671875,
+ "rewards/chosen": -14.50204086303711,
+ "rewards/margins": 4.480217933654785,
+ "rewards/rejected": -18.982257843017578,
+ "step": 141
+ },
+ {
+ "epoch": 0.7788824134384642,
+ "grad_norm": 24.709393184243222,
+ "learning_rate": 1.413730464057616e-07,
+ "logits/chosen": -0.8121160268783569,
+ "logits/rejected": -0.7455395460128784,
+ "logps/chosen": -7.604070663452148,
+ "logps/rejected": -5.874716281890869,
+ "loss": 0.6692,
+ "rewards/accuracies": 0.8359375,
+ "rewards/chosen": -14.68679141998291,
+ "rewards/margins": 4.323384761810303,
+ "rewards/rejected": -19.010177612304688,
+ "step": 142
+ },
+ {
+ "epoch": 0.7843675008570449,
+ "grad_norm": 38.97698904003614,
+ "learning_rate": 1.3472504165906612e-07,
+ "logits/chosen": -0.8006829619407654,
+ "logits/rejected": -0.7394671440124512,
+ "logps/chosen": -7.116863250732422,
+ "logps/rejected": -5.581884384155273,
+ "loss": 0.6601,
+ "rewards/accuracies": 0.890625,
+ "rewards/chosen": -13.954710960388184,
+ "rewards/margins": 3.837446928024292,
+ "rewards/rejected": -17.792160034179688,
+ "step": 143
+ },
+ {
+ "epoch": 0.7898525882756257,
+ "grad_norm": 29.640146576501934,
+ "learning_rate": 1.2821272164789543e-07,
+ "logits/chosen": -0.8447168469429016,
+ "logits/rejected": -0.7728930115699768,
+ "logps/chosen": -7.501246452331543,
+ "logps/rejected": -5.655298709869385,
+ "loss": 0.564,
+ "rewards/accuracies": 0.890625,
+ "rewards/chosen": -14.138248443603516,
+ "rewards/margins": 4.6148681640625,
+ "rewards/rejected": -18.753116607666016,
+ "step": 144
+ },
+ {
+ "epoch": 0.7953376756942063,
+ "grad_norm": 22.947844708118026,
+ "learning_rate": 1.2183850543328312e-07,
+ "logits/chosen": -0.8897333741188049,
+ "logits/rejected": -0.8205296397209167,
+ "logps/chosen": -7.398487091064453,
+ "logps/rejected": -5.790860652923584,
+ "loss": 0.7226,
+ "rewards/accuracies": 0.8515625,
+ "rewards/chosen": -14.477151870727539,
+ "rewards/margins": 4.019064426422119,
+ "rewards/rejected": -18.4962158203125,
+ "step": 145
+ },
+ {
+ "epoch": 0.8008227631127871,
+ "grad_norm": 27.798903132552176,
+ "learning_rate": 1.1560476077634069e-07,
+ "logits/chosen": -0.7919908761978149,
+ "logits/rejected": -0.8394799828529358,
+ "logps/chosen": -7.839868068695068,
+ "logps/rejected": -5.743566036224365,
+ "loss": 0.544,
+ "rewards/accuracies": 0.875,
+ "rewards/chosen": -14.358914375305176,
+ "rewards/margins": 5.240755081176758,
+ "rewards/rejected": -19.59967041015625,
+ "step": 146
+ },
+ {
+ "epoch": 0.8063078505313679,
+ "grad_norm": 29.92809246962988,
+ "learning_rate": 1.0951380325872977e-07,
+ "logits/chosen": -0.8169230818748474,
+ "logits/rejected": -0.7812893986701965,
+ "logps/chosen": -7.359820365905762,
+ "logps/rejected": -5.6241607666015625,
+ "loss": 0.7967,
+ "rewards/accuracies": 0.828125,
+ "rewards/chosen": -14.060400009155273,
+ "rewards/margins": 4.3391499519348145,
+ "rewards/rejected": -18.39954948425293,
+ "step": 147
+ },
+ {
+ "epoch": 0.8117929379499486,
+ "grad_norm": 27.792210529581745,
+ "learning_rate": 1.0356789542251936e-07,
+ "logits/chosen": -0.8669033050537109,
+ "logits/rejected": -0.8605407476425171,
+ "logps/chosen": -8.198552131652832,
+ "logps/rejected": -6.282137393951416,
+ "loss": 0.8025,
+ "rewards/accuracies": 0.84375,
+ "rewards/chosen": -15.705344200134277,
+ "rewards/margins": 4.791035175323486,
+ "rewards/rejected": -20.496379852294922,
+ "step": 148
+ },
+ {
+ "epoch": 0.8172780253685293,
+ "grad_norm": 28.277349818001756,
+ "learning_rate": 9.776924592974256e-08,
+ "logits/chosen": -0.8328518867492676,
+ "logits/rejected": -0.8236594796180725,
+ "logps/chosen": -7.214673042297363,
+ "logps/rejected": -5.481906414031982,
+ "loss": 0.7674,
+ "rewards/accuracies": 0.8515625,
+ "rewards/chosen": -13.704765319824219,
+ "rewards/margins": 4.331914901733398,
+ "rewards/rejected": -18.03668212890625,
+ "step": 149
+ },
+ {
+ "epoch": 0.82276311278711,
+ "grad_norm": 29.007836684575707,
+ "learning_rate": 9.212000874196952e-08,
+ "logits/chosen": -0.8581669330596924,
+ "logits/rejected": -0.8325639367103577,
+ "logps/chosen": -7.351170539855957,
+ "logps/rejected": -5.474079608917236,
+ "loss": 0.6909,
+ "rewards/accuracies": 0.8671875,
+ "rewards/chosen": -13.685198783874512,
+ "rewards/margins": 4.692727088928223,
+ "rewards/rejected": -18.377925872802734,
+ "step": 150
+ },
+ {
+ "epoch": 0.8282482002056908,
+ "grad_norm": 43.71571457854729,
+ "learning_rate": 8.662228232019875e-08,
+ "logits/chosen": -0.8501139879226685,
+ "logits/rejected": -0.8618481755256653,
+ "logps/chosen": -7.359824180603027,
+ "logps/rejected": -5.42210054397583,
+ "loss": 0.7332,
+ "rewards/accuracies": 0.8359375,
+ "rewards/chosen": -13.555251121520996,
+ "rewards/margins": 4.844309329986572,
+ "rewards/rejected": -18.399559020996094,
+ "step": 151
+ },
+ {
+ "epoch": 0.8337332876242715,
+ "grad_norm": 46.33073864540601,
+ "learning_rate": 8.127810884536402e-08,
+ "logits/chosen": -0.853046715259552,
+ "logits/rejected": -0.8423393964767456,
+ "logps/chosen": -6.985077857971191,
+ "logps/rejected": -5.142387390136719,
+ "loss": 0.6407,
+ "rewards/accuracies": 0.875,
+ "rewards/chosen": -12.855968475341797,
+ "rewards/margins": 4.606726169586182,
+ "rewards/rejected": -17.462696075439453,
+ "step": 152
+ },
+ {
+ "epoch": 0.8392183750428522,
+ "grad_norm": 42.89209343669169,
+ "learning_rate": 7.608947345974759e-08,
+ "logits/chosen": -0.920865535736084,
+ "logits/rejected": -0.8836889266967773,
+ "logps/chosen": -7.042182445526123,
+ "logps/rejected": -5.48915958404541,
+ "loss": 0.8117,
+ "rewards/accuracies": 0.8359375,
+ "rewards/chosen": -13.722898483276367,
+ "rewards/margins": 3.882556915283203,
+ "rewards/rejected": -17.605453491210938,
+ "step": 153
+ },
+ {
+ "epoch": 0.844703462461433,
+ "grad_norm": 37.7037024205324,
+ "learning_rate": 7.105830352958142e-08,
+ "logits/chosen": -0.9472789764404297,
+ "logits/rejected": -0.9106646180152893,
+ "logps/chosen": -7.459850311279297,
+ "logps/rejected": -5.479578971862793,
+ "loss": 0.6658,
+ "rewards/accuracies": 0.8671875,
+ "rewards/chosen": -13.698948860168457,
+ "rewards/margins": 4.950677871704102,
+ "rewards/rejected": -18.649625778198242,
+ "step": 154
+ },
+ {
+ "epoch": 0.8501885498800137,
+ "grad_norm": 24.0763873064064,
+ "learning_rate": 6.618646792910893e-08,
+ "logits/chosen": -0.8774456977844238,
+ "logits/rejected": -0.7841386795043945,
+ "logps/chosen": -6.863981246948242,
+ "logps/rejected": -5.015853404998779,
+ "loss": 0.5643,
+ "rewards/accuracies": 0.8984375,
+ "rewards/chosen": -12.539634704589844,
+ "rewards/margins": 4.620318412780762,
+ "rewards/rejected": -17.15995216369629,
+ "step": 155
+ },
+ {
+ "epoch": 0.8556736372985945,
+ "grad_norm": 32.66084265444344,
+ "learning_rate": 6.147577634637413e-08,
+ "logits/chosen": -0.9129813313484192,
+ "logits/rejected": -0.8859033584594727,
+ "logps/chosen": -7.296760082244873,
+ "logps/rejected": -5.64870548248291,
+ "loss": 0.7727,
+ "rewards/accuracies": 0.8359375,
+ "rewards/chosen": -14.121763229370117,
+ "rewards/margins": 4.120136737823486,
+ "rewards/rejected": -18.241899490356445,
+ "step": 156
+ },
+ {
+ "epoch": 0.8611587247171751,
+ "grad_norm": 23.94997399437051,
+ "learning_rate": 5.692797861099718e-08,
+ "logits/chosen": -0.8945199847221375,
+ "logits/rejected": -0.8570997714996338,
+ "logps/chosen": -6.75827693939209,
+ "logps/rejected": -5.06813907623291,
+ "loss": 0.5738,
+ "rewards/accuracies": 0.875,
+ "rewards/chosen": -12.670347213745117,
+ "rewards/margins": 4.22534704208374,
+ "rewards/rejected": -16.895692825317383,
+ "step": 157
+ },
+ {
+ "epoch": 0.8666438121357559,
+ "grad_norm": 24.150699682280717,
+ "learning_rate": 5.25447640441834e-08,
+ "logits/chosen": -0.954617977142334,
+ "logits/rejected": -0.8557642102241516,
+ "logps/chosen": -7.347589492797852,
+ "logps/rejected": -5.5732340812683105,
+ "loss": 0.6284,
+ "rewards/accuracies": 0.8671875,
+ "rewards/chosen": -13.933087348937988,
+ "rewards/margins": 4.435887813568115,
2378
+ "rewards/rejected": -18.368972778320312,
2379
+ "step": 158
2380
+ },
2381
+ {
2382
+ "epoch": 0.8721288995543367,
2383
+ "grad_norm": 36.75533433702862,
2384
+ "learning_rate": 4.832776083120982e-08,
2385
+ "logits/chosen": -0.9205527305603027,
2386
+ "logits/rejected": -0.8304504752159119,
2387
+ "logps/chosen": -7.045970439910889,
2388
+ "logps/rejected": -5.256443023681641,
2389
+ "loss": 0.6382,
2390
+ "rewards/accuracies": 0.8828125,
2391
+ "rewards/chosen": -13.141106605529785,
2392
+ "rewards/margins": 4.473819732666016,
2393
+ "rewards/rejected": -17.614925384521484,
2394
+ "step": 159
2395
+ },
2396
+ {
2397
+ "epoch": 0.8776139869729174,
2398
+ "grad_norm": 31.994585110678578,
2399
+ "learning_rate": 4.427853541662091e-08,
2400
+ "logits/chosen": -0.9841543436050415,
2401
+ "logits/rejected": -0.8539234399795532,
2402
+ "logps/chosen": -7.1274094581604,
2403
+ "logps/rejected": -5.204132080078125,
2404
+ "loss": 0.6011,
2405
+ "rewards/accuracies": 0.875,
2406
+ "rewards/chosen": -13.010330200195312,
2407
+ "rewards/margins": 4.808194160461426,
2408
+ "rewards/rejected": -17.818523406982422,
2409
+ "step": 160
2410
+ },
2411
+ {
2412
+ "epoch": 0.8830990743914982,
2413
+ "grad_norm": 28.795886036839306,
2414
+ "learning_rate": 4.039859192235778e-08,
2415
+ "logits/chosen": -0.9625253677368164,
2416
+ "logits/rejected": -0.9089056849479675,
2417
+ "logps/chosen": -7.486809253692627,
2418
+ "logps/rejected": -5.702870845794678,
2419
+ "loss": 0.8583,
2420
+ "rewards/accuracies": 0.8203125,
2421
+ "rewards/chosen": -14.257177352905273,
2422
+ "rewards/margins": 4.459846496582031,
2423
+ "rewards/rejected": -18.717023849487305,
2424
+ "step": 161
2425
+ },
2426
+ {
2427
+ "epoch": 0.8885841618100788,
2428
+ "grad_norm": 26.44205968756603,
2429
+ "learning_rate": 3.668937158903901e-08,
2430
+ "logits/chosen": -0.9169929027557373,
2431
+ "logits/rejected": -0.8381502628326416,
2432
+ "logps/chosen": -7.5406494140625,
2433
+ "logps/rejected": -5.6013288497924805,
2434
+ "loss": 0.5664,
2435
+ "rewards/accuracies": 0.8984375,
2436
+ "rewards/chosen": -14.003321647644043,
2437
+ "rewards/margins": 4.848302364349365,
2438
+ "rewards/rejected": -18.85162353515625,
2439
+ "step": 162
2440
+ },
2441
+ {
2442
+ "epoch": 0.8940692492286596,
2443
+ "grad_norm": 27.304950853911894,
2444
+ "learning_rate": 3.3152252240598086e-08,
2445
+ "logits/chosen": -0.9413248896598816,
2446
+ "logits/rejected": -0.8493109941482544,
2447
+ "logps/chosen": -7.453137397766113,
2448
+ "logps/rejected": -5.501208782196045,
2449
+ "loss": 0.5001,
2450
+ "rewards/accuracies": 0.90625,
2451
+ "rewards/chosen": -13.753022193908691,
2452
+ "rewards/margins": 4.87982177734375,
2453
+ "rewards/rejected": -18.632844924926758,
2454
+ "step": 163
2455
+ },
2456
+ {
2457
+ "epoch": 0.8995543366472403,
2458
+ "grad_norm": 20.074530093573202,
2459
+ "learning_rate": 2.978854777247841e-08,
2460
+ "logits/chosen": -0.9120803475379944,
2461
+ "logits/rejected": -0.8509462475776672,
2462
+ "logps/chosen": -7.397423267364502,
2463
+ "logps/rejected": -5.588208198547363,
2464
+ "loss": 0.6246,
2465
+ "rewards/accuracies": 0.875,
2466
+ "rewards/chosen": -13.97052001953125,
2467
+ "rewards/margins": 4.523037433624268,
2468
+ "rewards/rejected": -18.49355697631836,
2469
+ "step": 164
2470
+ },
2471
+ {
2472
+ "epoch": 0.905039424065821,
2473
+ "grad_norm": 26.09353600189251,
2474
+ "learning_rate": 2.6599507663574384e-08,
2475
+ "logits/chosen": -0.952299952507019,
2476
+ "logits/rejected": -0.8652746677398682,
2477
+ "logps/chosen": -7.684518814086914,
2478
+ "logps/rejected": -5.83120584487915,
2479
+ "loss": 0.5817,
2480
+ "rewards/accuracies": 0.890625,
2481
+ "rewards/chosen": -14.57801628112793,
2482
+ "rewards/margins": 4.633281707763672,
2483
+ "rewards/rejected": -19.21129608154297,
2484
+ "step": 165
2485
+ },
2486
+ {
2487
+ "epoch": 0.9105245114844018,
2488
+ "grad_norm": 38.37398443400821,
2489
+ "learning_rate": 2.358631651210141e-08,
2490
+ "logits/chosen": -0.8585479259490967,
2491
+ "logits/rejected": -0.8270218372344971,
2492
+ "logps/chosen": -7.32467794418335,
2493
+ "logps/rejected": -5.463205337524414,
2494
+ "loss": 0.5111,
2495
+ "rewards/accuracies": 0.8828125,
2496
+ "rewards/chosen": -13.658012390136719,
2497
+ "rewards/margins": 4.653683185577393,
2498
+ "rewards/rejected": -18.311695098876953,
2499
+ "step": 166
2500
+ },
2501
+ {
2502
+ "epoch": 0.9160095989029825,
2503
+ "grad_norm": 17.666453011965785,
2504
+ "learning_rate": 2.0750093595565733e-08,
2505
+ "logits/chosen": -0.8858319520950317,
2506
+ "logits/rejected": -0.8542614579200745,
2507
+ "logps/chosen": -7.348204612731934,
2508
+ "logps/rejected": -5.504924774169922,
2509
+ "loss": 0.6247,
2510
+ "rewards/accuracies": 0.8671875,
2511
+ "rewards/chosen": -13.762311935424805,
2512
+ "rewards/margins": 4.608198165893555,
2513
+ "rewards/rejected": -18.37051010131836,
2514
+ "step": 167
2515
+ },
2516
+ {
2517
+ "epoch": 0.9214946863215633,
2518
+ "grad_norm": 19.09191014386972,
2519
+ "learning_rate": 1.8091892454998593e-08,
2520
+ "logits/chosen": -0.8447603583335876,
2521
+ "logits/rejected": -0.8495924472808838,
2522
+ "logps/chosen": -7.054547309875488,
2523
+ "logps/rejected": -5.083424091339111,
2524
+ "loss": 0.6335,
2525
+ "rewards/accuracies": 0.8828125,
2526
+ "rewards/chosen": -12.708559036254883,
2527
+ "rewards/margins": 4.927809238433838,
2528
+ "rewards/rejected": -17.636367797851562,
2529
+ "step": 168
2530
+ },
2531
+ {
2532
+ "epoch": 0.926979773740144,
2533
+ "grad_norm": 38.37416244304177,
2534
+ "learning_rate": 1.5612700503608967e-08,
2535
+ "logits/chosen": -0.95904541015625,
2536
+ "logits/rejected": -0.874359130859375,
2537
+ "logps/chosen": -8.061529159545898,
2538
+ "logps/rejected": -6.1823248863220215,
2539
+ "loss": 0.79,
2540
+ "rewards/accuracies": 0.8359375,
2541
+ "rewards/chosen": -15.455812454223633,
2542
+ "rewards/margins": 4.698009967803955,
2543
+ "rewards/rejected": -20.153823852539062,
2544
+ "step": 169
2545
+ },
2546
+ {
2547
+ "epoch": 0.9324648611587247,
2548
+ "grad_norm": 20.52317809637831,
2549
+ "learning_rate": 1.3313438659999399e-08,
2550
+ "logits/chosen": -0.8840410113334656,
2551
+ "logits/rejected": -0.8565166592597961,
2552
+ "logps/chosen": -7.426989555358887,
2553
+ "logps/rejected": -5.389143943786621,
2554
+ "loss": 0.6073,
2555
+ "rewards/accuracies": 0.8671875,
2556
+ "rewards/chosen": -13.472861289978027,
2557
+ "rewards/margins": 5.094613552093506,
2558
+ "rewards/rejected": -18.567476272583008,
2559
+ "step": 170
2560
+ },
2561
+ {
2562
+ "epoch": 0.9379499485773054,
2563
+ "grad_norm": 28.898112504649927,
2564
+ "learning_rate": 1.119496100608297e-08,
2565
+ "logits/chosen": -0.902927815914154,
2566
+ "logits/rejected": -0.8713952302932739,
2567
+ "logps/chosen": -7.5428547859191895,
2568
+ "logps/rejected": -5.551333904266357,
2569
+ "loss": 0.6478,
2570
+ "rewards/accuracies": 0.8984375,
2571
+ "rewards/chosen": -13.878334045410156,
2572
+ "rewards/margins": 4.978802680969238,
2573
+ "rewards/rejected": -18.857135772705078,
2574
+ "step": 171
2575
+ },
2576
+ {
2577
+ "epoch": 0.9434350359958862,
2578
+ "grad_norm": 30.615659518253594,
2579
+ "learning_rate": 9.258054469825972e-09,
2580
+ "logits/chosen": -0.9755229949951172,
2581
+ "logits/rejected": -0.8522156476974487,
2582
+ "logps/chosen": -7.569779396057129,
2583
+ "logps/rejected": -5.832912445068359,
2584
+ "loss": 0.4678,
2585
+ "rewards/accuracies": 0.921875,
2586
+ "rewards/chosen": -14.582280158996582,
2587
+ "rewards/margins": 4.34216833114624,
2588
+ "rewards/rejected": -18.924448013305664,
2589
+ "step": 172
2590
+ },
2591
+ {
2592
+ "epoch": 0.948920123414467,
2593
+ "grad_norm": 24.650975590320186,
2594
+ "learning_rate": 7.503438532937168e-09,
2595
+ "logits/chosen": -0.8953875303268433,
2596
+ "logits/rejected": -0.8538772463798523,
2597
+ "logps/chosen": -7.302203178405762,
2598
+ "logps/rejected": -5.746313095092773,
2599
+ "loss": 0.8124,
2600
+ "rewards/accuracies": 0.8515625,
2601
+ "rewards/chosen": -14.365781784057617,
2602
+ "rewards/margins": 3.8897247314453125,
2603
+ "rewards/rejected": -18.255508422851562,
2604
+ "step": 173
2605
+ },
2606
+ {
2607
+ "epoch": 0.9544052108330476,
2608
+ "grad_norm": 22.56008245408754,
2609
+ "learning_rate": 5.931764963608865e-09,
2610
+ "logits/chosen": -0.8796699047088623,
2611
+ "logits/rejected": -0.8065083026885986,
2612
+ "logps/chosen": -7.606590270996094,
2613
+ "logps/rejected": -5.541936874389648,
2614
+ "loss": 0.5972,
2615
+ "rewards/accuracies": 0.875,
2616
+ "rewards/chosen": -13.854841232299805,
2617
+ "rewards/margins": 5.1616339683532715,
2618
+ "rewards/rejected": -19.016477584838867,
2619
+ "step": 174
2620
+ },
2621
+ {
2622
+ "epoch": 0.9598902982516284,
2623
+ "grad_norm": 27.265075122995274,
2624
+ "learning_rate": 4.543617574412184e-09,
2625
+ "logits/chosen": -0.9127550721168518,
2626
+ "logits/rejected": -0.8750625848770142,
2627
+ "logps/chosen": -7.915278434753418,
2628
+ "logps/rejected": -6.025564670562744,
2629
+ "loss": 0.8845,
2630
+ "rewards/accuracies": 0.8046875,
2631
+ "rewards/chosen": -15.063911437988281,
2632
+ "rewards/margins": 4.724285125732422,
2633
+ "rewards/rejected": -19.788196563720703,
2634
+ "step": 175
2635
+ },
2636
+ {
2637
+ "epoch": 0.9653753856702091,
2638
+ "grad_norm": 28.882962456249174,
2639
+ "learning_rate": 3.3395120054343086e-09,
2640
+ "logits/chosen": -0.9111831188201904,
2641
+ "logits/rejected": -0.8427572250366211,
2642
+ "logps/chosen": -7.206770420074463,
2643
+ "logps/rejected": -5.539353847503662,
2644
+ "loss": 0.7588,
2645
+ "rewards/accuracies": 0.8203125,
2646
+ "rewards/chosen": -13.848384857177734,
2647
+ "rewards/margins": 4.168540000915527,
2648
+ "rewards/rejected": -18.016923904418945,
2649
+ "step": 176
2650
+ },
2651
+ {
2652
+ "epoch": 0.9708604730887899,
2653
+ "grad_norm": 31.271655213972466,
2654
+ "learning_rate": 2.3198955327393686e-09,
2655
+ "logits/chosen": -0.9476001262664795,
2656
+ "logits/rejected": -0.8903546333312988,
2657
+ "logps/chosen": -7.501628875732422,
2658
+ "logps/rejected": -5.69838809967041,
2659
+ "loss": 0.603,
2660
+ "rewards/accuracies": 0.875,
2661
+ "rewards/chosen": -14.245970726013184,
2662
+ "rewards/margins": 4.508101463317871,
2663
+ "rewards/rejected": -18.754070281982422,
2664
+ "step": 177
2665
+ },
2666
+ {
2667
+ "epoch": 0.9763455605073705,
2668
+ "grad_norm": 27.6286427872623,
2669
+ "learning_rate": 1.4851469022233997e-09,
2670
+ "logits/chosen": -0.9050301313400269,
2671
+ "logits/rejected": -0.8429163694381714,
2672
+ "logps/chosen": -7.289128303527832,
2673
+ "logps/rejected": -5.459531784057617,
2674
+ "loss": 0.478,
2675
+ "rewards/accuracies": 0.921875,
2676
+ "rewards/chosen": -13.648829460144043,
2677
+ "rewards/margins": 4.5739922523498535,
2678
+ "rewards/rejected": -18.222822189331055,
2679
+ "step": 178
2680
+ },
2681
+ {
2682
+ "epoch": 0.9818306479259513,
2683
+ "grad_norm": 33.536490419312095,
2684
+ "learning_rate": 8.35576188926046e-10,
2685
+ "logits/chosen": -0.8602910041809082,
2686
+ "logits/rejected": -0.8685486912727356,
2687
+ "logps/chosen": -7.6485795974731445,
2688
+ "logps/rejected": -5.617213249206543,
2689
+ "loss": 0.5532,
2690
+ "rewards/accuracies": 0.8984375,
2691
+ "rewards/chosen": -14.043033599853516,
2692
+ "rewards/margins": 5.078416347503662,
2693
+ "rewards/rejected": -19.12145233154297,
2694
+ "step": 179
2695
+ },
2696
+ {
2697
+ "epoch": 0.9873157353445321,
2698
+ "grad_norm": 36.03602197080609,
2699
+ "learning_rate": 3.71424681850141e-10,
2700
+ "logits/chosen": -0.9029905200004578,
2701
+ "logits/rejected": -0.8996679186820984,
2702
+ "logps/chosen": -7.467672348022461,
2703
+ "logps/rejected": -5.66632080078125,
2704
+ "loss": 0.692,
2705
+ "rewards/accuracies": 0.84375,
2706
+ "rewards/chosen": -14.165802001953125,
2707
+ "rewards/margins": 4.503378391265869,
2708
+ "rewards/rejected": -18.66918182373047,
2709
+ "step": 180
2710
+ },
2711
+ {
2712
+ "epoch": 0.9928008227631128,
2713
+ "grad_norm": 30.463376086047504,
2714
+ "learning_rate": 9.286479433257e-11,
2715
+ "logits/chosen": -0.9386723041534424,
2716
+ "logits/rejected": -0.8105076551437378,
2717
+ "logps/chosen": -7.705523490905762,
2718
+ "logps/rejected": -5.895651817321777,
2719
+ "loss": 0.6545,
2720
+ "rewards/accuracies": 0.84375,
2721
+ "rewards/chosen": -14.739130020141602,
2722
+ "rewards/margins": 4.524680137634277,
2723
+ "rewards/rejected": -19.263809204101562,
2724
+ "step": 181
2725
+ },
2726
+ {
2727
+ "epoch": 0.9982859101816935,
2728
+ "grad_norm": 27.85120971616897,
2729
+ "learning_rate": 0.0,
2730
+ "logits/chosen": -0.8981151580810547,
2731
+ "logits/rejected": -0.8777621388435364,
2732
+ "logps/chosen": -8.53371524810791,
2733
+ "logps/rejected": -5.803772449493408,
2734
+ "loss": 0.3779,
2735
+ "rewards/accuracies": 0.953125,
2736
+ "rewards/chosen": -14.509430885314941,
2737
+ "rewards/margins": 6.824857234954834,
2738
+ "rewards/rejected": -21.33428955078125,
2739
+ "step": 182
2740
+ },
2741
+ {
2742
+ "epoch": 0.9982859101816935,
2743
+ "step": 182,
2744
+ "total_flos": 58779245903872.0,
2745
+ "train_loss": 1.1193010831599708,
2746
+ "train_runtime": 13670.339,
2747
+ "train_samples_per_second": 1.707,
2748
+ "train_steps_per_second": 0.013
2749
+ }
2750
+ ],
2751
+ "logging_steps": 1,
2752
+ "max_steps": 182,
2753
+ "num_input_tokens_seen": 0,
2754
+ "num_train_epochs": 1,
2755
+ "save_steps": 182,
2756
+ "stateful_callbacks": {
2757
+ "TrainerControl": {
2758
+ "args": {
2759
+ "should_epoch_stop": false,
2760
+ "should_evaluate": false,
2761
+ "should_log": false,
2762
+ "should_save": true,
2763
+ "should_training_stop": true
2764
+ },
2765
+ "attributes": {}
2766
+ }
2767
+ },
2768
+ "total_flos": 58779245903872.0,
2769
+ "train_batch_size": 1,
2770
+ "trial_name": null,
2771
+ "trial_params": null
2772
+ }
training_args.bin ADDED
@@ -0,0 +1,3 @@
 
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d4143040e12d17e7ce35ebd0bfd82f6da325a56498f0606da029c43ea7027edb
+ size 7288
training_loss.png ADDED
training_rewards_accuracies.png ADDED
vocab.json ADDED
The diff for this file is too large to render.
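
Note: the two plot files added in this commit can be regenerated from the per-step log entries in the diff above (the Trainer's `log_history`, presumably saved as `trainer_state.json` — that filename is not visible in this part of the diff and is an assumption). A minimal sketch using only `json` and `matplotlib`:

```python
import json

import matplotlib.pyplot as plt

# Load the trainer state; the schema matches the diff above:
# one dict per logged step with "loss", "rewards/accuracies", etc.
# NOTE: "trainer_state.json" is an assumed filename.
with open("trainer_state.json") as f:
    state = json.load(f)

# Keep per-step entries only; the final summary entry has
# "train_loss" but no "loss" key, so this filter drops it.
logs = [entry for entry in state["log_history"] if "loss" in entry]
steps = [entry["step"] for entry in logs]

plt.figure()
plt.plot(steps, [entry["loss"] for entry in logs])
plt.xlabel("step")
plt.ylabel("loss")
plt.savefig("training_loss.png")

plt.figure()
plt.plot(steps, [entry["rewards/accuracies"] for entry in logs])
plt.xlabel("step")
plt.ylabel("rewards/accuracies")
plt.savefig("training_rewards_accuracies.png")
```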