ngocquangt2k46 committed (verified)
Commit e099111 · 1 Parent(s): 1dbe9d4

Training in progress, step 20, checkpoint

last-checkpoint/adapter_config.json CHANGED
@@ -21,12 +21,12 @@
   "revision": null,
   "target_modules": [
     "o_proj",
+    "down_proj",
     "k_proj",
     "v_proj",
     "q_proj",
-    "gate_proj",
-    "down_proj",
-    "up_proj"
+    "up_proj",
+    "gate_proj"
   ],
   "task_type": "CAUSAL_LM",
   "use_dora": false,
last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:83fe1a514b50fc2051d3a4b486c067b54e49f86faedf30876f758bdf64a21163
+oid sha256:c129f22828d9a11a70b1b6a912223b1c18badbd13ded6a14546603a09d84074b
 size 50624
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:0c0102d0c01d6af6ca04d9582e07651587d90c8b92564821fd2465d025253a70
+oid sha256:08421e6f0b59e1bd7f90d8d5d4a01b9ed4e59046adf26cfad2d2fdd7067acd3f
 size 111142
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:6279e2d6d30b2ce7f2189e648e383ab146d51fb0fecf44d1e16d05504562e6f8
+oid sha256:f0cab76d95a75c34838cec8f4adace53a2d256142482eba847df4bb06de7690f
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:ff2736979009751c0c6b0ddcc5f6544d6f723aa752b4798eab0b70fb76cf0083
+oid sha256:532138a5ca880d8da393ae449e5715b2766def36b8838785ca08d07228b119b7
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,16 +1,16 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.00487685930260912,
+  "epoch": 0.006502479070145493,
   "eval_steps": 5,
-  "global_step": 15,
+  "global_step": 20,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
   "log_history": [
     {
       "epoch": 0.0003251239535072746,
-      "grad_norm": 0.024652473628520966,
+      "grad_norm": 0.021640103310346603,
       "learning_rate": 1e-05,
       "loss": 11.7645,
       "step": 1
@@ -18,132 +18,175 @@
     {
       "epoch": 0.0003251239535072746,
       "eval_loss": 11.76447582244873,
-      "eval_runtime": 276.626,
-      "eval_samples_per_second": 37.455,
-      "eval_steps_per_second": 18.729,
+      "eval_runtime": 249.3031,
+      "eval_samples_per_second": 41.56,
+      "eval_steps_per_second": 20.782,
       "step": 1
     },
     {
       "epoch": 0.0006502479070145493,
-      "grad_norm": 0.027672940865159035,
+      "grad_norm": 0.024706723168492317,
       "learning_rate": 2e-05,
       "loss": 11.7658,
       "step": 2
     },
     {
       "epoch": 0.000975371860521824,
-      "grad_norm": 0.025643622502684593,
+      "grad_norm": 0.022929754108190536,
       "learning_rate": 3e-05,
       "loss": 11.7649,
       "step": 3
     },
     {
       "epoch": 0.0013004958140290985,
-      "grad_norm": 0.025678519159555435,
+      "grad_norm": 0.022171195596456528,
       "learning_rate": 4e-05,
       "loss": 11.7648,
       "step": 4
     },
     {
       "epoch": 0.0016256197675363732,
-      "grad_norm": 0.02572811394929886,
+      "grad_norm": 0.02208411693572998,
       "learning_rate": 5e-05,
       "loss": 11.7649,
       "step": 5
     },
     {
       "epoch": 0.0016256197675363732,
-      "eval_loss": 11.764395713806152,
-      "eval_runtime": 276.5749,
-      "eval_samples_per_second": 37.462,
-      "eval_steps_per_second": 18.733,
+      "eval_loss": 11.764402389526367,
+      "eval_runtime": 249.1579,
+      "eval_samples_per_second": 41.584,
+      "eval_steps_per_second": 20.794,
       "step": 5
     },
     {
       "epoch": 0.001950743721043648,
-      "grad_norm": 0.026358768343925476,
+      "grad_norm": 0.02443164400756359,
       "learning_rate": 6e-05,
       "loss": 11.7634,
       "step": 6
     },
     {
       "epoch": 0.0022758676745509225,
-      "grad_norm": 0.026500564068555832,
+      "grad_norm": 0.023470092564821243,
       "learning_rate": 7e-05,
       "loss": 11.7654,
       "step": 7
     },
     {
       "epoch": 0.002600991628058197,
-      "grad_norm": 0.02642131596803665,
+      "grad_norm": 0.023716744035482407,
       "learning_rate": 8e-05,
       "loss": 11.7644,
       "step": 8
     },
     {
       "epoch": 0.002926115581565472,
-      "grad_norm": 0.02585265040397644,
+      "grad_norm": 0.022970277816057205,
       "learning_rate": 9e-05,
       "loss": 11.7658,
       "step": 9
     },
     {
       "epoch": 0.0032512395350727465,
-      "grad_norm": 0.024854907765984535,
+      "grad_norm": 0.022244542837142944,
       "learning_rate": 0.0001,
       "loss": 11.7641,
       "step": 10
     },
     {
       "epoch": 0.0032512395350727465,
-      "eval_loss": 11.76411247253418,
-      "eval_runtime": 276.5048,
-      "eval_samples_per_second": 37.471,
-      "eval_steps_per_second": 18.737,
+      "eval_loss": 11.764139175415039,
+      "eval_runtime": 248.9949,
+      "eval_samples_per_second": 41.611,
+      "eval_steps_per_second": 20.808,
       "step": 10
     },
     {
       "epoch": 0.003576363488580021,
-      "grad_norm": 0.02757527120411396,
+      "grad_norm": 0.024796368554234505,
       "learning_rate": 9.755282581475769e-05,
-      "loss": 11.7652,
+      "loss": 11.7653,
       "step": 11
     },
     {
       "epoch": 0.003901487442087296,
-      "grad_norm": 0.028088003396987915,
+      "grad_norm": 0.024691808968782425,
       "learning_rate": 9.045084971874738e-05,
-      "loss": 11.7641,
+      "loss": 11.7642,
       "step": 12
     },
     {
       "epoch": 0.00422661139559457,
-      "grad_norm": 0.02876427210867405,
+      "grad_norm": 0.026635609567165375,
       "learning_rate": 7.938926261462366e-05,
-      "loss": 11.7642,
+      "loss": 11.7643,
       "step": 13
     },
     {
       "epoch": 0.004551735349101845,
-      "grad_norm": 0.027249282225966454,
+      "grad_norm": 0.024080852046608925,
       "learning_rate": 6.545084971874738e-05,
-      "loss": 11.7638,
+      "loss": 11.7639,
       "step": 14
     },
     {
       "epoch": 0.00487685930260912,
-      "grad_norm": 0.025040265172719955,
+      "grad_norm": 0.021976996213197708,
       "learning_rate": 5e-05,
       "loss": 11.763,
       "step": 15
     },
     {
       "epoch": 0.00487685930260912,
-      "eval_loss": 11.763741493225098,
-      "eval_runtime": 276.3837,
-      "eval_samples_per_second": 37.488,
-      "eval_steps_per_second": 18.746,
+      "eval_loss": 11.7637939453125,
+      "eval_runtime": 248.9817,
+      "eval_samples_per_second": 41.613,
+      "eval_steps_per_second": 20.809,
       "step": 15
+    },
+    {
+      "epoch": 0.005201983256116394,
+      "grad_norm": 0.021976066753268242,
+      "learning_rate": 3.4549150281252636e-05,
+      "loss": 11.7639,
+      "step": 16
+    },
+    {
+      "epoch": 0.005527107209623669,
+      "grad_norm": 0.0250637736171484,
+      "learning_rate": 2.061073738537635e-05,
+      "loss": 11.7633,
+      "step": 17
+    },
+    {
+      "epoch": 0.005852231163130944,
+      "grad_norm": 0.024703800678253174,
+      "learning_rate": 9.549150281252633e-06,
+      "loss": 11.7632,
+      "step": 18
+    },
+    {
+      "epoch": 0.006177355116638218,
+      "grad_norm": 0.02371840551495552,
+      "learning_rate": 2.4471741852423237e-06,
+      "loss": 11.7637,
+      "step": 19
+    },
+    {
+      "epoch": 0.006502479070145493,
+      "grad_norm": 0.027609504759311676,
+      "learning_rate": 0.0,
+      "loss": 11.764,
+      "step": 20
+    },
+    {
+      "epoch": 0.006502479070145493,
+      "eval_loss": 11.763700485229492,
+      "eval_runtime": 248.9974,
+      "eval_samples_per_second": 41.611,
+      "eval_steps_per_second": 20.807,
+      "step": 20
     }
   ],
   "logging_steps": 1,
@@ -158,12 +201,12 @@
         "should_evaluate": false,
         "should_log": false,
         "should_save": true,
-        "should_training_stop": false
+        "should_training_stop": true
       },
       "attributes": {}
     }
   },
-  "total_flos": 48888010506240.0,
+  "total_flos": 65184014008320.0,
   "train_batch_size": 2,
   "trial_name": null,
   "trial_params": null
last-checkpoint/training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:e17a6c83dec956fb9efedd2c17093957afde82b766a4186ec8d108b6cbea110d
+oid sha256:9a413dd30c82bf1458b4c9aa8cd9fc6df506b56de5b15f10d2e2a254ff755ea0
 size 6776
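
Note: a minimal usage sketch for loading the LoRA adapter stored in this checkpoint with PEFT. The base model is not identified anywhere in this diff, so "base-model-name" is a placeholder:

from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base = AutoModelForCausalLM.from_pretrained("base-model-name")   # placeholder base model
tokenizer = AutoTokenizer.from_pretrained("base-model-name")     # placeholder base model
model = PeftModel.from_pretrained(base, "last-checkpoint")       # reads adapter_config.json and adapter_model.safetensors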