leixa committed
Commit daafd27 · verified · 1 Parent(s): 3f84c73

Training in progress, step 84, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:c5727aed7264895ce483e3134fbb2b431bf979e4ce5f8dca7474305ae25cb545
+oid sha256:f96fe0f77223bf21e72bb8cec824d6e8118b382aeb4fe8d6ab68534e260cbf8d
 size 93608
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:c2c570c5c3aa5eebbe9d69815e16860b95f7b21fa85b9d5b528b7e153ad5e5cf
+oid sha256:b9ecd8d4edbc1eb01635b7969b0317e2e2ac7ee4e46b9c2667bf035cf5a6df33
 size 197158
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:4a75ae50ef05b0af49c3969f35c925149f1546408e414606cff98807664e1d74
+oid sha256:103b84660003cbb55211ca46c75dea4086b05940b67e924565c8120dfc91f67a
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:d24225e147d7b9425c760fd15a44cb007389a8d29dfe49f25bc43f19fd631f65
+oid sha256:2df224011d0e75c4a97901f6c1b2930bba4bc3a9aa7c877e6c91e796bec6013f
 size 1064
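
Each of the binary files above is stored as a Git LFS pointer, so the diff only swaps the oid sha256 line while the size stays the same. As a minimal sketch of how a downloaded payload can be checked against its pointer (the local path is an assumption, not part of this commit), using only the Python standard library:

import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    # Stream the file in chunks and return its hex SHA-256 digest.
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

# Expected value taken from the new LFS pointer for adapter_model.safetensors above.
expected_oid = "f96fe0f77223bf21e72bb8cec824d6e8118b382aeb4fe8d6ab68534e260cbf8d"
local_path = "last-checkpoint/adapter_model.safetensors"  # assumed local checkout path

assert sha256_of(local_path) == expected_oid, "checksum mismatch: re-download the file"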
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.1188118811881188,
+  "epoch": 0.2376237623762376,
   "eval_steps": 42,
-  "global_step": 42,
+  "global_step": 84,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -121,6 +121,112 @@
       "eval_samples_per_second": 94.125,
       "eval_steps_per_second": 11.845,
       "step": 42
+    },
+    {
+      "epoch": 0.1272984441301273,
+      "grad_norm": 0.20692946016788483,
+      "learning_rate": 9.874639560909117e-05,
+      "loss": 11.8396,
+      "step": 45
+    },
+    {
+      "epoch": 0.13578500707213578,
+      "grad_norm": 0.23685254156589508,
+      "learning_rate": 9.852339101019574e-05,
+      "loss": 11.8316,
+      "step": 48
+    },
+    {
+      "epoch": 0.14427157001414428,
+      "grad_norm": 0.2432631552219391,
+      "learning_rate": 9.828243544427796e-05,
+      "loss": 11.8341,
+      "step": 51
+    },
+    {
+      "epoch": 0.15275813295615276,
+      "grad_norm": 0.11745542287826538,
+      "learning_rate": 9.802361805155097e-05,
+      "loss": 11.8301,
+      "step": 54
+    },
+    {
+      "epoch": 0.16124469589816123,
+      "grad_norm": 0.134097158908844,
+      "learning_rate": 9.774703458011453e-05,
+      "loss": 11.8359,
+      "step": 57
+    },
+    {
+      "epoch": 0.16973125884016974,
+      "grad_norm": 0.19680051505565643,
+      "learning_rate": 9.745278735053343e-05,
+      "loss": 11.8378,
+      "step": 60
+    },
+    {
+      "epoch": 0.1782178217821782,
+      "grad_norm": 0.10566498339176178,
+      "learning_rate": 9.714098521798465e-05,
+      "loss": 11.832,
+      "step": 63
+    },
+    {
+      "epoch": 0.1867043847241867,
+      "grad_norm": 0.1530551165342331,
+      "learning_rate": 9.681174353198687e-05,
+      "loss": 11.8363,
+      "step": 66
+    },
+    {
+      "epoch": 0.19519094766619519,
+      "grad_norm": 0.1929464042186737,
+      "learning_rate": 9.64651840937276e-05,
+      "loss": 11.8284,
+      "step": 69
+    },
+    {
+      "epoch": 0.2036775106082037,
+      "grad_norm": 0.17411480844020844,
+      "learning_rate": 9.610143511100354e-05,
+      "loss": 11.8314,
+      "step": 72
+    },
+    {
+      "epoch": 0.21216407355021216,
+      "grad_norm": 0.14971987903118134,
+      "learning_rate": 9.572063115079063e-05,
+      "loss": 11.832,
+      "step": 75
+    },
+    {
+      "epoch": 0.22065063649222066,
+      "grad_norm": 0.18370923399925232,
+      "learning_rate": 9.53229130894619e-05,
+      "loss": 11.8275,
+      "step": 78
+    },
+    {
+      "epoch": 0.22913719943422914,
+      "grad_norm": 0.26103201508522034,
+      "learning_rate": 9.490842806067095e-05,
+      "loss": 11.8278,
+      "step": 81
+    },
+    {
+      "epoch": 0.2376237623762376,
+      "grad_norm": 0.21483545005321503,
+      "learning_rate": 9.44773294009206e-05,
+      "loss": 11.825,
+      "step": 84
+    },
+    {
+      "epoch": 0.2376237623762376,
+      "eval_loss": 11.825268745422363,
+      "eval_runtime": 6.402,
+      "eval_samples_per_second": 93.097,
+      "eval_steps_per_second": 11.715,
+      "step": 84
     }
   ],
   "logging_steps": 3,
@@ -140,7 +246,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 254298685440.0,
+  "total_flos": 503776542720.0,
   "train_batch_size": 8,
   "trial_name": null,
   "trial_params": null