dimasik87 committed
Commit ed81264 · verified · 1 Parent(s): cd85f70

Training in progress, step 50, checkpoint

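The files below are the usual contents of a Hugging Face `Trainer` checkpoint directory: adapter weights, optimizer and scheduler state, RNG state, and `trainer_state.json`. A minimal sketch of how such a checkpoint is typically resumed, assuming a `Trainer` configured elsewhere with the same model, data, and arguments that produced it (this is not the actual training script behind this commit):

```python
from transformers import Trainer

def resume_training(trainer: Trainer) -> None:
    # Assumption: `trainer` was built with the same setup that wrote
    # last-checkpoint/. Passing the directory restores optimizer.pt,
    # scheduler.pt, rng_state.pth and trainer_state.json, so training
    # continues from step 50 rather than restarting.
    trainer.train(resume_from_checkpoint="last-checkpoint")
```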
last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:4e201cb9cff52f30bf5ae9ea51508e46e10420b2a220ea7f3245f8e692808e0c
+oid sha256:5b1e5ae09b2358a7cec14ebf082c6791eb4b2371a0e0e0f881d1f2f7de4e2d1e
 size 100966336
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:0f5b76d64e67de9fd356021a9eb396e88a7e41adc982a1e266fc6fced3b5b2a9
+oid sha256:4e72e98a3f824f25f6d07ecdaffef31b797d3c943bad0324510fdfec7d11d317
 size 202110330
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:2878759bee0a12cd28952503c327798cb375b6062d6ba2ce8f13fa04c9ecf2c1
+oid sha256:bc78e937af6df61002f0454d4a89f4506e44ed33ab12b2726e58d84741b9206e
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:99fc9c0ec571f76cf9b6d1229601c5173899cd18104e487c5627f5f4c56c6e8a
+oid sha256:46fa8207e86dee7d50b0ab12f1dd18c4426e8c65d06f97f8b2bd004a747e9cfa
 size 1064
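Each `CHANGED` entry above is a Git LFS pointer file: the tensor blobs themselves live in LFS storage and are addressed by the `oid sha256:` digest and `size` recorded in the pointer. A short sketch (local file path assumed, not part of this commit) for checking that a downloaded blob matches its pointer:

```python
import hashlib
import os

def verify_lfs_object(blob_path: str, expected_oid: str, expected_size: int) -> bool:
    """Compare a downloaded file against the oid/size from its LFS pointer."""
    if os.path.getsize(blob_path) != expected_size:
        return False
    digest = hashlib.sha256()
    with open(blob_path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):  # 1 MiB chunks
            digest.update(chunk)
    return digest.hexdigest() == expected_oid

# Example using the new adapter weights from this commit:
ok = verify_lfs_object(
    "last-checkpoint/adapter_model.safetensors",
    "5b1e5ae09b2358a7cec14ebf082c6791eb4b2371a0e0e0f881d1f2f7de4e2d1e",
    100966336,
)
```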
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.0023502867349816676,
+  "epoch": 0.004700573469963335,
   "eval_steps": 25,
-  "global_step": 25,
+  "global_step": 50,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -198,6 +198,189 @@
       "eval_samples_per_second": 29.216,
       "eval_steps_per_second": 14.608,
       "step": 25
+    },
+    {
+      "epoch": 0.0024442982043809346,
+      "grad_norm": 1.5596882104873657,
+      "learning_rate": 7.68649804173412e-05,
+      "loss": 0.996,
+      "step": 26
+    },
+    {
+      "epoch": 0.002538309673780201,
+      "grad_norm": 1.2554572820663452,
+      "learning_rate": 7.500000000000001e-05,
+      "loss": 0.7677,
+      "step": 27
+    },
+    {
+      "epoch": 0.002632321143179468,
+      "grad_norm": 1.1897380352020264,
+      "learning_rate": 7.308743066175172e-05,
+      "loss": 0.8327,
+      "step": 28
+    },
+    {
+      "epoch": 0.0027263326125787344,
+      "grad_norm": 1.1996997594833374,
+      "learning_rate": 7.113091308703498e-05,
+      "loss": 0.7552,
+      "step": 29
+    },
+    {
+      "epoch": 0.0028203440819780014,
+      "grad_norm": 1.190172553062439,
+      "learning_rate": 6.91341716182545e-05,
+      "loss": 0.6472,
+      "step": 30
+    },
+    {
+      "epoch": 0.002914355551377268,
+      "grad_norm": 1.0043503046035767,
+      "learning_rate": 6.710100716628344e-05,
+      "loss": 0.5782,
+      "step": 31
+    },
+    {
+      "epoch": 0.0030083670207765346,
+      "grad_norm": 0.9340465068817139,
+      "learning_rate": 6.503528997521366e-05,
+      "loss": 0.5787,
+      "step": 32
+    },
+    {
+      "epoch": 0.0031023784901758012,
+      "grad_norm": 1.2603780031204224,
+      "learning_rate": 6.294095225512603e-05,
+      "loss": 0.6059,
+      "step": 33
+    },
+    {
+      "epoch": 0.0031963899595750683,
+      "grad_norm": 1.4247483015060425,
+      "learning_rate": 6.0821980696905146e-05,
+      "loss": 0.6423,
+      "step": 34
+    },
+    {
+      "epoch": 0.003290401428974335,
+      "grad_norm": 0.9418686032295227,
+      "learning_rate": 5.868240888334653e-05,
+      "loss": 0.522,
+      "step": 35
+    },
+    {
+      "epoch": 0.0033844128983736015,
+      "grad_norm": 1.142016887664795,
+      "learning_rate": 5.6526309611002594e-05,
+      "loss": 0.6962,
+      "step": 36
+    },
+    {
+      "epoch": 0.003478424367772868,
+      "grad_norm": 1.1568361520767212,
+      "learning_rate": 5.435778713738292e-05,
+      "loss": 0.5529,
+      "step": 37
+    },
+    {
+      "epoch": 0.003572435837172135,
+      "grad_norm": 1.2142870426177979,
+      "learning_rate": 5.218096936826681e-05,
+      "loss": 0.4753,
+      "step": 38
+    },
+    {
+      "epoch": 0.0036664473065714017,
+      "grad_norm": 1.2301100492477417,
+      "learning_rate": 5e-05,
+      "loss": 0.5065,
+      "step": 39
+    },
+    {
+      "epoch": 0.0037604587759706683,
+      "grad_norm": 1.3371412754058838,
+      "learning_rate": 4.781903063173321e-05,
+      "loss": 0.4529,
+      "step": 40
+    },
+    {
+      "epoch": 0.0038544702453699353,
+      "grad_norm": 0.9668884873390198,
+      "learning_rate": 4.564221286261709e-05,
+      "loss": 0.4737,
+      "step": 41
+    },
+    {
+      "epoch": 0.0039484817147692015,
+      "grad_norm": 1.0440391302108765,
+      "learning_rate": 4.347369038899744e-05,
+      "loss": 0.5063,
+      "step": 42
+    },
+    {
+      "epoch": 0.004042493184168469,
+      "grad_norm": 1.243342638015747,
+      "learning_rate": 4.131759111665349e-05,
+      "loss": 0.3832,
+      "step": 43
+    },
+    {
+      "epoch": 0.004136504653567736,
+      "grad_norm": 1.4090092182159424,
+      "learning_rate": 3.917801930309486e-05,
+      "loss": 0.6327,
+      "step": 44
+    },
+    {
+      "epoch": 0.004230516122967002,
+      "grad_norm": 1.3914059400558472,
+      "learning_rate": 3.705904774487396e-05,
+      "loss": 0.4771,
+      "step": 45
+    },
+    {
+      "epoch": 0.004324527592366269,
+      "grad_norm": 1.1954938173294067,
+      "learning_rate": 3.4964710024786354e-05,
+      "loss": 0.4217,
+      "step": 46
+    },
+    {
+      "epoch": 0.004418539061765535,
+      "grad_norm": 1.7103815078735352,
+      "learning_rate": 3.289899283371657e-05,
+      "loss": 0.4914,
+      "step": 47
+    },
+    {
+      "epoch": 0.004512550531164802,
+      "grad_norm": 1.5357106924057007,
+      "learning_rate": 3.086582838174551e-05,
+      "loss": 0.456,
+      "step": 48
+    },
+    {
+      "epoch": 0.004606562000564069,
+      "grad_norm": 1.7698625326156616,
+      "learning_rate": 2.886908691296504e-05,
+      "loss": 0.4604,
+      "step": 49
+    },
+    {
+      "epoch": 0.004700573469963335,
+      "grad_norm": 2.5681278705596924,
+      "learning_rate": 2.6912569338248315e-05,
+      "loss": 0.4387,
+      "step": 50
+    },
+    {
+      "epoch": 0.004700573469963335,
+      "eval_loss": 0.6145913004875183,
+      "eval_runtime": 306.57,
+      "eval_samples_per_second": 29.22,
+      "eval_steps_per_second": 14.61,
+      "step": 50
     }
   ],
   "logging_steps": 1,
@@ -217,7 +400,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 5208852450508800.0,
+  "total_flos": 1.04177049010176e+16,
   "train_batch_size": 2,
   "trial_name": null,
   "trial_params": null