leixa committed
Commit ac3a882 (verified)
Parent(s): 9b920da

Training in progress, step 168, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:aadd79cb0ce90a3800ba84eddb5d6ed311601f6907fd8577721c4a48d11c900a
+oid sha256:7a2a60feb1e96d790294c0b5eae9acc5fb705f4771269491fe8bf9cd16a835e5
 size 201892112
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:3369db71b47a6b28d8af870490f2fffae251fdfc9b2ec04d614352e035cb1c3a
+oid sha256:7cbcfaf89e28391d7c82167b07cd38f9662d61b8bf262f3ab44b55dd4fde9bc5
 size 102864548
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:9e19d88c5ee8945747e0c8f00ae8f30f12218b1bee6b5d59ad749865193f9388
+oid sha256:98eb662dc5bc7d04a24c041329941502e3aed4d1fcb64bac57ca17d20f66e522
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:e00328545513b592b6bf311a002e087bb048e374276594cf37ee8782d28887fc
+oid sha256:2f516a6e4e8a8eba956b80cb2ea416b9fd98f0dec12d7d9d9a36274d0eef4a63
 size 1064
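The four diffs above only swap the `oid sha256:` line of each Git LFS pointer; the oid is the SHA-256 of the actual file content, so a pulled checkpoint can be checked against the pointer. A minimal sketch, assuming the LFS objects have already been fetched locally (the path and expected hash below are taken from the adapter_model diff):

```python
import hashlib
from pathlib import Path


def lfs_oid(path: Path, chunk_size: int = 1 << 20) -> str:
    """SHA-256 of the file contents, as recorded in the LFS pointer's `oid` field."""
    h = hashlib.sha256()
    with path.open("rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()


# Compare a local file against the new oid from the diff above.
ckpt = Path("last-checkpoint/adapter_model.safetensors")
expected = "7a2a60feb1e96d790294c0b5eae9acc5fb705f4771269491fe8bf9cd16a835e5"
print(lfs_oid(ckpt) == expected)      # True if the download matches the pointer
print(ckpt.stat().st_size)            # should match the pointer's size, 201892112
```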
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.02641232575201761,
+  "epoch": 0.03521643433602348,
   "eval_steps": 42,
-  "global_step": 126,
+  "global_step": 168,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -333,6 +333,112 @@
       "eval_samples_per_second": 48.682,
       "eval_steps_per_second": 6.089,
       "step": 126
+    },
+    {
+      "epoch": 0.02704119065087517,
+      "grad_norm": 0.3487012982368469,
+      "learning_rate": 8.613974319136958e-05,
+      "loss": 1.1212,
+      "step": 129
+    },
+    {
+      "epoch": 0.027670055549732734,
+      "grad_norm": 0.37176796793937683,
+      "learning_rate": 8.54684960502629e-05,
+      "loss": 1.0707,
+      "step": 132
+    },
+    {
+      "epoch": 0.028298920448590295,
+      "grad_norm": 0.3854920268058777,
+      "learning_rate": 8.478412753017433e-05,
+      "loss": 1.1204,
+      "step": 135
+    },
+    {
+      "epoch": 0.028927785347447856,
+      "grad_norm": 0.3561486303806305,
+      "learning_rate": 8.408689080954998e-05,
+      "loss": 1.1238,
+      "step": 138
+    },
+    {
+      "epoch": 0.029556650246305417,
+      "grad_norm": 0.35637167096138,
+      "learning_rate": 8.33770438273574e-05,
+      "loss": 1.1415,
+      "step": 141
+    },
+    {
+      "epoch": 0.030185515145162982,
+      "grad_norm": 0.37608301639556885,
+      "learning_rate": 8.265484918766243e-05,
+      "loss": 1.1374,
+      "step": 144
+    },
+    {
+      "epoch": 0.030814380044020543,
+      "grad_norm": 0.3702179789543152,
+      "learning_rate": 8.192057406248028e-05,
+      "loss": 1.1001,
+      "step": 147
+    },
+    {
+      "epoch": 0.031443244942878104,
+      "grad_norm": 0.35054582357406616,
+      "learning_rate": 8.117449009293668e-05,
+      "loss": 1.122,
+      "step": 150
+    },
+    {
+      "epoch": 0.03207210984173567,
+      "grad_norm": 0.35157763957977295,
+      "learning_rate": 8.041687328877567e-05,
+      "loss": 1.1126,
+      "step": 153
+    },
+    {
+      "epoch": 0.032700974740593226,
+      "grad_norm": 0.3567587435245514,
+      "learning_rate": 7.964800392625129e-05,
+      "loss": 1.1196,
+      "step": 156
+    },
+    {
+      "epoch": 0.03332983963945079,
+      "grad_norm": 0.33376169204711914,
+      "learning_rate": 7.886816644444098e-05,
+      "loss": 1.1269,
+      "step": 159
+    },
+    {
+      "epoch": 0.033958704538308356,
+      "grad_norm": 0.33005964756011963,
+      "learning_rate": 7.807764934001874e-05,
+      "loss": 1.0881,
+      "step": 162
+    },
+    {
+      "epoch": 0.03458756943716591,
+      "grad_norm": 0.3465365171432495,
+      "learning_rate": 7.727674506052743e-05,
+      "loss": 1.0912,
+      "step": 165
+    },
+    {
+      "epoch": 0.03521643433602348,
+      "grad_norm": 0.3523496985435486,
+      "learning_rate": 7.646574989618938e-05,
+      "loss": 1.0962,
+      "step": 168
+    },
+    {
+      "epoch": 0.03521643433602348,
+      "eval_loss": 1.1077728271484375,
+      "eval_runtime": 165.0063,
+      "eval_samples_per_second": 48.695,
+      "eval_steps_per_second": 6.091,
+      "step": 168
     }
   ],
   "logging_steps": 3,
@@ -352,7 +458,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 2.687766294115123e+16,
+  "total_flos": 3.5836883921534976e+16,
   "train_batch_size": 8,
   "trial_name": null,
   "trial_params": null