RodrigoSalazar-U committed (verified)
Commit 825c7cd · Parent: 6b05f5a

Training in progress, step 4000, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:dd3ba167b05ddca415d83ce674f218d12eb00f8de957403146522132701a0645
+oid sha256:7a9cf64b63b7766902a74424e2b9849dd20993349b621f4131a344d2c85b4630
 size 4785762744
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:c5f73c26475321619da788f02bc5dfa39e953d35292b0e44c815a1d264534b76
+oid sha256:32cf687af0a4ab1e094756b89de6b91731e958f35e735a064e6bab3e4368a447
 size 3497859804
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:f4df07c96cd5b34fe2c47040206f73bf07f974f4e6c2b7b7219d670c2d00c146
+oid sha256:cb1d32a5240cd2bec4435f56208363b0d59e8bf8c99164c7cbee490171e7de00
 size 14308
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:948ca56b053f56e11e66d405c77732b82bb85e418d1080c5ae696bdf321e898b
+oid sha256:b9c6ca92757145af39d20b47188651ac4ae897a7257a652d8a9acf8e3ccda307
 size 1064
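
The four files above are Git LFS pointer files: each checkpoint push only rewrites the sha256 OID (and, if it changes, the byte size), while the binary payloads live in LFS storage. As a minimal, hedged sketch of fetching the real binaries behind these pointers, one could use huggingface_hub; the repository ID below is a placeholder, not taken from this page.

```python
from huggingface_hub import hf_hub_download

# Placeholder repo ID -- substitute the actual repository this commit belongs to.
REPO_ID = "RodrigoSalazar-U/<repo-name>"

# Resolves the LFS pointer and downloads the actual adapter weights
# for this step-4000 checkpoint to the local cache.
adapter_path = hf_hub_download(
    repo_id=REPO_ID,
    filename="last-checkpoint/adapter_model.safetensors",
)
print(adapter_path)
```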
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 1.9167579408543265,
+  "epoch": 2.190580503833516,
   "eval_steps": 500,
-  "global_step": 3500,
+  "global_step": 4000,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -252,6 +252,41 @@
       "learning_rate": 3.473107811842055e-05,
       "loss": 0.9308,
       "step": 3500
+    },
+    {
+      "epoch": 1.9715224534501643,
+      "grad_norm": 1.9522345066070557,
+      "learning_rate": 3.173012410158744e-05,
+      "loss": 0.9195,
+      "step": 3600
+    },
+    {
+      "epoch": 2.026286966046002,
+      "grad_norm": 1.4607131481170654,
+      "learning_rate": 2.8803334322232017e-05,
+      "loss": 0.7255,
+      "step": 3700
+    },
+    {
+      "epoch": 2.08105147864184,
+      "grad_norm": 2.2295525074005127,
+      "learning_rate": 2.596258971120737e-05,
+      "loss": 0.5014,
+      "step": 3800
+    },
+    {
+      "epoch": 2.135815991237678,
+      "grad_norm": 1.6707744598388672,
+      "learning_rate": 2.3219421909949735e-05,
+      "loss": 0.5206,
+      "step": 3900
+    },
+    {
+      "epoch": 2.190580503833516,
+      "grad_norm": 1.4230283498764038,
+      "learning_rate": 2.0584966459246906e-05,
+      "loss": 0.4994,
+      "step": 4000
     }
   ],
   "logging_steps": 100,
@@ -271,7 +306,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 1.6878032741533286e+18,
+  "total_flos": 1.9236155642036552e+18,
   "train_batch_size": 16,
   "trial_name": null,
   "trial_params": null