{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.01760821716801174,
"eval_steps": 42,
"global_step": 84,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0002096216329525207,
"eval_loss": 1.4208108186721802,
"eval_runtime": 163.5996,
"eval_samples_per_second": 49.114,
"eval_steps_per_second": 6.143,
"step": 1
},
{
"epoch": 0.0006288648988575621,
"grad_norm": 0.39683395624160767,
"learning_rate": 3e-05,
"loss": 1.4354,
"step": 3
},
{
"epoch": 0.0012577297977151242,
"grad_norm": 0.4367702305316925,
"learning_rate": 6e-05,
"loss": 1.4342,
"step": 6
},
{
"epoch": 0.0018865946965726864,
"grad_norm": 0.37110769748687744,
"learning_rate": 9e-05,
"loss": 1.3479,
"step": 9
},
{
"epoch": 0.0025154595954302483,
"grad_norm": 0.36730247735977173,
"learning_rate": 9.999588943391597e-05,
"loss": 1.361,
"step": 12
},
{
"epoch": 0.0031443244942878103,
"grad_norm": 0.35089993476867676,
"learning_rate": 9.99743108100344e-05,
"loss": 1.2814,
"step": 15
},
{
"epoch": 0.0037731893931453727,
"grad_norm": 0.38672298192977905,
"learning_rate": 9.993424445916923e-05,
"loss": 1.3016,
"step": 18
},
{
"epoch": 0.004402054292002935,
"grad_norm": 0.321205198764801,
"learning_rate": 9.987570520365104e-05,
"loss": 1.2163,
"step": 21
},
{
"epoch": 0.005030919190860497,
"grad_norm": 0.3282659649848938,
"learning_rate": 9.979871469976196e-05,
"loss": 1.2312,
"step": 24
},
{
"epoch": 0.005659784089718059,
"grad_norm": 0.3265891373157501,
"learning_rate": 9.970330142972401e-05,
"loss": 1.2505,
"step": 27
},
{
"epoch": 0.006288648988575621,
"grad_norm": 0.3450949192047119,
"learning_rate": 9.95895006911623e-05,
"loss": 1.1982,
"step": 30
},
{
"epoch": 0.0069175138874331835,
"grad_norm": 0.3153296411037445,
"learning_rate": 9.945735458404681e-05,
"loss": 1.1669,
"step": 33
},
{
"epoch": 0.0075463787862907455,
"grad_norm": 0.3419332802295685,
"learning_rate": 9.930691199511775e-05,
"loss": 1.2492,
"step": 36
},
{
"epoch": 0.008175243685148307,
"grad_norm": 0.2867058515548706,
"learning_rate": 9.91382285798002e-05,
"loss": 1.1652,
"step": 39
},
{
"epoch": 0.00880410858400587,
"grad_norm": 0.3220820426940918,
"learning_rate": 9.895136674161465e-05,
"loss": 1.1996,
"step": 42
},
{
"epoch": 0.00880410858400587,
"eval_loss": 1.1905547380447388,
"eval_runtime": 165.0006,
"eval_samples_per_second": 48.697,
"eval_steps_per_second": 6.091,
"step": 42
},
{
"epoch": 0.009432973482863432,
"grad_norm": 0.30218949913978577,
"learning_rate": 9.874639560909117e-05,
"loss": 1.1916,
"step": 45
},
{
"epoch": 0.010061838381720993,
"grad_norm": 0.3343711197376251,
"learning_rate": 9.852339101019574e-05,
"loss": 1.1487,
"step": 48
},
{
"epoch": 0.010690703280578556,
"grad_norm": 0.3268805146217346,
"learning_rate": 9.828243544427796e-05,
"loss": 1.1781,
"step": 51
},
{
"epoch": 0.011319568179436117,
"grad_norm": 0.34138286113739014,
"learning_rate": 9.802361805155097e-05,
"loss": 1.1215,
"step": 54
},
{
"epoch": 0.01194843307829368,
"grad_norm": 0.3412030339241028,
"learning_rate": 9.774703458011453e-05,
"loss": 1.2044,
"step": 57
},
{
"epoch": 0.012577297977151241,
"grad_norm": 0.3168179392814636,
"learning_rate": 9.745278735053343e-05,
"loss": 1.1087,
"step": 60
},
{
"epoch": 0.013206162876008804,
"grad_norm": 0.3409186005592346,
"learning_rate": 9.714098521798465e-05,
"loss": 1.2062,
"step": 63
},
{
"epoch": 0.013835027774866367,
"grad_norm": 0.29721856117248535,
"learning_rate": 9.681174353198687e-05,
"loss": 1.1158,
"step": 66
},
{
"epoch": 0.014463892673723928,
"grad_norm": 0.3369769752025604,
"learning_rate": 9.64651840937276e-05,
"loss": 1.1834,
"step": 69
},
{
"epoch": 0.015092757572581491,
"grad_norm": 0.31268638372421265,
"learning_rate": 9.610143511100354e-05,
"loss": 1.1367,
"step": 72
},
{
"epoch": 0.015721622471439052,
"grad_norm": 0.333360493183136,
"learning_rate": 9.572063115079063e-05,
"loss": 1.1805,
"step": 75
},
{
"epoch": 0.016350487370296613,
"grad_norm": 0.2951726019382477,
"learning_rate": 9.53229130894619e-05,
"loss": 1.1459,
"step": 78
},
{
"epoch": 0.016979352269154178,
"grad_norm": 0.36082521080970764,
"learning_rate": 9.490842806067095e-05,
"loss": 1.17,
"step": 81
},
{
"epoch": 0.01760821716801174,
"grad_norm": 0.33050239086151123,
"learning_rate": 9.44773294009206e-05,
"loss": 1.1611,
"step": 84
},
{
"epoch": 0.01760821716801174,
"eval_loss": 1.147385835647583,
"eval_runtime": 165.0588,
"eval_samples_per_second": 48.68,
"eval_steps_per_second": 6.089,
"step": 84
}
],
"logging_steps": 3,
"max_steps": 500,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 42,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 1.7918441960767488e+16,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}