{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.0181268882175227,
"eval_steps": 21,
"global_step": 84,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.012084592145015106,
"eval_loss": 10.387871742248535,
"eval_runtime": 0.2615,
"eval_samples_per_second": 535.336,
"eval_steps_per_second": 68.829,
"step": 1
},
{
"epoch": 0.03625377643504532,
"grad_norm": 0.13119755685329437,
"learning_rate": 3e-05,
"loss": 10.3886,
"step": 3
},
{
"epoch": 0.07250755287009064,
"grad_norm": 0.10730081051588058,
"learning_rate": 6e-05,
"loss": 10.3847,
"step": 6
},
{
"epoch": 0.10876132930513595,
"grad_norm": 0.09705545753240585,
"learning_rate": 9e-05,
"loss": 10.3871,
"step": 9
},
{
"epoch": 0.14501510574018128,
"grad_norm": 0.12139848619699478,
"learning_rate": 9.998272257842641e-05,
"loss": 10.3874,
"step": 12
},
{
"epoch": 0.18126888217522658,
"grad_norm": 0.11973798274993896,
"learning_rate": 9.989204876292688e-05,
"loss": 10.3781,
"step": 15
},
{
"epoch": 0.2175226586102719,
"grad_norm": 0.13775140047073364,
"learning_rate": 9.972379999624936e-05,
"loss": 10.3777,
"step": 18
},
{
"epoch": 0.2537764350453172,
"grad_norm": 0.12977235019207,
"learning_rate": 9.947823788099753e-05,
"loss": 10.3796,
"step": 21
},
{
"epoch": 0.2537764350453172,
"eval_loss": 10.373146057128906,
"eval_runtime": 0.2637,
"eval_samples_per_second": 530.958,
"eval_steps_per_second": 68.266,
"step": 21
},
{
"epoch": 0.29003021148036257,
"grad_norm": 0.16504716873168945,
"learning_rate": 9.91557442308987e-05,
"loss": 10.3703,
"step": 24
},
{
"epoch": 0.32628398791540786,
"grad_norm": 0.16905713081359863,
"learning_rate": 9.875682047713846e-05,
"loss": 10.3718,
"step": 27
},
{
"epoch": 0.36253776435045315,
"grad_norm": 0.18219968676567078,
"learning_rate": 9.828208688870735e-05,
"loss": 10.3611,
"step": 30
},
{
"epoch": 0.3987915407854985,
"grad_norm": 0.2741997241973877,
"learning_rate": 9.773228160797188e-05,
"loss": 10.3599,
"step": 33
},
{
"epoch": 0.4350453172205438,
"grad_norm": 0.20199109613895416,
"learning_rate": 9.71082595029695e-05,
"loss": 10.3536,
"step": 36
},
{
"epoch": 0.47129909365558914,
"grad_norm": 0.16347676515579224,
"learning_rate": 9.64109908382119e-05,
"loss": 10.3424,
"step": 39
},
{
"epoch": 0.5075528700906344,
"grad_norm": 0.15472479164600372,
"learning_rate": 9.564155976606339e-05,
"loss": 10.3365,
"step": 42
},
{
"epoch": 0.5075528700906344,
"eval_loss": 10.338454246520996,
"eval_runtime": 0.2615,
"eval_samples_per_second": 535.377,
"eval_steps_per_second": 68.834,
"step": 42
},
{
"epoch": 0.5438066465256798,
"grad_norm": 0.15116359293460846,
"learning_rate": 9.480116264104011e-05,
"loss": 10.3382,
"step": 45
},
{
"epoch": 0.5800604229607251,
"grad_norm": 0.20655445754528046,
"learning_rate": 9.389110615965102e-05,
"loss": 10.3347,
"step": 48
},
{
"epoch": 0.6163141993957704,
"grad_norm": 0.15545906126499176,
"learning_rate": 9.291280532867302e-05,
"loss": 10.3275,
"step": 51
},
{
"epoch": 0.6525679758308157,
"grad_norm": 0.189162015914917,
"learning_rate": 9.186778126501916e-05,
"loss": 10.3294,
"step": 54
},
{
"epoch": 0.6888217522658611,
"grad_norm": 0.21338708698749542,
"learning_rate": 9.075765883062093e-05,
"loss": 10.3236,
"step": 57
},
{
"epoch": 0.7250755287009063,
"grad_norm": 0.23534299433231354,
"learning_rate": 8.958416410600187e-05,
"loss": 10.3183,
"step": 60
},
{
"epoch": 0.7613293051359517,
"grad_norm": 0.2692500054836273,
"learning_rate": 8.834912170647101e-05,
"loss": 10.3116,
"step": 63
},
{
"epoch": 0.7613293051359517,
"eval_loss": 10.310102462768555,
"eval_runtime": 0.2628,
"eval_samples_per_second": 532.785,
"eval_steps_per_second": 68.501,
"step": 63
},
{
"epoch": 0.797583081570997,
"grad_norm": 0.2844769358634949,
"learning_rate": 8.705445194510868e-05,
"loss": 10.3075,
"step": 66
},
{
"epoch": 0.8338368580060423,
"grad_norm": 0.2514300048351288,
"learning_rate": 8.570216784695637e-05,
"loss": 10.3049,
"step": 69
},
{
"epoch": 0.8700906344410876,
"grad_norm": 0.24744316935539246,
"learning_rate": 8.429437201905254e-05,
"loss": 10.295,
"step": 72
},
{
"epoch": 0.9063444108761329,
"grad_norm": 0.21623125672340393,
"learning_rate": 8.283325338118153e-05,
"loss": 10.2903,
"step": 75
},
{
"epoch": 0.9425981873111783,
"grad_norm": 0.21527834236621857,
"learning_rate": 8.132108376241849e-05,
"loss": 10.2817,
"step": 78
},
{
"epoch": 0.9788519637462235,
"grad_norm": 0.2678958475589752,
"learning_rate": 7.97602143687623e-05,
"loss": 10.2804,
"step": 81
},
{
"epoch": 1.0181268882175227,
"grad_norm": 0.20537346601486206,
"learning_rate": 7.815307212734888e-05,
"loss": 11.7642,
"step": 84
},
{
"epoch": 1.0181268882175227,
"eval_loss": 10.264846801757812,
"eval_runtime": 0.2605,
"eval_samples_per_second": 537.448,
"eval_steps_per_second": 69.1,
"step": 84
}
],
"logging_steps": 3,
"max_steps": 249,
"num_input_tokens_seen": 0,
"num_train_epochs": 4,
"save_steps": 21,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 9370255491072.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}