Training in progress, step 100, checkpoint
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.556420233463035,
"eval_steps": 9,
"global_step": 100,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.01556420233463035,
"eval_loss": 0.888220489025116,
"eval_runtime": 7.1127,
"eval_samples_per_second": 15.184,
"eval_steps_per_second": 1.968,
"step": 1
},
{
"epoch": 0.04669260700389105,
"grad_norm": 3.0385119915008545,
"learning_rate": 1.5e-05,
"loss": 0.863,
"step": 3
},
{
"epoch": 0.0933852140077821,
"grad_norm": 2.6818928718566895,
"learning_rate": 3e-05,
"loss": 0.8733,
"step": 6
},
{
"epoch": 0.14007782101167315,
"grad_norm": 0.7148049473762512,
"learning_rate": 4.5e-05,
"loss": 0.8218,
"step": 9
},
{
"epoch": 0.14007782101167315,
"eval_loss": 0.7536948919296265,
"eval_runtime": 7.0144,
"eval_samples_per_second": 15.397,
"eval_steps_per_second": 1.996,
"step": 9
},
{
"epoch": 0.1867704280155642,
"grad_norm": 0.43349385261535645,
"learning_rate": 4.993910125649561e-05,
"loss": 0.7216,
"step": 12
},
{
"epoch": 0.23346303501945526,
"grad_norm": 0.3839814066886902,
"learning_rate": 4.962019382530521e-05,
"loss": 0.722,
"step": 15
},
{
"epoch": 0.2801556420233463,
"grad_norm": 0.40558719635009766,
"learning_rate": 4.9031542398457974e-05,
"loss": 0.6981,
"step": 18
},
{
"epoch": 0.2801556420233463,
"eval_loss": 0.6632740497589111,
"eval_runtime": 7.0535,
"eval_samples_per_second": 15.312,
"eval_steps_per_second": 1.985,
"step": 18
},
{
"epoch": 0.32684824902723736,
"grad_norm": 0.35617005825042725,
"learning_rate": 4.817959636416969e-05,
"loss": 0.6165,
"step": 21
},
{
"epoch": 0.3735408560311284,
"grad_norm": 0.3675176799297333,
"learning_rate": 4.707368982147318e-05,
"loss": 0.699,
"step": 24
},
{
"epoch": 0.42023346303501946,
"grad_norm": 0.32696929574012756,
"learning_rate": 4.572593931387604e-05,
"loss": 0.6745,
"step": 27
},
{
"epoch": 0.42023346303501946,
"eval_loss": 0.6363734006881714,
"eval_runtime": 7.072,
"eval_samples_per_second": 15.271,
"eval_steps_per_second": 1.98,
"step": 27
},
{
"epoch": 0.4669260700389105,
"grad_norm": 0.28614386916160583,
"learning_rate": 4.415111107797445e-05,
"loss": 0.633,
"step": 30
},
{
"epoch": 0.5136186770428015,
"grad_norm": 0.32294508814811707,
"learning_rate": 4.2366459261474933e-05,
"loss": 0.6074,
"step": 33
},
{
"epoch": 0.5603112840466926,
"grad_norm": 0.3273395895957947,
"learning_rate": 4.039153688314145e-05,
"loss": 0.6574,
"step": 36
},
{
"epoch": 0.5603112840466926,
"eval_loss": 0.6291098594665527,
"eval_runtime": 7.0767,
"eval_samples_per_second": 15.261,
"eval_steps_per_second": 1.978,
"step": 36
},
{
"epoch": 0.6070038910505836,
"grad_norm": 0.27768364548683167,
"learning_rate": 3.824798160583012e-05,
"loss": 0.639,
"step": 39
},
{
"epoch": 0.6536964980544747,
"grad_norm": 0.29399535059928894,
"learning_rate": 3.5959278669726935e-05,
"loss": 0.6505,
"step": 42
},
{
"epoch": 0.7003891050583657,
"grad_norm": 0.3063369393348694,
"learning_rate": 3.355050358314172e-05,
"loss": 0.6367,
"step": 45
},
{
"epoch": 0.7003891050583657,
"eval_loss": 0.6239914298057556,
"eval_runtime": 7.0733,
"eval_samples_per_second": 15.269,
"eval_steps_per_second": 1.979,
"step": 45
},
{
"epoch": 0.7470817120622568,
"grad_norm": 0.3164641559123993,
"learning_rate": 3.104804738999169e-05,
"loss": 0.7004,
"step": 48
},
{
"epoch": 0.7937743190661478,
"grad_norm": 0.29213565587997437,
"learning_rate": 2.8479327524001636e-05,
"loss": 0.6424,
"step": 51
},
{
"epoch": 0.8404669260700389,
"grad_norm": 0.31585273146629333,
"learning_rate": 2.587248741756253e-05,
"loss": 0.6445,
"step": 54
},
{
"epoch": 0.8404669260700389,
"eval_loss": 0.6201390624046326,
"eval_runtime": 7.08,
"eval_samples_per_second": 15.254,
"eval_steps_per_second": 1.977,
"step": 54
},
{
"epoch": 0.8871595330739299,
"grad_norm": 0.31377464532852173,
"learning_rate": 2.3256088156396868e-05,
"loss": 0.6538,
"step": 57
},
{
"epoch": 0.933852140077821,
"grad_norm": 0.2973349988460541,
"learning_rate": 2.0658795558326743e-05,
"loss": 0.6141,
"step": 60
},
{
"epoch": 0.980544747081712,
"grad_norm": 0.31598252058029175,
"learning_rate": 1.8109066104575023e-05,
"loss": 0.6287,
"step": 63
},
{
"epoch": 0.980544747081712,
"eval_loss": 0.6172313094139099,
"eval_runtime": 7.2626,
"eval_samples_per_second": 14.871,
"eval_steps_per_second": 1.928,
"step": 63
},
{
"epoch": 1.027237354085603,
"grad_norm": 0.2909192144870758,
"learning_rate": 1.56348351646022e-05,
"loss": 0.776,
"step": 66
},
{
"epoch": 1.0739299610894941,
"grad_norm": 0.2928639054298401,
"learning_rate": 1.3263210930352737e-05,
"loss": 0.6068,
"step": 69
},
{
"epoch": 1.1206225680933852,
"grad_norm": 0.2987300753593445,
"learning_rate": 1.1020177413231334e-05,
"loss": 0.5927,
"step": 72
},
{
"epoch": 1.1206225680933852,
"eval_loss": 0.6153312921524048,
"eval_runtime": 7.261,
"eval_samples_per_second": 14.874,
"eval_steps_per_second": 1.928,
"step": 72
},
{
"epoch": 1.1673151750972763,
"grad_norm": 0.3104216456413269,
"learning_rate": 8.930309757836517e-06,
"loss": 0.6262,
"step": 75
},
{
"epoch": 1.2140077821011672,
"grad_norm": 0.2893487215042114,
"learning_rate": 7.016504991533726e-06,
"loss": 0.623,
"step": 78
},
{
"epoch": 1.2607003891050583,
"grad_norm": 0.309826523065567,
"learning_rate": 5.299731159831953e-06,
"loss": 0.643,
"step": 81
},
{
"epoch": 1.2607003891050583,
"eval_loss": 0.614518404006958,
"eval_runtime": 7.0859,
"eval_samples_per_second": 15.242,
"eval_steps_per_second": 1.976,
"step": 81
},
{
"epoch": 1.3073929961089494,
"grad_norm": 0.3125797212123871,
"learning_rate": 3.798797596089351e-06,
"loss": 0.5811,
"step": 84
},
{
"epoch": 1.3540856031128405,
"grad_norm": 0.31804636120796204,
"learning_rate": 2.5301488425208296e-06,
"loss": 0.6013,
"step": 87
},
{
"epoch": 1.4007782101167314,
"grad_norm": 0.31651419401168823,
"learning_rate": 1.5076844803522922e-06,
"loss": 0.6143,
"step": 90
},
{
"epoch": 1.4007782101167314,
"eval_loss": 0.6147182583808899,
"eval_runtime": 7.2685,
"eval_samples_per_second": 14.859,
"eval_steps_per_second": 1.926,
"step": 90
},
{
"epoch": 1.4474708171206225,
"grad_norm": 0.2994842529296875,
"learning_rate": 7.426068431000882e-07,
"loss": 0.5747,
"step": 93
},
{
"epoch": 1.4941634241245136,
"grad_norm": 0.3009912073612213,
"learning_rate": 2.4329828146074095e-07,
"loss": 0.6074,
"step": 96
},
{
"epoch": 1.5408560311284045,
"grad_norm": 0.3032476603984833,
"learning_rate": 1.522932452260595e-08,
"loss": 0.6417,
"step": 99
},
{
"epoch": 1.5408560311284045,
"eval_loss": 0.6142562031745911,
"eval_runtime": 7.0785,
"eval_samples_per_second": 15.257,
"eval_steps_per_second": 1.978,
"step": 99
}
],
"logging_steps": 3,
"max_steps": 100,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 9,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 7.220911035069235e+16,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}
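
A minimal sketch of how this state file can be inspected, assuming the JSON above is saved locally as trainer_state.json (the name the Hugging Face Trainer normally uses inside a checkpoint directory); the path is an assumption, not something stated in this file.

# Sketch only: load the checkpoint state and summarize the logged losses.
# Assumes the JSON shown above is available as "trainer_state.json".
import json

with open("trainer_state.json") as f:
    state = json.load(f)

# Split log_history into training-loss and eval-loss records.
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_loss" in e]

print(f"global_step: {state['global_step']}, epoch: {state['epoch']:.3f}")
print(f"final train loss: {train_logs[-1]['loss']}")
print(f"final eval loss:  {eval_logs[-1]['eval_loss']}")

# Eval-loss trajectory as (step, eval_loss) pairs.
for e in eval_logs:
    print(e["step"], e["eval_loss"])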