|
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.998401704848162,
  "eval_steps": 500,
  "global_step": 1407,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.21310602024507191,
      "grad_norm": 3.0930368900299072,
      "learning_rate": 7.092198581560284e-05,
      "loss": 2.0066,
      "step": 100
    },
    {
      "epoch": 0.42621204049014383,
      "grad_norm": 1.5248420238494873,
      "learning_rate": 9.533965244865718e-05,
      "loss": 1.2202,
      "step": 200
    },
    {
      "epoch": 0.6393180607352158,
      "grad_norm": 1.2673641443252563,
      "learning_rate": 8.744075829383887e-05,
      "loss": 1.1874,
      "step": 300
    },
    {
      "epoch": 0.8524240809802877,
      "grad_norm": 1.1734017133712769,
      "learning_rate": 7.954186413902054e-05,
      "loss": 1.1678,
      "step": 400
    },
    {
      "epoch": 1.0655301012253595,
      "grad_norm": 1.4848545789718628,
      "learning_rate": 7.164296998420222e-05,
      "loss": 1.0412,
      "step": 500
    },
    {
      "epoch": 1.2786361214704316,
      "grad_norm": 1.3286683559417725,
      "learning_rate": 6.374407582938389e-05,
      "loss": 0.8314,
      "step": 600
    },
    {
      "epoch": 1.4917421417155035,
      "grad_norm": 1.1215488910675049,
      "learning_rate": 5.584518167456556e-05,
      "loss": 0.8177,
      "step": 700
    },
    {
      "epoch": 1.7048481619605753,
      "grad_norm": 1.2876837253570557,
      "learning_rate": 4.794628751974723e-05,
      "loss": 0.8189,
      "step": 800
    },
    {
      "epoch": 1.9179541822056474,
      "grad_norm": 1.2666414976119995,
      "learning_rate": 4.004739336492891e-05,
      "loss": 0.8247,
      "step": 900
    },
    {
      "epoch": 2.131060202450719,
      "grad_norm": 1.3837511539459229,
      "learning_rate": 3.214849921011059e-05,
      "loss": 0.6754,
      "step": 1000
    },
    {
      "epoch": 2.344166222695791,
      "grad_norm": 1.3017562627792358,
      "learning_rate": 2.424960505529226e-05,
      "loss": 0.5903,
      "step": 1100
    },
    {
      "epoch": 2.557272242940863,
      "grad_norm": 1.1946550607681274,
      "learning_rate": 1.6350710900473933e-05,
      "loss": 0.5916,
      "step": 1200
    },
    {
      "epoch": 2.7703782631859353,
      "grad_norm": 1.2071906328201294,
      "learning_rate": 8.451816745655609e-06,
      "loss": 0.5928,
      "step": 1300
    },
    {
      "epoch": 2.983484283431007,
      "grad_norm": 1.1832154989242554,
      "learning_rate": 5.529225908372828e-07,
      "loss": 0.5906,
      "step": 1400
    }
  ],
  "logging_steps": 100,
  "max_steps": 1407,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 8.848088742642156e+17,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}