{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 3.075313807531381,
  "eval_steps": 50,
  "global_step": 45,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.06694560669456066,
      "grad_norm": 27.560821533203125,
      "learning_rate": 5e-06,
      "loss": 10.1223,
      "step": 1
    },
    {
      "epoch": 0.06694560669456066,
      "eval_loss": 10.099274635314941,
      "eval_runtime": 0.2689,
      "eval_samples_per_second": 747.449,
      "eval_steps_per_second": 48.342,
      "step": 1
    },
    {
      "epoch": 0.13389121338912133,
      "grad_norm": 28.028085708618164,
      "learning_rate": 1e-05,
      "loss": 10.0918,
      "step": 2
    },
    {
      "epoch": 0.200836820083682,
      "grad_norm": 26.342453002929688,
      "learning_rate": 1.5e-05,
      "loss": 10.097,
      "step": 3
    },
    {
      "epoch": 0.26778242677824265,
      "grad_norm": 26.227397918701172,
      "learning_rate": 2e-05,
      "loss": 10.038,
      "step": 4
    },
    {
      "epoch": 0.33472803347280333,
      "grad_norm": 28.014986038208008,
      "learning_rate": 2.5e-05,
      "loss": 9.9078,
      "step": 5
    },
    {
      "epoch": 0.401673640167364,
      "grad_norm": 25.674665451049805,
      "learning_rate": 3e-05,
      "loss": 9.7148,
      "step": 6
    },
    {
      "epoch": 0.4686192468619247,
      "grad_norm": 25.88928985595703,
      "learning_rate": 3.5e-05,
      "loss": 9.4521,
      "step": 7
    },
    {
      "epoch": 0.5355648535564853,
      "grad_norm": 22.087249755859375,
      "learning_rate": 4e-05,
      "loss": 9.3105,
      "step": 8
    },
    {
      "epoch": 0.602510460251046,
      "grad_norm": 21.283077239990234,
      "learning_rate": 4.5e-05,
      "loss": 9.0043,
      "step": 9
    },
    {
      "epoch": 0.6694560669456067,
      "grad_norm": 21.313087463378906,
      "learning_rate": 5e-05,
      "loss": 8.7409,
      "step": 10
    },
    {
      "epoch": 0.7364016736401674,
      "grad_norm": 20.148950576782227,
      "learning_rate": 5.500000000000001e-05,
      "loss": 8.4883,
      "step": 11
    },
    {
      "epoch": 0.803347280334728,
      "grad_norm": 20.127819061279297,
      "learning_rate": 6e-05,
      "loss": 8.1642,
      "step": 12
    },
    {
      "epoch": 0.8702928870292888,
      "grad_norm": 20.053115844726562,
      "learning_rate": 6.500000000000001e-05,
      "loss": 7.8865,
      "step": 13
    },
    {
      "epoch": 0.9372384937238494,
      "grad_norm": 20.051658630371094,
      "learning_rate": 7e-05,
      "loss": 7.5385,
      "step": 14
    },
    {
      "epoch": 1.0251046025104602,
      "grad_norm": 26.823057174682617,
      "learning_rate": 7.500000000000001e-05,
      "loss": 9.9643,
      "step": 15
    },
    {
      "epoch": 1.0920502092050208,
      "grad_norm": 17.49614143371582,
      "learning_rate": 8e-05,
      "loss": 6.9635,
      "step": 16
    },
    {
      "epoch": 1.1589958158995817,
      "grad_norm": 15.299077033996582,
      "learning_rate": 8.5e-05,
      "loss": 6.481,
      "step": 17
    },
    {
      "epoch": 1.2259414225941423,
      "grad_norm": 14.408894538879395,
      "learning_rate": 9e-05,
      "loss": 6.2407,
      "step": 18
    },
    {
      "epoch": 1.292887029288703,
      "grad_norm": 13.718225479125977,
      "learning_rate": 9.5e-05,
      "loss": 5.9908,
      "step": 19
    },
    {
      "epoch": 1.3598326359832635,
      "grad_norm": 12.873955726623535,
      "learning_rate": 0.0001,
      "loss": 5.6383,
      "step": 20
    },
    {
      "epoch": 1.4267782426778242,
      "grad_norm": 11.39185619354248,
      "learning_rate": 9.96057350657239e-05,
      "loss": 5.525,
      "step": 21
    },
    {
      "epoch": 1.493723849372385,
      "grad_norm": 10.285475730895996,
      "learning_rate": 9.842915805643155e-05,
      "loss": 5.0268,
      "step": 22
    },
    {
      "epoch": 1.5606694560669456,
      "grad_norm": 9.467267036437988,
      "learning_rate": 9.648882429441257e-05,
      "loss": 5.0979,
      "step": 23
    },
    {
      "epoch": 1.6276150627615062,
      "grad_norm": 9.132113456726074,
      "learning_rate": 9.381533400219318e-05,
      "loss": 5.0314,
      "step": 24
    },
    {
      "epoch": 1.694560669456067,
      "grad_norm": 7.965480804443359,
      "learning_rate": 9.045084971874738e-05,
      "loss": 4.7873,
      "step": 25
    },
    {
      "epoch": 1.7615062761506275,
      "grad_norm": 6.717679023742676,
      "learning_rate": 8.644843137107059e-05,
      "loss": 4.7389,
      "step": 26
    },
    {
      "epoch": 1.8284518828451883,
      "grad_norm": 6.294735431671143,
      "learning_rate": 8.18711994874345e-05,
      "loss": 4.4523,
      "step": 27
    },
    {
      "epoch": 1.895397489539749,
      "grad_norm": 5.053362846374512,
      "learning_rate": 7.679133974894983e-05,
      "loss": 4.5132,
      "step": 28
    },
    {
      "epoch": 1.9623430962343096,
      "grad_norm": 5.998095989227295,
      "learning_rate": 7.128896457825364e-05,
      "loss": 5.5026,
      "step": 29
    },
    {
      "epoch": 2.0502092050209204,
      "grad_norm": 5.140533447265625,
      "learning_rate": 6.545084971874738e-05,
      "loss": 5.16,
      "step": 30
    },
    {
      "epoch": 2.1171548117154813,
      "grad_norm": 4.294057369232178,
      "learning_rate": 5.9369065729286245e-05,
      "loss": 4.1192,
      "step": 31
    },
    {
      "epoch": 2.1841004184100417,
      "grad_norm": 4.689243793487549,
      "learning_rate": 5.313952597646568e-05,
      "loss": 4.1063,
      "step": 32
    },
    {
      "epoch": 2.2510460251046025,
      "grad_norm": 4.195860385894775,
      "learning_rate": 4.6860474023534335e-05,
      "loss": 4.2288,
      "step": 33
    },
    {
      "epoch": 2.3179916317991633,
      "grad_norm": 4.730858325958252,
      "learning_rate": 4.063093427071376e-05,
      "loss": 4.3229,
      "step": 34
    },
    {
      "epoch": 2.3849372384937237,
      "grad_norm": 3.84011173248291,
      "learning_rate": 3.4549150281252636e-05,
      "loss": 4.1128,
      "step": 35
    },
    {
      "epoch": 2.4518828451882846,
      "grad_norm": 3.5080318450927734,
      "learning_rate": 2.8711035421746367e-05,
      "loss": 4.0789,
      "step": 36
    },
    {
      "epoch": 2.518828451882845,
      "grad_norm": 3.859924554824829,
      "learning_rate": 2.3208660251050158e-05,
      "loss": 4.1502,
      "step": 37
    },
    {
      "epoch": 2.585774058577406,
      "grad_norm": 3.6729748249053955,
      "learning_rate": 1.8128800512565513e-05,
      "loss": 3.9726,
      "step": 38
    },
    {
      "epoch": 2.6527196652719667,
      "grad_norm": 2.9506115913391113,
      "learning_rate": 1.3551568628929434e-05,
      "loss": 4.0159,
      "step": 39
    },
    {
      "epoch": 2.719665271966527,
      "grad_norm": 3.1066789627075195,
      "learning_rate": 9.549150281252633e-06,
      "loss": 4.1407,
      "step": 40
    },
    {
      "epoch": 2.786610878661088,
      "grad_norm": 2.8327407836914062,
      "learning_rate": 6.184665997806832e-06,
      "loss": 3.9974,
      "step": 41
    },
    {
      "epoch": 2.8535564853556483,
      "grad_norm": 2.8391292095184326,
      "learning_rate": 3.511175705587433e-06,
      "loss": 3.9826,
      "step": 42
    },
    {
      "epoch": 2.920502092050209,
      "grad_norm": 3.0579187870025635,
      "learning_rate": 1.5708419435684462e-06,
      "loss": 3.8871,
      "step": 43
    },
    {
      "epoch": 3.00836820083682,
      "grad_norm": 3.730663299560547,
      "learning_rate": 3.9426493427611177e-07,
      "loss": 5.4636,
      "step": 44
    },
    {
      "epoch": 3.075313807531381,
      "grad_norm": 2.6881635189056396,
      "learning_rate": 0.0,
      "loss": 3.9492,
      "step": 45
    }
  ],
  "logging_steps": 1,
  "max_steps": 45,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 4,
  "save_steps": 50,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 5,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 470968329830400.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}