{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.7317073170731707,
  "eval_steps": 500,
  "global_step": 600,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.024390243902439025,
      "grad_norm": 0.11692590266466141,
      "learning_rate": 0.00019631901840490797,
      "loss": 2.6359,
      "step": 20
    },
    {
      "epoch": 0.04878048780487805,
      "grad_norm": 0.09717526286840439,
      "learning_rate": 0.0001914110429447853,
      "loss": 2.3437,
      "step": 40
    },
    {
      "epoch": 0.07317073170731707,
      "grad_norm": 0.09254099428653717,
      "learning_rate": 0.00018650306748466258,
      "loss": 2.3361,
      "step": 60
    },
    {
      "epoch": 0.0975609756097561,
      "grad_norm": 0.10900052636861801,
      "learning_rate": 0.00018159509202453987,
      "loss": 2.2778,
      "step": 80
    },
    {
      "epoch": 0.12195121951219512,
      "grad_norm": 0.09833081066608429,
      "learning_rate": 0.0001766871165644172,
      "loss": 2.2547,
      "step": 100
    },
    {
      "epoch": 0.14634146341463414,
      "grad_norm": 0.15433165431022644,
      "learning_rate": 0.0001717791411042945,
      "loss": 2.2073,
      "step": 120
    },
    {
      "epoch": 0.17073170731707318,
      "grad_norm": 0.15249375998973846,
      "learning_rate": 0.00016687116564417177,
      "loss": 2.3032,
      "step": 140
    },
    {
      "epoch": 0.1951219512195122,
      "grad_norm": 0.10253068804740906,
      "learning_rate": 0.00016196319018404909,
      "loss": 2.1939,
      "step": 160
    },
    {
      "epoch": 0.21951219512195122,
      "grad_norm": 0.170660600066185,
      "learning_rate": 0.0001570552147239264,
      "loss": 2.1974,
      "step": 180
    },
    {
      "epoch": 0.24390243902439024,
      "grad_norm": 0.11670807003974915,
      "learning_rate": 0.0001521472392638037,
      "loss": 2.2525,
      "step": 200
    },
    {
      "epoch": 0.2682926829268293,
      "grad_norm": 0.1081448644399643,
      "learning_rate": 0.00014723926380368098,
      "loss": 2.2139,
      "step": 220
    },
    {
      "epoch": 0.2926829268292683,
      "grad_norm": 0.09517224878072739,
      "learning_rate": 0.00014233128834355828,
      "loss": 2.2027,
      "step": 240
    },
    {
      "epoch": 0.3170731707317073,
      "grad_norm": 0.11719302833080292,
      "learning_rate": 0.0001374233128834356,
      "loss": 2.1831,
      "step": 260
    },
    {
      "epoch": 0.34146341463414637,
      "grad_norm": 0.09806275367736816,
      "learning_rate": 0.00013251533742331288,
      "loss": 2.1686,
      "step": 280
    },
    {
      "epoch": 0.36585365853658536,
      "grad_norm": 0.11017315089702606,
      "learning_rate": 0.00012760736196319017,
      "loss": 2.1871,
      "step": 300
    },
    {
      "epoch": 0.3902439024390244,
      "grad_norm": 0.11465785652399063,
      "learning_rate": 0.0001226993865030675,
      "loss": 2.1798,
      "step": 320
    },
    {
      "epoch": 0.4146341463414634,
      "grad_norm": 0.10591326653957367,
      "learning_rate": 0.0001177914110429448,
      "loss": 2.2399,
      "step": 340
    },
    {
      "epoch": 0.43902439024390244,
      "grad_norm": 0.10572998225688934,
      "learning_rate": 0.00011288343558282209,
      "loss": 2.1076,
      "step": 360
    },
    {
      "epoch": 0.4634146341463415,
      "grad_norm": 0.09855518490076065,
      "learning_rate": 0.00010797546012269939,
      "loss": 2.2347,
      "step": 380
    },
    {
      "epoch": 0.4878048780487805,
      "grad_norm": 0.1365315169095993,
      "learning_rate": 0.0001030674846625767,
      "loss": 2.2565,
      "step": 400
    },
    {
      "epoch": 0.5121951219512195,
      "grad_norm": 0.12178418040275574,
      "learning_rate": 9.815950920245399e-05,
      "loss": 2.214,
      "step": 420
    },
    {
      "epoch": 0.5365853658536586,
      "grad_norm": 0.11690341681241989,
      "learning_rate": 9.325153374233129e-05,
      "loss": 2.1724,
      "step": 440
    },
    {
      "epoch": 0.5609756097560976,
      "grad_norm": 0.11081600189208984,
      "learning_rate": 8.83435582822086e-05,
      "loss": 2.2109,
      "step": 460
    },
    {
      "epoch": 0.5853658536585366,
      "grad_norm": 0.13180199265480042,
      "learning_rate": 8.343558282208588e-05,
      "loss": 2.1648,
      "step": 480
    },
    {
      "epoch": 0.6097560975609756,
      "grad_norm": 0.09484437853097916,
      "learning_rate": 7.85276073619632e-05,
      "loss": 2.2001,
      "step": 500
    },
    {
      "epoch": 0.6341463414634146,
      "grad_norm": 0.12273681908845901,
      "learning_rate": 7.361963190184049e-05,
      "loss": 2.1424,
      "step": 520
    },
    {
      "epoch": 0.6585365853658537,
      "grad_norm": 0.12908370792865753,
      "learning_rate": 6.87116564417178e-05,
      "loss": 2.1562,
      "step": 540
    },
    {
      "epoch": 0.6829268292682927,
      "grad_norm": 0.11981745809316635,
      "learning_rate": 6.380368098159509e-05,
      "loss": 2.1774,
      "step": 560
    },
    {
      "epoch": 0.7073170731707317,
      "grad_norm": 0.12334165722131729,
      "learning_rate": 5.88957055214724e-05,
      "loss": 2.1732,
      "step": 580
    },
    {
      "epoch": 0.7317073170731707,
      "grad_norm": 0.12499678134918213,
      "learning_rate": 5.3987730061349695e-05,
      "loss": 2.1708,
      "step": 600
    }
  ],
  "logging_steps": 20,
  "max_steps": 820,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 200,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 1.520572373139456e+17,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}
|