{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.0020927507115352417,
  "eval_steps": 5,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 4.1855014230704836e-05,
      "grad_norm": 0.23623579740524292,
      "learning_rate": 2e-05,
      "loss": 0.1918,
      "step": 1
    },
    {
      "epoch": 4.1855014230704836e-05,
      "eval_loss": 0.16839955747127533,
      "eval_runtime": 950.9919,
      "eval_samples_per_second": 10.578,
      "eval_steps_per_second": 5.289,
      "step": 1
    },
    {
      "epoch": 8.371002846140967e-05,
      "grad_norm": 0.2619614005088806,
      "learning_rate": 4e-05,
      "loss": 0.1554,
      "step": 2
    },
    {
      "epoch": 0.00012556504269211453,
      "grad_norm": 0.21573609113693237,
      "learning_rate": 6e-05,
      "loss": 0.1409,
      "step": 3
    },
    {
      "epoch": 0.00016742005692281934,
      "grad_norm": 0.20257234573364258,
      "learning_rate": 8e-05,
      "loss": 0.1308,
      "step": 4
    },
    {
      "epoch": 0.00020927507115352419,
      "grad_norm": 0.22933052480220795,
      "learning_rate": 0.0001,
      "loss": 0.1471,
      "step": 5
    },
    {
      "epoch": 0.00020927507115352419,
      "eval_loss": 0.15534359216690063,
      "eval_runtime": 956.5429,
      "eval_samples_per_second": 10.517,
      "eval_steps_per_second": 5.259,
      "step": 5
    },
    {
      "epoch": 0.00025113008538422905,
      "grad_norm": 0.14353002607822418,
      "learning_rate": 0.00012,
      "loss": 0.0659,
      "step": 6
    },
    {
      "epoch": 0.00029298509961493387,
      "grad_norm": 0.18972477316856384,
      "learning_rate": 0.00014,
      "loss": 0.1075,
      "step": 7
    },
    {
      "epoch": 0.0003348401138456387,
      "grad_norm": 0.21828801929950714,
      "learning_rate": 0.00016,
      "loss": 0.1016,
      "step": 8
    },
    {
      "epoch": 0.00037669512807634355,
      "grad_norm": 0.26345500349998474,
      "learning_rate": 0.00018,
      "loss": 0.1297,
      "step": 9
    },
    {
      "epoch": 0.00041855014230704837,
      "grad_norm": 0.2362617701292038,
      "learning_rate": 0.0002,
      "loss": 0.0807,
      "step": 10
    },
    {
      "epoch": 0.00041855014230704837,
      "eval_loss": 0.07172638177871704,
      "eval_runtime": 956.2922,
      "eval_samples_per_second": 10.52,
      "eval_steps_per_second": 5.26,
      "step": 10
    },
    {
      "epoch": 0.00046040515653775324,
      "grad_norm": 0.1236543208360672,
      "learning_rate": 0.0001996917333733128,
      "loss": 0.0325,
      "step": 11
    },
    {
      "epoch": 0.0005022601707684581,
      "grad_norm": 0.23662090301513672,
      "learning_rate": 0.00019876883405951377,
      "loss": 0.0623,
      "step": 12
    },
    {
      "epoch": 0.0005441151849991629,
      "grad_norm": 0.17430146038532257,
      "learning_rate": 0.00019723699203976766,
      "loss": 0.0378,
      "step": 13
    },
    {
      "epoch": 0.0005859701992298677,
      "grad_norm": 0.18858376145362854,
      "learning_rate": 0.00019510565162951537,
      "loss": 0.031,
      "step": 14
    },
    {
      "epoch": 0.0006278252134605726,
      "grad_norm": 0.25688692927360535,
      "learning_rate": 0.0001923879532511287,
      "loss": 0.0241,
      "step": 15
    },
    {
      "epoch": 0.0006278252134605726,
      "eval_loss": 0.028141211718320847,
      "eval_runtime": 955.7785,
      "eval_samples_per_second": 10.525,
      "eval_steps_per_second": 5.263,
      "step": 15
    },
    {
      "epoch": 0.0006696802276912774,
      "grad_norm": 0.17680053412914276,
      "learning_rate": 0.0001891006524188368,
      "loss": 0.021,
      "step": 16
    },
    {
      "epoch": 0.0007115352419219822,
      "grad_norm": 0.13639086484909058,
      "learning_rate": 0.00018526401643540922,
      "loss": 0.0156,
      "step": 17
    },
    {
      "epoch": 0.0007533902561526871,
      "grad_norm": 0.23436982929706573,
      "learning_rate": 0.00018090169943749476,
      "loss": 0.0199,
      "step": 18
    },
    {
      "epoch": 0.000795245270383392,
      "grad_norm": 0.16494698822498322,
      "learning_rate": 0.0001760405965600031,
      "loss": 0.0099,
      "step": 19
    },
    {
      "epoch": 0.0008371002846140967,
      "grad_norm": 0.1418473720550537,
      "learning_rate": 0.00017071067811865476,
      "loss": 0.0136,
      "step": 20
    },
    {
      "epoch": 0.0008371002846140967,
      "eval_loss": 0.011286907829344273,
      "eval_runtime": 955.9815,
      "eval_samples_per_second": 10.523,
      "eval_steps_per_second": 5.262,
      "step": 20
    },
    {
      "epoch": 0.0008789552988448016,
      "grad_norm": 0.06463932991027832,
      "learning_rate": 0.00016494480483301836,
      "loss": 0.004,
      "step": 21
    },
    {
      "epoch": 0.0009208103130755065,
      "grad_norm": 0.10179118812084198,
      "learning_rate": 0.00015877852522924732,
      "loss": 0.0088,
      "step": 22
    },
    {
      "epoch": 0.0009626653273062112,
      "grad_norm": 0.07703794538974762,
      "learning_rate": 0.0001522498564715949,
      "loss": 0.0042,
      "step": 23
    },
    {
      "epoch": 0.0010045203415369162,
      "grad_norm": 0.214707612991333,
      "learning_rate": 0.00014539904997395468,
      "loss": 0.0082,
      "step": 24
    },
    {
      "epoch": 0.0010463753557676209,
      "grad_norm": 0.10525104403495789,
      "learning_rate": 0.000138268343236509,
      "loss": 0.0064,
      "step": 25
    },
    {
      "epoch": 0.0010463753557676209,
      "eval_loss": 0.007706103380769491,
      "eval_runtime": 955.9544,
      "eval_samples_per_second": 10.524,
      "eval_steps_per_second": 5.262,
      "step": 25
    },
    {
      "epoch": 0.0010882303699983257,
      "grad_norm": 0.054454121738672256,
      "learning_rate": 0.00013090169943749476,
      "loss": 0.0045,
      "step": 26
    },
    {
      "epoch": 0.0011300853842290306,
      "grad_norm": 0.17281416058540344,
      "learning_rate": 0.00012334453638559057,
      "loss": 0.0074,
      "step": 27
    },
    {
      "epoch": 0.0011719403984597355,
      "grad_norm": 0.12564797699451447,
      "learning_rate": 0.0001156434465040231,
      "loss": 0.0037,
      "step": 28
    },
    {
      "epoch": 0.0012137954126904403,
      "grad_norm": 0.13228414952754974,
      "learning_rate": 0.0001078459095727845,
      "loss": 0.0029,
      "step": 29
    },
    {
      "epoch": 0.0012556504269211452,
      "grad_norm": 0.20889438688755035,
      "learning_rate": 0.0001,
      "loss": 0.0114,
      "step": 30
    },
    {
      "epoch": 0.0012556504269211452,
      "eval_loss": 0.00621896144002676,
      "eval_runtime": 955.609,
      "eval_samples_per_second": 10.527,
      "eval_steps_per_second": 5.264,
      "step": 30
    },
    {
      "epoch": 0.00129750544115185,
      "grad_norm": 0.07152388989925385,
      "learning_rate": 9.215409042721552e-05,
      "loss": 0.005,
      "step": 31
    },
    {
      "epoch": 0.0013393604553825547,
      "grad_norm": 0.2723340392112732,
      "learning_rate": 8.435655349597689e-05,
      "loss": 0.0044,
      "step": 32
    },
    {
      "epoch": 0.0013812154696132596,
      "grad_norm": 0.04145659878849983,
      "learning_rate": 7.66554636144095e-05,
      "loss": 0.0016,
      "step": 33
    },
    {
      "epoch": 0.0014230704838439645,
      "grad_norm": 0.06280238926410675,
      "learning_rate": 6.909830056250527e-05,
      "loss": 0.0019,
      "step": 34
    },
    {
      "epoch": 0.0014649254980746694,
      "grad_norm": 0.16065062582492828,
      "learning_rate": 6.173165676349103e-05,
      "loss": 0.0046,
      "step": 35
    },
    {
      "epoch": 0.0014649254980746694,
      "eval_loss": 0.004886394366621971,
      "eval_runtime": 955.9011,
      "eval_samples_per_second": 10.524,
      "eval_steps_per_second": 5.262,
      "step": 35
    },
    {
      "epoch": 0.0015067805123053742,
      "grad_norm": 0.18025602400302887,
      "learning_rate": 5.4600950026045326e-05,
      "loss": 0.0063,
      "step": 36
    },
    {
      "epoch": 0.001548635526536079,
      "grad_norm": 0.12437538802623749,
      "learning_rate": 4.7750143528405126e-05,
      "loss": 0.0032,
      "step": 37
    },
    {
      "epoch": 0.001590490540766784,
      "grad_norm": 0.08849278837442398,
      "learning_rate": 4.12214747707527e-05,
      "loss": 0.0021,
      "step": 38
    },
    {
      "epoch": 0.0016323455549974886,
      "grad_norm": 0.034282781183719635,
      "learning_rate": 3.5055195166981645e-05,
      "loss": 0.0013,
      "step": 39
    },
    {
      "epoch": 0.0016742005692281935,
      "grad_norm": 0.06674959510564804,
      "learning_rate": 2.9289321881345254e-05,
      "loss": 0.0017,
      "step": 40
    },
    {
      "epoch": 0.0016742005692281935,
      "eval_loss": 0.004585847724229097,
      "eval_runtime": 956.0546,
      "eval_samples_per_second": 10.522,
      "eval_steps_per_second": 5.261,
      "step": 40
    },
    {
      "epoch": 0.0017160555834588984,
      "grad_norm": 0.028878623619675636,
      "learning_rate": 2.3959403439996907e-05,
      "loss": 0.0012,
      "step": 41
    },
    {
      "epoch": 0.0017579105976896032,
      "grad_norm": 0.15476588904857635,
      "learning_rate": 1.9098300562505266e-05,
      "loss": 0.006,
      "step": 42
    },
    {
      "epoch": 0.001799765611920308,
      "grad_norm": 0.06052372604608536,
      "learning_rate": 1.4735983564590783e-05,
      "loss": 0.0022,
      "step": 43
    },
    {
      "epoch": 0.001841620626151013,
      "grad_norm": 0.01776609756052494,
      "learning_rate": 1.0899347581163221e-05,
      "loss": 0.0009,
      "step": 44
    },
    {
      "epoch": 0.0018834756403817178,
      "grad_norm": 0.1711350679397583,
      "learning_rate": 7.612046748871327e-06,
      "loss": 0.0118,
      "step": 45
    },
    {
      "epoch": 0.0018834756403817178,
      "eval_loss": 0.004424721002578735,
      "eval_runtime": 956.2757,
      "eval_samples_per_second": 10.52,
      "eval_steps_per_second": 5.26,
      "step": 45
    },
    {
      "epoch": 0.0019253306546124225,
      "grad_norm": 0.12957611680030823,
      "learning_rate": 4.8943483704846475e-06,
      "loss": 0.0053,
      "step": 46
    },
    {
      "epoch": 0.0019671856688431274,
      "grad_norm": 0.05963761731982231,
      "learning_rate": 2.7630079602323442e-06,
      "loss": 0.004,
      "step": 47
    },
    {
      "epoch": 0.0020090406830738324,
      "grad_norm": 0.18093325197696686,
      "learning_rate": 1.231165940486234e-06,
      "loss": 0.008,
      "step": 48
    },
    {
      "epoch": 0.002050895697304537,
      "grad_norm": 0.24867264926433563,
      "learning_rate": 3.0826662668720364e-07,
      "loss": 0.0088,
      "step": 49
    },
    {
      "epoch": 0.0020927507115352417,
      "grad_norm": 0.02437353879213333,
      "learning_rate": 0.0,
      "loss": 0.0016,
      "step": 50
    },
    {
      "epoch": 0.0020927507115352417,
      "eval_loss": 0.004385404288768768,
      "eval_runtime": 956.438,
      "eval_samples_per_second": 10.518,
      "eval_steps_per_second": 5.259,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 50,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 70,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 8856525581844480.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}