{
"best_metric": 0.23508746922016144,
"best_model_checkpoint": "miner_id_24/checkpoint-100",
"epoch": 0.007125552230297848,
"eval_steps": 50,
"global_step": 100,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 7.125552230297848e-05,
"grad_norm": 1.042211651802063,
"learning_rate": 1e-06,
"loss": 0.3057,
"step": 1
},
{
"epoch": 7.125552230297848e-05,
"eval_loss": 0.7187018990516663,
"eval_runtime": 728.4647,
"eval_samples_per_second": 8.112,
"eval_steps_per_second": 2.029,
"step": 1
},
{
"epoch": 0.00014251104460595695,
"grad_norm": 1.606184959411621,
"learning_rate": 2e-06,
"loss": 0.4561,
"step": 2
},
{
"epoch": 0.00021376656690893543,
"grad_norm": 1.5126073360443115,
"learning_rate": 3e-06,
"loss": 0.4418,
"step": 3
},
{
"epoch": 0.0002850220892119139,
"grad_norm": 1.9431225061416626,
"learning_rate": 4e-06,
"loss": 0.5693,
"step": 4
},
{
"epoch": 0.0003562776115148924,
"grad_norm": 1.9813414812088013,
"learning_rate": 4.9999999999999996e-06,
"loss": 0.5906,
"step": 5
},
{
"epoch": 0.00042753313381787086,
"grad_norm": 1.8735884428024292,
"learning_rate": 6e-06,
"loss": 0.4769,
"step": 6
},
{
"epoch": 0.0004987886561208494,
"grad_norm": 1.8006073236465454,
"learning_rate": 7e-06,
"loss": 0.5359,
"step": 7
},
{
"epoch": 0.0005700441784238278,
"grad_norm": 1.539935827255249,
"learning_rate": 8e-06,
"loss": 0.4696,
"step": 8
},
{
"epoch": 0.0006412997007268063,
"grad_norm": 2.0199615955352783,
"learning_rate": 9e-06,
"loss": 0.5917,
"step": 9
},
{
"epoch": 0.0007125552230297848,
"grad_norm": 1.9582463502883911,
"learning_rate": 9.999999999999999e-06,
"loss": 0.4258,
"step": 10
},
{
"epoch": 0.0007838107453327633,
"grad_norm": 1.63983952999115,
"learning_rate": 1.1e-05,
"loss": 0.3967,
"step": 11
},
{
"epoch": 0.0008550662676357417,
"grad_norm": 1.615180253982544,
"learning_rate": 1.2e-05,
"loss": 0.4462,
"step": 12
},
{
"epoch": 0.0009263217899387203,
"grad_norm": 1.6889790296554565,
"learning_rate": 1.3000000000000001e-05,
"loss": 0.3865,
"step": 13
},
{
"epoch": 0.0009975773122416988,
"grad_norm": 1.7628371715545654,
"learning_rate": 1.4e-05,
"loss": 0.5235,
"step": 14
},
{
"epoch": 0.0010688328345446773,
"grad_norm": 1.7251651287078857,
"learning_rate": 1.5e-05,
"loss": 0.2985,
"step": 15
},
{
"epoch": 0.0011400883568476556,
"grad_norm": 1.7641141414642334,
"learning_rate": 1.6e-05,
"loss": 0.4062,
"step": 16
},
{
"epoch": 0.0012113438791506342,
"grad_norm": 1.7730730772018433,
"learning_rate": 1.7e-05,
"loss": 0.2535,
"step": 17
},
{
"epoch": 0.0012825994014536127,
"grad_norm": 1.6763519048690796,
"learning_rate": 1.8e-05,
"loss": 0.2611,
"step": 18
},
{
"epoch": 0.0013538549237565912,
"grad_norm": 1.8056228160858154,
"learning_rate": 1.9e-05,
"loss": 0.3504,
"step": 19
},
{
"epoch": 0.0014251104460595695,
"grad_norm": 1.7897692918777466,
"learning_rate": 1.9999999999999998e-05,
"loss": 0.2584,
"step": 20
},
{
"epoch": 0.001496365968362548,
"grad_norm": 1.8470932245254517,
"learning_rate": 2.1e-05,
"loss": 0.2949,
"step": 21
},
{
"epoch": 0.0015676214906655266,
"grad_norm": 1.2270846366882324,
"learning_rate": 2.2e-05,
"loss": 0.2835,
"step": 22
},
{
"epoch": 0.0016388770129685051,
"grad_norm": 1.2478747367858887,
"learning_rate": 2.3000000000000003e-05,
"loss": 0.2585,
"step": 23
},
{
"epoch": 0.0017101325352714834,
"grad_norm": 1.7448375225067139,
"learning_rate": 2.4e-05,
"loss": 0.3246,
"step": 24
},
{
"epoch": 0.001781388057574462,
"grad_norm": 1.377163290977478,
"learning_rate": 2.5e-05,
"loss": 0.1817,
"step": 25
},
{
"epoch": 0.0018526435798774405,
"grad_norm": 1.6893903017044067,
"learning_rate": 2.6000000000000002e-05,
"loss": 0.2882,
"step": 26
},
{
"epoch": 0.001923899102180419,
"grad_norm": 1.412766456604004,
"learning_rate": 2.7000000000000002e-05,
"loss": 0.293,
"step": 27
},
{
"epoch": 0.0019951546244833976,
"grad_norm": 1.3407995700836182,
"learning_rate": 2.8e-05,
"loss": 0.1812,
"step": 28
},
{
"epoch": 0.002066410146786376,
"grad_norm": 1.8757520914077759,
"learning_rate": 2.9e-05,
"loss": 0.2906,
"step": 29
},
{
"epoch": 0.0021376656690893546,
"grad_norm": 1.1567577123641968,
"learning_rate": 3e-05,
"loss": 0.2146,
"step": 30
},
{
"epoch": 0.002208921191392333,
"grad_norm": 1.3666956424713135,
"learning_rate": 2.9984895998119723e-05,
"loss": 0.1976,
"step": 31
},
{
"epoch": 0.0022801767136953113,
"grad_norm": 1.3091264963150024,
"learning_rate": 2.993961440992859e-05,
"loss": 0.1436,
"step": 32
},
{
"epoch": 0.00235143223599829,
"grad_norm": 1.0269235372543335,
"learning_rate": 2.9864246426519023e-05,
"loss": 0.1848,
"step": 33
},
{
"epoch": 0.0024226877583012683,
"grad_norm": 1.5328474044799805,
"learning_rate": 2.9758943828979444e-05,
"loss": 0.2269,
"step": 34
},
{
"epoch": 0.0024939432806042466,
"grad_norm": 1.4854594469070435,
"learning_rate": 2.9623918682727355e-05,
"loss": 0.1739,
"step": 35
},
{
"epoch": 0.0025651988029072254,
"grad_norm": 1.223655104637146,
"learning_rate": 2.9459442910437798e-05,
"loss": 0.1476,
"step": 36
},
{
"epoch": 0.0026364543252102037,
"grad_norm": 1.455477237701416,
"learning_rate": 2.9265847744427305e-05,
"loss": 0.1762,
"step": 37
},
{
"epoch": 0.0027077098475131824,
"grad_norm": 1.223581075668335,
"learning_rate": 2.904352305959606e-05,
"loss": 0.1392,
"step": 38
},
{
"epoch": 0.0027789653698161608,
"grad_norm": 1.1783467531204224,
"learning_rate": 2.8792916588271762e-05,
"loss": 0.1729,
"step": 39
},
{
"epoch": 0.002850220892119139,
"grad_norm": 1.8590701818466187,
"learning_rate": 2.8514533018536286e-05,
"loss": 0.3492,
"step": 40
},
{
"epoch": 0.002921476414422118,
"grad_norm": 1.1538301706314087,
"learning_rate": 2.820893297785107e-05,
"loss": 0.1841,
"step": 41
},
{
"epoch": 0.002992731936725096,
"grad_norm": 1.5694233179092407,
"learning_rate": 2.7876731904027994e-05,
"loss": 0.2156,
"step": 42
},
{
"epoch": 0.003063987459028075,
"grad_norm": 1.357988953590393,
"learning_rate": 2.7518598805819542e-05,
"loss": 0.2037,
"step": 43
},
{
"epoch": 0.003135242981331053,
"grad_norm": 1.150630235671997,
"learning_rate": 2.7135254915624213e-05,
"loss": 0.1607,
"step": 44
},
{
"epoch": 0.0032064985036340315,
"grad_norm": 1.2249001264572144,
"learning_rate": 2.672747223702045e-05,
"loss": 0.2267,
"step": 45
},
{
"epoch": 0.0032777540259370103,
"grad_norm": 1.1600937843322754,
"learning_rate": 2.6296071990054167e-05,
"loss": 0.1363,
"step": 46
},
{
"epoch": 0.0033490095482399886,
"grad_norm": 1.2449615001678467,
"learning_rate": 2.5841922957410875e-05,
"loss": 0.2024,
"step": 47
},
{
"epoch": 0.003420265070542967,
"grad_norm": 1.292077660560608,
"learning_rate": 2.5365939734802973e-05,
"loss": 0.1311,
"step": 48
},
{
"epoch": 0.0034915205928459456,
"grad_norm": 1.2999500036239624,
"learning_rate": 2.4869080889095693e-05,
"loss": 0.1977,
"step": 49
},
{
"epoch": 0.003562776115148924,
"grad_norm": 1.0472160577774048,
"learning_rate": 2.4352347027881003e-05,
"loss": 0.1228,
"step": 50
},
{
"epoch": 0.003562776115148924,
"eval_loss": 0.2451000064611435,
"eval_runtime": 732.9403,
"eval_samples_per_second": 8.062,
"eval_steps_per_second": 2.017,
"step": 50
},
{
"epoch": 0.0036340316374519027,
"grad_norm": 0.9585462808609009,
"learning_rate": 2.3816778784387097e-05,
"loss": 0.4262,
"step": 51
},
{
"epoch": 0.003705287159754881,
"grad_norm": 1.3350521326065063,
"learning_rate": 2.3263454721781537e-05,
"loss": 0.3508,
"step": 52
},
{
"epoch": 0.0037765426820578593,
"grad_norm": 0.9149147868156433,
"learning_rate": 2.2693489161088592e-05,
"loss": 0.2918,
"step": 53
},
{
"epoch": 0.003847798204360838,
"grad_norm": 0.9929086565971375,
"learning_rate": 2.210802993709498e-05,
"loss": 0.2762,
"step": 54
},
{
"epoch": 0.003919053726663816,
"grad_norm": 1.152302861213684,
"learning_rate": 2.1508256086763372e-05,
"loss": 0.4493,
"step": 55
},
{
"epoch": 0.003990309248966795,
"grad_norm": 0.7821376919746399,
"learning_rate": 2.0895375474808857e-05,
"loss": 0.3416,
"step": 56
},
{
"epoch": 0.004061564771269773,
"grad_norm": 0.9475064277648926,
"learning_rate": 2.0270622361220143e-05,
"loss": 0.2441,
"step": 57
},
{
"epoch": 0.004132820293572752,
"grad_norm": 1.1275452375411987,
"learning_rate": 1.963525491562421e-05,
"loss": 0.2919,
"step": 58
},
{
"epoch": 0.0042040758158757305,
"grad_norm": 1.3180280923843384,
"learning_rate": 1.8990552683500128e-05,
"loss": 0.3509,
"step": 59
},
{
"epoch": 0.004275331338178709,
"grad_norm": 0.9340567588806152,
"learning_rate": 1.8337814009344716e-05,
"loss": 0.3231,
"step": 60
},
{
"epoch": 0.004346586860481687,
"grad_norm": 0.8491362929344177,
"learning_rate": 1.767835342197955e-05,
"loss": 0.241,
"step": 61
},
{
"epoch": 0.004417842382784666,
"grad_norm": 1.1892591714859009,
"learning_rate": 1.7013498987264832e-05,
"loss": 0.2353,
"step": 62
},
{
"epoch": 0.004489097905087645,
"grad_norm": 0.8448580503463745,
"learning_rate": 1.6344589633551502e-05,
"loss": 0.317,
"step": 63
},
{
"epoch": 0.0045603534273906225,
"grad_norm": 1.1618448495864868,
"learning_rate": 1.5672972455257726e-05,
"loss": 0.3225,
"step": 64
},
{
"epoch": 0.004631608949693601,
"grad_norm": 1.12700355052948,
"learning_rate": 1.5e-05,
"loss": 0.3619,
"step": 65
},
{
"epoch": 0.00470286447199658,
"grad_norm": 1.1308168172836304,
"learning_rate": 1.4327027544742281e-05,
"loss": 0.3482,
"step": 66
},
{
"epoch": 0.004774119994299558,
"grad_norm": 0.7516178488731384,
"learning_rate": 1.36554103664485e-05,
"loss": 0.1622,
"step": 67
},
{
"epoch": 0.004845375516602537,
"grad_norm": 1.1769253015518188,
"learning_rate": 1.2986501012735174e-05,
"loss": 0.2047,
"step": 68
},
{
"epoch": 0.004916631038905515,
"grad_norm": 0.9646146297454834,
"learning_rate": 1.2321646578020452e-05,
"loss": 0.2437,
"step": 69
},
{
"epoch": 0.004987886561208493,
"grad_norm": 1.2298526763916016,
"learning_rate": 1.1662185990655285e-05,
"loss": 0.4731,
"step": 70
},
{
"epoch": 0.005059142083511472,
"grad_norm": 0.7508978247642517,
"learning_rate": 1.1009447316499875e-05,
"loss": 0.1173,
"step": 71
},
{
"epoch": 0.005130397605814451,
"grad_norm": 1.2414405345916748,
"learning_rate": 1.036474508437579e-05,
"loss": 0.293,
"step": 72
},
{
"epoch": 0.0052016531281174295,
"grad_norm": 0.951433539390564,
"learning_rate": 9.729377638779859e-06,
"loss": 0.1768,
"step": 73
},
{
"epoch": 0.005272908650420407,
"grad_norm": 1.0032628774642944,
"learning_rate": 9.104624525191147e-06,
"loss": 0.2115,
"step": 74
},
{
"epoch": 0.005344164172723386,
"grad_norm": 1.182035207748413,
"learning_rate": 8.491743913236629e-06,
"loss": 0.2316,
"step": 75
},
{
"epoch": 0.005415419695026365,
"grad_norm": 1.3811928033828735,
"learning_rate": 7.89197006290502e-06,
"loss": 0.2767,
"step": 76
},
{
"epoch": 0.005486675217329343,
"grad_norm": 1.2715567350387573,
"learning_rate": 7.30651083891141e-06,
"loss": 0.1368,
"step": 77
},
{
"epoch": 0.0055579307396323215,
"grad_norm": 1.1821553707122803,
"learning_rate": 6.736545278218464e-06,
"loss": 0.164,
"step": 78
},
{
"epoch": 0.0056291862619353,
"grad_norm": 1.0368329286575317,
"learning_rate": 6.1832212156129045e-06,
"loss": 0.2001,
"step": 79
},
{
"epoch": 0.005700441784238278,
"grad_norm": 0.8645971417427063,
"learning_rate": 5.647652972118998e-06,
"loss": 0.1707,
"step": 80
},
{
"epoch": 0.005771697306541257,
"grad_norm": 0.9727141857147217,
"learning_rate": 5.130919110904311e-06,
"loss": 0.2369,
"step": 81
},
{
"epoch": 0.005842952828844236,
"grad_norm": 0.7467105388641357,
"learning_rate": 4.6340602651970304e-06,
"loss": 0.1482,
"step": 82
},
{
"epoch": 0.0059142083511472135,
"grad_norm": 1.139006495475769,
"learning_rate": 4.158077042589129e-06,
"loss": 0.1787,
"step": 83
},
{
"epoch": 0.005985463873450192,
"grad_norm": 1.2143248319625854,
"learning_rate": 3.7039280099458373e-06,
"loss": 0.2871,
"step": 84
},
{
"epoch": 0.006056719395753171,
"grad_norm": 2.230907678604126,
"learning_rate": 3.272527762979553e-06,
"loss": 0.2485,
"step": 85
},
{
"epoch": 0.00612797491805615,
"grad_norm": 1.4834065437316895,
"learning_rate": 2.86474508437579e-06,
"loss": 0.2082,
"step": 86
},
{
"epoch": 0.006199230440359128,
"grad_norm": 1.2078521251678467,
"learning_rate": 2.4814011941804603e-06,
"loss": 0.205,
"step": 87
},
{
"epoch": 0.006270485962662106,
"grad_norm": 1.078839659690857,
"learning_rate": 2.1232680959720085e-06,
"loss": 0.1255,
"step": 88
},
{
"epoch": 0.006341741484965085,
"grad_norm": 1.7553359270095825,
"learning_rate": 1.79106702214893e-06,
"loss": 0.271,
"step": 89
},
{
"epoch": 0.006412997007268063,
"grad_norm": 1.4102157354354858,
"learning_rate": 1.4854669814637145e-06,
"loss": 0.192,
"step": 90
},
{
"epoch": 0.006484252529571042,
"grad_norm": 0.9362900853157043,
"learning_rate": 1.2070834117282414e-06,
"loss": 0.1759,
"step": 91
},
{
"epoch": 0.0065555080518740205,
"grad_norm": 1.3166675567626953,
"learning_rate": 9.56476940403942e-07,
"loss": 0.2363,
"step": 92
},
{
"epoch": 0.006626763574176998,
"grad_norm": 1.0089662075042725,
"learning_rate": 7.341522555726971e-07,
"loss": 0.2477,
"step": 93
},
{
"epoch": 0.006698019096479977,
"grad_norm": 1.362122654914856,
"learning_rate": 5.405570895622014e-07,
"loss": 0.2324,
"step": 94
},
{
"epoch": 0.006769274618782956,
"grad_norm": 0.8471099734306335,
"learning_rate": 3.760813172726457e-07,
"loss": 0.1179,
"step": 95
},
{
"epoch": 0.006840530141085934,
"grad_norm": 1.2971007823944092,
"learning_rate": 2.41056171020555e-07,
"loss": 0.1556,
"step": 96
},
{
"epoch": 0.0069117856633889125,
"grad_norm": 1.1084494590759277,
"learning_rate": 1.357535734809795e-07,
"loss": 0.2148,
"step": 97
},
{
"epoch": 0.006983041185691891,
"grad_norm": 1.6969143152236938,
"learning_rate": 6.038559007141397e-08,
"loss": 0.2654,
"step": 98
},
{
"epoch": 0.00705429670799487,
"grad_norm": 1.321176290512085,
"learning_rate": 1.510400188028116e-08,
"loss": 0.2057,
"step": 99
},
{
"epoch": 0.007125552230297848,
"grad_norm": 1.1489577293395996,
"learning_rate": 0.0,
"loss": 0.1503,
"step": 100
},
{
"epoch": 0.007125552230297848,
"eval_loss": 0.23508746922016144,
"eval_runtime": 733.1631,
"eval_samples_per_second": 8.06,
"eval_steps_per_second": 2.016,
"step": 100
}
],
"logging_steps": 1,
"max_steps": 100,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 50,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 5,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 3.808996786333286e+16,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}
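
A minimal sketch (not part of the checkpoint itself) of how this trainer_state.json can be summarized with the Python standard library. It assumes the file has been saved locally as "trainer_state.json"; the path is an assumption, adjust it to wherever the checkpoint directory lives.

import json

# Load the Trainer state written alongside the checkpoint.
with open("trainer_state.json") as f:
    state = json.load(f)

# log_history interleaves training-loss records and evaluation records;
# split them by which keys each entry carries.
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_loss" in e]

print(f"best_metric (eval_loss): {state['best_metric']:.4f}")
print(f"best checkpoint: {state['best_model_checkpoint']}")

# Evaluation runs (every eval_steps = 50 steps in this state).
for e in eval_logs:
    print(f"step {e['step']:>3}: eval_loss={e['eval_loss']:.4f} "
          f"({e['eval_samples_per_second']:.2f} samples/s)")

# Training loss at the logged steps (logging_steps = 1 here).
steps = [e["step"] for e in train_logs]
losses = [e["loss"] for e in train_logs]
print(f"train loss: {losses[0]:.3f} at step {steps[0]} "
      f"-> {losses[-1]:.3f} at step {steps[-1]}")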