{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.0025556710340245005,
"eval_steps": 25,
"global_step": 75,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 3.407561378699334e-05,
"grad_norm": 2.5001614093780518,
"learning_rate": 3.3333333333333335e-05,
"loss": 1.825,
"step": 1
},
{
"epoch": 3.407561378699334e-05,
"eval_loss": 5.655045509338379,
"eval_runtime": 5197.3091,
"eval_samples_per_second": 2.378,
"eval_steps_per_second": 1.189,
"step": 1
},
{
"epoch": 6.815122757398668e-05,
"grad_norm": 2.1226208209991455,
"learning_rate": 6.666666666666667e-05,
"loss": 1.6844,
"step": 2
},
{
"epoch": 0.00010222684136098001,
"grad_norm": 2.5686793327331543,
"learning_rate": 0.0001,
"loss": 2.1539,
"step": 3
},
{
"epoch": 0.00013630245514797336,
"grad_norm": 4.603909015655518,
"learning_rate": 9.99571699711836e-05,
"loss": 4.0091,
"step": 4
},
{
"epoch": 0.0001703780689349667,
"grad_norm": 2.2434098720550537,
"learning_rate": 9.982876141412856e-05,
"loss": 1.482,
"step": 5
},
{
"epoch": 0.00020445368272196002,
"grad_norm": 2.6793839931488037,
"learning_rate": 9.961501876182148e-05,
"loss": 0.8997,
"step": 6
},
{
"epoch": 0.00023852929650895337,
"grad_norm": 3.525517463684082,
"learning_rate": 9.931634888554937e-05,
"loss": 1.328,
"step": 7
},
{
"epoch": 0.0002726049102959467,
"grad_norm": 4.246636867523193,
"learning_rate": 9.893332032039701e-05,
"loss": 1.3833,
"step": 8
},
{
"epoch": 0.00030668052408294006,
"grad_norm": 5.746326923370361,
"learning_rate": 9.846666218300807e-05,
"loss": 1.1267,
"step": 9
},
{
"epoch": 0.0003407561378699334,
"grad_norm": 5.541106700897217,
"learning_rate": 9.791726278367022e-05,
"loss": 0.8371,
"step": 10
},
{
"epoch": 0.0003748317516569267,
"grad_norm": 4.492342472076416,
"learning_rate": 9.728616793536588e-05,
"loss": 0.8644,
"step": 11
},
{
"epoch": 0.00040890736544392005,
"grad_norm": 5.070585250854492,
"learning_rate": 9.657457896300791e-05,
"loss": 0.5652,
"step": 12
},
{
"epoch": 0.0004429829792309134,
"grad_norm": 3.305798292160034,
"learning_rate": 9.578385041664925e-05,
"loss": 0.5209,
"step": 13
},
{
"epoch": 0.00047705859301790674,
"grad_norm": 1.7791708707809448,
"learning_rate": 9.491548749301997e-05,
"loss": 0.1586,
"step": 14
},
{
"epoch": 0.0005111342068049,
"grad_norm": 4.680875301361084,
"learning_rate": 9.397114317029975e-05,
"loss": 0.5646,
"step": 15
},
{
"epoch": 0.0005452098205918934,
"grad_norm": 3.8708016872406006,
"learning_rate": 9.295261506157986e-05,
"loss": 0.431,
"step": 16
},
{
"epoch": 0.0005792854343788867,
"grad_norm": 4.305510520935059,
"learning_rate": 9.186184199300464e-05,
"loss": 0.4888,
"step": 17
},
{
"epoch": 0.0006133610481658801,
"grad_norm": 4.049569606781006,
"learning_rate": 9.070090031310558e-05,
"loss": 0.2277,
"step": 18
},
{
"epoch": 0.0006474366619528734,
"grad_norm": 3.324610948562622,
"learning_rate": 8.947199994035401e-05,
"loss": 0.2978,
"step": 19
},
{
"epoch": 0.0006815122757398668,
"grad_norm": 3.7074697017669678,
"learning_rate": 8.817748015645558e-05,
"loss": 0.3028,
"step": 20
},
{
"epoch": 0.0007155878895268601,
"grad_norm": 1.7295836210250854,
"learning_rate": 8.681980515339464e-05,
"loss": 0.0916,
"step": 21
},
{
"epoch": 0.0007496635033138534,
"grad_norm": 2.569653272628784,
"learning_rate": 8.540155934270471e-05,
"loss": 0.3138,
"step": 22
},
{
"epoch": 0.0007837391171008468,
"grad_norm": 2.379580020904541,
"learning_rate": 8.392544243589427e-05,
"loss": 0.2004,
"step": 23
},
{
"epoch": 0.0008178147308878401,
"grad_norm": 2.292313814163208,
"learning_rate": 8.239426430539243e-05,
"loss": 0.1673,
"step": 24
},
{
"epoch": 0.0008518903446748335,
"grad_norm": 2.342252731323242,
"learning_rate": 8.081093963579707e-05,
"loss": 0.2094,
"step": 25
},
{
"epoch": 0.0008518903446748335,
"eval_loss": 0.17272759974002838,
"eval_runtime": 5196.4661,
"eval_samples_per_second": 2.378,
"eval_steps_per_second": 1.189,
"step": 25
},
{
"epoch": 0.0008859659584618268,
"grad_norm": 3.6202399730682373,
"learning_rate": 7.917848237560709e-05,
"loss": 0.127,
"step": 26
},
{
"epoch": 0.0009200415722488202,
"grad_norm": 1.361546516418457,
"learning_rate": 7.75e-05,
"loss": 0.0881,
"step": 27
},
{
"epoch": 0.0009541171860358135,
"grad_norm": 3.2597994804382324,
"learning_rate": 7.577868759557654e-05,
"loss": 0.209,
"step": 28
},
{
"epoch": 0.0009881927998228069,
"grad_norm": 2.3739514350891113,
"learning_rate": 7.401782177833148e-05,
"loss": 0.0655,
"step": 29
},
{
"epoch": 0.0010222684136098,
"grad_norm": 0.39956721663475037,
"learning_rate": 7.222075445642904e-05,
"loss": 0.013,
"step": 30
},
{
"epoch": 0.0010563440273967935,
"grad_norm": 2.310549736022949,
"learning_rate": 7.03909064496551e-05,
"loss": 0.0997,
"step": 31
},
{
"epoch": 0.0010904196411837869,
"grad_norm": 3.8629372119903564,
"learning_rate": 6.853176097769229e-05,
"loss": 0.1503,
"step": 32
},
{
"epoch": 0.0011244952549707803,
"grad_norm": 1.4826256036758423,
"learning_rate": 6.664685702961344e-05,
"loss": 0.0285,
"step": 33
},
{
"epoch": 0.0011585708687577734,
"grad_norm": 1.613270878791809,
"learning_rate": 6.473978262721463e-05,
"loss": 0.0595,
"step": 34
},
{
"epoch": 0.0011926464825447668,
"grad_norm": 0.6385797262191772,
"learning_rate": 6.281416799501188e-05,
"loss": 0.0196,
"step": 35
},
{
"epoch": 0.0012267220963317602,
"grad_norm": 1.283711552619934,
"learning_rate": 6.087367864990233e-05,
"loss": 0.0276,
"step": 36
},
{
"epoch": 0.0012607977101187534,
"grad_norm": 5.805143356323242,
"learning_rate": 5.8922008423644624e-05,
"loss": 0.1078,
"step": 37
},
{
"epoch": 0.0012948733239057468,
"grad_norm": 0.6078380346298218,
"learning_rate": 5.696287243144013e-05,
"loss": 0.0143,
"step": 38
},
{
"epoch": 0.0013289489376927402,
"grad_norm": 1.3938196897506714,
"learning_rate": 5.500000000000001e-05,
"loss": 0.0196,
"step": 39
},
{
"epoch": 0.0013630245514797336,
"grad_norm": 4.788840293884277,
"learning_rate": 5.303712756855988e-05,
"loss": 0.1522,
"step": 40
},
{
"epoch": 0.0013971001652667268,
"grad_norm": 7.833195686340332,
"learning_rate": 5.107799157635538e-05,
"loss": 0.0832,
"step": 41
},
{
"epoch": 0.0014311757790537202,
"grad_norm": 6.889073371887207,
"learning_rate": 4.912632135009769e-05,
"loss": 0.1242,
"step": 42
},
{
"epoch": 0.0014652513928407136,
"grad_norm": 6.13986349105835,
"learning_rate": 4.718583200498814e-05,
"loss": 0.0414,
"step": 43
},
{
"epoch": 0.0014993270066277068,
"grad_norm": 7.624245643615723,
"learning_rate": 4.526021737278538e-05,
"loss": 0.1478,
"step": 44
},
{
"epoch": 0.0015334026204147002,
"grad_norm": 0.07408539205789566,
"learning_rate": 4.3353142970386564e-05,
"loss": 0.0022,
"step": 45
},
{
"epoch": 0.0015674782342016936,
"grad_norm": 1.7765361070632935,
"learning_rate": 4.146823902230772e-05,
"loss": 0.1055,
"step": 46
},
{
"epoch": 0.001601553847988687,
"grad_norm": 1.6538923978805542,
"learning_rate": 3.960909355034491e-05,
"loss": 0.0226,
"step": 47
},
{
"epoch": 0.0016356294617756802,
"grad_norm": 13.931591987609863,
"learning_rate": 3.777924554357096e-05,
"loss": 0.2841,
"step": 48
},
{
"epoch": 0.0016697050755626736,
"grad_norm": 11.1533203125,
"learning_rate": 3.598217822166854e-05,
"loss": 0.1394,
"step": 49
},
{
"epoch": 0.001703780689349667,
"grad_norm": 4.493430137634277,
"learning_rate": 3.422131240442349e-05,
"loss": 0.0744,
"step": 50
},
{
"epoch": 0.001703780689349667,
"eval_loss": 0.0442812517285347,
"eval_runtime": 5199.0583,
"eval_samples_per_second": 2.377,
"eval_steps_per_second": 1.188,
"step": 50
},
{
"epoch": 0.0017378563031366602,
"grad_norm": 4.004736423492432,
"learning_rate": 3.250000000000001e-05,
"loss": 0.4785,
"step": 51
},
{
"epoch": 0.0017719319169236536,
"grad_norm": 0.8578753471374512,
"learning_rate": 3.082151762439293e-05,
"loss": 0.0086,
"step": 52
},
{
"epoch": 0.001806007530710647,
"grad_norm": 1.8137702941894531,
"learning_rate": 2.9189060364202943e-05,
"loss": 0.0333,
"step": 53
},
{
"epoch": 0.0018400831444976404,
"grad_norm": 0.4614853262901306,
"learning_rate": 2.760573569460757e-05,
"loss": 0.0078,
"step": 54
},
{
"epoch": 0.0018741587582846336,
"grad_norm": 0.09576446563005447,
"learning_rate": 2.6074557564105727e-05,
"loss": 0.0015,
"step": 55
},
{
"epoch": 0.001908234372071627,
"grad_norm": 0.09370886534452438,
"learning_rate": 2.459844065729529e-05,
"loss": 0.0019,
"step": 56
},
{
"epoch": 0.0019423099858586204,
"grad_norm": 0.4553550183773041,
"learning_rate": 2.3180194846605367e-05,
"loss": 0.0037,
"step": 57
},
{
"epoch": 0.0019763855996456138,
"grad_norm": 0.11098048835992813,
"learning_rate": 2.1822519843544424e-05,
"loss": 0.0025,
"step": 58
},
{
"epoch": 0.002010461213432607,
"grad_norm": 0.11278193444013596,
"learning_rate": 2.0528000059645997e-05,
"loss": 0.0022,
"step": 59
},
{
"epoch": 0.0020445368272196,
"grad_norm": 1.5145128965377808,
"learning_rate": 1.9299099686894423e-05,
"loss": 0.042,
"step": 60
},
{
"epoch": 0.0020786124410065937,
"grad_norm": 0.15734955668449402,
"learning_rate": 1.8138158006995364e-05,
"loss": 0.0031,
"step": 61
},
{
"epoch": 0.002112688054793587,
"grad_norm": 0.16131563484668732,
"learning_rate": 1.7047384938420154e-05,
"loss": 0.0041,
"step": 62
},
{
"epoch": 0.00214676366858058,
"grad_norm": 1.3162636756896973,
"learning_rate": 1.602885682970026e-05,
"loss": 0.017,
"step": 63
},
{
"epoch": 0.0021808392823675737,
"grad_norm": 0.5748438239097595,
"learning_rate": 1.5084512506980026e-05,
"loss": 0.0109,
"step": 64
},
{
"epoch": 0.002214914896154567,
"grad_norm": 1.6675885915756226,
"learning_rate": 1.4216149583350754e-05,
"loss": 0.0192,
"step": 65
},
{
"epoch": 0.0022489905099415605,
"grad_norm": 0.22785191237926483,
"learning_rate": 1.3425421036992098e-05,
"loss": 0.0039,
"step": 66
},
{
"epoch": 0.0022830661237285537,
"grad_norm": 0.2156006395816803,
"learning_rate": 1.2713832064634126e-05,
"loss": 0.0043,
"step": 67
},
{
"epoch": 0.002317141737515547,
"grad_norm": 1.7005534172058105,
"learning_rate": 1.2082737216329794e-05,
"loss": 0.0998,
"step": 68
},
{
"epoch": 0.0023512173513025405,
"grad_norm": 0.23563118278980255,
"learning_rate": 1.1533337816991932e-05,
"loss": 0.0034,
"step": 69
},
{
"epoch": 0.0023852929650895337,
"grad_norm": 0.18278473615646362,
"learning_rate": 1.1066679679603e-05,
"loss": 0.0037,
"step": 70
},
{
"epoch": 0.002419368578876527,
"grad_norm": 2.191817045211792,
"learning_rate": 1.0683651114450641e-05,
"loss": 0.0575,
"step": 71
},
{
"epoch": 0.0024534441926635205,
"grad_norm": 0.29528528451919556,
"learning_rate": 1.0384981238178534e-05,
"loss": 0.0046,
"step": 72
},
{
"epoch": 0.0024875198064505137,
"grad_norm": 0.05887974426150322,
"learning_rate": 1.017123858587145e-05,
"loss": 0.0012,
"step": 73
},
{
"epoch": 0.002521595420237507,
"grad_norm": 0.8336066603660583,
"learning_rate": 1.00428300288164e-05,
"loss": 0.0193,
"step": 74
},
{
"epoch": 0.0025556710340245005,
"grad_norm": 0.5219399333000183,
"learning_rate": 1e-05,
"loss": 0.0082,
"step": 75
},
{
"epoch": 0.0025556710340245005,
"eval_loss": 0.019740475341677666,
"eval_runtime": 5200.5985,
"eval_samples_per_second": 2.376,
"eval_steps_per_second": 1.188,
"step": 75
}
],
"logging_steps": 1,
"max_steps": 75,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 25,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 2.1111982529917747e+17,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}