{
"best_metric": 0.2764929533004761,
"best_model_checkpoint": "miner_id_24/checkpoint-100",
"epoch": 0.2588996763754045,
"eval_steps": 50,
"global_step": 100,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0025889967637540453,
"grad_norm": 8.612391471862793,
"learning_rate": 3.3333333333333333e-06,
"loss": 1.8772,
"step": 1
},
{
"epoch": 0.0025889967637540453,
"eval_loss": 1.1311349868774414,
"eval_runtime": 63.1174,
"eval_samples_per_second": 41.241,
"eval_steps_per_second": 5.165,
"step": 1
},
{
"epoch": 0.005177993527508091,
"grad_norm": 10.561990737915039,
"learning_rate": 6.666666666666667e-06,
"loss": 2.3642,
"step": 2
},
{
"epoch": 0.007766990291262136,
"grad_norm": 12.674636840820312,
"learning_rate": 1e-05,
"loss": 2.7038,
"step": 3
},
{
"epoch": 0.010355987055016181,
"grad_norm": 11.861933708190918,
"learning_rate": 1.3333333333333333e-05,
"loss": 2.9593,
"step": 4
},
{
"epoch": 0.012944983818770227,
"grad_norm": 10.037701606750488,
"learning_rate": 1.6666666666666667e-05,
"loss": 2.8463,
"step": 5
},
{
"epoch": 0.015533980582524271,
"grad_norm": 8.951319694519043,
"learning_rate": 2e-05,
"loss": 2.6833,
"step": 6
},
{
"epoch": 0.018122977346278317,
"grad_norm": 11.811137199401855,
"learning_rate": 2.3333333333333336e-05,
"loss": 2.8029,
"step": 7
},
{
"epoch": 0.020711974110032363,
"grad_norm": 11.602544784545898,
"learning_rate": 2.6666666666666667e-05,
"loss": 2.5999,
"step": 8
},
{
"epoch": 0.02330097087378641,
"grad_norm": 10.852923393249512,
"learning_rate": 3e-05,
"loss": 2.3762,
"step": 9
},
{
"epoch": 0.025889967637540454,
"grad_norm": 16.908262252807617,
"learning_rate": 3.3333333333333335e-05,
"loss": 2.2558,
"step": 10
},
{
"epoch": 0.0284789644012945,
"grad_norm": 19.876672744750977,
"learning_rate": 3.6666666666666666e-05,
"loss": 1.9599,
"step": 11
},
{
"epoch": 0.031067961165048542,
"grad_norm": 36.011985778808594,
"learning_rate": 4e-05,
"loss": 1.9861,
"step": 12
},
{
"epoch": 0.03365695792880259,
"grad_norm": 37.01054000854492,
"learning_rate": 4.3333333333333334e-05,
"loss": 1.9578,
"step": 13
},
{
"epoch": 0.036245954692556634,
"grad_norm": 19.07244300842285,
"learning_rate": 4.666666666666667e-05,
"loss": 1.7452,
"step": 14
},
{
"epoch": 0.038834951456310676,
"grad_norm": 11.258912086486816,
"learning_rate": 5e-05,
"loss": 1.473,
"step": 15
},
{
"epoch": 0.041423948220064725,
"grad_norm": 9.571911811828613,
"learning_rate": 5.333333333333333e-05,
"loss": 1.4802,
"step": 16
},
{
"epoch": 0.04401294498381877,
"grad_norm": 5.4818878173828125,
"learning_rate": 5.666666666666667e-05,
"loss": 1.449,
"step": 17
},
{
"epoch": 0.04660194174757282,
"grad_norm": 3.5284886360168457,
"learning_rate": 6e-05,
"loss": 1.3482,
"step": 18
},
{
"epoch": 0.04919093851132686,
"grad_norm": 4.385798931121826,
"learning_rate": 6.333333333333333e-05,
"loss": 1.3401,
"step": 19
},
{
"epoch": 0.05177993527508091,
"grad_norm": 4.73576545715332,
"learning_rate": 6.666666666666667e-05,
"loss": 1.3967,
"step": 20
},
{
"epoch": 0.05436893203883495,
"grad_norm": 7.08858585357666,
"learning_rate": 7e-05,
"loss": 1.4665,
"step": 21
},
{
"epoch": 0.056957928802589,
"grad_norm": 5.8825225830078125,
"learning_rate": 7.333333333333333e-05,
"loss": 1.4479,
"step": 22
},
{
"epoch": 0.05954692556634304,
"grad_norm": 6.359987735748291,
"learning_rate": 7.666666666666667e-05,
"loss": 1.4314,
"step": 23
},
{
"epoch": 0.062135922330097085,
"grad_norm": 26.79585075378418,
"learning_rate": 8e-05,
"loss": 1.499,
"step": 24
},
{
"epoch": 0.06472491909385113,
"grad_norm": 9.032891273498535,
"learning_rate": 8.333333333333334e-05,
"loss": 1.5914,
"step": 25
},
{
"epoch": 0.06731391585760518,
"grad_norm": 10.370737075805664,
"learning_rate": 8.666666666666667e-05,
"loss": 1.5544,
"step": 26
},
{
"epoch": 0.06990291262135923,
"grad_norm": 7.443507671356201,
"learning_rate": 9e-05,
"loss": 1.3836,
"step": 27
},
{
"epoch": 0.07249190938511327,
"grad_norm": 4.631689548492432,
"learning_rate": 9.333333333333334e-05,
"loss": 1.2513,
"step": 28
},
{
"epoch": 0.07508090614886731,
"grad_norm": 3.5808937549591064,
"learning_rate": 9.666666666666667e-05,
"loss": 1.265,
"step": 29
},
{
"epoch": 0.07766990291262135,
"grad_norm": 3.6336581707000732,
"learning_rate": 0.0001,
"loss": 1.2874,
"step": 30
},
{
"epoch": 0.08025889967637541,
"grad_norm": 3.7123985290527344,
"learning_rate": 9.999146252290264e-05,
"loss": 1.2587,
"step": 31
},
{
"epoch": 0.08284789644012945,
"grad_norm": 3.7729804515838623,
"learning_rate": 9.996585300715116e-05,
"loss": 1.3292,
"step": 32
},
{
"epoch": 0.0854368932038835,
"grad_norm": 6.0967206954956055,
"learning_rate": 9.99231801983717e-05,
"loss": 1.3803,
"step": 33
},
{
"epoch": 0.08802588996763754,
"grad_norm": 4.336344242095947,
"learning_rate": 9.986345866928941e-05,
"loss": 1.328,
"step": 34
},
{
"epoch": 0.09061488673139159,
"grad_norm": 4.434997081756592,
"learning_rate": 9.978670881475172e-05,
"loss": 1.2927,
"step": 35
},
{
"epoch": 0.09320388349514563,
"grad_norm": 7.466955661773682,
"learning_rate": 9.96929568447637e-05,
"loss": 1.5473,
"step": 36
},
{
"epoch": 0.09579288025889968,
"grad_norm": 6.0542497634887695,
"learning_rate": 9.958223477553714e-05,
"loss": 1.4992,
"step": 37
},
{
"epoch": 0.09838187702265372,
"grad_norm": 6.534477233886719,
"learning_rate": 9.94545804185573e-05,
"loss": 1.3268,
"step": 38
},
{
"epoch": 0.10097087378640776,
"grad_norm": 6.788901329040527,
"learning_rate": 9.931003736767013e-05,
"loss": 1.2662,
"step": 39
},
{
"epoch": 0.10355987055016182,
"grad_norm": 4.574948310852051,
"learning_rate": 9.91486549841951e-05,
"loss": 1.2097,
"step": 40
},
{
"epoch": 0.10614886731391586,
"grad_norm": 2.7755305767059326,
"learning_rate": 9.89704883800683e-05,
"loss": 1.1472,
"step": 41
},
{
"epoch": 0.1087378640776699,
"grad_norm": 3.1703152656555176,
"learning_rate": 9.877559839902184e-05,
"loss": 1.2213,
"step": 42
},
{
"epoch": 0.11132686084142394,
"grad_norm": 2.9166173934936523,
"learning_rate": 9.85640515958057e-05,
"loss": 1.1592,
"step": 43
},
{
"epoch": 0.113915857605178,
"grad_norm": 3.054363965988159,
"learning_rate": 9.833592021345937e-05,
"loss": 1.1857,
"step": 44
},
{
"epoch": 0.11650485436893204,
"grad_norm": 3.5410144329071045,
"learning_rate": 9.809128215864097e-05,
"loss": 1.2165,
"step": 45
},
{
"epoch": 0.11909385113268608,
"grad_norm": 4.187359809875488,
"learning_rate": 9.783022097502204e-05,
"loss": 1.2585,
"step": 46
},
{
"epoch": 0.12168284789644013,
"grad_norm": 4.440476894378662,
"learning_rate": 9.755282581475769e-05,
"loss": 1.258,
"step": 47
},
{
"epoch": 0.12427184466019417,
"grad_norm": 7.523629188537598,
"learning_rate": 9.725919140804099e-05,
"loss": 1.2681,
"step": 48
},
{
"epoch": 0.1268608414239482,
"grad_norm": 19.543785095214844,
"learning_rate": 9.694941803075283e-05,
"loss": 1.3755,
"step": 49
},
{
"epoch": 0.12944983818770225,
"grad_norm": 14.362659454345703,
"learning_rate": 9.662361147021779e-05,
"loss": 1.4683,
"step": 50
},
{
"epoch": 0.12944983818770225,
"eval_loss": 0.3648124039173126,
"eval_runtime": 64.5567,
"eval_samples_per_second": 40.321,
"eval_steps_per_second": 5.05,
"step": 50
},
{
"epoch": 0.13203883495145632,
"grad_norm": 12.063788414001465,
"learning_rate": 9.628188298907782e-05,
"loss": 1.4899,
"step": 51
},
{
"epoch": 0.13462783171521037,
"grad_norm": 8.200540542602539,
"learning_rate": 9.592434928729616e-05,
"loss": 1.3093,
"step": 52
},
{
"epoch": 0.1372168284789644,
"grad_norm": 3.508943796157837,
"learning_rate": 9.555113246230442e-05,
"loss": 1.1634,
"step": 53
},
{
"epoch": 0.13980582524271845,
"grad_norm": 2.361328601837158,
"learning_rate": 9.516235996730645e-05,
"loss": 1.0916,
"step": 54
},
{
"epoch": 0.1423948220064725,
"grad_norm": 2.5727005004882812,
"learning_rate": 9.475816456775313e-05,
"loss": 1.0954,
"step": 55
},
{
"epoch": 0.14498381877022654,
"grad_norm": 3.032360792160034,
"learning_rate": 9.43386842960031e-05,
"loss": 1.175,
"step": 56
},
{
"epoch": 0.14757281553398058,
"grad_norm": 2.9916622638702393,
"learning_rate": 9.39040624041849e-05,
"loss": 1.1121,
"step": 57
},
{
"epoch": 0.15016181229773462,
"grad_norm": 3.048910140991211,
"learning_rate": 9.345444731527642e-05,
"loss": 1.0988,
"step": 58
},
{
"epoch": 0.15275080906148866,
"grad_norm": 3.363703489303589,
"learning_rate": 9.298999257241863e-05,
"loss": 1.1841,
"step": 59
},
{
"epoch": 0.1553398058252427,
"grad_norm": 3.3770105838775635,
"learning_rate": 9.251085678648072e-05,
"loss": 1.1814,
"step": 60
},
{
"epoch": 0.15792880258899678,
"grad_norm": 4.700929641723633,
"learning_rate": 9.201720358189464e-05,
"loss": 1.2283,
"step": 61
},
{
"epoch": 0.16051779935275082,
"grad_norm": 5.556697845458984,
"learning_rate": 9.150920154077754e-05,
"loss": 1.3271,
"step": 62
},
{
"epoch": 0.16310679611650486,
"grad_norm": 5.743304252624512,
"learning_rate": 9.098702414536107e-05,
"loss": 1.2262,
"step": 63
},
{
"epoch": 0.1656957928802589,
"grad_norm": 5.529282093048096,
"learning_rate": 9.045084971874738e-05,
"loss": 1.198,
"step": 64
},
{
"epoch": 0.16828478964401294,
"grad_norm": 3.399848222732544,
"learning_rate": 8.9900861364012e-05,
"loss": 1.1249,
"step": 65
},
{
"epoch": 0.170873786407767,
"grad_norm": 2.213207244873047,
"learning_rate": 8.933724690167417e-05,
"loss": 1.0662,
"step": 66
},
{
"epoch": 0.17346278317152103,
"grad_norm": 2.491964101791382,
"learning_rate": 8.876019880555649e-05,
"loss": 1.0862,
"step": 67
},
{
"epoch": 0.17605177993527507,
"grad_norm": 2.7167015075683594,
"learning_rate": 8.816991413705516e-05,
"loss": 1.1059,
"step": 68
},
{
"epoch": 0.1786407766990291,
"grad_norm": 2.776489734649658,
"learning_rate": 8.756659447784368e-05,
"loss": 1.0858,
"step": 69
},
{
"epoch": 0.18122977346278318,
"grad_norm": 2.965355396270752,
"learning_rate": 8.695044586103296e-05,
"loss": 1.125,
"step": 70
},
{
"epoch": 0.18381877022653723,
"grad_norm": 2.7822370529174805,
"learning_rate": 8.632167870081121e-05,
"loss": 1.0688,
"step": 71
},
{
"epoch": 0.18640776699029127,
"grad_norm": 4.001903533935547,
"learning_rate": 8.568050772058762e-05,
"loss": 1.1053,
"step": 72
},
{
"epoch": 0.1889967637540453,
"grad_norm": 3.649930477142334,
"learning_rate": 8.502715187966455e-05,
"loss": 1.1806,
"step": 73
},
{
"epoch": 0.19158576051779935,
"grad_norm": 4.8959550857543945,
"learning_rate": 8.436183429846313e-05,
"loss": 1.2705,
"step": 74
},
{
"epoch": 0.1941747572815534,
"grad_norm": 7.251972198486328,
"learning_rate": 8.368478218232787e-05,
"loss": 1.3655,
"step": 75
},
{
"epoch": 0.19676375404530744,
"grad_norm": 9.770423889160156,
"learning_rate": 8.299622674393614e-05,
"loss": 1.1064,
"step": 76
},
{
"epoch": 0.19935275080906148,
"grad_norm": 8.65103530883789,
"learning_rate": 8.229640312433937e-05,
"loss": 1.1839,
"step": 77
},
{
"epoch": 0.20194174757281552,
"grad_norm": 5.945736885070801,
"learning_rate": 8.158555031266254e-05,
"loss": 1.1019,
"step": 78
},
{
"epoch": 0.2045307443365696,
"grad_norm": 3.460510730743408,
"learning_rate": 8.086391106448965e-05,
"loss": 1.0443,
"step": 79
},
{
"epoch": 0.20711974110032363,
"grad_norm": 2.2635905742645264,
"learning_rate": 8.013173181896283e-05,
"loss": 1.0537,
"step": 80
},
{
"epoch": 0.20970873786407768,
"grad_norm": 2.609692335128784,
"learning_rate": 7.938926261462366e-05,
"loss": 1.0779,
"step": 81
},
{
"epoch": 0.21229773462783172,
"grad_norm": 2.645723819732666,
"learning_rate": 7.863675700402526e-05,
"loss": 1.0427,
"step": 82
},
{
"epoch": 0.21488673139158576,
"grad_norm": 3.2835347652435303,
"learning_rate": 7.787447196714427e-05,
"loss": 1.1034,
"step": 83
},
{
"epoch": 0.2174757281553398,
"grad_norm": 3.677543878555298,
"learning_rate": 7.710266782362247e-05,
"loss": 1.2105,
"step": 84
},
{
"epoch": 0.22006472491909385,
"grad_norm": 3.5897769927978516,
"learning_rate": 7.63216081438678e-05,
"loss": 1.2181,
"step": 85
},
{
"epoch": 0.2226537216828479,
"grad_norm": 3.5425643920898438,
"learning_rate": 7.553155965904535e-05,
"loss": 1.1825,
"step": 86
},
{
"epoch": 0.22524271844660193,
"grad_norm": 4.001893997192383,
"learning_rate": 7.473279216998895e-05,
"loss": 1.2106,
"step": 87
},
{
"epoch": 0.227831715210356,
"grad_norm": 3.9724557399749756,
"learning_rate": 7.392557845506432e-05,
"loss": 1.1139,
"step": 88
},
{
"epoch": 0.23042071197411004,
"grad_norm": 4.996490955352783,
"learning_rate": 7.311019417701566e-05,
"loss": 1.1814,
"step": 89
},
{
"epoch": 0.23300970873786409,
"grad_norm": 3.354581117630005,
"learning_rate": 7.228691778882693e-05,
"loss": 1.0731,
"step": 90
},
{
"epoch": 0.23559870550161813,
"grad_norm": 1.9178783893585205,
"learning_rate": 7.145603043863045e-05,
"loss": 0.9357,
"step": 91
},
{
"epoch": 0.23818770226537217,
"grad_norm": 2.0360922813415527,
"learning_rate": 7.061781587369519e-05,
"loss": 0.9789,
"step": 92
},
{
"epoch": 0.2407766990291262,
"grad_norm": 2.401505470275879,
"learning_rate": 6.977256034352712e-05,
"loss": 1.0518,
"step": 93
},
{
"epoch": 0.24336569579288025,
"grad_norm": 2.738330364227295,
"learning_rate": 6.892055250211552e-05,
"loss": 1.0681,
"step": 94
},
{
"epoch": 0.2459546925566343,
"grad_norm": 3.1010687351226807,
"learning_rate": 6.806208330935766e-05,
"loss": 1.1431,
"step": 95
},
{
"epoch": 0.24854368932038834,
"grad_norm": 3.6441831588745117,
"learning_rate": 6.719744593169641e-05,
"loss": 1.1438,
"step": 96
},
{
"epoch": 0.2511326860841424,
"grad_norm": 3.7766201496124268,
"learning_rate": 6.632693564200416e-05,
"loss": 1.1669,
"step": 97
},
{
"epoch": 0.2537216828478964,
"grad_norm": 4.321850299835205,
"learning_rate": 6.545084971874738e-05,
"loss": 1.1743,
"step": 98
},
{
"epoch": 0.2563106796116505,
"grad_norm": 5.203691005706787,
"learning_rate": 6.456948734446624e-05,
"loss": 1.2916,
"step": 99
},
{
"epoch": 0.2588996763754045,
"grad_norm": 6.3613057136535645,
"learning_rate": 6.368314950360415e-05,
"loss": 1.2034,
"step": 100
},
{
"epoch": 0.2588996763754045,
"eval_loss": 0.2764929533004761,
"eval_runtime": 67.6851,
"eval_samples_per_second": 38.457,
"eval_steps_per_second": 4.816,
"step": 100
}
],
"logging_steps": 1,
"max_steps": 200,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 50,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 5,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 5.802703442715607e+17,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}