{
"best_metric": 0.20686934888362885,
"best_model_checkpoint": "miner_id_24/checkpoint-25",
"epoch": 0.02689618074233459,
"eval_steps": 25,
"global_step": 50,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0005379236148466917,
"grad_norm": 5.665757179260254,
"learning_rate": 4e-05,
"loss": 14.1149,
"step": 1
},
{
"epoch": 0.0005379236148466917,
"eval_loss": 14.399935722351074,
"eval_runtime": 480.3434,
"eval_samples_per_second": 1.63,
"eval_steps_per_second": 0.816,
"step": 1
},
{
"epoch": 0.0010758472296933835,
"grad_norm": 6.0138421058654785,
"learning_rate": 8e-05,
"loss": 14.3315,
"step": 2
},
{
"epoch": 0.0016137708445400753,
"grad_norm": 5.68032169342041,
"learning_rate": 0.00012,
"loss": 14.4942,
"step": 3
},
{
"epoch": 0.002151694459386767,
"grad_norm": 7.876317977905273,
"learning_rate": 0.00016,
"loss": 14.411,
"step": 4
},
{
"epoch": 0.0026896180742334587,
"grad_norm": 8.77420425415039,
"learning_rate": 0.0002,
"loss": 14.3799,
"step": 5
},
{
"epoch": 0.0032275416890801506,
"grad_norm": 8.047574996948242,
"learning_rate": 0.00019994532573409262,
"loss": 11.1861,
"step": 6
},
{
"epoch": 0.0037654653039268424,
"grad_norm": 8.94501781463623,
"learning_rate": 0.00019978136272187747,
"loss": 9.831,
"step": 7
},
{
"epoch": 0.004303388918773534,
"grad_norm": 9.489190101623535,
"learning_rate": 0.00019950829025450114,
"loss": 7.9405,
"step": 8
},
{
"epoch": 0.004841312533620226,
"grad_norm": 13.784337997436523,
"learning_rate": 0.00019912640693269752,
"loss": 6.1143,
"step": 9
},
{
"epoch": 0.0053792361484669175,
"grad_norm": 9.499981880187988,
"learning_rate": 0.00019863613034027224,
"loss": 4.2967,
"step": 10
},
{
"epoch": 0.005917159763313609,
"grad_norm": 9.118963241577148,
"learning_rate": 0.00019803799658748094,
"loss": 2.7132,
"step": 11
},
{
"epoch": 0.006455083378160301,
"grad_norm": 9.794291496276855,
"learning_rate": 0.0001973326597248006,
"loss": 1.1106,
"step": 12
},
{
"epoch": 0.006993006993006993,
"grad_norm": 5.810440540313721,
"learning_rate": 0.00019652089102773488,
"loss": 1.0159,
"step": 13
},
{
"epoch": 0.007530930607853685,
"grad_norm": 3.0343141555786133,
"learning_rate": 0.00019560357815343577,
"loss": 0.3357,
"step": 14
},
{
"epoch": 0.008068854222700376,
"grad_norm": 4.984272480010986,
"learning_rate": 0.00019458172417006347,
"loss": 0.3256,
"step": 15
},
{
"epoch": 0.008606777837547068,
"grad_norm": 8.477812767028809,
"learning_rate": 0.0001934564464599461,
"loss": 0.4395,
"step": 16
},
{
"epoch": 0.00914470145239376,
"grad_norm": 7.445779323577881,
"learning_rate": 0.00019222897549773848,
"loss": 0.5309,
"step": 17
},
{
"epoch": 0.009682625067240451,
"grad_norm": 5.090211391448975,
"learning_rate": 0.00019090065350491626,
"loss": 0.2973,
"step": 18
},
{
"epoch": 0.010220548682087143,
"grad_norm": 7.343393802642822,
"learning_rate": 0.00018947293298207635,
"loss": 0.4837,
"step": 19
},
{
"epoch": 0.010758472296933835,
"grad_norm": 2.520585536956787,
"learning_rate": 0.0001879473751206489,
"loss": 0.1629,
"step": 20
},
{
"epoch": 0.011296395911780527,
"grad_norm": 3.9465153217315674,
"learning_rate": 0.00018632564809575742,
"loss": 0.1549,
"step": 21
},
{
"epoch": 0.011834319526627219,
"grad_norm": 3.0971474647521973,
"learning_rate": 0.00018460952524209355,
"loss": 0.2236,
"step": 22
},
{
"epoch": 0.01237224314147391,
"grad_norm": 6.442452430725098,
"learning_rate": 0.00018280088311480201,
"loss": 0.3162,
"step": 23
},
{
"epoch": 0.012910166756320602,
"grad_norm": 4.07744026184082,
"learning_rate": 0.00018090169943749476,
"loss": 0.1577,
"step": 24
},
{
"epoch": 0.013448090371167294,
"grad_norm": 1.9578357934951782,
"learning_rate": 0.00017891405093963938,
"loss": 0.1762,
"step": 25
},
{
"epoch": 0.013448090371167294,
"eval_loss": 0.20686934888362885,
"eval_runtime": 482.315,
"eval_samples_per_second": 1.623,
"eval_steps_per_second": 0.813,
"step": 25
},
{
"epoch": 0.013986013986013986,
"grad_norm": 1.5185937881469727,
"learning_rate": 0.00017684011108568592,
"loss": 0.2361,
"step": 26
},
{
"epoch": 0.014523937600860678,
"grad_norm": 0.9820839762687683,
"learning_rate": 0.0001746821476984154,
"loss": 0.1982,
"step": 27
},
{
"epoch": 0.01506186121570737,
"grad_norm": 5.153009414672852,
"learning_rate": 0.00017244252047910892,
"loss": 0.2591,
"step": 28
},
{
"epoch": 0.015599784830554062,
"grad_norm": 1.0352336168289185,
"learning_rate": 0.00017012367842724887,
"loss": 0.14,
"step": 29
},
{
"epoch": 0.01613770844540075,
"grad_norm": 2.7931392192840576,
"learning_rate": 0.00016772815716257412,
"loss": 0.3103,
"step": 30
},
{
"epoch": 0.016675632060247445,
"grad_norm": 3.7326819896698,
"learning_rate": 0.00016525857615241687,
"loss": 0.2908,
"step": 31
},
{
"epoch": 0.017213555675094135,
"grad_norm": 1.1818197965621948,
"learning_rate": 0.0001627176358473537,
"loss": 0.1197,
"step": 32
},
{
"epoch": 0.01775147928994083,
"grad_norm": 3.522305727005005,
"learning_rate": 0.00016010811472830252,
"loss": 0.2051,
"step": 33
},
{
"epoch": 0.01828940290478752,
"grad_norm": 1.0128105878829956,
"learning_rate": 0.00015743286626829437,
"loss": 0.1311,
"step": 34
},
{
"epoch": 0.018827326519634213,
"grad_norm": 4.8102593421936035,
"learning_rate": 0.00015469481581224272,
"loss": 0.3403,
"step": 35
},
{
"epoch": 0.019365250134480903,
"grad_norm": 13.774560928344727,
"learning_rate": 0.00015189695737812152,
"loss": 0.9765,
"step": 36
},
{
"epoch": 0.019903173749327596,
"grad_norm": 5.964848518371582,
"learning_rate": 0.00014904235038305083,
"loss": 0.4839,
"step": 37
},
{
"epoch": 0.020441097364174286,
"grad_norm": 10.879966735839844,
"learning_rate": 0.0001461341162978688,
"loss": 0.685,
"step": 38
},
{
"epoch": 0.02097902097902098,
"grad_norm": 6.293717384338379,
"learning_rate": 0.00014317543523384928,
"loss": 0.3725,
"step": 39
},
{
"epoch": 0.02151694459386767,
"grad_norm": 0.753055214881897,
"learning_rate": 0.00014016954246529696,
"loss": 0.1652,
"step": 40
},
{
"epoch": 0.022054868208714364,
"grad_norm": 1.0214154720306396,
"learning_rate": 0.00013711972489182208,
"loss": 0.1115,
"step": 41
},
{
"epoch": 0.022592791823561054,
"grad_norm": 3.1957497596740723,
"learning_rate": 0.00013402931744416433,
"loss": 0.173,
"step": 42
},
{
"epoch": 0.023130715438407747,
"grad_norm": 0.9727430939674377,
"learning_rate": 0.00013090169943749476,
"loss": 0.211,
"step": 43
},
{
"epoch": 0.023668639053254437,
"grad_norm": 4.678534030914307,
"learning_rate": 0.00012774029087618446,
"loss": 0.3037,
"step": 44
},
{
"epoch": 0.02420656266810113,
"grad_norm": 3.1848816871643066,
"learning_rate": 0.00012454854871407994,
"loss": 0.2527,
"step": 45
},
{
"epoch": 0.02474448628294782,
"grad_norm": 2.432804584503174,
"learning_rate": 0.0001213299630743747,
"loss": 0.1629,
"step": 46
},
{
"epoch": 0.025282409897794515,
"grad_norm": 1.7503596544265747,
"learning_rate": 0.000118088053433211,
"loss": 0.1667,
"step": 47
},
{
"epoch": 0.025820333512641205,
"grad_norm": 3.7610039710998535,
"learning_rate": 0.0001148263647711842,
"loss": 0.2712,
"step": 48
},
{
"epoch": 0.026358257127487898,
"grad_norm": 1.4483740329742432,
"learning_rate": 0.00011154846369695863,
"loss": 0.1399,
"step": 49
},
{
"epoch": 0.02689618074233459,
"grad_norm": 2.2921817302703857,
"learning_rate": 0.00010825793454723325,
"loss": 0.091,
"step": 50
},
{
"epoch": 0.02689618074233459,
"eval_loss": 0.21423955261707306,
"eval_runtime": 482.3607,
"eval_samples_per_second": 1.623,
"eval_steps_per_second": 0.813,
"step": 50
}
],
"logging_steps": 1,
"max_steps": 100,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 25,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 1,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 1
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.382181645582336e+17,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}