Training in progress, step 50, checkpoint
{
  "best_metric": 11.926169395446777,
  "best_model_checkpoint": "miner_id_24/checkpoint-50",
  "epoch": 0.020713046630246226,
  "eval_steps": 25,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0004142609326049245,
      "grad_norm": 0.009111136198043823,
      "learning_rate": 2.9999999999999997e-05,
      "loss": 11.9291,
      "step": 1
    },
    {
      "epoch": 0.0004142609326049245,
      "eval_loss": 11.93281364440918,
      "eval_runtime": 0.2456,
      "eval_samples_per_second": 203.617,
      "eval_steps_per_second": 28.506,
      "step": 1
    },
    {
      "epoch": 0.000828521865209849,
      "grad_norm": 0.01578676700592041,
      "learning_rate": 5.9999999999999995e-05,
      "loss": 11.9311,
      "step": 2
    },
    {
      "epoch": 0.0012427827978147737,
      "grad_norm": 0.010788201354444027,
      "learning_rate": 8.999999999999999e-05,
      "loss": 11.9303,
      "step": 3
    },
    {
      "epoch": 0.001657043730419698,
      "grad_norm": 0.014519501477479935,
      "learning_rate": 0.00011999999999999999,
      "loss": 11.9288,
      "step": 4
    },
    {
      "epoch": 0.0020713046630246226,
      "grad_norm": 0.01510781329125166,
      "learning_rate": 0.00015,
      "loss": 11.9304,
      "step": 5
    },
    {
      "epoch": 0.0024855655956295473,
      "grad_norm": 0.013269297778606415,
      "learning_rate": 0.00017999999999999998,
      "loss": 11.9294,
      "step": 6
    },
    {
      "epoch": 0.0028998265282344717,
      "grad_norm": 0.013751151971518993,
      "learning_rate": 0.00020999999999999998,
      "loss": 11.9321,
      "step": 7
    },
    {
      "epoch": 0.003314087460839396,
      "grad_norm": 0.015573951415717602,
      "learning_rate": 0.00023999999999999998,
      "loss": 11.9287,
      "step": 8
    },
    {
      "epoch": 0.0037283483934443208,
      "grad_norm": 0.009504775516688824,
      "learning_rate": 0.00027,
      "loss": 11.9305,
      "step": 9
    },
    {
      "epoch": 0.004142609326049245,
      "grad_norm": 0.010801173746585846,
      "learning_rate": 0.0003,
      "loss": 11.9337,
      "step": 10
    },
    {
      "epoch": 0.0045568702586541695,
      "grad_norm": 0.011178042739629745,
      "learning_rate": 0.0002999794957488703,
      "loss": 11.9278,
      "step": 11
    },
    {
      "epoch": 0.004971131191259095,
      "grad_norm": 0.014659388922154903,
      "learning_rate": 0.0002999179886011389,
      "loss": 11.9297,
      "step": 12
    },
    {
      "epoch": 0.005385392123864019,
      "grad_norm": 0.013904299587011337,
      "learning_rate": 0.0002998154953722457,
      "loss": 11.9315,
      "step": 13
    },
    {
      "epoch": 0.005799653056468943,
      "grad_norm": 0.014901366084814072,
      "learning_rate": 0.00029967204408281613,
      "loss": 11.9306,
      "step": 14
    },
    {
      "epoch": 0.006213913989073868,
      "grad_norm": 0.014373673126101494,
      "learning_rate": 0.00029948767395100045,
      "loss": 11.9291,
      "step": 15
    },
    {
      "epoch": 0.006628174921678792,
      "grad_norm": 0.012232257053256035,
      "learning_rate": 0.0002992624353817517,
      "loss": 11.9292,
      "step": 16
    },
    {
      "epoch": 0.007042435854283717,
      "grad_norm": 0.011413573287427425,
      "learning_rate": 0.0002989963899530457,
      "loss": 11.9322,
      "step": 17
    },
    {
      "epoch": 0.0074566967868886416,
      "grad_norm": 0.016656706109642982,
      "learning_rate": 0.00029868961039904624,
      "loss": 11.9324,
      "step": 18
    },
    {
      "epoch": 0.007870957719493567,
      "grad_norm": 0.014373266138136387,
      "learning_rate": 0.00029834218059022024,
      "loss": 11.9318,
      "step": 19
    },
    {
      "epoch": 0.00828521865209849,
      "grad_norm": 0.015115369111299515,
      "learning_rate": 0.00029795419551040833,
      "loss": 11.9287,
      "step": 20
    },
    {
      "epoch": 0.008699479584703415,
      "grad_norm": 0.01734703592956066,
      "learning_rate": 0.00029752576123085736,
      "loss": 11.9309,
      "step": 21
    },
    {
      "epoch": 0.009113740517308339,
      "grad_norm": 0.014644989743828773,
      "learning_rate": 0.0002970569948812214,
      "loss": 11.9254,
      "step": 22
    },
    {
      "epoch": 0.009528001449913264,
      "grad_norm": 0.021497027948498726,
      "learning_rate": 0.0002965480246175399,
      "loss": 11.9302,
      "step": 23
    },
    {
      "epoch": 0.00994226238251819,
      "grad_norm": 0.01817285642027855,
      "learning_rate": 0.0002959989895872009,
      "loss": 11.9312,
      "step": 24
    },
    {
      "epoch": 0.010356523315123113,
      "grad_norm": 0.02207845263183117,
      "learning_rate": 0.0002954100398908995,
      "loss": 11.9284,
      "step": 25
    },
    {
      "epoch": 0.010356523315123113,
      "eval_loss": 11.93165397644043,
      "eval_runtime": 0.2441,
      "eval_samples_per_second": 204.847,
      "eval_steps_per_second": 28.679,
      "step": 25
    },
    {
      "epoch": 0.010770784247728038,
      "grad_norm": 0.017103340476751328,
      "learning_rate": 0.0002947813365416023,
      "loss": 11.9297,
      "step": 26
    },
    {
      "epoch": 0.011185045180332961,
      "grad_norm": 0.0265261959284544,
      "learning_rate": 0.0002941130514205272,
      "loss": 11.9303,
      "step": 27
    },
    {
      "epoch": 0.011599306112937887,
      "grad_norm": 0.031121160835027695,
      "learning_rate": 0.0002934053672301536,
      "loss": 11.9264,
      "step": 28
    },
    {
      "epoch": 0.012013567045542812,
      "grad_norm": 0.020733291283249855,
      "learning_rate": 0.00029265847744427303,
      "loss": 11.9311,
      "step": 29
    },
    {
      "epoch": 0.012427827978147735,
      "grad_norm": 0.019345130771398544,
      "learning_rate": 0.00029187258625509513,
      "loss": 11.9295,
      "step": 30
    },
    {
      "epoch": 0.01284208891075266,
      "grad_norm": 0.025070849806070328,
      "learning_rate": 0.00029104790851742417,
      "loss": 11.9307,
      "step": 31
    },
    {
      "epoch": 0.013256349843357584,
      "grad_norm": 0.02109096758067608,
      "learning_rate": 0.0002901846696899191,
      "loss": 11.927,
      "step": 32
    },
    {
      "epoch": 0.01367061077596251,
      "grad_norm": 0.027676530182361603,
      "learning_rate": 0.00028928310577345606,
      "loss": 11.9296,
      "step": 33
    },
    {
      "epoch": 0.014084871708567434,
      "grad_norm": 0.02774973399937153,
      "learning_rate": 0.0002883434632466077,
      "loss": 11.9294,
      "step": 34
    },
    {
      "epoch": 0.014499132641172358,
      "grad_norm": 0.026240255683660507,
      "learning_rate": 0.00028736599899825856,
      "loss": 11.9283,
      "step": 35
    },
    {
      "epoch": 0.014913393573777283,
      "grad_norm": 0.03300342708826065,
      "learning_rate": 0.00028635098025737434,
      "loss": 11.9277,
      "step": 36
    },
    {
      "epoch": 0.015327654506382208,
      "grad_norm": 0.03084516152739525,
      "learning_rate": 0.00028529868451994384,
      "loss": 11.9284,
      "step": 37
    },
    {
      "epoch": 0.015741915438987134,
      "grad_norm": 0.03772755712270737,
      "learning_rate": 0.0002842093994731145,
      "loss": 11.9301,
      "step": 38
    },
    {
      "epoch": 0.016156176371592057,
      "grad_norm": 0.044647328555583954,
      "learning_rate": 0.00028308342291654174,
      "loss": 11.929,
      "step": 39
    },
    {
      "epoch": 0.01657043730419698,
      "grad_norm": 0.0458788201212883,
      "learning_rate": 0.00028192106268097334,
      "loss": 11.9261,
      "step": 40
    },
    {
      "epoch": 0.016984698236801904,
      "grad_norm": 0.062342319637537,
      "learning_rate": 0.00028072263654409154,
      "loss": 11.9292,
      "step": 41
    },
    {
      "epoch": 0.01739895916940683,
      "grad_norm": 0.04045253247022629,
      "learning_rate": 0.0002794884721436361,
      "loss": 11.9276,
      "step": 42
    },
    {
      "epoch": 0.017813220102011754,
      "grad_norm": 0.05095702409744263,
      "learning_rate": 0.00027821890688783083,
      "loss": 11.9244,
      "step": 43
    },
    {
      "epoch": 0.018227481034616678,
      "grad_norm": 0.04911315068602562,
      "learning_rate": 0.0002769142878631403,
      "loss": 11.9264,
      "step": 44
    },
    {
      "epoch": 0.018641741967221605,
      "grad_norm": 0.04607488214969635,
      "learning_rate": 0.00027557497173937923,
      "loss": 11.9244,
      "step": 45
    },
    {
      "epoch": 0.019056002899826528,
      "grad_norm": 0.05658293142914772,
      "learning_rate": 0.000274201324672203,
      "loss": 11.9224,
      "step": 46
    },
    {
      "epoch": 0.01947026383243145,
      "grad_norm": 0.050977252423763275,
      "learning_rate": 0.00027279372220300385,
      "loss": 11.9255,
      "step": 47
    },
    {
      "epoch": 0.01988452476503638,
      "grad_norm": 0.06175487861037254,
      "learning_rate": 0.0002713525491562421,
      "loss": 11.9218,
      "step": 48
    },
    {
      "epoch": 0.020298785697641302,
      "grad_norm": 0.049341753125190735,
      "learning_rate": 0.00026987819953423867,
      "loss": 11.927,
      "step": 49
    },
    {
      "epoch": 0.020713046630246226,
      "grad_norm": 0.0748741626739502,
      "learning_rate": 0.00026837107640945905,
      "loss": 11.9258,
      "step": 50
    },
    {
      "epoch": 0.020713046630246226,
      "eval_loss": 11.926169395446777,
      "eval_runtime": 0.2445,
      "eval_samples_per_second": 204.526,
      "eval_steps_per_second": 28.634,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 200,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 50,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 79631155200.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}