{
  "best_metric": 11.920040130615234,
  "best_model_checkpoint": "miner_id_24/checkpoint-50",
  "epoch": 0.006184865633794106,
  "eval_steps": 25,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.00012369731267588213,
      "grad_norm": 0.012027100659906864,
      "learning_rate": 2.9999999999999997e-05,
      "loss": 11.9301,
      "step": 1
    },
    {
      "epoch": 0.00012369731267588213,
      "eval_loss": 11.931971549987793,
      "eval_runtime": 0.2658,
      "eval_samples_per_second": 188.11,
      "eval_steps_per_second": 26.335,
      "step": 1
    },
    {
      "epoch": 0.00024739462535176425,
      "grad_norm": 0.013170535676181316,
      "learning_rate": 5.9999999999999995e-05,
      "loss": 11.9357,
      "step": 2
    },
    {
      "epoch": 0.0003710919380276463,
      "grad_norm": 0.015393908135592937,
      "learning_rate": 8.999999999999999e-05,
      "loss": 11.931,
      "step": 3
    },
    {
      "epoch": 0.0004947892507035285,
      "grad_norm": 0.015217465348541737,
      "learning_rate": 0.00011999999999999999,
      "loss": 11.9307,
      "step": 4
    },
    {
      "epoch": 0.0006184865633794106,
      "grad_norm": 0.015969593077898026,
      "learning_rate": 0.00015,
      "loss": 11.9308,
      "step": 5
    },
    {
      "epoch": 0.0007421838760552926,
      "grad_norm": 0.012820259667932987,
      "learning_rate": 0.00017999999999999998,
      "loss": 11.9314,
      "step": 6
    },
    {
      "epoch": 0.0008658811887311748,
      "grad_norm": 0.013964623212814331,
      "learning_rate": 0.00020999999999999998,
      "loss": 11.9306,
      "step": 7
    },
    {
      "epoch": 0.000989578501407057,
      "grad_norm": 0.019782794639468193,
      "learning_rate": 0.00023999999999999998,
      "loss": 11.9299,
      "step": 8
    },
    {
      "epoch": 0.001113275814082939,
      "grad_norm": 0.0200200155377388,
      "learning_rate": 0.00027,
      "loss": 11.9311,
      "step": 9
    },
    {
      "epoch": 0.0012369731267588213,
      "grad_norm": 0.02155681885778904,
      "learning_rate": 0.0003,
      "loss": 11.9329,
      "step": 10
    },
    {
      "epoch": 0.0013606704394347033,
      "grad_norm": 0.01726888120174408,
      "learning_rate": 0.0002999794957488703,
      "loss": 11.9289,
      "step": 11
    },
    {
      "epoch": 0.0014843677521105853,
      "grad_norm": 0.028205610811710358,
      "learning_rate": 0.0002999179886011389,
      "loss": 11.9323,
      "step": 12
    },
    {
      "epoch": 0.0016080650647864675,
      "grad_norm": 0.018932929262518883,
      "learning_rate": 0.0002998154953722457,
      "loss": 11.9303,
      "step": 13
    },
    {
      "epoch": 0.0017317623774623495,
      "grad_norm": 0.017916956916451454,
      "learning_rate": 0.00029967204408281613,
      "loss": 11.929,
      "step": 14
    },
    {
      "epoch": 0.0018554596901382318,
      "grad_norm": 0.015873437747359276,
      "learning_rate": 0.00029948767395100045,
      "loss": 11.9308,
      "step": 15
    },
    {
      "epoch": 0.001979157002814114,
      "grad_norm": 0.028634965419769287,
      "learning_rate": 0.0002992624353817517,
      "loss": 11.9296,
      "step": 16
    },
    {
      "epoch": 0.002102854315489996,
      "grad_norm": 0.021988065913319588,
      "learning_rate": 0.0002989963899530457,
      "loss": 11.9315,
      "step": 17
    },
    {
      "epoch": 0.002226551628165878,
      "grad_norm": 0.02658429555594921,
      "learning_rate": 0.00029868961039904624,
      "loss": 11.9289,
      "step": 18
    },
    {
      "epoch": 0.00235024894084176,
      "grad_norm": 0.0265872273594141,
      "learning_rate": 0.00029834218059022024,
      "loss": 11.9296,
      "step": 19
    },
    {
      "epoch": 0.0024739462535176425,
      "grad_norm": 0.027340911328792572,
      "learning_rate": 0.00029795419551040833,
      "loss": 11.9287,
      "step": 20
    },
    {
      "epoch": 0.0025976435661935245,
      "grad_norm": 0.031099706888198853,
      "learning_rate": 0.00029752576123085736,
      "loss": 11.9258,
      "step": 21
    },
    {
      "epoch": 0.0027213408788694065,
      "grad_norm": 0.02729742042720318,
      "learning_rate": 0.0002970569948812214,
      "loss": 11.9302,
      "step": 22
    },
    {
      "epoch": 0.0028450381915452886,
      "grad_norm": 0.024717018008232117,
      "learning_rate": 0.0002965480246175399,
      "loss": 11.9306,
      "step": 23
    },
    {
      "epoch": 0.0029687355042211706,
      "grad_norm": 0.04332926496863365,
      "learning_rate": 0.0002959989895872009,
      "loss": 11.9283,
      "step": 24
    },
    {
      "epoch": 0.003092432816897053,
      "grad_norm": 0.03554433956742287,
      "learning_rate": 0.0002954100398908995,
      "loss": 11.9265,
      "step": 25
    },
    {
      "epoch": 0.003092432816897053,
      "eval_loss": 11.9295654296875,
      "eval_runtime": 0.2693,
      "eval_samples_per_second": 185.663,
      "eval_steps_per_second": 25.993,
      "step": 25
    },
    {
      "epoch": 0.003216130129572935,
      "grad_norm": 0.04288264736533165,
      "learning_rate": 0.0002947813365416023,
      "loss": 11.9294,
      "step": 26
    },
    {
      "epoch": 0.003339827442248817,
      "grad_norm": 0.046658046543598175,
      "learning_rate": 0.0002941130514205272,
      "loss": 11.9296,
      "step": 27
    },
    {
      "epoch": 0.003463524754924699,
      "grad_norm": 0.04863458871841431,
      "learning_rate": 0.0002934053672301536,
      "loss": 11.9259,
      "step": 28
    },
    {
      "epoch": 0.0035872220676005815,
      "grad_norm": 0.0424097515642643,
      "learning_rate": 0.00029265847744427303,
      "loss": 11.9289,
      "step": 29
    },
    {
      "epoch": 0.0037109193802764636,
      "grad_norm": 0.0457475483417511,
      "learning_rate": 0.00029187258625509513,
      "loss": 11.9288,
      "step": 30
    },
    {
      "epoch": 0.0038346166929523456,
      "grad_norm": 0.05019732937216759,
      "learning_rate": 0.00029104790851742417,
      "loss": 11.9267,
      "step": 31
    },
    {
      "epoch": 0.003958314005628228,
      "grad_norm": 0.04077630862593651,
      "learning_rate": 0.0002901846696899191,
      "loss": 11.9231,
      "step": 32
    },
    {
      "epoch": 0.00408201131830411,
      "grad_norm": 0.08654378354549408,
      "learning_rate": 0.00028928310577345606,
      "loss": 11.9298,
      "step": 33
    },
    {
      "epoch": 0.004205708630979992,
      "grad_norm": 0.08918948471546173,
      "learning_rate": 0.0002883434632466077,
      "loss": 11.9257,
      "step": 34
    },
    {
      "epoch": 0.0043294059436558745,
      "grad_norm": 0.06919989734888077,
      "learning_rate": 0.00028736599899825856,
      "loss": 11.9268,
      "step": 35
    },
    {
      "epoch": 0.004453103256331756,
      "grad_norm": 0.06473810225725174,
      "learning_rate": 0.00028635098025737434,
      "loss": 11.9275,
      "step": 36
    },
    {
      "epoch": 0.0045768005690076385,
      "grad_norm": 0.09428096562623978,
      "learning_rate": 0.00028529868451994384,
      "loss": 11.9215,
      "step": 37
    },
    {
      "epoch": 0.00470049788168352,
      "grad_norm": 0.09449499100446701,
      "learning_rate": 0.0002842093994731145,
      "loss": 11.9191,
      "step": 38
    },
    {
      "epoch": 0.004824195194359403,
      "grad_norm": 0.0703858807682991,
      "learning_rate": 0.00028308342291654174,
      "loss": 11.9235,
      "step": 39
    },
    {
      "epoch": 0.004947892507035285,
      "grad_norm": 0.056587882339954376,
      "learning_rate": 0.00028192106268097334,
      "loss": 11.9233,
      "step": 40
    },
    {
      "epoch": 0.005071589819711167,
      "grad_norm": 0.07054153084754944,
      "learning_rate": 0.00028072263654409154,
      "loss": 11.9206,
      "step": 41
    },
    {
      "epoch": 0.005195287132387049,
      "grad_norm": 0.08422651141881943,
      "learning_rate": 0.0002794884721436361,
      "loss": 11.9194,
      "step": 42
    },
    {
      "epoch": 0.005318984445062931,
      "grad_norm": 0.06794271618127823,
      "learning_rate": 0.00027821890688783083,
      "loss": 11.9172,
      "step": 43
    },
    {
      "epoch": 0.005442681757738813,
      "grad_norm": 0.08688784390687943,
      "learning_rate": 0.0002769142878631403,
      "loss": 11.9104,
      "step": 44
    },
    {
      "epoch": 0.0055663790704146955,
      "grad_norm": 0.08762703835964203,
      "learning_rate": 0.00027557497173937923,
      "loss": 11.9169,
      "step": 45
    },
    {
      "epoch": 0.005690076383090577,
      "grad_norm": 0.09055648744106293,
      "learning_rate": 0.000274201324672203,
      "loss": 11.9117,
      "step": 46
    },
    {
      "epoch": 0.00581377369576646,
      "grad_norm": 0.07354215532541275,
      "learning_rate": 0.00027279372220300385,
      "loss": 11.916,
      "step": 47
    },
    {
      "epoch": 0.005937471008442341,
      "grad_norm": 0.08788519352674484,
      "learning_rate": 0.0002713525491562421,
      "loss": 11.92,
      "step": 48
    },
    {
      "epoch": 0.006061168321118224,
      "grad_norm": 0.09053384512662888,
      "learning_rate": 0.00026987819953423867,
      "loss": 11.9145,
      "step": 49
    },
    {
      "epoch": 0.006184865633794106,
      "grad_norm": 0.08623462170362473,
      "learning_rate": 0.00026837107640945905,
      "loss": 11.9131,
      "step": 50
    },
    {
      "epoch": 0.006184865633794106,
      "eval_loss": 11.920040130615234,
      "eval_runtime": 0.2628,
      "eval_samples_per_second": 190.255,
      "eval_steps_per_second": 26.636,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 200,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 50,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 79631155200.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}