{
  "best_metric": 4.9084954261779785,
  "best_model_checkpoint": "miner_id_24/checkpoint-50",
  "epoch": 0.1693480101608806,
  "eval_steps": 25,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.003386960203217612,
      "grad_norm": 13.740797996520996,
      "learning_rate": 2.9999999999999997e-05,
      "loss": 3.1754,
      "step": 1
    },
    {
      "epoch": 0.003386960203217612,
      "eval_loss": 4.746492385864258,
      "eval_runtime": 6.8487,
      "eval_samples_per_second": 7.301,
      "eval_steps_per_second": 1.022,
      "step": 1
    },
    {
      "epoch": 0.006773920406435224,
      "grad_norm": 15.781672477722168,
      "learning_rate": 5.9999999999999995e-05,
      "loss": 3.1112,
      "step": 2
    },
    {
      "epoch": 0.010160880609652836,
      "grad_norm": 13.645687103271484,
      "learning_rate": 8.999999999999999e-05,
      "loss": 3.0881,
      "step": 3
    },
    {
      "epoch": 0.013547840812870448,
      "grad_norm": 9.539155960083008,
      "learning_rate": 0.00011999999999999999,
      "loss": 1.7159,
      "step": 4
    },
    {
      "epoch": 0.01693480101608806,
      "grad_norm": 4.083582878112793,
      "learning_rate": 0.00015,
      "loss": 0.4898,
      "step": 5
    },
    {
      "epoch": 0.02032176121930567,
      "grad_norm": 2.5189051628112793,
      "learning_rate": 0.00017999999999999998,
      "loss": 0.2539,
      "step": 6
    },
    {
      "epoch": 0.023708721422523286,
      "grad_norm": 6.38540506362915,
      "learning_rate": 0.00020999999999999998,
      "loss": 0.3516,
      "step": 7
    },
    {
      "epoch": 0.027095681625740897,
      "grad_norm": 13.505107879638672,
      "learning_rate": 0.00023999999999999998,
      "loss": 0.5986,
      "step": 8
    },
    {
      "epoch": 0.03048264182895851,
      "grad_norm": 3.495882987976074,
      "learning_rate": 0.00027,
      "loss": 0.2681,
      "step": 9
    },
    {
      "epoch": 0.03386960203217612,
      "grad_norm": 4.064078330993652,
      "learning_rate": 0.0003,
      "loss": 0.1063,
      "step": 10
    },
    {
      "epoch": 0.03725656223539373,
      "grad_norm": 5.738462924957275,
      "learning_rate": 0.0002999794957488703,
      "loss": 0.0551,
      "step": 11
    },
    {
      "epoch": 0.04064352243861134,
      "grad_norm": 11.198445320129395,
      "learning_rate": 0.0002999179886011389,
      "loss": 0.5952,
      "step": 12
    },
    {
      "epoch": 0.04403048264182896,
      "grad_norm": 10.263776779174805,
      "learning_rate": 0.0002998154953722457,
      "loss": 0.4714,
      "step": 13
    },
    {
      "epoch": 0.04741744284504657,
      "grad_norm": 13.766398429870605,
      "learning_rate": 0.00029967204408281613,
      "loss": 0.615,
      "step": 14
    },
    {
      "epoch": 0.05080440304826418,
      "grad_norm": 6.041114330291748,
      "learning_rate": 0.00029948767395100045,
      "loss": 0.6127,
      "step": 15
    },
    {
      "epoch": 0.05419136325148179,
      "grad_norm": 3.049060583114624,
      "learning_rate": 0.0002992624353817517,
      "loss": 0.5814,
      "step": 16
    },
    {
      "epoch": 0.057578323454699404,
      "grad_norm": 2.864790678024292,
      "learning_rate": 0.0002989963899530457,
      "loss": 0.4473,
      "step": 17
    },
    {
      "epoch": 0.06096528365791702,
      "grad_norm": 6.220133304595947,
      "learning_rate": 0.00029868961039904624,
      "loss": 0.4051,
      "step": 18
    },
    {
      "epoch": 0.06435224386113463,
      "grad_norm": 2.263486862182617,
      "learning_rate": 0.00029834218059022024,
      "loss": 0.2403,
      "step": 19
    },
    {
      "epoch": 0.06773920406435224,
      "grad_norm": 4.127490520477295,
      "learning_rate": 0.00029795419551040833,
      "loss": 0.2728,
      "step": 20
    },
    {
      "epoch": 0.07112616426756986,
      "grad_norm": 3.9908688068389893,
      "learning_rate": 0.00029752576123085736,
      "loss": 0.2682,
      "step": 21
    },
    {
      "epoch": 0.07451312447078746,
      "grad_norm": 9.111742973327637,
      "learning_rate": 0.0002970569948812214,
      "loss": 0.2637,
      "step": 22
    },
    {
      "epoch": 0.07790008467400508,
      "grad_norm": 3.493535280227661,
      "learning_rate": 0.0002965480246175399,
      "loss": 0.2798,
      "step": 23
    },
    {
      "epoch": 0.08128704487722269,
      "grad_norm": 2.2693541049957275,
      "learning_rate": 0.0002959989895872009,
      "loss": 0.2922,
      "step": 24
    },
    {
      "epoch": 0.0846740050804403,
      "grad_norm": 3.484853744506836,
      "learning_rate": 0.0002954100398908995,
      "loss": 0.2737,
      "step": 25
    },
    {
      "epoch": 0.0846740050804403,
      "eval_loss": 1.9888197183609009,
      "eval_runtime": 6.9843,
      "eval_samples_per_second": 7.159,
      "eval_steps_per_second": 1.002,
      "step": 25
    },
    {
      "epoch": 0.08806096528365792,
      "grad_norm": 2.42002534866333,
      "learning_rate": 0.0002947813365416023,
      "loss": 0.1555,
      "step": 26
    },
    {
      "epoch": 0.09144792548687553,
      "grad_norm": 4.468302249908447,
      "learning_rate": 0.0002941130514205272,
      "loss": 0.2272,
      "step": 27
    },
    {
      "epoch": 0.09483488569009314,
      "grad_norm": 4.684446334838867,
      "learning_rate": 0.0002934053672301536,
      "loss": 0.5432,
      "step": 28
    },
    {
      "epoch": 0.09822184589331075,
      "grad_norm": 11.514558792114258,
      "learning_rate": 0.00029265847744427303,
      "loss": 0.4408,
      "step": 29
    },
    {
      "epoch": 0.10160880609652836,
      "grad_norm": 4.456749439239502,
      "learning_rate": 0.00029187258625509513,
      "loss": 0.3308,
      "step": 30
    },
    {
      "epoch": 0.10499576629974598,
      "grad_norm": 6.331120491027832,
      "learning_rate": 0.00029104790851742417,
      "loss": 0.7949,
      "step": 31
    },
    {
      "epoch": 0.10838272650296359,
      "grad_norm": 29.455829620361328,
      "learning_rate": 0.0002901846696899191,
      "loss": 4.6651,
      "step": 32
    },
    {
      "epoch": 0.1117696867061812,
      "grad_norm": 11.79928970336914,
      "learning_rate": 0.00028928310577345606,
      "loss": 1.6663,
      "step": 33
    },
    {
      "epoch": 0.11515664690939881,
      "grad_norm": 6.791766166687012,
      "learning_rate": 0.0002883434632466077,
      "loss": 0.6458,
      "step": 34
    },
    {
      "epoch": 0.11854360711261643,
      "grad_norm": 9.273675918579102,
      "learning_rate": 0.00028736599899825856,
      "loss": 0.6169,
      "step": 35
    },
    {
      "epoch": 0.12193056731583404,
      "grad_norm": 1.2118967771530151,
      "learning_rate": 0.00028635098025737434,
      "loss": 0.315,
      "step": 36
    },
    {
      "epoch": 0.12531752751905165,
      "grad_norm": 3.5652599334716797,
      "learning_rate": 0.00028529868451994384,
      "loss": 0.3108,
      "step": 37
    },
    {
      "epoch": 0.12870448772226925,
      "grad_norm": 1.0388684272766113,
      "learning_rate": 0.0002842093994731145,
      "loss": 0.3072,
      "step": 38
    },
    {
      "epoch": 0.13209144792548688,
      "grad_norm": 1.3198330402374268,
      "learning_rate": 0.00028308342291654174,
      "loss": 0.2595,
      "step": 39
    },
    {
      "epoch": 0.1354784081287045,
      "grad_norm": 9.642812728881836,
      "learning_rate": 0.00028192106268097334,
      "loss": 0.4847,
      "step": 40
    },
    {
      "epoch": 0.1388653683319221,
      "grad_norm": 8.038244247436523,
      "learning_rate": 0.00028072263654409154,
      "loss": 0.4086,
      "step": 41
    },
    {
      "epoch": 0.14225232853513972,
      "grad_norm": 2.3692054748535156,
      "learning_rate": 0.0002794884721436361,
      "loss": 0.3204,
      "step": 42
    },
    {
      "epoch": 0.14563928873835733,
      "grad_norm": 0.9498773217201233,
      "learning_rate": 0.00027821890688783083,
      "loss": 0.2721,
      "step": 43
    },
    {
      "epoch": 0.14902624894157493,
      "grad_norm": 0.9654709100723267,
      "learning_rate": 0.0002769142878631403,
      "loss": 0.1986,
      "step": 44
    },
    {
      "epoch": 0.15241320914479256,
      "grad_norm": 3.0386719703674316,
      "learning_rate": 0.00027557497173937923,
      "loss": 0.2193,
      "step": 45
    },
    {
      "epoch": 0.15580016934801016,
      "grad_norm": 4.969003200531006,
      "learning_rate": 0.000274201324672203,
      "loss": 0.296,
      "step": 46
    },
    {
      "epoch": 0.15918712955122777,
      "grad_norm": 5.841146945953369,
      "learning_rate": 0.00027279372220300385,
      "loss": 0.5754,
      "step": 47
    },
    {
      "epoch": 0.16257408975444537,
      "grad_norm": 4.163698196411133,
      "learning_rate": 0.0002713525491562421,
      "loss": 0.3311,
      "step": 48
    },
    {
      "epoch": 0.165961049957663,
      "grad_norm": 0.9581086039543152,
      "learning_rate": 0.00026987819953423867,
      "loss": 0.1751,
      "step": 49
    },
    {
      "epoch": 0.1693480101608806,
      "grad_norm": 0.8359802961349487,
      "learning_rate": 0.00026837107640945905,
      "loss": 0.1348,
      "step": 50
    },
    {
      "epoch": 0.1693480101608806,
      "eval_loss": 4.9084954261779785,
      "eval_runtime": 6.9763,
      "eval_samples_per_second": 7.167,
      "eval_steps_per_second": 1.003,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 200,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 50,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 8.75088325902336e+16,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}