{
  "best_metric": 1.770018458366394,
  "best_model_checkpoint": "miner_id_24/checkpoint-50",
  "epoch": 0.007801833430856251,
  "eval_steps": 25,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.000156036668617125,
      "grad_norm": 0.6086230874061584,
      "learning_rate": 2.9999999999999997e-05,
      "loss": 1.8397,
      "step": 1
    },
    {
      "epoch": 0.000156036668617125,
      "eval_loss": 2.050737142562866,
      "eval_runtime": 4.681,
      "eval_samples_per_second": 10.682,
      "eval_steps_per_second": 1.495,
      "step": 1
    },
    {
      "epoch": 0.00031207333723425,
      "grad_norm": 0.8839565515518188,
      "learning_rate": 5.9999999999999995e-05,
      "loss": 1.9253,
      "step": 2
    },
    {
      "epoch": 0.0004681100058513751,
      "grad_norm": 0.5914949178695679,
      "learning_rate": 8.999999999999999e-05,
      "loss": 1.7086,
      "step": 3
    },
    {
      "epoch": 0.0006241466744685,
      "grad_norm": 0.6697356104850769,
      "learning_rate": 0.00011999999999999999,
      "loss": 1.676,
      "step": 4
    },
    {
      "epoch": 0.0007801833430856252,
      "grad_norm": 0.7815700173377991,
      "learning_rate": 0.00015,
      "loss": 1.7765,
      "step": 5
    },
    {
      "epoch": 0.0009362200117027502,
      "grad_norm": 0.8394500613212585,
      "learning_rate": 0.00017999999999999998,
      "loss": 1.7859,
      "step": 6
    },
    {
      "epoch": 0.001092256680319875,
      "grad_norm": 0.6029866337776184,
      "learning_rate": 0.00020999999999999998,
      "loss": 1.6454,
      "step": 7
    },
    {
      "epoch": 0.001248293348937,
      "grad_norm": 0.5297218561172485,
      "learning_rate": 0.00023999999999999998,
      "loss": 1.7182,
      "step": 8
    },
    {
      "epoch": 0.0014043300175541251,
      "grad_norm": 0.8180967569351196,
      "learning_rate": 0.00027,
      "loss": 1.8304,
      "step": 9
    },
    {
      "epoch": 0.0015603666861712503,
      "grad_norm": 0.9878036379814148,
      "learning_rate": 0.0003,
      "loss": 1.7367,
      "step": 10
    },
    {
      "epoch": 0.0017164033547883754,
      "grad_norm": 0.5219380855560303,
      "learning_rate": 0.0002999794957488703,
      "loss": 1.878,
      "step": 11
    },
    {
      "epoch": 0.0018724400234055004,
      "grad_norm": 0.5020744800567627,
      "learning_rate": 0.0002999179886011389,
      "loss": 1.8158,
      "step": 12
    },
    {
      "epoch": 0.0020284766920226254,
      "grad_norm": 0.5324579477310181,
      "learning_rate": 0.0002998154953722457,
      "loss": 1.562,
      "step": 13
    },
    {
      "epoch": 0.00218451336063975,
      "grad_norm": 0.849560558795929,
      "learning_rate": 0.00029967204408281613,
      "loss": 1.6667,
      "step": 14
    },
    {
      "epoch": 0.0023405500292568754,
      "grad_norm": 0.5848795771598816,
      "learning_rate": 0.00029948767395100045,
      "loss": 1.6083,
      "step": 15
    },
    {
      "epoch": 0.002496586697874,
      "grad_norm": 0.6541223526000977,
      "learning_rate": 0.0002992624353817517,
      "loss": 1.6179,
      "step": 16
    },
    {
      "epoch": 0.0026526233664911254,
      "grad_norm": 0.6580960154533386,
      "learning_rate": 0.0002989963899530457,
      "loss": 1.4159,
      "step": 17
    },
    {
      "epoch": 0.0028086600351082502,
      "grad_norm": 0.5440740585327148,
      "learning_rate": 0.00029868961039904624,
      "loss": 1.5937,
      "step": 18
    },
    {
      "epoch": 0.0029646967037253755,
      "grad_norm": 0.6181334257125854,
      "learning_rate": 0.00029834218059022024,
      "loss": 1.5747,
      "step": 19
    },
    {
      "epoch": 0.0031207333723425007,
      "grad_norm": 0.7562193870544434,
      "learning_rate": 0.00029795419551040833,
      "loss": 1.4357,
      "step": 20
    },
    {
      "epoch": 0.0032767700409596255,
      "grad_norm": 0.9943856000900269,
      "learning_rate": 0.00029752576123085736,
      "loss": 1.5356,
      "step": 21
    },
    {
      "epoch": 0.0034328067095767507,
      "grad_norm": 0.7195516228675842,
      "learning_rate": 0.0002970569948812214,
      "loss": 1.3726,
      "step": 22
    },
    {
      "epoch": 0.0035888433781938755,
      "grad_norm": 0.9126946330070496,
      "learning_rate": 0.0002965480246175399,
      "loss": 1.3734,
      "step": 23
    },
    {
      "epoch": 0.0037448800468110007,
      "grad_norm": 0.6467637419700623,
      "learning_rate": 0.0002959989895872009,
      "loss": 1.3602,
      "step": 24
    },
    {
      "epoch": 0.0039009167154281255,
      "grad_norm": 0.5799360275268555,
      "learning_rate": 0.0002954100398908995,
      "loss": 1.2958,
      "step": 25
    },
    {
      "epoch": 0.0039009167154281255,
      "eval_loss": 1.5699554681777954,
      "eval_runtime": 4.237,
      "eval_samples_per_second": 11.801,
      "eval_steps_per_second": 1.652,
      "step": 25
    },
    {
      "epoch": 0.004056953384045251,
      "grad_norm": 0.6595892310142517,
      "learning_rate": 0.0002947813365416023,
      "loss": 1.3138,
      "step": 26
    },
    {
      "epoch": 0.004212990052662376,
      "grad_norm": 0.6430525779724121,
      "learning_rate": 0.0002941130514205272,
      "loss": 1.3484,
      "step": 27
    },
    {
      "epoch": 0.0043690267212795,
      "grad_norm": 0.6260256171226501,
      "learning_rate": 0.0002934053672301536,
      "loss": 1.4841,
      "step": 28
    },
    {
      "epoch": 0.004525063389896626,
      "grad_norm": 0.644149899482727,
      "learning_rate": 0.00029265847744427303,
      "loss": 1.3044,
      "step": 29
    },
    {
      "epoch": 0.004681100058513751,
      "grad_norm": 0.5812307596206665,
      "learning_rate": 0.00029187258625509513,
      "loss": 1.2462,
      "step": 30
    },
    {
      "epoch": 0.004837136727130876,
      "grad_norm": 0.5822577476501465,
      "learning_rate": 0.00029104790851742417,
      "loss": 1.333,
      "step": 31
    },
    {
      "epoch": 0.004993173395748,
      "grad_norm": 0.7623059749603271,
      "learning_rate": 0.0002901846696899191,
      "loss": 1.269,
      "step": 32
    },
    {
      "epoch": 0.005149210064365126,
      "grad_norm": 0.6771302223205566,
      "learning_rate": 0.00028928310577345606,
      "loss": 1.4905,
      "step": 33
    },
    {
      "epoch": 0.005305246732982251,
      "grad_norm": 0.7112970948219299,
      "learning_rate": 0.0002883434632466077,
      "loss": 1.3871,
      "step": 34
    },
    {
      "epoch": 0.005461283401599376,
      "grad_norm": 0.7185697555541992,
      "learning_rate": 0.00028736599899825856,
      "loss": 1.2095,
      "step": 35
    },
    {
      "epoch": 0.0056173200702165004,
      "grad_norm": 0.8005110025405884,
      "learning_rate": 0.00028635098025737434,
      "loss": 1.3555,
      "step": 36
    },
    {
      "epoch": 0.005773356738833626,
      "grad_norm": 0.8502066135406494,
      "learning_rate": 0.00028529868451994384,
      "loss": 1.3121,
      "step": 37
    },
    {
      "epoch": 0.005929393407450751,
      "grad_norm": 0.8016048073768616,
      "learning_rate": 0.0002842093994731145,
      "loss": 1.058,
      "step": 38
    },
    {
      "epoch": 0.006085430076067876,
      "grad_norm": 0.8298421502113342,
      "learning_rate": 0.00028308342291654174,
      "loss": 1.1519,
      "step": 39
    },
    {
      "epoch": 0.006241466744685001,
      "grad_norm": 0.9262282848358154,
      "learning_rate": 0.00028192106268097334,
      "loss": 1.2059,
      "step": 40
    },
    {
      "epoch": 0.006397503413302126,
      "grad_norm": 0.9439979195594788,
      "learning_rate": 0.00028072263654409154,
      "loss": 1.0447,
      "step": 41
    },
    {
      "epoch": 0.006553540081919251,
      "grad_norm": 1.0094624757766724,
      "learning_rate": 0.0002794884721436361,
      "loss": 0.9239,
      "step": 42
    },
    {
      "epoch": 0.006709576750536376,
      "grad_norm": 0.9697788953781128,
      "learning_rate": 0.00027821890688783083,
      "loss": 0.8678,
      "step": 43
    },
    {
      "epoch": 0.006865613419153501,
      "grad_norm": 1.063493251800537,
      "learning_rate": 0.0002769142878631403,
      "loss": 0.7462,
      "step": 44
    },
    {
      "epoch": 0.007021650087770626,
      "grad_norm": 0.9183794856071472,
      "learning_rate": 0.00027557497173937923,
      "loss": 0.9486,
      "step": 45
    },
    {
      "epoch": 0.007177686756387751,
      "grad_norm": 1.2309966087341309,
      "learning_rate": 0.000274201324672203,
      "loss": 0.9477,
      "step": 46
    },
    {
      "epoch": 0.007333723425004876,
      "grad_norm": 1.1949628591537476,
      "learning_rate": 0.00027279372220300385,
      "loss": 0.9138,
      "step": 47
    },
    {
      "epoch": 0.0074897600936220015,
      "grad_norm": 1.3658891916275024,
      "learning_rate": 0.0002713525491562421,
      "loss": 0.8504,
      "step": 48
    },
    {
      "epoch": 0.007645796762239126,
      "grad_norm": 1.4715008735656738,
      "learning_rate": 0.00026987819953423867,
      "loss": 0.7391,
      "step": 49
    },
    {
      "epoch": 0.007801833430856251,
      "grad_norm": 2.4105653762817383,
      "learning_rate": 0.00026837107640945905,
      "loss": 0.5911,
      "step": 50
    },
    {
      "epoch": 0.007801833430856251,
      "eval_loss": 1.770018458366394,
      "eval_runtime": 4.2306,
      "eval_samples_per_second": 11.819,
      "eval_steps_per_second": 1.655,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 200,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 50,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 7.88255238782976e+16,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}