|
{
  "best_metric": 0.01057486142963171,
  "best_model_checkpoint": "miner_id_24/checkpoint-50",
  "epoch": 0.012738853503184714,
  "eval_steps": 25,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.00025477707006369424,
      "grad_norm": 1.0286307334899902,
      "learning_rate": 2.9999999999999997e-05,
      "loss": 0.269,
      "step": 1
    },
    {
      "epoch": 0.00025477707006369424,
      "eval_loss": 0.5101139545440674,
      "eval_runtime": 1.5196,
      "eval_samples_per_second": 32.903,
      "eval_steps_per_second": 4.606,
      "step": 1
    },
    {
      "epoch": 0.0005095541401273885,
      "grad_norm": 1.556894063949585,
      "learning_rate": 5.9999999999999995e-05,
      "loss": 0.3567,
      "step": 2
    },
    {
      "epoch": 0.0007643312101910828,
      "grad_norm": 2.283867359161377,
      "learning_rate": 8.999999999999999e-05,
      "loss": 0.4087,
      "step": 3
    },
    {
      "epoch": 0.001019108280254777,
      "grad_norm": 2.4171900749206543,
      "learning_rate": 0.00011999999999999999,
      "loss": 0.363,
      "step": 4
    },
    {
      "epoch": 0.0012738853503184713,
      "grad_norm": 12.618427276611328,
      "learning_rate": 0.00015,
      "loss": 0.3981,
      "step": 5
    },
    {
      "epoch": 0.0015286624203821656,
      "grad_norm": 2.4185802936553955,
      "learning_rate": 0.00017999999999999998,
      "loss": 0.4004,
      "step": 6
    },
    {
      "epoch": 0.0017834394904458599,
      "grad_norm": 5.5376715660095215,
      "learning_rate": 0.00020999999999999998,
      "loss": 0.2874,
      "step": 7
    },
    {
      "epoch": 0.002038216560509554,
      "grad_norm": 0.9478390216827393,
      "learning_rate": 0.00023999999999999998,
      "loss": 0.1853,
      "step": 8
    },
    {
      "epoch": 0.0022929936305732482,
      "grad_norm": 56.32394027709961,
      "learning_rate": 0.00027,
      "loss": 0.3149,
      "step": 9
    },
    {
      "epoch": 0.0025477707006369425,
      "grad_norm": 16.362091064453125,
      "learning_rate": 0.0003,
      "loss": 0.1729,
      "step": 10
    },
    {
      "epoch": 0.002802547770700637,
      "grad_norm": 0.7184139490127563,
      "learning_rate": 0.0002999794957488703,
      "loss": 0.1242,
      "step": 11
    },
    {
      "epoch": 0.003057324840764331,
      "grad_norm": 0.5045642256736755,
      "learning_rate": 0.0002999179886011389,
      "loss": 0.1038,
      "step": 12
    },
    {
      "epoch": 0.0033121019108280254,
      "grad_norm": 0.310913622379303,
      "learning_rate": 0.0002998154953722457,
      "loss": 0.0685,
      "step": 13
    },
    {
      "epoch": 0.0035668789808917197,
      "grad_norm": 0.2581654191017151,
      "learning_rate": 0.00029967204408281613,
      "loss": 0.0829,
      "step": 14
    },
    {
      "epoch": 0.003821656050955414,
      "grad_norm": 0.3674789369106293,
      "learning_rate": 0.00029948767395100045,
      "loss": 0.0568,
      "step": 15
    },
    {
      "epoch": 0.004076433121019108,
      "grad_norm": 0.39360904693603516,
      "learning_rate": 0.0002992624353817517,
      "loss": 0.0528,
      "step": 16
    },
    {
      "epoch": 0.004331210191082803,
      "grad_norm": 0.1805201917886734,
      "learning_rate": 0.0002989963899530457,
      "loss": 0.04,
      "step": 17
    },
    {
      "epoch": 0.0045859872611464965,
      "grad_norm": 0.1762702465057373,
      "learning_rate": 0.00029868961039904624,
      "loss": 0.0367,
      "step": 18
    },
    {
      "epoch": 0.004840764331210191,
      "grad_norm": 0.13610054552555084,
      "learning_rate": 0.00029834218059022024,
      "loss": 0.025,
      "step": 19
    },
    {
      "epoch": 0.005095541401273885,
      "grad_norm": 0.19703835248947144,
      "learning_rate": 0.00029795419551040833,
      "loss": 0.0246,
      "step": 20
    },
    {
      "epoch": 0.00535031847133758,
      "grad_norm": 0.09786652028560638,
      "learning_rate": 0.00029752576123085736,
      "loss": 0.0268,
      "step": 21
    },
    {
      "epoch": 0.005605095541401274,
      "grad_norm": 0.14086395502090454,
      "learning_rate": 0.0002970569948812214,
      "loss": 0.0264,
      "step": 22
    },
    {
      "epoch": 0.005859872611464968,
      "grad_norm": 0.17169000208377838,
      "learning_rate": 0.0002965480246175399,
      "loss": 0.0196,
      "step": 23
    },
    {
      "epoch": 0.006114649681528662,
      "grad_norm": 0.09433834254741669,
      "learning_rate": 0.0002959989895872009,
      "loss": 0.0189,
      "step": 24
    },
    {
      "epoch": 0.006369426751592357,
      "grad_norm": 0.16031892597675323,
      "learning_rate": 0.0002954100398908995,
      "loss": 0.0108,
      "step": 25
    },
    {
      "epoch": 0.006369426751592357,
      "eval_loss": 0.010693784803152084,
      "eval_runtime": 1.1107,
      "eval_samples_per_second": 45.016,
      "eval_steps_per_second": 6.302,
      "step": 25
    },
    {
      "epoch": 0.006624203821656051,
      "grad_norm": 0.11191248893737793,
      "learning_rate": 0.0002947813365416023,
      "loss": 0.0238,
      "step": 26
    },
    {
      "epoch": 0.006878980891719746,
      "grad_norm": 0.10025566816329956,
      "learning_rate": 0.0002941130514205272,
      "loss": 0.0235,
      "step": 27
    },
    {
      "epoch": 0.0071337579617834395,
      "grad_norm": 0.07389934360980988,
      "learning_rate": 0.0002934053672301536,
      "loss": 0.0243,
      "step": 28
    },
    {
      "epoch": 0.007388535031847133,
      "grad_norm": 0.091887466609478,
      "learning_rate": 0.00029265847744427303,
      "loss": 0.0129,
      "step": 29
    },
    {
      "epoch": 0.007643312101910828,
      "grad_norm": 0.12741683423519135,
      "learning_rate": 0.00029187258625509513,
      "loss": 0.0103,
      "step": 30
    },
    {
      "epoch": 0.007898089171974522,
      "grad_norm": 0.05791885778307915,
      "learning_rate": 0.00029104790851742417,
      "loss": 0.0159,
      "step": 31
    },
    {
      "epoch": 0.008152866242038216,
      "grad_norm": 0.024696996435523033,
      "learning_rate": 0.0002901846696899191,
      "loss": 0.0102,
      "step": 32
    },
    {
      "epoch": 0.008407643312101911,
      "grad_norm": 0.11524629592895508,
      "learning_rate": 0.00028928310577345606,
      "loss": 0.0152,
      "step": 33
    },
    {
      "epoch": 0.008662420382165605,
      "grad_norm": 0.059739116579294205,
      "learning_rate": 0.0002883434632466077,
      "loss": 0.0144,
      "step": 34
    },
    {
      "epoch": 0.008917197452229299,
      "grad_norm": 0.10027167946100235,
      "learning_rate": 0.00028736599899825856,
      "loss": 0.0126,
      "step": 35
    },
    {
      "epoch": 0.009171974522292993,
      "grad_norm": 0.12271470576524734,
      "learning_rate": 0.00028635098025737434,
      "loss": 0.013,
      "step": 36
    },
    {
      "epoch": 0.009426751592356689,
      "grad_norm": 0.06018821895122528,
      "learning_rate": 0.00028529868451994384,
      "loss": 0.0046,
      "step": 37
    },
    {
      "epoch": 0.009681528662420382,
      "grad_norm": 0.04106978327035904,
      "learning_rate": 0.0002842093994731145,
      "loss": 0.0141,
      "step": 38
    },
    {
      "epoch": 0.009936305732484076,
      "grad_norm": 0.08049760013818741,
      "learning_rate": 0.00028308342291654174,
      "loss": 0.0091,
      "step": 39
    },
    {
      "epoch": 0.01019108280254777,
      "grad_norm": 0.07265523076057434,
      "learning_rate": 0.00028192106268097334,
      "loss": 0.0135,
      "step": 40
    },
    {
      "epoch": 0.010445859872611466,
      "grad_norm": 0.10459401458501816,
      "learning_rate": 0.00028072263654409154,
      "loss": 0.0146,
      "step": 41
    },
    {
      "epoch": 0.01070063694267516,
      "grad_norm": 0.19710560142993927,
      "learning_rate": 0.0002794884721436361,
      "loss": 0.0117,
      "step": 42
    },
    {
      "epoch": 0.010955414012738853,
      "grad_norm": 0.04603102430701256,
      "learning_rate": 0.00027821890688783083,
      "loss": 0.0207,
      "step": 43
    },
    {
      "epoch": 0.011210191082802547,
      "grad_norm": 0.04048272594809532,
      "learning_rate": 0.0002769142878631403,
      "loss": 0.0051,
      "step": 44
    },
    {
      "epoch": 0.011464968152866241,
      "grad_norm": 0.10804333537817001,
      "learning_rate": 0.00027557497173937923,
      "loss": 0.0253,
      "step": 45
    },
    {
      "epoch": 0.011719745222929937,
      "grad_norm": 0.10464876145124435,
      "learning_rate": 0.000274201324672203,
      "loss": 0.0167,
      "step": 46
    },
    {
      "epoch": 0.01197452229299363,
      "grad_norm": 0.05495596304535866,
      "learning_rate": 0.00027279372220300385,
      "loss": 0.0113,
      "step": 47
    },
    {
      "epoch": 0.012229299363057325,
      "grad_norm": 0.19954094290733337,
      "learning_rate": 0.0002713525491562421,
      "loss": 0.0109,
      "step": 48
    },
    {
      "epoch": 0.012484076433121018,
      "grad_norm": 0.34227123856544495,
      "learning_rate": 0.00026987819953423867,
      "loss": 0.0267,
      "step": 49
    },
    {
      "epoch": 0.012738853503184714,
      "grad_norm": 0.4878714680671692,
      "learning_rate": 0.00026837107640945905,
      "loss": 0.0323,
      "step": 50
    },
    {
      "epoch": 0.012738853503184714,
      "eval_loss": 0.01057486142963171,
      "eval_runtime": 1.1123,
      "eval_samples_per_second": 44.951,
      "eval_steps_per_second": 6.293,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 200,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 50,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 1.0991728418881536e+16,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}
|
|