|
{
  "best_metric": 1.3095707893371582,
  "best_model_checkpoint": "miner_id_24/checkpoint-50",
  "epoch": 0.45610034207525657,
  "eval_steps": 25,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.009122006841505131,
      "grad_norm": 8.655662536621094,
      "learning_rate": 5e-05,
      "loss": 1.871,
      "step": 1
    },
    {
      "epoch": 0.009122006841505131,
      "eval_loss": 3.7504098415374756,
      "eval_runtime": 1.5013,
      "eval_samples_per_second": 33.304,
      "eval_steps_per_second": 8.659,
      "step": 1
    },
    {
      "epoch": 0.018244013683010263,
      "grad_norm": 18.90291404724121,
      "learning_rate": 0.0001,
      "loss": 2.5266,
      "step": 2
    },
    {
      "epoch": 0.027366020524515394,
      "grad_norm": 11.101104736328125,
      "learning_rate": 9.990365154573717e-05,
      "loss": 2.2531,
      "step": 3
    },
    {
      "epoch": 0.036488027366020526,
      "grad_norm": 3.917557954788208,
      "learning_rate": 9.961501876182148e-05,
      "loss": 2.1085,
      "step": 4
    },
    {
      "epoch": 0.04561003420752566,
      "grad_norm": 2.785952091217041,
      "learning_rate": 9.913533761814537e-05,
      "loss": 2.1402,
      "step": 5
    },
    {
      "epoch": 0.05473204104903079,
      "grad_norm": 2.5528736114501953,
      "learning_rate": 9.846666218300807e-05,
      "loss": 2.0671,
      "step": 6
    },
    {
      "epoch": 0.06385404789053592,
      "grad_norm": 3.1096019744873047,
      "learning_rate": 9.761185582727977e-05,
      "loss": 2.022,
      "step": 7
    },
    {
      "epoch": 0.07297605473204105,
      "grad_norm": 4.098336696624756,
      "learning_rate": 9.657457896300791e-05,
      "loss": 1.9843,
      "step": 8
    },
    {
      "epoch": 0.08209806157354618,
      "grad_norm": 4.202548027038574,
      "learning_rate": 9.535927336897098e-05,
      "loss": 2.0921,
      "step": 9
    },
    {
      "epoch": 0.09122006841505131,
      "grad_norm": 3.1605403423309326,
      "learning_rate": 9.397114317029975e-05,
      "loss": 2.0352,
      "step": 10
    },
    {
      "epoch": 0.10034207525655645,
      "grad_norm": 2.562784194946289,
      "learning_rate": 9.241613255361455e-05,
      "loss": 2.0455,
      "step": 11
    },
    {
      "epoch": 0.10946408209806158,
      "grad_norm": 2.120959520339966,
      "learning_rate": 9.070090031310558e-05,
      "loss": 2.0248,
      "step": 12
    },
    {
      "epoch": 0.11858608893956671,
      "grad_norm": 1.7797281742095947,
      "learning_rate": 8.883279133655399e-05,
      "loss": 1.3069,
      "step": 13
    },
    {
      "epoch": 0.12770809578107184,
      "grad_norm": 1.5691810846328735,
      "learning_rate": 8.681980515339464e-05,
      "loss": 1.4204,
      "step": 14
    },
    {
      "epoch": 0.13683010262257697,
      "grad_norm": 1.2062739133834839,
      "learning_rate": 8.467056167950311e-05,
      "loss": 1.5388,
      "step": 15
    },
    {
      "epoch": 0.1459521094640821,
      "grad_norm": 0.7961310744285583,
      "learning_rate": 8.239426430539243e-05,
      "loss": 1.4491,
      "step": 16
    },
    {
      "epoch": 0.15507411630558723,
      "grad_norm": 0.7442319393157959,
      "learning_rate": 8.000066048588211e-05,
      "loss": 1.5383,
      "step": 17
    },
    {
      "epoch": 0.16419612314709237,
      "grad_norm": 0.6347817778587341,
      "learning_rate": 7.75e-05,
      "loss": 1.5935,
      "step": 18
    },
    {
      "epoch": 0.1733181299885975,
      "grad_norm": 0.6833974719047546,
      "learning_rate": 7.490299105985507e-05,
      "loss": 1.5712,
      "step": 19
    },
    {
      "epoch": 0.18244013683010263,
      "grad_norm": 0.9213839173316956,
      "learning_rate": 7.222075445642904e-05,
      "loss": 1.5696,
      "step": 20
    },
    {
      "epoch": 0.19156214367160776,
      "grad_norm": 1.3780533075332642,
      "learning_rate": 6.946477593864228e-05,
      "loss": 1.6993,
      "step": 21
    },
    {
      "epoch": 0.2006841505131129,
      "grad_norm": 1.2376089096069336,
      "learning_rate": 6.664685702961344e-05,
      "loss": 1.6858,
      "step": 22
    },
    {
      "epoch": 0.20980615735461802,
      "grad_norm": 1.1128700971603394,
      "learning_rate": 6.377906449072578e-05,
      "loss": 1.6236,
      "step": 23
    },
    {
      "epoch": 0.21892816419612315,
      "grad_norm": 1.1550122499465942,
      "learning_rate": 6.087367864990233e-05,
      "loss": 1.6805,
      "step": 24
    },
    {
      "epoch": 0.22805017103762829,
      "grad_norm": 1.4760537147521973,
      "learning_rate": 5.794314081535644e-05,
      "loss": 1.7456,
      "step": 25
    },
    {
      "epoch": 0.22805017103762829,
      "eval_loss": 1.4106637239456177,
      "eval_runtime": 1.0204,
      "eval_samples_per_second": 49.001,
      "eval_steps_per_second": 12.74,
      "step": 25
    },
    {
      "epoch": 0.23717217787913342,
      "grad_norm": 0.9181596040725708,
      "learning_rate": 5.500000000000001e-05,
      "loss": 1.1175,
      "step": 26
    },
    {
      "epoch": 0.24629418472063855,
      "grad_norm": 0.6287382245063782,
      "learning_rate": 5.205685918464356e-05,
      "loss": 1.3472,
      "step": 27
    },
    {
      "epoch": 0.2554161915621437,
      "grad_norm": 0.8954634666442871,
      "learning_rate": 4.912632135009769e-05,
      "loss": 1.382,
      "step": 28
    },
    {
      "epoch": 0.2645381984036488,
      "grad_norm": 0.5583573579788208,
      "learning_rate": 4.6220935509274235e-05,
      "loss": 1.4087,
      "step": 29
    },
    {
      "epoch": 0.27366020524515394,
      "grad_norm": 0.7373714447021484,
      "learning_rate": 4.3353142970386564e-05,
      "loss": 1.4713,
      "step": 30
    },
    {
      "epoch": 0.28278221208665905,
      "grad_norm": 0.46830883622169495,
      "learning_rate": 4.053522406135775e-05,
      "loss": 1.4206,
      "step": 31
    },
    {
      "epoch": 0.2919042189281642,
      "grad_norm": 0.445218950510025,
      "learning_rate": 3.777924554357096e-05,
      "loss": 1.4428,
      "step": 32
    },
    {
      "epoch": 0.3010262257696693,
      "grad_norm": 0.5559998750686646,
      "learning_rate": 3.509700894014496e-05,
      "loss": 1.5217,
      "step": 33
    },
    {
      "epoch": 0.31014823261117447,
      "grad_norm": 0.6704520583152771,
      "learning_rate": 3.250000000000001e-05,
      "loss": 1.4832,
      "step": 34
    },
    {
      "epoch": 0.31927023945267957,
      "grad_norm": 0.9207093715667725,
      "learning_rate": 2.9999339514117912e-05,
      "loss": 1.5879,
      "step": 35
    },
    {
      "epoch": 0.32839224629418473,
      "grad_norm": 0.606840193271637,
      "learning_rate": 2.760573569460757e-05,
      "loss": 1.5241,
      "step": 36
    },
    {
      "epoch": 0.33751425313568983,
      "grad_norm": 1.1559696197509766,
      "learning_rate": 2.53294383204969e-05,
      "loss": 1.5729,
      "step": 37
    },
    {
      "epoch": 0.346636259977195,
      "grad_norm": 0.6878672242164612,
      "learning_rate": 2.3180194846605367e-05,
      "loss": 1.0944,
      "step": 38
    },
    {
      "epoch": 0.3557582668187001,
      "grad_norm": 0.4424948990345001,
      "learning_rate": 2.1167208663446025e-05,
      "loss": 1.2331,
      "step": 39
    },
    {
      "epoch": 0.36488027366020526,
      "grad_norm": 0.46379706263542175,
      "learning_rate": 1.9299099686894423e-05,
      "loss": 1.4107,
      "step": 40
    },
    {
      "epoch": 0.37400228050171036,
      "grad_norm": 0.5940743088722229,
      "learning_rate": 1.758386744638546e-05,
      "loss": 1.3661,
      "step": 41
    },
    {
      "epoch": 0.3831242873432155,
      "grad_norm": 0.5553629994392395,
      "learning_rate": 1.602885682970026e-05,
      "loss": 1.4202,
      "step": 42
    },
    {
      "epoch": 0.3922462941847206,
      "grad_norm": 0.40024417638778687,
      "learning_rate": 1.464072663102903e-05,
      "loss": 1.3746,
      "step": 43
    },
    {
      "epoch": 0.4013683010262258,
      "grad_norm": 0.4371756613254547,
      "learning_rate": 1.3425421036992098e-05,
      "loss": 1.4586,
      "step": 44
    },
    {
      "epoch": 0.4104903078677309,
      "grad_norm": 0.49693790078163147,
      "learning_rate": 1.2388144172720251e-05,
      "loss": 1.444,
      "step": 45
    },
    {
      "epoch": 0.41961231470923605,
      "grad_norm": 0.463955819606781,
      "learning_rate": 1.1533337816991932e-05,
      "loss": 1.5353,
      "step": 46
    },
    {
      "epoch": 0.42873432155074115,
      "grad_norm": 0.496754914522171,
      "learning_rate": 1.0864662381854632e-05,
      "loss": 1.4412,
      "step": 47
    },
    {
      "epoch": 0.4378563283922463,
      "grad_norm": 0.680721640586853,
      "learning_rate": 1.0384981238178534e-05,
      "loss": 1.5626,
      "step": 48
    },
    {
      "epoch": 0.4469783352337514,
      "grad_norm": 0.5851555466651917,
      "learning_rate": 1.0096348454262845e-05,
      "loss": 1.5285,
      "step": 49
    },
    {
      "epoch": 0.45610034207525657,
      "grad_norm": 1.3343528509140015,
      "learning_rate": 1e-05,
      "loss": 1.6886,
      "step": 50
    },
    {
      "epoch": 0.45610034207525657,
      "eval_loss": 1.3095707893371582,
      "eval_runtime": 1.0162,
      "eval_samples_per_second": 49.203,
      "eval_steps_per_second": 12.793,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 50,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 8.368021483552768e+16,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}
|
|