{
  "best_metric": 6.405816078186035,
  "best_model_checkpoint": "miner_id_24/checkpoint-50",
  "epoch": 0.1245136186770428,
  "eval_steps": 25,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.002490272373540856,
      "grad_norm": 4.651301383972168,
      "learning_rate": 3.3333333333333335e-05,
      "loss": 7.3638,
      "step": 1
    },
    {
      "epoch": 0.002490272373540856,
      "eval_loss": 7.0145440101623535,
      "eval_runtime": 0.468,
      "eval_samples_per_second": 106.837,
      "eval_steps_per_second": 27.777,
      "step": 1
    },
    {
      "epoch": 0.004980544747081712,
      "grad_norm": 2.4325013160705566,
      "learning_rate": 6.666666666666667e-05,
      "loss": 7.2351,
      "step": 2
    },
    {
      "epoch": 0.007470817120622568,
      "grad_norm": 2.669245719909668,
      "learning_rate": 0.0001,
      "loss": 7.4741,
      "step": 3
    },
    {
      "epoch": 0.009961089494163425,
      "grad_norm": 2.6087687015533447,
      "learning_rate": 9.99571699711836e-05,
      "loss": 7.4151,
      "step": 4
    },
    {
      "epoch": 0.012451361867704281,
      "grad_norm": 2.327425241470337,
      "learning_rate": 9.982876141412856e-05,
      "loss": 7.3941,
      "step": 5
    },
    {
      "epoch": 0.014941634241245135,
      "grad_norm": 2.1162900924682617,
      "learning_rate": 9.961501876182148e-05,
      "loss": 7.2274,
      "step": 6
    },
    {
      "epoch": 0.017431906614785993,
      "grad_norm": 2.0817477703094482,
      "learning_rate": 9.931634888554937e-05,
      "loss": 7.3322,
      "step": 7
    },
    {
      "epoch": 0.01992217898832685,
      "grad_norm": 1.9033664464950562,
      "learning_rate": 9.893332032039701e-05,
      "loss": 7.241,
      "step": 8
    },
    {
      "epoch": 0.022412451361867706,
      "grad_norm": 2.0073659420013428,
      "learning_rate": 9.846666218300807e-05,
      "loss": 7.165,
      "step": 9
    },
    {
      "epoch": 0.024902723735408562,
      "grad_norm": 2.191962242126465,
      "learning_rate": 9.791726278367022e-05,
      "loss": 7.1324,
      "step": 10
    },
    {
      "epoch": 0.027392996108949415,
      "grad_norm": 2.5749411582946777,
      "learning_rate": 9.728616793536588e-05,
      "loss": 7.1557,
      "step": 11
    },
    {
      "epoch": 0.02988326848249027,
      "grad_norm": 3.204463005065918,
      "learning_rate": 9.657457896300791e-05,
      "loss": 6.5895,
      "step": 12
    },
    {
      "epoch": 0.03237354085603113,
      "grad_norm": 1.7733168601989746,
      "learning_rate": 9.578385041664925e-05,
      "loss": 6.9292,
      "step": 13
    },
    {
      "epoch": 0.03486381322957199,
      "grad_norm": 1.2914466857910156,
      "learning_rate": 9.491548749301997e-05,
      "loss": 6.8379,
      "step": 14
    },
    {
      "epoch": 0.03735408560311284,
      "grad_norm": 1.261443853378296,
      "learning_rate": 9.397114317029975e-05,
      "loss": 7.0811,
      "step": 15
    },
    {
      "epoch": 0.0398443579766537,
      "grad_norm": 1.2122100591659546,
      "learning_rate": 9.295261506157986e-05,
      "loss": 7.038,
      "step": 16
    },
    {
      "epoch": 0.04233463035019455,
      "grad_norm": 1.1478087902069092,
      "learning_rate": 9.186184199300464e-05,
      "loss": 7.0881,
      "step": 17
    },
    {
      "epoch": 0.04482490272373541,
      "grad_norm": 1.0610593557357788,
      "learning_rate": 9.070090031310558e-05,
      "loss": 7.0056,
      "step": 18
    },
    {
      "epoch": 0.047315175097276264,
      "grad_norm": 1.1188586950302124,
      "learning_rate": 8.947199994035401e-05,
      "loss": 6.9136,
      "step": 19
    },
    {
      "epoch": 0.049805447470817124,
      "grad_norm": 1.1458325386047363,
      "learning_rate": 8.817748015645558e-05,
      "loss": 6.9737,
      "step": 20
    },
    {
      "epoch": 0.05229571984435798,
      "grad_norm": 1.067223310470581,
      "learning_rate": 8.681980515339464e-05,
      "loss": 6.876,
      "step": 21
    },
    {
      "epoch": 0.05478599221789883,
      "grad_norm": 1.486210823059082,
      "learning_rate": 8.540155934270471e-05,
      "loss": 6.7558,
      "step": 22
    },
    {
      "epoch": 0.05727626459143969,
      "grad_norm": 1.893701434135437,
      "learning_rate": 8.392544243589427e-05,
      "loss": 6.7791,
      "step": 23
    },
    {
      "epoch": 0.05976653696498054,
      "grad_norm": 2.173220157623291,
      "learning_rate": 8.239426430539243e-05,
      "loss": 6.7891,
      "step": 24
    },
    {
      "epoch": 0.0622568093385214,
      "grad_norm": 2.901264190673828,
      "learning_rate": 8.081093963579707e-05,
      "loss": 6.4873,
      "step": 25
    },
    {
      "epoch": 0.0622568093385214,
      "eval_loss": 6.543909072875977,
      "eval_runtime": 0.4765,
      "eval_samples_per_second": 104.926,
      "eval_steps_per_second": 27.281,
      "step": 25
    },
    {
      "epoch": 0.06474708171206225,
      "grad_norm": 3.1084837913513184,
      "learning_rate": 7.917848237560709e-05,
      "loss": 6.6169,
      "step": 26
    },
    {
      "epoch": 0.06723735408560311,
      "grad_norm": 1.2209361791610718,
      "learning_rate": 7.75e-05,
      "loss": 6.7791,
      "step": 27
    },
    {
      "epoch": 0.06972762645914397,
      "grad_norm": 1.1099854707717896,
      "learning_rate": 7.577868759557654e-05,
      "loss": 6.936,
      "step": 28
    },
    {
      "epoch": 0.07221789883268483,
      "grad_norm": 1.0699315071105957,
      "learning_rate": 7.401782177833148e-05,
      "loss": 6.8154,
      "step": 29
    },
    {
      "epoch": 0.07470817120622568,
      "grad_norm": 0.9406839609146118,
      "learning_rate": 7.222075445642904e-05,
      "loss": 6.988,
      "step": 30
    },
    {
      "epoch": 0.07719844357976653,
      "grad_norm": 0.9526640176773071,
      "learning_rate": 7.03909064496551e-05,
      "loss": 6.7899,
      "step": 31
    },
    {
      "epoch": 0.0796887159533074,
      "grad_norm": 0.8295072913169861,
      "learning_rate": 6.853176097769229e-05,
      "loss": 6.7357,
      "step": 32
    },
    {
      "epoch": 0.08217898832684825,
      "grad_norm": 0.9567013382911682,
      "learning_rate": 6.664685702961344e-05,
      "loss": 6.797,
      "step": 33
    },
    {
      "epoch": 0.0846692607003891,
      "grad_norm": 1.071341872215271,
      "learning_rate": 6.473978262721463e-05,
      "loss": 6.6351,
      "step": 34
    },
    {
      "epoch": 0.08715953307392996,
      "grad_norm": 1.3368276357650757,
      "learning_rate": 6.281416799501188e-05,
      "loss": 6.6517,
      "step": 35
    },
    {
      "epoch": 0.08964980544747082,
      "grad_norm": 1.7732629776000977,
      "learning_rate": 6.087367864990233e-05,
      "loss": 6.4047,
      "step": 36
    },
    {
      "epoch": 0.09214007782101168,
      "grad_norm": 2.385563850402832,
      "learning_rate": 5.8922008423644624e-05,
      "loss": 6.4936,
      "step": 37
    },
    {
      "epoch": 0.09463035019455253,
      "grad_norm": 2.034431219100952,
      "learning_rate": 5.696287243144013e-05,
      "loss": 7.0241,
      "step": 38
    },
    {
      "epoch": 0.09712062256809338,
      "grad_norm": 0.9206064343452454,
      "learning_rate": 5.500000000000001e-05,
      "loss": 6.7179,
      "step": 39
    },
    {
      "epoch": 0.09961089494163425,
      "grad_norm": 0.995243489742279,
      "learning_rate": 5.303712756855988e-05,
      "loss": 6.8535,
      "step": 40
    },
    {
      "epoch": 0.1021011673151751,
      "grad_norm": 0.9085476994514465,
      "learning_rate": 5.107799157635538e-05,
      "loss": 6.7739,
      "step": 41
    },
    {
      "epoch": 0.10459143968871595,
      "grad_norm": 0.8075990080833435,
      "learning_rate": 4.912632135009769e-05,
      "loss": 6.8463,
      "step": 42
    },
    {
      "epoch": 0.1070817120622568,
      "grad_norm": 0.7797780632972717,
      "learning_rate": 4.718583200498814e-05,
      "loss": 6.7627,
      "step": 43
    },
    {
      "epoch": 0.10957198443579766,
      "grad_norm": 0.7625030279159546,
      "learning_rate": 4.526021737278538e-05,
      "loss": 6.7574,
      "step": 44
    },
    {
      "epoch": 0.11206225680933853,
      "grad_norm": 0.788215160369873,
      "learning_rate": 4.3353142970386564e-05,
      "loss": 6.653,
      "step": 45
    },
    {
      "epoch": 0.11455252918287938,
      "grad_norm": 1.022986888885498,
      "learning_rate": 4.146823902230772e-05,
      "loss": 6.7437,
      "step": 46
    },
    {
      "epoch": 0.11704280155642023,
      "grad_norm": 1.1553353071212769,
      "learning_rate": 3.960909355034491e-05,
      "loss": 6.5571,
      "step": 47
    },
    {
      "epoch": 0.11953307392996108,
      "grad_norm": 1.4759111404418945,
      "learning_rate": 3.777924554357096e-05,
      "loss": 6.4163,
      "step": 48
    },
    {
      "epoch": 0.12202334630350195,
      "grad_norm": 1.8967125415802002,
      "learning_rate": 3.598217822166854e-05,
      "loss": 6.7076,
      "step": 49
    },
    {
      "epoch": 0.1245136186770428,
      "grad_norm": 2.9591472148895264,
      "learning_rate": 3.422131240442349e-05,
      "loss": 6.5811,
      "step": 50
    },
    {
      "epoch": 0.1245136186770428,
      "eval_loss": 6.405816078186035,
      "eval_runtime": 0.4887,
      "eval_samples_per_second": 102.322,
      "eval_steps_per_second": 26.604,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 75,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 232050050203648.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}