{
  "best_metric": 1.3384933471679688,
  "best_model_checkpoint": "miner_id_24/checkpoint-50",
  "epoch": 0.03352329869259135,
  "eval_steps": 25,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0006704659738518271,
      "grad_norm": 2.785381555557251,
      "learning_rate": 0.0001,
      "loss": 3.9043,
      "step": 1
    },
    {
      "epoch": 0.0006704659738518271,
      "eval_loss": 5.129286766052246,
      "eval_runtime": 217.5802,
      "eval_samples_per_second": 2.886,
      "eval_steps_per_second": 1.443,
      "step": 1
    },
    {
      "epoch": 0.0013409319477036541,
      "grad_norm": 4.200088024139404,
      "learning_rate": 0.0002,
      "loss": 4.5378,
      "step": 2
    },
    {
      "epoch": 0.002011397921555481,
      "grad_norm": 2.5246617794036865,
      "learning_rate": 0.00019978589232386035,
      "loss": 3.8154,
      "step": 3
    },
    {
      "epoch": 0.0026818638954073082,
      "grad_norm": 3.857762575149536,
      "learning_rate": 0.00019914448613738106,
      "loss": 3.9286,
      "step": 4
    },
    {
      "epoch": 0.003352329869259135,
      "grad_norm": 3.4422905445098877,
      "learning_rate": 0.00019807852804032305,
      "loss": 2.9298,
      "step": 5
    },
    {
      "epoch": 0.004022795843110962,
      "grad_norm": 5.336806774139404,
      "learning_rate": 0.00019659258262890683,
      "loss": 2.6884,
      "step": 6
    },
    {
      "epoch": 0.004693261816962789,
      "grad_norm": 2.9105687141418457,
      "learning_rate": 0.0001946930129495106,
      "loss": 2.3281,
      "step": 7
    },
    {
      "epoch": 0.0053637277908146165,
      "grad_norm": 2.198127269744873,
      "learning_rate": 0.0001923879532511287,
      "loss": 2.1382,
      "step": 8
    },
    {
      "epoch": 0.006034193764666443,
      "grad_norm": 2.610673427581787,
      "learning_rate": 0.00018968727415326884,
      "loss": 2.0367,
      "step": 9
    },
    {
      "epoch": 0.00670465973851827,
      "grad_norm": 1.9399616718292236,
      "learning_rate": 0.00018660254037844388,
      "loss": 1.6264,
      "step": 10
    },
    {
      "epoch": 0.0073751257123700975,
      "grad_norm": 2.213160753250122,
      "learning_rate": 0.00018314696123025454,
      "loss": 2.1415,
      "step": 11
    },
    {
      "epoch": 0.008045591686221924,
      "grad_norm": 2.0367908477783203,
      "learning_rate": 0.00017933533402912354,
      "loss": 1.8516,
      "step": 12
    },
    {
      "epoch": 0.008716057660073752,
      "grad_norm": 2.198748826980591,
      "learning_rate": 0.00017518398074789775,
      "loss": 1.7538,
      "step": 13
    },
    {
      "epoch": 0.009386523633925578,
      "grad_norm": 1.8134102821350098,
      "learning_rate": 0.00017071067811865476,
      "loss": 1.6823,
      "step": 14
    },
    {
      "epoch": 0.010056989607777405,
      "grad_norm": 2.1345443725585938,
      "learning_rate": 0.00016593458151000688,
      "loss": 1.8103,
      "step": 15
    },
    {
      "epoch": 0.010727455581629233,
      "grad_norm": 2.9759273529052734,
      "learning_rate": 0.00016087614290087208,
      "loss": 1.6287,
      "step": 16
    },
    {
      "epoch": 0.011397921555481059,
      "grad_norm": 2.164623975753784,
      "learning_rate": 0.00015555702330196023,
      "loss": 1.693,
      "step": 17
    },
    {
      "epoch": 0.012068387529332886,
      "grad_norm": 1.6544045209884644,
      "learning_rate": 0.00015000000000000001,
      "loss": 1.5992,
      "step": 18
    },
    {
      "epoch": 0.012738853503184714,
      "grad_norm": 1.7987711429595947,
      "learning_rate": 0.00014422886902190014,
      "loss": 1.6052,
      "step": 19
    },
    {
      "epoch": 0.01340931947703654,
      "grad_norm": 2.2308642864227295,
      "learning_rate": 0.000138268343236509,
      "loss": 1.5962,
      "step": 20
    },
    {
      "epoch": 0.014079785450888367,
      "grad_norm": 1.4975007772445679,
      "learning_rate": 0.00013214394653031616,
      "loss": 1.3212,
      "step": 21
    },
    {
      "epoch": 0.014750251424740195,
      "grad_norm": 1.5792878866195679,
      "learning_rate": 0.00012588190451025207,
      "loss": 1.6516,
      "step": 22
    },
    {
      "epoch": 0.015420717398592021,
      "grad_norm": 1.7073818445205688,
      "learning_rate": 0.00011950903220161285,
      "loss": 1.5147,
      "step": 23
    },
    {
      "epoch": 0.01609118337244385,
      "grad_norm": 1.8162294626235962,
      "learning_rate": 0.00011305261922200519,
      "loss": 1.3587,
      "step": 24
    },
    {
      "epoch": 0.016761649346295676,
      "grad_norm": 1.8236422538757324,
      "learning_rate": 0.00010654031292301432,
      "loss": 1.3354,
      "step": 25
    },
    {
      "epoch": 0.016761649346295676,
      "eval_loss": 1.3677418231964111,
      "eval_runtime": 219.7996,
      "eval_samples_per_second": 2.857,
      "eval_steps_per_second": 1.429,
      "step": 25
    },
    {
      "epoch": 0.017432115320147504,
      "grad_norm": 1.7336963415145874,
      "learning_rate": 0.0001,
      "loss": 1.3554,
      "step": 26
    },
    {
      "epoch": 0.018102581293999328,
      "grad_norm": 1.6814982891082764,
      "learning_rate": 9.345968707698569e-05,
      "loss": 1.2641,
      "step": 27
    },
    {
      "epoch": 0.018773047267851155,
      "grad_norm": 1.820735216140747,
      "learning_rate": 8.694738077799488e-05,
      "loss": 1.3903,
      "step": 28
    },
    {
      "epoch": 0.019443513241702983,
      "grad_norm": 1.9364521503448486,
      "learning_rate": 8.049096779838719e-05,
      "loss": 1.4451,
      "step": 29
    },
    {
      "epoch": 0.02011397921555481,
      "grad_norm": 2.0241713523864746,
      "learning_rate": 7.411809548974792e-05,
      "loss": 1.3482,
      "step": 30
    },
    {
      "epoch": 0.02078444518940664,
      "grad_norm": 1.512275218963623,
      "learning_rate": 6.785605346968386e-05,
      "loss": 1.2534,
      "step": 31
    },
    {
      "epoch": 0.021454911163258466,
      "grad_norm": 1.9568251371383667,
      "learning_rate": 6.173165676349103e-05,
      "loss": 1.5905,
      "step": 32
    },
    {
      "epoch": 0.02212537713711029,
      "grad_norm": 2.224773645401001,
      "learning_rate": 5.577113097809989e-05,
      "loss": 1.1378,
      "step": 33
    },
    {
      "epoch": 0.022795843110962118,
      "grad_norm": 1.7800034284591675,
      "learning_rate": 5.000000000000002e-05,
      "loss": 1.0066,
      "step": 34
    },
    {
      "epoch": 0.023466309084813945,
      "grad_norm": 2.645667314529419,
      "learning_rate": 4.444297669803981e-05,
      "loss": 0.9694,
      "step": 35
    },
    {
      "epoch": 0.024136775058665773,
      "grad_norm": 2.402721405029297,
      "learning_rate": 3.9123857099127936e-05,
      "loss": 1.3358,
      "step": 36
    },
    {
      "epoch": 0.0248072410325176,
      "grad_norm": 2.066645860671997,
      "learning_rate": 3.406541848999312e-05,
      "loss": 1.2806,
      "step": 37
    },
    {
      "epoch": 0.025477707006369428,
      "grad_norm": 1.59635329246521,
      "learning_rate": 2.9289321881345254e-05,
      "loss": 0.834,
      "step": 38
    },
    {
      "epoch": 0.026148172980221252,
      "grad_norm": 1.2451773881912231,
      "learning_rate": 2.4816019252102273e-05,
      "loss": 0.767,
      "step": 39
    },
    {
      "epoch": 0.02681863895407308,
      "grad_norm": 2.293517589569092,
      "learning_rate": 2.0664665970876496e-05,
      "loss": 1.1113,
      "step": 40
    },
    {
      "epoch": 0.027489104927924907,
      "grad_norm": 1.319640040397644,
      "learning_rate": 1.6853038769745467e-05,
      "loss": 0.6773,
      "step": 41
    },
    {
      "epoch": 0.028159570901776735,
      "grad_norm": 1.7485390901565552,
      "learning_rate": 1.339745962155613e-05,
      "loss": 1.0643,
      "step": 42
    },
    {
      "epoch": 0.028830036875628563,
      "grad_norm": 1.466673493385315,
      "learning_rate": 1.0312725846731175e-05,
      "loss": 0.7863,
      "step": 43
    },
    {
      "epoch": 0.02950050284948039,
      "grad_norm": 1.6620216369628906,
      "learning_rate": 7.612046748871327e-06,
      "loss": 0.7743,
      "step": 44
    },
    {
      "epoch": 0.030170968823332214,
      "grad_norm": 2.3844711780548096,
      "learning_rate": 5.306987050489442e-06,
      "loss": 0.5747,
      "step": 45
    },
    {
      "epoch": 0.030841434797184042,
      "grad_norm": 1.4075263738632202,
      "learning_rate": 3.40741737109318e-06,
      "loss": 0.6564,
      "step": 46
    },
    {
      "epoch": 0.03151190077103587,
      "grad_norm": 1.3964762687683105,
      "learning_rate": 1.921471959676957e-06,
      "loss": 0.6778,
      "step": 47
    },
    {
      "epoch": 0.0321823667448877,
      "grad_norm": 1.50095796585083,
      "learning_rate": 8.555138626189618e-07,
      "loss": 0.6561,
      "step": 48
    },
    {
      "epoch": 0.032852832718739525,
      "grad_norm": 1.9353364706039429,
      "learning_rate": 2.141076761396521e-07,
      "loss": 0.5138,
      "step": 49
    },
    {
      "epoch": 0.03352329869259135,
      "grad_norm": 2.526801824569702,
      "learning_rate": 0.0,
      "loss": 0.5273,
      "step": 50
    },
    {
      "epoch": 0.03352329869259135,
      "eval_loss": 1.3384933471679688,
      "eval_runtime": 219.8146,
      "eval_samples_per_second": 2.857,
      "eval_steps_per_second": 1.428,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 50,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 7.41887283560448e+16,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}