{
  "best_metric": 1.0614150762557983,
  "best_model_checkpoint": "miner_id_24/checkpoint-50",
  "epoch": 0.15464913976416006,
  "eval_steps": 25,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.003092982795283201,
      "grad_norm": 63.227630615234375,
      "learning_rate": 1e-05,
      "loss": 25.075,
      "step": 1
    },
    {
      "epoch": 0.003092982795283201,
      "eval_loss": 1.5490102767944336,
      "eval_runtime": 149.2754,
      "eval_samples_per_second": 3.651,
      "eval_steps_per_second": 1.829,
      "step": 1
    },
    {
      "epoch": 0.006185965590566402,
      "grad_norm": 66.90909576416016,
      "learning_rate": 2e-05,
      "loss": 23.6882,
      "step": 2
    },
    {
      "epoch": 0.009278948385849604,
      "grad_norm": 63.644508361816406,
      "learning_rate": 3e-05,
      "loss": 23.8152,
      "step": 3
    },
    {
      "epoch": 0.012371931181132804,
      "grad_norm": 66.12242126464844,
      "learning_rate": 4e-05,
      "loss": 24.5011,
      "step": 4
    },
    {
      "epoch": 0.015464913976416006,
      "grad_norm": 37.521697998046875,
      "learning_rate": 5e-05,
      "loss": 20.4137,
      "step": 5
    },
    {
      "epoch": 0.01855789677169921,
      "grad_norm": 36.76587677001953,
      "learning_rate": 6e-05,
      "loss": 21.0439,
      "step": 6
    },
    {
      "epoch": 0.02165087956698241,
      "grad_norm": 35.1934928894043,
      "learning_rate": 7e-05,
      "loss": 19.4824,
      "step": 7
    },
    {
      "epoch": 0.02474386236226561,
      "grad_norm": 34.91908645629883,
      "learning_rate": 8e-05,
      "loss": 18.7023,
      "step": 8
    },
    {
      "epoch": 0.02783684515754881,
      "grad_norm": 37.3280143737793,
      "learning_rate": 9e-05,
      "loss": 19.6331,
      "step": 9
    },
    {
      "epoch": 0.03092982795283201,
      "grad_norm": 33.82084655761719,
      "learning_rate": 0.0001,
      "loss": 19.1644,
      "step": 10
    },
    {
      "epoch": 0.034022810748115216,
      "grad_norm": 28.904260635375977,
      "learning_rate": 9.98458666866564e-05,
      "loss": 18.3894,
      "step": 11
    },
    {
      "epoch": 0.03711579354339842,
      "grad_norm": 33.33706283569336,
      "learning_rate": 9.938441702975689e-05,
      "loss": 19.5411,
      "step": 12
    },
    {
      "epoch": 0.04020877633868162,
      "grad_norm": 28.71995735168457,
      "learning_rate": 9.861849601988383e-05,
      "loss": 17.7854,
      "step": 13
    },
    {
      "epoch": 0.04330175913396482,
      "grad_norm": 24.508121490478516,
      "learning_rate": 9.755282581475769e-05,
      "loss": 17.7444,
      "step": 14
    },
    {
      "epoch": 0.046394741929248015,
      "grad_norm": 25.298206329345703,
      "learning_rate": 9.619397662556435e-05,
      "loss": 17.419,
      "step": 15
    },
    {
      "epoch": 0.04948772472453122,
      "grad_norm": 26.203685760498047,
      "learning_rate": 9.45503262094184e-05,
      "loss": 17.0621,
      "step": 16
    },
    {
      "epoch": 0.05258070751981442,
      "grad_norm": 28.71617889404297,
      "learning_rate": 9.263200821770461e-05,
      "loss": 19.3019,
      "step": 17
    },
    {
      "epoch": 0.05567369031509762,
      "grad_norm": 24.17940902709961,
      "learning_rate": 9.045084971874738e-05,
      "loss": 17.6426,
      "step": 18
    },
    {
      "epoch": 0.05876667311038082,
      "grad_norm": 25.524850845336914,
      "learning_rate": 8.802029828000156e-05,
      "loss": 17.6575,
      "step": 19
    },
    {
      "epoch": 0.06185965590566402,
      "grad_norm": 30.970611572265625,
      "learning_rate": 8.535533905932738e-05,
      "loss": 18.1651,
      "step": 20
    },
    {
      "epoch": 0.06495263870094722,
      "grad_norm": 26.73845100402832,
      "learning_rate": 8.247240241650918e-05,
      "loss": 17.7731,
      "step": 21
    },
    {
      "epoch": 0.06804562149623043,
      "grad_norm": 27.083934783935547,
      "learning_rate": 7.938926261462366e-05,
      "loss": 18.8107,
      "step": 22
    },
    {
      "epoch": 0.07113860429151363,
      "grad_norm": 26.337148666381836,
      "learning_rate": 7.612492823579745e-05,
      "loss": 17.086,
      "step": 23
    },
    {
      "epoch": 0.07423158708679684,
      "grad_norm": 24.6472225189209,
      "learning_rate": 7.269952498697734e-05,
      "loss": 19.3557,
      "step": 24
    },
    {
      "epoch": 0.07732456988208003,
      "grad_norm": 24.704971313476562,
      "learning_rate": 6.91341716182545e-05,
      "loss": 17.915,
      "step": 25
    },
    {
      "epoch": 0.07732456988208003,
      "eval_loss": 1.0824270248413086,
      "eval_runtime": 150.1751,
      "eval_samples_per_second": 3.629,
      "eval_steps_per_second": 1.818,
      "step": 25
    },
    {
      "epoch": 0.08041755267736324,
      "grad_norm": 24.245737075805664,
      "learning_rate": 6.545084971874738e-05,
      "loss": 17.5508,
      "step": 26
    },
    {
      "epoch": 0.08351053547264643,
      "grad_norm": 23.731287002563477,
      "learning_rate": 6.167226819279528e-05,
      "loss": 16.6255,
      "step": 27
    },
    {
      "epoch": 0.08660351826792964,
      "grad_norm": 25.361608505249023,
      "learning_rate": 5.782172325201155e-05,
      "loss": 17.3047,
      "step": 28
    },
    {
      "epoch": 0.08969650106321284,
      "grad_norm": 25.7116756439209,
      "learning_rate": 5.392295478639225e-05,
      "loss": 18.7329,
      "step": 29
    },
    {
      "epoch": 0.09278948385849603,
      "grad_norm": 22.363431930541992,
      "learning_rate": 5e-05,
      "loss": 16.206,
      "step": 30
    },
    {
      "epoch": 0.09588246665377924,
      "grad_norm": 25.386978149414062,
      "learning_rate": 4.607704521360776e-05,
      "loss": 18.509,
      "step": 31
    },
    {
      "epoch": 0.09897544944906243,
      "grad_norm": 23.575090408325195,
      "learning_rate": 4.2178276747988446e-05,
      "loss": 16.8986,
      "step": 32
    },
    {
      "epoch": 0.10206843224434564,
      "grad_norm": 24.97161102294922,
      "learning_rate": 3.832773180720475e-05,
      "loss": 18.4959,
      "step": 33
    },
    {
      "epoch": 0.10516141503962884,
      "grad_norm": 22.639801025390625,
      "learning_rate": 3.4549150281252636e-05,
      "loss": 16.6215,
      "step": 34
    },
    {
      "epoch": 0.10825439783491204,
      "grad_norm": 21.792438507080078,
      "learning_rate": 3.086582838174551e-05,
      "loss": 16.5128,
      "step": 35
    },
    {
      "epoch": 0.11134738063019524,
      "grad_norm": 23.47806167602539,
      "learning_rate": 2.7300475013022663e-05,
      "loss": 16.1797,
      "step": 36
    },
    {
      "epoch": 0.11444036342547845,
      "grad_norm": 20.9477596282959,
      "learning_rate": 2.3875071764202563e-05,
      "loss": 14.7101,
      "step": 37
    },
    {
      "epoch": 0.11753334622076164,
      "grad_norm": 23.272993087768555,
      "learning_rate": 2.061073738537635e-05,
      "loss": 17.4994,
      "step": 38
    },
    {
      "epoch": 0.12062632901604485,
      "grad_norm": 24.445510864257812,
      "learning_rate": 1.7527597583490822e-05,
      "loss": 18.4799,
      "step": 39
    },
    {
      "epoch": 0.12371931181132804,
      "grad_norm": 21.996252059936523,
      "learning_rate": 1.4644660940672627e-05,
      "loss": 16.2203,
      "step": 40
    },
    {
      "epoch": 0.12681229460661125,
      "grad_norm": 24.246135711669922,
      "learning_rate": 1.1979701719998453e-05,
      "loss": 17.0163,
      "step": 41
    },
    {
      "epoch": 0.12990527740189445,
      "grad_norm": 26.47881507873535,
      "learning_rate": 9.549150281252633e-06,
      "loss": 17.7896,
      "step": 42
    },
    {
      "epoch": 0.13299826019717764,
      "grad_norm": 22.64656639099121,
      "learning_rate": 7.367991782295391e-06,
      "loss": 16.8853,
      "step": 43
    },
    {
      "epoch": 0.13609124299246086,
      "grad_norm": 23.412134170532227,
      "learning_rate": 5.449673790581611e-06,
      "loss": 16.9634,
      "step": 44
    },
    {
      "epoch": 0.13918422578774406,
      "grad_norm": 23.991744995117188,
      "learning_rate": 3.8060233744356633e-06,
      "loss": 16.9744,
      "step": 45
    },
    {
      "epoch": 0.14227720858302725,
      "grad_norm": 22.722434997558594,
      "learning_rate": 2.4471741852423237e-06,
      "loss": 17.6464,
      "step": 46
    },
    {
      "epoch": 0.14537019137831045,
      "grad_norm": 25.069778442382812,
      "learning_rate": 1.3815039801161721e-06,
      "loss": 17.4536,
      "step": 47
    },
    {
      "epoch": 0.14846317417359367,
      "grad_norm": 24.66384506225586,
      "learning_rate": 6.15582970243117e-07,
      "loss": 17.223,
      "step": 48
    },
    {
      "epoch": 0.15155615696887687,
      "grad_norm": 25.479217529296875,
      "learning_rate": 1.5413331334360182e-07,
      "loss": 17.2031,
      "step": 49
    },
    {
      "epoch": 0.15464913976416006,
      "grad_norm": 22.113872528076172,
      "learning_rate": 0.0,
      "loss": 19.1739,
      "step": 50
    },
    {
      "epoch": 0.15464913976416006,
      "eval_loss": 1.0614150762557983,
      "eval_runtime": 150.2967,
      "eval_samples_per_second": 3.626,
      "eval_steps_per_second": 1.816,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 50,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.5435789463506125e+17,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}