{
  "best_metric": 1.0076379776000977,
  "best_model_checkpoint": "miner_id_24/checkpoint-50",
  "epoch": 0.02781157656874674,
  "eval_steps": 25,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0005562315313749349,
      "grad_norm": 0.1657542884349823,
      "learning_rate": 5e-05,
      "loss": 0.4995,
      "step": 1
    },
    {
      "epoch": 0.0005562315313749349,
      "eval_loss": 1.4900273084640503,
      "eval_runtime": 138.2121,
      "eval_samples_per_second": 87.633,
      "eval_steps_per_second": 10.954,
      "step": 1
    },
    {
      "epoch": 0.0011124630627498697,
      "grad_norm": 0.2748042643070221,
      "learning_rate": 0.0001,
      "loss": 0.7094,
      "step": 2
    },
    {
      "epoch": 0.0016686945941248046,
      "grad_norm": 0.27427706122398376,
      "learning_rate": 9.989294616193017e-05,
      "loss": 0.7878,
      "step": 3
    },
    {
      "epoch": 0.0022249261254997394,
      "grad_norm": 0.1776735633611679,
      "learning_rate": 9.957224306869053e-05,
      "loss": 0.6785,
      "step": 4
    },
    {
      "epoch": 0.0027811576568746743,
      "grad_norm": 0.20435677468776703,
      "learning_rate": 9.903926402016153e-05,
      "loss": 0.7492,
      "step": 5
    },
    {
      "epoch": 0.003337389188249609,
      "grad_norm": 0.2518686056137085,
      "learning_rate": 9.829629131445342e-05,
      "loss": 0.7544,
      "step": 6
    },
    {
      "epoch": 0.0038936207196245435,
      "grad_norm": 0.2997715175151825,
      "learning_rate": 9.73465064747553e-05,
      "loss": 1.0074,
      "step": 7
    },
    {
      "epoch": 0.004449852250999479,
      "grad_norm": 0.40672293305397034,
      "learning_rate": 9.619397662556435e-05,
      "loss": 1.2292,
      "step": 8
    },
    {
      "epoch": 0.005006083782374413,
      "grad_norm": 0.6158118844032288,
      "learning_rate": 9.484363707663442e-05,
      "loss": 1.3492,
      "step": 9
    },
    {
      "epoch": 0.0055623153137493485,
      "grad_norm": 0.8367748260498047,
      "learning_rate": 9.330127018922194e-05,
      "loss": 1.3936,
      "step": 10
    },
    {
      "epoch": 0.006118546845124283,
      "grad_norm": 0.7799615263938904,
      "learning_rate": 9.157348061512727e-05,
      "loss": 1.5371,
      "step": 11
    },
    {
      "epoch": 0.006674778376499218,
      "grad_norm": 1.3306225538253784,
      "learning_rate": 8.966766701456177e-05,
      "loss": 1.7766,
      "step": 12
    },
    {
      "epoch": 0.007231009907874153,
      "grad_norm": 0.28293538093566895,
      "learning_rate": 8.759199037394887e-05,
      "loss": 0.3871,
      "step": 13
    },
    {
      "epoch": 0.007787241439249087,
      "grad_norm": 0.48717114329338074,
      "learning_rate": 8.535533905932738e-05,
      "loss": 0.6559,
      "step": 14
    },
    {
      "epoch": 0.008343472970624021,
      "grad_norm": 0.5592238903045654,
      "learning_rate": 8.296729075500344e-05,
      "loss": 0.7763,
      "step": 15
    },
    {
      "epoch": 0.008899704501998958,
      "grad_norm": 0.5054487586021423,
      "learning_rate": 8.043807145043604e-05,
      "loss": 0.721,
      "step": 16
    },
    {
      "epoch": 0.009455936033373892,
      "grad_norm": 0.4145556390285492,
      "learning_rate": 7.777851165098012e-05,
      "loss": 0.7055,
      "step": 17
    },
    {
      "epoch": 0.010012167564748826,
      "grad_norm": 0.36540091037750244,
      "learning_rate": 7.500000000000001e-05,
      "loss": 0.6979,
      "step": 18
    },
    {
      "epoch": 0.010568399096123761,
      "grad_norm": 0.28136712312698364,
      "learning_rate": 7.211443451095007e-05,
      "loss": 0.7644,
      "step": 19
    },
    {
      "epoch": 0.011124630627498697,
      "grad_norm": 0.2922019064426422,
      "learning_rate": 6.91341716182545e-05,
      "loss": 0.9497,
      "step": 20
    },
    {
      "epoch": 0.011680862158873631,
      "grad_norm": 0.3472326695919037,
      "learning_rate": 6.607197326515808e-05,
      "loss": 1.17,
      "step": 21
    },
    {
      "epoch": 0.012237093690248566,
      "grad_norm": 0.4307328462600708,
      "learning_rate": 6.294095225512603e-05,
      "loss": 1.295,
      "step": 22
    },
    {
      "epoch": 0.0127933252216235,
      "grad_norm": 0.6883488297462463,
      "learning_rate": 5.9754516100806423e-05,
      "loss": 1.4575,
      "step": 23
    },
    {
      "epoch": 0.013349556752998436,
      "grad_norm": 0.7492279410362244,
      "learning_rate": 5.6526309611002594e-05,
      "loss": 1.3692,
      "step": 24
    },
    {
      "epoch": 0.01390578828437337,
      "grad_norm": 1.6095669269561768,
      "learning_rate": 5.327015646150716e-05,
      "loss": 1.9458,
      "step": 25
    },
    {
      "epoch": 0.01390578828437337,
      "eval_loss": 1.0297096967697144,
      "eval_runtime": 138.2794,
      "eval_samples_per_second": 87.591,
      "eval_steps_per_second": 10.949,
      "step": 25
    },
    {
      "epoch": 0.014462019815748305,
      "grad_norm": 0.09419423341751099,
      "learning_rate": 5e-05,
      "loss": 0.4267,
      "step": 26
    },
    {
      "epoch": 0.01501825134712324,
      "grad_norm": 0.1334831416606903,
      "learning_rate": 4.6729843538492847e-05,
      "loss": 0.6666,
      "step": 27
    },
    {
      "epoch": 0.015574482878498174,
      "grad_norm": 0.144070565700531,
      "learning_rate": 4.347369038899744e-05,
      "loss": 0.6366,
      "step": 28
    },
    {
      "epoch": 0.01613071440987311,
      "grad_norm": 0.1303204894065857,
      "learning_rate": 4.0245483899193595e-05,
      "loss": 0.633,
      "step": 29
    },
    {
      "epoch": 0.016686945941248043,
      "grad_norm": 0.14772789180278778,
      "learning_rate": 3.705904774487396e-05,
      "loss": 0.732,
      "step": 30
    },
    {
      "epoch": 0.01724317747262298,
      "grad_norm": 0.1663038730621338,
      "learning_rate": 3.392802673484193e-05,
      "loss": 0.7748,
      "step": 31
    },
    {
      "epoch": 0.017799409003997915,
      "grad_norm": 0.21385891735553741,
      "learning_rate": 3.086582838174551e-05,
      "loss": 1.008,
      "step": 32
    },
    {
      "epoch": 0.01835564053537285,
      "grad_norm": 0.29136043787002563,
      "learning_rate": 2.7885565489049946e-05,
      "loss": 1.1981,
      "step": 33
    },
    {
      "epoch": 0.018911872066747784,
      "grad_norm": 0.358648419380188,
      "learning_rate": 2.500000000000001e-05,
      "loss": 1.2509,
      "step": 34
    },
    {
      "epoch": 0.01946810359812272,
      "grad_norm": 0.40348002314567566,
      "learning_rate": 2.2221488349019903e-05,
      "loss": 1.3403,
      "step": 35
    },
    {
      "epoch": 0.020024335129497653,
      "grad_norm": 0.5576580762863159,
      "learning_rate": 1.9561928549563968e-05,
      "loss": 1.385,
      "step": 36
    },
    {
      "epoch": 0.020580566660872587,
      "grad_norm": 0.9451633095741272,
      "learning_rate": 1.703270924499656e-05,
      "loss": 1.5531,
      "step": 37
    },
    {
      "epoch": 0.021136798192247522,
      "grad_norm": 0.10654843598604202,
      "learning_rate": 1.4644660940672627e-05,
      "loss": 0.4292,
      "step": 38
    },
    {
      "epoch": 0.021693029723622456,
      "grad_norm": 0.10906118899583817,
      "learning_rate": 1.2408009626051137e-05,
      "loss": 0.5853,
      "step": 39
    },
    {
      "epoch": 0.022249261254997394,
      "grad_norm": 0.1391826868057251,
      "learning_rate": 1.0332332985438248e-05,
      "loss": 0.7037,
      "step": 40
    },
    {
      "epoch": 0.02280549278637233,
      "grad_norm": 0.13406039774417877,
      "learning_rate": 8.426519384872733e-06,
      "loss": 0.6435,
      "step": 41
    },
    {
      "epoch": 0.023361724317747263,
      "grad_norm": 0.1331794112920761,
      "learning_rate": 6.698729810778065e-06,
      "loss": 0.6847,
      "step": 42
    },
    {
      "epoch": 0.023917955849122197,
      "grad_norm": 0.1532629281282425,
      "learning_rate": 5.156362923365588e-06,
      "loss": 0.8577,
      "step": 43
    },
    {
      "epoch": 0.024474187380497132,
      "grad_norm": 0.17151039838790894,
      "learning_rate": 3.8060233744356633e-06,
      "loss": 0.8592,
      "step": 44
    },
    {
      "epoch": 0.025030418911872066,
      "grad_norm": 0.24025927484035492,
      "learning_rate": 2.653493525244721e-06,
      "loss": 1.1217,
      "step": 45
    },
    {
      "epoch": 0.025586650443247,
      "grad_norm": 0.2666977047920227,
      "learning_rate": 1.70370868554659e-06,
      "loss": 1.1314,
      "step": 46
    },
    {
      "epoch": 0.026142881974621935,
      "grad_norm": 0.3222261369228363,
      "learning_rate": 9.607359798384785e-07,
      "loss": 1.2233,
      "step": 47
    },
    {
      "epoch": 0.026699113505996873,
      "grad_norm": 0.42572563886642456,
      "learning_rate": 4.277569313094809e-07,
      "loss": 1.3975,
      "step": 48
    },
    {
      "epoch": 0.027255345037371807,
      "grad_norm": 0.5978182554244995,
      "learning_rate": 1.0705383806982606e-07,
      "loss": 1.3051,
      "step": 49
    },
    {
      "epoch": 0.02781157656874674,
      "grad_norm": 1.0638318061828613,
      "learning_rate": 0.0,
      "loss": 1.7991,
      "step": 50
    },
    {
      "epoch": 0.02781157656874674,
      "eval_loss": 1.0076379776000977,
      "eval_runtime": 138.2815,
      "eval_samples_per_second": 87.589,
      "eval_steps_per_second": 10.949,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 50,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.0608607728186163e+17,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}