{
  "best_metric": 11.915068626403809,
  "best_model_checkpoint": "miner_id_24/checkpoint-50",
  "epoch": 0.061609549480169425,
  "eval_steps": 25,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0012321909896033886,
      "grad_norm": 0.07178230583667755,
      "learning_rate": 5e-05,
      "loss": 11.9363,
      "step": 1
    },
    {
      "epoch": 0.0012321909896033886,
      "eval_loss": 11.935738563537598,
      "eval_runtime": 0.7485,
      "eval_samples_per_second": 66.799,
      "eval_steps_per_second": 17.368,
      "step": 1
    },
    {
      "epoch": 0.002464381979206777,
      "grad_norm": 0.08658259361982346,
      "learning_rate": 0.0001,
      "loss": 11.9365,
      "step": 2
    },
    {
      "epoch": 0.0036965729688101657,
      "grad_norm": 0.07627413421869278,
      "learning_rate": 9.990365154573717e-05,
      "loss": 11.936,
      "step": 3
    },
    {
      "epoch": 0.004928763958413554,
      "grad_norm": 0.06695223599672318,
      "learning_rate": 9.961501876182148e-05,
      "loss": 11.9371,
      "step": 4
    },
    {
      "epoch": 0.0061609549480169425,
      "grad_norm": 0.07730023562908173,
      "learning_rate": 9.913533761814537e-05,
      "loss": 11.9373,
      "step": 5
    },
    {
      "epoch": 0.0073931459376203315,
      "grad_norm": 0.08005542308092117,
      "learning_rate": 9.846666218300807e-05,
      "loss": 11.9352,
      "step": 6
    },
    {
      "epoch": 0.00862533692722372,
      "grad_norm": 0.08071882277727127,
      "learning_rate": 9.761185582727977e-05,
      "loss": 11.9335,
      "step": 7
    },
    {
      "epoch": 0.009857527916827109,
      "grad_norm": 0.09804913401603699,
      "learning_rate": 9.657457896300791e-05,
      "loss": 11.9324,
      "step": 8
    },
    {
      "epoch": 0.011089718906430497,
      "grad_norm": 0.0916438177227974,
      "learning_rate": 9.535927336897098e-05,
      "loss": 11.9314,
      "step": 9
    },
    {
      "epoch": 0.012321909896033885,
      "grad_norm": 0.11245589703321457,
      "learning_rate": 9.397114317029975e-05,
      "loss": 11.9334,
      "step": 10
    },
    {
      "epoch": 0.013554100885637273,
      "grad_norm": 0.15651749074459076,
      "learning_rate": 9.241613255361455e-05,
      "loss": 11.931,
      "step": 11
    },
    {
      "epoch": 0.014786291875240663,
      "grad_norm": 0.165276437997818,
      "learning_rate": 9.070090031310558e-05,
      "loss": 11.929,
      "step": 12
    },
    {
      "epoch": 0.01601848286484405,
      "grad_norm": 0.10776877403259277,
      "learning_rate": 8.883279133655399e-05,
      "loss": 11.9321,
      "step": 13
    },
    {
      "epoch": 0.01725067385444744,
      "grad_norm": 0.08762101083993912,
      "learning_rate": 8.681980515339464e-05,
      "loss": 11.9322,
      "step": 14
    },
    {
      "epoch": 0.018482864844050827,
      "grad_norm": 0.1009363904595375,
      "learning_rate": 8.467056167950311e-05,
      "loss": 11.9318,
      "step": 15
    },
    {
      "epoch": 0.019715055833654217,
      "grad_norm": 0.10808451473712921,
      "learning_rate": 8.239426430539243e-05,
      "loss": 11.9334,
      "step": 16
    },
    {
      "epoch": 0.020947246823257604,
      "grad_norm": 0.10856714844703674,
      "learning_rate": 8.000066048588211e-05,
      "loss": 11.9314,
      "step": 17
    },
    {
      "epoch": 0.022179437812860994,
      "grad_norm": 0.1188795268535614,
      "learning_rate": 7.75e-05,
      "loss": 11.931,
      "step": 18
    },
    {
      "epoch": 0.023411628802464383,
      "grad_norm": 0.1201574057340622,
      "learning_rate": 7.490299105985507e-05,
      "loss": 11.9307,
      "step": 19
    },
    {
      "epoch": 0.02464381979206777,
      "grad_norm": 0.13258428871631622,
      "learning_rate": 7.222075445642904e-05,
      "loss": 11.9308,
      "step": 20
    },
    {
      "epoch": 0.02587601078167116,
      "grad_norm": 0.14269478619098663,
      "learning_rate": 6.946477593864228e-05,
      "loss": 11.9279,
      "step": 21
    },
    {
      "epoch": 0.027108201771274546,
      "grad_norm": 0.16528292000293732,
      "learning_rate": 6.664685702961344e-05,
      "loss": 11.9243,
      "step": 22
    },
    {
      "epoch": 0.028340392760877936,
      "grad_norm": 0.1886805295944214,
      "learning_rate": 6.377906449072578e-05,
      "loss": 11.9237,
      "step": 23
    },
    {
      "epoch": 0.029572583750481326,
      "grad_norm": 0.19681161642074585,
      "learning_rate": 6.087367864990233e-05,
      "loss": 11.9207,
      "step": 24
    },
    {
      "epoch": 0.030804774740084712,
      "grad_norm": 0.23278166353702545,
      "learning_rate": 5.794314081535644e-05,
      "loss": 11.9213,
      "step": 25
    },
    {
      "epoch": 0.030804774740084712,
      "eval_loss": 11.922410011291504,
      "eval_runtime": 0.4466,
      "eval_samples_per_second": 111.969,
      "eval_steps_per_second": 29.112,
      "step": 25
    },
    {
      "epoch": 0.0320369657296881,
      "grad_norm": 0.14079074561595917,
      "learning_rate": 5.500000000000001e-05,
      "loss": 11.9271,
      "step": 26
    },
    {
      "epoch": 0.03326915671929149,
      "grad_norm": 0.1567951738834381,
      "learning_rate": 5.205685918464356e-05,
      "loss": 11.9258,
      "step": 27
    },
    {
      "epoch": 0.03450134770889488,
      "grad_norm": 0.16366036236286163,
      "learning_rate": 4.912632135009769e-05,
      "loss": 11.9249,
      "step": 28
    },
    {
      "epoch": 0.03573353869849827,
      "grad_norm": 0.1638365387916565,
      "learning_rate": 4.6220935509274235e-05,
      "loss": 11.926,
      "step": 29
    },
    {
      "epoch": 0.036965729688101655,
      "grad_norm": 0.17652325332164764,
      "learning_rate": 4.3353142970386564e-05,
      "loss": 11.9231,
      "step": 30
    },
    {
      "epoch": 0.03819792067770504,
      "grad_norm": 0.15289749205112457,
      "learning_rate": 4.053522406135775e-05,
      "loss": 11.9256,
      "step": 31
    },
    {
      "epoch": 0.039430111667308435,
      "grad_norm": 0.21502667665481567,
      "learning_rate": 3.777924554357096e-05,
      "loss": 11.918,
      "step": 32
    },
    {
      "epoch": 0.04066230265691182,
      "grad_norm": 0.16959460079669952,
      "learning_rate": 3.509700894014496e-05,
      "loss": 11.9229,
      "step": 33
    },
    {
      "epoch": 0.04189449364651521,
      "grad_norm": 0.18960808217525482,
      "learning_rate": 3.250000000000001e-05,
      "loss": 11.921,
      "step": 34
    },
    {
      "epoch": 0.0431266846361186,
      "grad_norm": 0.20932435989379883,
      "learning_rate": 2.9999339514117912e-05,
      "loss": 11.9176,
      "step": 35
    },
    {
      "epoch": 0.04435887562572199,
      "grad_norm": 0.2247883677482605,
      "learning_rate": 2.760573569460757e-05,
      "loss": 11.9166,
      "step": 36
    },
    {
      "epoch": 0.045591066615325374,
      "grad_norm": 0.2090682089328766,
      "learning_rate": 2.53294383204969e-05,
      "loss": 11.9216,
      "step": 37
    },
    {
      "epoch": 0.04682325760492877,
      "grad_norm": 0.19237466156482697,
      "learning_rate": 2.3180194846605367e-05,
      "loss": 11.9209,
      "step": 38
    },
    {
      "epoch": 0.04805544859453215,
      "grad_norm": 0.15888743102550507,
      "learning_rate": 2.1167208663446025e-05,
      "loss": 11.9218,
      "step": 39
    },
    {
      "epoch": 0.04928763958413554,
      "grad_norm": 0.1693039834499359,
      "learning_rate": 1.9299099686894423e-05,
      "loss": 11.9215,
      "step": 40
    },
    {
      "epoch": 0.050519830573738926,
      "grad_norm": 0.18711499869823456,
      "learning_rate": 1.758386744638546e-05,
      "loss": 11.9198,
      "step": 41
    },
    {
      "epoch": 0.05175202156334232,
      "grad_norm": 0.1777219921350479,
      "learning_rate": 1.602885682970026e-05,
      "loss": 11.9196,
      "step": 42
    },
    {
      "epoch": 0.052984212552945706,
      "grad_norm": 0.17029505968093872,
      "learning_rate": 1.464072663102903e-05,
      "loss": 11.9212,
      "step": 43
    },
    {
      "epoch": 0.05421640354254909,
      "grad_norm": 0.1959444284439087,
      "learning_rate": 1.3425421036992098e-05,
      "loss": 11.92,
      "step": 44
    },
    {
      "epoch": 0.055448594532152486,
      "grad_norm": 0.19401198625564575,
      "learning_rate": 1.2388144172720251e-05,
      "loss": 11.9157,
      "step": 45
    },
    {
      "epoch": 0.05668078552175587,
      "grad_norm": 0.18555186688899994,
      "learning_rate": 1.1533337816991932e-05,
      "loss": 11.9181,
      "step": 46
    },
    {
      "epoch": 0.05791297651135926,
      "grad_norm": 0.207061767578125,
      "learning_rate": 1.0864662381854632e-05,
      "loss": 11.9177,
      "step": 47
    },
    {
      "epoch": 0.05914516750096265,
      "grad_norm": 0.19741538166999817,
      "learning_rate": 1.0384981238178534e-05,
      "loss": 11.9168,
      "step": 48
    },
    {
      "epoch": 0.06037735849056604,
      "grad_norm": 0.2410183548927307,
      "learning_rate": 1.0096348454262845e-05,
      "loss": 11.9102,
      "step": 49
    },
    {
      "epoch": 0.061609549480169425,
      "grad_norm": 0.27445363998413086,
      "learning_rate": 1e-05,
      "loss": 11.9102,
      "step": 50
    },
    {
      "epoch": 0.061609549480169425,
      "eval_loss": 11.915068626403809,
      "eval_runtime": 0.4086,
      "eval_samples_per_second": 122.365,
      "eval_steps_per_second": 31.815,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 50,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 417735062323200.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}