Training in progress, step 102, checkpoint
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.02900199033266989,
"eval_steps": 34,
"global_step": 102,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0002843332385555871,
"eval_loss": 1.5018354654312134,
"eval_runtime": 166.295,
"eval_samples_per_second": 35.623,
"eval_steps_per_second": 4.456,
"step": 1
},
{
"epoch": 0.0008529997156667614,
"grad_norm": 0.7210565209388733,
"learning_rate": 1.5e-05,
"loss": 1.5223,
"step": 3
},
{
"epoch": 0.0017059994313335229,
"grad_norm": 0.6832892298698425,
"learning_rate": 3e-05,
"loss": 1.5221,
"step": 6
},
{
"epoch": 0.0025589991470002845,
"grad_norm": 0.7228020429611206,
"learning_rate": 4.5e-05,
"loss": 1.408,
"step": 9
},
{
"epoch": 0.0034119988626670457,
"grad_norm": 0.6384875774383545,
"learning_rate": 4.999675562428437e-05,
"loss": 1.4404,
"step": 12
},
{
"epoch": 0.004264998578333807,
"grad_norm": 0.3351251482963562,
"learning_rate": 4.9979724954289244e-05,
"loss": 1.3736,
"step": 15
},
{
"epoch": 0.005117998294000569,
"grad_norm": 0.182565838098526,
"learning_rate": 4.994810682835951e-05,
"loss": 1.3079,
"step": 18
},
{
"epoch": 0.00597099800966733,
"grad_norm": 0.19113591313362122,
"learning_rate": 4.990191971059033e-05,
"loss": 1.316,
"step": 21
},
{
"epoch": 0.0068239977253340914,
"grad_norm": 0.1948336660861969,
"learning_rate": 4.984119057295783e-05,
"loss": 1.2426,
"step": 24
},
{
"epoch": 0.007676997441000853,
"grad_norm": 0.18684643507003784,
"learning_rate": 4.976595487956823e-05,
"loss": 1.2503,
"step": 27
},
{
"epoch": 0.008529997156667614,
"grad_norm": 0.18414735794067383,
"learning_rate": 4.967625656594782e-05,
"loss": 1.2323,
"step": 30
},
{
"epoch": 0.009382996872334376,
"grad_norm": 0.17294025421142578,
"learning_rate": 4.957214801338581e-05,
"loss": 1.1942,
"step": 33
},
{
"epoch": 0.009667330110889964,
"eval_loss": 1.1813108921051025,
"eval_runtime": 167.7438,
"eval_samples_per_second": 35.316,
"eval_steps_per_second": 4.417,
"step": 34
},
{
"epoch": 0.010235996588001138,
"grad_norm": 0.16702412068843842,
"learning_rate": 4.9453690018345144e-05,
"loss": 1.1981,
"step": 36
},
{
"epoch": 0.011088996303667898,
"grad_norm": 0.1968175172805786,
"learning_rate": 4.932095175695911e-05,
"loss": 1.1675,
"step": 39
},
{
"epoch": 0.01194199601933466,
"grad_norm": 0.18244469165802002,
"learning_rate": 4.917401074463441e-05,
"loss": 1.1584,
"step": 42
},
{
"epoch": 0.01279499573500142,
"grad_norm": 0.16749081015586853,
"learning_rate": 4.901295279078431e-05,
"loss": 1.1134,
"step": 45
},
{
"epoch": 0.013647995450668183,
"grad_norm": 0.17398597300052643,
"learning_rate": 4.883787194871841e-05,
"loss": 1.1139,
"step": 48
},
{
"epoch": 0.014500995166334945,
"grad_norm": 0.17164087295532227,
"learning_rate": 4.864887046071813e-05,
"loss": 1.079,
"step": 51
},
{
"epoch": 0.015353994882001705,
"grad_norm": 0.1644001007080078,
"learning_rate": 4.8446058698330115e-05,
"loss": 1.0646,
"step": 54
},
{
"epoch": 0.01620699459766847,
"grad_norm": 0.16490623354911804,
"learning_rate": 4.822955509791233e-05,
"loss": 1.0739,
"step": 57
},
{
"epoch": 0.017059994313335228,
"grad_norm": 0.17708458006381989,
"learning_rate": 4.799948609147061e-05,
"loss": 1.0897,
"step": 60
},
{
"epoch": 0.01791299402900199,
"grad_norm": 0.15597032010555267,
"learning_rate": 4.7755986032825864e-05,
"loss": 1.0566,
"step": 63
},
{
"epoch": 0.018765993744668752,
"grad_norm": 0.17728550732135773,
"learning_rate": 4.74991971191553e-05,
"loss": 1.0275,
"step": 66
},
{
"epoch": 0.019334660221779928,
"eval_loss": 1.0011852979660034,
"eval_runtime": 168.1174,
"eval_samples_per_second": 35.237,
"eval_steps_per_second": 4.408,
"step": 68
},
{
"epoch": 0.019618993460335514,
"grad_norm": 0.16994404792785645,
"learning_rate": 4.7229269307953235e-05,
"loss": 0.9841,
"step": 69
},
{
"epoch": 0.020471993176002276,
"grad_norm": 0.17181497812271118,
"learning_rate": 4.694636022946012e-05,
"loss": 0.9944,
"step": 72
},
{
"epoch": 0.021324992891669035,
"grad_norm": 0.21411621570587158,
"learning_rate": 4.665063509461097e-05,
"loss": 1.0013,
"step": 75
},
{
"epoch": 0.022177992607335797,
"grad_norm": 0.19594340026378632,
"learning_rate": 4.6342266598556814e-05,
"loss": 0.9969,
"step": 78
},
{
"epoch": 0.02303099232300256,
"grad_norm": 0.2057276964187622,
"learning_rate": 4.6021434819815555e-05,
"loss": 0.9808,
"step": 81
},
{
"epoch": 0.02388399203866932,
"grad_norm": 0.21778051555156708,
"learning_rate": 4.568832711511125e-05,
"loss": 0.9456,
"step": 84
},
{
"epoch": 0.024736991754336083,
"grad_norm": 0.21349306404590607,
"learning_rate": 4.534313800996299e-05,
"loss": 0.953,
"step": 87
},
{
"epoch": 0.02558999147000284,
"grad_norm": 0.21818219125270844,
"learning_rate": 4.498606908508754e-05,
"loss": 0.9133,
"step": 90
},
{
"epoch": 0.026442991185669604,
"grad_norm": 0.2510223388671875,
"learning_rate": 4.46173288586818e-05,
"loss": 0.9125,
"step": 93
},
{
"epoch": 0.027295990901336366,
"grad_norm": 0.24031595885753632,
"learning_rate": 4.4237132664654154e-05,
"loss": 0.8784,
"step": 96
},
{
"epoch": 0.028148990617003128,
"grad_norm": 0.2543729543685913,
"learning_rate": 4.384570252687542e-05,
"loss": 0.8984,
"step": 99
},
{
"epoch": 0.02900199033266989,
"grad_norm": 0.27020716667175293,
"learning_rate": 4.344326702952326e-05,
"loss": 0.8719,
"step": 102
},
{
"epoch": 0.02900199033266989,
"eval_loss": 0.8574855923652649,
"eval_runtime": 168.0651,
"eval_samples_per_second": 35.248,
"eval_steps_per_second": 4.409,
"step": 102
}
],
"logging_steps": 3,
"max_steps": 400,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 34,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 3.2664001796112384e+16,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}
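
For context, the JSON above appears to be the `trainer_state.json` that the Hugging Face `transformers` Trainer writes alongside each checkpoint. Below is a minimal sketch of how such a file could be inspected, assuming it sits in a hypothetical `checkpoint-102/` directory; only the standard-library `json` module is used, and the keys referenced (`log_history`, `global_step`, `max_steps`, `loss`, `eval_loss`, `learning_rate`) are the ones visible in the file above.

```python
import json

# Load the checkpoint's trainer state (the path is an assumption; adjust to your checkpoint dir).
with open("checkpoint-102/trainer_state.json") as f:
    state = json.load(f)

# log_history mixes training records (with "loss"/"learning_rate") and eval records (with "eval_loss").
train_records = [r for r in state["log_history"] if "loss" in r]
eval_records = [r for r in state["log_history"] if "eval_loss" in r]

print(f"global_step={state['global_step']}  max_steps={state['max_steps']}")

# Print the evaluation-loss trajectory recorded so far.
for r in eval_records:
    print(f"step {r['step']:>4}: eval_loss={r['eval_loss']:.4f} "
          f"({r['eval_samples_per_second']:.1f} samples/s)")

# Last logged training loss and learning rate, if any training records exist.
if train_records:
    last = train_records[-1]
    print(f"last train loss at step {last['step']}: {last['loss']} "
          f"(lr={last['learning_rate']:.2e})")
```

This kind of script is handy for quickly comparing checkpoints without loading any model weights, since the loss and learning-rate history is already serialized in the state file.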