Commit 9f6abca (verified): Training in progress, step 75, checkpoint
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.22123893805309736,
"eval_steps": 9,
"global_step": 75,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0029498525073746312,
"grad_norm": 2.6695961952209473,
"learning_rate": 1e-05,
"loss": 4.059,
"step": 1
},
{
"epoch": 0.0029498525073746312,
"eval_loss": 2.0677247047424316,
"eval_runtime": 44.3994,
"eval_samples_per_second": 6.442,
"eval_steps_per_second": 0.811,
"step": 1
},
{
"epoch": 0.0058997050147492625,
"grad_norm": 1.780763030052185,
"learning_rate": 2e-05,
"loss": 3.1111,
"step": 2
},
{
"epoch": 0.008849557522123894,
"grad_norm": 3.134167194366455,
"learning_rate": 3e-05,
"loss": 3.5404,
"step": 3
},
{
"epoch": 0.011799410029498525,
"grad_norm": 4.528249263763428,
"learning_rate": 4e-05,
"loss": 4.5936,
"step": 4
},
{
"epoch": 0.014749262536873156,
"grad_norm": 3.391472339630127,
"learning_rate": 5e-05,
"loss": 4.1795,
"step": 5
},
{
"epoch": 0.017699115044247787,
"grad_norm": 2.1838064193725586,
"learning_rate": 6e-05,
"loss": 3.0666,
"step": 6
},
{
"epoch": 0.02064896755162242,
"grad_norm": 2.6701786518096924,
"learning_rate": 7e-05,
"loss": 3.5408,
"step": 7
},
{
"epoch": 0.02359882005899705,
"grad_norm": 2.9336118698120117,
"learning_rate": 8e-05,
"loss": 3.67,
"step": 8
},
{
"epoch": 0.02654867256637168,
"grad_norm": 5.5627264976501465,
"learning_rate": 9e-05,
"loss": 3.7612,
"step": 9
},
{
"epoch": 0.02654867256637168,
"eval_loss": 1.4617587327957153,
"eval_runtime": 44.4212,
"eval_samples_per_second": 6.438,
"eval_steps_per_second": 0.81,
"step": 9
},
{
"epoch": 0.029498525073746312,
"grad_norm": 3.2141149044036865,
"learning_rate": 0.0001,
"loss": 3.1932,
"step": 10
},
{
"epoch": 0.032448377581120944,
"grad_norm": 2.183711051940918,
"learning_rate": 9.99695413509548e-05,
"loss": 2.4995,
"step": 11
},
{
"epoch": 0.035398230088495575,
"grad_norm": 2.1251938343048096,
"learning_rate": 9.987820251299122e-05,
"loss": 2.4882,
"step": 12
},
{
"epoch": 0.038348082595870206,
"grad_norm": 2.368736743927002,
"learning_rate": 9.972609476841367e-05,
"loss": 1.8657,
"step": 13
},
{
"epoch": 0.04129793510324484,
"grad_norm": 3.1230757236480713,
"learning_rate": 9.951340343707852e-05,
"loss": 2.2617,
"step": 14
},
{
"epoch": 0.04424778761061947,
"grad_norm": 2.4695842266082764,
"learning_rate": 9.924038765061042e-05,
"loss": 2.24,
"step": 15
},
{
"epoch": 0.0471976401179941,
"grad_norm": 3.388068199157715,
"learning_rate": 9.890738003669029e-05,
"loss": 1.9291,
"step": 16
},
{
"epoch": 0.05014749262536873,
"grad_norm": 3.315546989440918,
"learning_rate": 9.851478631379982e-05,
"loss": 2.5513,
"step": 17
},
{
"epoch": 0.05309734513274336,
"grad_norm": 3.138029098510742,
"learning_rate": 9.806308479691595e-05,
"loss": 2.061,
"step": 18
},
{
"epoch": 0.05309734513274336,
"eval_loss": 1.0333534479141235,
"eval_runtime": 44.4437,
"eval_samples_per_second": 6.435,
"eval_steps_per_second": 0.81,
"step": 18
},
{
"epoch": 0.05604719764011799,
"grad_norm": 2.192615509033203,
"learning_rate": 9.755282581475769e-05,
"loss": 1.6975,
"step": 19
},
{
"epoch": 0.058997050147492625,
"grad_norm": 2.5154130458831787,
"learning_rate": 9.698463103929542e-05,
"loss": 1.3443,
"step": 20
},
{
"epoch": 0.061946902654867256,
"grad_norm": 2.76058030128479,
"learning_rate": 9.635919272833938e-05,
"loss": 2.3646,
"step": 21
},
{
"epoch": 0.06489675516224189,
"grad_norm": 1.8679944276809692,
"learning_rate": 9.567727288213005e-05,
"loss": 2.1289,
"step": 22
},
{
"epoch": 0.06784660766961652,
"grad_norm": 1.9377188682556152,
"learning_rate": 9.493970231495835e-05,
"loss": 2.1156,
"step": 23
},
{
"epoch": 0.07079646017699115,
"grad_norm": 2.5624561309814453,
"learning_rate": 9.414737964294636e-05,
"loss": 1.9728,
"step": 24
},
{
"epoch": 0.07374631268436578,
"grad_norm": 1.7926393747329712,
"learning_rate": 9.330127018922194e-05,
"loss": 2.9014,
"step": 25
},
{
"epoch": 0.07669616519174041,
"grad_norm": 1.742391586303711,
"learning_rate": 9.24024048078213e-05,
"loss": 1.5669,
"step": 26
},
{
"epoch": 0.07964601769911504,
"grad_norm": 2.4783685207366943,
"learning_rate": 9.145187862775209e-05,
"loss": 2.2309,
"step": 27
},
{
"epoch": 0.07964601769911504,
"eval_loss": 1.0076097249984741,
"eval_runtime": 44.4736,
"eval_samples_per_second": 6.431,
"eval_steps_per_second": 0.809,
"step": 27
},
{
"epoch": 0.08259587020648967,
"grad_norm": 2.483924388885498,
"learning_rate": 9.045084971874738e-05,
"loss": 1.7079,
"step": 28
},
{
"epoch": 0.0855457227138643,
"grad_norm": 2.8759710788726807,
"learning_rate": 8.940053768033609e-05,
"loss": 2.1136,
"step": 29
},
{
"epoch": 0.08849557522123894,
"grad_norm": 2.6359269618988037,
"learning_rate": 8.83022221559489e-05,
"loss": 1.9826,
"step": 30
},
{
"epoch": 0.09144542772861357,
"grad_norm": 1.697524905204773,
"learning_rate": 8.715724127386972e-05,
"loss": 2.5094,
"step": 31
},
{
"epoch": 0.0943952802359882,
"grad_norm": 3.194821834564209,
"learning_rate": 8.596699001693255e-05,
"loss": 1.8343,
"step": 32
},
{
"epoch": 0.09734513274336283,
"grad_norm": 3.587616443634033,
"learning_rate": 8.473291852294987e-05,
"loss": 1.7127,
"step": 33
},
{
"epoch": 0.10029498525073746,
"grad_norm": 2.2063448429107666,
"learning_rate": 8.345653031794292e-05,
"loss": 2.7964,
"step": 34
},
{
"epoch": 0.10324483775811209,
"grad_norm": 2.4330263137817383,
"learning_rate": 8.213938048432697e-05,
"loss": 1.4703,
"step": 35
},
{
"epoch": 0.10619469026548672,
"grad_norm": 4.004213333129883,
"learning_rate": 8.07830737662829e-05,
"loss": 1.8824,
"step": 36
},
{
"epoch": 0.10619469026548672,
"eval_loss": 0.9778890609741211,
"eval_runtime": 44.3941,
"eval_samples_per_second": 6.442,
"eval_steps_per_second": 0.811,
"step": 36
},
{
"epoch": 0.10914454277286136,
"grad_norm": 2.327597141265869,
"learning_rate": 7.938926261462366e-05,
"loss": 2.1188,
"step": 37
},
{
"epoch": 0.11209439528023599,
"grad_norm": 2.4239344596862793,
"learning_rate": 7.795964517353735e-05,
"loss": 1.8542,
"step": 38
},
{
"epoch": 0.11504424778761062,
"grad_norm": 2.819444417953491,
"learning_rate": 7.649596321166024e-05,
"loss": 3.0174,
"step": 39
},
{
"epoch": 0.11799410029498525,
"grad_norm": 2.2955985069274902,
"learning_rate": 7.500000000000001e-05,
"loss": 1.8439,
"step": 40
},
{
"epoch": 0.12094395280235988,
"grad_norm": 2.412621021270752,
"learning_rate": 7.347357813929454e-05,
"loss": 1.3983,
"step": 41
},
{
"epoch": 0.12389380530973451,
"grad_norm": 2.1134157180786133,
"learning_rate": 7.191855733945387e-05,
"loss": 2.4839,
"step": 42
},
{
"epoch": 0.12684365781710916,
"grad_norm": 3.077287197113037,
"learning_rate": 7.033683215379002e-05,
"loss": 1.7456,
"step": 43
},
{
"epoch": 0.12979351032448377,
"grad_norm": 2.8075761795043945,
"learning_rate": 6.873032967079561e-05,
"loss": 1.6592,
"step": 44
},
{
"epoch": 0.13274336283185842,
"grad_norm": 3.0821831226348877,
"learning_rate": 6.710100716628344e-05,
"loss": 2.0796,
"step": 45
},
{
"epoch": 0.13274336283185842,
"eval_loss": 0.9554188251495361,
"eval_runtime": 44.4051,
"eval_samples_per_second": 6.441,
"eval_steps_per_second": 0.811,
"step": 45
},
{
"epoch": 0.13569321533923304,
"grad_norm": 1.895007848739624,
"learning_rate": 6.545084971874738e-05,
"loss": 1.5245,
"step": 46
},
{
"epoch": 0.13864306784660768,
"grad_norm": 1.7392289638519287,
"learning_rate": 6.378186779084995e-05,
"loss": 1.772,
"step": 47
},
{
"epoch": 0.1415929203539823,
"grad_norm": 2.4289803504943848,
"learning_rate": 6.209609477998338e-05,
"loss": 2.5995,
"step": 48
},
{
"epoch": 0.14454277286135694,
"grad_norm": 2.307551383972168,
"learning_rate": 6.0395584540887963e-05,
"loss": 2.3335,
"step": 49
},
{
"epoch": 0.14749262536873156,
"grad_norm": 2.0941660404205322,
"learning_rate": 5.868240888334653e-05,
"loss": 2.5142,
"step": 50
},
{
"epoch": 0.1504424778761062,
"grad_norm": 1.5647395849227905,
"learning_rate": 5.695865504800327e-05,
"loss": 2.0823,
"step": 51
},
{
"epoch": 0.15339233038348082,
"grad_norm": 3.6214590072631836,
"learning_rate": 5.522642316338268e-05,
"loss": 2.4263,
"step": 52
},
{
"epoch": 0.15634218289085547,
"grad_norm": 2.2273218631744385,
"learning_rate": 5.348782368720626e-05,
"loss": 1.8634,
"step": 53
},
{
"epoch": 0.1592920353982301,
"grad_norm": 2.075181722640991,
"learning_rate": 5.174497483512506e-05,
"loss": 2.1057,
"step": 54
},
{
"epoch": 0.1592920353982301,
"eval_loss": 0.9436812996864319,
"eval_runtime": 44.4269,
"eval_samples_per_second": 6.438,
"eval_steps_per_second": 0.81,
"step": 54
},
{
"epoch": 0.16224188790560473,
"grad_norm": 2.0070507526397705,
"learning_rate": 5e-05,
"loss": 1.7728,
"step": 55
},
{
"epoch": 0.16519174041297935,
"grad_norm": 2.2338101863861084,
"learning_rate": 4.825502516487497e-05,
"loss": 2.7216,
"step": 56
},
{
"epoch": 0.168141592920354,
"grad_norm": 2.3538854122161865,
"learning_rate": 4.6512176312793736e-05,
"loss": 1.6894,
"step": 57
},
{
"epoch": 0.1710914454277286,
"grad_norm": 2.551027536392212,
"learning_rate": 4.477357683661734e-05,
"loss": 1.4082,
"step": 58
},
{
"epoch": 0.17404129793510326,
"grad_norm": 2.7152366638183594,
"learning_rate": 4.3041344951996746e-05,
"loss": 1.7138,
"step": 59
},
{
"epoch": 0.17699115044247787,
"grad_norm": 3.2682361602783203,
"learning_rate": 4.131759111665349e-05,
"loss": 2.0795,
"step": 60
},
{
"epoch": 0.17994100294985252,
"grad_norm": 1.910207748413086,
"learning_rate": 3.960441545911204e-05,
"loss": 2.4073,
"step": 61
},
{
"epoch": 0.18289085545722714,
"grad_norm": 2.7701568603515625,
"learning_rate": 3.790390522001662e-05,
"loss": 1.678,
"step": 62
},
{
"epoch": 0.18584070796460178,
"grad_norm": 2.447645425796509,
"learning_rate": 3.6218132209150045e-05,
"loss": 1.9186,
"step": 63
},
{
"epoch": 0.18584070796460178,
"eval_loss": 0.9331210255622864,
"eval_runtime": 44.3944,
"eval_samples_per_second": 6.442,
"eval_steps_per_second": 0.811,
"step": 63
},
{
"epoch": 0.1887905604719764,
"grad_norm": 2.811461925506592,
"learning_rate": 3.4549150281252636e-05,
"loss": 2.0873,
"step": 64
},
{
"epoch": 0.19174041297935104,
"grad_norm": 2.722278594970703,
"learning_rate": 3.289899283371657e-05,
"loss": 1.8269,
"step": 65
},
{
"epoch": 0.19469026548672566,
"grad_norm": 2.2737720012664795,
"learning_rate": 3.12696703292044e-05,
"loss": 1.8935,
"step": 66
},
{
"epoch": 0.1976401179941003,
"grad_norm": 1.9448575973510742,
"learning_rate": 2.9663167846209998e-05,
"loss": 1.4455,
"step": 67
},
{
"epoch": 0.20058997050147492,
"grad_norm": 2.384688138961792,
"learning_rate": 2.8081442660546125e-05,
"loss": 1.9643,
"step": 68
},
{
"epoch": 0.20353982300884957,
"grad_norm": 2.049166440963745,
"learning_rate": 2.6526421860705473e-05,
"loss": 2.3775,
"step": 69
},
{
"epoch": 0.20648967551622419,
"grad_norm": 2.747662305831909,
"learning_rate": 2.500000000000001e-05,
"loss": 1.7945,
"step": 70
},
{
"epoch": 0.20943952802359883,
"grad_norm": 2.058532953262329,
"learning_rate": 2.350403678833976e-05,
"loss": 1.4522,
"step": 71
},
{
"epoch": 0.21238938053097345,
"grad_norm": 2.542921543121338,
"learning_rate": 2.2040354826462668e-05,
"loss": 2.0277,
"step": 72
},
{
"epoch": 0.21238938053097345,
"eval_loss": 0.9295855760574341,
"eval_runtime": 44.4249,
"eval_samples_per_second": 6.438,
"eval_steps_per_second": 0.81,
"step": 72
},
{
"epoch": 0.2153392330383481,
"grad_norm": 2.58382248878479,
"learning_rate": 2.061073738537635e-05,
"loss": 1.7377,
"step": 73
},
{
"epoch": 0.2182890855457227,
"grad_norm": 2.8606531620025635,
"learning_rate": 1.9216926233717085e-05,
"loss": 1.8194,
"step": 74
},
{
"epoch": 0.22123893805309736,
"grad_norm": 3.316101551055908,
"learning_rate": 1.7860619515673033e-05,
"loss": 2.3525,
"step": 75
}
],
"logging_steps": 1,
"max_steps": 100,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 25,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 9.521684545536e+16,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}
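
A minimal sketch, assuming this state is saved locally as trainer_state.json (the filename is an assumption for illustration), of how the training and evaluation losses recorded in log_history above can be read back:

import json

# Load the saved trainer state (filename assumed for illustration).
with open("trainer_state.json") as f:
    state = json.load(f)

# Training entries carry "loss"; evaluation entries carry "eval_loss".
train_loss = [(e["step"], e["loss"]) for e in state["log_history"] if "loss" in e]
eval_loss = [(e["step"], e["eval_loss"]) for e in state["log_history"] if "eval_loss" in e]

print(f"epoch {state['epoch']:.4f}, step {state['global_step']} / {state['max_steps']}")
print("final train loss:", train_loss[-1][1])   # 2.3525 at step 75
print("latest eval loss:", eval_loss[-1][1])    # ~0.9296 at step 72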