Training in progress, step 75, checkpoint
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.012459506603538499,
"eval_steps": 9,
"global_step": 75,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.00016612675471384668,
"grad_norm": 0.9919585585594177,
"learning_rate": 1e-05,
"loss": 1.8966,
"step": 1
},
{
"epoch": 0.00016612675471384668,
"eval_loss": 1.0414841175079346,
"eval_runtime": 673.9747,
"eval_samples_per_second": 7.521,
"eval_steps_per_second": 0.941,
"step": 1
},
{
"epoch": 0.00033225350942769335,
"grad_norm": 1.5468533039093018,
"learning_rate": 2e-05,
"loss": 2.8453,
"step": 2
},
{
"epoch": 0.00049838026414154,
"grad_norm": 1.3301949501037598,
"learning_rate": 3e-05,
"loss": 2.3285,
"step": 3
},
{
"epoch": 0.0006645070188553867,
"grad_norm": 1.870064377784729,
"learning_rate": 4e-05,
"loss": 2.5663,
"step": 4
},
{
"epoch": 0.0008306337735692333,
"grad_norm": 1.6597638130187988,
"learning_rate": 5e-05,
"loss": 2.1576,
"step": 5
},
{
"epoch": 0.00099676052828308,
"grad_norm": 0.5557398796081543,
"learning_rate": 6e-05,
"loss": 1.2538,
"step": 6
},
{
"epoch": 0.0011628872829969267,
"grad_norm": 0.991572916507721,
"learning_rate": 7e-05,
"loss": 1.6291,
"step": 7
},
{
"epoch": 0.0013290140377107734,
"grad_norm": 0.6162525415420532,
"learning_rate": 8e-05,
"loss": 1.8258,
"step": 8
},
{
"epoch": 0.00149514079242462,
"grad_norm": 1.4342612028121948,
"learning_rate": 9e-05,
"loss": 2.7709,
"step": 9
},
{
"epoch": 0.00149514079242462,
"eval_loss": 0.9687274694442749,
"eval_runtime": 675.7106,
"eval_samples_per_second": 7.502,
"eval_steps_per_second": 0.938,
"step": 9
},
{
"epoch": 0.0016612675471384666,
"grad_norm": 1.043684720993042,
"learning_rate": 0.0001,
"loss": 1.713,
"step": 10
},
{
"epoch": 0.0018273943018523133,
"grad_norm": 1.1158920526504517,
"learning_rate": 9.99695413509548e-05,
"loss": 1.2303,
"step": 11
},
{
"epoch": 0.00199352105656616,
"grad_norm": 0.8735452890396118,
"learning_rate": 9.987820251299122e-05,
"loss": 1.0613,
"step": 12
},
{
"epoch": 0.0021596478112800065,
"grad_norm": 0.6356571912765503,
"learning_rate": 9.972609476841367e-05,
"loss": 1.1697,
"step": 13
},
{
"epoch": 0.0023257745659938534,
"grad_norm": 0.5694183111190796,
"learning_rate": 9.951340343707852e-05,
"loss": 1.2002,
"step": 14
},
{
"epoch": 0.0024919013207077,
"grad_norm": 0.589718759059906,
"learning_rate": 9.924038765061042e-05,
"loss": 1.5224,
"step": 15
},
{
"epoch": 0.002658028075421547,
"grad_norm": 1.3405238389968872,
"learning_rate": 9.890738003669029e-05,
"loss": 1.5344,
"step": 16
},
{
"epoch": 0.0028241548301353933,
"grad_norm": 2.1417508125305176,
"learning_rate": 9.851478631379982e-05,
"loss": 2.131,
"step": 17
},
{
"epoch": 0.00299028158484924,
"grad_norm": 1.19926917552948,
"learning_rate": 9.806308479691595e-05,
"loss": 1.9731,
"step": 18
},
{
"epoch": 0.00299028158484924,
"eval_loss": 0.7821024656295776,
"eval_runtime": 675.4552,
"eval_samples_per_second": 7.505,
"eval_steps_per_second": 0.939,
"step": 18
},
{
"epoch": 0.0031564083395630867,
"grad_norm": 0.885982871055603,
"learning_rate": 9.755282581475769e-05,
"loss": 1.3646,
"step": 19
},
{
"epoch": 0.003322535094276933,
"grad_norm": 1.1024696826934814,
"learning_rate": 9.698463103929542e-05,
"loss": 1.4764,
"step": 20
},
{
"epoch": 0.00348866184899078,
"grad_norm": 3.787921667098999,
"learning_rate": 9.635919272833938e-05,
"loss": 2.868,
"step": 21
},
{
"epoch": 0.0036547886037046266,
"grad_norm": 1.1288354396820068,
"learning_rate": 9.567727288213005e-05,
"loss": 1.7279,
"step": 22
},
{
"epoch": 0.003820915358418473,
"grad_norm": 1.6195688247680664,
"learning_rate": 9.493970231495835e-05,
"loss": 1.1748,
"step": 23
},
{
"epoch": 0.00398704211313232,
"grad_norm": 3.927457094192505,
"learning_rate": 9.414737964294636e-05,
"loss": 1.8016,
"step": 24
},
{
"epoch": 0.004153168867846167,
"grad_norm": 2.22636342048645,
"learning_rate": 9.330127018922194e-05,
"loss": 1.4189,
"step": 25
},
{
"epoch": 0.004319295622560013,
"grad_norm": 1.719977855682373,
"learning_rate": 9.24024048078213e-05,
"loss": 0.7547,
"step": 26
},
{
"epoch": 0.00448542237727386,
"grad_norm": 1.4725106954574585,
"learning_rate": 9.145187862775209e-05,
"loss": 1.498,
"step": 27
},
{
"epoch": 0.00448542237727386,
"eval_loss": 0.7110866904258728,
"eval_runtime": 675.4946,
"eval_samples_per_second": 7.504,
"eval_steps_per_second": 0.939,
"step": 27
},
{
"epoch": 0.004651549131987707,
"grad_norm": 1.9094619750976562,
"learning_rate": 9.045084971874738e-05,
"loss": 2.0087,
"step": 28
},
{
"epoch": 0.004817675886701553,
"grad_norm": 1.453922986984253,
"learning_rate": 8.940053768033609e-05,
"loss": 1.3544,
"step": 29
},
{
"epoch": 0.0049838026414154,
"grad_norm": 4.656007766723633,
"learning_rate": 8.83022221559489e-05,
"loss": 1.3588,
"step": 30
},
{
"epoch": 0.005149929396129247,
"grad_norm": 1.933850884437561,
"learning_rate": 8.715724127386972e-05,
"loss": 0.9921,
"step": 31
},
{
"epoch": 0.005316056150843094,
"grad_norm": 1.3430155515670776,
"learning_rate": 8.596699001693255e-05,
"loss": 0.6642,
"step": 32
},
{
"epoch": 0.00548218290555694,
"grad_norm": 1.802078366279602,
"learning_rate": 8.473291852294987e-05,
"loss": 1.4628,
"step": 33
},
{
"epoch": 0.005648309660270787,
"grad_norm": 2.6342110633850098,
"learning_rate": 8.345653031794292e-05,
"loss": 1.4388,
"step": 34
},
{
"epoch": 0.0058144364149846335,
"grad_norm": 1.334199070930481,
"learning_rate": 8.213938048432697e-05,
"loss": 1.0315,
"step": 35
},
{
"epoch": 0.00598056316969848,
"grad_norm": 0.9666208028793335,
"learning_rate": 8.07830737662829e-05,
"loss": 0.6273,
"step": 36
},
{
"epoch": 0.00598056316969848,
"eval_loss": 0.6773849725723267,
"eval_runtime": 675.1591,
"eval_samples_per_second": 7.508,
"eval_steps_per_second": 0.939,
"step": 36
},
{
"epoch": 0.0061466899244123265,
"grad_norm": 2.4796395301818848,
"learning_rate": 7.938926261462366e-05,
"loss": 1.2106,
"step": 37
},
{
"epoch": 0.0063128166791261734,
"grad_norm": 2.0141119956970215,
"learning_rate": 7.795964517353735e-05,
"loss": 1.0963,
"step": 38
},
{
"epoch": 0.00647894343384002,
"grad_norm": 1.0724881887435913,
"learning_rate": 7.649596321166024e-05,
"loss": 0.7781,
"step": 39
},
{
"epoch": 0.006645070188553866,
"grad_norm": 1.5177243947982788,
"learning_rate": 7.500000000000001e-05,
"loss": 1.1593,
"step": 40
},
{
"epoch": 0.006811196943267713,
"grad_norm": 2.06948184967041,
"learning_rate": 7.347357813929454e-05,
"loss": 1.6186,
"step": 41
},
{
"epoch": 0.00697732369798156,
"grad_norm": 2.045438051223755,
"learning_rate": 7.191855733945387e-05,
"loss": 1.5002,
"step": 42
},
{
"epoch": 0.007143450452695406,
"grad_norm": 1.8725861310958862,
"learning_rate": 7.033683215379002e-05,
"loss": 1.289,
"step": 43
},
{
"epoch": 0.007309577207409253,
"grad_norm": 0.9375693798065186,
"learning_rate": 6.873032967079561e-05,
"loss": 1.0544,
"step": 44
},
{
"epoch": 0.0074757039621231,
"grad_norm": 3.7686853408813477,
"learning_rate": 6.710100716628344e-05,
"loss": 1.0144,
"step": 45
},
{
"epoch": 0.0074757039621231,
"eval_loss": 0.6617150902748108,
"eval_runtime": 675.1702,
"eval_samples_per_second": 7.508,
"eval_steps_per_second": 0.939,
"step": 45
},
{
"epoch": 0.007641830716836946,
"grad_norm": 1.291003704071045,
"learning_rate": 6.545084971874738e-05,
"loss": 1.3174,
"step": 46
},
{
"epoch": 0.007807957471550793,
"grad_norm": 1.5664713382720947,
"learning_rate": 6.378186779084995e-05,
"loss": 1.6628,
"step": 47
},
{
"epoch": 0.00797408422626464,
"grad_norm": 1.041298270225525,
"learning_rate": 6.209609477998338e-05,
"loss": 0.6944,
"step": 48
},
{
"epoch": 0.008140210980978486,
"grad_norm": 1.353527307510376,
"learning_rate": 6.0395584540887963e-05,
"loss": 1.7041,
"step": 49
},
{
"epoch": 0.008306337735692334,
"grad_norm": 0.8514286875724792,
"learning_rate": 5.868240888334653e-05,
"loss": 1.7434,
"step": 50
},
{
"epoch": 0.00847246449040618,
"grad_norm": 1.392175555229187,
"learning_rate": 5.695865504800327e-05,
"loss": 0.627,
"step": 51
},
{
"epoch": 0.008638591245120026,
"grad_norm": 2.5748090744018555,
"learning_rate": 5.522642316338268e-05,
"loss": 1.297,
"step": 52
},
{
"epoch": 0.008804717999833874,
"grad_norm": 1.3663445711135864,
"learning_rate": 5.348782368720626e-05,
"loss": 1.9941,
"step": 53
},
{
"epoch": 0.00897084475454772,
"grad_norm": 1.5222846269607544,
"learning_rate": 5.174497483512506e-05,
"loss": 0.8739,
"step": 54
},
{
"epoch": 0.00897084475454772,
"eval_loss": 0.6527463793754578,
"eval_runtime": 675.2164,
"eval_samples_per_second": 7.507,
"eval_steps_per_second": 0.939,
"step": 54
},
{
"epoch": 0.009136971509261566,
"grad_norm": 1.4149327278137207,
"learning_rate": 5e-05,
"loss": 1.0193,
"step": 55
},
{
"epoch": 0.009303098263975414,
"grad_norm": 1.3249423503875732,
"learning_rate": 4.825502516487497e-05,
"loss": 1.386,
"step": 56
},
{
"epoch": 0.00946922501868926,
"grad_norm": 1.4684275388717651,
"learning_rate": 4.6512176312793736e-05,
"loss": 1.6665,
"step": 57
},
{
"epoch": 0.009635351773403106,
"grad_norm": 0.864578902721405,
"learning_rate": 4.477357683661734e-05,
"loss": 1.6305,
"step": 58
},
{
"epoch": 0.009801478528116954,
"grad_norm": 0.8630561828613281,
"learning_rate": 4.3041344951996746e-05,
"loss": 1.5473,
"step": 59
},
{
"epoch": 0.0099676052828308,
"grad_norm": 1.2544828653335571,
"learning_rate": 4.131759111665349e-05,
"loss": 1.0987,
"step": 60
},
{
"epoch": 0.010133732037544647,
"grad_norm": 1.6959971189498901,
"learning_rate": 3.960441545911204e-05,
"loss": 0.9921,
"step": 61
},
{
"epoch": 0.010299858792258493,
"grad_norm": 1.5748471021652222,
"learning_rate": 3.790390522001662e-05,
"loss": 2.033,
"step": 62
},
{
"epoch": 0.01046598554697234,
"grad_norm": 0.922096312046051,
"learning_rate": 3.6218132209150045e-05,
"loss": 0.8486,
"step": 63
},
{
"epoch": 0.01046598554697234,
"eval_loss": 0.6467626094818115,
"eval_runtime": 675.6424,
"eval_samples_per_second": 7.502,
"eval_steps_per_second": 0.938,
"step": 63
},
{
"epoch": 0.010632112301686187,
"grad_norm": 1.0161826610565186,
"learning_rate": 3.4549150281252636e-05,
"loss": 1.1515,
"step": 64
},
{
"epoch": 0.010798239056400033,
"grad_norm": 0.8706979751586914,
"learning_rate": 3.289899283371657e-05,
"loss": 0.8809,
"step": 65
},
{
"epoch": 0.01096436581111388,
"grad_norm": 1.361327052116394,
"learning_rate": 3.12696703292044e-05,
"loss": 1.8019,
"step": 66
},
{
"epoch": 0.011130492565827727,
"grad_norm": 0.8916012048721313,
"learning_rate": 2.9663167846209998e-05,
"loss": 1.9665,
"step": 67
},
{
"epoch": 0.011296619320541573,
"grad_norm": 1.261985182762146,
"learning_rate": 2.8081442660546125e-05,
"loss": 1.5859,
"step": 68
},
{
"epoch": 0.01146274607525542,
"grad_norm": 1.4368607997894287,
"learning_rate": 2.6526421860705473e-05,
"loss": 1.3238,
"step": 69
},
{
"epoch": 0.011628872829969267,
"grad_norm": 3.813182830810547,
"learning_rate": 2.500000000000001e-05,
"loss": 2.1585,
"step": 70
},
{
"epoch": 0.011794999584683113,
"grad_norm": 1.8414254188537598,
"learning_rate": 2.350403678833976e-05,
"loss": 1.3511,
"step": 71
},
{
"epoch": 0.01196112633939696,
"grad_norm": 2.074822187423706,
"learning_rate": 2.2040354826462668e-05,
"loss": 2.1272,
"step": 72
},
{
"epoch": 0.01196112633939696,
"eval_loss": 0.6417830586433411,
"eval_runtime": 675.5888,
"eval_samples_per_second": 7.503,
"eval_steps_per_second": 0.938,
"step": 72
},
{
"epoch": 0.012127253094110807,
"grad_norm": 1.6472017765045166,
"learning_rate": 2.061073738537635e-05,
"loss": 1.2974,
"step": 73
},
{
"epoch": 0.012293379848824653,
"grad_norm": 0.7575200200080872,
"learning_rate": 1.9216926233717085e-05,
"loss": 1.0512,
"step": 74
},
{
"epoch": 0.012459506603538499,
"grad_norm": 0.760666012763977,
"learning_rate": 1.7860619515673033e-05,
"loss": 1.3661,
"step": 75
}
],
"logging_steps": 1,
"max_steps": 100,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 25,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 7.155367310838989e+16,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}
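
The JSON above has the shape of the trainer_state.json that the Hugging Face Trainer writes into each checkpoint-* directory. Below is a minimal sketch of how one might inspect it, assuming the file sits at checkpoint-75/trainer_state.json (a hypothetical path) and that the learning-rate trace follows the usual linear-warmup-plus-cosine-decay schedule, which the logged values are consistent with (10 warmup steps to 1e-4, cosine decay toward 0 at max_steps=100); that schedule is an inference from the numbers, not something the file states.

import json
import math

# Hypothetical path; trainer_state.json normally lives inside the checkpoint-* directory.
with open("checkpoint-75/trainer_state.json") as f:
    state = json.load(f)

# Training entries carry a "loss" key; evaluation entries carry "eval_loss" instead.
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_loss" in e]

print(f"trained {state['global_step']}/{state['max_steps']} steps "
      f"(epoch {state['epoch']:.6f})")
print(f"latest eval_loss: {eval_logs[-1]['eval_loss']:.4f} at step {eval_logs[-1]['step']}")

# Assumed schedule: linear warmup for 10 steps to a 1e-4 peak, then cosine decay
# to 0 at max_steps. Checking it against every logged learning rate.
peak_lr, warmup_steps, total_steps = 1e-4, 10, state["max_steps"]

def expected_lr(step):
    if step <= warmup_steps:
        return peak_lr * step / warmup_steps
    progress = (step - warmup_steps) / (total_steps - warmup_steps)
    return peak_lr * 0.5 * (1.0 + math.cos(math.pi * progress))

for entry in train_logs:
    assert math.isclose(entry["learning_rate"], expected_lr(entry["step"]), rel_tol=1e-6)
print("learning-rate trace matches linear warmup + cosine decay")

A script along these lines also makes the evaluation trend easy to pull out: the nine eval entries show eval_loss falling from 1.0415 at step 1 to 0.6418 at step 72.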