{
"best_metric": 0.8693992495536804,
"best_model_checkpoint": "data/Llama-31-8B_task-1_120-samples_config-2_full/checkpoint-66",
"epoch": 18.90909090909091,
"eval_steps": 500,
"global_step": 104,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.18181818181818182,
"grad_norm": 1.942243218421936,
"learning_rate": 4.000000000000001e-06,
"loss": 2.4963,
"step": 1
},
{
"epoch": 0.36363636363636365,
"grad_norm": 1.9989137649536133,
"learning_rate": 8.000000000000001e-06,
"loss": 2.515,
"step": 2
},
{
"epoch": 0.7272727272727273,
"grad_norm": 1.5480235815048218,
"learning_rate": 1.6000000000000003e-05,
"loss": 2.448,
"step": 4
},
{
"epoch": 0.9090909090909091,
"eval_loss": 2.3731963634490967,
"eval_runtime": 9.6256,
"eval_samples_per_second": 2.493,
"eval_steps_per_second": 2.493,
"step": 5
},
{
"epoch": 1.0909090909090908,
"grad_norm": 1.586003065109253,
"learning_rate": 2.4e-05,
"loss": 2.4028,
"step": 6
},
{
"epoch": 1.4545454545454546,
"grad_norm": 1.5806779861450195,
"learning_rate": 3.2000000000000005e-05,
"loss": 2.3232,
"step": 8
},
{
"epoch": 1.8181818181818183,
"grad_norm": 1.2769224643707275,
"learning_rate": 4e-05,
"loss": 2.2361,
"step": 10
},
{
"epoch": 2.0,
"eval_loss": 2.027904510498047,
"eval_runtime": 9.6154,
"eval_samples_per_second": 2.496,
"eval_steps_per_second": 2.496,
"step": 11
},
{
"epoch": 2.1818181818181817,
"grad_norm": 1.6216256618499756,
"learning_rate": 4.8e-05,
"loss": 2.0218,
"step": 12
},
{
"epoch": 2.5454545454545454,
"grad_norm": 0.9748833775520325,
"learning_rate": 5.6000000000000006e-05,
"loss": 1.9447,
"step": 14
},
{
"epoch": 2.909090909090909,
"grad_norm": 0.8017610311508179,
"learning_rate": 6.400000000000001e-05,
"loss": 1.8253,
"step": 16
},
{
"epoch": 2.909090909090909,
"eval_loss": 1.7251297235488892,
"eval_runtime": 9.6206,
"eval_samples_per_second": 2.495,
"eval_steps_per_second": 2.495,
"step": 16
},
{
"epoch": 3.2727272727272725,
"grad_norm": 0.9146191477775574,
"learning_rate": 7.2e-05,
"loss": 1.6844,
"step": 18
},
{
"epoch": 3.6363636363636362,
"grad_norm": 1.028951644897461,
"learning_rate": 8e-05,
"loss": 1.5093,
"step": 20
},
{
"epoch": 4.0,
"grad_norm": 1.1190450191497803,
"learning_rate": 8.800000000000001e-05,
"loss": 1.3791,
"step": 22
},
{
"epoch": 4.0,
"eval_loss": 1.2330317497253418,
"eval_runtime": 9.6142,
"eval_samples_per_second": 2.496,
"eval_steps_per_second": 2.496,
"step": 22
},
{
"epoch": 4.363636363636363,
"grad_norm": 0.6760878562927246,
"learning_rate": 9.6e-05,
"loss": 1.1944,
"step": 24
},
{
"epoch": 4.7272727272727275,
"grad_norm": 0.4904247522354126,
"learning_rate": 9.999512620046522e-05,
"loss": 1.0878,
"step": 26
},
{
"epoch": 4.909090909090909,
"eval_loss": 1.0336649417877197,
"eval_runtime": 9.6445,
"eval_samples_per_second": 2.488,
"eval_steps_per_second": 2.488,
"step": 27
},
{
"epoch": 5.090909090909091,
"grad_norm": 0.47956767678260803,
"learning_rate": 9.995614150494293e-05,
"loss": 0.9978,
"step": 28
},
{
"epoch": 5.454545454545454,
"grad_norm": 0.37655767798423767,
"learning_rate": 9.987820251299122e-05,
"loss": 0.9748,
"step": 30
},
{
"epoch": 5.818181818181818,
"grad_norm": 0.3446647524833679,
"learning_rate": 9.976136999909156e-05,
"loss": 0.9771,
"step": 32
},
{
"epoch": 6.0,
"eval_loss": 0.973893404006958,
"eval_runtime": 9.6397,
"eval_samples_per_second": 2.49,
"eval_steps_per_second": 2.49,
"step": 33
},
{
"epoch": 6.181818181818182,
"grad_norm": 0.32855767011642456,
"learning_rate": 9.96057350657239e-05,
"loss": 0.9427,
"step": 34
},
{
"epoch": 6.545454545454545,
"grad_norm": 0.37166285514831543,
"learning_rate": 9.941141907232765e-05,
"loss": 0.9719,
"step": 36
},
{
"epoch": 6.909090909090909,
"grad_norm": 0.35821229219436646,
"learning_rate": 9.917857354066931e-05,
"loss": 0.8967,
"step": 38
},
{
"epoch": 6.909090909090909,
"eval_loss": 0.9425709843635559,
"eval_runtime": 9.7372,
"eval_samples_per_second": 2.465,
"eval_steps_per_second": 2.465,
"step": 38
},
{
"epoch": 7.2727272727272725,
"grad_norm": 0.41391488909721375,
"learning_rate": 9.890738003669029e-05,
"loss": 0.8814,
"step": 40
},
{
"epoch": 7.636363636363637,
"grad_norm": 0.4022800326347351,
"learning_rate": 9.859805002892732e-05,
"loss": 0.8303,
"step": 42
},
{
"epoch": 8.0,
"grad_norm": 0.40724870562553406,
"learning_rate": 9.825082472361557e-05,
"loss": 0.8815,
"step": 44
},
{
"epoch": 8.0,
"eval_loss": 0.9129160046577454,
"eval_runtime": 9.7169,
"eval_samples_per_second": 2.47,
"eval_steps_per_second": 2.47,
"step": 44
},
{
"epoch": 8.363636363636363,
"grad_norm": 0.45184680819511414,
"learning_rate": 9.786597487660337e-05,
"loss": 0.8283,
"step": 46
},
{
"epoch": 8.727272727272727,
"grad_norm": 0.5330278277397156,
"learning_rate": 9.744380058222483e-05,
"loss": 0.816,
"step": 48
},
{
"epoch": 8.909090909090908,
"eval_loss": 0.8952023386955261,
"eval_runtime": 9.6573,
"eval_samples_per_second": 2.485,
"eval_steps_per_second": 2.485,
"step": 49
},
{
"epoch": 9.090909090909092,
"grad_norm": 0.5944344997406006,
"learning_rate": 9.698463103929542e-05,
"loss": 0.8059,
"step": 50
},
{
"epoch": 9.454545454545455,
"grad_norm": 0.6521593332290649,
"learning_rate": 9.648882429441257e-05,
"loss": 0.7773,
"step": 52
},
{
"epoch": 9.818181818181818,
"grad_norm": 0.6768603920936584,
"learning_rate": 9.595676696276172e-05,
"loss": 0.748,
"step": 54
},
{
"epoch": 10.0,
"eval_loss": 0.8761978149414062,
"eval_runtime": 9.6144,
"eval_samples_per_second": 2.496,
"eval_steps_per_second": 2.496,
"step": 55
},
{
"epoch": 10.181818181818182,
"grad_norm": 0.6031374335289001,
"learning_rate": 9.538887392664544e-05,
"loss": 0.7397,
"step": 56
},
{
"epoch": 10.545454545454545,
"grad_norm": 0.47089871764183044,
"learning_rate": 9.478558801197065e-05,
"loss": 0.7211,
"step": 58
},
{
"epoch": 10.909090909090908,
"grad_norm": 0.44608962535858154,
"learning_rate": 9.414737964294636e-05,
"loss": 0.6939,
"step": 60
},
{
"epoch": 10.909090909090908,
"eval_loss": 0.8726906776428223,
"eval_runtime": 9.6232,
"eval_samples_per_second": 2.494,
"eval_steps_per_second": 2.494,
"step": 60
},
{
"epoch": 11.272727272727273,
"grad_norm": 0.47155243158340454,
"learning_rate": 9.347474647526095e-05,
"loss": 0.6328,
"step": 62
},
{
"epoch": 11.636363636363637,
"grad_norm": 0.5070695877075195,
"learning_rate": 9.276821300802534e-05,
"loss": 0.6563,
"step": 64
},
{
"epoch": 12.0,
"grad_norm": 0.5034713745117188,
"learning_rate": 9.202833017478422e-05,
"loss": 0.6449,
"step": 66
},
{
"epoch": 12.0,
"eval_loss": 0.8693992495536804,
"eval_runtime": 9.6261,
"eval_samples_per_second": 2.493,
"eval_steps_per_second": 2.493,
"step": 66
},
{
"epoch": 12.363636363636363,
"grad_norm": 0.513129711151123,
"learning_rate": 9.125567491391476e-05,
"loss": 0.5736,
"step": 68
},
{
"epoch": 12.727272727272727,
"grad_norm": 0.5481001138687134,
"learning_rate": 9.045084971874738e-05,
"loss": 0.5874,
"step": 70
},
{
"epoch": 12.909090909090908,
"eval_loss": 0.8921452164649963,
"eval_runtime": 9.6307,
"eval_samples_per_second": 2.492,
"eval_steps_per_second": 2.492,
"step": 71
},
{
"epoch": 13.090909090909092,
"grad_norm": 0.5650292634963989,
"learning_rate": 8.961448216775954e-05,
"loss": 0.5501,
"step": 72
},
{
"epoch": 13.454545454545455,
"grad_norm": 0.6034680008888245,
"learning_rate": 8.874722443520899e-05,
"loss": 0.518,
"step": 74
},
{
"epoch": 13.818181818181818,
"grad_norm": 0.7035646438598633,
"learning_rate": 8.784975278258783e-05,
"loss": 0.4934,
"step": 76
},
{
"epoch": 14.0,
"eval_loss": 0.9428841471672058,
"eval_runtime": 9.6183,
"eval_samples_per_second": 2.495,
"eval_steps_per_second": 2.495,
"step": 77
},
{
"epoch": 14.181818181818182,
"grad_norm": 0.6939181089401245,
"learning_rate": 8.692276703129421e-05,
"loss": 0.4695,
"step": 78
},
{
"epoch": 14.545454545454545,
"grad_norm": 0.8460732102394104,
"learning_rate": 8.596699001693255e-05,
"loss": 0.4428,
"step": 80
},
{
"epoch": 14.909090909090908,
"grad_norm": 0.8051614165306091,
"learning_rate": 8.498316702566828e-05,
"loss": 0.4382,
"step": 82
},
{
"epoch": 14.909090909090908,
"eval_loss": 1.008301854133606,
"eval_runtime": 9.6147,
"eval_samples_per_second": 2.496,
"eval_steps_per_second": 2.496,
"step": 82
},
{
"epoch": 15.272727272727273,
"grad_norm": 0.6874304413795471,
"learning_rate": 8.397206521307584e-05,
"loss": 0.3561,
"step": 84
},
{
"epoch": 15.636363636363637,
"grad_norm": 1.0137032270431519,
"learning_rate": 8.293447300593402e-05,
"loss": 0.3449,
"step": 86
},
{
"epoch": 16.0,
"grad_norm": 0.9092028737068176,
"learning_rate": 8.18711994874345e-05,
"loss": 0.347,
"step": 88
},
{
"epoch": 16.0,
"eval_loss": 1.0591822862625122,
"eval_runtime": 9.6202,
"eval_samples_per_second": 2.495,
"eval_steps_per_second": 2.495,
"step": 88
},
{
"epoch": 16.363636363636363,
"grad_norm": 0.9383953213691711,
"learning_rate": 8.07830737662829e-05,
"loss": 0.2896,
"step": 90
},
{
"epoch": 16.727272727272727,
"grad_norm": 1.1991755962371826,
"learning_rate": 7.967094433018508e-05,
"loss": 0.2565,
"step": 92
},
{
"epoch": 16.90909090909091,
"eval_loss": 1.145786166191101,
"eval_runtime": 9.6302,
"eval_samples_per_second": 2.492,
"eval_steps_per_second": 2.492,
"step": 93
},
{
"epoch": 17.09090909090909,
"grad_norm": 1.0061393976211548,
"learning_rate": 7.85356783842216e-05,
"loss": 0.2364,
"step": 94
},
{
"epoch": 17.454545454545453,
"grad_norm": 1.079446792602539,
"learning_rate": 7.737816117462752e-05,
"loss": 0.1962,
"step": 96
},
{
"epoch": 17.818181818181817,
"grad_norm": 1.1677623987197876,
"learning_rate": 7.619929529850397e-05,
"loss": 0.1926,
"step": 98
},
{
"epoch": 18.0,
"eval_loss": 1.2522639036178589,
"eval_runtime": 9.6209,
"eval_samples_per_second": 2.495,
"eval_steps_per_second": 2.495,
"step": 99
},
{
"epoch": 18.181818181818183,
"grad_norm": 1.041843295097351,
"learning_rate": 7.500000000000001e-05,
"loss": 0.1788,
"step": 100
},
{
"epoch": 18.545454545454547,
"grad_norm": 1.2381653785705566,
"learning_rate": 7.378121045351378e-05,
"loss": 0.146,
"step": 102
},
{
"epoch": 18.90909090909091,
"grad_norm": 1.218409776687622,
"learning_rate": 7.254387703447154e-05,
"loss": 0.1477,
"step": 104
},
{
"epoch": 18.90909090909091,
"eval_loss": 1.470971941947937,
"eval_runtime": 9.6218,
"eval_samples_per_second": 2.494,
"eval_steps_per_second": 2.494,
"step": 104
},
{
"epoch": 18.90909090909091,
"step": 104,
"total_flos": 2.5664889569673216e+16,
"train_loss": 0.9187728297681763,
"train_runtime": 1959.5505,
"train_samples_per_second": 2.245,
"train_steps_per_second": 0.128
}
],
"logging_steps": 2,
"max_steps": 250,
"num_input_tokens_seen": 0,
"num_train_epochs": 50,
"save_steps": 25,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 7,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 2.5664889569673216e+16,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}