Azzurro / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.1998400213304894,
"eval_steps": 500,
"global_step": 4500,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.03,
"grad_norm": 0.5563874321368991,
"learning_rate": 4.000000000000001e-06,
"loss": 1.58,
"step": 100
},
{
"epoch": 0.05,
"grad_norm": 0.4957053999852371,
"learning_rate": 8.000000000000001e-06,
"loss": 1.4463,
"step": 200
},
{
"epoch": 0.08,
"grad_norm": 0.589756030703148,
"learning_rate": 1.2e-05,
"loss": 1.3542,
"step": 300
},
{
"epoch": 0.11,
"grad_norm": 0.6420302678887735,
"learning_rate": 1.6000000000000003e-05,
"loss": 1.3331,
"step": 400
},
{
"epoch": 0.13,
"grad_norm": 0.7649283840590947,
"learning_rate": 2e-05,
"loss": 1.3253,
"step": 500
},
{
"epoch": 0.13,
"eval_loss": 1.3249897956848145,
"eval_runtime": 2000.6322,
"eval_samples_per_second": 6.665,
"eval_steps_per_second": 0.833,
"step": 500
},
{
"epoch": 0.16,
"grad_norm": 0.7507878377667085,
"learning_rate": 1.9714285714285718e-05,
"loss": 1.3221,
"step": 600
},
{
"epoch": 0.19,
"grad_norm": 0.6176603895840452,
"learning_rate": 1.942857142857143e-05,
"loss": 1.3126,
"step": 700
},
{
"epoch": 0.21,
"grad_norm": 0.7327398805078954,
"learning_rate": 1.9142857142857146e-05,
"loss": 1.3055,
"step": 800
},
{
"epoch": 0.24,
"grad_norm": 0.6839292302496646,
"learning_rate": 1.885714285714286e-05,
"loss": 1.3093,
"step": 900
},
{
"epoch": 0.27,
"grad_norm": 0.607074650571682,
"learning_rate": 1.8571428571428575e-05,
"loss": 1.2985,
"step": 1000
},
{
"epoch": 0.27,
"eval_loss": 1.2995541095733643,
"eval_runtime": 1951.5754,
"eval_samples_per_second": 6.833,
"eval_steps_per_second": 0.854,
"step": 1000
},
{
"epoch": 0.29,
"grad_norm": 0.74727636749315,
"learning_rate": 1.8285714285714288e-05,
"loss": 1.3017,
"step": 1100
},
{
"epoch": 0.32,
"grad_norm": 0.6311499573020434,
"learning_rate": 1.8e-05,
"loss": 1.2896,
"step": 1200
},
{
"epoch": 0.35,
"grad_norm": 0.5915205156004358,
"learning_rate": 1.7714285714285717e-05,
"loss": 1.2834,
"step": 1300
},
{
"epoch": 0.37,
"grad_norm": 0.5946814358633083,
"learning_rate": 1.742857142857143e-05,
"loss": 1.3073,
"step": 1400
},
{
"epoch": 0.4,
"grad_norm": 0.6097257778288048,
"learning_rate": 1.7142857142857142e-05,
"loss": 1.2843,
"step": 1500
},
{
"epoch": 0.4,
"eval_loss": 1.2924432754516602,
"eval_runtime": 1987.1893,
"eval_samples_per_second": 6.71,
"eval_steps_per_second": 0.839,
"step": 1500
},
{
"epoch": 0.43,
"grad_norm": 0.596713525589904,
"learning_rate": 1.6857142857142858e-05,
"loss": 1.2748,
"step": 1600
},
{
"epoch": 0.45,
"grad_norm": 0.6872784300870175,
"learning_rate": 1.6571428571428574e-05,
"loss": 1.2971,
"step": 1700
},
{
"epoch": 0.48,
"grad_norm": 0.5519473130229932,
"learning_rate": 1.6285714285714287e-05,
"loss": 1.2746,
"step": 1800
},
{
"epoch": 0.51,
"grad_norm": 0.6633345934082727,
"learning_rate": 1.6000000000000003e-05,
"loss": 1.2906,
"step": 1900
},
{
"epoch": 0.53,
"grad_norm": 0.6476833370052498,
"learning_rate": 1.5714285714285715e-05,
"loss": 1.2813,
"step": 2000
},
{
"epoch": 0.53,
"eval_loss": 1.2873681783676147,
"eval_runtime": 1954.0241,
"eval_samples_per_second": 6.824,
"eval_steps_per_second": 0.853,
"step": 2000
},
{
"epoch": 0.56,
"grad_norm": 0.6831512981144761,
"learning_rate": 1.542857142857143e-05,
"loss": 1.2973,
"step": 2100
},
{
"epoch": 0.59,
"grad_norm": 0.6111369458671693,
"learning_rate": 1.5142857142857144e-05,
"loss": 1.2847,
"step": 2200
},
{
"epoch": 0.61,
"grad_norm": 0.6468631061437224,
"learning_rate": 1.4857142857142858e-05,
"loss": 1.2754,
"step": 2300
},
{
"epoch": 0.64,
"grad_norm": 0.6799634395265591,
"learning_rate": 1.4571428571428573e-05,
"loss": 1.2857,
"step": 2400
},
{
"epoch": 0.67,
"grad_norm": 0.7108542425010391,
"learning_rate": 1.4285714285714287e-05,
"loss": 1.289,
"step": 2500
},
{
"epoch": 0.67,
"eval_loss": 1.283585786819458,
"eval_runtime": 1870.2159,
"eval_samples_per_second": 7.13,
"eval_steps_per_second": 0.891,
"step": 2500
},
{
"epoch": 0.69,
"grad_norm": 0.7253629154301096,
"learning_rate": 1.4e-05,
"loss": 1.2793,
"step": 2600
},
{
"epoch": 0.72,
"grad_norm": 0.614991663938332,
"learning_rate": 1.3714285714285716e-05,
"loss": 1.2767,
"step": 2700
},
{
"epoch": 0.75,
"grad_norm": 0.5699407855446496,
"learning_rate": 1.3428571428571429e-05,
"loss": 1.2719,
"step": 2800
},
{
"epoch": 0.77,
"grad_norm": 0.763568295530126,
"learning_rate": 1.3142857142857145e-05,
"loss": 1.2733,
"step": 2900
},
{
"epoch": 0.8,
"grad_norm": 0.6538876064837678,
"learning_rate": 1.2857142857142859e-05,
"loss": 1.2822,
"step": 3000
},
{
"epoch": 0.8,
"eval_loss": 1.2801251411437988,
"eval_runtime": 1889.0286,
"eval_samples_per_second": 7.059,
"eval_steps_per_second": 0.882,
"step": 3000
},
{
"epoch": 0.83,
"grad_norm": 0.6413250796291871,
"learning_rate": 1.2571428571428572e-05,
"loss": 1.2709,
"step": 3100
},
{
"epoch": 0.85,
"grad_norm": 0.7125599551490435,
"learning_rate": 1.2285714285714288e-05,
"loss": 1.2829,
"step": 3200
},
{
"epoch": 0.88,
"grad_norm": 0.6792031678140497,
"learning_rate": 1.2e-05,
"loss": 1.2728,
"step": 3300
},
{
"epoch": 0.91,
"grad_norm": 0.6989586202409576,
"learning_rate": 1.1714285714285716e-05,
"loss": 1.2835,
"step": 3400
},
{
"epoch": 0.93,
"grad_norm": 0.7569471708203911,
"learning_rate": 1.1428571428571429e-05,
"loss": 1.2793,
"step": 3500
},
{
"epoch": 0.93,
"eval_loss": 1.2773550748825073,
"eval_runtime": 1896.8518,
"eval_samples_per_second": 7.03,
"eval_steps_per_second": 0.879,
"step": 3500
},
{
"epoch": 0.96,
"grad_norm": 0.6242374298012554,
"learning_rate": 1.1142857142857143e-05,
"loss": 1.2805,
"step": 3600
},
{
"epoch": 0.99,
"grad_norm": 0.6585613373985719,
"learning_rate": 1.0857142857142858e-05,
"loss": 1.265,
"step": 3700
},
{
"epoch": 1.01,
"grad_norm": 0.7452400076297672,
"learning_rate": 1.0571428571428572e-05,
"loss": 1.2589,
"step": 3800
},
{
"epoch": 1.04,
"grad_norm": 0.7861020289489161,
"learning_rate": 1.0285714285714285e-05,
"loss": 1.2616,
"step": 3900
},
{
"epoch": 1.07,
"grad_norm": 0.7757094950303476,
"learning_rate": 1e-05,
"loss": 1.2721,
"step": 4000
},
{
"epoch": 1.07,
"eval_loss": 1.2751243114471436,
"eval_runtime": 1895.5553,
"eval_samples_per_second": 7.035,
"eval_steps_per_second": 0.879,
"step": 4000
},
{
"epoch": 1.09,
"grad_norm": 0.6999523427612176,
"learning_rate": 9.714285714285715e-06,
"loss": 1.2649,
"step": 4100
},
{
"epoch": 1.12,
"grad_norm": 0.6528269639884913,
"learning_rate": 9.42857142857143e-06,
"loss": 1.2675,
"step": 4200
},
{
"epoch": 1.15,
"grad_norm": 0.8019204201326198,
"learning_rate": 9.142857142857144e-06,
"loss": 1.2775,
"step": 4300
},
{
"epoch": 1.17,
"grad_norm": 0.8453041527524593,
"learning_rate": 8.857142857142858e-06,
"loss": 1.2682,
"step": 4400
},
{
"epoch": 1.2,
"grad_norm": 0.6858153075780669,
"learning_rate": 8.571428571428571e-06,
"loss": 1.2753,
"step": 4500
},
{
"epoch": 1.2,
"eval_loss": 1.2731624841690063,
"eval_runtime": 2031.5498,
"eval_samples_per_second": 6.564,
"eval_steps_per_second": 0.821,
"step": 4500
}
],
"logging_steps": 100,
"max_steps": 7500,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 500,
"total_flos": 6.185622662887244e+18,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}
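
A minimal sketch of how this trainer state can be inspected (not part of the checkpoint itself): it loads the JSON above, splits log_history into training and evaluation entries, and prints the loss curves. The local path "trainer_state.json" is an assumption; point it at wherever the file is saved.

import json

# Assumed local copy of the file shown above.
with open("trainer_state.json") as f:
    state = json.load(f)

# Training log entries carry "loss"; evaluation entries carry "eval_loss".
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_loss" in e]

for e in train_logs:
    print(f"step {e['step']:>5}  lr {e['learning_rate']:.2e}  loss {e['loss']:.4f}")

for e in eval_logs:
    print(f"step {e['step']:>5}  eval_loss {e['eval_loss']:.4f}")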