{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.7599939200486396,
  "global_step": 20000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.02,
      "learning_rate": 4.90500075999392e-05,
      "loss": 7.9101,
      "step": 500
    },
    {
      "epoch": 0.04,
      "learning_rate": 4.81000151998784e-05,
      "loss": 7.5794,
      "step": 1000
    },
    {
      "epoch": 0.06,
      "learning_rate": 4.7150022799817604e-05,
      "loss": 7.5385,
      "step": 1500
    },
    {
      "epoch": 0.08,
      "learning_rate": 4.620003039975681e-05,
      "loss": 7.5241,
      "step": 2000
    },
    {
      "epoch": 0.09,
      "learning_rate": 4.5250037999696e-05,
      "loss": 7.5219,
      "step": 2500
    },
    {
      "epoch": 0.11,
      "learning_rate": 4.4300045599635205e-05,
      "loss": 7.5021,
      "step": 3000
    },
    {
      "epoch": 0.13,
      "learning_rate": 4.33500531995744e-05,
      "loss": 7.4248,
      "step": 3500
    },
    {
      "epoch": 0.15,
      "learning_rate": 4.240006079951361e-05,
      "loss": 7.4127,
      "step": 4000
    },
    {
      "epoch": 0.17,
      "learning_rate": 4.145006839945281e-05,
      "loss": 7.4314,
      "step": 4500
    },
    {
      "epoch": 0.19,
      "learning_rate": 4.050007599939201e-05,
      "loss": 7.4032,
      "step": 5000
    },
    {
      "epoch": 0.21,
      "learning_rate": 3.9550083599331206e-05,
      "loss": 7.4693,
      "step": 5500
    },
    {
      "epoch": 0.23,
      "learning_rate": 3.860009119927041e-05,
      "loss": 7.4423,
      "step": 6000
    },
    {
      "epoch": 0.25,
      "learning_rate": 3.765009879920961e-05,
      "loss": 7.444,
      "step": 6500
    },
    {
      "epoch": 0.27,
      "learning_rate": 3.670010639914881e-05,
      "loss": 7.4172,
      "step": 7000
    },
    {
      "epoch": 0.28,
      "learning_rate": 3.575011399908801e-05,
      "loss": 7.427,
      "step": 7500
    },
    {
      "epoch": 0.3,
      "learning_rate": 3.4800121599027206e-05,
      "loss": 7.422,
      "step": 8000
    },
    {
      "epoch": 0.32,
      "learning_rate": 3.3850129198966415e-05,
      "loss": 7.4367,
      "step": 8500
    },
    {
      "epoch": 0.34,
      "learning_rate": 3.290013679890561e-05,
      "loss": 7.4049,
      "step": 9000
    },
    {
      "epoch": 0.36,
      "learning_rate": 3.1950144398844814e-05,
      "loss": 7.3746,
      "step": 9500
    },
    {
      "epoch": 0.38,
      "learning_rate": 3.100015199878401e-05,
      "loss": 7.3933,
      "step": 10000
    },
    {
      "epoch": 0.4,
      "learning_rate": 3.005015959872321e-05,
      "loss": 7.3898,
      "step": 10500
    },
    {
      "epoch": 0.42,
      "learning_rate": 2.9100167198662416e-05,
      "loss": 7.4056,
      "step": 11000
    },
    {
      "epoch": 0.44,
      "learning_rate": 2.8150174798601615e-05,
      "loss": 7.3624,
      "step": 11500
    },
    {
      "epoch": 0.46,
      "learning_rate": 2.7200182398540814e-05,
      "loss": 7.3256,
      "step": 12000
    },
    {
      "epoch": 0.47,
      "learning_rate": 2.6250189998480014e-05,
      "loss": 7.3855,
      "step": 12500
    },
    {
      "epoch": 0.49,
      "learning_rate": 2.5300197598419213e-05,
      "loss": 7.3835,
      "step": 13000
    },
    {
      "epoch": 0.51,
      "learning_rate": 2.4350205198358412e-05,
      "loss": 7.4472,
      "step": 13500
    },
    {
      "epoch": 0.53,
      "learning_rate": 2.3400212798297615e-05,
      "loss": 7.3903,
      "step": 14000
    },
    {
      "epoch": 0.55,
      "learning_rate": 2.2450220398236815e-05,
      "loss": 7.3914,
      "step": 14500
    },
    {
      "epoch": 0.57,
      "learning_rate": 2.1500227998176014e-05,
      "loss": 7.3494,
      "step": 15000
    },
    {
      "epoch": 0.59,
      "learning_rate": 2.0550235598115217e-05,
      "loss": 7.3547,
      "step": 15500
    },
    {
      "epoch": 0.61,
      "learning_rate": 1.9600243198054416e-05,
      "loss": 7.4164,
      "step": 16000
    },
    {
      "epoch": 0.63,
      "learning_rate": 1.865025079799362e-05,
      "loss": 7.4019,
      "step": 16500
    },
    {
      "epoch": 0.65,
      "learning_rate": 1.7700258397932818e-05,
      "loss": 7.3963,
      "step": 17000
    },
    {
      "epoch": 0.66,
      "learning_rate": 1.6750265997872018e-05,
      "loss": 7.3885,
      "step": 17500
    },
    {
      "epoch": 0.68,
      "learning_rate": 1.580027359781122e-05,
      "loss": 7.317,
      "step": 18000
    },
    {
      "epoch": 0.7,
      "learning_rate": 1.4850281197750418e-05,
      "loss": 7.3519,
      "step": 18500
    },
    {
      "epoch": 0.72,
      "learning_rate": 1.390028879768962e-05,
      "loss": 7.3486,
      "step": 19000
    },
    {
      "epoch": 0.74,
      "learning_rate": 1.295029639762882e-05,
      "loss": 7.3544,
      "step": 19500
    },
    {
      "epoch": 0.76,
      "learning_rate": 1.200030399756802e-05,
      "loss": 7.3557,
      "step": 20000
    }
  ],
  "max_steps": 26316,
  "num_train_epochs": 1,
  "total_flos": 4297035955567104.0,
  "trial_name": null,
  "trial_params": null
}