{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.006814619630647616,
  "eval_steps": 25,
  "global_step": 75,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 9.086159507530154e-05,
      "grad_norm": 28.37936019897461,
      "learning_rate": 3.3333333333333335e-05,
      "loss": 8.8371,
      "step": 1
    },
    {
      "epoch": 9.086159507530154e-05,
      "eval_loss": 10.420028686523438,
      "eval_runtime": 306.3179,
      "eval_samples_per_second": 30.256,
      "eval_steps_per_second": 15.128,
      "step": 1
    },
    {
      "epoch": 0.0001817231901506031,
      "grad_norm": 34.581912994384766,
      "learning_rate": 6.666666666666667e-05,
      "loss": 9.9277,
      "step": 2
    },
    {
      "epoch": 0.0002725847852259046,
      "grad_norm": 35.70059585571289,
      "learning_rate": 0.0001,
      "loss": 9.3615,
      "step": 3
    },
    {
      "epoch": 0.0003634463803012062,
      "grad_norm": 25.815174102783203,
      "learning_rate": 9.99524110790929e-05,
      "loss": 7.0004,
      "step": 4
    },
    {
      "epoch": 0.00045430797537650774,
      "grad_norm": 52.796356201171875,
      "learning_rate": 9.980973490458728e-05,
      "loss": 5.8176,
      "step": 5
    },
    {
      "epoch": 0.0005451695704518092,
      "grad_norm": 18.662376403808594,
      "learning_rate": 9.957224306869053e-05,
      "loss": 3.2304,
      "step": 6
    },
    {
      "epoch": 0.0006360311655271108,
      "grad_norm": 17.944053649902344,
      "learning_rate": 9.924038765061042e-05,
      "loss": 2.4738,
      "step": 7
    },
    {
      "epoch": 0.0007268927606024124,
      "grad_norm": 10.449116706848145,
      "learning_rate": 9.881480035599667e-05,
      "loss": 1.3214,
      "step": 8
    },
    {
      "epoch": 0.0008177543556777139,
      "grad_norm": 4.770611763000488,
      "learning_rate": 9.829629131445342e-05,
      "loss": 0.9449,
      "step": 9
    },
    {
      "epoch": 0.0009086159507530155,
      "grad_norm": 2.742938756942749,
      "learning_rate": 9.768584753741134e-05,
      "loss": 0.0423,
      "step": 10
    },
    {
      "epoch": 0.000999477545828317,
      "grad_norm": 5.579481601715088,
      "learning_rate": 9.698463103929542e-05,
      "loss": 0.7174,
      "step": 11
    },
    {
      "epoch": 0.0010903391409036185,
      "grad_norm": 13.289307594299316,
      "learning_rate": 9.619397662556435e-05,
      "loss": 1.2919,
      "step": 12
    },
    {
      "epoch": 0.00118120073597892,
      "grad_norm": 10.471413612365723,
      "learning_rate": 9.53153893518325e-05,
      "loss": 0.7718,
      "step": 13
    },
    {
      "epoch": 0.0012720623310542216,
      "grad_norm": 4.605869293212891,
      "learning_rate": 9.435054165891109e-05,
      "loss": 0.1433,
      "step": 14
    },
    {
      "epoch": 0.0013629239261295232,
      "grad_norm": 5.941074848175049,
      "learning_rate": 9.330127018922194e-05,
      "loss": 0.1174,
      "step": 15
    },
    {
      "epoch": 0.0014537855212048247,
      "grad_norm": 11.986332893371582,
      "learning_rate": 9.21695722906443e-05,
      "loss": 0.1588,
      "step": 16
    },
    {
      "epoch": 0.0015446471162801263,
      "grad_norm": 11.422152519226074,
      "learning_rate": 9.09576022144496e-05,
      "loss": 0.2491,
      "step": 17
    },
    {
      "epoch": 0.0016355087113554278,
      "grad_norm": 9.860111236572266,
      "learning_rate": 8.966766701456177e-05,
      "loss": 0.2102,
      "step": 18
    },
    {
      "epoch": 0.0017263703064307294,
      "grad_norm": 2.8064475059509277,
      "learning_rate": 8.83022221559489e-05,
      "loss": 0.0361,
      "step": 19
    },
    {
      "epoch": 0.001817231901506031,
      "grad_norm": 0.4391842484474182,
      "learning_rate": 8.68638668405062e-05,
      "loss": 0.0055,
      "step": 20
    },
    {
      "epoch": 0.0019080934965813325,
      "grad_norm": 3.745144844055176,
      "learning_rate": 8.535533905932738e-05,
      "loss": 0.0863,
      "step": 21
    },
    {
      "epoch": 0.001998955091656634,
      "grad_norm": 0.050352372229099274,
      "learning_rate": 8.377951038078302e-05,
      "loss": 0.0008,
      "step": 22
    },
    {
      "epoch": 0.0020898166867319356,
      "grad_norm": 1.2719851732254028,
      "learning_rate": 8.213938048432697e-05,
      "loss": 0.0119,
      "step": 23
    },
    {
      "epoch": 0.002180678281807237,
      "grad_norm": 0.5530160069465637,
      "learning_rate": 8.043807145043604e-05,
      "loss": 0.0032,
      "step": 24
    },
    {
      "epoch": 0.0022715398768825387,
      "grad_norm": 0.06411629170179367,
      "learning_rate": 7.86788218175523e-05,
      "loss": 0.0006,
      "step": 25
    },
    {
      "epoch": 0.0022715398768825387,
      "eval_loss": 1.8638120889663696,
      "eval_runtime": 306.1366,
      "eval_samples_per_second": 30.274,
      "eval_steps_per_second": 15.137,
      "step": 25
    },
    {
      "epoch": 0.00236240147195784,
      "grad_norm": 0.2939962148666382,
      "learning_rate": 7.68649804173412e-05,
      "loss": 0.0027,
      "step": 26
    },
    {
      "epoch": 0.002453263067033142,
      "grad_norm": 0.6323854923248291,
      "learning_rate": 7.500000000000001e-05,
      "loss": 0.0051,
      "step": 27
    },
    {
      "epoch": 0.002544124662108443,
      "grad_norm": 0.011483444832265377,
      "learning_rate": 7.308743066175172e-05,
      "loss": 0.0002,
      "step": 28
    },
    {
      "epoch": 0.002634986257183745,
      "grad_norm": 0.028377123177051544,
      "learning_rate": 7.113091308703498e-05,
      "loss": 0.0002,
      "step": 29
    },
    {
      "epoch": 0.0027258478522590463,
      "grad_norm": 0.0203834380954504,
      "learning_rate": 6.91341716182545e-05,
      "loss": 0.0003,
      "step": 30
    },
    {
      "epoch": 0.002816709447334348,
      "grad_norm": 9.312389373779297,
      "learning_rate": 6.710100716628344e-05,
      "loss": 0.4706,
      "step": 31
    },
    {
      "epoch": 0.0029075710424096494,
      "grad_norm": 68.22864532470703,
      "learning_rate": 6.503528997521366e-05,
      "loss": 3.3187,
      "step": 32
    },
    {
      "epoch": 0.002998432637484951,
      "grad_norm": 48.021480560302734,
      "learning_rate": 6.294095225512603e-05,
      "loss": 2.134,
      "step": 33
    },
    {
      "epoch": 0.0030892942325602525,
      "grad_norm": 18.471052169799805,
      "learning_rate": 6.0821980696905146e-05,
      "loss": 1.2329,
      "step": 34
    },
    {
      "epoch": 0.0031801558276355543,
      "grad_norm": 16.42901039123535,
      "learning_rate": 5.868240888334653e-05,
      "loss": 0.4616,
      "step": 35
    },
    {
      "epoch": 0.0032710174227108557,
      "grad_norm": 13.609719276428223,
      "learning_rate": 5.6526309611002594e-05,
      "loss": 0.3694,
      "step": 36
    },
    {
      "epoch": 0.0033618790177861574,
      "grad_norm": 2.441948652267456,
      "learning_rate": 5.435778713738292e-05,
      "loss": 0.043,
      "step": 37
    },
    {
      "epoch": 0.0034527406128614588,
      "grad_norm": 0.41872337460517883,
      "learning_rate": 5.218096936826681e-05,
      "loss": 0.0039,
      "step": 38
    },
    {
      "epoch": 0.00354360220793676,
      "grad_norm": 0.22431544959545135,
      "learning_rate": 5e-05,
      "loss": 0.0019,
      "step": 39
    },
    {
      "epoch": 0.003634463803012062,
      "grad_norm": 0.3925679624080658,
      "learning_rate": 4.781903063173321e-05,
      "loss": 0.0035,
      "step": 40
    },
    {
      "epoch": 0.0037253253980873632,
      "grad_norm": 0.04403270035982132,
      "learning_rate": 4.564221286261709e-05,
      "loss": 0.0006,
      "step": 41
    },
    {
      "epoch": 0.003816186993162665,
      "grad_norm": 0.034630876034498215,
      "learning_rate": 4.347369038899744e-05,
      "loss": 0.0005,
      "step": 42
    },
    {
      "epoch": 0.003907048588237967,
      "grad_norm": 0.46163317561149597,
      "learning_rate": 4.131759111665349e-05,
      "loss": 0.0027,
      "step": 43
    },
    {
      "epoch": 0.003997910183313268,
      "grad_norm": 0.06355968862771988,
      "learning_rate": 3.917801930309486e-05,
      "loss": 0.0006,
      "step": 44
    },
    {
      "epoch": 0.0040887717783885695,
      "grad_norm": 0.05269589275121689,
      "learning_rate": 3.705904774487396e-05,
      "loss": 0.0005,
      "step": 45
    },
    {
      "epoch": 0.004179633373463871,
      "grad_norm": 0.01986519619822502,
      "learning_rate": 3.4964710024786354e-05,
      "loss": 0.0002,
      "step": 46
    },
    {
      "epoch": 0.004270494968539173,
      "grad_norm": 0.019215445965528488,
      "learning_rate": 3.289899283371657e-05,
      "loss": 0.0002,
      "step": 47
    },
    {
      "epoch": 0.004361356563614474,
      "grad_norm": 0.006207428406924009,
      "learning_rate": 3.086582838174551e-05,
      "loss": 0.0001,
      "step": 48
    },
    {
      "epoch": 0.004452218158689776,
      "grad_norm": 0.012795104645192623,
      "learning_rate": 2.886908691296504e-05,
      "loss": 0.0002,
      "step": 49
    },
    {
      "epoch": 0.0045430797537650775,
      "grad_norm": 0.004560490138828754,
      "learning_rate": 2.6912569338248315e-05,
      "loss": 0.0001,
      "step": 50
    },
    {
      "epoch": 0.0045430797537650775,
      "eval_loss": 2.606266498565674,
      "eval_runtime": 305.9895,
      "eval_samples_per_second": 30.289,
      "eval_steps_per_second": 15.144,
      "step": 50
    },
    {
      "epoch": 0.004633941348840379,
      "grad_norm": 46.32371520996094,
      "learning_rate": 2.500000000000001e-05,
      "loss": 3.8385,
      "step": 51
    },
    {
      "epoch": 0.00472480294391568,
      "grad_norm": 50.98200607299805,
      "learning_rate": 2.3135019582658802e-05,
      "loss": 5.1699,
      "step": 52
    },
    {
      "epoch": 0.004815664538990982,
      "grad_norm": 50.70225143432617,
      "learning_rate": 2.132117818244771e-05,
      "loss": 5.2527,
      "step": 53
    },
    {
      "epoch": 0.004906526134066284,
      "grad_norm": 47.06248474121094,
      "learning_rate": 1.9561928549563968e-05,
      "loss": 4.5287,
      "step": 54
    },
    {
      "epoch": 0.0049973877291415855,
      "grad_norm": 43.91443634033203,
      "learning_rate": 1.7860619515673033e-05,
      "loss": 4.1084,
      "step": 55
    },
    {
      "epoch": 0.005088249324216886,
      "grad_norm": 43.865379333496094,
      "learning_rate": 1.622048961921699e-05,
      "loss": 4.248,
      "step": 56
    },
    {
      "epoch": 0.005179110919292188,
      "grad_norm": 31.40968132019043,
      "learning_rate": 1.4644660940672627e-05,
      "loss": 3.0994,
      "step": 57
    },
    {
      "epoch": 0.00526997251436749,
      "grad_norm": 34.816925048828125,
      "learning_rate": 1.3136133159493802e-05,
      "loss": 3.3691,
      "step": 58
    },
    {
      "epoch": 0.005360834109442791,
      "grad_norm": 29.733379364013672,
      "learning_rate": 1.1697777844051105e-05,
      "loss": 2.6941,
      "step": 59
    },
    {
      "epoch": 0.005451695704518093,
      "grad_norm": 24.68531036376953,
      "learning_rate": 1.0332332985438248e-05,
      "loss": 1.9541,
      "step": 60
    },
    {
      "epoch": 0.005542557299593394,
      "grad_norm": 23.424503326416016,
      "learning_rate": 9.042397785550405e-06,
      "loss": 1.9792,
      "step": 61
    },
    {
      "epoch": 0.005633418894668696,
      "grad_norm": 32.10466766357422,
      "learning_rate": 7.830427709355725e-06,
      "loss": 2.261,
      "step": 62
    },
    {
      "epoch": 0.005724280489743997,
      "grad_norm": 27.93880844116211,
      "learning_rate": 6.698729810778065e-06,
      "loss": 2.3343,
      "step": 63
    },
    {
      "epoch": 0.005815142084819299,
      "grad_norm": 23.051822662353516,
      "learning_rate": 5.649458341088915e-06,
      "loss": 1.5509,
      "step": 64
    },
    {
      "epoch": 0.005906003679894601,
      "grad_norm": 25.748632431030273,
      "learning_rate": 4.684610648167503e-06,
      "loss": 1.6823,
      "step": 65
    },
    {
      "epoch": 0.005996865274969902,
      "grad_norm": 27.34827423095703,
      "learning_rate": 3.8060233744356633e-06,
      "loss": 1.5725,
      "step": 66
    },
    {
      "epoch": 0.006087726870045203,
      "grad_norm": 22.656187057495117,
      "learning_rate": 3.0153689607045845e-06,
      "loss": 1.0763,
      "step": 67
    },
    {
      "epoch": 0.006178588465120505,
      "grad_norm": 19.859046936035156,
      "learning_rate": 2.314152462588659e-06,
      "loss": 1.081,
      "step": 68
    },
    {
      "epoch": 0.006269450060195807,
      "grad_norm": 20.085308074951172,
      "learning_rate": 1.70370868554659e-06,
      "loss": 1.2761,
      "step": 69
    },
    {
      "epoch": 0.006360311655271109,
      "grad_norm": 17.73591423034668,
      "learning_rate": 1.1851996440033319e-06,
      "loss": 0.9149,
      "step": 70
    },
    {
      "epoch": 0.0064511732503464096,
      "grad_norm": 21.190202713012695,
      "learning_rate": 7.596123493895991e-07,
      "loss": 1.4188,
      "step": 71
    },
    {
      "epoch": 0.006542034845421711,
      "grad_norm": 20.775806427001953,
      "learning_rate": 4.277569313094809e-07,
      "loss": 1.0984,
      "step": 72
    },
    {
      "epoch": 0.006632896440497013,
      "grad_norm": 19.472455978393555,
      "learning_rate": 1.9026509541272275e-07,
      "loss": 0.8902,
      "step": 73
    },
    {
      "epoch": 0.006723758035572315,
      "grad_norm": 20.528032302856445,
      "learning_rate": 4.7588920907110094e-08,
      "loss": 0.855,
      "step": 74
    },
    {
      "epoch": 0.006814619630647616,
      "grad_norm": 18.677600860595703,
      "learning_rate": 0.0,
      "loss": 0.8934,
      "step": 75
    },
    {
      "epoch": 0.006814619630647616,
      "eval_loss": 0.611670970916748,
      "eval_runtime": 306.2641,
      "eval_samples_per_second": 30.261,
      "eval_steps_per_second": 15.131,
      "step": 75
    }
  ],
  "logging_steps": 1,
  "max_steps": 75,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.46820537778176e+16,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}