{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 3.899159663865546,
  "eval_steps": 8,
  "global_step": 116,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.03361344537815126,
      "grad_norm": 2.28125,
      "learning_rate": 2e-05,
      "loss": 3.0408,
      "step": 1
    },
    {
      "epoch": 0.03361344537815126,
      "eval_loss": 3.0278432369232178,
      "eval_runtime": 18.2975,
      "eval_samples_per_second": 2.733,
      "eval_steps_per_second": 0.383,
      "step": 1
    },
    {
      "epoch": 0.06722689075630252,
      "grad_norm": 1.5078125,
      "learning_rate": 4e-05,
      "loss": 3.042,
      "step": 2
    },
    {
      "epoch": 0.10084033613445378,
      "grad_norm": 1.625,
      "learning_rate": 6e-05,
      "loss": 3.3331,
      "step": 3
    },
    {
      "epoch": 0.13445378151260504,
      "grad_norm": 1.34375,
      "learning_rate": 8e-05,
      "loss": 3.1531,
      "step": 4
    },
    {
      "epoch": 0.16806722689075632,
      "grad_norm": 1.28125,
      "learning_rate": 0.0001,
      "loss": 2.7659,
      "step": 5
    },
    {
      "epoch": 0.20168067226890757,
      "grad_norm": 1.1484375,
      "learning_rate": 0.00012,
      "loss": 2.9096,
      "step": 6
    },
    {
      "epoch": 0.23529411764705882,
      "grad_norm": 2.15625,
      "learning_rate": 0.00014,
      "loss": 3.1465,
      "step": 7
    },
    {
      "epoch": 0.2689075630252101,
      "grad_norm": 1.703125,
      "learning_rate": 0.00016,
      "loss": 2.6971,
      "step": 8
    },
    {
      "epoch": 0.2689075630252101,
      "eval_loss": 2.588528633117676,
      "eval_runtime": 20.8612,
      "eval_samples_per_second": 2.397,
      "eval_steps_per_second": 0.336,
      "step": 8
    },
    {
      "epoch": 0.3025210084033613,
      "grad_norm": 0.62890625,
      "learning_rate": 0.00018,
      "loss": 2.3843,
      "step": 9
    },
    {
      "epoch": 0.33613445378151263,
      "grad_norm": 0.8671875,
      "learning_rate": 0.0002,
      "loss": 2.5628,
      "step": 10
    },
    {
      "epoch": 0.3697478991596639,
      "grad_norm": 0.71484375,
      "learning_rate": 0.00019995608365087946,
      "loss": 2.4701,
      "step": 11
    },
    {
      "epoch": 0.40336134453781514,
      "grad_norm": 0.57421875,
      "learning_rate": 0.00019982437317643217,
      "loss": 2.205,
      "step": 12
    },
    {
      "epoch": 0.4369747899159664,
      "grad_norm": 0.439453125,
      "learning_rate": 0.0001996049842615217,
      "loss": 2.2017,
      "step": 13
    },
    {
      "epoch": 0.47058823529411764,
      "grad_norm": 0.328125,
      "learning_rate": 0.00019929810960135172,
      "loss": 2.1823,
      "step": 14
    },
    {
      "epoch": 0.5042016806722689,
      "grad_norm": 0.19140625,
      "learning_rate": 0.0001989040187322164,
      "loss": 2.1578,
      "step": 15
    },
    {
      "epoch": 0.5378151260504201,
      "grad_norm": 0.24609375,
      "learning_rate": 0.00019842305779475968,
      "loss": 2.0158,
      "step": 16
    },
    {
      "epoch": 0.5378151260504201,
      "eval_loss": 2.1604020595550537,
      "eval_runtime": 20.8469,
      "eval_samples_per_second": 2.398,
      "eval_steps_per_second": 0.336,
      "step": 16
    },
    {
      "epoch": 0.5714285714285714,
      "grad_norm": 0.2216796875,
      "learning_rate": 0.0001978556492299504,
      "loss": 2.2053,
      "step": 17
    },
    {
      "epoch": 0.6050420168067226,
      "grad_norm": 0.22265625,
      "learning_rate": 0.0001972022914080411,
      "loss": 2.0244,
      "step": 18
    },
    {
      "epoch": 0.6386554621848739,
      "grad_norm": 0.15625,
      "learning_rate": 0.00019646355819083589,
      "loss": 2.1662,
      "step": 19
    },
    {
      "epoch": 0.6722689075630253,
      "grad_norm": 0.2294921875,
      "learning_rate": 0.00019564009842765225,
      "loss": 1.9738,
      "step": 20
    },
    {
      "epoch": 0.7058823529411765,
      "grad_norm": 0.1435546875,
      "learning_rate": 0.00019473263538541914,
      "loss": 1.993,
      "step": 21
    },
    {
      "epoch": 0.7394957983193278,
      "grad_norm": 0.11962890625,
      "learning_rate": 0.0001937419661134121,
      "loss": 1.9879,
      "step": 22
    },
    {
      "epoch": 0.773109243697479,
      "grad_norm": 0.1611328125,
      "learning_rate": 0.00019266896074318334,
      "loss": 2.0819,
      "step": 23
    },
    {
      "epoch": 0.8067226890756303,
      "grad_norm": 0.1474609375,
      "learning_rate": 0.00019151456172430183,
      "loss": 1.9578,
      "step": 24
    },
    {
      "epoch": 0.8067226890756303,
      "eval_loss": 2.0713210105895996,
      "eval_runtime": 20.87,
      "eval_samples_per_second": 2.396,
      "eval_steps_per_second": 0.335,
      "step": 24
    },
    {
      "epoch": 0.8403361344537815,
      "grad_norm": 0.142578125,
      "learning_rate": 0.00019027978299657436,
      "loss": 1.9516,
      "step": 25
    },
    {
      "epoch": 0.8739495798319328,
      "grad_norm": 0.1162109375,
      "learning_rate": 0.00018896570909947475,
      "loss": 2.0161,
      "step": 26
    },
    {
      "epoch": 0.907563025210084,
      "grad_norm": 0.12890625,
      "learning_rate": 0.0001875734942195637,
      "loss": 2.1119,
      "step": 27
    },
    {
      "epoch": 0.9411764705882353,
      "grad_norm": 0.11083984375,
      "learning_rate": 0.00018610436117673555,
      "loss": 1.9466,
      "step": 28
    },
    {
      "epoch": 0.9747899159663865,
      "grad_norm": 0.1240234375,
      "learning_rate": 0.0001845596003501826,
      "loss": 2.0912,
      "step": 29
    },
    {
      "epoch": 1.0084033613445378,
      "grad_norm": 0.10107421875,
      "learning_rate": 0.0001829405685450202,
      "loss": 2.0932,
      "step": 30
    },
    {
      "epoch": 1.0420168067226891,
      "grad_norm": 0.09814453125,
      "learning_rate": 0.00018124868780056814,
      "loss": 2.0034,
      "step": 31
    },
    {
      "epoch": 1.0756302521008403,
      "grad_norm": 0.0810546875,
      "learning_rate": 0.00017948544414133534,
      "loss": 1.9457,
      "step": 32
    },
    {
      "epoch": 1.0756302521008403,
      "eval_loss": 2.0372533798217773,
      "eval_runtime": 20.998,
      "eval_samples_per_second": 2.381,
      "eval_steps_per_second": 0.333,
      "step": 32
    },
    {
      "epoch": 1.1092436974789917,
      "grad_norm": 0.06689453125,
      "learning_rate": 0.00017765238627180424,
      "loss": 1.9816,
      "step": 33
    },
    {
      "epoch": 1.1428571428571428,
      "grad_norm": 0.09423828125,
      "learning_rate": 0.00017575112421616202,
      "loss": 1.9487,
      "step": 34
    },
    {
      "epoch": 1.1764705882352942,
      "grad_norm": 0.078125,
      "learning_rate": 0.00017378332790417273,
      "loss": 1.9633,
      "step": 35
    },
    {
      "epoch": 1.2100840336134453,
      "grad_norm": 0.0927734375,
      "learning_rate": 0.00017175072570443312,
      "loss": 1.9589,
      "step": 36
    },
    {
      "epoch": 1.2436974789915967,
      "grad_norm": 0.080078125,
      "learning_rate": 0.00016965510290629972,
      "loss": 1.9918,
      "step": 37
    },
    {
      "epoch": 1.2773109243697478,
      "grad_norm": 0.08203125,
      "learning_rate": 0.00016749830015182107,
      "loss": 2.0112,
      "step": 38
    },
    {
      "epoch": 1.3109243697478992,
      "grad_norm": 0.0712890625,
      "learning_rate": 0.00016528221181905217,
      "loss": 1.9123,
      "step": 39
    },
    {
      "epoch": 1.3445378151260505,
      "grad_norm": 0.08203125,
      "learning_rate": 0.00016300878435817113,
      "loss": 1.9927,
      "step": 40
    },
    {
      "epoch": 1.3445378151260505,
      "eval_loss": 2.016634941101074,
      "eval_runtime": 20.893,
      "eval_samples_per_second": 2.393,
      "eval_steps_per_second": 0.335,
      "step": 40
    },
    {
      "epoch": 1.3781512605042017,
      "grad_norm": 0.080078125,
      "learning_rate": 0.00016068001458185936,
      "loss": 1.9231,
      "step": 41
    },
    {
      "epoch": 1.4117647058823528,
      "grad_norm": 0.07568359375,
      "learning_rate": 0.0001582979479114472,
      "loss": 1.9237,
      "step": 42
    },
    {
      "epoch": 1.4453781512605042,
      "grad_norm": 0.08544921875,
      "learning_rate": 0.00015586467658036524,
      "loss": 1.9926,
      "step": 43
    },
    {
      "epoch": 1.4789915966386555,
      "grad_norm": 0.06396484375,
      "learning_rate": 0.0001533823377964791,
      "loss": 1.8798,
      "step": 44
    },
    {
      "epoch": 1.5126050420168067,
      "grad_norm": 0.09814453125,
      "learning_rate": 0.00015085311186492206,
      "loss": 1.9035,
      "step": 45
    },
    {
      "epoch": 1.5462184873949578,
      "grad_norm": 0.07421875,
      "learning_rate": 0.00014827922027307451,
      "loss": 1.9381,
      "step": 46
    },
    {
      "epoch": 1.5798319327731094,
      "grad_norm": 0.0888671875,
      "learning_rate": 0.0001456629237393713,
      "loss": 1.9413,
      "step": 47
    },
    {
      "epoch": 1.6134453781512605,
      "grad_norm": 0.07373046875,
      "learning_rate": 0.00014300652022765207,
      "loss": 1.9848,
      "step": 48
    },
    {
      "epoch": 1.6134453781512605,
      "eval_loss": 2.002051830291748,
      "eval_runtime": 20.8487,
      "eval_samples_per_second": 2.398,
      "eval_steps_per_second": 0.336,
      "step": 48
    },
    {
      "epoch": 1.6470588235294117,
      "grad_norm": 0.0732421875,
      "learning_rate": 0.00014031234292879725,
      "loss": 1.9653,
      "step": 49
    },
    {
      "epoch": 1.680672268907563,
      "grad_norm": 0.07666015625,
      "learning_rate": 0.00013758275821142382,
      "loss": 1.8987,
      "step": 50
    },
    {
      "epoch": 1.7142857142857144,
      "grad_norm": 0.0849609375,
      "learning_rate": 0.0001348201635434399,
      "loss": 1.9558,
      "step": 51
    },
    {
      "epoch": 1.7478991596638656,
      "grad_norm": 0.14453125,
      "learning_rate": 0.00013202698538628376,
      "loss": 1.9345,
      "step": 52
    },
    {
      "epoch": 1.7815126050420167,
      "grad_norm": 0.07080078125,
      "learning_rate": 0.00012920567706369758,
      "loss": 2.042,
      "step": 53
    },
    {
      "epoch": 1.815126050420168,
      "grad_norm": 0.06982421875,
      "learning_rate": 0.00012635871660690676,
      "loss": 1.9398,
      "step": 54
    },
    {
      "epoch": 1.8487394957983194,
      "grad_norm": 0.06689453125,
      "learning_rate": 0.00012348860457809838,
      "loss": 1.9625,
      "step": 55
    },
    {
      "epoch": 1.8823529411764706,
      "grad_norm": 0.0712890625,
      "learning_rate": 0.00012059786187410984,
      "loss": 1.9446,
      "step": 56
    },
    {
      "epoch": 1.8823529411764706,
      "eval_loss": 1.9901797771453857,
      "eval_runtime": 20.9042,
      "eval_samples_per_second": 2.392,
      "eval_steps_per_second": 0.335,
      "step": 56
    },
    {
      "epoch": 1.9159663865546217,
      "grad_norm": 0.07373046875,
      "learning_rate": 0.0001176890275122573,
      "loss": 1.8896,
      "step": 57
    },
    {
      "epoch": 1.949579831932773,
      "grad_norm": 0.07373046875,
      "learning_rate": 0.00011476465640024814,
      "loss": 1.9352,
      "step": 58
    },
    {
      "epoch": 1.9831932773109244,
      "grad_norm": 0.068359375,
      "learning_rate": 0.00011182731709213659,
      "loss": 1.8319,
      "step": 59
    },
    {
      "epoch": 2.0168067226890756,
      "grad_norm": 0.076171875,
      "learning_rate": 0.00010887958953229349,
      "loss": 1.9105,
      "step": 60
    },
    {
      "epoch": 2.0504201680672267,
      "grad_norm": 0.0751953125,
      "learning_rate": 0.00010592406278937144,
      "loss": 2.0352,
      "step": 61
    },
    {
      "epoch": 2.0840336134453783,
      "grad_norm": 0.07568359375,
      "learning_rate": 0.00010296333278225599,
      "loss": 1.9011,
      "step": 62
    },
    {
      "epoch": 2.1176470588235294,
      "grad_norm": 0.068359375,
      "learning_rate": 0.0001,
      "loss": 1.9055,
      "step": 63
    },
    {
      "epoch": 2.1512605042016806,
      "grad_norm": 0.0654296875,
      "learning_rate": 9.703666721774402e-05,
      "loss": 1.9023,
      "step": 64
    },
    {
      "epoch": 2.1512605042016806,
      "eval_loss": 1.9825478792190552,
      "eval_runtime": 20.8366,
      "eval_samples_per_second": 2.4,
      "eval_steps_per_second": 0.336,
      "step": 64
    },
    {
      "epoch": 2.184873949579832,
      "grad_norm": 0.0703125,
      "learning_rate": 9.407593721062859e-05,
      "loss": 1.9397,
      "step": 65
    },
    {
      "epoch": 2.2184873949579833,
      "grad_norm": 0.0732421875,
      "learning_rate": 9.112041046770653e-05,
      "loss": 1.9439,
      "step": 66
    },
    {
      "epoch": 2.2521008403361344,
      "grad_norm": 0.06982421875,
      "learning_rate": 8.817268290786343e-05,
      "loss": 1.871,
      "step": 67
    },
    {
      "epoch": 2.2857142857142856,
      "grad_norm": 0.07080078125,
      "learning_rate": 8.523534359975189e-05,
      "loss": 1.9338,
      "step": 68
    },
    {
      "epoch": 2.3193277310924367,
      "grad_norm": 0.06884765625,
      "learning_rate": 8.231097248774274e-05,
      "loss": 1.9644,
      "step": 69
    },
    {
      "epoch": 2.3529411764705883,
      "grad_norm": 0.06591796875,
      "learning_rate": 7.940213812589018e-05,
      "loss": 1.8381,
      "step": 70
    },
    {
      "epoch": 2.3865546218487395,
      "grad_norm": 0.07421875,
      "learning_rate": 7.651139542190164e-05,
      "loss": 1.8596,
      "step": 71
    },
    {
      "epoch": 2.4201680672268906,
      "grad_norm": 0.0703125,
      "learning_rate": 7.364128339309326e-05,
      "loss": 1.834,
      "step": 72
    },
    {
      "epoch": 2.4201680672268906,
      "eval_loss": 1.9764043092727661,
      "eval_runtime": 20.8236,
      "eval_samples_per_second": 2.401,
      "eval_steps_per_second": 0.336,
      "step": 72
    },
    {
      "epoch": 2.453781512605042,
      "grad_norm": 0.06982421875,
      "learning_rate": 7.079432293630244e-05,
      "loss": 1.8935,
      "step": 73
    },
    {
      "epoch": 2.4873949579831933,
      "grad_norm": 0.0888671875,
      "learning_rate": 6.797301461371625e-05,
      "loss": 1.8493,
      "step": 74
    },
    {
      "epoch": 2.5210084033613445,
      "grad_norm": 0.0712890625,
      "learning_rate": 6.517983645656014e-05,
      "loss": 1.7431,
      "step": 75
    },
    {
      "epoch": 2.5546218487394956,
      "grad_norm": 0.0751953125,
      "learning_rate": 6.24172417885762e-05,
      "loss": 1.9838,
      "step": 76
    },
    {
      "epoch": 2.588235294117647,
      "grad_norm": 0.07568359375,
      "learning_rate": 5.96876570712028e-05,
      "loss": 1.8481,
      "step": 77
    },
    {
      "epoch": 2.6218487394957983,
      "grad_norm": 0.08056640625,
      "learning_rate": 5.699347977234799e-05,
      "loss": 1.8767,
      "step": 78
    },
    {
      "epoch": 2.6554621848739495,
      "grad_norm": 0.07373046875,
      "learning_rate": 5.43370762606287e-05,
      "loss": 1.9405,
      "step": 79
    },
    {
      "epoch": 2.689075630252101,
      "grad_norm": 0.12060546875,
      "learning_rate": 5.172077972692553e-05,
      "loss": 1.9834,
      "step": 80
    },
    {
      "epoch": 2.689075630252101,
      "eval_loss": 1.9720275402069092,
      "eval_runtime": 20.9083,
      "eval_samples_per_second": 2.391,
      "eval_steps_per_second": 0.335,
      "step": 80
    },
    {
      "epoch": 2.722689075630252,
      "grad_norm": 0.07275390625,
      "learning_rate": 4.914688813507797e-05,
      "loss": 1.9549,
      "step": 81
    },
    {
      "epoch": 2.7563025210084033,
      "grad_norm": 0.07568359375,
      "learning_rate": 4.661766220352097e-05,
      "loss": 1.9516,
      "step": 82
    },
    {
      "epoch": 2.7899159663865545,
      "grad_norm": 0.06884765625,
      "learning_rate": 4.4135323419634766e-05,
      "loss": 1.8236,
      "step": 83
    },
    {
      "epoch": 2.8235294117647056,
      "grad_norm": 0.0703125,
      "learning_rate": 4.170205208855281e-05,
      "loss": 1.8927,
      "step": 84
    },
    {
      "epoch": 2.857142857142857,
      "grad_norm": 0.076171875,
      "learning_rate": 3.931998541814069e-05,
      "loss": 1.8204,
      "step": 85
    },
    {
      "epoch": 2.8907563025210083,
      "grad_norm": 0.07421875,
      "learning_rate": 3.69912156418289e-05,
      "loss": 1.9341,
      "step": 86
    },
    {
      "epoch": 2.92436974789916,
      "grad_norm": 0.0693359375,
      "learning_rate": 3.471778818094785e-05,
      "loss": 1.8912,
      "step": 87
    },
    {
      "epoch": 2.957983193277311,
      "grad_norm": 0.087890625,
      "learning_rate": 3.250169984817897e-05,
      "loss": 1.997,
      "step": 88
    },
    {
      "epoch": 2.957983193277311,
      "eval_loss": 1.9688143730163574,
      "eval_runtime": 20.9804,
      "eval_samples_per_second": 2.383,
      "eval_steps_per_second": 0.334,
      "step": 88
    },
    {
      "epoch": 2.991596638655462,
      "grad_norm": 0.07958984375,
      "learning_rate": 3.034489709370033e-05,
      "loss": 1.9296,
      "step": 89
    },
    {
      "epoch": 3.0252100840336134,
      "grad_norm": 0.0791015625,
      "learning_rate": 2.8249274295566864e-05,
      "loss": 1.7802,
      "step": 90
    },
    {
      "epoch": 3.0588235294117645,
      "grad_norm": 0.0771484375,
      "learning_rate": 2.6216672095827266e-05,
      "loss": 1.8806,
      "step": 91
    },
    {
      "epoch": 3.092436974789916,
      "grad_norm": 0.0693359375,
      "learning_rate": 2.4248875783837987e-05,
      "loss": 1.915,
      "step": 92
    },
    {
      "epoch": 3.1260504201680672,
      "grad_norm": 0.07373046875,
      "learning_rate": 2.234761372819577e-05,
      "loss": 1.8913,
      "step": 93
    },
    {
      "epoch": 3.1596638655462184,
      "grad_norm": 0.07177734375,
      "learning_rate": 2.0514555858664663e-05,
      "loss": 1.893,
      "step": 94
    },
    {
      "epoch": 3.19327731092437,
      "grad_norm": 0.07470703125,
      "learning_rate": 1.875131219943187e-05,
      "loss": 1.8335,
      "step": 95
    },
    {
      "epoch": 3.226890756302521,
      "grad_norm": 0.072265625,
      "learning_rate": 1.7059431454979824e-05,
      "loss": 1.8704,
      "step": 96
    },
    {
      "epoch": 3.226890756302521,
      "eval_loss": 1.9671721458435059,
      "eval_runtime": 20.8472,
      "eval_samples_per_second": 2.398,
      "eval_steps_per_second": 0.336,
      "step": 96
    },
    {
      "epoch": 3.2605042016806722,
      "grad_norm": 0.0791015625,
      "learning_rate": 1.5440399649817385e-05,
      "loss": 1.9465,
      "step": 97
    },
    {
      "epoch": 3.2941176470588234,
      "grad_norm": 0.0703125,
      "learning_rate": 1.3895638823264446e-05,
      "loss": 1.9373,
      "step": 98
    },
    {
      "epoch": 3.327731092436975,
      "grad_norm": 0.06884765625,
      "learning_rate": 1.2426505780436326e-05,
      "loss": 1.8813,
      "step": 99
    },
    {
      "epoch": 3.361344537815126,
      "grad_norm": 0.0751953125,
      "learning_rate": 1.103429090052528e-05,
      "loss": 2.0051,
      "step": 100
    },
    {
      "epoch": 3.3949579831932772,
      "grad_norm": 0.07177734375,
      "learning_rate": 9.720217003425647e-06,
      "loss": 1.7872,
      "step": 101
    },
    {
      "epoch": 3.4285714285714284,
      "grad_norm": 0.06884765625,
      "learning_rate": 8.485438275698154e-06,
      "loss": 1.841,
      "step": 102
    },
    {
      "epoch": 3.46218487394958,
      "grad_norm": 0.07373046875,
      "learning_rate": 7.331039256816663e-06,
      "loss": 1.8911,
      "step": 103
    },
    {
      "epoch": 3.495798319327731,
      "grad_norm": 0.08203125,
      "learning_rate": 6.258033886587911e-06,
      "loss": 1.9608,
      "step": 104
    },
    {
      "epoch": 3.495798319327731,
      "eval_loss": 1.9667043685913086,
      "eval_runtime": 20.8449,
      "eval_samples_per_second": 2.399,
      "eval_steps_per_second": 0.336,
      "step": 104
    },
    {
      "epoch": 3.5294117647058822,
      "grad_norm": 0.07861328125,
      "learning_rate": 5.267364614580861e-06,
      "loss": 1.9863,
      "step": 105
    },
    {
      "epoch": 3.5630252100840334,
      "grad_norm": 0.0703125,
      "learning_rate": 4.359901572347758e-06,
      "loss": 1.9057,
      "step": 106
    },
    {
      "epoch": 3.596638655462185,
      "grad_norm": 0.08984375,
      "learning_rate": 3.5364418091641373e-06,
      "loss": 1.9399,
      "step": 107
    },
    {
      "epoch": 3.630252100840336,
      "grad_norm": 0.07470703125,
      "learning_rate": 2.7977085919589254e-06,
      "loss": 1.815,
      "step": 108
    },
    {
      "epoch": 3.6638655462184873,
      "grad_norm": 0.07470703125,
      "learning_rate": 2.144350770049597e-06,
      "loss": 1.8613,
      "step": 109
    },
    {
      "epoch": 3.697478991596639,
      "grad_norm": 0.068359375,
      "learning_rate": 1.576942205240317e-06,
      "loss": 1.7939,
      "step": 110
    },
    {
      "epoch": 3.73109243697479,
      "grad_norm": 0.0712890625,
      "learning_rate": 1.0959812677835968e-06,
      "loss": 1.7972,
      "step": 111
    },
    {
      "epoch": 3.764705882352941,
      "grad_norm": 0.0693359375,
      "learning_rate": 7.018903986483083e-07,
      "loss": 1.8624,
      "step": 112
    },
    {
      "epoch": 3.764705882352941,
      "eval_loss": 1.966654658317566,
      "eval_runtime": 20.828,
      "eval_samples_per_second": 2.401,
      "eval_steps_per_second": 0.336,
      "step": 112
    },
    {
      "epoch": 3.7983193277310923,
      "grad_norm": 0.07421875,
      "learning_rate": 3.950157384783104e-07,
      "loss": 1.8563,
      "step": 113
    },
    {
      "epoch": 3.831932773109244,
      "grad_norm": 0.08251953125,
      "learning_rate": 1.7562682356786487e-07,
      "loss": 1.9232,
      "step": 114
    },
    {
      "epoch": 3.865546218487395,
      "grad_norm": 0.07666015625,
      "learning_rate": 4.391634912056519e-08,
      "loss": 1.9241,
      "step": 115
    },
    {
      "epoch": 3.899159663865546,
      "grad_norm": 0.07373046875,
      "learning_rate": 0.0,
      "loss": 1.857,
      "step": 116
    }
  ],
  "logging_steps": 1,
  "max_steps": 116,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 4,
  "save_steps": 29,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.3243126006677504e+16,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}