{
  "best_metric": 1.3787956237792969,
  "best_model_checkpoint": "test-calls-summarization/checkpoint-1000",
  "epoch": 25.579536370903277,
  "global_step": 2000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.06,
      "learning_rate": 4.9999999999999996e-05,
      "loss": 2.0533,
      "step": 5
    },
    {
      "epoch": 0.13,
      "learning_rate": 9.999999999999999e-05,
      "loss": 1.0512,
      "step": 10
    },
    {
      "epoch": 0.19,
      "learning_rate": 0.00015,
      "loss": 0.7919,
      "step": 15
    },
    {
      "epoch": 0.26,
      "learning_rate": 0.00019999999999999998,
      "loss": 0.6876,
      "step": 20
    },
    {
      "epoch": 0.32,
      "learning_rate": 0.00025,
      "loss": 0.6567,
      "step": 25
    },
    {
      "epoch": 0.38,
      "learning_rate": 0.0003,
      "loss": 0.6271,
      "step": 30
    },
    {
      "epoch": 0.45,
      "learning_rate": 0.0002998495486459378,
      "loss": 0.5943,
      "step": 35
    },
    {
      "epoch": 0.51,
      "learning_rate": 0.00029969909729187557,
      "loss": 0.5666,
      "step": 40
    },
    {
      "epoch": 0.58,
      "learning_rate": 0.0002995486459378134,
      "loss": 0.5603,
      "step": 45
    },
    {
      "epoch": 0.64,
      "learning_rate": 0.0002993981945837512,
      "loss": 0.5282,
      "step": 50
    },
    {
      "epoch": 0.7,
      "learning_rate": 0.00029924774322968905,
      "loss": 0.5173,
      "step": 55
    },
    {
      "epoch": 0.77,
      "learning_rate": 0.00029909729187562687,
      "loss": 0.5011,
      "step": 60
    },
    {
      "epoch": 0.83,
      "learning_rate": 0.0002989468405215647,
      "loss": 0.4926,
      "step": 65
    },
    {
      "epoch": 0.9,
      "learning_rate": 0.00029879638916750247,
      "loss": 0.478,
      "step": 70
    },
    {
      "epoch": 0.96,
      "learning_rate": 0.0002986459378134403,
      "loss": 0.4766,
      "step": 75
    },
    {
      "epoch": 1.02,
      "learning_rate": 0.0002984954864593781,
      "loss": 0.4756,
      "step": 80
    },
    {
      "epoch": 1.09,
      "learning_rate": 0.0002983450351053159,
      "loss": 0.4229,
      "step": 85
    },
    {
      "epoch": 1.15,
      "learning_rate": 0.0002981945837512537,
      "loss": 0.4239,
      "step": 90
    },
    {
      "epoch": 1.22,
      "learning_rate": 0.00029804413239719154,
      "loss": 0.431,
      "step": 95
    },
    {
      "epoch": 1.28,
      "learning_rate": 0.00029789368104312936,
      "loss": 0.4333,
      "step": 100
    },
    {
      "epoch": 1.34,
      "learning_rate": 0.0002977432296890672,
      "loss": 0.4218,
      "step": 105
    },
    {
      "epoch": 1.41,
      "learning_rate": 0.000297592778335005,
      "loss": 0.4357,
      "step": 110
    },
    {
      "epoch": 1.47,
      "learning_rate": 0.0002974423269809428,
      "loss": 0.4197,
      "step": 115
    },
    {
      "epoch": 1.53,
      "learning_rate": 0.0002972918756268806,
      "loss": 0.4213,
      "step": 120
    },
    {
      "epoch": 1.6,
      "learning_rate": 0.00029714142427281844,
      "loss": 0.4087,
      "step": 125
    },
    {
      "epoch": 1.66,
      "learning_rate": 0.0002969909729187562,
      "loss": 0.4041,
      "step": 130
    },
    {
      "epoch": 1.73,
      "learning_rate": 0.00029684052156469403,
      "loss": 0.4103,
      "step": 135
    },
    {
      "epoch": 1.79,
      "learning_rate": 0.00029669007021063186,
      "loss": 0.4009,
      "step": 140
    },
    {
      "epoch": 1.85,
      "learning_rate": 0.0002965396188565697,
      "loss": 0.4077,
      "step": 145
    },
    {
      "epoch": 1.92,
      "learning_rate": 0.0002963891675025075,
      "loss": 0.4257,
      "step": 150
    },
    {
      "epoch": 1.98,
      "learning_rate": 0.00029623871614844533,
      "loss": 0.4111,
      "step": 155
    },
    {
      "epoch": 2.05,
      "learning_rate": 0.0002960882647943831,
      "loss": 0.3634,
      "step": 160
    },
    {
      "epoch": 2.11,
      "learning_rate": 0.00029593781344032093,
      "loss": 0.3532,
      "step": 165
    },
    {
      "epoch": 2.17,
      "learning_rate": 0.00029578736208625875,
      "loss": 0.3591,
      "step": 170
    },
    {
      "epoch": 2.24,
      "learning_rate": 0.0002956369107321965,
      "loss": 0.3506,
      "step": 175
    },
    {
      "epoch": 2.3,
      "learning_rate": 0.00029548645937813435,
      "loss": 0.3494,
      "step": 180
    },
    {
      "epoch": 2.37,
      "learning_rate": 0.0002953360080240722,
      "loss": 0.3598,
      "step": 185
    },
    {
      "epoch": 2.43,
      "learning_rate": 0.00029518555667001,
      "loss": 0.3656,
      "step": 190
    },
    {
      "epoch": 2.49,
      "learning_rate": 0.0002950351053159478,
      "loss": 0.351,
      "step": 195
    },
    {
      "epoch": 2.56,
      "learning_rate": 0.00029488465396188565,
      "loss": 0.3589,
      "step": 200
    },
    {
      "epoch": 2.62,
      "learning_rate": 0.0002947342026078234,
      "loss": 0.3564,
      "step": 205
    },
    {
      "epoch": 2.69,
      "learning_rate": 0.00029458375125376125,
      "loss": 0.3665,
      "step": 210
    },
    {
      "epoch": 2.75,
      "learning_rate": 0.0002944332998996991,
      "loss": 0.3472,
      "step": 215
    },
    {
      "epoch": 2.81,
      "learning_rate": 0.0002942828485456369,
      "loss": 0.3876,
      "step": 220
    },
    {
      "epoch": 2.88,
      "learning_rate": 0.0002941323971915747,
      "loss": 0.3831,
      "step": 225
    },
    {
      "epoch": 2.94,
      "learning_rate": 0.00029398194583751255,
      "loss": 0.3597,
      "step": 230
    },
    {
      "epoch": 3.01,
      "learning_rate": 0.0002938314944834503,
      "loss": 0.3781,
      "step": 235
    },
    {
      "epoch": 3.07,
      "learning_rate": 0.00029368104312938815,
      "loss": 0.3078,
      "step": 240
    },
    {
      "epoch": 3.13,
      "learning_rate": 0.00029353059177532597,
      "loss": 0.2986,
      "step": 245
    },
    {
      "epoch": 3.2,
      "learning_rate": 0.00029338014042126374,
      "loss": 0.3031,
      "step": 250
    },
    {
      "epoch": 3.2,
      "eval_loss": 0.9930399656295776,
      "eval_runtime": 0.3176,
      "eval_samples_per_second": 31.491,
      "eval_steps_per_second": 3.149,
      "step": 250
    },
    {
      "epoch": 3.26,
      "learning_rate": 0.00029322968906720157,
      "loss": 0.3132,
      "step": 255
    },
    {
      "epoch": 3.33,
      "learning_rate": 0.0002930792377131394,
      "loss": 0.3044,
      "step": 260
    },
    {
      "epoch": 3.39,
      "learning_rate": 0.0002929287863590772,
      "loss": 0.3154,
      "step": 265
    },
    {
      "epoch": 3.45,
      "learning_rate": 0.00029277833500501504,
      "loss": 0.3143,
      "step": 270
    },
    {
      "epoch": 3.52,
      "learning_rate": 0.00029262788365095287,
      "loss": 0.3169,
      "step": 275
    },
    {
      "epoch": 3.58,
      "learning_rate": 0.00029247743229689064,
      "loss": 0.3181,
      "step": 280
    },
    {
      "epoch": 3.65,
      "learning_rate": 0.00029232698094282846,
      "loss": 0.3227,
      "step": 285
    },
    {
      "epoch": 3.71,
      "learning_rate": 0.0002921765295887663,
      "loss": 0.3181,
      "step": 290
    },
    {
      "epoch": 3.77,
      "learning_rate": 0.00029202607823470406,
      "loss": 0.3528,
      "step": 295
    },
    {
      "epoch": 3.84,
      "learning_rate": 0.0002918756268806419,
      "loss": 0.3341,
      "step": 300
    },
    {
      "epoch": 3.9,
      "learning_rate": 0.0002917251755265797,
      "loss": 0.3274,
      "step": 305
    },
    {
      "epoch": 3.96,
      "learning_rate": 0.00029157472417251754,
      "loss": 0.3227,
      "step": 310
    },
    {
      "epoch": 4.03,
      "learning_rate": 0.00029142427281845536,
      "loss": 0.3069,
      "step": 315
    },
    {
      "epoch": 4.09,
      "learning_rate": 0.0002912738214643932,
      "loss": 0.2687,
      "step": 320
    },
    {
      "epoch": 4.16,
      "learning_rate": 0.00029112337011033096,
      "loss": 0.2736,
      "step": 325
    },
    {
      "epoch": 4.22,
      "learning_rate": 0.0002909729187562688,
      "loss": 0.2736,
      "step": 330
    },
    {
      "epoch": 4.28,
      "learning_rate": 0.0002908224674022066,
      "loss": 0.2684,
      "step": 335
    },
    {
      "epoch": 4.35,
      "learning_rate": 0.0002906720160481444,
      "loss": 0.2608,
      "step": 340
    },
    {
      "epoch": 4.41,
      "learning_rate": 0.0002905215646940822,
      "loss": 0.2688,
      "step": 345
    },
    {
      "epoch": 4.48,
      "learning_rate": 0.00029037111334002003,
      "loss": 0.3065,
      "step": 350
    },
    {
      "epoch": 4.54,
      "learning_rate": 0.00029022066198595785,
      "loss": 0.2872,
      "step": 355
    },
    {
      "epoch": 4.6,
      "learning_rate": 0.0002900702106318957,
      "loss": 0.285,
      "step": 360
    },
    {
      "epoch": 4.67,
      "learning_rate": 0.0002899197592778335,
      "loss": 0.4004,
      "step": 365
    },
    {
      "epoch": 4.73,
      "learning_rate": 0.0002897693079237713,
      "loss": 0.2869,
      "step": 370
    },
    {
      "epoch": 4.8,
      "learning_rate": 0.0002896188565697091,
      "loss": 0.288,
      "step": 375
    },
    {
      "epoch": 4.86,
      "learning_rate": 0.0002894684052156469,
      "loss": 0.2779,
      "step": 380
    },
    {
      "epoch": 4.92,
      "learning_rate": 0.0002893179538615847,
      "loss": 0.2856,
      "step": 385
    },
    {
      "epoch": 4.99,
      "learning_rate": 0.0002891675025075225,
      "loss": 0.3069,
      "step": 390
    },
    {
      "epoch": 5.05,
      "learning_rate": 0.00028901705115346035,
      "loss": 0.2484,
      "step": 395
    },
    {
      "epoch": 5.12,
      "learning_rate": 0.0002888665997993982,
      "loss": 0.2466,
      "step": 400
    },
    {
      "epoch": 5.18,
      "learning_rate": 0.000288716148445336,
      "loss": 0.2498,
      "step": 405
    },
    {
      "epoch": 5.24,
      "learning_rate": 0.0002885656970912738,
      "loss": 0.2475,
      "step": 410
    },
    {
      "epoch": 5.31,
      "learning_rate": 0.0002884152457372116,
      "loss": 0.2406,
      "step": 415
    },
    {
      "epoch": 5.37,
      "learning_rate": 0.0002882647943831494,
      "loss": 0.2436,
      "step": 420
    },
    {
      "epoch": 5.44,
      "learning_rate": 0.00028811434302908724,
      "loss": 0.2492,
      "step": 425
    },
    {
      "epoch": 5.5,
      "learning_rate": 0.000287963891675025,
      "loss": 0.2553,
      "step": 430
    },
    {
      "epoch": 5.56,
      "learning_rate": 0.00028781344032096284,
      "loss": 0.2551,
      "step": 435
    },
    {
      "epoch": 5.63,
      "learning_rate": 0.00028766298896690067,
      "loss": 0.2585,
      "step": 440
    },
    {
      "epoch": 5.69,
      "learning_rate": 0.0002875125376128385,
      "loss": 0.2648,
      "step": 445
    },
    {
      "epoch": 5.76,
      "learning_rate": 0.0002873620862587763,
      "loss": 0.2755,
      "step": 450
    },
    {
      "epoch": 5.82,
      "learning_rate": 0.00028721163490471414,
      "loss": 0.2614,
      "step": 455
    },
    {
      "epoch": 5.88,
      "learning_rate": 0.0002870611835506519,
      "loss": 0.2678,
      "step": 460
    },
    {
      "epoch": 5.95,
      "learning_rate": 0.00028691073219658974,
      "loss": 0.2726,
      "step": 465
    },
    {
      "epoch": 6.01,
      "learning_rate": 0.00028676028084252756,
      "loss": 0.2561,
      "step": 470
    },
    {
      "epoch": 6.08,
      "learning_rate": 0.00028660982948846533,
      "loss": 0.214,
      "step": 475
    },
    {
      "epoch": 6.14,
      "learning_rate": 0.00028645937813440316,
      "loss": 0.2213,
      "step": 480
    },
    {
      "epoch": 6.2,
      "learning_rate": 0.000286308926780341,
      "loss": 0.2217,
      "step": 485
    },
    {
      "epoch": 6.27,
      "learning_rate": 0.0002861584754262788,
      "loss": 0.2213,
      "step": 490
    },
    {
      "epoch": 6.33,
      "learning_rate": 0.00028600802407221664,
      "loss": 0.2344,
      "step": 495
    },
    {
      "epoch": 6.39,
      "learning_rate": 0.00028585757271815446,
      "loss": 0.4603,
      "step": 500
    },
    {
      "epoch": 6.39,
      "eval_loss": 1.0373685359954834,
      "eval_runtime": 0.3183,
      "eval_samples_per_second": 31.418,
      "eval_steps_per_second": 3.142,
      "step": 500
    },
    {
      "epoch": 6.46,
      "learning_rate": 0.00028570712136409223,
      "loss": 0.2353,
      "step": 505
    },
    {
      "epoch": 6.52,
      "learning_rate": 0.00028555667001003006,
      "loss": 0.2269,
      "step": 510
    },
    {
      "epoch": 6.59,
      "learning_rate": 0.0002854062186559679,
      "loss": 0.2318,
      "step": 515
    },
    {
      "epoch": 6.65,
      "learning_rate": 0.00028525576730190565,
      "loss": 0.2338,
      "step": 520
    },
    {
      "epoch": 6.71,
      "learning_rate": 0.0002851053159478435,
      "loss": 0.2336,
      "step": 525
    },
    {
      "epoch": 6.78,
      "learning_rate": 0.0002849548645937813,
      "loss": 0.2385,
      "step": 530
    },
    {
      "epoch": 6.84,
      "learning_rate": 0.00028480441323971913,
      "loss": 0.2356,
      "step": 535
    },
    {
      "epoch": 6.91,
      "learning_rate": 0.00028465396188565695,
      "loss": 0.2437,
      "step": 540
    },
    {
      "epoch": 6.97,
      "learning_rate": 0.0002845035105315948,
      "loss": 0.2417,
      "step": 545
    },
    {
      "epoch": 7.03,
      "learning_rate": 0.00028435305917753255,
      "loss": 0.2167,
      "step": 550
    },
    {
      "epoch": 7.1,
      "learning_rate": 0.0002842026078234704,
      "loss": 0.1974,
      "step": 555
    },
    {
      "epoch": 7.16,
      "learning_rate": 0.0002840521564694082,
      "loss": 0.2092,
      "step": 560
    },
    {
      "epoch": 7.23,
      "learning_rate": 0.000283901705115346,
      "loss": 0.2009,
      "step": 565
    },
    {
      "epoch": 7.29,
      "learning_rate": 0.00028375125376128385,
      "loss": 0.5423,
      "step": 570
    },
    {
      "epoch": 7.35,
      "learning_rate": 0.0002836008024072217,
      "loss": 0.7044,
      "step": 575
    },
    {
      "epoch": 7.42,
      "learning_rate": 0.00028345035105315945,
      "loss": 0.3172,
      "step": 580
    },
    {
      "epoch": 7.48,
      "learning_rate": 0.00028329989969909727,
      "loss": 0.2165,
      "step": 585
    },
    {
      "epoch": 7.55,
      "learning_rate": 0.0002831494483450351,
      "loss": 0.2017,
      "step": 590
    },
    {
      "epoch": 7.61,
      "learning_rate": 0.00028299899699097287,
      "loss": 0.1976,
      "step": 595
    },
    {
      "epoch": 7.67,
      "learning_rate": 0.0002828485456369107,
      "loss": 0.2036,
      "step": 600
    },
    {
      "epoch": 7.74,
      "learning_rate": 0.0002826980942828485,
      "loss": 0.2082,
      "step": 605
    },
    {
      "epoch": 7.8,
      "learning_rate": 0.00028254764292878634,
      "loss": 0.2104,
      "step": 610
    },
    {
      "epoch": 7.87,
      "learning_rate": 0.00028239719157472417,
      "loss": 0.2194,
      "step": 615
    },
    {
      "epoch": 7.93,
      "learning_rate": 0.000282246740220662,
      "loss": 0.2104,
      "step": 620
    },
    {
      "epoch": 7.99,
      "learning_rate": 0.00028209628886659977,
      "loss": 0.2183,
      "step": 625
    },
    {
      "epoch": 8.06,
      "learning_rate": 0.0002819458375125376,
      "loss": 0.1773,
      "step": 630
    },
    {
      "epoch": 8.12,
      "learning_rate": 0.0002817953861584754,
      "loss": 0.1753,
      "step": 635
    },
    {
      "epoch": 8.19,
      "learning_rate": 0.0002816449348044132,
      "loss": 0.1737,
      "step": 640
    },
    {
      "epoch": 8.25,
      "learning_rate": 0.000281494483450351,
      "loss": 0.1807,
      "step": 645
    },
    {
      "epoch": 8.31,
      "learning_rate": 0.00028134403209628884,
      "loss": 0.1858,
      "step": 650
    },
    {
      "epoch": 8.38,
      "learning_rate": 0.00028119358074222666,
      "loss": 0.1836,
      "step": 655
    },
    {
      "epoch": 8.44,
      "learning_rate": 0.0002810431293881645,
      "loss": 0.1887,
      "step": 660
    },
    {
      "epoch": 8.51,
      "learning_rate": 0.0002808926780341023,
      "loss": 0.1908,
      "step": 665
    },
    {
      "epoch": 8.57,
      "learning_rate": 0.0002807422266800401,
      "loss": 0.1933,
      "step": 670
    },
    {
      "epoch": 8.63,
      "learning_rate": 0.0002805917753259779,
      "loss": 0.1969,
      "step": 675
    },
    {
      "epoch": 8.7,
      "learning_rate": 0.00028044132397191574,
      "loss": 0.194,
      "step": 680
    },
    {
      "epoch": 8.76,
      "learning_rate": 0.0002802908726178535,
      "loss": 0.1895,
      "step": 685
    },
    {
      "epoch": 8.82,
      "learning_rate": 0.00028014042126379133,
      "loss": 0.1957,
      "step": 690
    },
    {
      "epoch": 8.89,
      "learning_rate": 0.00027998996990972916,
      "loss": 0.1927,
      "step": 695
    },
    {
      "epoch": 8.95,
      "learning_rate": 0.000279839518555667,
      "loss": 0.1891,
      "step": 700
    },
    {
      "epoch": 9.02,
      "learning_rate": 0.0002796890672016048,
      "loss": 0.1809,
      "step": 705
    },
    {
      "epoch": 9.08,
      "learning_rate": 0.00027953861584754263,
      "loss": 0.1443,
      "step": 710
    },
    {
      "epoch": 9.14,
      "learning_rate": 0.0002793881644934804,
      "loss": 0.1487,
      "step": 715
    },
    {
      "epoch": 9.21,
      "learning_rate": 0.00027923771313941823,
      "loss": 0.1521,
      "step": 720
    },
    {
      "epoch": 9.27,
      "learning_rate": 0.00027908726178535605,
      "loss": 0.1544,
      "step": 725
    },
    {
      "epoch": 9.34,
      "learning_rate": 0.0002789368104312938,
      "loss": 0.1564,
      "step": 730
    },
    {
      "epoch": 9.4,
      "learning_rate": 0.00027878635907723165,
      "loss": 0.1634,
      "step": 735
    },
    {
      "epoch": 9.46,
      "learning_rate": 0.0002786359077231695,
      "loss": 0.1644,
      "step": 740
    },
    {
      "epoch": 9.53,
      "learning_rate": 0.0002784854563691073,
      "loss": 0.1643,
      "step": 745
    },
    {
      "epoch": 9.59,
      "learning_rate": 0.0002783350050150451,
      "loss": 0.1721,
      "step": 750
    },
    {
      "epoch": 9.59,
      "eval_loss": 1.1369589567184448,
      "eval_runtime": 0.3185,
      "eval_samples_per_second": 31.399,
      "eval_steps_per_second": 3.14,
      "step": 750
    },
    {
      "epoch": 9.66,
      "learning_rate": 0.00027818455366098295,
      "loss": 0.1755,
      "step": 755
    },
    {
      "epoch": 9.72,
      "learning_rate": 0.0002780341023069207,
      "loss": 0.1733,
      "step": 760
    },
    {
      "epoch": 9.78,
      "learning_rate": 0.00027788365095285855,
      "loss": 0.1689,
      "step": 765
    },
    {
      "epoch": 9.85,
      "learning_rate": 0.00027773319959879637,
      "loss": 0.1668,
      "step": 770
    },
    {
      "epoch": 9.91,
      "learning_rate": 0.00027758274824473414,
      "loss": 0.1774,
      "step": 775
    },
    {
      "epoch": 9.98,
      "learning_rate": 0.00027743229689067197,
      "loss": 0.1742,
      "step": 780
    },
    {
      "epoch": 10.04,
      "learning_rate": 0.0002772818455366098,
      "loss": 0.1513,
      "step": 785
    },
    {
      "epoch": 10.1,
      "learning_rate": 0.0002771313941825476,
      "loss": 0.1361,
      "step": 790
    },
    {
      "epoch": 10.17,
      "learning_rate": 0.00027698094282848544,
      "loss": 0.1387,
      "step": 795
    },
    {
      "epoch": 10.23,
      "learning_rate": 0.00027683049147442327,
      "loss": 0.1472,
      "step": 800
    },
    {
      "epoch": 10.3,
      "learning_rate": 0.00027668004012036104,
      "loss": 0.1439,
      "step": 805
    },
    {
      "epoch": 10.36,
      "learning_rate": 0.00027652958876629887,
      "loss": 0.1389,
      "step": 810
    },
    {
      "epoch": 10.42,
      "learning_rate": 0.0002763791374122367,
      "loss": 0.1454,
      "step": 815
    },
    {
      "epoch": 10.49,
      "learning_rate": 0.00027622868605817446,
      "loss": 0.1504,
      "step": 820
    },
    {
      "epoch": 10.55,
      "learning_rate": 0.0002760782347041123,
      "loss": 0.1475,
      "step": 825
    },
    {
      "epoch": 10.62,
      "learning_rate": 0.0002759277833500501,
      "loss": 0.1478,
      "step": 830
    },
    {
      "epoch": 10.68,
      "learning_rate": 0.00027577733199598794,
      "loss": 0.1496,
      "step": 835
    },
    {
      "epoch": 10.74,
      "learning_rate": 0.00027562688064192576,
      "loss": 0.1475,
      "step": 840
    },
    {
      "epoch": 10.81,
      "learning_rate": 0.0002754764292878636,
      "loss": 0.1499,
      "step": 845
    },
    {
      "epoch": 10.87,
      "learning_rate": 0.00027532597793380136,
      "loss": 0.152,
      "step": 850
    },
    {
      "epoch": 10.94,
      "learning_rate": 0.0002751755265797392,
      "loss": 0.1514,
      "step": 855
    },
    {
      "epoch": 11.0,
      "learning_rate": 0.000275025075225677,
      "loss": 0.153,
      "step": 860
    },
    {
      "epoch": 11.06,
      "learning_rate": 0.0002748746238716148,
      "loss": 0.1153,
      "step": 865
    },
    {
      "epoch": 11.13,
      "learning_rate": 0.0002747241725175526,
      "loss": 0.1181,
      "step": 870
    },
    {
      "epoch": 11.19,
      "learning_rate": 0.00027457372116349043,
      "loss": 0.1201,
      "step": 875
    },
    {
      "epoch": 11.25,
      "learning_rate": 0.00027442326980942826,
      "loss": 0.1164,
      "step": 880
    },
    {
      "epoch": 11.32,
      "learning_rate": 0.0002742728184553661,
      "loss": 0.1177,
      "step": 885
    },
    {
      "epoch": 11.38,
      "learning_rate": 0.0002741223671013039,
      "loss": 0.1205,
      "step": 890
    },
    {
      "epoch": 11.45,
      "learning_rate": 0.0002739719157472417,
      "loss": 0.1257,
      "step": 895
    },
    {
      "epoch": 11.51,
      "learning_rate": 0.0002738214643931795,
      "loss": 0.1291,
      "step": 900
    },
    {
      "epoch": 11.57,
      "learning_rate": 0.00027367101303911733,
      "loss": 0.1277,
      "step": 905
    },
    {
      "epoch": 11.64,
      "learning_rate": 0.00027352056168505515,
      "loss": 0.1343,
      "step": 910
    },
    {
      "epoch": 11.7,
      "learning_rate": 0.000273370110330993,
      "loss": 0.1312,
      "step": 915
    },
    {
      "epoch": 11.77,
      "learning_rate": 0.0002732196589769308,
      "loss": 0.1314,
      "step": 920
    },
    {
      "epoch": 11.83,
      "learning_rate": 0.0002730692076228686,
      "loss": 0.1367,
      "step": 925
    },
    {
      "epoch": 11.89,
      "learning_rate": 0.0002729187562688064,
      "loss": 0.1336,
      "step": 930
    },
    {
      "epoch": 11.96,
      "learning_rate": 0.0002727683049147442,
      "loss": 0.1367,
      "step": 935
    },
    {
      "epoch": 12.02,
      "learning_rate": 0.000272617853560682,
      "loss": 0.1266,
      "step": 940
    },
    {
      "epoch": 12.09,
      "learning_rate": 0.0002724674022066198,
      "loss": 0.1001,
      "step": 945
    },
    {
      "epoch": 12.15,
      "learning_rate": 0.00027231695085255765,
      "loss": 0.106,
      "step": 950
    },
    {
      "epoch": 12.21,
      "learning_rate": 0.00027216649949849547,
      "loss": 0.1078,
      "step": 955
    },
    {
      "epoch": 12.28,
      "learning_rate": 0.0002720160481444333,
      "loss": 0.1065,
      "step": 960
    },
    {
      "epoch": 12.34,
      "learning_rate": 0.0002718655967903711,
      "loss": 0.1077,
      "step": 965
    },
    {
      "epoch": 12.41,
      "learning_rate": 0.0002717151454363089,
      "loss": 0.1089,
      "step": 970
    },
    {
      "epoch": 12.47,
      "learning_rate": 0.0002715646940822467,
      "loss": 0.1094,
      "step": 975
    },
    {
      "epoch": 12.53,
      "learning_rate": 0.00027141424272818454,
      "loss": 0.1205,
      "step": 980
    },
    {
      "epoch": 12.6,
      "learning_rate": 0.0002712637913741223,
      "loss": 0.1231,
      "step": 985
    },
    {
      "epoch": 12.66,
      "learning_rate": 0.00027111334002006014,
      "loss": 0.1169,
      "step": 990
    },
    {
      "epoch": 12.73,
      "learning_rate": 0.00027096288866599797,
      "loss": 0.1154,
      "step": 995
    },
    {
      "epoch": 12.79,
      "learning_rate": 0.0002708124373119358,
      "loss": 0.113,
      "step": 1000
    },
    {
      "epoch": 12.79,
      "eval_loss": 1.3787956237792969,
      "eval_runtime": 0.319,
      "eval_samples_per_second": 31.345,
      "eval_steps_per_second": 3.135,
      "step": 1000
    },
    {
      "epoch": 12.85,
      "learning_rate": 0.0002706619859578736,
      "loss": 0.1123,
      "step": 1005
    },
    {
      "epoch": 12.92,
      "learning_rate": 0.00027051153460381144,
      "loss": 0.117,
      "step": 1010
    },
    {
      "epoch": 12.98,
      "learning_rate": 0.0002703610832497492,
      "loss": 0.1199,
      "step": 1015
    },
    {
      "epoch": 13.05,
      "learning_rate": 0.00027021063189568704,
      "loss": 0.0943,
      "step": 1020
    },
    {
      "epoch": 13.11,
      "learning_rate": 0.00027006018054162486,
      "loss": 0.0851,
      "step": 1025
    },
    {
      "epoch": 13.17,
      "learning_rate": 0.00026990972918756263,
      "loss": 0.0906,
      "step": 1030
    },
    {
      "epoch": 13.24,
      "learning_rate": 0.00026975927783350046,
      "loss": 0.0911,
      "step": 1035
    },
    {
      "epoch": 13.3,
      "learning_rate": 0.0002696088264794383,
      "loss": 0.0925,
      "step": 1040
    },
    {
      "epoch": 13.37,
      "learning_rate": 0.0002694583751253761,
      "loss": 0.095,
      "step": 1045
    },
    {
      "epoch": 13.43,
      "learning_rate": 0.00026930792377131393,
      "loss": 0.0921,
      "step": 1050
    },
    {
      "epoch": 13.49,
      "learning_rate": 0.00026915747241725176,
      "loss": 0.0958,
      "step": 1055
    },
    {
      "epoch": 13.56,
      "learning_rate": 0.00026900702106318953,
      "loss": 0.0969,
      "step": 1060
    },
    {
      "epoch": 13.62,
      "learning_rate": 0.00026885656970912736,
      "loss": 0.0973,
      "step": 1065
    },
    {
      "epoch": 13.69,
      "learning_rate": 0.0002687061183550652,
      "loss": 0.1008,
      "step": 1070
    },
    {
      "epoch": 13.75,
      "learning_rate": 0.00026855566700100295,
      "loss": 0.1021,
      "step": 1075
    },
    {
      "epoch": 13.81,
      "learning_rate": 0.0002684052156469408,
      "loss": 0.1018,
      "step": 1080
    },
    {
      "epoch": 13.88,
      "learning_rate": 0.0002682547642928786,
      "loss": 0.1019,
      "step": 1085
    },
    {
      "epoch": 13.94,
      "learning_rate": 0.00026810431293881643,
      "loss": 0.1035,
      "step": 1090
    },
    {
      "epoch": 14.0,
      "learning_rate": 0.00026795386158475425,
      "loss": 0.1081,
      "step": 1095
    },
    {
      "epoch": 14.07,
      "learning_rate": 0.0002678034102306921,
      "loss": 0.0823,
      "step": 1100
    },
    {
      "epoch": 14.13,
      "learning_rate": 0.00026765295887662985,
      "loss": 0.0767,
      "step": 1105
    },
    {
      "epoch": 14.2,
      "learning_rate": 0.0002675025075225677,
      "loss": 0.0808,
      "step": 1110
    },
    {
      "epoch": 14.26,
      "learning_rate": 0.0002673520561685055,
      "loss": 0.0827,
      "step": 1115
    },
    {
      "epoch": 14.32,
      "learning_rate": 0.00026720160481444327,
      "loss": 0.0831,
      "step": 1120
    },
    {
      "epoch": 14.39,
      "learning_rate": 0.0002670511534603811,
      "loss": 0.0853,
      "step": 1125
    },
    {
      "epoch": 14.45,
      "learning_rate": 0.0002669007021063189,
      "loss": 0.084,
      "step": 1130
    },
    {
      "epoch": 14.52,
      "learning_rate": 0.00026675025075225675,
      "loss": 0.0867,
      "step": 1135
    },
    {
      "epoch": 14.58,
      "learning_rate": 0.00026659979939819457,
      "loss": 0.0856,
      "step": 1140
    },
    {
      "epoch": 14.64,
      "learning_rate": 0.0002664493480441324,
      "loss": 0.0864,
      "step": 1145
    },
    {
      "epoch": 14.71,
      "learning_rate": 0.00026629889669007017,
      "loss": 0.0867,
      "step": 1150
    },
    {
      "epoch": 14.77,
      "learning_rate": 0.000266148445336008,
      "loss": 0.0862,
      "step": 1155
    },
    {
      "epoch": 14.84,
      "learning_rate": 0.0002659979939819458,
      "loss": 0.0902,
      "step": 1160
    },
    {
      "epoch": 14.9,
      "learning_rate": 0.0002658475426278836,
      "loss": 0.0924,
      "step": 1165
    },
    {
      "epoch": 14.96,
      "learning_rate": 0.0002656970912738214,
      "loss": 0.0939,
      "step": 1170
    },
    {
      "epoch": 15.03,
      "learning_rate": 0.00026554663991975924,
      "loss": 0.082,
      "step": 1175
    },
    {
      "epoch": 15.09,
      "learning_rate": 0.00026539618856569707,
      "loss": 0.0703,
      "step": 1180
    },
    {
      "epoch": 15.16,
      "learning_rate": 0.0002652457372116349,
      "loss": 0.0695,
      "step": 1185
    },
    {
      "epoch": 15.22,
      "learning_rate": 0.0002650952858575727,
      "loss": 0.0703,
      "step": 1190
    },
    {
      "epoch": 15.28,
      "learning_rate": 0.0002649448345035105,
      "loss": 0.0707,
      "step": 1195
    },
    {
      "epoch": 15.35,
      "learning_rate": 0.0002647943831494483,
      "loss": 0.071,
      "step": 1200
    },
    {
      "epoch": 15.41,
      "learning_rate": 0.00026464393179538614,
      "loss": 0.0727,
      "step": 1205
    },
    {
      "epoch": 15.48,
      "learning_rate": 0.0002644934804413239,
      "loss": 0.0771,
      "step": 1210
    },
    {
      "epoch": 15.54,
      "learning_rate": 0.00026434302908726173,
      "loss": 0.0793,
      "step": 1215
    },
    {
      "epoch": 15.6,
      "learning_rate": 0.00026419257773319956,
      "loss": 0.076,
      "step": 1220
    },
    {
      "epoch": 15.67,
      "learning_rate": 0.0002640421263791374,
      "loss": 0.0744,
      "step": 1225
    },
    {
      "epoch": 15.73,
      "learning_rate": 0.0002638916750250752,
      "loss": 0.0779,
      "step": 1230
    },
    {
      "epoch": 15.8,
      "learning_rate": 0.00026374122367101303,
      "loss": 0.0785,
      "step": 1235
    },
    {
      "epoch": 15.86,
      "learning_rate": 0.0002635907723169508,
      "loss": 0.0782,
      "step": 1240
    },
    {
      "epoch": 15.92,
      "learning_rate": 0.00026344032096288863,
      "loss": 0.079,
      "step": 1245
    },
    {
      "epoch": 15.99,
      "learning_rate": 0.00026328986960882646,
      "loss": 0.0815,
      "step": 1250
    },
    {
      "epoch": 15.99,
      "eval_loss": 1.4458354711532593,
      "eval_runtime": 0.3198,
      "eval_samples_per_second": 31.27,
      "eval_steps_per_second": 3.127,
      "step": 1250
    },
    {
      "epoch": 16.05,
      "learning_rate": 0.0002631394182547643,
      "loss": 0.0598,
      "step": 1255
    },
    {
      "epoch": 16.12,
      "learning_rate": 0.0002629889669007021,
      "loss": 0.0578,
      "step": 1260
    },
    {
      "epoch": 16.18,
      "learning_rate": 0.00026283851554663993,
      "loss": 0.0596,
      "step": 1265
    },
    {
      "epoch": 16.24,
      "learning_rate": 0.0002626880641925777,
      "loss": 0.0588,
      "step": 1270
    },
    {
      "epoch": 16.31,
      "learning_rate": 0.00026253761283851553,
      "loss": 0.0592,
      "step": 1275
    },
    {
      "epoch": 16.37,
      "learning_rate": 0.00026238716148445335,
      "loss": 0.0614,
      "step": 1280
    },
    {
      "epoch": 16.43,
      "learning_rate": 0.0002622367101303911,
      "loss": 0.0641,
      "step": 1285
    },
    {
      "epoch": 16.5,
      "learning_rate": 0.00026208625877632895,
      "loss": 0.0661,
      "step": 1290
    },
    {
      "epoch": 16.56,
      "learning_rate": 0.0002619358074222668,
      "loss": 0.0685,
      "step": 1295
    },
    {
      "epoch": 16.63,
      "learning_rate": 0.0002617853560682046,
      "loss": 0.0646,
      "step": 1300
    },
    {
      "epoch": 16.69,
      "learning_rate": 0.0002616349047141424,
      "loss": 0.0662,
      "step": 1305
    },
    {
      "epoch": 16.75,
      "learning_rate": 0.00026148445336008025,
      "loss": 0.0699,
      "step": 1310
    },
    {
      "epoch": 16.82,
      "learning_rate": 0.000261334002006018,
      "loss": 0.069,
      "step": 1315
    },
    {
      "epoch": 16.88,
      "learning_rate": 0.00026118355065195585,
      "loss": 0.0683,
      "step": 1320
    },
    {
      "epoch": 16.95,
      "learning_rate": 0.00026103309929789367,
      "loss": 0.0714,
      "step": 1325
    },
    {
      "epoch": 17.01,
      "learning_rate": 0.00026088264794383144,
      "loss": 0.0682,
      "step": 1330
    },
    {
      "epoch": 17.07,
      "learning_rate": 0.00026073219658976927,
      "loss": 0.0537,
      "step": 1335
    },
    {
      "epoch": 17.14,
      "learning_rate": 0.0002605817452357071,
      "loss": 0.055,
      "step": 1340
    },
    {
      "epoch": 17.2,
      "learning_rate": 0.0002604312938816449,
      "loss": 0.0557,
      "step": 1345
    },
    {
      "epoch": 17.27,
      "learning_rate": 0.00026028084252758274,
      "loss": 0.0544,
      "step": 1350
    },
    {
      "epoch": 17.33,
      "learning_rate": 0.00026013039117352057,
      "loss": 0.0545,
      "step": 1355
    },
    {
      "epoch": 17.39,
      "learning_rate": 0.00025997993981945834,
      "loss": 0.0589,
      "step": 1360
    },
    {
      "epoch": 17.46,
      "learning_rate": 0.00025982948846539616,
      "loss": 0.0598,
      "step": 1365
    },
    {
      "epoch": 17.52,
      "learning_rate": 0.000259679037111334,
      "loss": 0.0608,
      "step": 1370
    },
    {
      "epoch": 17.59,
      "learning_rate": 0.00025952858575727176,
      "loss": 0.0628,
      "step": 1375
    },
    {
      "epoch": 17.65,
      "learning_rate": 0.0002593781344032096,
      "loss": 0.0603,
      "step": 1380
    },
    {
      "epoch": 17.71,
      "learning_rate": 0.0002592276830491474,
      "loss": 0.065,
      "step": 1385
    },
    {
      "epoch": 17.78,
      "learning_rate": 0.00025907723169508524,
      "loss": 0.0648,
      "step": 1390
    },
    {
      "epoch": 17.84,
      "learning_rate": 0.00025892678034102306,
      "loss": 0.0649,
      "step": 1395
    },
    {
      "epoch": 17.91,
      "learning_rate": 0.0002587763289869609,
      "loss": 0.0647,
      "step": 1400
    },
    {
      "epoch": 17.97,
      "learning_rate": 0.00025862587763289866,
      "loss": 0.0687,
      "step": 1405
    },
    {
      "epoch": 18.03,
      "learning_rate": 0.0002584754262788365,
      "loss": 0.0592,
      "step": 1410
    },
    {
      "epoch": 18.1,
      "learning_rate": 0.0002583249749247743,
      "loss": 0.047,
      "step": 1415
    },
    {
      "epoch": 18.16,
      "learning_rate": 0.0002581745235707121,
      "loss": 0.0486,
      "step": 1420
    },
    {
      "epoch": 18.23,
      "learning_rate": 0.0002580240722166499,
      "loss": 0.0526,
      "step": 1425
    },
    {
      "epoch": 18.29,
      "learning_rate": 0.00025787362086258773,
      "loss": 0.05,
      "step": 1430
    },
    {
      "epoch": 18.35,
      "learning_rate": 0.00025772316950852556,
      "loss": 0.0646,
      "step": 1435
    },
    {
      "epoch": 18.42,
      "learning_rate": 0.0002575727181544634,
      "loss": 0.0557,
      "step": 1440
    },
    {
      "epoch": 18.48,
      "learning_rate": 0.0002574222668004012,
      "loss": 0.0534,
      "step": 1445
    },
    {
      "epoch": 18.55,
      "learning_rate": 0.000257271815446339,
      "loss": 0.056,
      "step": 1450
    },
    {
      "epoch": 18.61,
      "learning_rate": 0.0002571213640922768,
      "loss": 0.0553,
      "step": 1455
    },
    {
      "epoch": 18.67,
      "learning_rate": 0.00025697091273821463,
      "loss": 0.057,
      "step": 1460
    },
    {
      "epoch": 18.74,
      "learning_rate": 0.0002568204613841524,
      "loss": 0.0545,
      "step": 1465
    },
    {
      "epoch": 18.8,
      "learning_rate": 0.0002566700100300902,
      "loss": 0.0557,
      "step": 1470
    },
    {
      "epoch": 18.86,
      "learning_rate": 0.00025651955867602805,
      "loss": 0.0587,
      "step": 1475
    },
    {
      "epoch": 18.93,
      "learning_rate": 0.0002563691073219659,
      "loss": 0.0574,
      "step": 1480
    },
    {
      "epoch": 18.99,
      "learning_rate": 0.0002562186559679037,
      "loss": 0.0625,
      "step": 1485
    },
    {
      "epoch": 19.06,
      "learning_rate": 0.0002560682046138415,
      "loss": 0.1149,
      "step": 1490
    },
    {
      "epoch": 19.12,
      "learning_rate": 0.0002559177532597793,
      "loss": 0.059,
      "step": 1495
    },
    {
      "epoch": 19.18,
      "learning_rate": 0.0002557673019057171,
      "loss": 0.0472,
      "step": 1500
    },
    {
      "epoch": 19.18,
      "eval_loss": 1.7803338766098022,
      "eval_runtime": 0.3184,
      "eval_samples_per_second": 31.405,
      "eval_steps_per_second": 3.14,
      "step": 1500
    },
    {
      "epoch": 19.25,
      "learning_rate": 0.00025561685055165495,
      "loss": 0.0432,
      "step": 1505
    },
    {
      "epoch": 19.31,
      "learning_rate": 0.0002554663991975927,
      "loss": 0.0415,
      "step": 1510
    },
    {
      "epoch": 19.38,
      "learning_rate": 0.00025531594784353054,
      "loss": 0.047,
      "step": 1515
    },
    {
      "epoch": 19.44,
      "learning_rate": 0.00025516549648946837,
      "loss": 0.0473,
      "step": 1520
    },
    {
      "epoch": 19.5,
      "learning_rate": 0.0002550150451354062,
      "loss": 0.0482,
      "step": 1525
    },
    {
      "epoch": 19.57,
      "learning_rate": 0.000254864593781344,
      "loss": 0.0493,
      "step": 1530
    },
    {
      "epoch": 19.63,
      "learning_rate": 0.00025471414242728184,
      "loss": 0.0513,
      "step": 1535
    },
    {
      "epoch": 19.7,
      "learning_rate": 0.0002545636910732196,
      "loss": 0.0514,
      "step": 1540
    },
    {
      "epoch": 19.76,
      "learning_rate": 0.00025441323971915744,
      "loss": 0.0509,
      "step": 1545
    },
    {
      "epoch": 19.82,
      "learning_rate": 0.00025426278836509526,
      "loss": 0.0512,
      "step": 1550
    },
    {
      "epoch": 19.89,
      "learning_rate": 0.0002541123370110331,
      "loss": 0.0535,
      "step": 1555
    },
    {
      "epoch": 19.95,
      "learning_rate": 0.00025396188565697086,
      "loss": 0.0522,
      "step": 1560
    },
    {
      "epoch": 20.02,
      "learning_rate": 0.0002538114343029087,
      "loss": 0.0491,
      "step": 1565
    },
    {
      "epoch": 20.08,
      "learning_rate": 0.0002536609829488465,
      "loss": 0.0364,
      "step": 1570
    },
    {
      "epoch": 20.14,
      "learning_rate": 0.00025351053159478434,
      "loss": 0.0398,
      "step": 1575
    },
    {
      "epoch": 20.21,
      "learning_rate": 0.00025336008024072216,
      "loss": 0.0393,
      "step": 1580
    },
    {
      "epoch": 20.27,
      "learning_rate": 0.00025320962888665993,
      "loss": 0.0418,
      "step": 1585
    },
    {
      "epoch": 20.34,
      "learning_rate": 0.00025305917753259776,
      "loss": 0.0436,
      "step": 1590
    },
    {
      "epoch": 20.4,
      "learning_rate": 0.0002529087261785356,
      "loss": 0.0439,
      "step": 1595
    },
    {
      "epoch": 20.46,
      "learning_rate": 0.0002527582748244734,
      "loss": 0.0458,
      "step": 1600
    },
    {
      "epoch": 20.53,
      "learning_rate": 0.00025260782347041123,
      "loss": 0.0443,
      "step": 1605
    },
    {
      "epoch": 20.59,
      "learning_rate": 0.00025245737211634906,
      "loss": 0.0443,
      "step": 1610
    },
    {
      "epoch": 20.66,
      "learning_rate": 0.00025230692076228683,
      "loss": 0.0439,
      "step": 1615
    },
    {
      "epoch": 20.72,
      "learning_rate": 0.00025215646940822465,
      "loss": 0.0431,
      "step": 1620
    },
    {
      "epoch": 20.78,
      "learning_rate": 0.0002520060180541625,
      "loss": 0.0451,
      "step": 1625
    },
    {
      "epoch": 20.85,
      "learning_rate": 0.00025185556670010025,
      "loss": 0.0474,
      "step": 1630
    },
    {
      "epoch": 20.91,
      "learning_rate": 0.0002517051153460381,
      "loss": 0.0479,
      "step": 1635
    },
    {
      "epoch": 20.98,
      "learning_rate": 0.0002515546639919759,
      "loss": 0.0483,
      "step": 1640
    },
    {
      "epoch": 21.04,
      "learning_rate": 0.0002514042126379137,
      "loss": 0.0368,
      "step": 1645
    },
    {
      "epoch": 21.1,
      "learning_rate": 0.00025125376128385155,
      "loss": 0.032,
      "step": 1650
    },
    {
      "epoch": 21.17,
      "learning_rate": 0.0002511033099297894,
      "loss": 0.0343,
      "step": 1655
    },
    {
      "epoch": 21.23,
      "learning_rate": 0.00025095285857572715,
      "loss": 0.0366,
      "step": 1660
    },
    {
      "epoch": 21.29,
      "learning_rate": 0.000250802407221665,
      "loss": 0.0377,
      "step": 1665
    },
    {
      "epoch": 21.36,
      "learning_rate": 0.0002506519558676028,
      "loss": 0.0378,
      "step": 1670
    },
    {
      "epoch": 21.42,
      "learning_rate": 0.00025050150451354057,
      "loss": 0.0374,
      "step": 1675
    },
    {
      "epoch": 21.49,
      "learning_rate": 0.0002503510531594784,
      "loss": 0.0421,
      "step": 1680
    },
    {
      "epoch": 21.55,
      "learning_rate": 0.0002502006018054162,
      "loss": 0.037,
      "step": 1685
    },
    {
      "epoch": 21.61,
      "learning_rate": 0.00025005015045135405,
      "loss": 0.0392,
      "step": 1690
    },
    {
      "epoch": 21.68,
      "learning_rate": 0.00024989969909729187,
      "loss": 0.0421,
      "step": 1695
    },
    {
      "epoch": 21.74,
      "learning_rate": 0.0002497492477432297,
      "loss": 0.0425,
      "step": 1700
    },
    {
      "epoch": 21.81,
      "learning_rate": 0.00024959879638916747,
      "loss": 0.0442,
      "step": 1705
    },
    {
      "epoch": 21.87,
      "learning_rate": 0.0002494483450351053,
      "loss": 0.042,
      "step": 1710
    },
    {
      "epoch": 21.93,
      "learning_rate": 0.0002492978936810431,
      "loss": 0.0425,
      "step": 1715
    },
    {
      "epoch": 22.0,
      "learning_rate": 0.0002491474423269809,
      "loss": 0.0432,
      "step": 1720
    },
    {
      "epoch": 22.06,
      "learning_rate": 0.0002489969909729187,
      "loss": 0.029,
      "step": 1725
    },
    {
      "epoch": 22.13,
      "learning_rate": 0.00024884653961885654,
      "loss": 0.0303,
      "step": 1730
    },
    {
      "epoch": 22.19,
      "learning_rate": 0.00024869608826479436,
      "loss": 0.0324,
      "step": 1735
    },
    {
      "epoch": 22.25,
      "learning_rate": 0.0002485456369107322,
      "loss": 0.0319,
      "step": 1740
    },
    {
      "epoch": 22.32,
      "learning_rate": 0.00024839518555667,
      "loss": 0.0325,
      "step": 1745
    },
    {
      "epoch": 22.38,
      "learning_rate": 0.0002482447342026078,
      "loss": 0.033,
      "step": 1750
    },
    {
      "epoch": 22.38,
      "eval_loss": 1.8502811193466187,
      "eval_runtime": 0.319,
      "eval_samples_per_second": 31.348,
      "eval_steps_per_second": 3.135,
      "step": 1750
    },
    {
      "epoch": 22.45,
      "learning_rate": 0.0002480942828485456,
      "loss": 0.032,
      "step": 1755
    },
    {
      "epoch": 22.51,
      "learning_rate": 0.00024794383149448344,
      "loss": 0.0352,
      "step": 1760
    },
    {
      "epoch": 22.57,
      "learning_rate": 0.0002477933801404212,
      "loss": 0.0367,
      "step": 1765
    },
    {
      "epoch": 22.64,
      "learning_rate": 0.00024764292878635903,
      "loss": 0.0382,
      "step": 1770
    },
    {
      "epoch": 22.7,
      "learning_rate": 0.00024749247743229686,
      "loss": 0.039,
      "step": 1775
    },
    {
      "epoch": 22.77,
      "learning_rate": 0.0002473420260782347,
      "loss": 0.0387,
      "step": 1780
    },
    {
      "epoch": 22.83,
      "learning_rate": 0.0002471915747241725,
      "loss": 0.0411,
      "step": 1785
    },
    {
      "epoch": 22.89,
      "learning_rate": 0.00024704112337011033,
      "loss": 0.04,
      "step": 1790
    },
    {
      "epoch": 22.96,
      "learning_rate": 0.0002468906720160481,
      "loss": 0.0405,
      "step": 1795
    },
    {
      "epoch": 23.02,
      "learning_rate": 0.00024674022066198593,
      "loss": 0.0347,
      "step": 1800
    },
    {
      "epoch": 23.09,
      "learning_rate": 0.00024658976930792375,
      "loss": 0.0295,
      "step": 1805
    },
    {
      "epoch": 23.15,
      "learning_rate": 0.0002464393179538615,
      "loss": 0.0298,
      "step": 1810
    },
    {
      "epoch": 23.21,
      "learning_rate": 0.00024628886659979935,
      "loss": 0.0314,
      "step": 1815
    },
    {
      "epoch": 23.28,
      "learning_rate": 0.0002461384152457372,
      "loss": 0.0334,
      "step": 1820
    },
    {
      "epoch": 23.34,
      "learning_rate": 0.000245987963891675,
      "loss": 0.035,
      "step": 1825
    },
    {
      "epoch": 23.41,
      "learning_rate": 0.0002458375125376128,
      "loss": 0.033,
      "step": 1830
    },
    {
      "epoch": 23.47,
      "learning_rate": 0.00024568706118355065,
      "loss": 0.0336,
      "step": 1835
    },
    {
      "epoch": 23.53,
      "learning_rate": 0.0002455366098294884,
      "loss": 0.0338,
      "step": 1840
    },
    {
      "epoch": 23.6,
      "learning_rate": 0.00024538615847542625,
      "loss": 0.0371,
      "step": 1845
    },
    {
      "epoch": 23.66,
      "learning_rate": 0.0002452357071213641,
      "loss": 0.0372,
      "step": 1850
    },
    {
      "epoch": 23.73,
      "learning_rate": 0.00024508525576730184,
      "loss": 0.0365,
      "step": 1855
    },
    {
      "epoch": 23.79,
      "learning_rate": 0.00024493480441323967,
      "loss": 0.0368,
      "step": 1860
    },
    {
      "epoch": 23.85,
      "learning_rate": 0.0002447843530591775,
      "loss": 0.0347,
      "step": 1865
    },
    {
      "epoch": 23.92,
      "learning_rate": 0.0002446339017051153,
      "loss": 0.0362,
      "step": 1870
    },
    {
      "epoch": 23.98,
      "learning_rate": 0.00024448345035105315,
      "loss": 0.0379,
      "step": 1875
    },
    {
      "epoch": 24.04,
      "learning_rate": 0.00024433299899699097,
      "loss": 0.0398,
      "step": 1880
    },
    {
      "epoch": 24.11,
      "learning_rate": 0.00024418254764292874,
      "loss": 0.0288,
      "step": 1885
    },
    {
      "epoch": 24.17,
      "learning_rate": 0.00024403209628886657,
      "loss": 0.0305,
      "step": 1890
    },
    {
      "epoch": 24.24,
      "learning_rate": 0.0002438816449348044,
      "loss": 0.0304,
      "step": 1895
    },
    {
      "epoch": 24.3,
      "learning_rate": 0.0002437311935807422,
      "loss": 0.0282,
      "step": 1900
    },
    {
      "epoch": 24.36,
      "learning_rate": 0.00024358074222668002,
      "loss": 0.0289,
      "step": 1905
    },
    {
      "epoch": 24.43,
      "learning_rate": 0.00024343029087261784,
      "loss": 0.0302,
      "step": 1910
    },
    {
      "epoch": 24.49,
      "learning_rate": 0.00024327983951855564,
      "loss": 0.0258,
      "step": 1915
    },
    {
      "epoch": 24.56,
      "learning_rate": 0.00024312938816449346,
      "loss": 0.0266,
      "step": 1920
    },
    {
      "epoch": 24.62,
      "learning_rate": 0.0002429789368104313,
      "loss": 0.0278,
      "step": 1925
    },
    {
      "epoch": 24.68,
      "learning_rate": 0.00024282848545636906,
      "loss": 0.0316,
      "step": 1930
    },
    {
      "epoch": 24.75,
      "learning_rate": 0.00024267803410230689,
      "loss": 0.0328,
      "step": 1935
    },
    {
      "epoch": 24.81,
      "learning_rate": 0.0002425275827482447,
      "loss": 0.0336,
      "step": 1940
    },
    {
      "epoch": 24.88,
      "learning_rate": 0.0002423771313941825,
      "loss": 0.0311,
      "step": 1945
    },
    {
      "epoch": 24.94,
      "learning_rate": 0.00024222668004012033,
      "loss": 0.0324,
      "step": 1950
    },
    {
      "epoch": 25.0,
      "learning_rate": 0.00024207622868605816,
      "loss": 0.0339,
      "step": 1955
    },
    {
      "epoch": 25.07,
      "learning_rate": 0.00024192577733199596,
      "loss": 0.0235,
      "step": 1960
    },
    {
      "epoch": 25.13,
      "learning_rate": 0.00024177532597793378,
      "loss": 0.029,
      "step": 1965
    },
    {
      "epoch": 25.2,
      "learning_rate": 0.0002416248746238716,
      "loss": 0.0281,
      "step": 1970
    },
    {
      "epoch": 25.26,
      "learning_rate": 0.00024147442326980938,
      "loss": 0.0276,
      "step": 1975
    },
    {
      "epoch": 25.32,
      "learning_rate": 0.0002413239719157472,
      "loss": 0.0282,
      "step": 1980
    },
    {
      "epoch": 25.39,
      "learning_rate": 0.00024117352056168503,
      "loss": 0.0308,
      "step": 1985
    },
    {
      "epoch": 25.45,
      "learning_rate": 0.00024102306920762283,
      "loss": 0.0313,
      "step": 1990
    },
    {
      "epoch": 25.52,
      "learning_rate": 0.00024087261785356065,
      "loss": 0.0311,
      "step": 1995
    },
    {
      "epoch": 25.58,
      "learning_rate": 0.00024072216649949848,
      "loss": 0.0307,
      "step": 2000
    },
    {
      "epoch": 25.58,
      "eval_loss": 1.802398920059204,
      "eval_runtime": 0.3194,
      "eval_samples_per_second": 31.309,
      "eval_steps_per_second": 3.131,
      "step": 2000
    }
  ],
  "max_steps": 10000,
  "num_train_epochs": 129,
  "total_flos": 4.2458758196763034e+17,
  "trial_name": null,
  "trial_params": null
}