clm7b0129-cds-0.8-kendall-onof-ofif-corr-max-2-simpo-max1500-default/checkpoint-150/trainer_state.json
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.12310217480508823,
  "eval_steps": 50,
  "global_step": 150,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.008206811653672548,
      "grad_norm": 0.07778492569923401,
      "learning_rate": 4.999451708687114e-06,
      "logits/chosen": -2.053281307220459,
      "logits/rejected": -2.495474338531494,
      "logps/chosen": -0.3126755356788635,
      "logps/rejected": -0.3312620520591736,
      "loss": 7.6211,
      "rewards/accuracies": 0.44999998807907104,
      "rewards/chosen": -0.4690132737159729,
      "rewards/margins": 0.027879873290657997,
      "rewards/rejected": -0.49689316749572754,
      "step": 10
    },
    {
      "epoch": 0.016413623307345096,
      "grad_norm": 0.07773654907941818,
      "learning_rate": 4.997807075247147e-06,
      "logits/chosen": -2.0624098777770996,
      "logits/rejected": -2.4424185752868652,
      "logps/chosen": -0.26926660537719727,
      "logps/rejected": -0.2978014051914215,
      "loss": 7.5195,
      "rewards/accuracies": 0.48750001192092896,
      "rewards/chosen": -0.4038998484611511,
      "rewards/margins": 0.04280223697423935,
      "rewards/rejected": -0.44670209288597107,
      "step": 20
    },
    {
      "epoch": 0.024620434961017644,
      "grad_norm": 0.07357177883386612,
      "learning_rate": 4.9950668210706795e-06,
      "logits/chosen": -2.068427562713623,
      "logits/rejected": -2.486642360687256,
      "logps/chosen": -0.29993391036987305,
      "logps/rejected": -0.34360918402671814,
      "loss": 7.4913,
      "rewards/accuracies": 0.4749999940395355,
      "rewards/chosen": -0.4499008059501648,
      "rewards/margins": 0.06551288068294525,
      "rewards/rejected": -0.5154137015342712,
      "step": 30
    },
    {
      "epoch": 0.03282724661469019,
      "grad_norm": 0.14212799072265625,
      "learning_rate": 4.9912321481237616e-06,
      "logits/chosen": -2.015650987625122,
      "logits/rejected": -2.3838727474212646,
      "logps/chosen": -0.2911723852157593,
      "logps/rejected": -0.30521970987319946,
      "loss": 7.5217,
      "rewards/accuracies": 0.48750001192092896,
      "rewards/chosen": -0.4367586076259613,
      "rewards/margins": 0.021070968359708786,
      "rewards/rejected": -0.4578295648097992,
      "step": 40
    },
    {
      "epoch": 0.04103405826836274,
      "grad_norm": 0.08107248693704605,
      "learning_rate": 4.986304738420684e-06,
      "logits/chosen": -2.1150989532470703,
      "logits/rejected": -2.4338631629943848,
      "logps/chosen": -0.26249754428863525,
      "logps/rejected": -0.3132360577583313,
      "loss": 7.519,
      "rewards/accuracies": 0.5375000238418579,
      "rewards/chosen": -0.3937462866306305,
      "rewards/margins": 0.07610772550106049,
      "rewards/rejected": -0.4698540270328522,
      "step": 50
    },
    {
      "epoch": 0.04103405826836274,
      "eval_logits/chosen": -2.0232737064361572,
      "eval_logits/rejected": -2.4952735900878906,
      "eval_logps/chosen": -0.27974528074264526,
      "eval_logps/rejected": -0.3420677185058594,
      "eval_loss": 0.9291417598724365,
      "eval_rewards/accuracies": 0.49494948983192444,
      "eval_rewards/chosen": -0.41961798071861267,
      "eval_rewards/margins": 0.09348361939191818,
      "eval_rewards/rejected": -0.5131015777587891,
      "eval_runtime": 26.0563,
      "eval_samples_per_second": 30.242,
      "eval_steps_per_second": 3.799,
      "step": 50
    },
    {
      "epoch": 0.04924086992203529,
      "grad_norm": 0.06815352290868759,
      "learning_rate": 4.980286753286196e-06,
      "logits/chosen": -1.9890680313110352,
      "logits/rejected": -2.3848204612731934,
      "logps/chosen": -0.26213228702545166,
      "logps/rejected": -0.31342557072639465,
      "loss": 7.432,
      "rewards/accuracies": 0.512499988079071,
      "rewards/chosen": -0.3931984603404999,
      "rewards/margins": 0.0769399031996727,
      "rewards/rejected": -0.4701383709907532,
      "step": 60
    },
    {
      "epoch": 0.057447681575707836,
      "grad_norm": 0.06748568266630173,
      "learning_rate": 4.973180832407471e-06,
      "logits/chosen": -2.070542812347412,
      "logits/rejected": -2.3977038860321045,
      "logps/chosen": -0.24570491909980774,
      "logps/rejected": -0.3655605912208557,
      "loss": 7.35,
      "rewards/accuracies": 0.5874999761581421,
      "rewards/chosen": -0.3685573935508728,
      "rewards/margins": 0.17978355288505554,
      "rewards/rejected": -0.548340916633606,
      "step": 70
    },
    {
      "epoch": 0.06565449322938038,
      "grad_norm": 0.10909309983253479,
      "learning_rate": 4.964990092676263e-06,
      "logits/chosen": -2.2012317180633545,
      "logits/rejected": -2.346029758453369,
      "logps/chosen": -0.2279246598482132,
      "logps/rejected": -0.35396742820739746,
      "loss": 7.5082,
      "rewards/accuracies": 0.6499999761581421,
      "rewards/chosen": -0.341886967420578,
      "rewards/margins": 0.18906418979167938,
      "rewards/rejected": -0.5309511423110962,
      "step": 80
    },
    {
      "epoch": 0.07386130488305294,
      "grad_norm": 0.05977805703878403,
      "learning_rate": 4.9557181268217225e-06,
      "logits/chosen": -2.0719449520111084,
      "logits/rejected": -2.4491190910339355,
      "logps/chosen": -0.2503294348716736,
      "logps/rejected": -0.29939892888069153,
      "loss": 7.5129,
      "rewards/accuracies": 0.4625000059604645,
      "rewards/chosen": -0.37549418210983276,
      "rewards/margins": 0.07360419631004333,
      "rewards/rejected": -0.4490983486175537,
      "step": 90
    },
    {
      "epoch": 0.08206811653672548,
      "grad_norm": 0.051751479506492615,
      "learning_rate": 4.9453690018345144e-06,
      "logits/chosen": -2.0634045600891113,
      "logits/rejected": -2.458428382873535,
      "logps/chosen": -0.24033495783805847,
      "logps/rejected": -0.29080909490585327,
      "loss": 7.4432,
      "rewards/accuracies": 0.5249999761581421,
      "rewards/chosen": -0.3605024516582489,
      "rewards/margins": 0.07571124285459518,
      "rewards/rejected": -0.4362136721611023,
      "step": 100
    },
    {
      "epoch": 0.08206811653672548,
      "eval_logits/chosen": -2.0207154750823975,
      "eval_logits/rejected": -2.486215353012085,
      "eval_logps/chosen": -0.2376101016998291,
      "eval_logps/rejected": -0.32593628764152527,
      "eval_loss": 0.9085211753845215,
      "eval_rewards/accuracies": 0.5353535413742065,
      "eval_rewards/chosen": -0.35641518235206604,
      "eval_rewards/margins": 0.13248924911022186,
      "eval_rewards/rejected": -0.4889043867588043,
      "eval_runtime": 26.0119,
      "eval_samples_per_second": 30.294,
      "eval_steps_per_second": 3.806,
      "step": 100
    },
    {
      "epoch": 0.09027492819039803,
      "grad_norm": 0.06007291004061699,
      "learning_rate": 4.933947257182901e-06,
      "logits/chosen": -2.1248741149902344,
      "logits/rejected": -2.409808874130249,
      "logps/chosen": -0.2354653775691986,
      "logps/rejected": -0.30269068479537964,
      "loss": 7.317,
      "rewards/accuracies": 0.5249999761581421,
      "rewards/chosen": -0.3531980812549591,
      "rewards/margins": 0.10083796828985214,
      "rewards/rejected": -0.45403605699539185,
      "step": 110
    },
    {
      "epoch": 0.09848173984407058,
      "grad_norm": 0.055738095194101334,
      "learning_rate": 4.921457902821578e-06,
      "logits/chosen": -2.0635311603546143,
      "logits/rejected": -2.4297730922698975,
      "logps/chosen": -0.2315257489681244,
      "logps/rejected": -0.33639490604400635,
      "loss": 7.2775,
      "rewards/accuracies": 0.612500011920929,
      "rewards/chosen": -0.3472886383533478,
      "rewards/margins": 0.15730372071266174,
      "rewards/rejected": -0.5045923590660095,
      "step": 120
    },
    {
      "epoch": 0.10668855149774313,
      "grad_norm": 0.07971248030662537,
      "learning_rate": 4.907906416994146e-06,
      "logits/chosen": -2.07852840423584,
      "logits/rejected": -2.4043469429016113,
      "logps/chosen": -0.20596058666706085,
      "logps/rejected": -0.33416762948036194,
      "loss": 7.336,
      "rewards/accuracies": 0.637499988079071,
      "rewards/chosen": -0.3089408874511719,
      "rewards/margins": 0.19231058657169342,
      "rewards/rejected": -0.5012514591217041,
      "step": 130
    },
    {
      "epoch": 0.11489536315141567,
      "grad_norm": 0.08581534773111343,
      "learning_rate": 4.893298743830168e-06,
      "logits/chosen": -2.115981340408325,
      "logits/rejected": -2.5363636016845703,
      "logps/chosen": -0.22111928462982178,
      "logps/rejected": -0.3136863708496094,
      "loss": 7.2892,
      "rewards/accuracies": 0.550000011920929,
      "rewards/chosen": -0.33167898654937744,
      "rewards/margins": 0.1388506144285202,
      "rewards/rejected": -0.47052955627441406,
      "step": 140
    },
    {
      "epoch": 0.12310217480508823,
      "grad_norm": 0.06293604522943497,
      "learning_rate": 4.8776412907378845e-06,
      "logits/chosen": -2.070842742919922,
      "logits/rejected": -2.4669342041015625,
      "logps/chosen": -0.20812074840068817,
      "logps/rejected": -0.29536327719688416,
      "loss": 7.26,
      "rewards/accuracies": 0.5874999761581421,
      "rewards/chosen": -0.31218111515045166,
      "rewards/margins": 0.13086381554603577,
      "rewards/rejected": -0.4430449604988098,
      "step": 150
    },
    {
      "epoch": 0.12310217480508823,
      "eval_logits/chosen": -2.062544822692871,
      "eval_logits/rejected": -2.5318312644958496,
      "eval_logps/chosen": -0.2108660489320755,
      "eval_logps/rejected": -0.3196176588535309,
      "eval_loss": 0.8929102420806885,
      "eval_rewards/accuracies": 0.5555555820465088,
      "eval_rewards/chosen": -0.31629908084869385,
      "eval_rewards/margins": 0.1631273776292801,
      "eval_rewards/rejected": -0.47942644357681274,
      "eval_runtime": 26.0407,
      "eval_samples_per_second": 30.26,
      "eval_steps_per_second": 3.802,
      "step": 150
    }
  ],
  "logging_steps": 10,
  "max_steps": 1500,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 50,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 5.453968644641915e+17,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}
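
A minimal sketch (not part of the checkpoint itself) of how the `log_history` above can be read back to summarize the SimPO run so far. The relative file path is an assumption based on the header; only the standard-library `json` module is used, and the field names (`loss`, `eval_loss`, `rewards/margins`, `rewards/accuracies`, `step`) are taken directly from the state shown above.

```python
import json

# Assumed relative path; adjust to wherever the checkpoint directory lives.
with open("checkpoint-150/trainer_state.json") as f:
    state = json.load(f)

# Training-step entries carry "loss"; evaluation entries carry "eval_loss".
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_loss" in e]

for e in train_logs:
    print(f'step {e["step"]:>4}  '
          f'loss {e["loss"]:.4f}  '
          f'reward margin {e["rewards/margins"]:.4f}  '
          f'accuracy {e["rewards/accuracies"]:.3f}')

for e in eval_logs:
    print(f'eval @ step {e["step"]:>4}  '
          f'eval_loss {e["eval_loss"]:.4f}  '
          f'eval reward margin {e["eval_rewards/margins"]:.4f}')
```

Run against this checkpoint, the eval rows would show `eval_loss` falling from 0.9291 at step 50 to 0.8929 at step 150 while the eval reward margin grows, which is the trend visible in the raw entries above.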