{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.10040160642570281,
  "eval_steps": 9,
  "global_step": 25,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.004016064257028112,
      "grad_norm": 0.7228589057922363,
      "learning_rate": 2.0000000000000003e-06,
      "loss": 1.6631,
      "step": 1
    },
    {
      "epoch": 0.004016064257028112,
      "eval_loss": 1.0697861909866333,
      "eval_runtime": 32.5901,
      "eval_samples_per_second": 6.444,
      "eval_steps_per_second": 0.828,
      "step": 1
    },
    {
      "epoch": 0.008032128514056224,
      "grad_norm": 1.0123487710952759,
      "learning_rate": 4.000000000000001e-06,
      "loss": 1.6836,
      "step": 2
    },
    {
      "epoch": 0.012048192771084338,
      "grad_norm": 1.005481481552124,
      "learning_rate": 6e-06,
      "loss": 1.9228,
      "step": 3
    },
    {
      "epoch": 0.01606425702811245,
      "grad_norm": 0.8270795941352844,
      "learning_rate": 8.000000000000001e-06,
      "loss": 2.45,
      "step": 4
    },
    {
      "epoch": 0.020080321285140562,
      "grad_norm": 0.9999402165412903,
      "learning_rate": 1e-05,
      "loss": 2.668,
      "step": 5
    },
    {
      "epoch": 0.024096385542168676,
      "grad_norm": 0.9072601199150085,
      "learning_rate": 1.2e-05,
      "loss": 1.8627,
      "step": 6
    },
    {
      "epoch": 0.028112449799196786,
      "grad_norm": 1.1333314180374146,
      "learning_rate": 1.4e-05,
      "loss": 2.8953,
      "step": 7
    },
    {
      "epoch": 0.0321285140562249,
      "grad_norm": 0.9936800003051758,
      "learning_rate": 1.6000000000000003e-05,
      "loss": 2.2984,
      "step": 8
    },
    {
      "epoch": 0.03614457831325301,
      "grad_norm": 0.7884902954101562,
      "learning_rate": 1.8e-05,
      "loss": 1.4,
      "step": 9
    },
    {
      "epoch": 0.03614457831325301,
      "eval_loss": 1.0618449449539185,
      "eval_runtime": 32.5955,
      "eval_samples_per_second": 6.443,
      "eval_steps_per_second": 0.828,
      "step": 9
    },
    {
      "epoch": 0.040160642570281124,
      "grad_norm": 1.1049511432647705,
      "learning_rate": 2e-05,
      "loss": 2.3974,
      "step": 10
    },
    {
      "epoch": 0.04417670682730924,
      "grad_norm": 1.0087590217590332,
      "learning_rate": 1.999390827019096e-05,
      "loss": 2.1244,
      "step": 11
    },
    {
      "epoch": 0.04819277108433735,
      "grad_norm": 1.021081805229187,
      "learning_rate": 1.9975640502598243e-05,
      "loss": 1.6924,
      "step": 12
    },
    {
      "epoch": 0.05220883534136546,
      "grad_norm": 0.8872256278991699,
      "learning_rate": 1.9945218953682736e-05,
      "loss": 1.8039,
      "step": 13
    },
    {
      "epoch": 0.05622489959839357,
      "grad_norm": 1.019092321395874,
      "learning_rate": 1.9902680687415704e-05,
      "loss": 1.8685,
      "step": 14
    },
    {
      "epoch": 0.060240963855421686,
      "grad_norm": 1.4794679880142212,
      "learning_rate": 1.9848077530122083e-05,
      "loss": 2.1413,
      "step": 15
    },
    {
      "epoch": 0.0642570281124498,
      "grad_norm": 1.2401800155639648,
      "learning_rate": 1.9781476007338058e-05,
      "loss": 2.1287,
      "step": 16
    },
    {
      "epoch": 0.06827309236947791,
      "grad_norm": 1.0547970533370972,
      "learning_rate": 1.9702957262759964e-05,
      "loss": 1.1896,
      "step": 17
    },
    {
      "epoch": 0.07228915662650602,
      "grad_norm": 1.1523879766464233,
      "learning_rate": 1.961261695938319e-05,
      "loss": 1.5816,
      "step": 18
    },
    {
      "epoch": 0.07228915662650602,
      "eval_loss": 0.985172688961029,
      "eval_runtime": 32.6746,
      "eval_samples_per_second": 6.427,
      "eval_steps_per_second": 0.826,
      "step": 18
    },
    {
      "epoch": 0.07630522088353414,
      "grad_norm": 1.4140926599502563,
      "learning_rate": 1.9510565162951538e-05,
      "loss": 1.9035,
      "step": 19
    },
    {
      "epoch": 0.08032128514056225,
      "grad_norm": 1.519662618637085,
      "learning_rate": 1.9396926207859085e-05,
      "loss": 1.9739,
      "step": 20
    },
    {
      "epoch": 0.08433734939759036,
      "grad_norm": 0.977906346321106,
      "learning_rate": 1.9271838545667876e-05,
      "loss": 1.1063,
      "step": 21
    },
    {
      "epoch": 0.08835341365461848,
      "grad_norm": 1.301216959953308,
      "learning_rate": 1.913545457642601e-05,
      "loss": 1.7746,
      "step": 22
    },
    {
      "epoch": 0.09236947791164658,
      "grad_norm": 1.1488702297210693,
      "learning_rate": 1.8987940462991673e-05,
      "loss": 1.5895,
      "step": 23
    },
    {
      "epoch": 0.0963855421686747,
      "grad_norm": 1.351751685142517,
      "learning_rate": 1.8829475928589272e-05,
      "loss": 2.5842,
      "step": 24
    },
    {
      "epoch": 0.10040160642570281,
      "grad_norm": 1.2297149896621704,
      "learning_rate": 1.866025403784439e-05,
      "loss": 1.935,
      "step": 25
    }
  ],
  "logging_steps": 1,
  "max_steps": 100,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 3.173894848512e+16,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}