{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 7.917525773195877,
  "eval_steps": 30,
  "global_step": 48,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.16494845360824742,
      "grad_norm": 1.6045022010803223,
      "learning_rate": 2.9375e-05,
      "loss": 10.1016,
      "step": 1
    },
    {
      "epoch": 0.32989690721649484,
      "grad_norm": 1.7116177082061768,
      "learning_rate": 2.875e-05,
      "loss": 10.2357,
      "step": 2
    },
    {
      "epoch": 0.4948453608247423,
      "grad_norm": 1.7441893815994263,
      "learning_rate": 2.8125e-05,
      "loss": 10.0531,
      "step": 3
    },
    {
      "epoch": 0.6597938144329897,
      "grad_norm": 2.1694791316986084,
      "learning_rate": 2.75e-05,
      "loss": 10.2016,
      "step": 4
    },
    {
      "epoch": 0.8247422680412371,
      "grad_norm": 2.1092519760131836,
      "learning_rate": 2.6875000000000003e-05,
      "loss": 10.0665,
      "step": 5
    },
    {
      "epoch": 0.9896907216494846,
      "grad_norm": 2.16532301902771,
      "learning_rate": 2.625e-05,
      "loss": 9.8524,
      "step": 6
    },
    {
      "epoch": 1.1546391752577319,
      "grad_norm": 2.3102128505706787,
      "learning_rate": 2.5625e-05,
      "loss": 9.9192,
      "step": 7
    },
    {
      "epoch": 1.3195876288659794,
      "grad_norm": 2.627959966659546,
      "learning_rate": 2.5e-05,
      "loss": 9.8474,
      "step": 8
    },
    {
      "epoch": 1.4845360824742269,
      "grad_norm": 3.065120220184326,
      "learning_rate": 2.4375e-05,
      "loss": 9.8735,
      "step": 9
    },
    {
      "epoch": 1.6494845360824741,
      "grad_norm": 2.6623458862304688,
      "learning_rate": 2.3749999999999998e-05,
      "loss": 9.6075,
      "step": 10
    },
    {
      "epoch": 1.8144329896907216,
      "grad_norm": 2.8638663291931152,
      "learning_rate": 2.3125000000000003e-05,
      "loss": 9.6006,
      "step": 11
    },
    {
      "epoch": 1.9793814432989691,
      "grad_norm": 3.157634735107422,
      "learning_rate": 2.25e-05,
      "loss": 9.511,
      "step": 12
    },
    {
      "epoch": 2.1443298969072164,
      "grad_norm": 3.286606788635254,
      "learning_rate": 2.1875e-05,
      "loss": 9.4582,
      "step": 13
    },
    {
      "epoch": 2.3092783505154637,
      "grad_norm": 3.352224588394165,
      "learning_rate": 2.125e-05,
      "loss": 9.3268,
      "step": 14
    },
    {
      "epoch": 2.4742268041237114,
      "grad_norm": 3.678314208984375,
      "learning_rate": 2.0625e-05,
      "loss": 9.3043,
      "step": 15
    },
    {
      "epoch": 2.6391752577319587,
      "grad_norm": 3.6437971591949463,
      "learning_rate": 1.9999999999999998e-05,
      "loss": 9.1926,
      "step": 16
    },
    {
      "epoch": 2.804123711340206,
      "grad_norm": 3.841749429702759,
      "learning_rate": 1.9375e-05,
      "loss": 9.0797,
      "step": 17
    },
    {
      "epoch": 2.9690721649484537,
      "grad_norm": 3.7387771606445312,
      "learning_rate": 1.8750000000000002e-05,
      "loss": 9.0155,
      "step": 18
    },
    {
      "epoch": 3.134020618556701,
      "grad_norm": 3.5350756645202637,
      "learning_rate": 1.8125e-05,
      "loss": 8.8982,
      "step": 19
    },
    {
      "epoch": 3.2989690721649483,
      "grad_norm": 3.9751718044281006,
      "learning_rate": 1.7500000000000002e-05,
      "loss": 8.8391,
      "step": 20
    },
    {
      "epoch": 3.463917525773196,
      "grad_norm": 3.5453691482543945,
      "learning_rate": 1.6875e-05,
      "loss": 8.8007,
      "step": 21
    },
    {
      "epoch": 3.6288659793814433,
      "grad_norm": 3.744359254837036,
      "learning_rate": 1.625e-05,
      "loss": 8.6382,
      "step": 22
    },
    {
      "epoch": 3.7938144329896906,
      "grad_norm": 4.300820350646973,
      "learning_rate": 1.5625e-05,
      "loss": 8.5496,
      "step": 23
    },
    {
      "epoch": 3.9587628865979383,
      "grad_norm": 3.571364164352417,
      "learning_rate": 1.5e-05,
      "loss": 8.4785,
      "step": 24
    },
    {
      "epoch": 4.123711340206185,
      "grad_norm": 3.5201902389526367,
      "learning_rate": 1.4375e-05,
      "loss": 8.4425,
      "step": 25
    },
    {
      "epoch": 4.288659793814433,
      "grad_norm": 3.3802483081817627,
      "learning_rate": 1.375e-05,
      "loss": 8.4132,
      "step": 26
    },
    {
      "epoch": 4.453608247422681,
      "grad_norm": 3.3173177242279053,
      "learning_rate": 1.3125e-05,
      "loss": 8.3052,
      "step": 27
    },
    {
      "epoch": 4.618556701030927,
      "grad_norm": 3.4614617824554443,
      "learning_rate": 1.25e-05,
      "loss": 8.1881,
      "step": 28
    },
    {
      "epoch": 4.783505154639175,
      "grad_norm": 3.216718912124634,
      "learning_rate": 1.1874999999999999e-05,
      "loss": 8.2378,
      "step": 29
    },
    {
      "epoch": 4.948453608247423,
      "grad_norm": 3.36505126953125,
      "learning_rate": 1.125e-05,
      "loss": 8.1809,
      "step": 30
    },
    {
      "epoch": 4.948453608247423,
      "eval_audio_cosine_sim": 0.557277500629425,
      "eval_loss": 3.095508575439453,
      "eval_runtime": 2121.8578,
      "eval_samples_per_second": 0.006,
      "eval_steps_per_second": 0.006,
      "eval_text_cosine_sim": 0.3839966356754303,
      "step": 30
    },
    {
      "epoch": 5.11340206185567,
      "grad_norm": 3.3731958866119385,
      "learning_rate": 1.0625e-05,
      "loss": 8.1047,
      "step": 31
    },
    {
      "epoch": 5.278350515463917,
      "grad_norm": 3.572460651397705,
      "learning_rate": 9.999999999999999e-06,
      "loss": 7.9906,
      "step": 32
    },
    {
      "epoch": 5.443298969072165,
      "grad_norm": 3.343137502670288,
      "learning_rate": 9.375000000000001e-06,
      "loss": 8.1973,
      "step": 33
    },
    {
      "epoch": 5.608247422680412,
      "grad_norm": 3.5458569526672363,
      "learning_rate": 8.750000000000001e-06,
      "loss": 8.0155,
      "step": 34
    },
    {
      "epoch": 5.77319587628866,
      "grad_norm": 3.403402328491211,
      "learning_rate": 8.125e-06,
      "loss": 8.0047,
      "step": 35
    },
    {
      "epoch": 5.938144329896907,
      "grad_norm": 3.5528311729431152,
      "learning_rate": 7.5e-06,
      "loss": 7.8875,
      "step": 36
    },
    {
      "epoch": 6.103092783505154,
      "grad_norm": 3.659574270248413,
      "learning_rate": 6.875e-06,
      "loss": 7.82,
      "step": 37
    },
    {
      "epoch": 6.268041237113402,
      "grad_norm": 3.419759511947632,
      "learning_rate": 6.25e-06,
      "loss": 7.8027,
      "step": 38
    },
    {
      "epoch": 6.43298969072165,
      "grad_norm": 3.7508034706115723,
      "learning_rate": 5.625e-06,
      "loss": 7.8398,
      "step": 39
    },
    {
      "epoch": 6.597938144329897,
      "grad_norm": 3.735914945602417,
      "learning_rate": 4.9999999999999996e-06,
      "loss": 7.8924,
      "step": 40
    },
    {
      "epoch": 6.762886597938144,
      "grad_norm": 3.593177318572998,
      "learning_rate": 4.3750000000000005e-06,
      "loss": 7.9886,
      "step": 41
    },
    {
      "epoch": 6.927835051546392,
      "grad_norm": 3.5156137943267822,
      "learning_rate": 3.75e-06,
      "loss": 7.8703,
      "step": 42
    },
    {
      "epoch": 7.092783505154639,
      "grad_norm": 3.6164469718933105,
      "learning_rate": 3.125e-06,
      "loss": 7.7555,
      "step": 43
    },
    {
      "epoch": 7.257731958762887,
      "grad_norm": 3.5708608627319336,
      "learning_rate": 2.4999999999999998e-06,
      "loss": 7.7007,
      "step": 44
    },
    {
      "epoch": 7.422680412371134,
      "grad_norm": 3.396042585372925,
      "learning_rate": 1.875e-06,
      "loss": 7.9848,
      "step": 45
    },
    {
      "epoch": 7.587628865979381,
      "grad_norm": 3.5203356742858887,
      "learning_rate": 1.2499999999999999e-06,
      "loss": 7.85,
      "step": 46
    },
    {
      "epoch": 7.752577319587629,
      "grad_norm": 3.814443349838257,
      "learning_rate": 6.249999999999999e-07,
      "loss": 7.566,
      "step": 47
    },
    {
      "epoch": 7.917525773195877,
      "grad_norm": 3.4914655685424805,
      "learning_rate": 0.0,
      "loss": 7.8495,
      "step": 48
    },
    {
      "epoch": 7.917525773195877,
      "step": 48,
      "total_flos": 962957869218000.0,
      "train_loss": 8.715420136849085,
      "train_runtime": 7523.7142,
      "train_samples_per_second": 0.103,
      "train_steps_per_second": 0.006
    }
  ],
  "logging_steps": 1.0,
  "max_steps": 48,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 8,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": false,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 962957869218000.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}