{
"best_metric": 0.14551572501659393,
"best_model_checkpoint": "multilingual-e5-small-aligned-sentiment-20241214-new/checkpoint-23439",
"epoch": 3.0,
"eval_steps": 500,
"global_step": 23439,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.06399590426212723,
"grad_norm": 1.8201525211334229,
"learning_rate": 4.8933401595631215e-05,
"loss": 0.3311,
"step": 500
},
{
"epoch": 0.12799180852425446,
"grad_norm": 1.933944821357727,
"learning_rate": 4.786680319126243e-05,
"loss": 0.259,
"step": 1000
},
{
"epoch": 0.19198771278638166,
"grad_norm": 1.5761635303497314,
"learning_rate": 4.680020478689364e-05,
"loss": 0.244,
"step": 1500
},
{
"epoch": 0.2559836170485089,
"grad_norm": 2.812056303024292,
"learning_rate": 4.573360638252485e-05,
"loss": 0.2353,
"step": 2000
},
{
"epoch": 0.3199795213106361,
"grad_norm": 2.022963285446167,
"learning_rate": 4.4667007978156063e-05,
"loss": 0.2261,
"step": 2500
},
{
"epoch": 0.3839754255727633,
"grad_norm": 2.1023800373077393,
"learning_rate": 4.360040957378728e-05,
"loss": 0.22,
"step": 3000
},
{
"epoch": 0.4479713298348906,
"grad_norm": 1.8184232711791992,
"learning_rate": 4.2533811169418495e-05,
"loss": 0.2166,
"step": 3500
},
{
"epoch": 0.5119672340970178,
"grad_norm": 1.9675606489181519,
"learning_rate": 4.146721276504971e-05,
"loss": 0.2124,
"step": 4000
},
{
"epoch": 0.575963138359145,
"grad_norm": 1.7122745513916016,
"learning_rate": 4.040061436068092e-05,
"loss": 0.209,
"step": 4500
},
{
"epoch": 0.6399590426212722,
"grad_norm": 2.49656081199646,
"learning_rate": 3.933401595631213e-05,
"loss": 0.205,
"step": 5000
},
{
"epoch": 0.7039549468833994,
"grad_norm": 3.310446262359619,
"learning_rate": 3.8267417551943344e-05,
"loss": 0.203,
"step": 5500
},
{
"epoch": 0.7679508511455266,
"grad_norm": 2.4122066497802734,
"learning_rate": 3.7200819147574556e-05,
"loss": 0.1964,
"step": 6000
},
{
"epoch": 0.831946755407654,
"grad_norm": 1.8602417707443237,
"learning_rate": 3.613422074320577e-05,
"loss": 0.1982,
"step": 6500
},
{
"epoch": 0.8959426596697811,
"grad_norm": 3.1396756172180176,
"learning_rate": 3.506762233883698e-05,
"loss": 0.1935,
"step": 7000
},
{
"epoch": 0.9599385639319084,
"grad_norm": 4.411809921264648,
"learning_rate": 3.400102393446819e-05,
"loss": 0.1946,
"step": 7500
},
{
"epoch": 1.0,
"eval_loss": 0.16471454501152039,
"eval_mse": 0.16471455099075522,
"eval_runtime": 97.1359,
"eval_samples_per_second": 1874.807,
"eval_steps_per_second": 234.352,
"step": 7813
},
{
"epoch": 1.0239344681940357,
"grad_norm": 2.101229429244995,
"learning_rate": 3.293442553009941e-05,
"loss": 0.1776,
"step": 8000
},
{
"epoch": 1.0879303724561629,
"grad_norm": 1.918332576751709,
"learning_rate": 3.1867827125730624e-05,
"loss": 0.1548,
"step": 8500
},
{
"epoch": 1.15192627671829,
"grad_norm": 2.1414854526519775,
"learning_rate": 3.0801228721361836e-05,
"loss": 0.1489,
"step": 9000
},
{
"epoch": 1.2159221809804173,
"grad_norm": 1.7239971160888672,
"learning_rate": 2.9734630316993045e-05,
"loss": 0.1472,
"step": 9500
},
{
"epoch": 1.2799180852425445,
"grad_norm": 1.5635288953781128,
"learning_rate": 2.866803191262426e-05,
"loss": 0.1491,
"step": 10000
},
{
"epoch": 1.3439139895046717,
"grad_norm": 2.0498759746551514,
"learning_rate": 2.7601433508255476e-05,
"loss": 0.1493,
"step": 10500
},
{
"epoch": 1.4079098937667989,
"grad_norm": 1.2982215881347656,
"learning_rate": 2.6534835103886685e-05,
"loss": 0.1458,
"step": 11000
},
{
"epoch": 1.471905798028926,
"grad_norm": 1.9183140993118286,
"learning_rate": 2.54682366995179e-05,
"loss": 0.149,
"step": 11500
},
{
"epoch": 1.5359017022910533,
"grad_norm": 1.4604493379592896,
"learning_rate": 2.4401638295149112e-05,
"loss": 0.1441,
"step": 12000
},
{
"epoch": 1.5998976065531805,
"grad_norm": 1.4501017332077026,
"learning_rate": 2.3335039890780325e-05,
"loss": 0.1483,
"step": 12500
},
{
"epoch": 1.6638935108153077,
"grad_norm": 1.7177495956420898,
"learning_rate": 2.2268441486411537e-05,
"loss": 0.1443,
"step": 13000
},
{
"epoch": 1.727889415077435,
"grad_norm": 2.2127881050109863,
"learning_rate": 2.120184308204275e-05,
"loss": 0.1458,
"step": 13500
},
{
"epoch": 1.7918853193395623,
"grad_norm": 1.5237836837768555,
"learning_rate": 2.0135244677673965e-05,
"loss": 0.1435,
"step": 14000
},
{
"epoch": 1.8558812236016895,
"grad_norm": 2.217888355255127,
"learning_rate": 1.9068646273305177e-05,
"loss": 0.141,
"step": 14500
},
{
"epoch": 1.9198771278638167,
"grad_norm": 3.9023308753967285,
"learning_rate": 1.800204786893639e-05,
"loss": 0.139,
"step": 15000
},
{
"epoch": 1.983873032125944,
"grad_norm": 1.6923686265945435,
"learning_rate": 1.69354494645676e-05,
"loss": 0.1385,
"step": 15500
},
{
"epoch": 2.0,
"eval_loss": 0.15275108814239502,
"eval_mse": 0.15275108203598506,
"eval_runtime": 101.0827,
"eval_samples_per_second": 1801.605,
"eval_steps_per_second": 225.202,
"step": 15626
},
{
"epoch": 2.0478689363880713,
"grad_norm": 3.1119792461395264,
"learning_rate": 1.5868851060198814e-05,
"loss": 0.1234,
"step": 16000
},
{
"epoch": 2.1118648406501985,
"grad_norm": 1.3244566917419434,
"learning_rate": 1.480225265583003e-05,
"loss": 0.1168,
"step": 16500
},
{
"epoch": 2.1758607449123257,
"grad_norm": 1.8541600704193115,
"learning_rate": 1.3735654251461241e-05,
"loss": 0.1129,
"step": 17000
},
{
"epoch": 2.239856649174453,
"grad_norm": 1.291128396987915,
"learning_rate": 1.2669055847092454e-05,
"loss": 0.1161,
"step": 17500
},
{
"epoch": 2.30385255343658,
"grad_norm": 1.4121185541152954,
"learning_rate": 1.1602457442723666e-05,
"loss": 0.1137,
"step": 18000
},
{
"epoch": 2.3678484576987073,
"grad_norm": 1.1205600500106812,
"learning_rate": 1.053585903835488e-05,
"loss": 0.1151,
"step": 18500
},
{
"epoch": 2.4318443619608345,
"grad_norm": 2.112236976623535,
"learning_rate": 9.469260633986092e-06,
"loss": 0.114,
"step": 19000
},
{
"epoch": 2.4958402662229617,
"grad_norm": 1.419636845588684,
"learning_rate": 8.402662229617304e-06,
"loss": 0.1152,
"step": 19500
},
{
"epoch": 2.559836170485089,
"grad_norm": 1.89504873752594,
"learning_rate": 7.336063825248518e-06,
"loss": 0.1153,
"step": 20000
},
{
"epoch": 2.623832074747216,
"grad_norm": 1.8703386783599854,
"learning_rate": 6.26946542087973e-06,
"loss": 0.1131,
"step": 20500
},
{
"epoch": 2.6878279790093433,
"grad_norm": 1.3758282661437988,
"learning_rate": 5.202867016510943e-06,
"loss": 0.1134,
"step": 21000
},
{
"epoch": 2.7518238832714705,
"grad_norm": 0.9953869581222534,
"learning_rate": 4.1362686121421564e-06,
"loss": 0.1122,
"step": 21500
},
{
"epoch": 2.8158197875335977,
"grad_norm": 1.4371693134307861,
"learning_rate": 3.069670207773369e-06,
"loss": 0.1117,
"step": 22000
},
{
"epoch": 2.879815691795725,
"grad_norm": 1.3081939220428467,
"learning_rate": 2.003071803404582e-06,
"loss": 0.1121,
"step": 22500
},
{
"epoch": 2.943811596057852,
"grad_norm": 1.5292563438415527,
"learning_rate": 9.364733990357951e-07,
"loss": 0.1121,
"step": 23000
},
{
"epoch": 3.0,
"eval_loss": 0.14551572501659393,
"eval_mse": 0.14551572992011078,
"eval_runtime": 108.2898,
"eval_samples_per_second": 1681.7,
"eval_steps_per_second": 210.214,
"step": 23439
},
{
"epoch": 3.0,
"step": 23439,
"total_flos": 4.9403660544e+16,
"train_loss": 0.16052449254575832,
"train_runtime": 3301.1135,
"train_samples_per_second": 908.784,
"train_steps_per_second": 7.1
}
],
"logging_steps": 500,
"max_steps": 23439,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 4.9403660544e+16,
"train_batch_size": 128,
"trial_name": null,
"trial_params": null
}
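
Minimal sketch (not part of the original trainer state): assuming the JSON above is saved locally as "trainer_state.json" (a hypothetical path), this shows how the Hugging Face Trainer state could be parsed to list the per-epoch eval_loss values and the best checkpoint recorded above.

import json

# Assumption: the JSON document above has been saved to this path.
with open("trainer_state.json") as f:
    state = json.load(f)

# Training entries carry a "loss" key; evaluation entries carry "eval_loss".
train_log = [e for e in state["log_history"] if "loss" in e]
eval_log = [e for e in state["log_history"] if "eval_loss" in e]

for e in eval_log:
    print(f"epoch {e['epoch']:.0f}: eval_loss={e['eval_loss']:.4f} (step {e['step']})")

print("best checkpoint:", state["best_model_checkpoint"],
      "best metric:", state["best_metric"])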