{
  "best_metric": 0.7244487404823303,
  "best_model_checkpoint": "miner_id_24/checkpoint-50",
  "epoch": 0.8097165991902834,
  "eval_steps": 50,
  "global_step": 100,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.008097165991902834,
      "grad_norm": 160.5043182373047,
      "learning_rate": 1e-06,
      "loss": 11.5445,
      "step": 1
    },
    {
      "epoch": 0.008097165991902834,
      "eval_loss": 11.895453453063965,
      "eval_runtime": 1.2561,
      "eval_samples_per_second": 41.398,
      "eval_steps_per_second": 10.349,
      "step": 1
    },
    {
      "epoch": 0.016194331983805668,
      "grad_norm": 113.18714904785156,
      "learning_rate": 2e-06,
      "loss": 12.2262,
      "step": 2
    },
    {
      "epoch": 0.024291497975708502,
      "grad_norm": 134.26119995117188,
      "learning_rate": 3e-06,
      "loss": 12.1703,
      "step": 3
    },
    {
      "epoch": 0.032388663967611336,
      "grad_norm": 151.49256896972656,
      "learning_rate": 4e-06,
      "loss": 11.2431,
      "step": 4
    },
    {
      "epoch": 0.04048582995951417,
      "grad_norm": 128.835205078125,
      "learning_rate": 4.9999999999999996e-06,
      "loss": 12.309,
      "step": 5
    },
    {
      "epoch": 0.048582995951417005,
      "grad_norm": 126.78450012207031,
      "learning_rate": 6e-06,
      "loss": 12.4684,
      "step": 6
    },
    {
      "epoch": 0.05668016194331984,
      "grad_norm": 128.61981201171875,
      "learning_rate": 7e-06,
      "loss": 11.275,
      "step": 7
    },
    {
      "epoch": 0.06477732793522267,
      "grad_norm": 132.9573516845703,
      "learning_rate": 8e-06,
      "loss": 9.7015,
      "step": 8
    },
    {
      "epoch": 0.0728744939271255,
      "grad_norm": 114.10719299316406,
      "learning_rate": 9e-06,
      "loss": 8.4523,
      "step": 9
    },
    {
      "epoch": 0.08097165991902834,
      "grad_norm": 102.33385467529297,
      "learning_rate": 9.999999999999999e-06,
      "loss": 7.9721,
      "step": 10
    },
    {
      "epoch": 0.08906882591093117,
      "grad_norm": 85.0543212890625,
      "learning_rate": 1.1e-05,
      "loss": 6.5994,
      "step": 11
    },
    {
      "epoch": 0.09716599190283401,
      "grad_norm": 114.57733917236328,
      "learning_rate": 1.2e-05,
      "loss": 5.7194,
      "step": 12
    },
    {
      "epoch": 0.10526315789473684,
      "grad_norm": 84.9945297241211,
      "learning_rate": 1.3000000000000001e-05,
      "loss": 4.7599,
      "step": 13
    },
    {
      "epoch": 0.11336032388663968,
      "grad_norm": 82.66494750976562,
      "learning_rate": 1.4e-05,
      "loss": 6.2553,
      "step": 14
    },
    {
      "epoch": 0.1214574898785425,
      "grad_norm": 66.0272445678711,
      "learning_rate": 1.5e-05,
      "loss": 3.5185,
      "step": 15
    },
    {
      "epoch": 0.12955465587044535,
      "grad_norm": 59.03416061401367,
      "learning_rate": 1.6e-05,
      "loss": 2.4052,
      "step": 16
    },
    {
      "epoch": 0.13765182186234817,
      "grad_norm": 60.23641586303711,
      "learning_rate": 1.7e-05,
      "loss": 2.7286,
      "step": 17
    },
    {
      "epoch": 0.145748987854251,
      "grad_norm": 37.41645431518555,
      "learning_rate": 1.8e-05,
      "loss": 2.7418,
      "step": 18
    },
    {
      "epoch": 0.15384615384615385,
      "grad_norm": 31.624427795410156,
      "learning_rate": 1.9e-05,
      "loss": 1.5455,
      "step": 19
    },
    {
      "epoch": 0.16194331983805668,
      "grad_norm": 33.63788986206055,
      "learning_rate": 1.9999999999999998e-05,
      "loss": 2.2783,
      "step": 20
    },
    {
      "epoch": 0.1700404858299595,
      "grad_norm": 35.38190841674805,
      "learning_rate": 2.1e-05,
      "loss": 1.6235,
      "step": 21
    },
    {
      "epoch": 0.17813765182186234,
      "grad_norm": 46.83808135986328,
      "learning_rate": 2.2e-05,
      "loss": 1.8509,
      "step": 22
    },
    {
      "epoch": 0.1862348178137652,
      "grad_norm": 21.023977279663086,
      "learning_rate": 2.3000000000000003e-05,
      "loss": 0.9755,
      "step": 23
    },
    {
      "epoch": 0.19433198380566802,
      "grad_norm": 34.79740905761719,
      "learning_rate": 2.4e-05,
      "loss": 0.9948,
      "step": 24
    },
    {
      "epoch": 0.20242914979757085,
      "grad_norm": 26.90549659729004,
      "learning_rate": 2.5e-05,
      "loss": 0.7971,
      "step": 25
    },
    {
      "epoch": 0.21052631578947367,
      "grad_norm": 30.40579605102539,
      "learning_rate": 2.6000000000000002e-05,
      "loss": 1.1038,
      "step": 26
    },
    {
      "epoch": 0.21862348178137653,
      "grad_norm": 36.04926681518555,
      "learning_rate": 2.7000000000000002e-05,
      "loss": 0.7739,
      "step": 27
    },
    {
      "epoch": 0.22672064777327935,
      "grad_norm": 35.41645050048828,
      "learning_rate": 2.8e-05,
      "loss": 1.3239,
      "step": 28
    },
    {
      "epoch": 0.23481781376518218,
      "grad_norm": 26.374845504760742,
      "learning_rate": 2.9e-05,
      "loss": 1.2692,
      "step": 29
    },
    {
      "epoch": 0.242914979757085,
      "grad_norm": 24.77640151977539,
      "learning_rate": 3e-05,
      "loss": 0.8267,
      "step": 30
    },
    {
      "epoch": 0.25101214574898784,
      "grad_norm": 27.26534652709961,
      "learning_rate": 2.9984895998119723e-05,
      "loss": 0.8929,
      "step": 31
    },
    {
      "epoch": 0.2591093117408907,
      "grad_norm": 16.130844116210938,
      "learning_rate": 2.993961440992859e-05,
      "loss": 0.8517,
      "step": 32
    },
    {
      "epoch": 0.26720647773279355,
      "grad_norm": 18.54865074157715,
      "learning_rate": 2.9864246426519023e-05,
      "loss": 0.7742,
      "step": 33
    },
    {
      "epoch": 0.27530364372469635,
      "grad_norm": 12.706560134887695,
      "learning_rate": 2.9758943828979444e-05,
      "loss": 0.6831,
      "step": 34
    },
    {
      "epoch": 0.2834008097165992,
      "grad_norm": 12.227797508239746,
      "learning_rate": 2.9623918682727355e-05,
      "loss": 0.749,
      "step": 35
    },
    {
      "epoch": 0.291497975708502,
      "grad_norm": 17.94428062438965,
      "learning_rate": 2.9459442910437798e-05,
      "loss": 0.7489,
      "step": 36
    },
    {
      "epoch": 0.29959514170040485,
      "grad_norm": 12.291033744812012,
      "learning_rate": 2.9265847744427305e-05,
      "loss": 0.6317,
      "step": 37
    },
    {
      "epoch": 0.3076923076923077,
      "grad_norm": 23.2242431640625,
      "learning_rate": 2.904352305959606e-05,
      "loss": 0.9003,
      "step": 38
    },
    {
      "epoch": 0.3157894736842105,
      "grad_norm": 15.302254676818848,
      "learning_rate": 2.8792916588271762e-05,
      "loss": 0.6306,
      "step": 39
    },
    {
      "epoch": 0.32388663967611336,
      "grad_norm": 17.329072952270508,
      "learning_rate": 2.8514533018536286e-05,
      "loss": 0.8858,
      "step": 40
    },
    {
      "epoch": 0.3319838056680162,
      "grad_norm": 11.782532691955566,
      "learning_rate": 2.820893297785107e-05,
      "loss": 0.7977,
      "step": 41
    },
    {
      "epoch": 0.340080971659919,
      "grad_norm": 10.306597709655762,
      "learning_rate": 2.7876731904027994e-05,
      "loss": 0.6432,
      "step": 42
    },
    {
      "epoch": 0.3481781376518219,
      "grad_norm": 15.597854614257812,
      "learning_rate": 2.7518598805819542e-05,
      "loss": 0.6245,
      "step": 43
    },
    {
      "epoch": 0.3562753036437247,
      "grad_norm": 13.594710350036621,
      "learning_rate": 2.7135254915624213e-05,
      "loss": 0.7767,
      "step": 44
    },
    {
      "epoch": 0.3643724696356275,
      "grad_norm": 9.834576606750488,
      "learning_rate": 2.672747223702045e-05,
      "loss": 0.768,
      "step": 45
    },
    {
      "epoch": 0.3724696356275304,
      "grad_norm": 4.552821159362793,
      "learning_rate": 2.6296071990054167e-05,
      "loss": 0.6271,
      "step": 46
    },
    {
      "epoch": 0.3805668016194332,
      "grad_norm": 9.330282211303711,
      "learning_rate": 2.5841922957410875e-05,
      "loss": 0.7619,
      "step": 47
    },
    {
      "epoch": 0.38866396761133604,
      "grad_norm": 10.170756340026855,
      "learning_rate": 2.5365939734802973e-05,
      "loss": 0.7114,
      "step": 48
    },
    {
      "epoch": 0.3967611336032389,
      "grad_norm": 7.350127696990967,
      "learning_rate": 2.4869080889095693e-05,
      "loss": 0.7545,
      "step": 49
    },
    {
      "epoch": 0.4048582995951417,
      "grad_norm": 11.022176742553711,
      "learning_rate": 2.4352347027881003e-05,
      "loss": 0.7612,
      "step": 50
    },
    {
      "epoch": 0.4048582995951417,
      "eval_loss": 0.7244487404823303,
      "eval_runtime": 1.2651,
      "eval_samples_per_second": 41.102,
      "eval_steps_per_second": 10.275,
      "step": 50
    },
    {
      "epoch": 0.41295546558704455,
      "grad_norm": 6.105991363525391,
      "learning_rate": 2.3816778784387097e-05,
      "loss": 0.7112,
      "step": 51
    },
    {
      "epoch": 0.42105263157894735,
      "grad_norm": 5.9137163162231445,
      "learning_rate": 2.3263454721781537e-05,
      "loss": 0.6772,
      "step": 52
    },
    {
      "epoch": 0.4291497975708502,
      "grad_norm": 6.709981918334961,
      "learning_rate": 2.2693489161088592e-05,
      "loss": 0.7273,
      "step": 53
    },
    {
      "epoch": 0.43724696356275305,
      "grad_norm": 4.602910041809082,
      "learning_rate": 2.210802993709498e-05,
      "loss": 0.7377,
      "step": 54
    },
    {
      "epoch": 0.44534412955465585,
      "grad_norm": 8.946381568908691,
      "learning_rate": 2.1508256086763372e-05,
      "loss": 0.6814,
      "step": 55
    },
    {
      "epoch": 0.4534412955465587,
      "grad_norm": 5.092156887054443,
      "learning_rate": 2.0895375474808857e-05,
      "loss": 0.6879,
      "step": 56
    },
    {
      "epoch": 0.46153846153846156,
      "grad_norm": 4.789443016052246,
      "learning_rate": 2.0270622361220143e-05,
      "loss": 0.6239,
      "step": 57
    },
    {
      "epoch": 0.46963562753036436,
      "grad_norm": 4.035036087036133,
      "learning_rate": 1.963525491562421e-05,
      "loss": 0.6038,
      "step": 58
    },
    {
      "epoch": 0.4777327935222672,
      "grad_norm": 4.545828819274902,
      "learning_rate": 1.8990552683500128e-05,
      "loss": 0.7022,
      "step": 59
    },
    {
      "epoch": 0.48582995951417,
      "grad_norm": 6.806140899658203,
      "learning_rate": 1.8337814009344716e-05,
      "loss": 0.7606,
      "step": 60
    },
    {
      "epoch": 0.4939271255060729,
      "grad_norm": 4.344721794128418,
      "learning_rate": 1.767835342197955e-05,
      "loss": 0.6619,
      "step": 61
    },
    {
      "epoch": 0.5020242914979757,
      "grad_norm": 5.1954345703125,
      "learning_rate": 1.7013498987264832e-05,
      "loss": 0.6223,
      "step": 62
    },
    {
      "epoch": 0.5101214574898786,
      "grad_norm": 8.843324661254883,
      "learning_rate": 1.6344589633551502e-05,
      "loss": 0.833,
      "step": 63
    },
    {
      "epoch": 0.5182186234817814,
      "grad_norm": 8.470301628112793,
      "learning_rate": 1.5672972455257726e-05,
      "loss": 0.872,
      "step": 64
    },
    {
      "epoch": 0.5263157894736842,
      "grad_norm": 8.447321891784668,
      "learning_rate": 1.5e-05,
      "loss": 0.8794,
      "step": 65
    },
    {
      "epoch": 0.5344129554655871,
      "grad_norm": 7.982692241668701,
      "learning_rate": 1.4327027544742281e-05,
      "loss": 0.8019,
      "step": 66
    },
    {
      "epoch": 0.5425101214574899,
      "grad_norm": 9.073297500610352,
      "learning_rate": 1.36554103664485e-05,
      "loss": 0.6302,
      "step": 67
    },
    {
      "epoch": 0.5506072874493927,
      "grad_norm": 4.642770767211914,
      "learning_rate": 1.2986501012735174e-05,
      "loss": 0.6202,
      "step": 68
    },
    {
      "epoch": 0.5587044534412956,
      "grad_norm": 5.061112403869629,
      "learning_rate": 1.2321646578020452e-05,
      "loss": 0.6198,
      "step": 69
    },
    {
      "epoch": 0.5668016194331984,
      "grad_norm": 7.011550426483154,
      "learning_rate": 1.1662185990655285e-05,
      "loss": 0.8346,
      "step": 70
    },
    {
      "epoch": 0.5748987854251012,
      "grad_norm": 4.438355445861816,
      "learning_rate": 1.1009447316499875e-05,
      "loss": 0.6787,
      "step": 71
    },
    {
      "epoch": 0.582995951417004,
      "grad_norm": 6.0901055335998535,
      "learning_rate": 1.036474508437579e-05,
      "loss": 0.8458,
      "step": 72
    },
    {
      "epoch": 0.5910931174089069,
      "grad_norm": 76.47030639648438,
      "learning_rate": 9.729377638779859e-06,
      "loss": 0.8839,
      "step": 73
    },
    {
      "epoch": 0.5991902834008097,
      "grad_norm": 6.974242687225342,
      "learning_rate": 9.104624525191147e-06,
      "loss": 0.733,
      "step": 74
    },
    {
      "epoch": 0.6072874493927125,
      "grad_norm": 7.109065055847168,
      "learning_rate": 8.491743913236629e-06,
      "loss": 0.8075,
      "step": 75
    },
    {
      "epoch": 0.6153846153846154,
      "grad_norm": 8.350259780883789,
      "learning_rate": 7.89197006290502e-06,
      "loss": 0.7467,
      "step": 76
    },
    {
      "epoch": 0.6234817813765182,
      "grad_norm": 5.253984451293945,
      "learning_rate": 7.30651083891141e-06,
      "loss": 0.7108,
      "step": 77
    },
    {
      "epoch": 0.631578947368421,
      "grad_norm": 7.530630111694336,
      "learning_rate": 6.736545278218464e-06,
      "loss": 0.7081,
      "step": 78
    },
    {
      "epoch": 0.6396761133603239,
      "grad_norm": 18.218433380126953,
      "learning_rate": 6.1832212156129045e-06,
      "loss": 0.6932,
      "step": 79
    },
    {
      "epoch": 0.6477732793522267,
      "grad_norm": 5.084344387054443,
      "learning_rate": 5.647652972118998e-06,
      "loss": 0.7684,
      "step": 80
    },
    {
      "epoch": 0.6558704453441295,
      "grad_norm": 4.171597480773926,
      "learning_rate": 5.130919110904311e-06,
      "loss": 0.7214,
      "step": 81
    },
    {
      "epoch": 0.6639676113360324,
      "grad_norm": 5.165872573852539,
      "learning_rate": 4.6340602651970304e-06,
      "loss": 0.7379,
      "step": 82
    },
    {
      "epoch": 0.6720647773279352,
      "grad_norm": 8.189788818359375,
      "learning_rate": 4.158077042589129e-06,
      "loss": 0.8242,
      "step": 83
    },
    {
      "epoch": 0.680161943319838,
      "grad_norm": 5.687074661254883,
      "learning_rate": 3.7039280099458373e-06,
      "loss": 0.6482,
      "step": 84
    },
    {
      "epoch": 0.6882591093117408,
      "grad_norm": 4.640507221221924,
      "learning_rate": 3.272527762979553e-06,
      "loss": 0.7058,
      "step": 85
    },
    {
      "epoch": 0.6963562753036437,
      "grad_norm": 4.909543514251709,
      "learning_rate": 2.86474508437579e-06,
      "loss": 0.6626,
      "step": 86
    },
    {
      "epoch": 0.7044534412955465,
      "grad_norm": 5.721399784088135,
      "learning_rate": 2.4814011941804603e-06,
      "loss": 0.7401,
      "step": 87
    },
    {
      "epoch": 0.7125506072874493,
      "grad_norm": 4.203503131866455,
      "learning_rate": 2.1232680959720085e-06,
      "loss": 0.7072,
      "step": 88
    },
    {
      "epoch": 0.7206477732793523,
      "grad_norm": 7.876823425292969,
      "learning_rate": 1.79106702214893e-06,
      "loss": 0.559,
      "step": 89
    },
    {
      "epoch": 0.728744939271255,
      "grad_norm": 9.562280654907227,
      "learning_rate": 1.4854669814637145e-06,
      "loss": 0.7441,
      "step": 90
    },
    {
      "epoch": 0.7368421052631579,
      "grad_norm": 4.9196319580078125,
      "learning_rate": 1.2070834117282414e-06,
      "loss": 0.6806,
      "step": 91
    },
    {
      "epoch": 0.7449392712550608,
      "grad_norm": 4.913059711456299,
      "learning_rate": 9.56476940403942e-07,
      "loss": 0.6895,
      "step": 92
    },
    {
      "epoch": 0.7530364372469636,
      "grad_norm": 6.508387088775635,
      "learning_rate": 7.341522555726971e-07,
      "loss": 0.7577,
      "step": 93
    },
    {
      "epoch": 0.7611336032388664,
      "grad_norm": 3.305694103240967,
      "learning_rate": 5.405570895622014e-07,
      "loss": 0.7149,
      "step": 94
    },
    {
      "epoch": 0.7692307692307693,
      "grad_norm": 6.714405536651611,
      "learning_rate": 3.760813172726457e-07,
      "loss": 0.7731,
      "step": 95
    },
    {
      "epoch": 0.7773279352226721,
      "grad_norm": 4.101876735687256,
      "learning_rate": 2.41056171020555e-07,
      "loss": 0.7061,
      "step": 96
    },
    {
      "epoch": 0.7854251012145749,
      "grad_norm": 3.9142746925354004,
      "learning_rate": 1.357535734809795e-07,
      "loss": 0.7258,
      "step": 97
    },
    {
      "epoch": 0.7935222672064778,
      "grad_norm": 3.286195993423462,
      "learning_rate": 6.038559007141397e-08,
      "loss": 0.7034,
      "step": 98
    },
    {
      "epoch": 0.8016194331983806,
      "grad_norm": 4.398167133331299,
      "learning_rate": 1.510400188028116e-08,
      "loss": 0.6205,
      "step": 99
    },
    {
      "epoch": 0.8097165991902834,
      "grad_norm": 4.571166515350342,
      "learning_rate": 0.0,
      "loss": 0.733,
      "step": 100
    },
    {
      "epoch": 0.8097165991902834,
      "eval_loss": 0.728439450263977,
      "eval_runtime": 1.266,
      "eval_samples_per_second": 41.075,
      "eval_steps_per_second": 10.269,
      "step": 100
    }
  ],
  "logging_steps": 1,
  "max_steps": 100,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 50,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 5,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 1
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1932120607948800.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}