{ "best_metric": NaN, "best_model_checkpoint": "miner_id_24/checkpoint-50", "epoch": 0.0196174595389897, "eval_steps": 25, "global_step": 50, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.00039234919077979404, "grad_norm": 0.7801864743232727, "learning_rate": 2.9999999999999997e-05, "loss": 1.1076, "step": 1 }, { "epoch": 0.00039234919077979404, "eval_loss": 2.6347837448120117, "eval_runtime": 5.3964, "eval_samples_per_second": 9.265, "eval_steps_per_second": 1.297, "step": 1 }, { "epoch": 0.0007846983815595881, "grad_norm": 0.4603287875652313, "learning_rate": 5.9999999999999995e-05, "loss": 1.7697, "step": 2 }, { "epoch": 0.0011770475723393821, "grad_norm": 0.5424301028251648, "learning_rate": 8.999999999999999e-05, "loss": 2.1838, "step": 3 }, { "epoch": 0.0015693967631191762, "grad_norm": 0.5732556581497192, "learning_rate": 0.00011999999999999999, "loss": 2.1462, "step": 4 }, { "epoch": 0.00196174595389897, "grad_norm": 0.5106447339057922, "learning_rate": 0.00015, "loss": 2.5732, "step": 5 }, { "epoch": 0.0023540951446787643, "grad_norm": 0.4012216329574585, "learning_rate": 0.00017999999999999998, "loss": 2.3035, "step": 6 }, { "epoch": 0.0027464443354585583, "grad_norm": 0.4464437961578369, "learning_rate": 0.00020999999999999998, "loss": 2.5018, "step": 7 }, { "epoch": 0.0031387935262383523, "grad_norm": 0.49094539880752563, "learning_rate": 0.00023999999999999998, "loss": 2.4145, "step": 8 }, { "epoch": 0.003531142717018146, "grad_norm": 0.5340158939361572, "learning_rate": 0.00027, "loss": 2.5605, "step": 9 }, { "epoch": 0.00392349190779794, "grad_norm": 0.469400018453598, "learning_rate": 0.0003, "loss": 2.5807, "step": 10 }, { "epoch": 0.0043158410985777345, "grad_norm": 0.40649205446243286, "learning_rate": 0.0002999794957488703, "loss": 2.3967, "step": 11 }, { "epoch": 0.0047081902893575285, "grad_norm": 0.43658336997032166, "learning_rate": 0.0002999179886011389, "loss": 2.3976, "step": 12 }, { "epoch": 0.0051005394801373225, "grad_norm": 0.5445865392684937, "learning_rate": 0.0002998154953722457, "loss": 2.3869, "step": 13 }, { "epoch": 0.005492888670917117, "grad_norm": 0.4407218098640442, "learning_rate": 0.00029967204408281613, "loss": 2.1568, "step": 14 }, { "epoch": 0.005885237861696911, "grad_norm": 0.3098980188369751, "learning_rate": 0.00029948767395100045, "loss": 2.1049, "step": 15 }, { "epoch": 0.006277587052476705, "grad_norm": 0.38185837864875793, "learning_rate": 0.0002992624353817517, "loss": 2.2054, "step": 16 }, { "epoch": 0.006669936243256498, "grad_norm": 0.3709363043308258, "learning_rate": 0.0002989963899530457, "loss": 2.1322, "step": 17 }, { "epoch": 0.007062285434036292, "grad_norm": 0.3822822570800781, "learning_rate": 0.00029868961039904624, "loss": 2.2528, "step": 18 }, { "epoch": 0.007454634624816086, "grad_norm": 0.40470457077026367, "learning_rate": 0.00029834218059022024, "loss": 2.1562, "step": 19 }, { "epoch": 0.00784698381559588, "grad_norm": 0.46746203303337097, "learning_rate": 0.00029795419551040833, "loss": 2.2253, "step": 20 }, { "epoch": 0.008239333006375674, "grad_norm": 0.5920839905738831, "learning_rate": 0.00029752576123085736, "loss": 2.2653, "step": 21 }, { "epoch": 0.008631682197155469, "grad_norm": 0.4972122609615326, "learning_rate": 0.0002970569948812214, "loss": 2.174, "step": 22 }, { "epoch": 0.009024031387935262, "grad_norm": 0.31875213980674744, "learning_rate": 0.0002965480246175399, "loss": 2.094, "step": 23 }, { "epoch": 
0.009416380578715057, "grad_norm": 0.49916020035743713, "learning_rate": 0.0002959989895872009, "loss": 2.0421, "step": 24 }, { "epoch": 0.00980872976949485, "grad_norm": 0.35592731833457947, "learning_rate": 0.0002954100398908995, "loss": 2.0896, "step": 25 }, { "epoch": 0.00980872976949485, "eval_loss": 1.9814097881317139, "eval_runtime": 4.9578, "eval_samples_per_second": 10.085, "eval_steps_per_second": 1.412, "step": 25 }, { "epoch": 0.010201078960274645, "grad_norm": 0.5454434156417847, "learning_rate": 0.0002947813365416023, "loss": 2.0981, "step": 26 }, { "epoch": 0.010593428151054438, "grad_norm": 0.41645103693008423, "learning_rate": 0.0002941130514205272, "loss": 2.006, "step": 27 }, { "epoch": 0.010985777341834233, "grad_norm": 0.539344847202301, "learning_rate": 0.0002934053672301536, "loss": 1.9416, "step": 28 }, { "epoch": 0.011378126532614026, "grad_norm": 0.408270001411438, "learning_rate": 0.00029265847744427303, "loss": 2.0615, "step": 29 }, { "epoch": 0.011770475723393821, "grad_norm": 0.4333832263946533, "learning_rate": 0.00029187258625509513, "loss": 2.0464, "step": 30 }, { "epoch": 0.012162824914173614, "grad_norm": 0.3659256398677826, "learning_rate": 0.00029104790851742417, "loss": 1.9172, "step": 31 }, { "epoch": 0.01255517410495341, "grad_norm": 0.45351165533065796, "learning_rate": 0.0002901846696899191, "loss": 1.9599, "step": 32 }, { "epoch": 0.012947523295733203, "grad_norm": 0.4614894688129425, "learning_rate": 0.00028928310577345606, "loss": 1.9147, "step": 33 }, { "epoch": 0.013339872486512996, "grad_norm": NaN, "learning_rate": 0.0002883434632466077, "loss": 1.9759, "step": 34 }, { "epoch": 0.01373222167729279, "grad_norm": NaN, "learning_rate": 0.00028736599899825856, "loss": 0.0, "step": 35 }, { "epoch": 0.014124570868072584, "grad_norm": NaN, "learning_rate": 0.00028635098025737434, "loss": 0.0, "step": 36 }, { "epoch": 0.014516920058852379, "grad_norm": NaN, "learning_rate": 0.00028529868451994384, "loss": 0.0, "step": 37 }, { "epoch": 0.014909269249632172, "grad_norm": NaN, "learning_rate": 0.0002842093994731145, "loss": 0.0, "step": 38 }, { "epoch": 0.015301618440411967, "grad_norm": NaN, "learning_rate": 0.00028308342291654174, "loss": 0.0, "step": 39 }, { "epoch": 0.01569396763119176, "grad_norm": NaN, "learning_rate": 0.00028192106268097334, "loss": 0.0, "step": 40 }, { "epoch": 0.016086316821971553, "grad_norm": NaN, "learning_rate": 0.00028072263654409154, "loss": 0.0, "step": 41 }, { "epoch": 0.016478666012751348, "grad_norm": NaN, "learning_rate": 0.0002794884721436361, "loss": 0.0, "step": 42 }, { "epoch": 0.016871015203531143, "grad_norm": NaN, "learning_rate": 0.00027821890688783083, "loss": 0.0, "step": 43 }, { "epoch": 0.017263364394310938, "grad_norm": NaN, "learning_rate": 0.0002769142878631403, "loss": 0.0, "step": 44 }, { "epoch": 0.01765571358509073, "grad_norm": NaN, "learning_rate": 0.00027557497173937923, "loss": 0.0, "step": 45 }, { "epoch": 0.018048062775870524, "grad_norm": NaN, "learning_rate": 0.000274201324672203, "loss": 0.0, "step": 46 }, { "epoch": 0.01844041196665032, "grad_norm": NaN, "learning_rate": 0.00027279372220300385, "loss": 0.0, "step": 47 }, { "epoch": 0.018832761157430114, "grad_norm": NaN, "learning_rate": 0.0002713525491562421, "loss": 0.0, "step": 48 }, { "epoch": 0.019225110348209905, "grad_norm": NaN, "learning_rate": 0.00026987819953423867, "loss": 0.0, "step": 49 }, { "epoch": 0.0196174595389897, "grad_norm": NaN, "learning_rate": 0.00026837107640945905, "loss": 0.0, "step": 50 }, { "epoch": 
0.0196174595389897, "eval_loss": NaN, "eval_runtime": 4.7136, "eval_samples_per_second": 10.608, "eval_steps_per_second": 1.485, "step": 50 } ], "logging_steps": 1, "max_steps": 200, "num_input_tokens_seen": 0, "num_train_epochs": 1, "save_steps": 50, "stateful_callbacks": { "EarlyStoppingCallback": { "args": { "early_stopping_patience": 1, "early_stopping_threshold": 0.0 }, "attributes": { "early_stopping_patience_counter": 0 } }, "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": false }, "attributes": {} } }, "total_flos": 4.877552082065818e+16, "train_batch_size": 8, "trial_name": null, "trial_params": null }