{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.2521739130434781,
"eval_steps": 9,
"global_step": 72,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.017391304347826087,
"eval_loss": 1.59055495262146,
"eval_runtime": 2.6995,
"eval_samples_per_second": 35.932,
"eval_steps_per_second": 4.816,
"step": 1
},
{
"epoch": 0.05217391304347826,
"grad_norm": 0.5041409134864807,
"learning_rate": 1.5e-05,
"loss": 1.7739,
"step": 3
},
{
"epoch": 0.10434782608695652,
"grad_norm": 0.5414333343505859,
"learning_rate": 3e-05,
"loss": 1.402,
"step": 6
},
{
"epoch": 0.1565217391304348,
"grad_norm": 0.3529357612133026,
"learning_rate": 4.5e-05,
"loss": 1.6939,
"step": 9
},
{
"epoch": 0.1565217391304348,
"eval_loss": 1.5777380466461182,
"eval_runtime": 2.6751,
"eval_samples_per_second": 36.26,
"eval_steps_per_second": 4.86,
"step": 9
},
{
"epoch": 0.20869565217391303,
"grad_norm": 0.3947656452655792,
"learning_rate": 4.993910125649561e-05,
"loss": 1.7722,
"step": 12
},
{
"epoch": 0.2608695652173913,
"grad_norm": 0.4697956442832947,
"learning_rate": 4.962019382530521e-05,
"loss": 1.6984,
"step": 15
},
{
"epoch": 0.3130434782608696,
"grad_norm": 0.34927043318748474,
"learning_rate": 4.9031542398457974e-05,
"loss": 1.7339,
"step": 18
},
{
"epoch": 0.3130434782608696,
"eval_loss": 1.4486348628997803,
"eval_runtime": 2.683,
"eval_samples_per_second": 36.153,
"eval_steps_per_second": 4.845,
"step": 18
},
{
"epoch": 0.3652173913043478,
"grad_norm": 0.4831562042236328,
"learning_rate": 4.817959636416969e-05,
"loss": 1.7264,
"step": 21
},
{
"epoch": 0.41739130434782606,
"grad_norm": 0.56829833984375,
"learning_rate": 4.707368982147318e-05,
"loss": 1.7197,
"step": 24
},
{
"epoch": 0.46956521739130436,
"grad_norm": 0.5362353920936584,
"learning_rate": 4.572593931387604e-05,
"loss": 1.5556,
"step": 27
},
{
"epoch": 0.46956521739130436,
"eval_loss": 1.2610658407211304,
"eval_runtime": 2.6707,
"eval_samples_per_second": 36.32,
"eval_steps_per_second": 4.868,
"step": 27
},
{
"epoch": 0.5217391304347826,
"grad_norm": 0.5250989198684692,
"learning_rate": 4.415111107797445e-05,
"loss": 1.5176,
"step": 30
},
{
"epoch": 0.5739130434782609,
"grad_norm": 0.4496021866798401,
"learning_rate": 4.2366459261474933e-05,
"loss": 1.2318,
"step": 33
},
{
"epoch": 0.6260869565217392,
"grad_norm": 0.5903450846672058,
"learning_rate": 4.039153688314145e-05,
"loss": 1.5067,
"step": 36
},
{
"epoch": 0.6260869565217392,
"eval_loss": 1.09618079662323,
"eval_runtime": 2.705,
"eval_samples_per_second": 35.859,
"eval_steps_per_second": 4.806,
"step": 36
},
{
"epoch": 0.6782608695652174,
"grad_norm": 0.5637620687484741,
"learning_rate": 3.824798160583012e-05,
"loss": 1.2329,
"step": 39
},
{
"epoch": 0.7304347826086957,
"grad_norm": 0.5909444689750671,
"learning_rate": 3.5959278669726935e-05,
"loss": 1.1362,
"step": 42
},
{
"epoch": 0.782608695652174,
"grad_norm": 0.4471411406993866,
"learning_rate": 3.355050358314172e-05,
"loss": 1.3341,
"step": 45
},
{
"epoch": 0.782608695652174,
"eval_loss": 1.0226554870605469,
"eval_runtime": 2.6701,
"eval_samples_per_second": 36.328,
"eval_steps_per_second": 4.869,
"step": 45
},
{
"epoch": 0.8347826086956521,
"grad_norm": 0.4761001169681549,
"learning_rate": 3.104804738999169e-05,
"loss": 1.0345,
"step": 48
},
{
"epoch": 0.8869565217391304,
"grad_norm": 0.4806910753250122,
"learning_rate": 2.8479327524001636e-05,
"loss": 0.9969,
"step": 51
},
{
"epoch": 0.9391304347826087,
"grad_norm": 0.6003803014755249,
"learning_rate": 2.587248741756253e-05,
"loss": 1.2715,
"step": 54
},
{
"epoch": 0.9391304347826087,
"eval_loss": 0.9930722117424011,
"eval_runtime": 2.6757,
"eval_samples_per_second": 36.252,
"eval_steps_per_second": 4.859,
"step": 54
},
{
"epoch": 0.991304347826087,
"grad_norm": 0.5368942618370056,
"learning_rate": 2.3256088156396868e-05,
"loss": 1.1923,
"step": 57
},
{
"epoch": 1.0434782608695652,
"grad_norm": 0.5342758893966675,
"learning_rate": 2.0658795558326743e-05,
"loss": 1.0875,
"step": 60
},
{
"epoch": 1.0956521739130434,
"grad_norm": 0.4160667359828949,
"learning_rate": 1.8109066104575023e-05,
"loss": 1.3013,
"step": 63
},
{
"epoch": 1.0956521739130434,
"eval_loss": 0.9771125912666321,
"eval_runtime": 2.6789,
"eval_samples_per_second": 36.209,
"eval_steps_per_second": 4.853,
"step": 63
},
{
"epoch": 1.1478260869565218,
"grad_norm": 0.38854989409446716,
"learning_rate": 1.56348351646022e-05,
"loss": 1.4154,
"step": 66
},
{
"epoch": 1.2,
"grad_norm": 0.5519515872001648,
"learning_rate": 1.3263210930352737e-05,
"loss": 1.1699,
"step": 69
},
{
"epoch": 1.2521739130434781,
"grad_norm": 0.3825100362300873,
"learning_rate": 1.1020177413231334e-05,
"loss": 1.188,
"step": 72
},
{
"epoch": 1.2521739130434781,
"eval_loss": 0.9695695638656616,
"eval_runtime": 2.6795,
"eval_samples_per_second": 36.201,
"eval_steps_per_second": 4.852,
"step": 72
}
],
"logging_steps": 3,
"max_steps": 100,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 9,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 1.8932733953703936e+16,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}
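
Note: the JSON above follows the standard Hugging Face Trainer checkpoint layout, where training-loss and eval-loss records are interleaved in "log_history". The following is a minimal sketch, not part of the checkpoint itself, showing one way to separate and print those records; the file name "trainer_state.json" is an assumption based on that standard layout.

import json

# Load the checkpoint state (assumed to be saved as trainer_state.json
# inside a checkpoint-72/ directory, per the usual Trainer convention).
with open("trainer_state.json") as f:
    state = json.load(f)

# Entries with "loss" are training logs (every logging_steps=3 steps);
# entries with "eval_loss" are evaluation logs (every eval_steps=9 steps).
train_log = [e for e in state["log_history"] if "loss" in e]
eval_log = [e for e in state["log_history"] if "eval_loss" in e]

for entry in train_log:
    print(f"step {entry['step']:>3}  loss {entry['loss']:.4f}  lr {entry['learning_rate']:.2e}")

for entry in eval_log:
    print(f"step {entry['step']:>3}  eval_loss {entry['eval_loss']:.4f}")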