{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.8280430582390285,
  "eval_steps": 500,
  "global_step": 1500,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.05520287054926856,
      "grad_norm": 1.2856197357177734,
      "learning_rate": 1.1037527593818985e-05,
      "loss": 0.7609,
      "step": 100
    },
    {
      "epoch": 0.11040574109853712,
      "grad_norm": 1.0077331066131592,
      "learning_rate": 2.207505518763797e-05,
      "loss": 0.5552,
      "step": 200
    },
    {
      "epoch": 0.1656086116478057,
      "grad_norm": 1.2774763107299805,
      "learning_rate": 3.311258278145696e-05,
      "loss": 0.5229,
      "step": 300
    },
    {
      "epoch": 0.22081148219707425,
      "grad_norm": 1.3611174821853638,
      "learning_rate": 4.415011037527594e-05,
      "loss": 0.5191,
      "step": 400
    },
    {
      "epoch": 0.2760143527463428,
      "grad_norm": 1.112617015838623,
      "learning_rate": 5.518763796909493e-05,
      "loss": 0.5068,
      "step": 500
    },
    {
      "epoch": 0.3312172232956114,
      "grad_norm": 1.1730616092681885,
      "learning_rate": 6.622516556291392e-05,
      "loss": 0.4895,
      "step": 600
    },
    {
      "epoch": 0.3864200938448799,
      "grad_norm": 1.1149307489395142,
      "learning_rate": 7.726269315673289e-05,
      "loss": 0.5044,
      "step": 700
    },
    {
      "epoch": 0.4416229643941485,
      "grad_norm": 1.1326206922531128,
      "learning_rate": 8.830022075055188e-05,
      "loss": 0.5081,
      "step": 800
    },
    {
      "epoch": 0.49682583494341703,
      "grad_norm": 1.1592661142349243,
      "learning_rate": 9.933774834437086e-05,
      "loss": 0.5024,
      "step": 900
    },
    {
      "epoch": 0.5520287054926856,
      "grad_norm": 1.4300315380096436,
      "learning_rate": 9.996717238759354e-05,
      "loss": 0.5078,
      "step": 1000
    },
    {
      "epoch": 0.6072315760419542,
      "grad_norm": 1.3841311931610107,
      "learning_rate": 9.986022415440564e-05,
      "loss": 0.5091,
      "step": 1100
    },
    {
      "epoch": 0.6624344465912227,
      "grad_norm": 1.3825188875198364,
      "learning_rate": 9.967918047007774e-05,
      "loss": 0.4915,
      "step": 1200
    },
    {
      "epoch": 0.7176373171404913,
      "grad_norm": 1.248660683631897,
      "learning_rate": 9.942431037699172e-05,
      "loss": 0.5049,
      "step": 1300
    },
    {
      "epoch": 0.7728401876897598,
      "grad_norm": 1.2805066108703613,
      "learning_rate": 9.909599262824883e-05,
      "loss": 0.4787,
      "step": 1400
    },
    {
      "epoch": 0.8280430582390285,
      "grad_norm": 1.5455862283706665,
      "learning_rate": 9.869471512481871e-05,
      "loss": 0.4844,
      "step": 1500
    }
  ],
  "logging_steps": 100,
  "max_steps": 9055,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 5,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 7.944410411838996e+17,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}