{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.987012987012987,
  "eval_steps": 500,
  "global_step": 19,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.05194805194805195,
      "grad_norm": 4.959436893463135,
      "learning_rate": 2e-05,
      "loss": 2.0897,
      "step": 1
    },
    {
      "epoch": 0.1038961038961039,
      "grad_norm": 3.942784547805786,
      "learning_rate": 4e-05,
      "loss": 2.3866,
      "step": 2
    },
    {
      "epoch": 0.15584415584415584,
      "grad_norm": 4.221513748168945,
      "learning_rate": 6e-05,
      "loss": 2.4587,
      "step": 3
    },
    {
      "epoch": 0.2077922077922078,
      "grad_norm": 3.284791946411133,
      "learning_rate": 8e-05,
      "loss": 2.1799,
      "step": 4
    },
    {
      "epoch": 0.2597402597402597,
      "grad_norm": 5.71047306060791,
      "learning_rate": 0.0001,
      "loss": 2.1779,
      "step": 5
    },
    {
      "epoch": 0.3116883116883117,
      "grad_norm": 3.1526741981506348,
      "learning_rate": 0.00012,
      "loss": 2.1932,
      "step": 6
    },
    {
      "epoch": 0.36363636363636365,
      "grad_norm": 3.1342685222625732,
      "learning_rate": 0.00014,
      "loss": 1.4442,
      "step": 7
    },
    {
      "epoch": 0.4155844155844156,
      "grad_norm": 5.338088035583496,
      "learning_rate": 0.00016,
      "loss": 2.0,
      "step": 8
    },
    {
      "epoch": 0.4675324675324675,
      "grad_norm": 4.116346836090088,
      "learning_rate": 0.00018,
      "loss": 1.5286,
      "step": 9
    },
    {
      "epoch": 0.5194805194805194,
      "grad_norm": 2.5058088302612305,
      "learning_rate": 0.0002,
      "loss": 1.4238,
      "step": 10
    },
    {
      "epoch": 0.5714285714285714,
      "grad_norm": 4.636209487915039,
      "learning_rate": 0.0001999317060143023,
      "loss": 1.511,
      "step": 11
    },
    {
      "epoch": 0.6233766233766234,
      "grad_norm": 3.1061031818389893,
      "learning_rate": 0.00019972691733857883,
      "loss": 1.3241,
      "step": 12
    },
    {
      "epoch": 0.6753246753246753,
      "grad_norm": 2.3636441230773926,
      "learning_rate": 0.0001993859136895274,
      "loss": 1.044,
      "step": 13
    },
    {
      "epoch": 0.7272727272727273,
      "grad_norm": 2.5445024967193604,
      "learning_rate": 0.0001989091608371146,
      "loss": 0.9548,
      "step": 14
    },
    {
      "epoch": 0.7792207792207793,
      "grad_norm": 2.371872901916504,
      "learning_rate": 0.0001982973099683902,
      "loss": 1.0614,
      "step": 15
    },
    {
      "epoch": 0.8311688311688312,
      "grad_norm": 5.0257039070129395,
      "learning_rate": 0.00019755119679804367,
      "loss": 1.1408,
      "step": 16
    },
    {
      "epoch": 0.8831168831168831,
      "grad_norm": 1.9052318334579468,
      "learning_rate": 0.00019667184042691875,
      "loss": 0.8993,
      "step": 17
    },
    {
      "epoch": 0.935064935064935,
      "grad_norm": 2.2592344284057617,
      "learning_rate": 0.0001956604419500441,
      "loss": 1.0319,
      "step": 18
    },
    {
      "epoch": 0.987012987012987,
      "grad_norm": 2.264328718185425,
      "learning_rate": 0.00019451838281608197,
      "loss": 1.483,
      "step": 19
    }
  ],
  "logging_steps": 1,
  "max_steps": 95,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 5,
  "save_steps": 19,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 6.307716021406925e+16,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}