{
  "best_metric": 0.5459790825843811,
  "best_model_checkpoint": "miner_id_24/checkpoint-25",
  "epoch": 0.12876227265411236,
  "eval_steps": 25,
  "global_step": 25,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.005150490906164494,
      "grad_norm": 195.5614013671875,
      "learning_rate": 5e-05,
      "loss": 183.4214,
      "step": 1
    },
    {
      "epoch": 0.005150490906164494,
      "eval_loss": 6.080627918243408,
      "eval_runtime": 4.0984,
      "eval_samples_per_second": 12.2,
      "eval_steps_per_second": 3.172,
      "step": 1
    },
    {
      "epoch": 0.010300981812328988,
      "grad_norm": 212.53482055664062,
      "learning_rate": 0.0001,
      "loss": 184.5833,
      "step": 2
    },
    {
      "epoch": 0.01545147271849348,
      "grad_norm": 182.6468963623047,
      "learning_rate": 9.990365154573717e-05,
      "loss": 173.0269,
      "step": 3
    },
    {
      "epoch": 0.020601963624657976,
      "grad_norm": 172.3017120361328,
      "learning_rate": 9.961501876182148e-05,
      "loss": 161.0496,
      "step": 4
    },
    {
      "epoch": 0.02575245453082247,
      "grad_norm": 148.2727813720703,
      "learning_rate": 9.913533761814537e-05,
      "loss": 127.753,
      "step": 5
    },
    {
      "epoch": 0.03090294543698696,
      "grad_norm": 159.01747131347656,
      "learning_rate": 9.846666218300807e-05,
      "loss": 109.5256,
      "step": 6
    },
    {
      "epoch": 0.03605343634315146,
      "grad_norm": 125.15324401855469,
      "learning_rate": 9.761185582727977e-05,
      "loss": 84.6997,
      "step": 7
    },
    {
      "epoch": 0.04120392724931595,
      "grad_norm": 95.49300384521484,
      "learning_rate": 9.657457896300791e-05,
      "loss": 76.9491,
      "step": 8
    },
    {
      "epoch": 0.04635441815548044,
      "grad_norm": 85.85482025146484,
      "learning_rate": 9.535927336897098e-05,
      "loss": 69.9889,
      "step": 9
    },
    {
      "epoch": 0.05150490906164494,
      "grad_norm": 102.58478546142578,
      "learning_rate": 9.397114317029975e-05,
      "loss": 64.823,
      "step": 10
    },
    {
      "epoch": 0.05665539996780943,
      "grad_norm": 96.248291015625,
      "learning_rate": 9.241613255361455e-05,
      "loss": 62.2639,
      "step": 11
    },
    {
      "epoch": 0.06180589087397392,
      "grad_norm": 107.83429718017578,
      "learning_rate": 9.070090031310558e-05,
      "loss": 56.1169,
      "step": 12
    },
    {
      "epoch": 0.06695638178013842,
      "grad_norm": 96.302001953125,
      "learning_rate": 8.883279133655399e-05,
      "loss": 63.8062,
      "step": 13
    },
    {
      "epoch": 0.07210687268630292,
      "grad_norm": 139.03134155273438,
      "learning_rate": 8.681980515339464e-05,
      "loss": 47.5832,
      "step": 14
    },
    {
      "epoch": 0.0772573635924674,
      "grad_norm": 91.16569519042969,
      "learning_rate": 8.467056167950311e-05,
      "loss": 41.2896,
      "step": 15
    },
    {
      "epoch": 0.0824078544986319,
      "grad_norm": 101.69834899902344,
      "learning_rate": 8.239426430539243e-05,
      "loss": 40.1252,
      "step": 16
    },
    {
      "epoch": 0.0875583454047964,
      "grad_norm": 94.4932861328125,
      "learning_rate": 8.000066048588211e-05,
      "loss": 35.824,
      "step": 17
    },
    {
      "epoch": 0.09270883631096088,
      "grad_norm": 65.91960906982422,
      "learning_rate": 7.75e-05,
      "loss": 32.7603,
      "step": 18
    },
    {
      "epoch": 0.09785932721712538,
      "grad_norm": 45.94514846801758,
      "learning_rate": 7.490299105985507e-05,
      "loss": 28.143,
      "step": 19
    },
    {
      "epoch": 0.10300981812328988,
      "grad_norm": 49.135292053222656,
      "learning_rate": 7.222075445642904e-05,
      "loss": 24.3015,
      "step": 20
    },
    {
      "epoch": 0.10816030902945437,
      "grad_norm": 52.58118438720703,
      "learning_rate": 6.946477593864228e-05,
      "loss": 24.7888,
      "step": 21
    },
    {
      "epoch": 0.11331079993561886,
      "grad_norm": 40.06536865234375,
      "learning_rate": 6.664685702961344e-05,
      "loss": 21.0641,
      "step": 22
    },
    {
      "epoch": 0.11846129084178336,
      "grad_norm": 61.566043853759766,
      "learning_rate": 6.377906449072578e-05,
      "loss": 19.7781,
      "step": 23
    },
    {
      "epoch": 0.12361178174794785,
      "grad_norm": 54.66169357299805,
      "learning_rate": 6.087367864990233e-05,
      "loss": 20.4498,
      "step": 24
    },
    {
      "epoch": 0.12876227265411236,
      "grad_norm": 158.48204040527344,
      "learning_rate": 5.794314081535644e-05,
      "loss": 31.4924,
      "step": 25
    },
    {
      "epoch": 0.12876227265411236,
      "eval_loss": 0.5459790825843811,
      "eval_runtime": 4.1249,
      "eval_samples_per_second": 12.121,
      "eval_steps_per_second": 3.152,
      "step": 25
    }
  ],
  "logging_steps": 1,
  "max_steps": 50,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 2.629552387915776e+17,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}