fats-fme committed on
Commit a073efb
1 Parent(s): 07852c9

Training in progress, step 24, checkpoint

last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:9414035a263696e37060fa959a03ec8e17fc44763468a3b5369ba73a6ac77878
+ oid sha256:a86ba9385bb768d29deab79937c811ef79a601d619335c9e94af62c9f11709ce
  size 70667778
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:8b56707cf0118f1c71ed1325203a29952e82af4ced399f22eab06a5f52bf685e
+ oid sha256:ff1de991a5e8b028670d79166d941671fcf1f558f0044b08eaf5154d066f0048
  size 14512
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:ac52da7845dfee7f009bf115e99d18473acfef24b7a31c6c60182154e212e940
+ oid sha256:0a5995616407d184ecedacf74ac31cd0731474c7e2323f03405d89945bad93ff
  size 14512
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:eab5119e8dfb2acf8726e3995087ca3cdec10190f996fbdc638954b26a5cddc2
+ oid sha256:b375860c66674ec9637bb873a1ff676ec525ee5502840d2f1d554d4237b8f0c4
  size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
    "best_metric": null,
    "best_model_checkpoint": null,
-   "epoch": 0.256,
+   "epoch": 0.512,
    "eval_steps": 12,
-   "global_step": 12,
+   "global_step": 24,
    "is_hyper_param_search": false,
    "is_local_process_zero": true,
    "is_world_process_zero": true,
@@ -107,6 +107,98 @@
        "eval_samples_per_second": 21.668,
        "eval_steps_per_second": 5.486,
        "step": 12
+     },
+     {
+       "epoch": 0.2773333333333333,
+       "grad_norm": NaN,
+       "learning_rate": 2.6000000000000002e-05,
+       "loss": 0.0,
+       "step": 13
+     },
+     {
+       "epoch": 0.2986666666666667,
+       "grad_norm": NaN,
+       "learning_rate": 2.8000000000000003e-05,
+       "loss": 0.0,
+       "step": 14
+     },
+     {
+       "epoch": 0.32,
+       "grad_norm": NaN,
+       "learning_rate": 3e-05,
+       "loss": 0.0,
+       "step": 15
+     },
+     {
+       "epoch": 0.3413333333333333,
+       "grad_norm": NaN,
+       "learning_rate": 3.2000000000000005e-05,
+       "loss": 0.0,
+       "step": 16
+     },
+     {
+       "epoch": 0.3626666666666667,
+       "grad_norm": NaN,
+       "learning_rate": 3.4000000000000007e-05,
+       "loss": 0.0,
+       "step": 17
+     },
+     {
+       "epoch": 0.384,
+       "grad_norm": NaN,
+       "learning_rate": 3.6e-05,
+       "loss": 0.0,
+       "step": 18
+     },
+     {
+       "epoch": 0.4053333333333333,
+       "grad_norm": NaN,
+       "learning_rate": 3.8e-05,
+       "loss": 0.0,
+       "step": 19
+     },
+     {
+       "epoch": 0.4266666666666667,
+       "grad_norm": NaN,
+       "learning_rate": 4e-05,
+       "loss": 0.0,
+       "step": 20
+     },
+     {
+       "epoch": 0.448,
+       "grad_norm": NaN,
+       "learning_rate": 4.2e-05,
+       "loss": 0.0,
+       "step": 21
+     },
+     {
+       "epoch": 0.4693333333333333,
+       "grad_norm": NaN,
+       "learning_rate": 4.4000000000000006e-05,
+       "loss": 0.0,
+       "step": 22
+     },
+     {
+       "epoch": 0.49066666666666664,
+       "grad_norm": NaN,
+       "learning_rate": 4.600000000000001e-05,
+       "loss": 0.0,
+       "step": 23
+     },
+     {
+       "epoch": 0.512,
+       "grad_norm": NaN,
+       "learning_rate": 4.8e-05,
+       "loss": 0.0,
+       "step": 24
+     },
+     {
+       "epoch": 0.512,
+       "eval_loss": NaN,
+       "eval_runtime": 3.751,
+       "eval_samples_per_second": 21.061,
+       "eval_steps_per_second": 5.332,
+       "step": 24
      }
    ],
    "logging_steps": 1,
@@ -126,7 +218,7 @@
        "attributes": {}
      }
    },
-   "total_flos": 1730290321981440.0,
+   "total_flos": 3460580643962880.0,
    "train_batch_size": 2,
    "trial_name": null,
    "trial_params": null