dimasik87 committed (verified)
Commit 51fa518 · Parent: 804756a

Training in progress, step 20, checkpoint
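The files in this commit (adapter_model.safetensors together with optimizer.pt, scheduler.pt, rng_state.pth and trainer_state.json) are the layout the Hugging Face Trainer writes when checkpointing a PEFT/LoRA fine-tune. As context only, here is a minimal sketch of a training setup that would produce such a checkpoint at step 20; the batch size and the logging/eval cadence are taken from trainer_state.json below, while the base model, LoRA config, dataset, learning rate and scheduler are assumptions, not read from this commit.

    # Sketch only, not the author's script. Values marked "assumed" are guesses;
    # batch size, logging_steps and eval_steps come from trainer_state.json.
    from transformers import AutoModelForCausalLM, AutoTokenizer, Trainer, TrainingArguments
    from peft import LoraConfig, get_peft_model

    base = "meta-llama/Llama-2-7b-hf"                  # assumed base model
    tokenizer = AutoTokenizer.from_pretrained(base)
    model = AutoModelForCausalLM.from_pretrained(base)
    model = get_peft_model(model, LoraConfig(r=16, lora_alpha=32, task_type="CAUSAL_LM"))  # assumed LoRA config

    args = TrainingArguments(
        output_dir="outputs",                  # checkpoints land in outputs/checkpoint-<step>
        per_device_train_batch_size=2,         # "train_batch_size": 2 in trainer_state.json
        logging_steps=1,                       # "logging_steps": 1
        eval_strategy="steps",                 # "evaluation_strategy" in older transformers
        eval_steps=5,                          # "eval_steps": 5
        save_steps=5,                          # assumed; this commit uploads the step-20 save
        learning_rate=2e-4,                    # assumed; consistent with the logged LR values
        lr_scheduler_type="cosine",            # assumed from the decaying learning_rate curve
    )

    # train_ds / eval_ds are placeholders; the dataset is not part of this repository.
    trainer = Trainer(model=model, args=args, train_dataset=train_ds, eval_dataset=eval_ds)
    trainer.train()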
last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:959933e1af9857170a4b99347ec3762d0d5f8de070b5a1d13603b3caf975bf48
+oid sha256:4c233f1c620c09bf19f826d1fa61facea592ff868c11502b37369601e02f8356
 size 83945296
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:d4c40c52a45a881eb464287fa64d73edebeb642339a644660b5f6de8173aa66d
+oid sha256:d552aa8a592378110f79c37c3fed8a2f487ef00c846e2d86597ff438816332e7
 size 168149074
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:18c91c570b07019a1068294715f275674400f8131145c713b4a085e0034f5c76
+oid sha256:5a3b96852ea2a689adf8f1cc86874c6e93a130324b13ae84494c715e8376b0b4
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:9d42533d2ce676231a5f1b4cd529190e6e167a0f19701b5e4bdd8cf1b59bbcf1
+oid sha256:5d605401690d7669ff16aeaca6820cbd8d0d605afe748c51045ce90888810a22
 size 1064
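Each of the four files above is tracked with Git LFS, so the diff only changes the three-line pointer (spec version, sha256 oid, byte size); the binary payloads live in LFS storage, and the unchanged sizes show that only the tensor contents were updated. As an illustration (not part of the commit), a downloaded blob can be checked against the oid recorded in its pointer:

    # Illustration only: verify a downloaded checkpoint blob against the
    # sha256 oid from its Git LFS pointer file.
    import hashlib

    def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
        h = hashlib.sha256()
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(chunk_size), b""):
                h.update(chunk)
        return h.hexdigest()

    # oid of the new adapter_model.safetensors pointer in this commit
    expected = "4c233f1c620c09bf19f826d1fa61facea592ff868c11502b37369601e02f8356"
    assert sha256_of("last-checkpoint/adapter_model.safetensors") == expected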
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.06571741511500548,
+  "epoch": 0.08762322015334063,
   "eval_steps": 5,
-  "global_step": 15,
+  "global_step": 20,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -144,6 +144,49 @@
       "eval_samples_per_second": 9.406,
       "eval_steps_per_second": 4.751,
       "step": 15
+    },
+    {
+      "epoch": 0.0700985761226725,
+      "grad_norm": 0.1723138689994812,
+      "learning_rate": 0.0001891006524188368,
+      "loss": 0.0143,
+      "step": 16
+    },
+    {
+      "epoch": 0.07447973713033954,
+      "grad_norm": 0.11893691122531891,
+      "learning_rate": 0.00018526401643540922,
+      "loss": 0.0115,
+      "step": 17
+    },
+    {
+      "epoch": 0.07886089813800658,
+      "grad_norm": 0.23403066396713257,
+      "learning_rate": 0.00018090169943749476,
+      "loss": 0.0339,
+      "step": 18
+    },
+    {
+      "epoch": 0.0832420591456736,
+      "grad_norm": 0.4720830023288727,
+      "learning_rate": 0.0001760405965600031,
+      "loss": 0.0228,
+      "step": 19
+    },
+    {
+      "epoch": 0.08762322015334063,
+      "grad_norm": 0.11689777672290802,
+      "learning_rate": 0.00017071067811865476,
+      "loss": 0.0248,
+      "step": 20
+    },
+    {
+      "epoch": 0.08762322015334063,
+      "eval_loss": 0.017750361934304237,
+      "eval_runtime": 10.3124,
+      "eval_samples_per_second": 9.406,
+      "eval_steps_per_second": 4.752,
+      "step": 20
     }
   ],
   "logging_steps": 1,
@@ -163,7 +206,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 1.109738548887552e+16,
+  "total_flos": 1.479651398516736e+16,
   "train_batch_size": 2,
   "trial_name": null,
   "trial_params": null